Dataset schema (column: type, size range):

repository_name: string, 7 to 55 characters
func_path_in_repository: string, 4 to 223 characters
func_name: string, 1 to 134 characters
whole_func_string: string, 75 to 104k characters
language: string, 1 distinct value
func_code_string: string, 75 to 104k characters
func_code_tokens: list, 19 to 28.4k items
func_documentation_string: string, 1 to 46.9k characters
func_documentation_tokens: list, 1 to 1.97k items
split_name: string, 1 distinct value
func_code_url: string, 87 to 315 characters
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemActiveMB
def GetMemActiveMB(self):
    '''Retrieves the amount of memory the virtual machine is actively
    using, that is, its estimated working set size.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemActiveMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L307-L313
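Every VMGuestLib accessor in the records that follow uses this same ctypes pattern: allocate a c_uint, pass it by reference to the matching native GuestSDK call, check the return code against VMGUESTLIB_ERROR_SUCCESS, and return the unwrapped value. A minimal usage sketch, assuming the code runs inside a VMware guest with the native library installed; the VMGuestLib() constructor and the UpdateInfo() refresh call are assumptions about the rest of the wrapper, which is not shown in these records:

from vmguestlib import VMGuestLib, VMGuestLibException

try:
    guestlib = VMGuestLib()   # assumed to open a GuestLib handle
    guestlib.UpdateInfo()     # assumed to refresh the counters before reading
    print('Active memory:    %d MB' % guestlib.GetMemActiveMB())
    print('Ballooned memory: %d MB' % guestlib.GetMemBalloonedMB())
except VMGuestLibException as e:
    # Raised whenever a native call returns anything other than
    # VMGUESTLIB_ERROR_SUCCESS, e.g. when not running under VMware.
    print('GuestLib error: %s' % e)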
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemBalloonedMB
def GetMemBalloonedMB(self):
    '''Retrieves the amount of memory that has been reclaimed from this
    virtual machine by the vSphere memory balloon driver (also referred
    to as the "vmmemctl" driver).'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemBalloonedMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L315-L322
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemBalloonMaxMB
def GetMemBalloonMaxMB(self):
    '''Undocumented.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemBalloonMaxMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L325-L330
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemBalloonTargetMB
def GetMemBalloonTargetMB(self):
    '''Undocumented.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemBalloonTargetMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L333-L338
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemLimitMB
def GetMemLimitMB(self):
    '''Retrieves the upper limit of memory that is available to the
    virtual machine. For information about setting a memory limit, see
    "Limits and Reservations" on page 14.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemLimitMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L340-L347
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemLLSwappedMB
def GetMemLLSwappedMB(self):
    '''Undocumented.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemLLSwappedMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L350-L355
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemMappedMB
def GetMemMappedMB(self):
    '''Retrieves the amount of memory that is allocated to the virtual
    machine. Memory that is ballooned, swapped, or has never been
    accessed is excluded.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemMappedMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L357-L364
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemOverheadMB
def GetMemOverheadMB(self):
    '''Retrieves the amount of "overhead" memory associated with this
    virtual machine that is currently consumed on the host system.
    Overhead memory is additional memory that is reserved for data
    structures required by the virtualization layer.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemOverheadMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L366-L374
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemReservationMB
def GetMemReservationMB(self):
    '''Retrieves the minimum amount of memory that is reserved for the
    virtual machine. For information about setting a memory reservation,
    see "Limits and Reservations" on page 14.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemReservationMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L376-L383
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemSharedMB
def GetMemSharedMB(self):
    '''Retrieves the amount of physical memory associated with this
    virtual machine that is copy-on-write (COW) shared on the host.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemSharedMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L385-L391
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemSharedSavedMB
def GetMemSharedSavedMB(self):
    '''Retrieves the estimated amount of physical memory on the host
    saved from copy-on-write (COW) shared guest physical memory.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemSharedSavedMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L393-L399
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemShares
def GetMemShares(self):
    '''Retrieves the number of memory shares allocated to the virtual
    machine. For information about how an ESX server uses memory shares
    to manage virtual machine priority, see the vSphere Resource
    Management Guide.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemShares(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L401-L408
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemSwappedMB
def GetMemSwappedMB(self):
    '''Retrieves the amount of memory that has been reclaimed from this
    virtual machine by transparently swapping guest memory to disk.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemSwappedMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L410-L416
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemSwapTargetMB
def GetMemSwapTargetMB(self):
    '''Undocumented.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemSwapTargetMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L419-L424
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemTargetSizeMB
def GetMemTargetSizeMB(self):
    '''Retrieves the size of the target memory allocation for this
    virtual machine.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemTargetSizeMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L426-L431
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemUsedMB
def GetMemUsedMB(self):
    '''Retrieves the estimated amount of physical host memory currently
    consumed for this virtual machine's physical memory.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemUsedMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L433-L439
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemZippedMB
def GetMemZippedMB(self):
    '''Undocumented.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemZippedMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L442-L447
dagwieers/vmguestlib
vmguestlib.py
VMGuestLib.GetMemZipSavedMB
def GetMemZipSavedMB(self):
    '''Undocumented.'''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetMemZipSavedMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
python
train
https://github.com/dagwieers/vmguestlib/blob/2ba9333a745628cf9e6b4c767427a5bd997a71ad/vmguestlib.py#L450-L455
markchil/gptools
gptools/splines.py
spev
def spev(t_int, C, deg, x, cov_C=None, M_spline=False, I_spline=False, n=0):
    """Evaluate a B-, M- or I-spline with the specified internal knots,
    order and coefficients.

    `deg` boundary knots are appended at both sides of the domain. The
    zeroth order basis functions are modified to ensure continuity at the
    right-hand boundary.

    Note that the I-splines include the :math:`i=0` case in order to have
    a "DC offset". This way your functions do not have to start at zero.
    If you want to not include this, simply set the first coefficient in
    `C` to zero.

    Parameters
    ----------
    t_int : array of float, (`M`,)
        The internal knot locations. Must be monotonic (this is NOT
        checked).
    C : array of float, (`M + deg - 1`,)
        The coefficients applied to the basis functions.
    deg : nonnegative int
        The polynomial degree to use.
    x : array of float, (`N`,)
        The locations to evaluate the spline at.
    cov_C : array of float, (`M + deg - 1`,) or (`M + deg - 1`, `M + deg - 1`), optional
        The covariance matrix of the coefficients. If a 1d array is
        passed, this is treated as the variance. If None, then the
        uncertainty is not computed.
    M_spline : bool, optional
        If True, compute the M-spline instead of the B-spline. M-splines
        are normalized to integrate to unity, as opposed to B-splines
        which sum to unity at all points. Default is False (compute
        B-spline).
    I_spline : bool, optional
        If True, compute the I-spline instead of the B-spline. Note that
        this will override `M_spline`. I-splines are the integrals of the
        M-splines, and hence ensure curves are monotonic if all
        coefficients are of the same sign. Note that the I-splines
        returned will be of polynomial degree `deg` (i.e., the integral
        of what is returned from calling the function with `deg=deg-1`
        and `M_spline=True`). Default is False (compute B-spline or
        M-spline).
    n : int, optional
        The derivative order to compute. Default is 0. If `n > deg`, all
        zeros are returned (i.e., the discontinuities are not included).

    Returns
    -------
    `y` or (`y`, `cov_y`):
        The values (and possibly uncertainties) of the spline at the
        specified locations.
    """
    C = scipy.asarray(C, dtype=float)
    t_int = scipy.asarray(t_int, dtype=float)
    if (t_int != scipy.sort(t_int)).any():
        raise ValueError("Knots must be in increasing order!")
    # if len(scipy.unique(t_int)) != len(t_int):
    #     raise ValueError("Knots must be unique!")
    if n > deg:
        return scipy.zeros_like(x, dtype=float)
    if I_spline:
        # I_{i,k} = int_L^x M_{i,k}(u)du, so just take the derivative of
        # the underlying M-spline. Discarding the first coefficient dumps
        # the "DC offset" term.
        if cov_C is not None:
            cov_C = scipy.asarray(cov_C)
            if cov_C.ndim == 1:
                cov_C = cov_C[1:]
            elif cov_C.ndim == 2:
                cov_C = cov_C[1:, 1:]
        if n > 0:
            return spev(
                t_int, C[1:], deg - 1, x,
                cov_C=cov_C, M_spline=True, I_spline=False, n=n - 1
            )
        M_spline = True
    if n > 0:
        if M_spline:
            t = scipy.concatenate(([t_int[0]] * deg, t_int, [t_int[-1]] * deg))
            C = (deg + 1.0) * (
                C[1:] / (t[deg + 2:len(t_int) + 2 * deg] - t[1:len(t_int) + deg - 1]) -
                C[:-1] / (t[deg + 1:len(t_int) + 2 * deg - 1] - t[:len(t_int) + deg - 2])
            )
        else:
            C = C[1:] - C[:-1]
        return spev(
            t_int, C, deg - 1, x,
            cov_C=cov_C, M_spline=True, I_spline=False, n=n - 1
        )
    if len(C) != len(t_int) + deg - 1:
        raise ValueError("Length of C must be equal to M + deg - 1!")
    # Append the external knots directly at the boundary:
    t = scipy.concatenate(([t_int[0]] * deg, t_int, [t_int[-1]] * deg))
    # Compute the different orders:
    B = scipy.zeros((deg + 1, len(t) - 1, len(x)))
    # NOTE: The first dimension is indexed by deg, and is zero-indexed.
    # Zeroth order: constant function
    d = 0
    for i in xrange(deg, deg + len(t_int) - 2 + 1):
        # The second condition contains a hack to make the basis functions
        # continuous at the right-hand edge.
        mask = (t[i] <= x) & (
            (x < t[i + 1]) | ((i == deg + len(t_int) - 2) & (x == t[-1]))
        )
        B[d, i, mask] = 1.0 / (t[i + 1] - t[i]) if M_spline else 1.0
    # Loop over other orders:
    for d in xrange(1, deg + 1):
        for i in xrange(deg - d, deg + len(t_int) - 2 + 1):
            if t[i + d] != t[i]:
                v = (x - t[i]) * B[d - 1, i, :]
                if not M_spline:
                    v /= t[i + d] - t[i]
                B[d, i, :] += v
            if t[i + d + 1] != t[i + 1]:
                v = (t[i + d + 1] - x) * B[d - 1, i + 1, :]
                if not M_spline:
                    v /= t[i + d + 1] - t[i + 1]
                B[d, i, :] += v
            if M_spline and ((t[i + d] != t[i]) or (t[i + d + 1] != t[i + 1])):
                B[d, i, :] *= (d + 1) / (d * (t[i + d + 1] - t[i]))
    B = B[deg, 0:len(C), :].T
    # Now compute the I-splines, if needed:
    if I_spline:
        I = scipy.zeros_like(B)
        for i in xrange(0, len(C)):
            for m in xrange(i, len(C)):
                I[:, i] += (t[m + deg + 1] - t[m]) * B[:, m] / (deg + 1.0)
        B = I
    y = B.dot(C)
    if cov_C is not None:
        cov_C = scipy.asarray(cov_C)
        # If there are no covariances, promote cov_C to a diagonal matrix:
        if cov_C.ndim == 1:
            cov_C = scipy.diag(cov_C)
        cov_y = B.dot(cov_C).dot(B.T)
        return (y, cov_y)
    else:
        return y
python
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/splines.py#L5-L146
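A brief sketch of calling spev, with hypothetical example data (the function body above uses scipy's old array re-exports and Python 2's xrange, so the same environment is assumed): with M = 4 internal knots and deg = 2, the coefficient vector must have M + deg - 1 = 5 entries.

import scipy

t_int = scipy.array([0.0, 1.0, 2.0, 3.0])   # M = 4 internal knots
C = scipy.array([0.5, 1.0, 2.0, 1.5, 0.5])  # M + deg - 1 = 5 coefficients
x = scipy.linspace(0.0, 3.0, 7)             # evaluation points in [0, 3]

y = spev(t_int, C, 2, x)                    # B-spline values
dy = spev(t_int, C, 2, x, n=1)              # first derivative
# Passing a 1d cov_C treats it as per-coefficient variance and also
# returns the covariance of y:
y, cov_y = spev(t_int, C, 2, x, cov_C=0.01 * scipy.ones(5))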
coursera/courseraoauth2client
courseraoauth2client/commands/version.py
parser
def parser(subparsers):
    "Build an argparse argument parser to parse the command line."
    # create the parser for the version subcommand.
    parser_version = subparsers.add_parser(
        'version',
        help="Output the version of %(prog)s to the console.")
    parser_version.set_defaults(func=command_version)
    return parser_version
python
train
https://github.com/coursera/courseraoauth2client/blob/4edd991defe26bfc768ab28a30368cace40baf44/courseraoauth2client/commands/version.py#L53-L62
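A sketch of how this subcommand factory is typically wired into a top-level parser; command_version here is a hypothetical stand-in for the real callback the module registers via set_defaults:

import argparse

def command_version(args):
    # Hypothetical stand-in for the module's real version callback.
    print('version info would be printed here')

main_parser = argparse.ArgumentParser(prog='courseraoauth2client')
subparsers = main_parser.add_subparsers()
parser(subparsers)                 # registers the 'version' subcommand

args = main_parser.parse_args(['version'])
args.func(args)                    # dispatches to command_version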
CTPUG/wafer
wafer/registration/templatetags/wafer_crispy.py
wafer_form_helper
def wafer_form_helper(context, helper_name):
    '''
    Find the specified Crispy FormHelper and instantiate it.
    Handy when you are crispyifying other apps' forms.
    '''
    request = context.request
    module, class_name = helper_name.rsplit('.', 1)
    if module not in sys.modules:
        __import__(module)
    mod = sys.modules[module]
    class_ = getattr(mod, class_name)
    return class_(request=request)
python
train
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/registration/templatetags/wafer_crispy.py#L8-L19
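The interesting part of this helper is the dotted-path class lookup; a self-contained sketch of the same technique, using a standard-library class purely for illustration:

import sys

def load_class(dotted_name):
    # Same lookup strategy as wafer_form_helper, minus the request
    # plumbing: import the module if needed, then fetch the class by name.
    module, class_name = dotted_name.rsplit('.', 1)
    if module not in sys.modules:
        __import__(module)
    return getattr(sys.modules[module], class_name)

OrderedDict = load_class('collections.OrderedDict')
print(OrderedDict(a=1, b=2))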
CTPUG/wafer
wafer/pages/models.py
page_menus
def page_menus(root_menu):
    """Add page menus."""
    for page in Page.objects.filter(include_in_menu=True):
        path = page.get_path()
        menu = path[0] if len(path) > 1 else None
        try:
            root_menu.add_item(page.name, page.get_absolute_url(), menu=menu)
        except MenuError as e:
            logger.error("Bad menu item %r for page with slug %r."
                         % (e, page.slug))
python
train
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/pages/models.py#L162-L171
CTPUG/wafer
wafer/registration/views.py
redirect_profile
def redirect_profile(request):
    '''
    The default destination after logging in; redirect to the actual
    profile URL.
    '''
    if request.user.is_authenticated:
        return HttpResponseRedirect(
            reverse('wafer_user_profile', args=(request.user.username,)))
    else:
        return redirect_to_login(next=reverse(redirect_profile))
python
train
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/registration/views.py#L12-L20
CTPUG/wafer
wafer/talks/templatetags/review.py
reviewed_badge
def reviewed_badge(user, talk):
    """Returns a badge for the user's reviews of the talk"""
    context = {
        'reviewed': False,
    }
    review = None
    if user and not user.is_anonymous():
        review = talk.reviews.filter(reviewer=user).first()
    if review:
        context['reviewed'] = True
        context['review_is_current'] = review.is_current()
    return context
python
train
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/talks/templatetags/review.py#L7-L21
markchil/gptools
gptools/kernel/warping.py
beta_cdf_warp
def beta_cdf_warp(X, d, n, *args):
    r"""Warp inputs that are confined to the unit hypercube using the
    regularized incomplete beta function.

    Applies separately to each dimension, designed for use with
    :py:class:`WarpingFunction`. Assumes that your inputs `X` lie
    entirely within the unit hypercube [0, 1].

    Note that you may experience some issues with constraining and
    computing derivatives at :math:`x=0` when :math:`\alpha < 1` and at
    :math:`x=1` when :math:`\beta < 1`. As a workaround, try mapping your
    data to not touch the boundaries of the unit hypercube.

    Parameters
    ----------
    X : array, (`M`,)
        `M` inputs from dimension `d`.
    d : non-negative int
        The index (starting from zero) of the dimension to apply the
        warping to.
    n : non-negative int
        The derivative order to compute.
    *args : 2N scalars
        The remaining parameters to describe the warping, given as
        scalars. These are given as `alpha_i`, `beta_i` for each of the
        `D` dimensions. Note that these must ALL be provided for each
        call.

    References
    ----------
    .. [1] J. Snoek, K. Swersky, R. Zemel, R. P. Adams, "Input Warping
       for Bayesian Optimization of Non-stationary Functions" ICML (2014)
    """
    X = scipy.asarray(X)
    a = args[2 * d]
    b = args[2 * d + 1]
    if n == 0:
        return scipy.special.betainc(a, b, X)
    elif n == 1:
        # http://functions.wolfram.com/GammaBetaErf/BetaRegularized/20/01/01/
        return (1 - X)**(b - 1) * X**(a - 1) / scipy.special.beta(a, b)
    else:
        # http://functions.wolfram.com/GammaBetaErf/BetaRegularized/20/02/01/
        out = scipy.zeros_like(X)
        for k in range(0, n):
            out += (
                (-1.0)**(n - k) * scipy.special.binom(n - 1, k) *
                fixed_poch(1.0 - b, k) * fixed_poch(1.0 - a, n - k - 1.0) *
                (X / (1.0 - X))**k
            )
        return -(1.0 - X)**(b - 1.0) * X**(a - n) * out / scipy.special.beta(a, b)
python
def beta_cdf_warp(X, d, n, *args): r"""Warp inputs that are confined to the unit hypercube using the regularized incomplete beta function. Applies separately to each dimension, designed for use with :py:class:`WarpingFunction`. Assumes that your inputs `X` lie entirely within the unit hypercube [0, 1]. Note that you may experience some issues with constraining and computing derivatives at :math:`x=0` when :math:`\alpha < 1` and at :math:`x=1` when :math:`\beta < 1`. As a workaround, try mapping your data to not touch the boundaries of the unit hypercube. Parameters ---------- X : array, (`M`,) `M` inputs from dimension `d`. d : non-negative int The index (starting from zero) of the dimension to apply the warping to. n : non-negative int The derivative order to compute. *args : 2N scalars The remaining parameters to describe the warping, given as scalars. These are given as `alpha_i`, `beta_i` for each of the `D` dimensions. Note that these must ALL be provided for each call. References ---------- .. [1] J. Snoek, K. Swersky, R. Zemel, R. P. Adams, "Input Warping for Bayesian Optimization of Non-stationary Functions" ICML (2014) """ X = scipy.asarray(X) a = args[2 * d] b = args[2 * d + 1] if n == 0: return scipy.special.betainc(a, b, X) elif n == 1: # http://functions.wolfram.com/GammaBetaErf/BetaRegularized/20/01/01/ return (1 - X)**(b - 1) * X**(a - 1) / scipy.special.beta(a, b) else: # http://functions.wolfram.com/GammaBetaErf/BetaRegularized/20/02/01/ out = scipy.zeros_like(X) for k in range(0, n): out += ( (-1.0)**(n - k) * scipy.special.binom(n - 1, k) * fixed_poch(1.0 - b, k) * fixed_poch(1.0 - a, n - k - 1.0) * (X / (1.0 - X))**k ) return -(1.0 - X)**(b - 1.0) * X**(a - n) * out / scipy.special.beta(a, b)
[ "def", "beta_cdf_warp", "(", "X", ",", "d", ",", "n", ",", "*", "args", ")", ":", "X", "=", "scipy", ".", "asarray", "(", "X", ")", "a", "=", "args", "[", "2", "*", "d", "]", "b", "=", "args", "[", "2", "*", "d", "+", "1", "]", "if", "n", "==", "0", ":", "return", "scipy", ".", "special", ".", "betainc", "(", "a", ",", "b", ",", "X", ")", "elif", "n", "==", "1", ":", "# http://functions.wolfram.com/GammaBetaErf/BetaRegularized/20/01/01/", "return", "(", "1", "-", "X", ")", "**", "(", "b", "-", "1", ")", "*", "X", "**", "(", "a", "-", "1", ")", "/", "scipy", ".", "special", ".", "beta", "(", "a", ",", "b", ")", "else", ":", "# http://functions.wolfram.com/GammaBetaErf/BetaRegularized/20/02/01/", "out", "=", "scipy", ".", "zeros_like", "(", "X", ")", "for", "k", "in", "range", "(", "0", ",", "n", ")", ":", "out", "+=", "(", "(", "-", "1.0", ")", "**", "(", "n", "-", "k", ")", "*", "scipy", ".", "special", ".", "binom", "(", "n", "-", "1", ",", "k", ")", "*", "fixed_poch", "(", "1.0", "-", "b", ",", "k", ")", "*", "fixed_poch", "(", "1.0", "-", "a", ",", "n", "-", "k", "-", "1.0", ")", "*", "(", "X", "/", "(", "1.0", "-", "X", ")", ")", "**", "k", ")", "return", "-", "(", "1.0", "-", "X", ")", "**", "(", "b", "-", "1.0", ")", "*", "X", "**", "(", "a", "-", "n", ")", "*", "out", "/", "scipy", ".", "special", ".", "beta", "(", "a", ",", "b", ")" ]
r"""Warp inputs that are confined to the unit hypercube using the regularized incomplete beta function. Applies separately to each dimension, designed for use with :py:class:`WarpingFunction`. Assumes that your inputs `X` lie entirely within the unit hypercube [0, 1]. Note that you may experience some issues with constraining and computing derivatives at :math:`x=0` when :math:`\alpha < 1` and at :math:`x=1` when :math:`\beta < 1`. As a workaround, try mapping your data to not touch the boundaries of the unit hypercube. Parameters ---------- X : array, (`M`,) `M` inputs from dimension `d`. d : non-negative int The index (starting from zero) of the dimension to apply the warping to. n : non-negative int The derivative order to compute. *args : 2N scalars The remaining parameters to describe the warping, given as scalars. These are given as `alpha_i`, `beta_i` for each of the `D` dimensions. Note that these must ALL be provided for each call. References ---------- .. [1] J. Snoek, K. Swersky, R. Zemel, R. P. Adams, "Input Warping for Bayesian Optimization of Non-stationary Functions" ICML (2014)
[ "r", "Warp", "inputs", "that", "are", "confined", "to", "the", "unit", "hypercube", "using", "the", "regularized", "incomplete", "beta", "function", ".", "Applies", "separately", "to", "each", "dimension", "designed", "for", "use", "with", ":", "py", ":", "class", ":", "WarpingFunction", ".", "Assumes", "that", "your", "inputs", "X", "lie", "entirely", "within", "the", "unit", "hypercube", "[", "0", "1", "]", ".", "Note", "that", "you", "may", "experience", "some", "issues", "with", "constraining", "and", "computing", "derivatives", "at", ":", "math", ":", "x", "=", "0", "when", ":", "math", ":", "\\", "alpha", "<", "1", "and", "at", ":", "math", ":", "x", "=", "1", "when", ":", "math", ":", "\\", "beta", "<", "1", ".", "As", "a", "workaround", "try", "mapping", "your", "data", "to", "not", "touch", "the", "boundaries", "of", "the", "unit", "hypercube", ".", "Parameters", "----------", "X", ":", "array", "(", "M", ")", "M", "inputs", "from", "dimension", "d", ".", "d", ":", "non", "-", "negative", "int", "The", "index", "(", "starting", "from", "zero", ")", "of", "the", "dimension", "to", "apply", "the", "warping", "to", ".", "n", ":", "non", "-", "negative", "int", "The", "derivative", "order", "to", "compute", ".", "*", "args", ":", "2N", "scalars", "The", "remaining", "parameters", "to", "describe", "the", "warping", "given", "as", "scalars", ".", "These", "are", "given", "as", "alpha_i", "beta_i", "for", "each", "of", "the", "D", "dimensions", ".", "Note", "that", "these", "must", "ALL", "be", "provided", "for", "each", "call", ".", "References", "----------", "..", "[", "1", "]", "J", ".", "Snoek", "K", ".", "Swersky", "R", ".", "Zemel", "R", ".", "P", ".", "Adams", "Input", "Warping", "for", "Bayesian", "Optimization", "of", "Non", "-", "stationary", "Functions", "ICML", "(", "2014", ")" ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/kernel/warping.py#L315-L365
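The n = 0 and n = 1 branches can be sanity-checked against each other with finite differences; this sketch uses the modern numpy/scipy split rather than the legacy scipy.asarray-style aliases the source relies on, and the a, b values are arbitrary.

import numpy as np
from scipy import special

a, b = 2.0, 3.0
x = np.linspace(0.05, 0.95, 5)
w = special.betainc(a, b, x)                              # n = 0 branch: I_x(a, b)
dw = (1 - x)**(b - 1) * x**(a - 1) / special.beta(a, b)   # n = 1 branch: beta pdf
h = 1e-6
dw_fd = (special.betainc(a, b, x + h) - special.betainc(a, b, x - h)) / (2 * h)
assert np.allclose(dw, dw_fd, atol=1e-6)                  # derivative matches slope of the CDF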
markchil/gptools
gptools/kernel/warping.py
linear_warp
def linear_warp(X, d, n, *args): r"""Warp inputs with a linear transformation. Applies the warping .. math:: w(x) = \frac{x-a}{b-a} to each dimension. If you set `a=min(X)` and `b=max(X)` then this is a convenient way to map your inputs to the unit hypercube. Parameters ---------- X : array, (`M`,) `M` inputs from dimension `d`. d : non-negative int The index (starting from zero) of the dimension to apply the warping to. n : non-negative int The derivative order to compute. *args : 2N scalars The remaining parameters to describe the warping, given as scalars. These are given as `a_i`, `b_i` for each of the `D` dimensions. Note that these must ALL be provided for each call. """ X = scipy.asarray(X, dtype=float) a = args[2 * d] b = args[2 * d + 1] if n == 0: return (X - a) / (b - a) elif n == 1: return 1.0 / (b - a) * scipy.ones_like(X) else: return scipy.zeros_like(X)
python
def linear_warp(X, d, n, *args): r"""Warp inputs with a linear transformation. Applies the warping .. math:: w(x) = \frac{x-a}{b-a} to each dimension. If you set `a=min(X)` and `b=max(X)` then this is a convenient way to map your inputs to the unit hypercube. Parameters ---------- X : array, (`M`,) `M` inputs from dimension `d`. d : non-negative int The index (starting from zero) of the dimension to apply the warping to. n : non-negative int The derivative order to compute. *args : 2N scalars The remaining parameters to describe the warping, given as scalars. These are given as `a_i`, `b_i` for each of the `D` dimensions. Note that these must ALL be provided for each call. """ X = scipy.asarray(X, dtype=float) a = args[2 * d] b = args[2 * d + 1] if n == 0: return (X - a) / (b - a) elif n == 1: return 1.0 / (b - a) * scipy.ones_like(X) else: return scipy.zeros_like(X)
[ "def", "linear_warp", "(", "X", ",", "d", ",", "n", ",", "*", "args", ")", ":", "X", "=", "scipy", ".", "asarray", "(", "X", ",", "dtype", "=", "float", ")", "a", "=", "args", "[", "2", "*", "d", "]", "b", "=", "args", "[", "2", "*", "d", "+", "1", "]", "if", "n", "==", "0", ":", "return", "(", "X", "-", "a", ")", "/", "(", "b", "-", "a", ")", "elif", "n", "==", "1", ":", "return", "1.0", "/", "(", "b", "-", "a", ")", "*", "scipy", ".", "ones_like", "(", "X", ")", "else", ":", "return", "scipy", ".", "zeros_like", "(", "X", ")" ]
r"""Warp inputs with a linear transformation. Applies the warping .. math:: w(x) = \frac{x-a}{b-a} to each dimension. If you set `a=min(X)` and `b=max(X)` then this is a convenient way to map your inputs to the unit hypercube. Parameters ---------- X : array, (`M`,) `M` inputs from dimension `d`. d : non-negative int The index (starting from zero) of the dimension to apply the warping to. n : non-negative int The derivative order to compute. *args : 2N scalars The remaining parameters to describe the warping, given as scalars. These are given as `a_i`, `b_i` for each of the `D` dimensions. Note that these must ALL be provided for each call.
[ "r", "Warp", "inputs", "with", "a", "linear", "transformation", ".", "Applies", "the", "warping", "..", "math", "::", "w", "(", "x", ")", "=", "\\", "frac", "{", "x", "-", "a", "}", "{", "b", "-", "a", "}", "to", "each", "dimension", ".", "If", "you", "set", "a", "=", "min", "(", "X", ")", "and", "b", "=", "max", "(", "X", ")", "then", "this", "is", "a", "convenient", "way", "to", "map", "your", "inputs", "to", "the", "unit", "hypercube", ".", "Parameters", "----------", "X", ":", "array", "(", "M", ")", "M", "inputs", "from", "dimension", "d", ".", "d", ":", "non", "-", "negative", "int", "The", "index", "(", "starting", "from", "zero", ")", "of", "the", "dimension", "to", "apply", "the", "warping", "to", ".", "n", ":", "non", "-", "negative", "int", "The", "derivative", "order", "to", "compute", ".", "*", "args", ":", "2N", "scalars", "The", "remaining", "parameters", "to", "describe", "the", "warping", "given", "as", "scalars", ".", "These", "are", "given", "as", "a_i", "b_i", "for", "each", "of", "the", "D", "dimensions", ".", "Note", "that", "these", "must", "ALL", "be", "provided", "for", "each", "call", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/kernel/warping.py#L367-L402
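A minimal sketch of the a = min(X), b = max(X) recipe from the docstring, again using plain numpy in place of the legacy scipy aliases; the data values are hypothetical.

import numpy as np
X = np.array([3.0, 7.5, 12.0])
a, b = X.min(), X.max()
w = (X - a) / (b - a)            # n = 0: values now span [0.0, 0.5, 1.0]
dw = np.ones_like(X) / (b - a)   # n = 1: constant slope 1/(b - a)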
markchil/gptools
gptools/kernel/warping.py
WarpedKernel.w_func
def w_func(self, X, d, n): """Evaluate the (possibly recursive) warping function and its derivatives. Parameters ---------- X : array, (`M`,) The points (from dimension `d`) to evaluate the warping function at. d : int The dimension to warp. n : int The derivative order to compute. So far only 0 and 1 are supported. """ if n == 0: wX = self.w(X, d, 0) if isinstance(self.k, WarpedKernel): wX = self.k.w_func(wX, d, 0) return wX elif n == 1: wXn = self.w(X, d, n) if isinstance(self.k, WarpedKernel): wX = self.w_func(X, d, 0) wXn *= self.k.w_func(wX, d, n) return wXn else: raise ValueError("Derivative orders greater than one are not supported!")
python
def w_func(self, X, d, n): """Evaluate the (possibly recursive) warping function and its derivatives. Parameters ---------- X : array, (`M`,) The points (from dimension `d`) to evaluate the warping function at. d : int The dimension to warp. n : int The derivative order to compute. So far only 0 and 1 are supported. """ if n == 0: wX = self.w(X, d, 0) if isinstance(self.k, WarpedKernel): wX = self.k.w_func(wX, d, 0) return wX elif n == 1: wXn = self.w(X, d, n) if isinstance(self.k, WarpedKernel): wX = self.w_func(X, d, 0) wXn *= self.k.w_func(wX, d, n) return wXn else: raise ValueError("Derivative orders greater than one are not supported!")
[ "def", "w_func", "(", "self", ",", "X", ",", "d", ",", "n", ")", ":", "if", "n", "==", "0", ":", "wX", "=", "self", ".", "w", "(", "X", ",", "d", ",", "0", ")", "if", "isinstance", "(", "self", ".", "k", ",", "WarpedKernel", ")", ":", "wX", "=", "self", ".", "k", ".", "w_func", "(", "wX", ",", "d", ",", "0", ")", "return", "wX", "elif", "n", "==", "1", ":", "wXn", "=", "self", ".", "w", "(", "X", ",", "d", ",", "n", ")", "if", "isinstance", "(", "self", ".", "k", ",", "WarpedKernel", ")", ":", "wX", "=", "self", ".", "w_func", "(", "X", ",", "d", ",", "0", ")", "wXn", "*=", "self", ".", "k", ".", "w_func", "(", "wX", ",", "d", ",", "n", ")", "return", "wXn", "else", ":", "raise", "ValueError", "(", "\"Derivative orders greater than one are not supported!\"", ")" ]
Evaluate the (possibly recursive) warping function and its derivatives. Parameters ---------- X : array, (`M`,) The points (from dimension `d`) to evaluate the warping function at. d : int The dimension to warp. n : int The derivative order to compute. So far only 0 and 1 are supported.
[ "Evaluate", "the", "(", "possibly", "recursive", ")", "warping", "function", "and", "its", "derivatives", ".", "Parameters", "----------", "X", ":", "array", "(", "M", ")", "The", "points", "(", "from", "dimension", "d", ")", "to", "evaluate", "the", "warping", "function", "at", ".", "d", ":", "int", "The", "dimension", "to", "warp", ".", "n", ":", "int", "The", "derivative", "order", "to", "compute", ".", "So", "far", "only", "0", "and", "1", "are", "supported", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/kernel/warping.py#L507-L531
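A compact reading of the n == 1 branch: for the composed warp :math:`u(x) = w_k(w(x))` the chain rule gives :math:`u'(x) = w'(x)\,w_k'(w(x))`, which is exactly self.w(X, d, 1) multiplied by self.k.w_func(wX, d, 1) evaluated at wX = w(X). Orders above one would need Faà di Bruno's formula, hence the ValueError.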
markchil/gptools
gptools/kernel/warping.py
WarpedKernel.enforce_bounds
def enforce_bounds(self, v): """Set `enforce_bounds` for both of the kernels to a new value. """ self._enforce_bounds = v self.k.enforce_bounds = v self.w.enforce_bounds = v
python
def enforce_bounds(self, v): """Set `enforce_bounds` for both of the kernels to a new value. """ self._enforce_bounds = v self.k.enforce_bounds = v self.w.enforce_bounds = v
[ "def", "enforce_bounds", "(", "self", ",", "v", ")", ":", "self", ".", "_enforce_bounds", "=", "v", "self", ".", "k", ".", "enforce_bounds", "=", "v", "self", ".", "w", ".", "enforce_bounds", "=", "v" ]
Set `enforce_bounds` for both of the kernels to a new value.
[ "Set", "enforce_bounds", "for", "both", "of", "the", "kernels", "to", "a", "new", "value", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/kernel/warping.py#L540-L545
markchil/gptools
gptools/kernel/warping.py
WarpedKernel.free_params
def free_params(self, value): """Set the free parameters. Note that this bypasses enforce_bounds. """ value = scipy.asarray(value, dtype=float) self.K_up_to_date = False self.k.free_params = value[:self.k.num_free_params] self.w.free_params = value[self.k.num_free_params:self.k.num_free_params + self.w.num_free_params]
python
def free_params(self, value): """Set the free parameters. Note that this bypasses enforce_bounds. """ value = scipy.asarray(value, dtype=float) self.K_up_to_date = False self.k.free_params = value[:self.k.num_free_params] self.w.free_params = value[self.k.num_free_params:self.k.num_free_params + self.w.num_free_params]
[ "def", "free_params", "(", "self", ",", "value", ")", ":", "value", "=", "scipy", ".", "asarray", "(", "value", ",", "dtype", "=", "float", ")", "self", ".", "K_up_to_date", "=", "False", "self", ".", "k", ".", "free_params", "=", "value", "[", ":", "self", ".", "k", ".", "num_free_params", "]", "self", ".", "w", ".", "free_params", "=", "value", "[", "self", ".", "k", ".", "num_free_params", ":", "self", ".", "k", ".", "num_free_params", "+", "self", ".", "w", ".", "num_free_params", "]" ]
Set the free parameters. Note that this bypasses enforce_bounds.
[ "Set", "the", "free", "parameters", ".", "Note", "that", "this", "bypasses", "enforce_bounds", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/kernel/warping.py#L582-L588
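A worked split with hypothetical sizes: if self.k exposes 2 free parameters and self.w exposes 4, a 6-vector is partitioned as below; note that any trailing entries are silently ignored.

import numpy as np  # stand-in for the scipy alias used in the source
value = np.array([1., 2., 3., 4., 5., 6.])
value[:2]       # kernel block -> [1., 2.]
value[2:2 + 4]  # warping block -> [3., 4., 5., 6.]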
markchil/gptools
gptools/kernel/warping.py
WarpedKernel.set_hyperparams
def set_hyperparams(self, new_params): """Set the (free) hyperparameters. Parameters ---------- new_params : :py:class:`Array` or other Array-like New values of the free parameters. Raises ------ ValueError If the length of `new_params` is not consistent with :py:attr:`self.params`. """ new_params = scipy.asarray(new_params, dtype=float) if len(new_params) == len(self.free_params): num_free_k = sum(~self.k.fixed_params) self.k.set_hyperparams(new_params[:num_free_k]) self.w.set_hyperparams(new_params[num_free_k:]) else: raise ValueError("Length of new_params must be %s!" % (len(self.free_params),))
python
def set_hyperparams(self, new_params): """Set the (free) hyperparameters. Parameters ---------- new_params : :py:class:`Array` or other Array-like New values of the free parameters. Raises ------ ValueError If the length of `new_params` is not consistent with :py:attr:`self.params`. """ new_params = scipy.asarray(new_params, dtype=float) if len(new_params) == len(self.free_params): num_free_k = sum(~self.k.fixed_params) self.k.set_hyperparams(new_params[:num_free_k]) self.w.set_hyperparams(new_params[num_free_k:]) else: raise ValueError("Length of new_params must be %s!" % (len(self.free_params),))
[ "def", "set_hyperparams", "(", "self", ",", "new_params", ")", ":", "new_params", "=", "scipy", ".", "asarray", "(", "new_params", ",", "dtype", "=", "float", ")", "if", "len", "(", "new_params", ")", "==", "len", "(", "self", ".", "free_params", ")", ":", "num_free_k", "=", "sum", "(", "~", "self", ".", "k", ".", "fixed_params", ")", "self", ".", "k", ".", "set_hyperparams", "(", "new_params", "[", ":", "num_free_k", "]", ")", "self", ".", "w", ".", "set_hyperparams", "(", "new_params", "[", "num_free_k", ":", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Length of new_params must be %s!\"", "%", "(", "len", "(", "self", ".", "free_params", ")", ",", ")", ")" ]
Set the (free) hyperparameters. Parameters ---------- new_params : :py:class:`Array` or other Array-like New values of the free parameters. Raises ------ ValueError If the length of `new_params` is not consistent with :py:attr:`self.params`.
[ "Set", "the", "(", "free", ")", "hyperparameters", ".", "Parameters", "----------", "new_params", ":", ":", "py", ":", "class", ":", "Array", "or", "other", "Array", "-", "like", "New", "values", "of", "the", "free", "parameters", ".", "Raises", "------", "ValueError", "If", "the", "length", "of", "new_params", "is", "not", "consistent", "with", ":", "py", ":", "attr", ":", "self", ".", "params", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/kernel/warping.py#L611-L631
codenerix/django-codenerix
codenerix/multiforms.py
MultiForm.get
def get(self, request, *args, **kwargs): """ Handles GET requests and instantiates blank versions of the form and its inline formsets. """ # Prepare base if 'pk' in kwargs: self.object = self.get_object() else: self.object = None form_class = self.get_form_class() # Get prefix if 'field_prefix' in form_class.Meta.__dict__: # Get name from the form field_prefix = form_class.Meta.field_prefix else: # Get name from the class field_prefix = str(form_class).split("'")[1].split(".")[-1] self.field_prefix = field_prefix # Build form form = self.get_form(form_class) # Find groups if 'groups' in dir(self): # Save groups groups = self.groups # Redefine groups inside the form form.__groups__ = lambda: groups # Initialize list of fields fields = [] else: groups = None # Add special prefix support to properly support form independency form.add_prefix = lambda fields_name, field_prefix=field_prefix: "%s_%s" % (field_prefix, fields_name) if 'autofill' not in dir(form.Meta): form.Meta.autofill = {} # For every extra form forms = [] position_form_default = 0 for (formelement, linkerfield, modelfilter) in self.forms: if formelement is None: formobj = form position_form_default = len(forms) else: # Locate linked element if self.object: related_name = formelement._meta.model._meta.get_field(linkerfield).related_query_name() queryset = getattr(self.object, related_name) if modelfilter: queryset = queryset.filter(eval("Q(%s)" % (modelfilter))) get_method = getattr(queryset, 'get', None) if get_method: instance = queryset.get() else: instance = queryset else: instance = None if 'autofill' in dir(formelement.Meta): formname = str(formelement).split('.')[-1].split("'")[0] for key in formelement.Meta.autofill: form.Meta.autofill['{}_{}'.format(formname, key)] = formelement.Meta.autofill[key] # Get prefix if 'field_prefix' in formelement.Meta.__dict__: # Get name from the form field_prefix = formelement.Meta.field_prefix else: # Get name from the class field_prefix = str(formelement).split("'")[1].split(".")[-1] self.field_prefix = field_prefix # Prepare form formobj = formelement(instance=instance) formobj.form_name = form.form_name # Excluded fields if 'exclude' not in formobj.Meta.__dict__: formobj.Meta.exclude = [linkerfield] elif linkerfield not in formobj.Meta.exclude: formobj.Meta.exclude.append(linkerfield) if linkerfield in formobj.fields: del(formobj.fields[linkerfield]) # Add special prefix support to properly support form independency formobj.add_prefix = lambda fields_name, field_prefix=field_prefix: "%s_%s" % (field_prefix, fields_name) formobj.scope_prefix = field_prefix # Save fields to the list if groups: for field in formobj: fields.append(field) else: # Add the form to the list of forms forms.append(formobj) if position_form_default == 0: open_tabs = 1 else: open_tabs = 0 # Remember list of fields if groups: form.list_fields = fields # Add context and return new context return self.render_to_response(self.get_context_data(form=form, forms=forms, open_tabs=open_tabs, position_form_default=position_form_default))
python
def get(self, request, *args, **kwargs): """ Handles GET requests and instantiates blank versions of the form and its inline formsets. """ # Prepare base if 'pk' in kwargs: self.object = self.get_object() else: self.object = None form_class = self.get_form_class() # Get prefix if 'field_prefix' in form_class.Meta.__dict__: # Get name from the form field_prefix = form_class.Meta.field_prefix else: # Get name from the class field_prefix = str(form_class).split("'")[1].split(".")[-1] self.field_prefix = field_prefix # Build form form = self.get_form(form_class) # Find groups if 'groups' in dir(self): # Save groups groups = self.groups # Redefine groups inside the form form.__groups__ = lambda: groups # Initialize list of fields fields = [] else: groups = None # Add special prefix support to properly support form independency form.add_prefix = lambda fields_name, field_prefix=field_prefix: "%s_%s" % (field_prefix, fields_name) if 'autofill' not in dir(form.Meta): form.Meta.autofill = {} # For every extra form forms = [] position_form_default = 0 for (formelement, linkerfield, modelfilter) in self.forms: if formelement is None: formobj = form position_form_default = len(forms) else: # Locate linked element if self.object: related_name = formelement._meta.model._meta.get_field(linkerfield).related_query_name() queryset = getattr(self.object, related_name) if modelfilter: queryset = queryset.filter(eval("Q(%s)" % (modelfilter))) get_method = getattr(queryset, 'get', None) if get_method: instance = queryset.get() else: instance = queryset else: instance = None if 'autofill' in dir(formelement.Meta): formname = str(formelement).split('.')[-1].split("'")[0] for key in formelement.Meta.autofill: form.Meta.autofill['{}_{}'.format(formname, key)] = formelement.Meta.autofill[key] # Get prefix if 'field_prefix' in formelement.Meta.__dict__: # Get name from the form field_prefix = formelement.Meta.field_prefix else: # Get name from the class field_prefix = str(formelement).split("'")[1].split(".")[-1] self.field_prefix = field_prefix # Prepare form formobj = formelement(instance=instance) formobj.form_name = form.form_name # Excluded fields if 'exclude' not in formobj.Meta.__dict__: formobj.Meta.exclude = [linkerfield] elif linkerfield not in formobj.Meta.exclude: formobj.Meta.exclude.append(linkerfield) if linkerfield in formobj.fields: del(formobj.fields[linkerfield]) # Add special prefix support to properly support form independency formobj.add_prefix = lambda fields_name, field_prefix=field_prefix: "%s_%s" % (field_prefix, fields_name) formobj.scope_prefix = field_prefix # Save fields to the list if groups: for field in formobj: fields.append(field) else: # Add the form to the list of forms forms.append(formobj) if position_form_default == 0: open_tabs = 1 else: open_tabs = 0 # Remember list of fields if groups: form.list_fields = fields # Add context and return new context return self.render_to_response(self.get_context_data(form=form, forms=forms, open_tabs=open_tabs, position_form_default=position_form_default))
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Prepare base", "if", "'pk'", "in", "kwargs", ":", "self", ".", "object", "=", "self", ".", "get_object", "(", ")", "else", ":", "self", ".", "object", "=", "None", "form_class", "=", "self", ".", "get_form_class", "(", ")", "# Get prefix", "if", "'field_prefix'", "in", "form_class", ".", "Meta", ".", "__dict__", ":", "# Get name from the form", "field_prefix", "=", "form_class", ".", "Meta", ".", "field_prefix", "else", ":", "# Get name from the class", "field_prefix", "=", "str", "(", "form_class", ")", ".", "split", "(", "\"'\"", ")", "[", "1", "]", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", "self", ".", "field_prefix", "=", "field_prefix", "# Build form", "form", "=", "self", ".", "get_form", "(", "form_class", ")", "# Find groups", "if", "'groups'", "in", "dir", "(", "self", ")", ":", "# Save groups", "groups", "=", "self", ".", "groups", "# Redefine groups inside the form", "form", ".", "__groups__", "=", "lambda", ":", "groups", "# Initialize list of fields", "fields", "=", "[", "]", "else", ":", "groups", "=", "None", "# Add special prefix support to properly support form independency", "form", ".", "add_prefix", "=", "lambda", "fields_name", ",", "field_prefix", "=", "field_prefix", ":", "\"%s_%s\"", "%", "(", "field_prefix", ",", "fields_name", ")", "if", "'autofill'", "not", "in", "dir", "(", "form", ".", "Meta", ")", ":", "form", ".", "Meta", ".", "autofill", "=", "{", "}", "# For every extra form", "forms", "=", "[", "]", "position_form_default", "=", "0", "for", "(", "formelement", ",", "linkerfield", ",", "modelfilter", ")", "in", "self", ".", "forms", ":", "if", "formelement", "is", "None", ":", "formobj", "=", "form", "position_form_default", "=", "len", "(", "forms", ")", "else", ":", "# Locate linked element", "if", "self", ".", "object", ":", "related_name", "=", "formelement", ".", "_meta", ".", "model", ".", "_meta", ".", "get_field", "(", "linkerfield", ")", ".", "related_query_name", "(", ")", "queryset", "=", "getattr", "(", "self", ".", "object", ",", "related_name", ")", "if", "modelfilter", ":", "queryset", "=", "queryset", ".", "filter", "(", "eval", "(", "\"Q(%s)\"", "%", "(", "modelfilter", ")", ")", ")", "get_method", "=", "getattr", "(", "queryset", ",", "'get'", ",", "None", ")", "if", "get_method", ":", "instance", "=", "queryset", ".", "get", "(", ")", "else", ":", "instance", "=", "queryset", "else", ":", "instance", "=", "None", "if", "'autofill'", "in", "dir", "(", "formelement", ".", "Meta", ")", ":", "formname", "=", "str", "(", "formelement", ")", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", ".", "split", "(", "\"'\"", ")", "[", "0", "]", "for", "key", "in", "formelement", ".", "Meta", ".", "autofill", ":", "form", ".", "Meta", ".", "autofill", "[", "'{}_{}'", ".", "format", "(", "formname", ",", "key", ")", "]", "=", "formelement", ".", "Meta", ".", "autofill", "[", "key", "]", "# Get prefix", "if", "'field_prefix'", "in", "formelement", ".", "Meta", ".", "__dict__", ":", "# Get name from the form", "field_prefix", "=", "formelement", ".", "Meta", ".", "field_prefix", "else", ":", "# Get name from the class", "field_prefix", "=", "str", "(", "formelement", ")", ".", "split", "(", "\"'\"", ")", "[", "1", "]", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", "self", ".", "field_prefix", "=", "field_prefix", "# Prepare form", "formobj", "=", "formelement", "(", "instance", "=", "instance", ")", "formobj", ".", "form_name", "=", 
"form", ".", "form_name", "# Excluded fields", "if", "'exclude'", "not", "in", "formobj", ".", "Meta", ".", "__dict__", ":", "formobj", ".", "Meta", ".", "exclude", "=", "[", "linkerfield", "]", "elif", "linkerfield", "not", "in", "formobj", ".", "Meta", ".", "exclude", ":", "formobj", ".", "Meta", ".", "exclude", ".", "append", "(", "linkerfield", ")", "if", "linkerfield", "in", "formobj", ".", "fields", ":", "del", "(", "formobj", ".", "fields", "[", "linkerfield", "]", ")", "# Add special prefix support to properly support form independency", "formobj", ".", "add_prefix", "=", "lambda", "fields_name", ",", "field_prefix", "=", "field_prefix", ":", "\"%s_%s\"", "%", "(", "field_prefix", ",", "fields_name", ")", "formobj", ".", "scope_prefix", "=", "field_prefix", "# Save fields to the list", "if", "groups", ":", "for", "field", "in", "formobj", ":", "fields", ".", "append", "(", "field", ")", "else", ":", "# Add the form to the list of forms", "forms", ".", "append", "(", "formobj", ")", "if", "position_form_default", "==", "0", ":", "open_tabs", "=", "1", "else", ":", "open_tabs", "=", "0", "# Remember list of fields", "if", "groups", ":", "form", ".", "list_fields", "=", "fields", "# Add context and return new context", "return", "self", ".", "render_to_response", "(", "self", ".", "get_context_data", "(", "form", "=", "form", ",", "forms", "=", "forms", ",", "open_tabs", "=", "open_tabs", ",", "position_form_default", "=", "position_form_default", ")", ")" ]
Handles GET requests and instantiates blank versions of the form and its inline formsets.
[ "Handles", "GET", "requests", "and", "instantiates", "blank", "versions", "of", "the", "form", "and", "its", "inline", "formsets", "." ]
train
https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/multiforms.py#L51-L160
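One detail worth calling out in the prefixing code above: the lambda binds field_prefix through a default argument, which freezes the value per form instead of late-binding to the last loop iteration. A standalone illustration with hypothetical prefixes:

prefixes = ["AddressForm", "PhoneForm"]
bound = [lambda name, p=p: "%s_%s" % (p, name) for p in prefixes]
late = [lambda name: "%s_%s" % (p, name) for p in prefixes]
bound[0]("street")  # -> "AddressForm_street" (per-iteration value captured)
late[0]("street")   # -> "PhoneForm_street" (late binding, the bug the source avoids)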
codenerix/django-codenerix
codenerix/multiforms.py
MultiForm.post
def post(self, request, *args, **kwargs): """ andles POST requests, instantiating a form instance and its inline formsets with the passed POST variables and then checking them for validity. """ # Prepare base if 'pk' in kwargs: self.object = self.get_object() else: self.object = None form_class = self.get_form_class() # Get prefix if 'field_prefix' in form_class.Meta.__dict__: # Get name from the form field_prefix = form_class.Meta.field_prefix # Initialize list of fields else: # Get name from the class field_prefix = str(form_class).split("'")[1].split(".")[-1] self.field_prefix = field_prefix # Build the form form = self.get_form(form_class) # Find groups if 'groups' in dir(self): # Save groups groups = self.groups # Redefine groups inside the form form.__groups__ = lambda: groups # Initialize list of fields fields = [] else: groups = None # Add special prefix support to properly support form independency form.add_prefix = lambda fields_name, field_prefix=field_prefix: "%s_%s" % (field_prefix, fields_name) # Check validation valid = form.is_valid() if (not valid) and ('non_field_errors' in dir(self)): errors = [element[5] for element in list(self.non_field_errors())[:-1]] elif form.errors.as_data(): errors = [] for element in form.errors.as_data(): for err in form.errors.as_data()[element][0]: errors.append(err) else: errors = [] # For every extra form temp_forms = [] position_form_default = 0 for (formelement, linkerfield, modelfilter) in self.forms: if formelement is None: formobj = form position_form_default = len(temp_forms) else: # Locate linked element if self.object: related_name = formelement._meta.model._meta.get_field(linkerfield).related_query_name() queryset = getattr(self.object, related_name) if modelfilter: queryset = queryset.filter(eval("Q(%s)" % (modelfilter))) get_method = getattr(queryset, 'get', None) if get_method: instance = queryset.get() else: instance = queryset else: instance = None # Get prefix if 'field_prefix' in formelement.Meta.__dict__: # Get name from the form field_prefix = formelement.Meta.field_prefix else: # Get name from the class field_prefix = str(formelement).split("'")[1].split(".")[-1] self.field_prefix = field_prefix # Prepare form formobj = formelement(instance=instance, data=self.request.POST) formobj.form_name = form.form_name # Excluded fields if 'exclude' not in formobj.Meta.__dict__: formobj.Meta.exclude = [linkerfield] elif linkerfield not in formobj.Meta.exclude: formobj.Meta.exclude.append(linkerfield) if linkerfield in formobj.fields: del(formobj.fields[linkerfield]) # Link it to the main model formobj.add_prefix = lambda fields_name, field_prefix=field_prefix: "%s_%s" % (field_prefix, fields_name) # Validate valid *= formobj.is_valid() # append error if not formobj.is_valid() and ('non_field_errors' in dir(formobj)): errors += [element[5] for element in list(formobj.non_field_errors())[:-1]] # Save fields to the list if groups: for field in formobj: # raise Exception (field.__dict__) if 'unblock_t2ime' in field.html_name: raise Exception(field.field.__dict__) fields.append(field) # Add a new form temp_forms.append((formobj, linkerfield)) # execute validation specified validate_forms = None if valid and ("validate" in dir(self)): validate_forms = [tform[0] for tform in temp_forms] errors = self.validate(*validate_forms) # valid = len(errors) == 0 valid = False if errors is None or len(errors) == 0: valid = True # Remember list of fields if groups: form.list_fields = fields forms = [] else: if validate_forms: forms = validate_forms 
else: forms = [tform[0] for tform in temp_forms] if position_form_default == 0: open_tabs = 1 else: open_tabs = 0 # Check validation result if valid: # Everything is OK, call valid return self.form_valid(form, temp_forms) else: # Something went wrong, attach error and call invalid form.list_errors = errors return self.form_invalid(form, forms, open_tabs, position_form_default)
python
def post(self, request, *args, **kwargs): """ andles POST requests, instantiating a form instance and its inline formsets with the passed POST variables and then checking them for validity. """ # Prepare base if 'pk' in kwargs: self.object = self.get_object() else: self.object = None form_class = self.get_form_class() # Get prefix if 'field_prefix' in form_class.Meta.__dict__: # Get name from the form field_prefix = form_class.Meta.field_prefix # Initialize list of fields else: # Get name from the class field_prefix = str(form_class).split("'")[1].split(".")[-1] self.field_prefix = field_prefix # Build the form form = self.get_form(form_class) # Find groups if 'groups' in dir(self): # Save groups groups = self.groups # Redefine groups inside the form form.__groups__ = lambda: groups # Initialize list of fields fields = [] else: groups = None # Add special prefix support to properly support form independency form.add_prefix = lambda fields_name, field_prefix=field_prefix: "%s_%s" % (field_prefix, fields_name) # Check validation valid = form.is_valid() if (not valid) and ('non_field_errors' in dir(self)): errors = [element[5] for element in list(self.non_field_errors())[:-1]] elif form.errors.as_data(): errors = [] for element in form.errors.as_data(): for err in form.errors.as_data()[element][0]: errors.append(err) else: errors = [] # For every extra form temp_forms = [] position_form_default = 0 for (formelement, linkerfield, modelfilter) in self.forms: if formelement is None: formobj = form position_form_default = len(temp_forms) else: # Locate linked element if self.object: related_name = formelement._meta.model._meta.get_field(linkerfield).related_query_name() queryset = getattr(self.object, related_name) if modelfilter: queryset = queryset.filter(eval("Q(%s)" % (modelfilter))) get_method = getattr(queryset, 'get', None) if get_method: instance = queryset.get() else: instance = queryset else: instance = None # Get prefix if 'field_prefix' in formelement.Meta.__dict__: # Get name from the form field_prefix = formelement.Meta.field_prefix else: # Get name from the class field_prefix = str(formelement).split("'")[1].split(".")[-1] self.field_prefix = field_prefix # Prepare form formobj = formelement(instance=instance, data=self.request.POST) formobj.form_name = form.form_name # Excluded fields if 'exclude' not in formobj.Meta.__dict__: formobj.Meta.exclude = [linkerfield] elif linkerfield not in formobj.Meta.exclude: formobj.Meta.exclude.append(linkerfield) if linkerfield in formobj.fields: del(formobj.fields[linkerfield]) # Link it to the main model formobj.add_prefix = lambda fields_name, field_prefix=field_prefix: "%s_%s" % (field_prefix, fields_name) # Validate valid *= formobj.is_valid() # append error if not formobj.is_valid() and ('non_field_errors' in dir(formobj)): errors += [element[5] for element in list(formobj.non_field_errors())[:-1]] # Save fields to the list if groups: for field in formobj: # raise Exception (field.__dict__) if 'unblock_t2ime' in field.html_name: raise Exception(field.field.__dict__) fields.append(field) # Add a new form temp_forms.append((formobj, linkerfield)) # execute validation specified validate_forms = None if valid and ("validate" in dir(self)): validate_forms = [tform[0] for tform in temp_forms] errors = self.validate(*validate_forms) # valid = len(errors) == 0 valid = False if errors is None or len(errors) == 0: valid = True # Remember list of fields if groups: form.list_fields = fields forms = [] else: if validate_forms: forms = validate_forms 
else: forms = [tform[0] for tform in temp_forms] if position_form_default == 0: open_tabs = 1 else: open_tabs = 0 # Check validation result if valid: # Everything is OK, call valid return self.form_valid(form, temp_forms) else: # Something went wrong, attach error and call invalid form.list_errors = errors return self.form_invalid(form, forms, open_tabs, position_form_default)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Prepare base", "if", "'pk'", "in", "kwargs", ":", "self", ".", "object", "=", "self", ".", "get_object", "(", ")", "else", ":", "self", ".", "object", "=", "None", "form_class", "=", "self", ".", "get_form_class", "(", ")", "# Get prefix", "if", "'field_prefix'", "in", "form_class", ".", "Meta", ".", "__dict__", ":", "# Get name from the form", "field_prefix", "=", "form_class", ".", "Meta", ".", "field_prefix", "# Initialize list of fields", "else", ":", "# Get name from the class", "field_prefix", "=", "str", "(", "form_class", ")", ".", "split", "(", "\"'\"", ")", "[", "1", "]", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", "self", ".", "field_prefix", "=", "field_prefix", "# Build the form", "form", "=", "self", ".", "get_form", "(", "form_class", ")", "# Find groups", "if", "'groups'", "in", "dir", "(", "self", ")", ":", "# Save groups", "groups", "=", "self", ".", "groups", "# Redefine groups inside the form", "form", ".", "__groups__", "=", "lambda", ":", "groups", "# Initialize list of fields", "fields", "=", "[", "]", "else", ":", "groups", "=", "None", "# Add special prefix support to properly support form independency", "form", ".", "add_prefix", "=", "lambda", "fields_name", ",", "field_prefix", "=", "field_prefix", ":", "\"%s_%s\"", "%", "(", "field_prefix", ",", "fields_name", ")", "# Check validation", "valid", "=", "form", ".", "is_valid", "(", ")", "if", "(", "not", "valid", ")", "and", "(", "'non_field_errors'", "in", "dir", "(", "self", ")", ")", ":", "errors", "=", "[", "element", "[", "5", "]", "for", "element", "in", "list", "(", "self", ".", "non_field_errors", "(", ")", ")", "[", ":", "-", "1", "]", "]", "elif", "form", ".", "errors", ".", "as_data", "(", ")", ":", "errors", "=", "[", "]", "for", "element", "in", "form", ".", "errors", ".", "as_data", "(", ")", ":", "for", "err", "in", "form", ".", "errors", ".", "as_data", "(", ")", "[", "element", "]", "[", "0", "]", ":", "errors", ".", "append", "(", "err", ")", "else", ":", "errors", "=", "[", "]", "# For every extra form", "temp_forms", "=", "[", "]", "position_form_default", "=", "0", "for", "(", "formelement", ",", "linkerfield", ",", "modelfilter", ")", "in", "self", ".", "forms", ":", "if", "formelement", "is", "None", ":", "formobj", "=", "form", "position_form_default", "=", "len", "(", "temp_forms", ")", "else", ":", "# Locate linked element", "if", "self", ".", "object", ":", "related_name", "=", "formelement", ".", "_meta", ".", "model", ".", "_meta", ".", "get_field", "(", "linkerfield", ")", ".", "related_query_name", "(", ")", "queryset", "=", "getattr", "(", "self", ".", "object", ",", "related_name", ")", "if", "modelfilter", ":", "queryset", "=", "queryset", ".", "filter", "(", "eval", "(", "\"Q(%s)\"", "%", "(", "modelfilter", ")", ")", ")", "get_method", "=", "getattr", "(", "queryset", ",", "'get'", ",", "None", ")", "if", "get_method", ":", "instance", "=", "queryset", ".", "get", "(", ")", "else", ":", "instance", "=", "queryset", "else", ":", "instance", "=", "None", "# Get prefix", "if", "'field_prefix'", "in", "formelement", ".", "Meta", ".", "__dict__", ":", "# Get name from the form", "field_prefix", "=", "formelement", ".", "Meta", ".", "field_prefix", "else", ":", "# Get name from the class", "field_prefix", "=", "str", "(", "formelement", ")", ".", "split", "(", "\"'\"", ")", "[", "1", "]", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", "self", ".", "field_prefix", "=", 
"field_prefix", "# Prepare form", "formobj", "=", "formelement", "(", "instance", "=", "instance", ",", "data", "=", "self", ".", "request", ".", "POST", ")", "formobj", ".", "form_name", "=", "form", ".", "form_name", "# Excluded fields", "if", "'exclude'", "not", "in", "formobj", ".", "Meta", ".", "__dict__", ":", "formobj", ".", "Meta", ".", "exclude", "=", "[", "linkerfield", "]", "elif", "linkerfield", "not", "in", "formobj", ".", "Meta", ".", "exclude", ":", "formobj", ".", "Meta", ".", "exclude", ".", "append", "(", "linkerfield", ")", "if", "linkerfield", "in", "formobj", ".", "fields", ":", "del", "(", "formobj", ".", "fields", "[", "linkerfield", "]", ")", "# Link it to the main model", "formobj", ".", "add_prefix", "=", "lambda", "fields_name", ",", "field_prefix", "=", "field_prefix", ":", "\"%s_%s\"", "%", "(", "field_prefix", ",", "fields_name", ")", "# Validate", "valid", "*=", "formobj", ".", "is_valid", "(", ")", "# append error", "if", "not", "formobj", ".", "is_valid", "(", ")", "and", "(", "'non_field_errors'", "in", "dir", "(", "formobj", ")", ")", ":", "errors", "+=", "[", "element", "[", "5", "]", "for", "element", "in", "list", "(", "formobj", ".", "non_field_errors", "(", ")", ")", "[", ":", "-", "1", "]", "]", "# Save fields to the list", "if", "groups", ":", "for", "field", "in", "formobj", ":", "# raise Exception (field.__dict__)", "if", "'unblock_t2ime'", "in", "field", ".", "html_name", ":", "raise", "Exception", "(", "field", ".", "field", ".", "__dict__", ")", "fields", ".", "append", "(", "field", ")", "# Add a new form", "temp_forms", ".", "append", "(", "(", "formobj", ",", "linkerfield", ")", ")", "# execute validation specified", "validate_forms", "=", "None", "if", "valid", "and", "(", "\"validate\"", "in", "dir", "(", "self", ")", ")", ":", "validate_forms", "=", "[", "tform", "[", "0", "]", "for", "tform", "in", "temp_forms", "]", "errors", "=", "self", ".", "validate", "(", "*", "validate_forms", ")", "# valid = len(errors) == 0", "valid", "=", "False", "if", "errors", "is", "None", "or", "len", "(", "errors", ")", "==", "0", ":", "valid", "=", "True", "# Remember list of fields", "if", "groups", ":", "form", ".", "list_fields", "=", "fields", "forms", "=", "[", "]", "else", ":", "if", "validate_forms", ":", "forms", "=", "validate_forms", "else", ":", "forms", "=", "[", "tform", "[", "0", "]", "for", "tform", "in", "temp_forms", "]", "if", "position_form_default", "==", "0", ":", "open_tabs", "=", "1", "else", ":", "open_tabs", "=", "0", "# Check validation result", "if", "valid", ":", "# Everything is OK, call valid", "return", "self", ".", "form_valid", "(", "form", ",", "temp_forms", ")", "else", ":", "# Something went wrong, attach error and call invalid", "form", ".", "list_errors", "=", "errors", "return", "self", ".", "form_invalid", "(", "form", ",", "forms", ",", "open_tabs", ",", "position_form_default", ")" ]
Handles POST requests, instantiating a form instance and its inline formsets with the passed POST variables and then checking them for validity.
[ "Handles", "POST", "requests", "instantiating", "a", "form", "instance", "and", "its", "inline", "formsets", "with", "the", "passed", "POST", "variables", "and", "then", "checking", "them", "for", "validity", "." ]
train
https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/multiforms.py#L162-L309
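Validity is accumulated with valid *= formobj.is_valid(), leaning on bool/int coercion: once any form fails, valid becomes 0 and stays falsy, so the final if valid: takes the invalid branch. A two-line check:

valid = True
valid *= False  # valid == 0
valid *= True   # still 0 -> falsy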
codenerix/django-codenerix
codenerix/multiforms.py
MultiForm.form_valid
def form_valid(self, form, forms): """ Called if all forms are valid. Saves the main form and its linked forms and then redirects to a success page. """ if self.object: form.save() for (formobj, linkerfield) in forms: if form != formobj: formobj.save() else: self.object = form.save() for (formobj, linkerfield) in forms: if form != formobj: setattr(formobj.instance, linkerfield, self.object) formobj.save() return HttpResponseRedirect(self.get_success_url())
python
def form_valid(self, form, forms): """ Called if all forms are valid. Saves the main form and its linked forms and then redirects to a success page. """ if self.object: form.save() for (formobj, linkerfield) in forms: if form != formobj: formobj.save() else: self.object = form.save() for (formobj, linkerfield) in forms: if form != formobj: setattr(formobj.instance, linkerfield, self.object) formobj.save() return HttpResponseRedirect(self.get_success_url())
[ "def", "form_valid", "(", "self", ",", "form", ",", "forms", ")", ":", "if", "self", ".", "object", ":", "form", ".", "save", "(", ")", "for", "(", "formobj", ",", "linkerfield", ")", "in", "forms", ":", "if", "form", "!=", "formobj", ":", "formobj", ".", "save", "(", ")", "else", ":", "self", ".", "object", "=", "form", ".", "save", "(", ")", "for", "(", "formobj", ",", "linkerfield", ")", "in", "forms", ":", "if", "form", "!=", "formobj", ":", "setattr", "(", "formobj", ".", "instance", ",", "linkerfield", ",", "self", ".", "object", ")", "formobj", ".", "save", "(", ")", "return", "HttpResponseRedirect", "(", "self", ".", "get_success_url", "(", ")", ")" ]
Called if all forms are valid. Creates a Recipe instance along with associated Ingredients and Instructions and then redirects to a success page.
[ "Called", "if", "all", "forms", "are", "valid", ".", "Creates", "a", "Recipe", "instance", "along", "with", "associated", "Ingredients", "and", "Instructions", "and", "then", "redirects", "to", "a", "success", "page", "." ]
train
https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/multiforms.py#L312-L327
codenerix/django-codenerix
codenerix/multiforms.py
MultiForm.form_invalid
def form_invalid(self, form, forms, open_tabs, position_form_default): """ Called if a form is invalid. Re-renders the context data with the data-filled forms and errors. """ # return self.render_to_response( self.get_context_data( form = form, forms = forms ) ) return self.render_to_response(self.get_context_data(form=form, forms=forms, open_tabs=open_tabs, position_form_default=position_form_default))
python
def form_invalid(self, form, forms, open_tabs, position_form_default): """ Called if a form is invalid. Re-renders the context data with the data-filled forms and errors. """ # return self.render_to_response( self.get_context_data( form = form, forms = forms ) ) return self.render_to_response(self.get_context_data(form=form, forms=forms, open_tabs=open_tabs, position_form_default=position_form_default))
[ "def", "form_invalid", "(", "self", ",", "form", ",", "forms", ",", "open_tabs", ",", "position_form_default", ")", ":", "# return self.render_to_response( self.get_context_data( form = form, forms = forms ) )", "return", "self", ".", "render_to_response", "(", "self", ".", "get_context_data", "(", "form", "=", "form", ",", "forms", "=", "forms", ",", "open_tabs", "=", "open_tabs", ",", "position_form_default", "=", "position_form_default", ")", ")" ]
Called if a form is invalid. Re-renders the context data with the data-filled forms and errors.
[ "Called", "if", "a", "form", "is", "invalid", ".", "Re", "-", "renders", "the", "context", "data", "with", "the", "data", "-", "filled", "forms", "and", "errors", "." ]
train
https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/multiforms.py#L329-L334
markchil/gptools
gptools/utils.py
wrap_fmin_slsqp
def wrap_fmin_slsqp(fun, guess, opt_kwargs={}): """Wrapper for :py:func:`fmin_slsqp` to allow it to be called with :py:func:`minimize`-like syntax. This is included to enable the code to run with :py:mod:`scipy` versions older than 0.11.0. Accepts `opt_kwargs` in the same format as used by :py:func:`scipy.optimize.minimize`, with the additional precondition that the keyword `method` has already been removed by the calling code. Parameters ---------- fun : callable The function to minimize. guess : sequence The initial guess for the parameters. opt_kwargs : dict, optional Dictionary of extra keywords to pass to :py:func:`scipy.optimize.minimize`. Refer to that function's docstring for valid options. The keywords 'jac', 'hess' and 'hessp' are ignored. Note that if you were planning to use `jac` = True (i.e., optimization function returns Jacobian) and have set `args` = (True,) to tell :py:meth:`update_hyperparameters` to compute and return the Jacobian this may cause unexpected behavior. Default is: {}. Returns ------- Result : namedtuple :py:class:`namedtuple` that mimics the fields of the :py:class:`Result` object returned by :py:func:`scipy.optimize.minimize`. Has the following fields: ======= ======= =================================================================================== status int Code indicating the exit mode of the optimizer (`imode` from :py:func:`fmin_slsqp`) success bool Boolean indicating whether or not the optimizer thinks a minimum was found. fun float Value of the optimized function (-1*LL). x ndarray Optimal values of the hyperparameters. message str String describing the exit state (`smode` from :py:func:`fmin_slsqp`) nit int Number of iterations. ======= ======= =================================================================================== Raises ------ ValueError Invalid constraint type in `constraints`. (See documentation for :py:func:`scipy.optimize.minimize`.) """ opt_kwargs = dict(opt_kwargs) opt_kwargs.pop('method', None) eqcons = [] ieqcons = [] if 'constraints' in opt_kwargs: if isinstance(opt_kwargs['constraints'], dict): opt_kwargs['constraints'] = [opt_kwargs['constraints'],] for con in opt_kwargs.pop('constraints'): if con['type'] == 'eq': eqcons += [con['fun'],] elif con['type'] == 'ineq': ieqcons += [con['fun'],] else: raise ValueError("Invalid constraint type %s!" % (con['type'],)) if 'jac' in opt_kwargs: warnings.warn("Jacobian not supported for default solver SLSQP!", RuntimeWarning) opt_kwargs.pop('jac') if 'tol' in opt_kwargs: opt_kwargs['acc'] = opt_kwargs.pop('tol') if 'options' in opt_kwargs: opts = opt_kwargs.pop('options') opt_kwargs = dict(opt_kwargs.items() + opts.items()) # Other keywords with less likelihood for causing failures are silently ignored: opt_kwargs.pop('hess', None) opt_kwargs.pop('hessp', None) opt_kwargs.pop('callback', None) out, fx, its, imode, smode = scipy.optimize.fmin_slsqp( fun, guess, full_output=True, eqcons=eqcons, ieqcons=ieqcons, **opt_kwargs ) Result = collections.namedtuple('Result', ['status', 'success', 'fun', 'x', 'message', 'nit']) return Result(status=imode, success=(imode == 0), fun=fx, x=out, message=smode, nit=its)
python
def wrap_fmin_slsqp(fun, guess, opt_kwargs={}): """Wrapper for :py:func:`fmin_slsqp` to allow it to be called with :py:func:`minimize`-like syntax. This is included to enable the code to run with :py:mod:`scipy` versions older than 0.11.0. Accepts `opt_kwargs` in the same format as used by :py:func:`scipy.optimize.minimize`, with the additional precondition that the keyword `method` has already been removed by the calling code. Parameters ---------- fun : callable The function to minimize. guess : sequence The initial guess for the parameters. opt_kwargs : dict, optional Dictionary of extra keywords to pass to :py:func:`scipy.optimize.minimize`. Refer to that function's docstring for valid options. The keywords 'jac', 'hess' and 'hessp' are ignored. Note that if you were planning to use `jac` = True (i.e., optimization function returns Jacobian) and have set `args` = (True,) to tell :py:meth:`update_hyperparameters` to compute and return the Jacobian this may cause unexpected behavior. Default is: {}. Returns ------- Result : namedtuple :py:class:`namedtuple` that mimics the fields of the :py:class:`Result` object returned by :py:func:`scipy.optimize.minimize`. Has the following fields: ======= ======= =================================================================================== status int Code indicating the exit mode of the optimizer (`imode` from :py:func:`fmin_slsqp`) success bool Boolean indicating whether or not the optimizer thinks a minimum was found. fun float Value of the optimized function (-1*LL). x ndarray Optimal values of the hyperparameters. message str String describing the exit state (`smode` from :py:func:`fmin_slsqp`) nit int Number of iterations. ======= ======= =================================================================================== Raises ------ ValueError Invalid constraint type in `constraints`. (See documentation for :py:func:`scipy.optimize.minimize`.) """ opt_kwargs = dict(opt_kwargs) opt_kwargs.pop('method', None) eqcons = [] ieqcons = [] if 'constraints' in opt_kwargs: if isinstance(opt_kwargs['constraints'], dict): opt_kwargs['constraints'] = [opt_kwargs['constraints'],] for con in opt_kwargs.pop('constraints'): if con['type'] == 'eq': eqcons += [con['fun'],] elif con['type'] == 'ineq': ieqcons += [con['fun'],] else: raise ValueError("Invalid constraint type %s!" % (con['type'],)) if 'jac' in opt_kwargs: warnings.warn("Jacobian not supported for default solver SLSQP!", RuntimeWarning) opt_kwargs.pop('jac') if 'tol' in opt_kwargs: opt_kwargs['acc'] = opt_kwargs.pop('tol') if 'options' in opt_kwargs: opts = opt_kwargs.pop('options') opt_kwargs = dict(opt_kwargs.items() + opts.items()) # Other keywords with less likelihood for causing failures are silently ignored: opt_kwargs.pop('hess', None) opt_kwargs.pop('hessp', None) opt_kwargs.pop('callback', None) out, fx, its, imode, smode = scipy.optimize.fmin_slsqp( fun, guess, full_output=True, eqcons=eqcons, ieqcons=ieqcons, **opt_kwargs ) Result = collections.namedtuple('Result', ['status', 'success', 'fun', 'x', 'message', 'nit']) return Result(status=imode, success=(imode == 0), fun=fx, x=out, message=smode, nit=its)
[ "def", "wrap_fmin_slsqp", "(", "fun", ",", "guess", ",", "opt_kwargs", "=", "{", "}", ")", ":", "opt_kwargs", "=", "dict", "(", "opt_kwargs", ")", "opt_kwargs", ".", "pop", "(", "'method'", ",", "None", ")", "eqcons", "=", "[", "]", "ieqcons", "=", "[", "]", "if", "'constraints'", "in", "opt_kwargs", ":", "if", "isinstance", "(", "opt_kwargs", "[", "'constraints'", "]", ",", "dict", ")", ":", "opt_kwargs", "[", "'constraints'", "]", "=", "[", "opt_kwargs", "[", "'constraints'", "]", ",", "]", "for", "con", "in", "opt_kwargs", ".", "pop", "(", "'constraints'", ")", ":", "if", "con", "[", "'type'", "]", "==", "'eq'", ":", "eqcons", "+=", "[", "con", "[", "'fun'", "]", ",", "]", "elif", "con", "[", "'type'", "]", "==", "'ineq'", ":", "ieqcons", "+=", "[", "con", "[", "'fun'", "]", ",", "]", "else", ":", "raise", "ValueError", "(", "\"Invalid constraint type %s!\"", "%", "(", "con", "[", "'type'", "]", ",", ")", ")", "if", "'jac'", "in", "opt_kwargs", ":", "warnings", ".", "warn", "(", "\"Jacobian not supported for default solver SLSQP!\"", ",", "RuntimeWarning", ")", "opt_kwargs", ".", "pop", "(", "'jac'", ")", "if", "'tol'", "in", "opt_kwargs", ":", "opt_kwargs", "[", "'acc'", "]", "=", "opt_kwargs", ".", "pop", "(", "'tol'", ")", "if", "'options'", "in", "opt_kwargs", ":", "opts", "=", "opt_kwargs", ".", "pop", "(", "'options'", ")", "opt_kwargs", "=", "dict", "(", "opt_kwargs", ".", "items", "(", ")", "+", "opts", ".", "items", "(", ")", ")", "# Other keywords with less likelihood for causing failures are silently ignored:", "opt_kwargs", ".", "pop", "(", "'hess'", ",", "None", ")", "opt_kwargs", ".", "pop", "(", "'hessp'", ",", "None", ")", "opt_kwargs", ".", "pop", "(", "'callback'", ",", "None", ")", "out", ",", "fx", ",", "its", ",", "imode", ",", "smode", "=", "scipy", ".", "optimize", ".", "fmin_slsqp", "(", "fun", ",", "guess", ",", "full_output", "=", "True", ",", "eqcons", "=", "eqcons", ",", "ieqcons", "=", "ieqcons", ",", "*", "*", "opt_kwargs", ")", "Result", "=", "collections", ".", "namedtuple", "(", "'Result'", ",", "[", "'status'", ",", "'success'", ",", "'fun'", ",", "'x'", ",", "'message'", ",", "'nit'", "]", ")", "return", "Result", "(", "status", "=", "imode", ",", "success", "=", "(", "imode", "==", "0", ")", ",", "fun", "=", "fx", ",", "x", "=", "out", ",", "message", "=", "smode", ",", "nit", "=", "its", ")" ]
Wrapper for :py:func:`fmin_slsqp` to allow it to be called with :py:func:`minimize`-like syntax. This is included to enable the code to run with :py:mod:`scipy` versions older than 0.11.0. Accepts `opt_kwargs` in the same format as used by :py:func:`scipy.optimize.minimize`, with the additional precondition that the keyword `method` has already been removed by the calling code. Parameters ---------- fun : callable The function to minimize. guess : sequence The initial guess for the parameters. opt_kwargs : dict, optional Dictionary of extra keywords to pass to :py:func:`scipy.optimize.minimize`. Refer to that function's docstring for valid options. The keywords 'jac', 'hess' and 'hessp' are ignored. Note that if you were planning to use `jac` = True (i.e., optimization function returns Jacobian) and have set `args` = (True,) to tell :py:meth:`update_hyperparameters` to compute and return the Jacobian this may cause unexpected behavior. Default is: {}. Returns ------- Result : namedtuple :py:class:`namedtuple` that mimics the fields of the :py:class:`Result` object returned by :py:func:`scipy.optimize.minimize`. Has the following fields: ======= ======= =================================================================================== status int Code indicating the exit mode of the optimizer (`imode` from :py:func:`fmin_slsqp`) success bool Boolean indicating whether or not the optimizer thinks a minimum was found. fun float Value of the optimized function (-1*LL). x ndarray Optimal values of the hyperparameters. message str String describing the exit state (`smode` from :py:func:`fmin_slsqp`) nit int Number of iterations. ======= ======= =================================================================================== Raises ------ ValueError Invalid constraint type in `constraints`. (See documentation for :py:func:`scipy.optimize.minimize`.)
[ "Wrapper", "for", ":", "py", ":", "func", ":", "fmin_slsqp", "to", "allow", "it", "to", "be", "called", "with", ":", "py", ":", "func", ":", "minimize", "-", "like", "syntax", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1269-L1367
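A minimal usage sketch for wrap_fmin_slsqp; the objective and constraint below are invented for illustration, and it is assumed the package is installed so that gptools.utils imports:

from gptools.utils import wrap_fmin_slsqp

# Hypothetical objective: a paraboloid with its unconstrained minimum at (3, -1).
def objective(p):
    return (p[0] - 3.0)**2 + (p[1] + 1.0)**2

# One inequality constraint, p[0] + p[1] >= 1, in scipy.optimize.minimize format;
# the wrapper converts it into the ieqcons list that fmin_slsqp expects.
res = wrap_fmin_slsqp(
    objective,
    [0.0, 0.0],
    opt_kwargs={'constraints': {'type': 'ineq', 'fun': lambda p: p[0] + p[1] - 1.0}}
)
print(res.x)        # approximately [3, -1]
print(res.success)  # True when the exit mode imode == 0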
markchil/gptools
gptools/utils.py
fixed_poch
def fixed_poch(a, n): """Implementation of the Pochhammer symbol :math:`(a)_n` which handles negative integer arguments properly. Need conditional statement because scipy's implementation of the Pochhammer symbol is wrong for negative integer arguments. This function uses the definition from http://functions.wolfram.com/GammaBetaErf/Pochhammer/02/ Parameters ---------- a : float The argument. n : nonnegative int The order. """ # Old form, calls gamma function: # if a < 0.0 and a % 1 == 0 and n <= -a: # p = (-1.0)**n * scipy.misc.factorial(-a) / scipy.misc.factorial(-a - n) # else: # p = scipy.special.poch(a, n) # return p if (int(n) != n) or (n < 0): raise ValueError("Parameter n must be a nonnegative int!") n = int(n) # Direct form based on product: terms = [a + k for k in range(0, n)] return scipy.prod(terms)
python
def fixed_poch(a, n): """Implementation of the Pochhammer symbol :math:`(a)_n` which handles negative integer arguments properly. Need conditional statement because scipy's implementation of the Pochhammer symbol is wrong for negative integer arguments. This function uses the definition from http://functions.wolfram.com/GammaBetaErf/Pochhammer/02/ Parameters ---------- a : float The argument. n : nonnegative int The order. """ # Old form, calls gamma function: # if a < 0.0 and a % 1 == 0 and n <= -a: # p = (-1.0)**n * scipy.misc.factorial(-a) / scipy.misc.factorial(-a - n) # else: # p = scipy.special.poch(a, n) # return p if (int(n) != n) or (n < 0): raise ValueError("Parameter n must be a nonnegative int!") n = int(n) # Direct form based on product: terms = [a + k for k in range(0, n)] return scipy.prod(terms)
[ "def", "fixed_poch", "(", "a", ",", "n", ")", ":", "# Old form, calls gamma function:", "# if a < 0.0 and a % 1 == 0 and n <= -a:", "# p = (-1.0)**n * scipy.misc.factorial(-a) / scipy.misc.factorial(-a - n)", "# else:", "# p = scipy.special.poch(a, n)", "# return p", "if", "(", "int", "(", "n", ")", "!=", "n", ")", "or", "(", "n", "<", "0", ")", ":", "raise", "ValueError", "(", "\"Parameter n must be a nonnegative int!\"", ")", "n", "=", "int", "(", "n", ")", "# Direct form based on product:", "terms", "=", "[", "a", "+", "k", "for", "k", "in", "range", "(", "0", ",", "n", ")", "]", "return", "scipy", ".", "prod", "(", "terms", ")" ]
Implementation of the Pochhammer symbol :math:`(a)_n` which handles negative integer arguments properly. Need conditional statement because scipy's implementation of the Pochhammer symbol is wrong for negative integer arguments. This function uses the definition from http://functions.wolfram.com/GammaBetaErf/Pochhammer/02/ Parameters ---------- a : float The argument. n : nonnegative int The order.
[ "Implementation", "of", "the", "Pochhammer", "symbol", ":", "math", ":", "(", "a", ")", "_n", "which", "handles", "negative", "integer", "arguments", "properly", ".", "Need", "conditional", "statement", "because", "scipy", "s", "impelementation", "of", "the", "Pochhammer", "symbol", "is", "wrong", "for", "negative", "integer", "arguments", ".", "This", "function", "uses", "the", "definition", "from", "http", ":", "//", "functions", ".", "wolfram", ".", "com", "/", "GammaBetaErf", "/", "Pochhammer", "/", "02", "/", "Parameters", "----------", "a", ":", "float", "The", "argument", ".", "n", ":", "nonnegative", "int", "The", "order", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1369-L1395
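A quick check of fixed_poch against the definition :math:`(a)_n = a(a+1)\cdots(a+n-1)`; the negative integer cases are exactly where the docstring notes that scipy.special.poch goes wrong (assumes gptools.utils is importable):

from gptools.utils import fixed_poch

print(fixed_poch(2.5, 3))  # 2.5 * 3.5 * 4.5 = 39.375
print(fixed_poch(-3, 2))   # (-3) * (-2) = 6.0
print(fixed_poch(-3, 5))   # the product crosses zero, so the result is 0.0
print(fixed_poch(-3, 0))   # empty product: 1.0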
markchil/gptools
gptools/utils.py
Kn2Der
def Kn2Der(nu, y, n=0): r"""Find the derivatives of :math:`K_\nu(y^{1/2})`. Parameters ---------- nu : float The order of the modified Bessel function of the second kind. y : array of float The values to evaluate at. n : nonnegative int, optional The order of derivative to take. """ n = int(n) y = scipy.asarray(y, dtype=float) sqrty = scipy.sqrt(y) if n == 0: K = scipy.special.kv(nu, sqrty) else: K = scipy.zeros_like(y) x = scipy.asarray( [ fixed_poch(1.5 - j, j) * y**(0.5 - j) for j in scipy.arange(1.0, n + 1.0, dtype=float) ] ).T for k in scipy.arange(1.0, n + 1.0, dtype=float): K += ( scipy.special.kvp(nu, sqrty, n=int(k)) * incomplete_bell_poly(n, int(k), x) ) return K
python
def Kn2Der(nu, y, n=0): r"""Find the derivatives of :math:`K_\nu(y^{1/2})`. Parameters ---------- nu : float The order of the modified Bessel function of the second kind. y : array of float The values to evaluate at. n : nonnegative int, optional The order of derivative to take. """ n = int(n) y = scipy.asarray(y, dtype=float) sqrty = scipy.sqrt(y) if n == 0: K = scipy.special.kv(nu, sqrty) else: K = scipy.zeros_like(y) x = scipy.asarray( [ fixed_poch(1.5 - j, j) * y**(0.5 - j) for j in scipy.arange(1.0, n + 1.0, dtype=float) ] ).T for k in scipy.arange(1.0, n + 1.0, dtype=float): K += ( scipy.special.kvp(nu, sqrty, n=int(k)) * incomplete_bell_poly(n, int(k), x) ) return K
[ "def", "Kn2Der", "(", "nu", ",", "y", ",", "n", "=", "0", ")", ":", "n", "=", "int", "(", "n", ")", "y", "=", "scipy", ".", "asarray", "(", "y", ",", "dtype", "=", "float", ")", "sqrty", "=", "scipy", ".", "sqrt", "(", "y", ")", "if", "n", "==", "0", ":", "K", "=", "scipy", ".", "special", ".", "kv", "(", "nu", ",", "sqrty", ")", "else", ":", "K", "=", "scipy", ".", "zeros_like", "(", "y", ")", "x", "=", "scipy", ".", "asarray", "(", "[", "fixed_poch", "(", "1.5", "-", "j", ",", "j", ")", "*", "y", "**", "(", "0.5", "-", "j", ")", "for", "j", "in", "scipy", ".", "arange", "(", "1.0", ",", "n", "+", "1.0", ",", "dtype", "=", "float", ")", "]", ")", ".", "T", "for", "k", "in", "scipy", ".", "arange", "(", "1.0", ",", "n", "+", "1.0", ",", "dtype", "=", "float", ")", ":", "K", "+=", "(", "scipy", ".", "special", ".", "kvp", "(", "nu", ",", "sqrty", ",", "n", "=", "int", "(", "k", ")", ")", "*", "incomplete_bell_poly", "(", "n", ",", "int", "(", "k", ")", ",", "x", ")", ")", "return", "K" ]
r"""Find the derivatives of :math:`K_\nu(y^{1/2})`. Parameters ---------- nu : float The order of the modified Bessel function of the second kind. y : array of float The values to evaluate at. n : nonnegative int, optional The order of derivative to take.
[ "r", "Find", "the", "derivatives", "of", ":", "math", ":", "K_", "\\", "nu", "(", "y^", "{", "1", "/", "2", "}", ")", ".", "Parameters", "----------", "nu", ":", "float", "The", "order", "of", "the", "modified", "Bessel", "function", "of", "the", "second", "kind", ".", "y", ":", "array", "of", "float", "The", "values", "to", "evaluate", "at", ".", "n", ":", "nonnegative", "int", "optional", "The", "order", "of", "derivative", "to", "take", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1397-L1427
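Since Kn2Der(nu, y, n=1) should be the derivative of Kn2Der(nu, y) with respect to y, a central finite difference makes a simple sanity check of the Faa di Bruno machinery; nu = 1.5 and the step size are arbitrary choices for illustration:

import scipy
from gptools.utils import Kn2Der

y = scipy.array([1.0, 4.0, 9.0])
h = 1e-6
analytic = Kn2Der(1.5, y, n=1)
# Central difference of the n=0 values should agree to roundoff level:
numeric = (Kn2Der(1.5, y + h) - Kn2Der(1.5, y - h)) / (2.0 * h)
print(abs(analytic - numeric).max())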
markchil/gptools
gptools/utils.py
yn2Kn2Der
def yn2Kn2Der(nu, y, n=0, tol=5e-4, nterms=1, nu_step=0.001): r"""Computes the function :math:`y^{\nu/2} K_{\nu}(y^{1/2})` and its derivatives. Care has been taken to handle the conditions at :math:`y=0`. For `n=0`, uses a direct evaluation of the expression, replacing points where `y=0` with the appropriate value. For `n>0`, uses a general sum expression to evaluate the expression, and handles the value at `y=0` using a power series expansion. Where it becomes infinite, the infinities will have the appropriate sign for a limit approaching zero from the right. Uses a power series expansion around :math:`y=0` to avoid numerical issues. Handles integer `nu` by performing a linear interpolation between values of `nu` slightly above and below the requested value. Parameters ---------- nu : float The order of the modified Bessel function and the exponent of `y`. y : array of float The points to evaluate the function at. These are assumed to be nonegative. n : nonnegative int, optional The order of derivative to take. Set to zero (the default) to get the value. tol : float, optional The distance from zero for which the power series is used. Default is 5e-4. nterms : int, optional The number of terms to include in the power series. Default is 1. nu_step : float, optional The amount to vary `nu` by when handling integer values of `nu`. Default is 0.001. """ n = int(n) y = scipy.asarray(y, dtype=float) if n == 0: K = y**(nu / 2.0) * scipy.special.kv(nu, scipy.sqrt(y)) K[y == 0.0] = scipy.special.gamma(nu) / 2.0**(1.0 - nu) else: K = scipy.zeros_like(y) for k in scipy.arange(0.0, n + 1.0, dtype=float): K += ( scipy.special.binom(n, k) * fixed_poch(1.0 + nu / 2.0 - k, k) * y**(nu / 2.0 - k) * Kn2Der(nu, y, n=n-k) ) # Do the extra work to handle y == 0 only if we need to: mask = (y == 0.0) if (mask).any(): if int(nu) == nu: K[mask] = 0.5 * ( yn2Kn2Der(nu - nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) + yn2Kn2Der(nu + nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) ) else: if n > nu: K[mask] = scipy.special.gamma(-nu) * fixed_poch(1 + nu - n, n) * scipy.inf else: K[mask] = scipy.special.gamma(nu) * scipy.special.gamma(n + 1.0) / ( 2.0**(1.0 - nu + 2.0 * n) * fixed_poch(1.0 - nu, n) * scipy.special.factorial(n) ) if tol > 0.0: # Replace points within tol (absolute distance) of zero with the power # series approximation: mask = (y <= tol) & (y > 0.0) K[mask] = 0.0 if int(nu) == nu: K[mask] = 0.5 * ( yn2Kn2Der(nu - nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) + yn2Kn2Der(nu + nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) ) else: for k in scipy.arange(n, n + nterms, dtype=float): K[mask] += ( scipy.special.gamma(nu) * fixed_poch(1.0 + k - n, n) * y[mask]**(k - n) / ( 2.0**(1.0 - nu + 2 * k) * fixed_poch(1.0 - nu, k) * scipy.special.factorial(k)) ) for k in scipy.arange(0, nterms, dtype=float): K[mask] += ( scipy.special.gamma(-nu) * fixed_poch(1.0 + nu + k - n, n) * y[mask]**(nu + k - n) / ( 2.0**(1.0 + nu + 2.0 * k) * fixed_poch(1.0 + nu, k) * scipy.special.factorial(k) ) ) return K
python
def yn2Kn2Der(nu, y, n=0, tol=5e-4, nterms=1, nu_step=0.001): r"""Computes the function :math:`y^{\nu/2} K_{\nu}(y^{1/2})` and its derivatives. Care has been taken to handle the conditions at :math:`y=0`. For `n=0`, uses a direct evaluation of the expression, replacing points where `y=0` with the appropriate value. For `n>0`, uses a general sum expression to evaluate the expression, and handles the value at `y=0` using a power series expansion. Where it becomes infinite, the infinities will have the appropriate sign for a limit approaching zero from the right. Uses a power series expansion around :math:`y=0` to avoid numerical issues. Handles integer `nu` by performing a linear interpolation between values of `nu` slightly above and below the requested value. Parameters ---------- nu : float The order of the modified Bessel function and the exponent of `y`. y : array of float The points to evaluate the function at. These are assumed to be nonegative. n : nonnegative int, optional The order of derivative to take. Set to zero (the default) to get the value. tol : float, optional The distance from zero for which the power series is used. Default is 5e-4. nterms : int, optional The number of terms to include in the power series. Default is 1. nu_step : float, optional The amount to vary `nu` by when handling integer values of `nu`. Default is 0.001. """ n = int(n) y = scipy.asarray(y, dtype=float) if n == 0: K = y**(nu / 2.0) * scipy.special.kv(nu, scipy.sqrt(y)) K[y == 0.0] = scipy.special.gamma(nu) / 2.0**(1.0 - nu) else: K = scipy.zeros_like(y) for k in scipy.arange(0.0, n + 1.0, dtype=float): K += ( scipy.special.binom(n, k) * fixed_poch(1.0 + nu / 2.0 - k, k) * y**(nu / 2.0 - k) * Kn2Der(nu, y, n=n-k) ) # Do the extra work to handle y == 0 only if we need to: mask = (y == 0.0) if (mask).any(): if int(nu) == nu: K[mask] = 0.5 * ( yn2Kn2Der(nu - nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) + yn2Kn2Der(nu + nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) ) else: if n > nu: K[mask] = scipy.special.gamma(-nu) * fixed_poch(1 + nu - n, n) * scipy.inf else: K[mask] = scipy.special.gamma(nu) * scipy.special.gamma(n + 1.0) / ( 2.0**(1.0 - nu + 2.0 * n) * fixed_poch(1.0 - nu, n) * scipy.special.factorial(n) ) if tol > 0.0: # Replace points within tol (absolute distance) of zero with the power # series approximation: mask = (y <= tol) & (y > 0.0) K[mask] = 0.0 if int(nu) == nu: K[mask] = 0.5 * ( yn2Kn2Der(nu - nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) + yn2Kn2Der(nu + nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) ) else: for k in scipy.arange(n, n + nterms, dtype=float): K[mask] += ( scipy.special.gamma(nu) * fixed_poch(1.0 + k - n, n) * y[mask]**(k - n) / ( 2.0**(1.0 - nu + 2 * k) * fixed_poch(1.0 - nu, k) * scipy.special.factorial(k)) ) for k in scipy.arange(0, nterms, dtype=float): K[mask] += ( scipy.special.gamma(-nu) * fixed_poch(1.0 + nu + k - n, n) * y[mask]**(nu + k - n) / ( 2.0**(1.0 + nu + 2.0 * k) * fixed_poch(1.0 + nu, k) * scipy.special.factorial(k) ) ) return K
[ "def", "yn2Kn2Der", "(", "nu", ",", "y", ",", "n", "=", "0", ",", "tol", "=", "5e-4", ",", "nterms", "=", "1", ",", "nu_step", "=", "0.001", ")", ":", "n", "=", "int", "(", "n", ")", "y", "=", "scipy", ".", "asarray", "(", "y", ",", "dtype", "=", "float", ")", "if", "n", "==", "0", ":", "K", "=", "y", "**", "(", "nu", "/", "2.0", ")", "*", "scipy", ".", "special", ".", "kv", "(", "nu", ",", "scipy", ".", "sqrt", "(", "y", ")", ")", "K", "[", "y", "==", "0.0", "]", "=", "scipy", ".", "special", ".", "gamma", "(", "nu", ")", "/", "2.0", "**", "(", "1.0", "-", "nu", ")", "else", ":", "K", "=", "scipy", ".", "zeros_like", "(", "y", ")", "for", "k", "in", "scipy", ".", "arange", "(", "0.0", ",", "n", "+", "1.0", ",", "dtype", "=", "float", ")", ":", "K", "+=", "(", "scipy", ".", "special", ".", "binom", "(", "n", ",", "k", ")", "*", "fixed_poch", "(", "1.0", "+", "nu", "/", "2.0", "-", "k", ",", "k", ")", "*", "y", "**", "(", "nu", "/", "2.0", "-", "k", ")", "*", "Kn2Der", "(", "nu", ",", "y", ",", "n", "=", "n", "-", "k", ")", ")", "# Do the extra work to handle y == 0 only if we need to:", "mask", "=", "(", "y", "==", "0.0", ")", "if", "(", "mask", ")", ".", "any", "(", ")", ":", "if", "int", "(", "nu", ")", "==", "nu", ":", "K", "[", "mask", "]", "=", "0.5", "*", "(", "yn2Kn2Der", "(", "nu", "-", "nu_step", ",", "y", "[", "mask", "]", ",", "n", "=", "n", ",", "tol", "=", "tol", ",", "nterms", "=", "nterms", ",", "nu_step", "=", "nu_step", ")", "+", "yn2Kn2Der", "(", "nu", "+", "nu_step", ",", "y", "[", "mask", "]", ",", "n", "=", "n", ",", "tol", "=", "tol", ",", "nterms", "=", "nterms", ",", "nu_step", "=", "nu_step", ")", ")", "else", ":", "if", "n", ">", "nu", ":", "K", "[", "mask", "]", "=", "scipy", ".", "special", ".", "gamma", "(", "-", "nu", ")", "*", "fixed_poch", "(", "1", "+", "nu", "-", "n", ",", "n", ")", "*", "scipy", ".", "inf", "else", ":", "K", "[", "mask", "]", "=", "scipy", ".", "special", ".", "gamma", "(", "nu", ")", "*", "scipy", ".", "special", ".", "gamma", "(", "n", "+", "1.0", ")", "/", "(", "2.0", "**", "(", "1.0", "-", "nu", "+", "2.0", "*", "n", ")", "*", "fixed_poch", "(", "1.0", "-", "nu", ",", "n", ")", "*", "scipy", ".", "special", ".", "factorial", "(", "n", ")", ")", "if", "tol", ">", "0.0", ":", "# Replace points within tol (absolute distance) of zero with the power", "# series approximation:", "mask", "=", "(", "y", "<=", "tol", ")", "&", "(", "y", ">", "0.0", ")", "K", "[", "mask", "]", "=", "0.0", "if", "int", "(", "nu", ")", "==", "nu", ":", "K", "[", "mask", "]", "=", "0.5", "*", "(", "yn2Kn2Der", "(", "nu", "-", "nu_step", ",", "y", "[", "mask", "]", ",", "n", "=", "n", ",", "tol", "=", "tol", ",", "nterms", "=", "nterms", ",", "nu_step", "=", "nu_step", ")", "+", "yn2Kn2Der", "(", "nu", "+", "nu_step", ",", "y", "[", "mask", "]", ",", "n", "=", "n", ",", "tol", "=", "tol", ",", "nterms", "=", "nterms", ",", "nu_step", "=", "nu_step", ")", ")", "else", ":", "for", "k", "in", "scipy", ".", "arange", "(", "n", ",", "n", "+", "nterms", ",", "dtype", "=", "float", ")", ":", "K", "[", "mask", "]", "+=", "(", "scipy", ".", "special", ".", "gamma", "(", "nu", ")", "*", "fixed_poch", "(", "1.0", "+", "k", "-", "n", ",", "n", ")", "*", "y", "[", "mask", "]", "**", "(", "k", "-", "n", ")", "/", "(", "2.0", "**", "(", "1.0", "-", "nu", "+", "2", "*", "k", ")", "*", "fixed_poch", "(", "1.0", "-", "nu", ",", "k", ")", "*", "scipy", ".", "special", ".", "factorial", "(", "k", ")", ")", ")", "for", "k", "in", "scipy", ".", "arange", "(", "0", ",", "nterms", ",", 
"dtype", "=", "float", ")", ":", "K", "[", "mask", "]", "+=", "(", "scipy", ".", "special", ".", "gamma", "(", "-", "nu", ")", "*", "fixed_poch", "(", "1.0", "+", "nu", "+", "k", "-", "n", ",", "n", ")", "*", "y", "[", "mask", "]", "**", "(", "nu", "+", "k", "-", "n", ")", "/", "(", "2.0", "**", "(", "1.0", "+", "nu", "+", "2.0", "*", "k", ")", "*", "fixed_poch", "(", "1.0", "+", "nu", ",", "k", ")", "*", "scipy", ".", "special", ".", "factorial", "(", "k", ")", ")", ")", "return", "K" ]
r"""Computes the function :math:`y^{\nu/2} K_{\nu}(y^{1/2})` and its derivatives. Care has been taken to handle the conditions at :math:`y=0`. For `n=0`, uses a direct evaluation of the expression, replacing points where `y=0` with the appropriate value. For `n>0`, uses a general sum expression to evaluate the expression, and handles the value at `y=0` using a power series expansion. Where it becomes infinite, the infinities will have the appropriate sign for a limit approaching zero from the right. Uses a power series expansion around :math:`y=0` to avoid numerical issues. Handles integer `nu` by performing a linear interpolation between values of `nu` slightly above and below the requested value. Parameters ---------- nu : float The order of the modified Bessel function and the exponent of `y`. y : array of float The points to evaluate the function at. These are assumed to be nonegative. n : nonnegative int, optional The order of derivative to take. Set to zero (the default) to get the value. tol : float, optional The distance from zero for which the power series is used. Default is 5e-4. nterms : int, optional The number of terms to include in the power series. Default is 1. nu_step : float, optional The amount to vary `nu` by when handling integer values of `nu`. Default is 0.001.
[ "r", "Computes", "the", "function", ":", "math", ":", "y^", "{", "\\", "nu", "/", "2", "}", "K_", "{", "\\", "nu", "}", "(", "y^", "{", "1", "/", "2", "}", ")", "and", "its", "derivatives", ".", "Care", "has", "been", "taken", "to", "handle", "the", "conditions", "at", ":", "math", ":", "y", "=", "0", ".", "For", "n", "=", "0", "uses", "a", "direct", "evaluation", "of", "the", "expression", "replacing", "points", "where", "y", "=", "0", "with", "the", "appropriate", "value", ".", "For", "n", ">", "0", "uses", "a", "general", "sum", "expression", "to", "evaluate", "the", "expression", "and", "handles", "the", "value", "at", "y", "=", "0", "using", "a", "power", "series", "expansion", ".", "Where", "it", "becomes", "infinite", "the", "infinities", "will", "have", "the", "appropriate", "sign", "for", "a", "limit", "approaching", "zero", "from", "the", "right", ".", "Uses", "a", "power", "series", "expansion", "around", ":", "math", ":", "y", "=", "0", "to", "avoid", "numerical", "issues", ".", "Handles", "integer", "nu", "by", "performing", "a", "linear", "interpolation", "between", "values", "of", "nu", "slightly", "above", "and", "below", "the", "requested", "value", ".", "Parameters", "----------", "nu", ":", "float", "The", "order", "of", "the", "modified", "Bessel", "function", "and", "the", "exponent", "of", "y", ".", "y", ":", "array", "of", "float", "The", "points", "to", "evaluate", "the", "function", "at", ".", "These", "are", "assumed", "to", "be", "nonegative", ".", "n", ":", "nonnegative", "int", "optional", "The", "order", "of", "derivative", "to", "take", ".", "Set", "to", "zero", "(", "the", "default", ")", "to", "get", "the", "value", ".", "tol", ":", "float", "optional", "The", "distance", "from", "zero", "for", "which", "the", "power", "series", "is", "used", ".", "Default", "is", "5e", "-", "4", ".", "nterms", ":", "int", "optional", "The", "number", "of", "terms", "to", "include", "in", "the", "power", "series", ".", "Default", "is", "1", ".", "nu_step", ":", "float", "optional", "The", "amount", "to", "vary", "nu", "by", "when", "handling", "integer", "values", "of", "nu", ".", "Default", "is", "0", ".", "001", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1429-L1518
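As a sketch of the y = 0 handling described in the docstring: for non-integer nu, the limit of :math:`y^{\nu/2} K_{\nu}(y^{1/2})` as y approaches 0 from the right is :math:`\Gamma(\nu)/2^{1-\nu}`, which the n = 0 branch inserts directly; nu = 1.5 and the evaluation points are arbitrary choices:

import scipy
import scipy.special
from gptools.utils import yn2Kn2Der

nu = 1.5  # arbitrary non-integer order for illustration
y = scipy.array([0.0, 0.01, 1.0, 10.0])
print(yn2Kn2Der(nu, y))
print(scipy.special.gamma(nu) / 2.0**(1.0 - nu))  # expected entry at y = 0
# Derivatives work too; points with 0 < y <= tol fall back to the power series,
# and the y = 0 entry is overwritten after the fact, so an intermediate
# divide-by-zero warning from y**(negative power) is harmless here:
print(yn2Kn2Der(nu, y, n=1))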
markchil/gptools
gptools/utils.py
incomplete_bell_poly
def incomplete_bell_poly(n, k, x): r"""Recursive evaluation of the incomplete Bell polynomial :math:`B_{n, k}(x)`. Evaluates the incomplete Bell polynomial :math:`B_{n, k}(x_1, x_2, \dots, x_{n-k+1})`, also known as the partial Bell polynomial or the Bell polynomial of the second kind. This polynomial is useful in the evaluation of (the univariate) Faa di Bruno's formula which generalizes the chain rule to higher order derivatives. The implementation here is based on the implementation in: :py:func:`sympy.functions.combinatorial.numbers.bell._bell_incomplete_poly` Following that function's documentation, the polynomial is computed according to the recurrence formula: .. math:: B_{n, k}(x_1, x_2, \dots, x_{n-k+1}) = \sum_{m=1}^{n-k+1}x_m\binom{n-1}{m-1}B_{n-m, k-1}(x_1, x_2, \dots, x_{n-m-k}) | The end cases are: | :math:`B_{0, 0} = 1` | :math:`B_{n, 0} = 0` for :math:`n \ge 1` | :math:`B_{0, k} = 0` for :math:`k \ge 1` Parameters ---------- n : scalar int The first subscript of the polynomial. k : scalar int The second subscript of the polynomial. x : :py:class:`Array` of floats, (`p`, `n` - `k` + 1) `p` sets of `n` - `k` + 1 points to use as the arguments to :math:`B_{n,k}`. The second dimension can be longer than required, in which case the extra entries are silently ignored (this facilitates recursion without needing to subset the array `x`). Returns ------- result : :py:class:`Array`, (`p`,) Incomplete Bell polynomial evaluated at the desired values. """ if n == 0 and k == 0: return scipy.ones(x.shape[0], dtype=float) elif k == 0 and n >= 1: return scipy.zeros(x.shape[0], dtype=float) elif n == 0 and k >= 1: return scipy.zeros(x.shape[0], dtype=float) else: result = scipy.zeros(x.shape[0], dtype=float) for m in xrange(0, n - k + 1): result += x[:, m] * scipy.special.binom(n - 1, m) * incomplete_bell_poly(n - (m + 1), k - 1, x) return result
python
def incomplete_bell_poly(n, k, x): r"""Recursive evaluation of the incomplete Bell polynomial :math:`B_{n, k}(x)`. Evaluates the incomplete Bell polynomial :math:`B_{n, k}(x_1, x_2, \dots, x_{n-k+1})`, also known as the partial Bell polynomial or the Bell polynomial of the second kind. This polynomial is useful in the evaluation of (the univariate) Faa di Bruno's formula which generalizes the chain rule to higher order derivatives. The implementation here is based on the implementation in: :py:func:`sympy.functions.combinatorial.numbers.bell._bell_incomplete_poly` Following that function's documentation, the polynomial is computed according to the recurrence formula: .. math:: B_{n, k}(x_1, x_2, \dots, x_{n-k+1}) = \sum_{m=1}^{n-k+1}x_m\binom{n-1}{m-1}B_{n-m, k-1}(x_1, x_2, \dots, x_{n-m-k}) | The end cases are: | :math:`B_{0, 0} = 1` | :math:`B_{n, 0} = 0` for :math:`n \ge 1` | :math:`B_{0, k} = 0` for :math:`k \ge 1` Parameters ---------- n : scalar int The first subscript of the polynomial. k : scalar int The second subscript of the polynomial. x : :py:class:`Array` of floats, (`p`, `n` - `k` + 1) `p` sets of `n` - `k` + 1 points to use as the arguments to :math:`B_{n,k}`. The second dimension can be longer than required, in which case the extra entries are silently ignored (this facilitates recursion without needing to subset the array `x`). Returns ------- result : :py:class:`Array`, (`p`,) Incomplete Bell polynomial evaluated at the desired values. """ if n == 0 and k == 0: return scipy.ones(x.shape[0], dtype=float) elif k == 0 and n >= 1: return scipy.zeros(x.shape[0], dtype=float) elif n == 0 and k >= 1: return scipy.zeros(x.shape[0], dtype=float) else: result = scipy.zeros(x.shape[0], dtype=float) for m in xrange(0, n - k + 1): result += x[:, m] * scipy.special.binom(n - 1, m) * incomplete_bell_poly(n - (m + 1), k - 1, x) return result
[ "def", "incomplete_bell_poly", "(", "n", ",", "k", ",", "x", ")", ":", "if", "n", "==", "0", "and", "k", "==", "0", ":", "return", "scipy", ".", "ones", "(", "x", ".", "shape", "[", "0", "]", ",", "dtype", "=", "float", ")", "elif", "k", "==", "0", "and", "n", ">=", "1", ":", "return", "scipy", ".", "zeros", "(", "x", ".", "shape", "[", "0", "]", ",", "dtype", "=", "float", ")", "elif", "n", "==", "0", "and", "k", ">=", "1", ":", "return", "scipy", ".", "zeros", "(", "x", ".", "shape", "[", "0", "]", ",", "dtype", "=", "float", ")", "else", ":", "result", "=", "scipy", ".", "zeros", "(", "x", ".", "shape", "[", "0", "]", ",", "dtype", "=", "float", ")", "for", "m", "in", "xrange", "(", "0", ",", "n", "-", "k", "+", "1", ")", ":", "result", "+=", "x", "[", ":", ",", "m", "]", "*", "scipy", ".", "special", ".", "binom", "(", "n", "-", "1", ",", "m", ")", "*", "incomplete_bell_poly", "(", "n", "-", "(", "m", "+", "1", ")", ",", "k", "-", "1", ",", "x", ")", "return", "result" ]
r"""Recursive evaluation of the incomplete Bell polynomial :math:`B_{n, k}(x)`. Evaluates the incomplete Bell polynomial :math:`B_{n, k}(x_1, x_2, \dots, x_{n-k+1})`, also known as the partial Bell polynomial or the Bell polynomial of the second kind. This polynomial is useful in the evaluation of (the univariate) Faa di Bruno's formula which generalizes the chain rule to higher order derivatives. The implementation here is based on the implementation in: :py:func:`sympy.functions.combinatorial.numbers.bell._bell_incomplete_poly` Following that function's documentation, the polynomial is computed according to the recurrence formula: .. math:: B_{n, k}(x_1, x_2, \dots, x_{n-k+1}) = \sum_{m=1}^{n-k+1}x_m\binom{n-1}{m-1}B_{n-m, k-1}(x_1, x_2, \dots, x_{n-m-k}) | The end cases are: | :math:`B_{0, 0} = 1` | :math:`B_{n, 0} = 0` for :math:`n \ge 1` | :math:`B_{0, k} = 0` for :math:`k \ge 1` Parameters ---------- n : scalar int The first subscript of the polynomial. k : scalar int The second subscript of the polynomial. x : :py:class:`Array` of floats, (`p`, `n` - `k` + 1) `p` sets of `n` - `k` + 1 points to use as the arguments to :math:`B_{n,k}`. The second dimension can be longer than required, in which case the extra entries are silently ignored (this facilitates recursion without needing to subset the array `x`). Returns ------- result : :py:class:`Array`, (`p`,) Incomplete Bell polynomial evaluated at the desired values.
[ "r", "Recursive", "evaluation", "of", "the", "incomplete", "Bell", "polynomial", ":", "math", ":", "B_", "{", "n", "k", "}", "(", "x", ")", ".", "Evaluates", "the", "incomplete", "Bell", "polynomial", ":", "math", ":", "B_", "{", "n", "k", "}", "(", "x_1", "x_2", "\\", "dots", "x_", "{", "n", "-", "k", "+", "1", "}", ")", "also", "known", "as", "the", "partial", "Bell", "polynomial", "or", "the", "Bell", "polynomial", "of", "the", "second", "kind", ".", "This", "polynomial", "is", "useful", "in", "the", "evaluation", "of", "(", "the", "univariate", ")", "Faa", "di", "Bruno", "s", "formula", "which", "generalizes", "the", "chain", "rule", "to", "higher", "order", "derivatives", ".", "The", "implementation", "here", "is", "based", "on", "the", "implementation", "in", ":", ":", "py", ":", "func", ":", "sympy", ".", "functions", ".", "combinatorial", ".", "numbers", ".", "bell", ".", "_bell_incomplete_poly", "Following", "that", "function", "s", "documentation", "the", "polynomial", "is", "computed", "according", "to", "the", "recurrence", "formula", ":", "..", "math", "::", "B_", "{", "n", "k", "}", "(", "x_1", "x_2", "\\", "dots", "x_", "{", "n", "-", "k", "+", "1", "}", ")", "=", "\\", "sum_", "{", "m", "=", "1", "}", "^", "{", "n", "-", "k", "+", "1", "}", "x_m", "\\", "binom", "{", "n", "-", "1", "}", "{", "m", "-", "1", "}", "B_", "{", "n", "-", "m", "k", "-", "1", "}", "(", "x_1", "x_2", "\\", "dots", "x_", "{", "n", "-", "m", "-", "k", "}", ")", "|", "The", "end", "cases", "are", ":", "|", ":", "math", ":", "B_", "{", "0", "0", "}", "=", "1", "|", ":", "math", ":", "B_", "{", "n", "0", "}", "=", "0", "for", ":", "math", ":", "n", "\\", "ge", "1", "|", ":", "math", ":", "B_", "{", "0", "k", "}", "=", "0", "for", ":", "math", ":", "k", "\\", "ge", "1", "Parameters", "----------", "n", ":", "scalar", "int", "The", "first", "subscript", "of", "the", "polynomial", ".", "k", ":", "scalar", "int", "The", "second", "subscript", "of", "the", "polynomial", ".", "x", ":", ":", "py", ":", "class", ":", "Array", "of", "floats", "(", "p", "n", "-", "k", "+", "1", ")", "p", "sets", "of", "n", "-", "k", "+", "1", "points", "to", "use", "as", "the", "arguments", "to", ":", "math", ":", "B_", "{", "n", "k", "}", ".", "The", "second", "dimension", "can", "be", "longer", "than", "required", "in", "which", "case", "the", "extra", "entries", "are", "silently", "ignored", "(", "this", "facilitates", "recursion", "without", "needing", "to", "subset", "the", "array", "x", ")", ".", "Returns", "-------", "result", ":", ":", "py", ":", "class", ":", "Array", "(", "p", ")", "Incomplete", "Bell", "polynomial", "evaluated", "at", "the", "desired", "values", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1520-L1570
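The closed form :math:`B_{3,2}(x_1, x_2) = 3 x_1 x_2` gives a handy spot check of the recurrence; the input values below are arbitrary:

import scipy
from gptools.utils import incomplete_bell_poly

x = scipy.atleast_2d([2.0, 5.0])  # shape (p, n - k + 1) = (1, 2)
print(incomplete_bell_poly(3, 2, x))  # expected [30.0], i.e., 3 * 2 * 5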
markchil/gptools
gptools/utils.py
generate_set_partition_strings
def generate_set_partition_strings(n): """Generate the restricted growth strings for all of the partitions of an `n`-member set. Uses Algorithm H from page 416 of volume 4A of Knuth's `The Art of Computer Programming`. Returns the partitions in lexicographical order. Parameters ---------- n : scalar int, non-negative Number of (unique) elements in the set to be partitioned. Returns ------- partitions : list of :py:class:`Array` List has a number of elements equal to the `n`-th Bell number (i.e., the number of partitions for a set of size `n`). Each element has length `n`, the elements of which are the restricted growth strings describing the partitions of the set. The strings are returned in lexicographic order. """ # Handle edge cases: if n == 0: return [] elif n == 1: return [scipy.array([0])] partitions = [] # Step 1: Initialize a = scipy.zeros(n, dtype=int) b = scipy.ones(n, dtype=int) while True: # Step 2: Visit partitions.append(a.copy()) if a[-1] == b[-1]: # Step 4: Find j. j is the index of the first element from the end # for which a != b, with the exception of the last element. j = (a[:-1] != b[:-1]).nonzero()[0][-1] # Step 5: Increase a_j (or terminate): if j == 0: break else: a[j] += 1 # Step 6: Zero out a_{j+1} to a_n: b[-1] = b[j] + (a[j] == b[j]) a[j + 1:] = 0 b[j + 1 :-1] = b[-1] else: # Step 3: Increase a_n: a[-1] += 1 return partitions
python
def generate_set_partition_strings(n): """Generate the restricted growth strings for all of the partitions of an `n`-member set. Uses Algorithm H from page 416 of volume 4A of Knuth's `The Art of Computer Programming`. Returns the partitions in lexicographical order. Parameters ---------- n : scalar int, non-negative Number of (unique) elements in the set to be partitioned. Returns ------- partitions : list of :py:class:`Array` List has a number of elements equal to the `n`-th Bell number (i.e., the number of partitions for a set of size `n`). Each element has length `n`, the elements of which are the restricted growth strings describing the partitions of the set. The strings are returned in lexicographic order. """ # Handle edge cases: if n == 0: return [] elif n == 1: return [scipy.array([0])] partitions = [] # Step 1: Initialize a = scipy.zeros(n, dtype=int) b = scipy.ones(n, dtype=int) while True: # Step 2: Visit partitions.append(a.copy()) if a[-1] == b[-1]: # Step 4: Find j. j is the index of the first element from the end # for which a != b, with the exception of the last element. j = (a[:-1] != b[:-1]).nonzero()[0][-1] # Step 5: Increase a_j (or terminate): if j == 0: break else: a[j] += 1 # Step 6: Zero out a_{j+1} to a_n: b[-1] = b[j] + (a[j] == b[j]) a[j + 1:] = 0 b[j + 1 :-1] = b[-1] else: # Step 3: Increase a_n: a[-1] += 1 return partitions
[ "def", "generate_set_partition_strings", "(", "n", ")", ":", "# Handle edge cases:", "if", "n", "==", "0", ":", "return", "[", "]", "elif", "n", "==", "1", ":", "return", "[", "scipy", ".", "array", "(", "[", "0", "]", ")", "]", "partitions", "=", "[", "]", "# Step 1: Initialize", "a", "=", "scipy", ".", "zeros", "(", "n", ",", "dtype", "=", "int", ")", "b", "=", "scipy", ".", "ones", "(", "n", ",", "dtype", "=", "int", ")", "while", "True", ":", "# Step 2: Visit", "partitions", ".", "append", "(", "a", ".", "copy", "(", ")", ")", "if", "a", "[", "-", "1", "]", "==", "b", "[", "-", "1", "]", ":", "# Step 4: Find j. j is the index of the first element from the end", "# for which a != b, with the exception of the last element.", "j", "=", "(", "a", "[", ":", "-", "1", "]", "!=", "b", "[", ":", "-", "1", "]", ")", ".", "nonzero", "(", ")", "[", "0", "]", "[", "-", "1", "]", "# Step 5: Increase a_j (or terminate):", "if", "j", "==", "0", ":", "break", "else", ":", "a", "[", "j", "]", "+=", "1", "# Step 6: Zero out a_{j+1} to a_n:", "b", "[", "-", "1", "]", "=", "b", "[", "j", "]", "+", "(", "a", "[", "j", "]", "==", "b", "[", "j", "]", ")", "a", "[", "j", "+", "1", ":", "]", "=", "0", "b", "[", "j", "+", "1", ":", "-", "1", "]", "=", "b", "[", "-", "1", "]", "else", ":", "# Step 3: Increase a_n:", "a", "[", "-", "1", "]", "+=", "1", "return", "partitions" ]
Generate the restricted growth strings for all of the partitions of an `n`-member set. Uses Algorithm H from page 416 of volume 4A of Knuth's `The Art of Computer Programming`. Returns the partitions in lexicographical order. Parameters ---------- n : scalar int, non-negative Number of (unique) elements in the set to be partitioned. Returns ------- partitions : list of :py:class:`Array` List has a number of elements equal to the `n`-th Bell number (i.e., the number of partitions for a set of size `n`). Each element has length `n`, the elements of which are the restricted growth strings describing the partitions of the set. The strings are returned in lexicographic order.
[ "Generate", "the", "restricted", "growth", "strings", "for", "all", "of", "the", "partitions", "of", "an", "n", "-", "member", "set", ".", "Uses", "Algorithm", "H", "from", "page", "416", "of", "volume", "4A", "of", "Knuth", "s", "The", "Art", "of", "Computer", "Programming", ".", "Returns", "the", "partitions", "in", "lexicographical", "order", ".", "Parameters", "----------", "n", ":", "scalar", "int", "non", "-", "negative", "Number", "of", "(", "unique", ")", "elements", "in", "the", "set", "to", "be", "partitioned", ".", "Returns", "-------", "partitions", ":", "list", "of", ":", "py", ":", "class", ":", "Array", "List", "has", "a", "number", "of", "elements", "equal", "to", "the", "n", "-", "th", "Bell", "number", "(", "i", ".", "e", ".", "the", "number", "of", "partitions", "for", "a", "set", "of", "size", "n", ")", ".", "Each", "element", "has", "length", "n", "the", "elements", "of", "which", "are", "the", "restricted", "growth", "strings", "describing", "the", "partitions", "of", "the", "set", ".", "The", "strings", "are", "returned", "in", "lexicographic", "order", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1572-L1624
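For instance, a 3-element set has Bell number :math:`B_3 = 5`, so five restricted growth strings come back, in lexicographic order:

from gptools.utils import generate_set_partition_strings

strings = generate_set_partition_strings(3)
print(len(strings))  # 5
for s in strings:
    print(s)  # [0 0 0], [0 0 1], [0 1 0], [0 1 1], [0 1 2]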
markchil/gptools
gptools/utils.py
generate_set_partitions
def generate_set_partitions(set_): """Generate all of the partitions of a set. This is a helper function that utilizes the restricted growth strings from :py:func:`generate_set_partition_strings`. The partitions are returned in lexicographic order. Parameters ---------- set_ : :py:class:`Array` or other Array-like, (`m`,) The set to find the partitions of. Returns ------- partitions : list of lists of :py:class:`Array` The number of elements in the outer list is equal to the number of partitions, which is the len(`m`)^th Bell number. Each of the inner lists corresponds to a single possible partition. The length of an inner list is therefore equal to the number of blocks. Each of the arrays in an inner list is hence a block. """ set_ = scipy.asarray(set_) strings = generate_set_partition_strings(len(set_)) partitions = [] for string in strings: blocks = [] for block_num in scipy.unique(string): blocks.append(set_[string == block_num]) partitions.append(blocks) return partitions
python
def generate_set_partitions(set_): """Generate all of the partitions of a set. This is a helper function that utilizes the restricted growth strings from :py:func:`generate_set_partition_strings`. The partitions are returned in lexicographic order. Parameters ---------- set_ : :py:class:`Array` or other Array-like, (`m`,) The set to find the partitions of. Returns ------- partitions : list of lists of :py:class:`Array` The number of elements in the outer list is equal to the number of partitions, which is the len(`m`)^th Bell number. Each of the inner lists corresponds to a single possible partition. The length of an inner list is therefore equal to the number of blocks. Each of the arrays in an inner list is hence a block. """ set_ = scipy.asarray(set_) strings = generate_set_partition_strings(len(set_)) partitions = [] for string in strings: blocks = [] for block_num in scipy.unique(string): blocks.append(set_[string == block_num]) partitions.append(blocks) return partitions
[ "def", "generate_set_partitions", "(", "set_", ")", ":", "set_", "=", "scipy", ".", "asarray", "(", "set_", ")", "strings", "=", "generate_set_partition_strings", "(", "len", "(", "set_", ")", ")", "partitions", "=", "[", "]", "for", "string", "in", "strings", ":", "blocks", "=", "[", "]", "for", "block_num", "in", "scipy", ".", "unique", "(", "string", ")", ":", "blocks", ".", "append", "(", "set_", "[", "string", "==", "block_num", "]", ")", "partitions", ".", "append", "(", "blocks", ")", "return", "partitions" ]
Generate all of the partitions of a set. This is a helper function that utilizes the restricted growth strings from :py:func:`generate_set_partition_strings`. The partitions are returned in lexicographic order. Parameters ---------- set_ : :py:class:`Array` or other Array-like, (`m`,) The set to find the partitions of. Returns ------- partitions : list of lists of :py:class:`Array` The number of elements in the outer list is equal to the number of partitions, which is the len(`m`)^th Bell number. Each of the inner lists corresponds to a single possible partition. The length of an inner list is therefore equal to the number of blocks. Each of the arrays in an inner list is hence a block.
[ "Generate", "all", "of", "the", "partitions", "of", "a", "set", ".", "This", "is", "a", "helper", "function", "that", "utilizes", "the", "restricted", "growth", "strings", "from", ":", "py", ":", "func", ":", "generate_set_partition_strings", ".", "The", "partitions", "are", "returned", "in", "lexicographic", "order", ".", "Parameters", "----------", "set_", ":", ":", "py", ":", "class", ":", "Array", "or", "other", "Array", "-", "like", "(", "m", ")", "The", "set", "to", "find", "the", "partitions", "of", ".", "Returns", "-------", "partitions", ":", "list", "of", "lists", "of", ":", "py", ":", "class", ":", "Array", "The", "number", "of", "elements", "in", "the", "outer", "list", "is", "equal", "to", "the", "number", "of", "partitions", "which", "is", "the", "len", "(", "m", ")", "^th", "Bell", "number", ".", "Each", "of", "the", "inner", "lists", "corresponds", "to", "a", "single", "possible", "partition", ".", "The", "length", "of", "an", "inner", "list", "is", "therefore", "equal", "to", "the", "number", "of", "blocks", ".", "Each", "of", "the", "arrays", "in", "an", "inner", "list", "is", "hence", "a", "block", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1626-L1656
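Continuing the three-element example, the same five partitions expressed as blocks of an actual set:

from gptools.utils import generate_set_partitions

for blocks in generate_set_partitions(['a', 'b', 'c']):
    print(blocks)
# The first partition is a single block containing all three elements; the
# last is three singleton blocks, matching the growth string [0 1 2].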
markchil/gptools
gptools/utils.py
unique_rows
def unique_rows(arr, return_index=False, return_inverse=False): """Returns a copy of arr with duplicate rows removed. From Stackoverflow "Find unique rows in numpy.array." Parameters ---------- arr : :py:class:`Array`, (`m`, `n`) The array to find the unique rows of. return_index : bool, optional If True, the indices of the unique rows in the array will also be returned. I.e., unique = arr[idx]. Default is False (don't return indices). return_inverse: bool, optional If True, the indices in the unique array to reconstruct the original array will also be returned. I.e., arr = unique[inv]. Default is False (don't return inverse). Returns ------- unique : :py:class:`Array`, (`p`, `n`) where `p` <= `m` The array `arr` with duplicate rows removed. """ b = scipy.ascontiguousarray(arr).view( scipy.dtype((scipy.void, arr.dtype.itemsize * arr.shape[1])) ) try: out = scipy.unique(b, return_index=True, return_inverse=return_inverse) dum = out[0] idx = out[1] if return_inverse: inv = out[2] except TypeError: if return_inverse: raise RuntimeError( "Error in scipy.unique on older versions of numpy prevents " "return_inverse from working!" ) # Handle bug in numpy 1.6.2: rows = [_Row(row) for row in b] srt_idx = sorted(range(len(rows)), key=rows.__getitem__) rows = scipy.asarray(rows)[srt_idx] row_cmp = [-1] for k in xrange(1, len(srt_idx)): row_cmp.append(rows[k-1].__cmp__(rows[k])) row_cmp = scipy.asarray(row_cmp) transition_idxs = scipy.where(row_cmp != 0)[0] idx = scipy.asarray(srt_idx)[transition_idxs] out = arr[idx] if return_index and return_inverse: out = (out, idx, inv) elif return_index: out = (out, idx) elif return_inverse: out = (out, inv) return out
python
def unique_rows(arr, return_index=False, return_inverse=False): """Returns a copy of arr with duplicate rows removed. From Stackoverflow "Find unique rows in numpy.array." Parameters ---------- arr : :py:class:`Array`, (`m`, `n`) The array to find the unique rows of. return_index : bool, optional If True, the indices of the unique rows in the array will also be returned. I.e., unique = arr[idx]. Default is False (don't return indices). return_inverse: bool, optional If True, the indices in the unique array to reconstruct the original array will also be returned. I.e., arr = unique[inv]. Default is False (don't return inverse). Returns ------- unique : :py:class:`Array`, (`p`, `n`) where `p` <= `m` The array `arr` with duplicate rows removed. """ b = scipy.ascontiguousarray(arr).view( scipy.dtype((scipy.void, arr.dtype.itemsize * arr.shape[1])) ) try: out = scipy.unique(b, return_index=True, return_inverse=return_inverse) dum = out[0] idx = out[1] if return_inverse: inv = out[2] except TypeError: if return_inverse: raise RuntimeError( "Error in scipy.unique on older versions of numpy prevents " "return_inverse from working!" ) # Handle bug in numpy 1.6.2: rows = [_Row(row) for row in b] srt_idx = sorted(range(len(rows)), key=rows.__getitem__) rows = scipy.asarray(rows)[srt_idx] row_cmp = [-1] for k in xrange(1, len(srt_idx)): row_cmp.append(rows[k-1].__cmp__(rows[k])) row_cmp = scipy.asarray(row_cmp) transition_idxs = scipy.where(row_cmp != 0)[0] idx = scipy.asarray(srt_idx)[transition_idxs] out = arr[idx] if return_index and return_inverse: out = (out, idx, inv) elif return_index: out = (out, idx) elif return_inverse: out = (out, inv) return out
[ "def", "unique_rows", "(", "arr", ",", "return_index", "=", "False", ",", "return_inverse", "=", "False", ")", ":", "b", "=", "scipy", ".", "ascontiguousarray", "(", "arr", ")", ".", "view", "(", "scipy", ".", "dtype", "(", "(", "scipy", ".", "void", ",", "arr", ".", "dtype", ".", "itemsize", "*", "arr", ".", "shape", "[", "1", "]", ")", ")", ")", "try", ":", "out", "=", "scipy", ".", "unique", "(", "b", ",", "return_index", "=", "True", ",", "return_inverse", "=", "return_inverse", ")", "dum", "=", "out", "[", "0", "]", "idx", "=", "out", "[", "1", "]", "if", "return_inverse", ":", "inv", "=", "out", "[", "2", "]", "except", "TypeError", ":", "if", "return_inverse", ":", "raise", "RuntimeError", "(", "\"Error in scipy.unique on older versions of numpy prevents \"", "\"return_inverse from working!\"", ")", "# Handle bug in numpy 1.6.2:", "rows", "=", "[", "_Row", "(", "row", ")", "for", "row", "in", "b", "]", "srt_idx", "=", "sorted", "(", "range", "(", "len", "(", "rows", ")", ")", ",", "key", "=", "rows", ".", "__getitem__", ")", "rows", "=", "scipy", ".", "asarray", "(", "rows", ")", "[", "srt_idx", "]", "row_cmp", "=", "[", "-", "1", "]", "for", "k", "in", "xrange", "(", "1", ",", "len", "(", "srt_idx", ")", ")", ":", "row_cmp", ".", "append", "(", "rows", "[", "k", "-", "1", "]", ".", "__cmp__", "(", "rows", "[", "k", "]", ")", ")", "row_cmp", "=", "scipy", ".", "asarray", "(", "row_cmp", ")", "transition_idxs", "=", "scipy", ".", "where", "(", "row_cmp", "!=", "0", ")", "[", "0", "]", "idx", "=", "scipy", ".", "asarray", "(", "srt_idx", ")", "[", "transition_idxs", "]", "out", "=", "arr", "[", "idx", "]", "if", "return_index", ":", "out", "=", "(", "out", ",", "idx", ")", "elif", "return_inverse", ":", "out", "=", "(", "out", ",", "inv", ")", "elif", "return_index", "and", "return_inverse", ":", "out", "=", "(", "out", ",", "idx", ",", "inv", ")", "return", "out" ]
Returns a copy of arr with duplicate rows removed. From Stackoverflow "Find unique rows in numpy.array." Parameters ---------- arr : :py:class:`Array`, (`m`, `n`) The array to find the unique rows of. return_index : bool, optional If True, the indices of the unique rows in the array will also be returned. I.e., unique = arr[idx]. Default is False (don't return indices). return_inverse: bool, optional If True, the indices in the unique array to reconstruct the original array will also be returned. I.e., arr = unique[inv]. Default is False (don't return inverse). Returns ------- unique : :py:class:`Array`, (`p`, `n`) where `p` <= `m` The array `arr` with duplicate rows removed.
[ "Returns", "a", "copy", "of", "arr", "with", "duplicate", "rows", "removed", ".", "From", "Stackoverflow", "Find", "unique", "rows", "in", "numpy", ".", "array", ".", "Parameters", "----------", "arr", ":", ":", "py", ":", "class", ":", "Array", "(", "m", "n", ")", "The", "array", "to", "find", "the", "unique", "rows", "of", ".", "return_index", ":", "bool", "optional", "If", "True", "the", "indices", "of", "the", "unique", "rows", "in", "the", "array", "will", "also", "be", "returned", ".", "I", ".", "e", ".", "unique", "=", "arr", "[", "idx", "]", ".", "Default", "is", "False", "(", "don", "t", "return", "indices", ")", ".", "return_inverse", ":", "bool", "optional", "If", "True", "the", "indices", "in", "the", "unique", "array", "to", "reconstruct", "the", "original", "array", "will", "also", "be", "returned", ".", "I", ".", "e", ".", "arr", "=", "unique", "[", "inv", "]", ".", "Default", "is", "False", "(", "don", "t", "return", "inverse", ")", ".", "Returns", "-------", "unique", ":", ":", "py", ":", "class", ":", "Array", "(", "p", "n", ")", "where", "p", "<", "=", "m", "The", "array", "arr", "with", "duplicate", "rows", "removed", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1666-L1721
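A small illustration of unique_rows on a made-up integer array:

import scipy
from gptools.utils import unique_rows

arr = scipy.array([[1, 2], [3, 4], [1, 2], [5, 6]])
uniq, idx = unique_rows(arr, return_index=True)
print(uniq)      # duplicate [1, 2] row removed (order follows the byte-wise sort)
print(arr[idx])  # the same rows, reconstructed via the returned indices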
markchil/gptools
gptools/utils.py
compute_stats
def compute_stats(vals, check_nan=False, robust=False, axis=1, plot_QQ=False, bins=15, name=''): """Compute the average statistics (mean, std dev) for the given values. Parameters ---------- vals : array-like, (`M`, `D`) Values to compute the average statistics along the specified axis of. check_nan : bool, optional Whether or not to check for (and exclude) NaN's. Default is False (do not attempt to handle NaN's). robust : bool, optional Whether or not to use robust estimators (median for mean, IQR for standard deviation). Default is False (use non-robust estimators). axis : int, optional Axis to compute the statistics along. Presently only supported if `robust` is False. Default is 1. plot_QQ : bool, optional Whether or not a QQ plot and histogram should be drawn for each channel. Default is False (do not draw QQ plots). bins : int, optional Number of bins to use when plotting histogram (for plot_QQ=True). Default is 15. name : str, optional Name to put in the title of the QQ/histogram plot. Returns ------- mean : ndarray, (`M`,) Estimator for the mean of `vals`. std : ndarray, (`M`,) Estimator for the standard deviation of `vals`. Raises ------ NotImplementedError If `axis` != 1 when `robust` is True. """ if axis != 1 and robust: raise NotImplementedError("Values of axis other than 1 are not supported " "with the robust keyword at this time!") if robust: # TODO: This stuff should really be vectorized if there is something that allows it! if check_nan: mean = scipy.stats.nanmedian(vals, axis=axis) # TODO: HANDLE AXIS PROPERLY! std = scipy.zeros(vals.shape[0], dtype=float) for k in xrange(0, len(vals)): ch = vals[k] ok_idxs = ~scipy.isnan(ch) if ok_idxs.any(): std[k] = (scipy.stats.scoreatpercentile(ch[ok_idxs], 75) - scipy.stats.scoreatpercentile(ch[ok_idxs], 25)) else: # Leave a nan where there are no non-nan values: std[k] = scipy.nan std /= IQR_TO_STD else: mean = scipy.median(vals, axis=axis) # TODO: HANDLE AXIS PROPERLY! std = scipy.asarray([scipy.stats.scoreatpercentile(ch, 75.0) - scipy.stats.scoreatpercentile(ch, 25.0) for ch in vals]) / IQR_TO_STD else: if check_nan: mean = scipy.stats.nanmean(vals, axis=axis) std = scipy.stats.nanstd(vals, axis=axis) else: mean = scipy.mean(vals, axis=axis) std = scipy.std(vals, axis=axis) if plot_QQ: f = plt.figure() gs = mplgs.GridSpec(2, 2, height_ratios=[8, 1]) a_QQ = f.add_subplot(gs[0, 0]) a_hist = f.add_subplot(gs[0, 1]) a_slider = f.add_subplot(gs[1, :]) title = f.suptitle("") def update(val): """Update the index from the results to be displayed. """ a_QQ.clear() a_hist.clear() idx = slider.val title.set_text("%s, n=%d" % (name, idx)) nan_idxs = scipy.isnan(vals[idx, :]) if not nan_idxs.all(): osm, osr = scipy.stats.probplot(vals[idx, ~nan_idxs], dist='norm', plot=None, fit=False) a_QQ.plot(osm, osr, 'bo', markersize=10) a_QQ.set_title('QQ plot') a_QQ.set_xlabel('quantiles of $\mathcal{N}(0,1)$') a_QQ.set_ylabel('quantiles of data') a_hist.hist(vals[idx, ~nan_idxs], bins=bins, normed=True) locs = scipy.linspace(vals[idx, ~nan_idxs].min(), vals[idx, ~nan_idxs].max()) a_hist.plot(locs, scipy.stats.norm.pdf(locs, loc=mean[idx], scale=std[idx])) a_hist.set_title('Normalized histogram and reported PDF') a_hist.set_xlabel('value') a_hist.set_ylabel('density') f.canvas.draw() def arrow_respond(slider, event): """Event handler for arrow key events in plot windows. Pass the slider object to update as a masked argument using a lambda function:: lambda evt: arrow_respond(my_slider, evt) Parameters ---------- slider : Slider instance associated with this handler. event : Event to be handled. """ if event.key == 'right': slider.set_val(min(slider.val + 1, slider.valmax)) elif event.key == 'left': slider.set_val(max(slider.val - 1, slider.valmin)) slider = mplw.Slider(a_slider, 'index', 0, len(vals) - 1, valinit=0, valfmt='%d') slider.on_changed(update) update(0) f.canvas.mpl_connect('key_press_event', lambda evt: arrow_respond(slider, evt)) return (mean, std)
python
def compute_stats(vals, check_nan=False, robust=False, axis=1, plot_QQ=False, bins=15, name=''): """Compute the average statistics (mean, std dev) for the given values. Parameters ---------- vals : array-like, (`M`, `D`) Values to compute the average statistics along the specified axis of. check_nan : bool, optional Whether or not to check for (and exclude) NaN's. Default is False (do not attempt to handle NaN's). robust : bool, optional Whether or not to use robust estimators (median for mean, IQR for standard deviation). Default is False (use non-robust estimators). axis : int, optional Axis to compute the statistics along. Presently only supported if `robust` is False. Default is 1. plot_QQ : bool, optional Whether or not a QQ plot and histogram should be drawn for each channel. Default is False (do not draw QQ plots). bins : int, optional Number of bins to use when plotting histogram (for plot_QQ=True). Default is 15. name : str, optional Name to put in the title of the QQ/histogram plot. Returns ------- mean : ndarray, (`M`,) Estimator for the mean of `vals`. std : ndarray, (`M`,) Estimator for the standard deviation of `vals`. Raises ------ NotImplementedError If `axis` != 1 when `robust` is True. """ if axis != 1 and robust: raise NotImplementedError("Values of axis other than 1 are not supported " "with the robust keyword at this time!") if robust: # TODO: This stuff should really be vectorized if there is something that allows it! if check_nan: mean = scipy.stats.nanmedian(vals, axis=axis) # TODO: HANDLE AXIS PROPERLY! std = scipy.zeros(vals.shape[0], dtype=float) for k in xrange(0, len(vals)): ch = vals[k] ok_idxs = ~scipy.isnan(ch) if ok_idxs.any(): std[k] = (scipy.stats.scoreatpercentile(ch[ok_idxs], 75) - scipy.stats.scoreatpercentile(ch[ok_idxs], 25)) else: # Leave a nan where there are no non-nan values: std[k] = scipy.nan std /= IQR_TO_STD else: mean = scipy.median(vals, axis=axis) # TODO: HANDLE AXIS PROPERLY! std = scipy.asarray([scipy.stats.scoreatpercentile(ch, 75.0) - scipy.stats.scoreatpercentile(ch, 25.0) for ch in vals]) / IQR_TO_STD else: if check_nan: mean = scipy.stats.nanmean(vals, axis=axis) std = scipy.stats.nanstd(vals, axis=axis) else: mean = scipy.mean(vals, axis=axis) std = scipy.std(vals, axis=axis) if plot_QQ: f = plt.figure() gs = mplgs.GridSpec(2, 2, height_ratios=[8, 1]) a_QQ = f.add_subplot(gs[0, 0]) a_hist = f.add_subplot(gs[0, 1]) a_slider = f.add_subplot(gs[1, :]) title = f.suptitle("") def update(val): """Update the index from the results to be displayed. """ a_QQ.clear() a_hist.clear() idx = slider.val title.set_text("%s, n=%d" % (name, idx)) nan_idxs = scipy.isnan(vals[idx, :]) if not nan_idxs.all(): osm, osr = scipy.stats.probplot(vals[idx, ~nan_idxs], dist='norm', plot=None, fit=False) a_QQ.plot(osm, osr, 'bo', markersize=10) a_QQ.set_title('QQ plot') a_QQ.set_xlabel('quantiles of $\mathcal{N}(0,1)$') a_QQ.set_ylabel('quantiles of data') a_hist.hist(vals[idx, ~nan_idxs], bins=bins, normed=True) locs = scipy.linspace(vals[idx, ~nan_idxs].min(), vals[idx, ~nan_idxs].max()) a_hist.plot(locs, scipy.stats.norm.pdf(locs, loc=mean[idx], scale=std[idx])) a_hist.set_title('Normalized histogram and reported PDF') a_hist.set_xlabel('value') a_hist.set_ylabel('density') f.canvas.draw() def arrow_respond(slider, event): """Event handler for arrow key events in plot windows. Pass the slider object to update as a masked argument using a lambda function:: lambda evt: arrow_respond(my_slider, evt) Parameters ---------- slider : Slider instance associated with this handler. event : Event to be handled. """ if event.key == 'right': slider.set_val(min(slider.val + 1, slider.valmax)) elif event.key == 'left': slider.set_val(max(slider.val - 1, slider.valmin)) slider = mplw.Slider(a_slider, 'index', 0, len(vals) - 1, valinit=0, valfmt='%d') slider.on_changed(update) update(0) f.canvas.mpl_connect('key_press_event', lambda evt: arrow_respond(slider, evt)) return (mean, std)
[ "def", "compute_stats", "(", "vals", ",", "check_nan", "=", "False", ",", "robust", "=", "False", ",", "axis", "=", "1", ",", "plot_QQ", "=", "False", ",", "bins", "=", "15", ",", "name", "=", "''", ")", ":", "if", "axis", "!=", "1", "and", "robust", ":", "raise", "NotImplementedError", "(", "\"Values of axis other than 1 are not supported \"", "\"with the robust keyword at this time!\"", ")", "if", "robust", ":", "# TODO: This stuff should really be vectorized if there is something that allows it!", "if", "check_nan", ":", "mean", "=", "scipy", ".", "stats", ".", "nanmedian", "(", "vals", ",", "axis", "=", "axis", ")", "# TODO: HANDLE AXIS PROPERLY!", "std", "=", "scipy", ".", "zeros", "(", "vals", ".", "shape", "[", "0", "]", ",", "dtype", "=", "float", ")", "for", "k", "in", "xrange", "(", "0", ",", "len", "(", "vals", ")", ")", ":", "ch", "=", "vals", "[", "k", "]", "ok_idxs", "=", "~", "scipy", ".", "isnan", "(", "ch", ")", "if", "ok_idxs", ".", "any", "(", ")", ":", "std", "[", "k", "]", "=", "(", "scipy", ".", "stats", ".", "scoreatpercentile", "(", "ch", "[", "ok_idxs", "]", ",", "75", ")", "-", "scipy", ".", "stats", ".", "scoreatpercentile", "(", "ch", "[", "ok_idxs", "]", ",", "25", ")", ")", "else", ":", "# Leave a nan where there are no non-nan values:", "std", "[", "k", "]", "=", "scipy", ".", "nan", "std", "/=", "IQR_TO_STD", "else", ":", "mean", "=", "scipy", ".", "median", "(", "vals", ",", "axis", "=", "axis", ")", "# TODO: HANDLE AXIS PROPERLY!", "std", "=", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "scoreatpercentile", "(", "ch", ",", "75.0", ")", "-", "scipy", ".", "stats", ".", "scoreatpercentile", "(", "ch", ",", "25.0", ")", "for", "ch", "in", "vals", "]", ")", "/", "IQR_TO_STD", "else", ":", "if", "check_nan", ":", "mean", "=", "scipy", ".", "stats", ".", "nanmean", "(", "vals", ",", "axis", "=", "axis", ")", "std", "=", "scipy", ".", "stats", ".", "nanstd", "(", "vals", ",", "axis", "=", "axis", ")", "else", ":", "mean", "=", "scipy", ".", "mean", "(", "vals", ",", "axis", "=", "axis", ")", "std", "=", "scipy", ".", "std", "(", "vals", ",", "axis", "=", "axis", ")", "if", "plot_QQ", ":", "f", "=", "plt", ".", "figure", "(", ")", "gs", "=", "mplgs", ".", "GridSpec", "(", "2", ",", "2", ",", "height_ratios", "=", "[", "8", ",", "1", "]", ")", "a_QQ", "=", "f", ".", "add_subplot", "(", "gs", "[", "0", ",", "0", "]", ")", "a_hist", "=", "f", ".", "add_subplot", "(", "gs", "[", "0", ",", "1", "]", ")", "a_slider", "=", "f", ".", "add_subplot", "(", "gs", "[", "1", ",", ":", "]", ")", "title", "=", "f", ".", "suptitle", "(", "\"\"", ")", "def", "update", "(", "val", ")", ":", "\"\"\"Update the index from the results to be displayed.\n \"\"\"", "a_QQ", ".", "clear", "(", ")", "a_hist", ".", "clear", "(", ")", "idx", "=", "slider", ".", "val", "title", ".", "set_text", "(", "\"%s, n=%d\"", "%", "(", "name", ",", "idx", ")", ")", "nan_idxs", "=", "scipy", ".", "isnan", "(", "vals", "[", "idx", ",", ":", "]", ")", "if", "not", "nan_idxs", ".", "all", "(", ")", ":", "osm", ",", "osr", "=", "scipy", ".", "stats", ".", "probplot", "(", "vals", "[", "idx", ",", "~", "nan_idxs", "]", ",", "dist", "=", "'norm'", ",", "plot", "=", "None", ",", "fit", "=", "False", ")", "a_QQ", ".", "plot", "(", "osm", ",", "osr", ",", "'bo'", ",", "markersize", "=", "10", ")", "a_QQ", ".", "set_title", "(", "'QQ plot'", ")", "a_QQ", ".", "set_xlabel", "(", "'quantiles of $\\mathcal{N}(0,1)$'", ")", "a_QQ", ".", "set_ylabel", "(", "'quantiles of data'", ")", "a_hist", ".", 
"hist", "(", "vals", "[", "idx", ",", "~", "nan_idxs", "]", ",", "bins", "=", "bins", ",", "normed", "=", "True", ")", "locs", "=", "scipy", ".", "linspace", "(", "vals", "[", "idx", ",", "~", "nan_idxs", "]", ".", "min", "(", ")", ",", "vals", "[", "idx", ",", "~", "nan_idxs", "]", ".", "max", "(", ")", ")", "a_hist", ".", "plot", "(", "locs", ",", "scipy", ".", "stats", ".", "norm", ".", "pdf", "(", "locs", ",", "loc", "=", "mean", "[", "idx", "]", ",", "scale", "=", "std", "[", "idx", "]", ")", ")", "a_hist", ".", "set_title", "(", "'Normalized histogram and reported PDF'", ")", "a_hist", ".", "set_xlabel", "(", "'value'", ")", "a_hist", ".", "set_ylabel", "(", "'density'", ")", "f", ".", "canvas", ".", "draw", "(", ")", "def", "arrow_respond", "(", "slider", ",", "event", ")", ":", "\"\"\"Event handler for arrow key events in plot windows.\n\n Pass the slider object to update as a masked argument using a lambda function::\n\n lambda evt: arrow_respond(my_slider, evt)\n\n Parameters\n ----------\n slider : Slider instance associated with this handler.\n event : Event to be handled.\n \"\"\"", "if", "event", ".", "key", "==", "'right'", ":", "slider", ".", "set_val", "(", "min", "(", "slider", ".", "val", "+", "1", ",", "slider", ".", "valmax", ")", ")", "elif", "event", ".", "key", "==", "'left'", ":", "slider", ".", "set_val", "(", "max", "(", "slider", ".", "val", "-", "1", ",", "slider", ".", "valmin", ")", ")", "slider", "=", "mplw", ".", "Slider", "(", "a_slider", ",", "'index'", ",", "0", ",", "len", "(", "vals", ")", "-", "1", ",", "valinit", "=", "0", ",", "valfmt", "=", "'%d'", ")", "slider", ".", "on_changed", "(", "update", ")", "update", "(", "0", ")", "f", ".", "canvas", ".", "mpl_connect", "(", "'key_press_event'", ",", "lambda", "evt", ":", "arrow_respond", "(", "slider", ",", "evt", ")", ")", "return", "(", "mean", ",", "std", ")" ]
Compute the average statistics (mean, std dev) for the given values.

Parameters
----------
vals : array-like, (`M`, `D`)
    Values to compute the average statistics along the specified axis of.
check_nan : bool, optional
    Whether or not to check for (and exclude) NaN's. Default is False (do
    not attempt to handle NaN's).
robust : bool, optional
    Whether or not to use robust estimators (median for mean, IQR for
    standard deviation). Default is False (use non-robust estimators).
axis : int, optional
    Axis to compute the statistics along. Presently only supported if
    `robust` is False. Default is 1.
plot_QQ : bool, optional
    Whether or not a QQ plot and histogram should be drawn for each
    channel. Default is False (do not draw QQ plots).
bins : int, optional
    Number of bins to use when plotting histogram (for plot_QQ=True).
    Default is 15.
name : str, optional
    Name to put in the title of the QQ/histogram plot.

Returns
-------
mean : ndarray, (`M`,)
    Estimator for the mean of `vals`.
std : ndarray, (`M`,)
    Estimator for the standard deviation of `vals`.

Raises
------
NotImplementedError
    If `axis` != 1 when `robust` is True.
[ "Compute", "the", "average", "statistics", "(", "mean", "std", "dev", ")", "for", "the", "given", "values", ".", "Parameters", "----------", "vals", ":", "array", "-", "like", "(", "M", "D", ")", "Values", "to", "compute", "the", "average", "statistics", "along", "the", "specified", "axis", "of", ".", "check_nan", ":", "bool", "optional", "Whether", "or", "not", "to", "check", "for", "(", "and", "exclude", ")", "NaN", "s", ".", "Default", "is", "False", "(", "do", "not", "attempt", "to", "handle", "NaN", "s", ")", ".", "robust", ":", "bool", "optional", "Whether", "or", "not", "to", "use", "robust", "estimators", "(", "median", "for", "mean", "IQR", "for", "standard", "deviation", ")", ".", "Default", "is", "False", "(", "use", "non", "-", "robust", "estimators", ")", ".", "axis", ":", "int", "optional", "Axis", "to", "compute", "the", "statistics", "along", ".", "Presently", "only", "supported", "if", "robust", "is", "False", ".", "Default", "is", "1", ".", "plot_QQ", ":", "bool", "optional", "Whether", "or", "not", "a", "QQ", "plot", "and", "histogram", "should", "be", "drawn", "for", "each", "channel", ".", "Default", "is", "False", "(", "do", "not", "draw", "QQ", "plots", ")", ".", "bins", ":", "int", "optional", "Number", "of", "bins", "to", "use", "when", "plotting", "histogram", "(", "for", "plot_QQ", "=", "True", ")", ".", "Default", "is", "15", "name", ":", "str", "optional", "Name", "to", "put", "in", "the", "title", "of", "the", "QQ", "/", "histogram", "plot", ".", "Returns", "-------", "mean", ":", "ndarray", "(", "M", ")", "Estimator", "for", "the", "mean", "of", "vals", ".", "std", ":", "ndarray", "(", "M", ")", "Estimator", "for", "the", "standard", "deviation", "of", "vals", ".", "Raises", "------", "NotImplementedError", "If", "axis", "!", "=", "1", "when", "robust", "is", "True", ".", "NotImplementedError", "If", "plot_QQ", "is", "True", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1767-L1899
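A minimal usage sketch for compute_stats (not part of the record above; the input data and the import path are illustrative assumptions, and the Python 2 / older-scipy environment the source itself targets is assumed):

import scipy

from gptools.utils import compute_stats  # import path assumed from func_path_in_repository

# Hypothetical input: 5 channels (`M`) with 200 samples each (`D`).
vals = scipy.reshape(scipy.linspace(-3.0, 3.0, 1000), (5, 200))

# Non-robust estimators (scipy.mean / scipy.std) along axis 1:
mean, std = compute_stats(vals)
# mean and std each have shape (5,); passing robust=True switches to the
# median and an IQR-derived standard deviation instead.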
markchil/gptools
gptools/utils.py
univariate_envelope_plot
def univariate_envelope_plot(x, mean, std, ax=None, base_alpha=0.375, envelopes=[1, 3], lb=None, ub=None, expansion=10, **kwargs):
    """Make a plot of a mean curve with uncertainty envelopes.
    """
    if ax is None:
        f = plt.figure()
        ax = f.add_subplot(1, 1, 1)
    elif ax == 'gca':
        ax = plt.gca()

    mean = scipy.asarray(mean, dtype=float).copy()
    std = scipy.asarray(std, dtype=float).copy()

    # Truncate the data so matplotlib doesn't die:
    if lb is not None and ub is not None and expansion != 1.0:
        expansion *= ub - lb
        ub = ub + expansion
        lb = lb - expansion
    if ub is not None:
        mean[mean > ub] = ub
    if lb is not None:
        mean[mean < lb] = lb

    l = ax.plot(x, mean, **kwargs)
    color = plt.getp(l[0], 'color')
    e = []
    for i in envelopes:
        lower = mean - i * std
        upper = mean + i * std
        if ub is not None:
            lower[lower > ub] = ub
            upper[upper > ub] = ub
        if lb is not None:
            lower[lower < lb] = lb
            upper[upper < lb] = lb
        e.append(ax.fill_between(x, lower, upper, facecolor=color, alpha=base_alpha / i))
    return (l, e)
python
def univariate_envelope_plot(x, mean, std, ax=None, base_alpha=0.375, envelopes=[1, 3], lb=None, ub=None, expansion=10, **kwargs):
    """Make a plot of a mean curve with uncertainty envelopes.
    """
    if ax is None:
        f = plt.figure()
        ax = f.add_subplot(1, 1, 1)
    elif ax == 'gca':
        ax = plt.gca()

    mean = scipy.asarray(mean, dtype=float).copy()
    std = scipy.asarray(std, dtype=float).copy()

    # Truncate the data so matplotlib doesn't die:
    if lb is not None and ub is not None and expansion != 1.0:
        expansion *= ub - lb
        ub = ub + expansion
        lb = lb - expansion
    if ub is not None:
        mean[mean > ub] = ub
    if lb is not None:
        mean[mean < lb] = lb

    l = ax.plot(x, mean, **kwargs)
    color = plt.getp(l[0], 'color')
    e = []
    for i in envelopes:
        lower = mean - i * std
        upper = mean + i * std
        if ub is not None:
            lower[lower > ub] = ub
            upper[upper > ub] = ub
        if lb is not None:
            lower[lower < lb] = lb
            upper[upper < lb] = lb
        e.append(ax.fill_between(x, lower, upper, facecolor=color, alpha=base_alpha / i))
    return (l, e)
[ "def", "univariate_envelope_plot", "(", "x", ",", "mean", ",", "std", ",", "ax", "=", "None", ",", "base_alpha", "=", "0.375", ",", "envelopes", "=", "[", "1", ",", "3", "]", ",", "lb", "=", "None", ",", "ub", "=", "None", ",", "expansion", "=", "10", ",", "*", "*", "kwargs", ")", ":", "if", "ax", "is", "None", ":", "f", "=", "plt", ".", "figure", "(", ")", "ax", "=", "f", ".", "add_subplot", "(", "1", ",", "1", ",", "1", ")", "elif", "ax", "==", "'gca'", ":", "ax", "=", "plt", ".", "gca", "(", ")", "mean", "=", "scipy", ".", "asarray", "(", "mean", ",", "dtype", "=", "float", ")", ".", "copy", "(", ")", "std", "=", "scipy", ".", "asarray", "(", "std", ",", "dtype", "=", "float", ")", ".", "copy", "(", ")", "# Truncate the data so matplotlib doesn't die:", "if", "lb", "is", "not", "None", "and", "ub", "is", "not", "None", "and", "expansion", "!=", "1.0", ":", "expansion", "*=", "ub", "-", "lb", "ub", "=", "ub", "+", "expansion", "lb", "=", "lb", "-", "expansion", "if", "ub", "is", "not", "None", ":", "mean", "[", "mean", ">", "ub", "]", "=", "ub", "if", "lb", "is", "not", "None", ":", "mean", "[", "mean", "<", "lb", "]", "=", "lb", "l", "=", "ax", ".", "plot", "(", "x", ",", "mean", ",", "*", "*", "kwargs", ")", "color", "=", "plt", ".", "getp", "(", "l", "[", "0", "]", ",", "'color'", ")", "e", "=", "[", "]", "for", "i", "in", "envelopes", ":", "lower", "=", "mean", "-", "i", "*", "std", "upper", "=", "mean", "+", "i", "*", "std", "if", "ub", "is", "not", "None", ":", "lower", "[", "lower", ">", "ub", "]", "=", "ub", "upper", "[", "upper", ">", "ub", "]", "=", "ub", "if", "lb", "is", "not", "None", ":", "lower", "[", "lower", "<", "lb", "]", "=", "lb", "upper", "[", "upper", "<", "lb", "]", "=", "lb", "e", ".", "append", "(", "ax", ".", "fill_between", "(", "x", ",", "lower", ",", "upper", ",", "facecolor", "=", "color", ",", "alpha", "=", "base_alpha", "/", "i", ")", ")", "return", "(", "l", ",", "e", ")" ]
Make a plot of a mean curve with uncertainty envelopes.
[ "Make", "a", "plot", "of", "a", "mean", "curve", "with", "uncertainty", "envelopes", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1901-L1936
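A minimal usage sketch for univariate_envelope_plot (illustrative data; assumes the same scipy/matplotlib environment the function itself relies on, and that the import path matches func_path_in_repository):

import scipy
import matplotlib.pyplot as plt

from gptools.utils import univariate_envelope_plot  # import path assumed

x = scipy.linspace(0.0, 10.0, 100)
mean = scipy.sin(x)
std = 0.1 + 0.05 * scipy.absolute(scipy.cos(x))

# Mean curve with shaded 1-sigma and 3-sigma envelopes; extra keyword
# arguments (here the line style) are passed through to ax.plot:
l, e = univariate_envelope_plot(x, mean, std, envelopes=[1, 3], linestyle='--')
plt.show()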
markchil/gptools
gptools/utils.py
summarize_sampler
def summarize_sampler(sampler, weights=None, burn=0, ci=0.95, chain_mask=None):
    r"""Create summary statistics of the flattened chain of the sampler.

    The confidence regions are computed from the quantiles of the data.

    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
        The sampler to summarize the chains of.
    weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
        The weight for each sample. This is useful for post-processing the
        output from MultiNest sampling, for instance.
    burn : int, optional
        The number of samples to burn from the beginning of the chain.
        Default is 0 (no burn).
    ci : float, optional
        A number between 0 and 1 indicating the confidence region to compute.
        Default is 0.95 (return upper and lower bounds of the 95% confidence
        interval).
    chain_mask : (index) array, optional
        Mask identifying the chains to keep before plotting, in case there
        are bad chains. Default is to use all chains.

    Returns
    -------
    mean : array, (num_params,)
        Mean values of each of the parameters sampled.
    ci_l : array, (num_params,)
        Lower bounds of the `ci*100%` confidence intervals.
    ci_u : array, (num_params,)
        Upper bounds of the `ci*100%` confidence intervals.
    """
    # temp_idx is not exposed as a keyword; the lowest temperature (the
    # posterior itself) is summarized for parallel-tempered samplers.
    # Defining it here keeps the PTSampler and 4-d array branches below
    # from raising a NameError.
    temp_idx = 0
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain" attribute.
        k = sampler.shape[-1]

    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
    else:
        raise ValueError("Unknown sampler class: %s" % (type(sampler),))

    cibdry = 100.0 * (1.0 - ci) / 2.0
    if weights is None:
        mean = scipy.mean(flat_trace, axis=0)
        ci_l, ci_u = scipy.percentile(flat_trace, [cibdry, 100.0 - cibdry], axis=0)
    else:
        mean = weights.dot(flat_trace) / weights.sum()
        ci_l = scipy.zeros(k)
        ci_u = scipy.zeros(k)
        p = scipy.asarray([cibdry, 100.0 - cibdry])
        for i in range(0, k):
            srt = flat_trace[:, i].argsort()
            x = flat_trace[srt, i]
            w = weights[srt]
            Sn = w.cumsum()
            pn = 100.0 / Sn[-1] * (Sn - w / 2.0)
            j = scipy.digitize(p, pn) - 1
            ci_l[i], ci_u[i] = x[j] + (p - pn[j]) / (pn[j + 1] - pn[j]) * (x[j + 1] - x[j])
    return (mean, ci_l, ci_u)
python
def summarize_sampler(sampler, weights=None, burn=0, ci=0.95, chain_mask=None):
    r"""Create summary statistics of the flattened chain of the sampler.

    The confidence regions are computed from the quantiles of the data.

    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
        The sampler to summarize the chains of.
    weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
        The weight for each sample. This is useful for post-processing the
        output from MultiNest sampling, for instance.
    burn : int, optional
        The number of samples to burn from the beginning of the chain.
        Default is 0 (no burn).
    ci : float, optional
        A number between 0 and 1 indicating the confidence region to compute.
        Default is 0.95 (return upper and lower bounds of the 95% confidence
        interval).
    chain_mask : (index) array, optional
        Mask identifying the chains to keep before plotting, in case there
        are bad chains. Default is to use all chains.

    Returns
    -------
    mean : array, (num_params,)
        Mean values of each of the parameters sampled.
    ci_l : array, (num_params,)
        Lower bounds of the `ci*100%` confidence intervals.
    ci_u : array, (num_params,)
        Upper bounds of the `ci*100%` confidence intervals.
    """
    # temp_idx is not exposed as a keyword; the lowest temperature (the
    # posterior itself) is summarized for parallel-tempered samplers.
    # Defining it here keeps the PTSampler and 4-d array branches below
    # from raising a NameError.
    temp_idx = 0
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain" attribute.
        k = sampler.shape[-1]

    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
    else:
        raise ValueError("Unknown sampler class: %s" % (type(sampler),))

    cibdry = 100.0 * (1.0 - ci) / 2.0
    if weights is None:
        mean = scipy.mean(flat_trace, axis=0)
        ci_l, ci_u = scipy.percentile(flat_trace, [cibdry, 100.0 - cibdry], axis=0)
    else:
        mean = weights.dot(flat_trace) / weights.sum()
        ci_l = scipy.zeros(k)
        ci_u = scipy.zeros(k)
        p = scipy.asarray([cibdry, 100.0 - cibdry])
        for i in range(0, k):
            srt = flat_trace[:, i].argsort()
            x = flat_trace[srt, i]
            w = weights[srt]
            Sn = w.cumsum()
            pn = 100.0 / Sn[-1] * (Sn - w / 2.0)
            j = scipy.digitize(p, pn) - 1
            ci_l[i], ci_u[i] = x[j] + (p - pn[j]) / (pn[j + 1] - pn[j]) * (x[j + 1] - x[j])
    return (mean, ci_l, ci_u)
[ "def", "summarize_sampler", "(", "sampler", ",", "weights", "=", "None", ",", "burn", "=", "0", ",", "ci", "=", "0.95", ",", "chain_mask", "=", "None", ")", ":", "try", ":", "k", "=", "sampler", ".", "flatchain", ".", "shape", "[", "-", "1", "]", "except", "AttributeError", ":", "# Assumes array input is only case where there is no \"flatchain\" attribute.", "k", "=", "sampler", ".", "shape", "[", "-", "1", "]", "if", "isinstance", "(", "sampler", ",", "emcee", ".", "EnsembleSampler", ")", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "chain", ".", "shape", "[", "0", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", ".", "chain", "[", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "elif", "isinstance", "(", "sampler", ",", "emcee", ".", "PTSampler", ")", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "nwalkers", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", ".", "chain", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "elif", "isinstance", "(", "sampler", ",", "scipy", ".", "ndarray", ")", ":", "if", "sampler", ".", "ndim", "==", "4", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "shape", "[", "1", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "elif", "sampler", ".", "ndim", "==", "3", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "shape", "[", "0", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", "[", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "chain_mask", ",", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "elif", "sampler", ".", "ndim", "==", "2", ":", "flat_trace", "=", "sampler", "[", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown sampler class: %s\"", "%", "(", "type", "(", "sampler", ")", ",", ")", ")", "cibdry", "=", "100.0", "*", "(", "1.0", "-", "ci", ")", "/", "2.0", "if", "weights", "is", "None", ":", "mean", "=", "scipy", ".", "mean", "(", "flat_trace", ",", "axis", "=", "0", ")", "ci_l", ",", "ci_u", "=", "scipy", ".", "percentile", "(", "flat_trace", ",", "[", "cibdry", ",", "100.0", "-", "cibdry", "]", ",", "axis", "=", "0", ")", "else", ":", "mean", "=", "weights", ".", "dot", "(", "flat_trace", ")", "/", "weights", ".", "sum", "(", ")", "ci_l", "=", "scipy", ".", "zeros", "(", "k", ")", "ci_u", "=", "scipy", ".", "zeros", "(", "k", ")", "p", "=", "scipy", ".", "asarray", "(", "[", "cibdry", 
",", "100.0", "-", "cibdry", "]", ")", "for", "i", "in", "range", "(", "0", ",", "k", ")", ":", "srt", "=", "flat_trace", "[", ":", ",", "i", "]", ".", "argsort", "(", ")", "x", "=", "flat_trace", "[", "srt", ",", "i", "]", "w", "=", "weights", "[", "srt", "]", "Sn", "=", "w", ".", "cumsum", "(", ")", "pn", "=", "100.0", "/", "Sn", "[", "-", "1", "]", "*", "(", "Sn", "-", "w", "/", "2.0", ")", "j", "=", "scipy", ".", "digitize", "(", "p", ",", "pn", ")", "-", "1", "ci_l", "[", "i", "]", ",", "ci_u", "[", "i", "]", "=", "x", "[", "j", "]", "+", "(", "p", "-", "pn", "[", "j", "]", ")", "/", "(", "pn", "[", "j", "+", "1", "]", "-", "pn", "[", "j", "]", ")", "*", "(", "x", "[", "j", "+", "1", "]", "-", "x", "[", "j", "]", ")", "return", "(", "mean", ",", "ci_l", ",", "ci_u", ")" ]
Create summary statistics of the flattened chain of the sampler.

The confidence regions are computed from the quantiles of the data.

Parameters
----------
sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
    The sampler to summarize the chains of.
weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
    The weight for each sample. This is useful for post-processing the
    output from MultiNest sampling, for instance.
burn : int, optional
    The number of samples to burn from the beginning of the chain. Default
    is 0 (no burn).
ci : float, optional
    A number between 0 and 1 indicating the confidence region to compute.
    Default is 0.95 (return upper and lower bounds of the 95% confidence
    interval).
chain_mask : (index) array, optional
    Mask identifying the chains to keep before plotting, in case there are
    bad chains. Default is to use all chains.

Returns
-------
mean : array, (num_params,)
    Mean values of each of the parameters sampled.
ci_l : array, (num_params,)
    Lower bounds of the `ci*100%` confidence intervals.
ci_u : array, (num_params,)
    Upper bounds of the `ci*100%` confidence intervals.
[ "r", "Create", "summary", "statistics", "of", "the", "flattened", "chain", "of", "the", "sampler", ".", "The", "confidence", "regions", "are", "computed", "from", "the", "quantiles", "of", "the", "data", ".", "Parameters", "----------", "sampler", ":", ":", "py", ":", "class", ":", "emcee", ".", "Sampler", "instance", "or", "array", "(", "n_temps", "n_chains", "n_samp", "n_dim", ")", "(", "n_chains", "n_samp", "n_dim", ")", "or", "(", "n_samp", "n_dim", ")", "The", "sampler", "to", "summarize", "the", "chains", "of", ".", "weights", ":", "array", "(", "n_temps", "n_chains", "n_samp", ")", "(", "n_chains", "n_samp", ")", "or", "(", "n_samp", ")", "optional", "The", "weight", "for", "each", "sample", ".", "This", "is", "useful", "for", "post", "-", "processing", "the", "output", "from", "MultiNest", "sampling", "for", "instance", ".", "burn", ":", "int", "optional", "The", "number", "of", "samples", "to", "burn", "from", "the", "beginning", "of", "the", "chain", ".", "Default", "is", "0", "(", "no", "burn", ")", ".", "ci", ":", "float", "optional", "A", "number", "between", "0", "and", "1", "indicating", "the", "confidence", "region", "to", "compute", ".", "Default", "is", "0", ".", "95", "(", "return", "upper", "and", "lower", "bounds", "of", "the", "95%", "confidence", "interval", ")", ".", "chain_mask", ":", "(", "index", ")", "array", "optional", "Mask", "identifying", "the", "chains", "to", "keep", "before", "plotting", "in", "case", "there", "are", "bad", "chains", ".", "Default", "is", "to", "use", "all", "chains", ".", "Returns", "-------", "mean", ":", "array", "(", "num_params", ")", "Mean", "values", "of", "each", "of", "the", "parameters", "sampled", ".", "ci_l", ":", "array", "(", "num_params", ")", "Lower", "bounds", "of", "the", "ci", "*", "100%", "confidence", "intervals", ".", "ci_u", ":", "array", "(", "num_params", ")", "Upper", "bounds", "of", "the", "ci", "*", "100%", "confidence", "intervals", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1938-L2030
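A minimal usage sketch for summarize_sampler, exercising the plain-array branch (the chain values are fabricated for illustration and the import path is an assumption):

import scipy
import scipy.stats

from gptools.utils import summarize_sampler  # import path assumed

# Hypothetical "chain" array: 4 walkers, 500 samples, 2 free parameters.
chain = scipy.stats.norm.rvs(size=(4, 500, 2))

# Discard the first 100 samples of each walker and report 95% intervals:
mean, ci_l, ci_u = summarize_sampler(chain, burn=100, ci=0.95)
# mean, ci_l and ci_u each have shape (2,).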
markchil/gptools
gptools/utils.py
plot_sampler
def plot_sampler(
    sampler, suptitle=None, labels=None, bins=50,
    plot_samples=False, plot_hist=True, plot_chains=True,
    burn=0, chain_mask=None, temp_idx=0, weights=None,
    cutoff_weight=None, cmap='gray_r', hist_color='k', chain_alpha=0.1,
    points=None, covs=None, colors=None, ci=[0.95],
    max_hist_ticks=None, max_chain_ticks=6,
    label_chain_y=False, hide_chain_yticklabels=False, chain_ytick_pad=2.0,
    label_fontsize=None, ticklabel_fontsize=None,
    chain_label_fontsize=None, chain_ticklabel_fontsize=None,
    xticklabel_angle=90.0,
    bottom_sep=0.075, suptitle_space=0.1, fixed_height=None,
    fixed_width=None, l=0.1, r=0.9, t1=None, b1=None, t2=0.2, b2=0.1,
    ax_space=0.1
):
    """Plot the results of MCMC sampler (posterior and chains).

    Loosely based on triangle.py. Provides extensive options to format the
    plot.

    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
        The sampler to plot the chains/marginals of. Can also be an array of
        samples which matches the shape of the `chain` attribute that would
        be present in a :py:class:`emcee.Sampler` instance.
    suptitle : str, optional
        The figure title to place at the top. Default is no title.
    labels : list of str, optional
        The labels to use for each of the free parameters. Default is to
        leave the axes unlabeled.
    bins : int, optional
        Number of bins to use for the histograms. Default is 50.
    plot_samples : bool, optional
        If True, the samples are plotted as individual points. Default is
        False.
    plot_hist : bool, optional
        If True, histograms are plotted. Default is True.
    plot_chains : bool, optional
        If True, plot the sampler chains at the bottom. Default is True.
    burn : int, optional
        The number of samples to burn before making the marginal histograms.
        Default is zero (use all samples).
    chain_mask : (index) array, optional
        Mask identifying the chains to keep before plotting, in case there
        are bad chains. Default is to use all chains.
    temp_idx : int, optional
        Index of the temperature to plot when plotting a
        :py:class:`emcee.PTSampler`. Default is 0 (samples from the
        posterior).
    weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
        The weight for each sample. This is useful for post-processing the
        output from MultiNest sampling, for instance. Default is to not
        weight the samples.
    cutoff_weight : float, optional
        If `weights` and `cutoff_weight` are present, points with
        `weights < cutoff_weight * weights.max()` will be excluded. Default
        is to plot all points.
    cmap : str, optional
        The colormap to use for the histograms. Default is 'gray_r'.
    hist_color : str, optional
        The color to use for the univariate histograms. Default is 'k'.
    chain_alpha : float, optional
        The transparency to use for the plots of the individual chains.
        Setting this to something low lets you better visualize what is
        going on. Default is 0.1.
    points : array, (`D`,) or (`N`, `D`), optional
        Array of point(s) to plot onto each marginal and chain. Default is
        None.
    covs : array, (`D`, `D`) or (`N`, `D`, `D`), optional
        Covariance matrix or array of covariance matrices to plot onto each
        marginal. If you do not want to plot a covariance matrix for a
        specific point, set its corresponding entry to `None`. Default is to
        not plot confidence ellipses for any points.
    colors : array of str, (`N`,), optional
        The colors to use for the points in `points`. Default is to use the
        standard matplotlib RGBCMYK cycle.
    ci : array, (`num_ci`,), optional
        List of confidence intervals to plot for each non-`None` entry in
        `covs`. Default is 0.95 (just plot the 95 percent confidence
        interval).
    max_hist_ticks : int, optional
        The maximum number of ticks for the histogram plots. Default is None
        (no limit).
    max_chain_ticks : int, optional
        The maximum number of y-axis ticks for the chain plots. Default is 6.
    label_chain_y : bool, optional
        If True, the chain plots will have y axis labels. Default is False.
    hide_chain_yticklabels : bool, optional
        If True, hide the y axis tick labels for the chain plots. Default is
        False (show y tick labels).
    chain_ytick_pad : float, optional
        The padding (in points) between the y-axis tick labels and the axis
        for the chain plots. Default is 2.0.
    label_fontsize : float, optional
        The font size (in points) to use for the axis labels. Default is
        `axes.labelsize`.
    ticklabel_fontsize : float, optional
        The font size (in points) to use for the axis tick labels. Default
        is `xtick.labelsize`.
    chain_label_fontsize : float, optional
        The font size (in points) to use for the labels of the chain axes.
        Default is `axes.labelsize`.
    chain_ticklabel_fontsize : float, optional
        The font size (in points) to use for the chain axis tick labels.
        Default is `xtick.labelsize`.
    xticklabel_angle : float, optional
        The angle to rotate the x tick labels, in degrees. Default is 90.
    bottom_sep : float, optional
        The separation (in relative figure units) between the chains and the
        marginals. Default is 0.075.
    suptitle_space : float, optional
        The amount of space (in relative figure units) to leave for a figure
        title. Default is 0.1.
    fixed_height : float, optional
        The desired figure height (in inches). Default is to automatically
        adjust based on `fixed_width` to make the subplots square.
    fixed_width : float, optional
        The desired figure width (in inches). Default is `figure.figsize[0]`.
    l : float, optional
        The location (in relative figure units) of the left margin. Default
        is 0.1.
    r : float, optional
        The location (in relative figure units) of the right margin. Default
        is 0.9.
    t1 : float, optional
        The location (in relative figure units) of the top of the grid of
        histograms. Overrides `suptitle_space` if present.
    b1 : float, optional
        The location (in relative figure units) of the bottom of the grid of
        histograms. Overrides `bottom_sep` if present. Defaults to 0.1 if
        `plot_chains` is False.
    t2 : float, optional
        The location (in relative figure units) of the top of the grid of
        chain plots. Default is 0.2.
    b2 : float, optional
        The location (in relative figure units) of the bottom of the grid of
        chain plots. Default is 0.1.
    ax_space : float, optional
        The `w_space` and `h_space` to use (in relative figure units).
        Default is 0.1.
    """
    masked_weights = None
    if points is not None:
        points = scipy.atleast_2d(points)
        if covs is not None and len(covs) != len(points):
            raise ValueError(
                "If covariance matrices are provided, len(covs) must equal len(points)!"
            )
        elif covs is None:
            covs = [None,] * len(points)
        if colors is None:
            c_cycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
            colors = [c_cycle.next() for p in points]

    # Create axes:
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain" attribute.
        k = sampler.shape[-1]

    if labels is None:
        labels = [''] * k

    # Set up geometry:
    # plot_chains =
    #      True:              False:
    #   +-----------+      +-----------+
    #   | +-------+ |      | +-------+ |
    #   | |       | |      | |       | |
    #   | |       | |      | |       | |
    #   | |       | |      | |       | |
    #   | +-------+ |      | +-------+ |
    #   | +-------+ |      +-----------+
    #   | |       | |
    #   | +-------+ |
    #   +-----------+

    # We retain support for the original suptitle_space keyword, but can
    # override with t1 as needed:
    if t1 is None:
        t1 = 1 - suptitle_space
    # We retain support for the original bottom_sep keyword, but can override
    # with b1 as needed:
    if b1 is None:
        if plot_chains:
            b1 = t2 + bottom_sep
        else:
            b1 = 0.1

    if fixed_height is None and fixed_width is None:
        # Default: use matplotlib's default width, handle remaining parameters
        # with the fixed width case below:
        fixed_width = matplotlib.rcParams['figure.figsize'][0]
    if fixed_height is None and fixed_width is not None:
        # Only width specified, compute height to yield square histograms:
        fixed_height = fixed_width * (r - l) / (t1 - b1)
    elif fixed_height is not None and fixed_width is None:
        # Only height specified, compute width to yield square histograms
        fixed_width = fixed_height * (t1 - b1) / (r - l)
    # Otherwise width and height are fixed, and we may not have square
    # histograms, at the user's discretion.

    wspace = ax_space
    hspace = ax_space

    # gs1 is the histograms, gs2 is the chains:
    f = plt.figure(figsize=(fixed_width, fixed_height))
    gs1 = mplgs.GridSpec(k, k)
    gs1.update(bottom=b1, top=t1, left=l, right=r, wspace=wspace, hspace=hspace)
    if plot_chains:
        gs2 = mplgs.GridSpec(1, k)
        gs2.update(bottom=b2, top=t2, left=l, right=r, wspace=wspace, hspace=hspace)

    axes = []
    # j is the row, i is the column.
    for j in xrange(0, k + int(plot_chains)):
        row = []
        for i in xrange(0, k):
            if i > j:
                row.append(None)
            else:
                sharey = row[-1] if i > 0 and i < j and j < k else None
                sharex = axes[-1][i] if j > i and j < k else \
                    (row[-1] if i > 0 and j == k else None)
                gs = gs1[j, i] if j < k else gs2[:, i]
                row.append(f.add_subplot(gs, sharey=sharey, sharex=sharex))
                if j < k and ticklabel_fontsize is not None:
                    row[-1].tick_params(labelsize=ticklabel_fontsize)
                elif j >= k and chain_ticklabel_fontsize is not None:
                    row[-1].tick_params(labelsize=chain_ticklabel_fontsize)
        axes.append(row)
    axes = scipy.asarray(axes)

    # Update axes with the data:
    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
        if cutoff_weight is not None and weights is not None:
            mask = weights >= cutoff_weight * weights.max()
            flat_trace = flat_trace[mask, :]
            masked_weights = weights[mask]
        else:
            masked_weights = weights
    else:
        raise ValueError("Unknown sampler class: %s" % (type(sampler),))

    # j is the row, i is the column.
    for i in xrange(0, k):
        axes[i, i].clear()
        if plot_hist:
            axes[i, i].hist(flat_trace[:, i], bins=bins, color=hist_color,
                            weights=masked_weights, normed=True, histtype='stepfilled')
        if plot_samples:
            axes[i, i].plot(flat_trace[:, i], scipy.zeros_like(flat_trace[:, i]), ',', alpha=0.1)
        if points is not None:
            # axvline can only take a scalar x, so we have to loop:
            for p, c, cov in zip(points, colors, covs):
                axes[i, i].axvline(x=p[i], linewidth=3, color=c)
                if cov is not None:
                    xlim = axes[i, i].get_xlim()
                    i_grid = scipy.linspace(xlim[0], xlim[1], 100)
                    axes[i, i].plot(
                        i_grid,
                        scipy.stats.norm.pdf(i_grid, loc=p[i], scale=scipy.sqrt(cov[i, i])),
                        c,
                        linewidth=3.0
                    )
                    axes[i, i].set_xlim(xlim)
        if i == k - 1:
            axes[i, i].set_xlabel(labels[i], fontsize=label_fontsize)
            plt.setp(axes[i, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
        if i < k - 1:
            plt.setp(axes[i, i].get_xticklabels(), visible=False)
        plt.setp(axes[i, i].get_yticklabels(), visible=False)
        for j in xrange(i + 1, k):
            axes[j, i].clear()
            if plot_hist:
                ct, x, y, im = axes[j, i].hist2d(
                    flat_trace[:, i],
                    flat_trace[:, j],
                    bins=bins,
                    cmap=cmap,
                    weights=masked_weights
                )
            if plot_samples:
                axes[j, i].plot(flat_trace[:, i], flat_trace[:, j], ',', alpha=0.1)
            if points is not None:
                for p, c, cov in zip(points, colors, covs):
                    axes[j, i].plot(p[i], p[j], 'o', color=c)
                    if cov is not None:
                        Sigma = scipy.asarray([[cov[i, i], cov[i, j]], [cov[j, i], cov[j, j]]], dtype=float)
                        lam, v = scipy.linalg.eigh(Sigma)
                        chi2 = [-scipy.log(1.0 - cival) * 2.0 for cival in ci]
                        a = [2.0 * scipy.sqrt(chi2val * lam[-1]) for chi2val in chi2]
                        b = [2.0 * scipy.sqrt(chi2val * lam[-2]) for chi2val in chi2]
                        ang = scipy.arctan2(v[1, -1], v[0, -1])
                        for aval, bval in zip(a, b):
                            ell = mplp.Ellipse(
                                [p[i], p[j]],
                                aval,
                                bval,
                                angle=scipy.degrees(ang),
                                facecolor='none',
                                edgecolor=c,
                                linewidth=3
                            )
                            axes[j, i].add_artist(ell)
            # axes[j, i].plot(points[i], points[j], 'o')
            # xmid = 0.5 * (x[1:] + x[:-1])
            # ymid = 0.5 * (y[1:] + y[:-1])
            # axes[j, i].contour(xmid, ymid, ct.T, colors='k')
            if j < k - 1:
                plt.setp(axes[j, i].get_xticklabels(), visible=False)
            if i != 0:
                plt.setp(axes[j, i].get_yticklabels(), visible=False)
            if i == 0:
                axes[j, i].set_ylabel(labels[j], fontsize=label_fontsize)
            if j == k - 1:
                axes[j, i].set_xlabel(labels[i], fontsize=label_fontsize)
                plt.setp(axes[j, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
        if plot_chains:
            axes[-1, i].clear()
            if isinstance(sampler, emcee.EnsembleSampler):
                axes[-1, i].plot(sampler.chain[:, :, i].T, alpha=chain_alpha)
            elif isinstance(sampler, emcee.PTSampler):
                axes[-1, i].plot(sampler.chain[temp_idx, :, :, i].T, alpha=chain_alpha)
            else:
                if sampler.ndim == 4:
                    axes[-1, i].plot(sampler[temp_idx, :, :, i].T, alpha=chain_alpha)
                elif sampler.ndim == 3:
                    axes[-1, i].plot(sampler[:, :, i].T, alpha=chain_alpha)
                elif sampler.ndim == 2:
                    axes[-1, i].plot(sampler[:, i].T, alpha=chain_alpha)
            # Plot the weights on top of the chains:
            if weights is not None:
                a_wt = axes[-1, i].twinx()
                a_wt.plot(weights, alpha=chain_alpha, linestyle='--', color='r')
                plt.setp(a_wt.yaxis.get_majorticklabels(), visible=False)
                a_wt.yaxis.set_ticks_position('none')
                # Plot the cutoff weight as a horizontal line and the first
                # sample which is included as a vertical bar. Note that this
                # won't be quite the right behavior if the weights are not
                # roughly monotonic.
                if cutoff_weight is not None:
                    a_wt.axhline(cutoff_weight * weights.max(), linestyle='-', color='r')
                    wi, = scipy.where(weights >= cutoff_weight * weights.max())
                    a_wt.axvline(wi[0], linestyle='-', color='r')
            if burn > 0:
                axes[-1, i].axvline(burn, color='r', linewidth=3)
            if points is not None:
                for p, c in zip(points, colors):
                    axes[-1, i].axhline(y=p[i], linewidth=3, color=c)
                # Reset the xlim since it seems to get messed up:
                axes[-1, i].set_xlim(left=0)
                # try:
                #     [axes[-1, i].axhline(y=pt, linewidth=3) for pt in points[i]]
                # except TypeError:
                #     axes[-1, i].axhline(y=points[i], linewidth=3)
            if label_chain_y:
                axes[-1, i].set_ylabel(labels[i], fontsize=chain_label_fontsize)
            axes[-1, i].set_xlabel('step', fontsize=chain_label_fontsize)
            plt.setp(axes[-1, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
            for tick in axes[-1, i].get_yaxis().get_major_ticks():
                tick.set_pad(chain_ytick_pad)
                tick.label1 = tick._get_text1()

    for i in xrange(0, k):
        if max_hist_ticks is not None:
            axes[k - 1, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1))
            axes[i, 0].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1))
        if plot_chains and max_chain_ticks is not None:
            axes[k, i].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1))
            axes[k, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1))
        if plot_chains and hide_chain_yticklabels:
            plt.setp(axes[k, i].get_yticklabels(), visible=False)

    if suptitle is not None:
        f.suptitle(suptitle)
    f.canvas.draw()
    return f
python
def plot_sampler( sampler, suptitle=None, labels=None, bins=50, plot_samples=False, plot_hist=True, plot_chains=True, burn=0, chain_mask=None, temp_idx=0, weights=None, cutoff_weight=None, cmap='gray_r', hist_color='k', chain_alpha=0.1, points=None, covs=None, colors=None, ci=[0.95], max_hist_ticks=None, max_chain_ticks=6, label_chain_y=False, hide_chain_yticklabels=False, chain_ytick_pad=2.0, label_fontsize=None, ticklabel_fontsize=None, chain_label_fontsize=None, chain_ticklabel_fontsize=None, xticklabel_angle=90.0, bottom_sep=0.075, suptitle_space=0.1, fixed_height=None, fixed_width=None, l=0.1, r=0.9, t1=None, b1=None, t2=0.2, b2=0.1, ax_space=0.1 ): """Plot the results of MCMC sampler (posterior and chains). Loosely based on triangle.py. Provides extensive options to format the plot. Parameters ---------- sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`) The sampler to plot the chains/marginals of. Can also be an array of samples which matches the shape of the `chain` attribute that would be present in a :py:class:`emcee.Sampler` instance. suptitle : str, optional The figure title to place at the top. Default is no title. labels : list of str, optional The labels to use for each of the free parameters. Default is to leave the axes unlabeled. bins : int, optional Number of bins to use for the histograms. Default is 50. plot_samples : bool, optional If True, the samples are plotted as individual points. Default is False. plot_hist : bool, optional If True, histograms are plotted. Default is True. plot_chains : bool, optional If True, plot the sampler chains at the bottom. Default is True. burn : int, optional The number of samples to burn before making the marginal histograms. Default is zero (use all samples). chain_mask : (index) array, optional Mask identifying the chains to keep before plotting, in case there are bad chains. Default is to use all chains. temp_idx : int, optional Index of the temperature to plot when plotting a :py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior). weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional The weight for each sample. This is useful for post-processing the output from MultiNest sampling, for instance. Default is to not weight the samples. cutoff_weight : float, optional If `weights` and `cutoff_weight` are present, points with `weights < cutoff_weight * weights.max()` will be excluded. Default is to plot all points. cmap : str, optional The colormap to use for the histograms. Default is 'gray_r'. hist_color : str, optional The color to use for the univariate histograms. Default is 'k'. chain_alpha : float, optional The transparency to use for the plots of the individual chains. Setting this to something low lets you better visualize what is going on. Default is 0.1. points : array, (`D`,) or (`N`, `D`), optional Array of point(s) to plot onto each marginal and chain. Default is None. covs : array, (`D`, `D`) or (`N`, `D`, `D`), optional Covariance matrix or array of covariance matrices to plot onto each marginal. If you do not want to plot a covariance matrix for a specific point, set its corresponding entry to `None`. Default is to not plot confidence ellipses for any points. colors : array of str, (`N`,), optional The colors to use for the points in `points`. Default is to use the standard matplotlib RGBCMYK cycle. 
ci : array, (`num_ci`,), optional List of confidence intervals to plot for each non-`None` entry in `covs`. Default is 0.95 (just plot the 95 percent confidence interval). max_hist_ticks : int, optional The maximum number of ticks for the histogram plots. Default is None (no limit). max_chain_ticks : int, optional The maximum number of y-axis ticks for the chain plots. Default is 6. label_chain_y : bool, optional If True, the chain plots will have y axis labels. Default is False. hide_chain_yticklabels : bool, optional If True, hide the y axis tick labels for the chain plots. Default is False (show y tick labels). chain_ytick_pad : float, optional The padding (in points) between the y-axis tick labels and the axis for the chain plots. Default is 2.0. label_fontsize : float, optional The font size (in points) to use for the axis labels. Default is `axes.labelsize`. ticklabel_fontsize : float, optional The font size (in points) to use for the axis tick labels. Default is `xtick.labelsize`. chain_label_fontsize : float, optional The font size (in points) to use for the labels of the chain axes. Default is `axes.labelsize`. chain_ticklabel_fontsize : float, optional The font size (in points) to use for the chain axis tick labels. Default is `xtick.labelsize`. xticklabel_angle : float, optional The angle to rotate the x tick labels, in degrees. Default is 90. bottom_sep : float, optional The separation (in relative figure units) between the chains and the marginals. Default is 0.075. suptitle_space : float, optional The amount of space (in relative figure units) to leave for a figure title. Default is 0.1. fixed_height : float, optional The desired figure height (in inches). Default is to automatically adjust based on `fixed_width` to make the subplots square. fixed_width : float, optional The desired figure width (in inches). Default is `figure.figsize[0]`. l : float, optional The location (in relative figure units) of the left margin. Default is 0.1. r : float, optional The location (in relative figure units) of the right margin. Default is 0.9. t1 : float, optional The location (in relative figure units) of the top of the grid of histograms. Overrides `suptitle_space` if present. b1 : float, optional The location (in relative figure units) of the bottom of the grid of histograms. Overrides `bottom_sep` if present. Defaults to 0.1 if `plot_chains` is False. t2 : float, optional The location (in relative figure units) of the top of the grid of chain plots. Default is 0.2. b2 : float, optional The location (in relative figure units) of the bottom of the grid of chain plots. Default is 0.1. ax_space : float, optional The `w_space` and `h_space` to use (in relative figure units). Default is 0.1. """ masked_weights = None if points is not None: points = scipy.atleast_2d(points) if covs is not None and len(covs) != len(points): raise ValueError( "If covariance matrices are provided, len(covs) must equal len(points)!" ) elif covs is None: covs = [None,] * len(points) if colors is None: c_cycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k']) colors = [c_cycle.next() for p in points] # Create axes: try: k = sampler.flatchain.shape[-1] except AttributeError: # Assumes array input is only case where there is no "flatchain" attribute. 
        k = sampler.shape[-1]

    if labels is None:
        labels = [''] * k
    # Set up geometry:
    # plot_chains =
    #     True:            False:
    # +-----------+    +-----------+
    # | +-------+ |    | +-------+ |
    # | |       | |    | |       | |
    # | |       | |    | |       | |
    # | |       | |    | |       | |
    # | +-------+ |    | +-------+ |
    # | +-------+ |    +-----------+
    # | |       | |
    # | +-------+ |
    # +-----------+

    # We retain support for the original suptitle_space keyword, but can
    # override with t1 as needed:
    if t1 is None:
        t1 = 1 - suptitle_space
    # We retain support for the original bottom_sep keyword, but can override
    # with b1 as needed:
    if b1 is None:
        if plot_chains:
            b1 = t2 + bottom_sep
        else:
            b1 = 0.1
    if fixed_height is None and fixed_width is None:
        # Default: use matplotlib's default width, handle remaining parameters
        # with the fixed width case below:
        fixed_width = matplotlib.rcParams['figure.figsize'][0]
    if fixed_height is None and fixed_width is not None:
        # Only width specified, compute height to yield square histograms:
        fixed_height = fixed_width * (r - l) / (t1 - b1)
    elif fixed_height is not None and fixed_width is None:
        # Only height specified, compute width to yield square histograms:
        fixed_width = fixed_height * (t1 - b1) / (r - l)
    # Otherwise width and height are fixed, and we may not have square
    # histograms, at the user's discretion.
    wspace = ax_space
    hspace = ax_space

    # gs1 is the histograms, gs2 is the chains:
    f = plt.figure(figsize=(fixed_width, fixed_height))
    gs1 = mplgs.GridSpec(k, k)
    gs1.update(bottom=b1, top=t1, left=l, right=r, wspace=wspace, hspace=hspace)
    if plot_chains:
        gs2 = mplgs.GridSpec(1, k)
        gs2.update(bottom=b2, top=t2, left=l, right=r, wspace=wspace, hspace=hspace)
    axes = []
    # j is the row, i is the column.
    for j in xrange(0, k + int(plot_chains)):
        row = []
        for i in xrange(0, k):
            if i > j:
                row.append(None)
            else:
                sharey = row[-1] if i > 0 and i < j and j < k else None
                sharex = axes[-1][i] if j > i and j < k else \
                    (row[-1] if i > 0 and j == k else None)
                gs = gs1[j, i] if j < k else gs2[:, i]
                row.append(f.add_subplot(gs, sharey=sharey, sharex=sharex))
                if j < k and ticklabel_fontsize is not None:
                    row[-1].tick_params(labelsize=ticklabel_fontsize)
                elif j >= k and chain_ticklabel_fontsize is not None:
                    row[-1].tick_params(labelsize=chain_ticklabel_fontsize)
        axes.append(row)
    axes = scipy.asarray(axes)

    # Update axes with the data:
    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
        if cutoff_weight is not None and weights is not None:
            mask = weights >= cutoff_weight * weights.max()
            flat_trace = flat_trace[mask, :]
            masked_weights = weights[mask]
        else:
            masked_weights = weights
    else:
        raise ValueError("Unknown sampler class: %s" % (type(sampler),))

    # j is the row, i is the column.
    for i in xrange(0, k):
        axes[i, i].clear()
        if plot_hist:
            axes[i, i].hist(
                flat_trace[:, i],
                bins=bins,
                color=hist_color,
                weights=masked_weights,
                normed=True,
                histtype='stepfilled'
            )
        if plot_samples:
            axes[i, i].plot(flat_trace[:, i], scipy.zeros_like(flat_trace[:, i]), ',', alpha=0.1)
        if points is not None:
            # axvline can only take a scalar x, so we have to loop:
            for p, c, cov in zip(points, colors, covs):
                axes[i, i].axvline(x=p[i], linewidth=3, color=c)
                if cov is not None:
                    xlim = axes[i, i].get_xlim()
                    i_grid = scipy.linspace(xlim[0], xlim[1], 100)
                    axes[i, i].plot(
                        i_grid,
                        scipy.stats.norm.pdf(
                            i_grid,
                            loc=p[i],
                            scale=scipy.sqrt(cov[i, i])
                        ),
                        c,
                        linewidth=3.0
                    )
                    axes[i, i].set_xlim(xlim)
        if i == k - 1:
            axes[i, i].set_xlabel(labels[i], fontsize=label_fontsize)
            plt.setp(axes[i, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
        if i < k - 1:
            plt.setp(axes[i, i].get_xticklabels(), visible=False)
        plt.setp(axes[i, i].get_yticklabels(), visible=False)
        for j in xrange(i + 1, k):
            axes[j, i].clear()
            if plot_hist:
                ct, x, y, im = axes[j, i].hist2d(
                    flat_trace[:, i],
                    flat_trace[:, j],
                    bins=bins,
                    cmap=cmap,
                    weights=masked_weights
                )
            if plot_samples:
                axes[j, i].plot(flat_trace[:, i], flat_trace[:, j], ',', alpha=0.1)
            if points is not None:
                for p, c, cov in zip(points, colors, covs):
                    axes[j, i].plot(p[i], p[j], 'o', color=c)
                    if cov is not None:
                        Sigma = scipy.asarray(
                            [[cov[i, i], cov[i, j]], [cov[j, i], cov[j, j]]],
                            dtype=float
                        )
                        lam, v = scipy.linalg.eigh(Sigma)
                        chi2 = [-scipy.log(1.0 - cival) * 2.0 for cival in ci]
                        a = [2.0 * scipy.sqrt(chi2val * lam[-1]) for chi2val in chi2]
                        b = [2.0 * scipy.sqrt(chi2val * lam[-2]) for chi2val in chi2]
                        ang = scipy.arctan2(v[1, -1], v[0, -1])
                        for aval, bval in zip(a, b):
                            ell = mplp.Ellipse(
                                [p[i], p[j]],
                                aval,
                                bval,
                                angle=scipy.degrees(ang),
                                facecolor='none',
                                edgecolor=c,
                                linewidth=3
                            )
                            axes[j, i].add_artist(ell)
            # axes[j, i].plot(points[i], points[j], 'o')
            # xmid = 0.5 * (x[1:] + x[:-1])
            # ymid = 0.5 * (y[1:] + y[:-1])
            # axes[j, i].contour(xmid, ymid, ct.T, colors='k')
            if j < k - 1:
                plt.setp(axes[j, i].get_xticklabels(), visible=False)
            if i != 0:
                plt.setp(axes[j, i].get_yticklabels(), visible=False)
            if i == 0:
                axes[j, i].set_ylabel(labels[j], fontsize=label_fontsize)
            if j == k - 1:
                axes[j, i].set_xlabel(labels[i], fontsize=label_fontsize)
                plt.setp(axes[j, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
        if plot_chains:
            axes[-1, i].clear()
            if isinstance(sampler, emcee.EnsembleSampler):
                axes[-1, i].plot(sampler.chain[:, :, i].T, alpha=chain_alpha)
            elif isinstance(sampler, emcee.PTSampler):
                axes[-1, i].plot(sampler.chain[temp_idx, :, :, i].T, alpha=chain_alpha)
            else:
                if sampler.ndim == 4:
                    axes[-1, i].plot(sampler[temp_idx, :, :, i].T, alpha=chain_alpha)
                elif sampler.ndim == 3:
                    axes[-1, i].plot(sampler[:, :, i].T, alpha=chain_alpha)
                elif sampler.ndim == 2:
                    axes[-1, i].plot(sampler[:, i].T, alpha=chain_alpha)
            # Plot the weights on top of the chains:
            if weights is not None:
                a_wt = axes[-1, i].twinx()
                a_wt.plot(weights, alpha=chain_alpha, linestyle='--', color='r')
                plt.setp(a_wt.yaxis.get_majorticklabels(), visible=False)
                a_wt.yaxis.set_ticks_position('none')
                # Plot the cutoff weight as a horizontal line and the first sample
                # which is included as a vertical bar. Note that this won't be quite
                # the right behavior if the weights are not roughly monotonic.
                if cutoff_weight is not None:
                    a_wt.axhline(cutoff_weight * weights.max(), linestyle='-', color='r')
                    wi, = scipy.where(weights >= cutoff_weight * weights.max())
                    a_wt.axvline(wi[0], linestyle='-', color='r')
            if burn > 0:
                axes[-1, i].axvline(burn, color='r', linewidth=3)
            if points is not None:
                for p, c in zip(points, colors):
                    axes[-1, i].axhline(y=p[i], linewidth=3, color=c)
            # Reset the xlim since it seems to get messed up:
            axes[-1, i].set_xlim(left=0)
            # try:
            #     [axes[-1, i].axhline(y=pt, linewidth=3) for pt in points[i]]
            # except TypeError:
            #     axes[-1, i].axhline(y=points[i], linewidth=3)
            if label_chain_y:
                axes[-1, i].set_ylabel(labels[i], fontsize=chain_label_fontsize)
            axes[-1, i].set_xlabel('step', fontsize=chain_label_fontsize)
            plt.setp(axes[-1, i].xaxis.get_majorticklabels(), rotation=xticklabel_angle)
            for tick in axes[-1, i].get_yaxis().get_major_ticks():
                tick.set_pad(chain_ytick_pad)
                tick.label1 = tick._get_text1()

    for i in xrange(0, k):
        if max_hist_ticks is not None:
            axes[k - 1, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1))
            axes[i, 0].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_hist_ticks - 1))
        if plot_chains and max_chain_ticks is not None:
            axes[k, i].yaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1))
            axes[k, i].xaxis.set_major_locator(plt.MaxNLocator(nbins=max_chain_ticks - 1))
        if plot_chains and hide_chain_yticklabels:
            plt.setp(axes[k, i].get_yticklabels(), visible=False)

    if suptitle is not None:
        f.suptitle(suptitle)
    f.canvas.draw()
    return f
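To make the square-histogram geometry above concrete, here is a quick numerical check of the height computation using the documented default layout values; the matplotlib default figure width of 6.4 inches is an assumption about the installed rcParams, not something fixed by this source.

import matplotlib

# Documented defaults: l=0.1, r=0.9, suptitle_space=0.1 (so t1 = 0.9),
# and, with plot_chains=True, b1 = t2 + bottom_sep = 0.2 + 0.075 = 0.275.
l, r = 0.1, 0.9
t1 = 1 - 0.1
b1 = 0.2 + 0.075
fixed_width = matplotlib.rcParams['figure.figsize'][0]  # typically 6.4

# Height that makes each histogram cell square, as in the code above:
fixed_height = fixed_width * (r - l) / (t1 - b1)
print(fixed_height)  # 6.4 * 0.8 / 0.625 = 8.192 inches with the defaults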
[ "def", "plot_sampler", "(", "sampler", ",", "suptitle", "=", "None", ",", "labels", "=", "None", ",", "bins", "=", "50", ",", "plot_samples", "=", "False", ",", "plot_hist", "=", "True", ",", "plot_chains", "=", "True", ",", "burn", "=", "0", ",", "chain_mask", "=", "None", ",", "temp_idx", "=", "0", ",", "weights", "=", "None", ",", "cutoff_weight", "=", "None", ",", "cmap", "=", "'gray_r'", ",", "hist_color", "=", "'k'", ",", "chain_alpha", "=", "0.1", ",", "points", "=", "None", ",", "covs", "=", "None", ",", "colors", "=", "None", ",", "ci", "=", "[", "0.95", "]", ",", "max_hist_ticks", "=", "None", ",", "max_chain_ticks", "=", "6", ",", "label_chain_y", "=", "False", ",", "hide_chain_yticklabels", "=", "False", ",", "chain_ytick_pad", "=", "2.0", ",", "label_fontsize", "=", "None", ",", "ticklabel_fontsize", "=", "None", ",", "chain_label_fontsize", "=", "None", ",", "chain_ticklabel_fontsize", "=", "None", ",", "xticklabel_angle", "=", "90.0", ",", "bottom_sep", "=", "0.075", ",", "suptitle_space", "=", "0.1", ",", "fixed_height", "=", "None", ",", "fixed_width", "=", "None", ",", "l", "=", "0.1", ",", "r", "=", "0.9", ",", "t1", "=", "None", ",", "b1", "=", "None", ",", "t2", "=", "0.2", ",", "b2", "=", "0.1", ",", "ax_space", "=", "0.1", ")", ":", "masked_weights", "=", "None", "if", "points", "is", "not", "None", ":", "points", "=", "scipy", ".", "atleast_2d", "(", "points", ")", "if", "covs", "is", "not", "None", "and", "len", "(", "covs", ")", "!=", "len", "(", "points", ")", ":", "raise", "ValueError", "(", "\"If covariance matrices are provided, len(covs) must equal len(points)!\"", ")", "elif", "covs", "is", "None", ":", "covs", "=", "[", "None", ",", "]", "*", "len", "(", "points", ")", "if", "colors", "is", "None", ":", "c_cycle", "=", "itertools", ".", "cycle", "(", "[", "'b'", ",", "'g'", ",", "'r'", ",", "'c'", ",", "'m'", ",", "'y'", ",", "'k'", "]", ")", "colors", "=", "[", "c_cycle", ".", "next", "(", ")", "for", "p", "in", "points", "]", "# Create axes:", "try", ":", "k", "=", "sampler", ".", "flatchain", ".", "shape", "[", "-", "1", "]", "except", "AttributeError", ":", "# Assumes array input is only case where there is no \"flatchain\" attribute.", "k", "=", "sampler", ".", "shape", "[", "-", "1", "]", "if", "labels", "is", "None", ":", "labels", "=", "[", "''", "]", "*", "k", "# Set up geometry:", "# plot_chains =", "# True: False:", "# +-----------+ +-----------+", "# | +-------+ | | +-------+ |", "# | | | | | | | |", "# | | | | | | | |", "# | | | | | | | |", "# | +-------+ | | +-------+ |", "# | +-------+ | +-----------+", "# | | | |", "# | +-------+ |", "# +-----------+", "# We retain support for the original suptitle_space keyword, but can", "# override with t1 as needed:", "if", "t1", "is", "None", ":", "t1", "=", "1", "-", "suptitle_space", "# We retain support for the original bottom_sep keyword, but can override", "# with b1 as needed:", "if", "b1", "is", "None", ":", "if", "plot_chains", ":", "b1", "=", "t2", "+", "bottom_sep", "else", ":", "b1", "=", "0.1", "if", "fixed_height", "is", "None", "and", "fixed_width", "is", "None", ":", "# Default: use matplotlib's default width, handle remaining parameters", "# with the fixed width case below:", "fixed_width", "=", "matplotlib", ".", "rcParams", "[", "'figure.figsize'", "]", "[", "0", "]", "if", "fixed_height", "is", "None", "and", "fixed_width", "is", "not", "None", ":", "# Only width specified, compute height to yield square histograms:", "fixed_height", "=", "fixed_width", "*", "(", "r", "-", "l", ")", 
"/", "(", "t1", "-", "b1", ")", "elif", "fixed_height", "is", "not", "None", "and", "fixed_width", "is", "None", ":", "# Only height specified, compute width to yield square histograms", "fixed_width", "=", "fixed_height", "*", "(", "t1", "-", "b1", ")", "/", "(", "r", "-", "l", ")", "# Otherwise width and height are fixed, and we may not have square", "# histograms, at the user's discretion.", "wspace", "=", "ax_space", "hspace", "=", "ax_space", "# gs1 is the histograms, gs2 is the chains:", "f", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "fixed_width", ",", "fixed_height", ")", ")", "gs1", "=", "mplgs", ".", "GridSpec", "(", "k", ",", "k", ")", "gs1", ".", "update", "(", "bottom", "=", "b1", ",", "top", "=", "t1", ",", "left", "=", "l", ",", "right", "=", "r", ",", "wspace", "=", "wspace", ",", "hspace", "=", "hspace", ")", "if", "plot_chains", ":", "gs2", "=", "mplgs", ".", "GridSpec", "(", "1", ",", "k", ")", "gs2", ".", "update", "(", "bottom", "=", "b2", ",", "top", "=", "t2", ",", "left", "=", "l", ",", "right", "=", "r", ",", "wspace", "=", "wspace", ",", "hspace", "=", "hspace", ")", "axes", "=", "[", "]", "# j is the row, i is the column.", "for", "j", "in", "xrange", "(", "0", ",", "k", "+", "int", "(", "plot_chains", ")", ")", ":", "row", "=", "[", "]", "for", "i", "in", "xrange", "(", "0", ",", "k", ")", ":", "if", "i", ">", "j", ":", "row", ".", "append", "(", "None", ")", "else", ":", "sharey", "=", "row", "[", "-", "1", "]", "if", "i", ">", "0", "and", "i", "<", "j", "and", "j", "<", "k", "else", "None", "sharex", "=", "axes", "[", "-", "1", "]", "[", "i", "]", "if", "j", ">", "i", "and", "j", "<", "k", "else", "(", "row", "[", "-", "1", "]", "if", "i", ">", "0", "and", "j", "==", "k", "else", "None", ")", "gs", "=", "gs1", "[", "j", ",", "i", "]", "if", "j", "<", "k", "else", "gs2", "[", ":", ",", "i", "]", "row", ".", "append", "(", "f", ".", "add_subplot", "(", "gs", ",", "sharey", "=", "sharey", ",", "sharex", "=", "sharex", ")", ")", "if", "j", "<", "k", "and", "ticklabel_fontsize", "is", "not", "None", ":", "row", "[", "-", "1", "]", ".", "tick_params", "(", "labelsize", "=", "ticklabel_fontsize", ")", "elif", "j", ">=", "k", "and", "chain_ticklabel_fontsize", "is", "not", "None", ":", "row", "[", "-", "1", "]", ".", "tick_params", "(", "labelsize", "=", "chain_ticklabel_fontsize", ")", "axes", ".", "append", "(", "row", ")", "axes", "=", "scipy", ".", "asarray", "(", "axes", ")", "# Update axes with the data:", "if", "isinstance", "(", "sampler", ",", "emcee", ".", "EnsembleSampler", ")", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "chain", ".", "shape", "[", "0", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", ".", "chain", "[", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "elif", "isinstance", "(", "sampler", ",", "emcee", ".", "PTSampler", ")", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "nwalkers", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", ".", "chain", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "elif", "isinstance", "(", "sampler", ",", "scipy", ".", "ndarray", ")", ":", "if", "sampler", ".", "ndim", "==", "4", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", 
"ones", "(", "sampler", ".", "shape", "[", "1", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "elif", "sampler", ".", "ndim", "==", "3", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "shape", "[", "0", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", "[", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "chain_mask", ",", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "elif", "sampler", ".", "ndim", "==", "2", ":", "flat_trace", "=", "sampler", "[", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "if", "cutoff_weight", "is", "not", "None", "and", "weights", "is", "not", "None", ":", "mask", "=", "weights", ">=", "cutoff_weight", "*", "weights", ".", "max", "(", ")", "flat_trace", "=", "flat_trace", "[", "mask", ",", ":", "]", "masked_weights", "=", "weights", "[", "mask", "]", "else", ":", "masked_weights", "=", "weights", "else", ":", "raise", "ValueError", "(", "\"Unknown sampler class: %s\"", "%", "(", "type", "(", "sampler", ")", ",", ")", ")", "# j is the row, i is the column.", "for", "i", "in", "xrange", "(", "0", ",", "k", ")", ":", "axes", "[", "i", ",", "i", "]", ".", "clear", "(", ")", "if", "plot_hist", ":", "axes", "[", "i", ",", "i", "]", ".", "hist", "(", "flat_trace", "[", ":", ",", "i", "]", ",", "bins", "=", "bins", ",", "color", "=", "hist_color", ",", "weights", "=", "masked_weights", ",", "normed", "=", "True", ",", "histtype", "=", "'stepfilled'", ")", "if", "plot_samples", ":", "axes", "[", "i", ",", "i", "]", ".", "plot", "(", "flat_trace", "[", ":", ",", "i", "]", ",", "scipy", ".", "zeros_like", "(", "flat_trace", "[", ":", ",", "i", "]", ")", ",", "','", ",", "alpha", "=", "0.1", ")", "if", "points", "is", "not", "None", ":", "# axvline can only take a scalar x, so we have to loop:", "for", "p", ",", "c", ",", "cov", "in", "zip", "(", "points", ",", "colors", ",", "covs", ")", ":", "axes", "[", "i", ",", "i", "]", ".", "axvline", "(", "x", "=", "p", "[", "i", "]", ",", "linewidth", "=", "3", ",", "color", "=", "c", ")", "if", "cov", "is", "not", "None", ":", "xlim", "=", "axes", "[", "i", ",", "i", "]", ".", "get_xlim", "(", ")", "i_grid", "=", "scipy", ".", "linspace", "(", "xlim", "[", "0", "]", ",", "xlim", "[", "1", "]", ",", "100", ")", "axes", "[", "i", ",", "i", "]", ".", "plot", "(", "i_grid", ",", "scipy", ".", "stats", ".", "norm", ".", "pdf", "(", "i_grid", ",", "loc", "=", "p", "[", "i", "]", ",", "scale", "=", "scipy", ".", "sqrt", "(", "cov", "[", "i", ",", "i", "]", ")", ")", ",", "c", ",", "linewidth", "=", "3.0", ")", "axes", "[", "i", ",", "i", "]", ".", "set_xlim", "(", "xlim", ")", "if", "i", "==", "k", "-", "1", ":", "axes", "[", "i", ",", "i", "]", ".", "set_xlabel", "(", "labels", "[", "i", "]", ",", "fontsize", "=", 
"label_fontsize", ")", "plt", ".", "setp", "(", "axes", "[", "i", ",", "i", "]", ".", "xaxis", ".", "get_majorticklabels", "(", ")", ",", "rotation", "=", "xticklabel_angle", ")", "if", "i", "<", "k", "-", "1", ":", "plt", ".", "setp", "(", "axes", "[", "i", ",", "i", "]", ".", "get_xticklabels", "(", ")", ",", "visible", "=", "False", ")", "plt", ".", "setp", "(", "axes", "[", "i", ",", "i", "]", ".", "get_yticklabels", "(", ")", ",", "visible", "=", "False", ")", "for", "j", "in", "xrange", "(", "i", "+", "1", ",", "k", ")", ":", "axes", "[", "j", ",", "i", "]", ".", "clear", "(", ")", "if", "plot_hist", ":", "ct", ",", "x", ",", "y", ",", "im", "=", "axes", "[", "j", ",", "i", "]", ".", "hist2d", "(", "flat_trace", "[", ":", ",", "i", "]", ",", "flat_trace", "[", ":", ",", "j", "]", ",", "bins", "=", "bins", ",", "cmap", "=", "cmap", ",", "weights", "=", "masked_weights", ")", "if", "plot_samples", ":", "axes", "[", "j", ",", "i", "]", ".", "plot", "(", "flat_trace", "[", ":", ",", "i", "]", ",", "flat_trace", "[", ":", ",", "j", "]", ",", "','", ",", "alpha", "=", "0.1", ")", "if", "points", "is", "not", "None", ":", "for", "p", ",", "c", ",", "cov", "in", "zip", "(", "points", ",", "colors", ",", "covs", ")", ":", "axes", "[", "j", ",", "i", "]", ".", "plot", "(", "p", "[", "i", "]", ",", "p", "[", "j", "]", ",", "'o'", ",", "color", "=", "c", ")", "if", "cov", "is", "not", "None", ":", "Sigma", "=", "scipy", ".", "asarray", "(", "[", "[", "cov", "[", "i", ",", "i", "]", ",", "cov", "[", "i", ",", "j", "]", "]", ",", "[", "cov", "[", "j", ",", "i", "]", ",", "cov", "[", "j", ",", "j", "]", "]", "]", ",", "dtype", "=", "float", ")", "lam", ",", "v", "=", "scipy", ".", "linalg", ".", "eigh", "(", "Sigma", ")", "chi2", "=", "[", "-", "scipy", ".", "log", "(", "1.0", "-", "cival", ")", "*", "2.0", "for", "cival", "in", "ci", "]", "a", "=", "[", "2.0", "*", "scipy", ".", "sqrt", "(", "chi2val", "*", "lam", "[", "-", "1", "]", ")", "for", "chi2val", "in", "chi2", "]", "b", "=", "[", "2.0", "*", "scipy", ".", "sqrt", "(", "chi2val", "*", "lam", "[", "-", "2", "]", ")", "for", "chi2val", "in", "chi2", "]", "ang", "=", "scipy", ".", "arctan2", "(", "v", "[", "1", ",", "-", "1", "]", ",", "v", "[", "0", ",", "-", "1", "]", ")", "for", "aval", ",", "bval", "in", "zip", "(", "a", ",", "b", ")", ":", "ell", "=", "mplp", ".", "Ellipse", "(", "[", "p", "[", "i", "]", ",", "p", "[", "j", "]", "]", ",", "aval", ",", "bval", ",", "angle", "=", "scipy", ".", "degrees", "(", "ang", ")", ",", "facecolor", "=", "'none'", ",", "edgecolor", "=", "c", ",", "linewidth", "=", "3", ")", "axes", "[", "j", ",", "i", "]", ".", "add_artist", "(", "ell", ")", "# axes[j, i].plot(points[i], points[j], 'o')", "# xmid = 0.5 * (x[1:] + x[:-1])", "# ymid = 0.5 * (y[1:] + y[:-1])", "# axes[j, i].contour(xmid, ymid, ct.T, colors='k')", "if", "j", "<", "k", "-", "1", ":", "plt", ".", "setp", "(", "axes", "[", "j", ",", "i", "]", ".", "get_xticklabels", "(", ")", ",", "visible", "=", "False", ")", "if", "i", "!=", "0", ":", "plt", ".", "setp", "(", "axes", "[", "j", ",", "i", "]", ".", "get_yticklabels", "(", ")", ",", "visible", "=", "False", ")", "if", "i", "==", "0", ":", "axes", "[", "j", ",", "i", "]", ".", "set_ylabel", "(", "labels", "[", "j", "]", ",", "fontsize", "=", "label_fontsize", ")", "if", "j", "==", "k", "-", "1", ":", "axes", "[", "j", ",", "i", "]", ".", "set_xlabel", "(", "labels", "[", "i", "]", ",", "fontsize", "=", "label_fontsize", ")", "plt", ".", "setp", "(", "axes", "[", "j", ",", "i", "]", ".", 
"xaxis", ".", "get_majorticklabels", "(", ")", ",", "rotation", "=", "xticklabel_angle", ")", "if", "plot_chains", ":", "axes", "[", "-", "1", ",", "i", "]", ".", "clear", "(", ")", "if", "isinstance", "(", "sampler", ",", "emcee", ".", "EnsembleSampler", ")", ":", "axes", "[", "-", "1", ",", "i", "]", ".", "plot", "(", "sampler", ".", "chain", "[", ":", ",", ":", ",", "i", "]", ".", "T", ",", "alpha", "=", "chain_alpha", ")", "elif", "isinstance", "(", "sampler", ",", "emcee", ".", "PTSampler", ")", ":", "axes", "[", "-", "1", ",", "i", "]", ".", "plot", "(", "sampler", ".", "chain", "[", "temp_idx", ",", ":", ",", ":", ",", "i", "]", ".", "T", ",", "alpha", "=", "chain_alpha", ")", "else", ":", "if", "sampler", ".", "ndim", "==", "4", ":", "axes", "[", "-", "1", ",", "i", "]", ".", "plot", "(", "sampler", "[", "temp_idx", ",", ":", ",", ":", ",", "i", "]", ".", "T", ",", "alpha", "=", "chain_alpha", ")", "elif", "sampler", ".", "ndim", "==", "3", ":", "axes", "[", "-", "1", ",", "i", "]", ".", "plot", "(", "sampler", "[", ":", ",", ":", ",", "i", "]", ".", "T", ",", "alpha", "=", "chain_alpha", ")", "elif", "sampler", ".", "ndim", "==", "2", ":", "axes", "[", "-", "1", ",", "i", "]", ".", "plot", "(", "sampler", "[", ":", ",", "i", "]", ".", "T", ",", "alpha", "=", "chain_alpha", ")", "# Plot the weights on top of the chains:", "if", "weights", "is", "not", "None", ":", "a_wt", "=", "axes", "[", "-", "1", ",", "i", "]", ".", "twinx", "(", ")", "a_wt", ".", "plot", "(", "weights", ",", "alpha", "=", "chain_alpha", ",", "linestyle", "=", "'--'", ",", "color", "=", "'r'", ")", "plt", ".", "setp", "(", "a_wt", ".", "yaxis", ".", "get_majorticklabels", "(", ")", ",", "visible", "=", "False", ")", "a_wt", ".", "yaxis", ".", "set_ticks_position", "(", "'none'", ")", "# Plot the cutoff weight as a horizontal line and the first sample", "# which is included as a vertical bar. 
Note that this won't be quite", "# the right behavior if the weights are not roughly monotonic.", "if", "cutoff_weight", "is", "not", "None", ":", "a_wt", ".", "axhline", "(", "cutoff_weight", "*", "weights", ".", "max", "(", ")", ",", "linestyle", "=", "'-'", ",", "color", "=", "'r'", ")", "wi", ",", "=", "scipy", ".", "where", "(", "weights", ">=", "cutoff_weight", "*", "weights", ".", "max", "(", ")", ")", "a_wt", ".", "axvline", "(", "wi", "[", "0", "]", ",", "linestyle", "=", "'-'", ",", "color", "=", "'r'", ")", "if", "burn", ">", "0", ":", "axes", "[", "-", "1", ",", "i", "]", ".", "axvline", "(", "burn", ",", "color", "=", "'r'", ",", "linewidth", "=", "3", ")", "if", "points", "is", "not", "None", ":", "for", "p", ",", "c", "in", "zip", "(", "points", ",", "colors", ")", ":", "axes", "[", "-", "1", ",", "i", "]", ".", "axhline", "(", "y", "=", "p", "[", "i", "]", ",", "linewidth", "=", "3", ",", "color", "=", "c", ")", "# Reset the xlim since it seems to get messed up:", "axes", "[", "-", "1", ",", "i", "]", ".", "set_xlim", "(", "left", "=", "0", ")", "# try:", "# [axes[-1, i].axhline(y=pt, linewidth=3) for pt in points[i]]", "# except TypeError:", "# axes[-1, i].axhline(y=points[i], linewidth=3)", "if", "label_chain_y", ":", "axes", "[", "-", "1", ",", "i", "]", ".", "set_ylabel", "(", "labels", "[", "i", "]", ",", "fontsize", "=", "chain_label_fontsize", ")", "axes", "[", "-", "1", ",", "i", "]", ".", "set_xlabel", "(", "'step'", ",", "fontsize", "=", "chain_label_fontsize", ")", "plt", ".", "setp", "(", "axes", "[", "-", "1", ",", "i", "]", ".", "xaxis", ".", "get_majorticklabels", "(", ")", ",", "rotation", "=", "xticklabel_angle", ")", "for", "tick", "in", "axes", "[", "-", "1", ",", "i", "]", ".", "get_yaxis", "(", ")", ".", "get_major_ticks", "(", ")", ":", "tick", ".", "set_pad", "(", "chain_ytick_pad", ")", "tick", ".", "label1", "=", "tick", ".", "_get_text1", "(", ")", "for", "i", "in", "xrange", "(", "0", ",", "k", ")", ":", "if", "max_hist_ticks", "is", "not", "None", ":", "axes", "[", "k", "-", "1", ",", "i", "]", ".", "xaxis", ".", "set_major_locator", "(", "plt", ".", "MaxNLocator", "(", "nbins", "=", "max_hist_ticks", "-", "1", ")", ")", "axes", "[", "i", ",", "0", "]", ".", "yaxis", ".", "set_major_locator", "(", "plt", ".", "MaxNLocator", "(", "nbins", "=", "max_hist_ticks", "-", "1", ")", ")", "if", "plot_chains", "and", "max_chain_ticks", "is", "not", "None", ":", "axes", "[", "k", ",", "i", "]", ".", "yaxis", ".", "set_major_locator", "(", "plt", ".", "MaxNLocator", "(", "nbins", "=", "max_chain_ticks", "-", "1", ")", ")", "axes", "[", "k", ",", "i", "]", ".", "xaxis", ".", "set_major_locator", "(", "plt", ".", "MaxNLocator", "(", "nbins", "=", "max_chain_ticks", "-", "1", ")", ")", "if", "plot_chains", "and", "hide_chain_yticklabels", ":", "plt", ".", "setp", "(", "axes", "[", "k", ",", "i", "]", ".", "get_yticklabels", "(", ")", ",", "visible", "=", "False", ")", "if", "suptitle", "is", "not", "None", ":", "f", ".", "suptitle", "(", "suptitle", ")", "f", ".", "canvas", ".", "draw", "(", ")", "return", "f" ]
Plot the results of an MCMC sampler (posterior and chains).

Loosely based on triangle.py. Provides extensive options to format the
plot.

Parameters
----------
sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
    The sampler to plot the chains/marginals of. Can also be an array of
    samples which matches the shape of the `chain` attribute that would be
    present in a :py:class:`emcee.Sampler` instance.
suptitle : str, optional
    The figure title to place at the top. Default is no title.
labels : list of str, optional
    The labels to use for each of the free parameters. Default is to leave
    the axes unlabeled.
bins : int, optional
    Number of bins to use for the histograms. Default is 50.
plot_samples : bool, optional
    If True, the samples are plotted as individual points. Default is
    False.
plot_hist : bool, optional
    If True, histograms are plotted. Default is True.
plot_chains : bool, optional
    If True, plot the sampler chains at the bottom. Default is True.
burn : int, optional
    The number of samples to burn before making the marginal histograms.
    Default is zero (use all samples).
chain_mask : (index) array, optional
    Mask identifying the chains to keep before plotting, in case there are
    bad chains. Default is to use all chains.
temp_idx : int, optional
    Index of the temperature to plot when plotting a
    :py:class:`emcee.PTSampler`. Default is 0 (samples from the
    posterior).
weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
    The weight for each sample. This is useful for post-processing the
    output from MultiNest sampling, for instance. Default is to not weight
    the samples.
cutoff_weight : float, optional
    If `weights` and `cutoff_weight` are present, points with
    `weights < cutoff_weight * weights.max()` will be excluded. Default is
    to plot all points.
cmap : str, optional
    The colormap to use for the histograms. Default is 'gray_r'.
hist_color : str, optional
    The color to use for the univariate histograms. Default is 'k'.
chain_alpha : float, optional
    The transparency to use for the plots of the individual chains.
    Setting this to something low lets you better visualize what is going
    on. Default is 0.1.
points : array, (`D`,) or (`N`, `D`), optional
    Array of point(s) to plot onto each marginal and chain. Default is
    None.
covs : array, (`D`, `D`) or (`N`, `D`, `D`), optional
    Covariance matrix or array of covariance matrices to plot onto each
    marginal. If you do not want to plot a covariance matrix for a
    specific point, set its corresponding entry to `None`. Default is to
    not plot confidence ellipses for any points.
colors : array of str, (`N`,), optional
    The colors to use for the points in `points`. Default is to use the
    standard matplotlib RGBCMYK cycle.
ci : array, (`num_ci`,), optional
    List of confidence intervals to plot for each non-`None` entry in
    `covs`. Default is 0.95 (just plot the 95 percent confidence
    interval).
max_hist_ticks : int, optional
    The maximum number of ticks for the histogram plots. Default is None
    (no limit).
max_chain_ticks : int, optional
    The maximum number of y-axis ticks for the chain plots. Default is 6.
label_chain_y : bool, optional
    If True, the chain plots will have y axis labels. Default is False.
hide_chain_yticklabels : bool, optional
    If True, hide the y axis tick labels for the chain plots. Default is
    False (show y tick labels).
chain_ytick_pad : float, optional
    The padding (in points) between the y-axis tick labels and the axis
    for the chain plots. Default is 2.0.
label_fontsize : float, optional
    The font size (in points) to use for the axis labels. Default is
    `axes.labelsize`.
ticklabel_fontsize : float, optional
    The font size (in points) to use for the axis tick labels. Default is
    `xtick.labelsize`.
chain_label_fontsize : float, optional
    The font size (in points) to use for the labels of the chain axes.
    Default is `axes.labelsize`.
chain_ticklabel_fontsize : float, optional
    The font size (in points) to use for the chain axis tick labels.
    Default is `xtick.labelsize`.
xticklabel_angle : float, optional
    The angle to rotate the x tick labels, in degrees. Default is 90.
bottom_sep : float, optional
    The separation (in relative figure units) between the chains and the
    marginals. Default is 0.075.
suptitle_space : float, optional
    The amount of space (in relative figure units) to leave for a figure
    title. Default is 0.1.
fixed_height : float, optional
    The desired figure height (in inches). Default is to automatically
    adjust based on `fixed_width` to make the subplots square.
fixed_width : float, optional
    The desired figure width (in inches). Default is `figure.figsize[0]`.
l : float, optional
    The location (in relative figure units) of the left margin. Default is
    0.1.
r : float, optional
    The location (in relative figure units) of the right margin. Default
    is 0.9.
t1 : float, optional
    The location (in relative figure units) of the top of the grid of
    histograms. Overrides `suptitle_space` if present.
b1 : float, optional
    The location (in relative figure units) of the bottom of the grid of
    histograms. Overrides `bottom_sep` if present. Defaults to 0.1 if
    `plot_chains` is False.
t2 : float, optional
    The location (in relative figure units) of the top of the grid of
    chain plots. Default is 0.2.
b2 : float, optional
    The location (in relative figure units) of the bottom of the grid of
    chain plots. Default is 0.1.
ax_space : float, optional
    The `wspace` and `hspace` to use (in relative figure units). Default
    is 0.1.
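The `ci` values documented above are converted to ellipse sizes via -2*ln(1 - ci), which is exactly the chi-squared quantile with two degrees of freedom (appropriate for a 2D confidence ellipse). A minimal sketch verifying that identity; scipy is assumed available:

from math import log

import scipy.stats

ci = 0.95
chi2_val = -2.0 * log(1.0 - ci)
print(chi2_val)                     # ~5.9915
print(scipy.stats.chi2.ppf(ci, 2))  # the same quantile, ~5.9915
# The plotting code then draws full axis lengths 2*sqrt(chi2_val * eigval)
# from the eigendecomposition of the 2x2 covariance block.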
[ "Plot", "the", "results", "of", "MCMC", "sampler", "(", "posterior", "and", "chains", ")", ".", "Loosely", "based", "on", "triangle", ".", "py", ".", "Provides", "extensive", "options", "to", "format", "the", "plot", ".", "Parameters", "----------", "sampler", ":", ":", "py", ":", "class", ":", "emcee", ".", "Sampler", "instance", "or", "array", "(", "n_temps", "n_chains", "n_samp", "n_dim", ")", "(", "n_chains", "n_samp", "n_dim", ")", "or", "(", "n_samp", "n_dim", ")", "The", "sampler", "to", "plot", "the", "chains", "/", "marginals", "of", ".", "Can", "also", "be", "an", "array", "of", "samples", "which", "matches", "the", "shape", "of", "the", "chain", "attribute", "that", "would", "be", "present", "in", "a", ":", "py", ":", "class", ":", "emcee", ".", "Sampler", "instance", ".", "suptitle", ":", "str", "optional", "The", "figure", "title", "to", "place", "at", "the", "top", ".", "Default", "is", "no", "title", ".", "labels", ":", "list", "of", "str", "optional", "The", "labels", "to", "use", "for", "each", "of", "the", "free", "parameters", ".", "Default", "is", "to", "leave", "the", "axes", "unlabeled", ".", "bins", ":", "int", "optional", "Number", "of", "bins", "to", "use", "for", "the", "histograms", ".", "Default", "is", "50", ".", "plot_samples", ":", "bool", "optional", "If", "True", "the", "samples", "are", "plotted", "as", "individual", "points", ".", "Default", "is", "False", ".", "plot_hist", ":", "bool", "optional", "If", "True", "histograms", "are", "plotted", ".", "Default", "is", "True", ".", "plot_chains", ":", "bool", "optional", "If", "True", "plot", "the", "sampler", "chains", "at", "the", "bottom", ".", "Default", "is", "True", ".", "burn", ":", "int", "optional", "The", "number", "of", "samples", "to", "burn", "before", "making", "the", "marginal", "histograms", ".", "Default", "is", "zero", "(", "use", "all", "samples", ")", ".", "chain_mask", ":", "(", "index", ")", "array", "optional", "Mask", "identifying", "the", "chains", "to", "keep", "before", "plotting", "in", "case", "there", "are", "bad", "chains", ".", "Default", "is", "to", "use", "all", "chains", ".", "temp_idx", ":", "int", "optional", "Index", "of", "the", "temperature", "to", "plot", "when", "plotting", "a", ":", "py", ":", "class", ":", "emcee", ".", "PTSampler", ".", "Default", "is", "0", "(", "samples", "from", "the", "posterior", ")", ".", "weights", ":", "array", "(", "n_temps", "n_chains", "n_samp", ")", "(", "n_chains", "n_samp", ")", "or", "(", "n_samp", ")", "optional", "The", "weight", "for", "each", "sample", ".", "This", "is", "useful", "for", "post", "-", "processing", "the", "output", "from", "MultiNest", "sampling", "for", "instance", ".", "Default", "is", "to", "not", "weight", "the", "samples", ".", "cutoff_weight", ":", "float", "optional", "If", "weights", "and", "cutoff_weight", "are", "present", "points", "with", "weights", "<", "cutoff_weight", "*", "weights", ".", "max", "()", "will", "be", "excluded", ".", "Default", "is", "to", "plot", "all", "points", ".", "cmap", ":", "str", "optional", "The", "colormap", "to", "use", "for", "the", "histograms", ".", "Default", "is", "gray_r", ".", "hist_color", ":", "str", "optional", "The", "color", "to", "use", "for", "the", "univariate", "histograms", ".", "Default", "is", "k", ".", "chain_alpha", ":", "float", "optional", "The", "transparency", "to", "use", "for", "the", "plots", "of", "the", "individual", "chains", ".", "Setting", "this", "to", "something", "low", "lets", "you", "better", "visualize", "what", "is", "going", "on", 
".", "Default", "is", "0", ".", "1", ".", "points", ":", "array", "(", "D", ")", "or", "(", "N", "D", ")", "optional", "Array", "of", "point", "(", "s", ")", "to", "plot", "onto", "each", "marginal", "and", "chain", ".", "Default", "is", "None", ".", "covs", ":", "array", "(", "D", "D", ")", "or", "(", "N", "D", "D", ")", "optional", "Covariance", "matrix", "or", "array", "of", "covariance", "matrices", "to", "plot", "onto", "each", "marginal", ".", "If", "you", "do", "not", "want", "to", "plot", "a", "covariance", "matrix", "for", "a", "specific", "point", "set", "its", "corresponding", "entry", "to", "None", ".", "Default", "is", "to", "not", "plot", "confidence", "ellipses", "for", "any", "points", ".", "colors", ":", "array", "of", "str", "(", "N", ")", "optional", "The", "colors", "to", "use", "for", "the", "points", "in", "points", ".", "Default", "is", "to", "use", "the", "standard", "matplotlib", "RGBCMYK", "cycle", ".", "ci", ":", "array", "(", "num_ci", ")", "optional", "List", "of", "confidence", "intervals", "to", "plot", "for", "each", "non", "-", "None", "entry", "in", "covs", ".", "Default", "is", "0", ".", "95", "(", "just", "plot", "the", "95", "percent", "confidence", "interval", ")", ".", "max_hist_ticks", ":", "int", "optional", "The", "maximum", "number", "of", "ticks", "for", "the", "histogram", "plots", ".", "Default", "is", "None", "(", "no", "limit", ")", ".", "max_chain_ticks", ":", "int", "optional", "The", "maximum", "number", "of", "y", "-", "axis", "ticks", "for", "the", "chain", "plots", ".", "Default", "is", "6", ".", "label_chain_y", ":", "bool", "optional", "If", "True", "the", "chain", "plots", "will", "have", "y", "axis", "labels", ".", "Default", "is", "False", ".", "hide_chain_yticklabels", ":", "bool", "optional", "If", "True", "hide", "the", "y", "axis", "tick", "labels", "for", "the", "chain", "plots", ".", "Default", "is", "False", "(", "show", "y", "tick", "labels", ")", ".", "chain_ytick_pad", ":", "float", "optional", "The", "padding", "(", "in", "points", ")", "between", "the", "y", "-", "axis", "tick", "labels", "and", "the", "axis", "for", "the", "chain", "plots", ".", "Default", "is", "2", ".", "0", ".", "label_fontsize", ":", "float", "optional", "The", "font", "size", "(", "in", "points", ")", "to", "use", "for", "the", "axis", "labels", ".", "Default", "is", "axes", ".", "labelsize", ".", "ticklabel_fontsize", ":", "float", "optional", "The", "font", "size", "(", "in", "points", ")", "to", "use", "for", "the", "axis", "tick", "labels", ".", "Default", "is", "xtick", ".", "labelsize", ".", "chain_label_fontsize", ":", "float", "optional", "The", "font", "size", "(", "in", "points", ")", "to", "use", "for", "the", "labels", "of", "the", "chain", "axes", ".", "Default", "is", "axes", ".", "labelsize", ".", "chain_ticklabel_fontsize", ":", "float", "optional", "The", "font", "size", "(", "in", "points", ")", "to", "use", "for", "the", "chain", "axis", "tick", "labels", ".", "Default", "is", "xtick", ".", "labelsize", ".", "xticklabel_angle", ":", "float", "optional", "The", "angle", "to", "rotate", "the", "x", "tick", "labels", "in", "degrees", ".", "Default", "is", "90", ".", "bottom_sep", ":", "float", "optional", "The", "separation", "(", "in", "relative", "figure", "units", ")", "between", "the", "chains", "and", "the", "marginals", ".", "Default", "is", "0", ".", "075", ".", "suptitle_space", ":", "float", "optional", "The", "amount", "of", "space", "(", "in", "relative", "figure", "units", ")", "to", "leave", "for", "a", "figure", 
"title", ".", "Default", "is", "0", ".", "1", ".", "fixed_height", ":", "float", "optional", "The", "desired", "figure", "height", "(", "in", "inches", ")", ".", "Default", "is", "to", "automatically", "adjust", "based", "on", "fixed_width", "to", "make", "the", "subplots", "square", ".", "fixed_width", ":", "float", "optional", "The", "desired", "figure", "width", "(", "in", "inches", ")", ".", "Default", "is", "figure", ".", "figsize", "[", "0", "]", ".", "l", ":", "float", "optional", "The", "location", "(", "in", "relative", "figure", "units", ")", "of", "the", "left", "margin", ".", "Default", "is", "0", ".", "1", ".", "r", ":", "float", "optional", "The", "location", "(", "in", "relative", "figure", "units", ")", "of", "the", "right", "margin", ".", "Default", "is", "0", ".", "9", ".", "t1", ":", "float", "optional", "The", "location", "(", "in", "relative", "figure", "units", ")", "of", "the", "top", "of", "the", "grid", "of", "histograms", ".", "Overrides", "suptitle_space", "if", "present", ".", "b1", ":", "float", "optional", "The", "location", "(", "in", "relative", "figure", "units", ")", "of", "the", "bottom", "of", "the", "grid", "of", "histograms", ".", "Overrides", "bottom_sep", "if", "present", ".", "Defaults", "to", "0", ".", "1", "if", "plot_chains", "is", "False", ".", "t2", ":", "float", "optional", "The", "location", "(", "in", "relative", "figure", "units", ")", "of", "the", "top", "of", "the", "grid", "of", "chain", "plots", ".", "Default", "is", "0", ".", "2", ".", "b2", ":", "float", "optional", "The", "location", "(", "in", "relative", "figure", "units", ")", "of", "the", "bottom", "of", "the", "grid", "of", "chain", "plots", ".", "Default", "is", "0", ".", "1", ".", "ax_space", ":", "float", "optional", "The", "w_space", "and", "h_space", "to", "use", "(", "in", "relative", "figure", "units", ")", ".", "Default", "is", "0", ".", "1", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L2032-L2439
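For orientation, a minimal usage sketch of plot_sampler follows. The toy log-posterior, walker counts, and labels are all illustrative, not from the source; note that this gptools code targets the Python 2 / emcee 2 era (it reads `sampler.chain` directly and uses `xrange`), so a compatible environment is assumed.

import numpy as np
import emcee
from gptools.utils import plot_sampler

# Hypothetical 2-parameter standard-normal log-posterior:
def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)

ndim, nwalkers = 2, 32
p0 = np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
sampler.run_mcmc(p0, 500)

# Triangle plot of the marginals plus chain traces, discarding 100
# burn-in steps from the histograms:
f = plot_sampler(
    sampler,
    labels=['theta_0', 'theta_1'],
    burn=100,
    suptitle='toy posterior'
)
f.savefig('sampler_summary.png')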
markchil/gptools
gptools/utils.py
plot_sampler_fingerprint
def plot_sampler_fingerprint(
    sampler, hyperprior, weights=None, cutoff_weight=None, nbins=None,
    labels=None, burn=0, chain_mask=None, temp_idx=0, points=None,
    plot_samples=False, sample_color='k', point_color=None, point_lw=3,
    title='', rot_x_labels=False, figsize=None
):
    """Make a plot of the sampler's "fingerprint": univariate marginal histograms for all hyperparameters.

    The hyperparameters are mapped to [0, 1] using
    :py:meth:`hyperprior.elementwise_cdf`, so this can only be used with
    prior distributions which implement this function.

    Returns the figure and axis created.

    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
        The sampler to plot the chains/marginals of. Can also be an array
        of samples which matches the shape of the `chain` attribute that
        would be present in a :py:class:`emcee.Sampler` instance.
    hyperprior : :py:class:`~gptools.utils.JointPrior` instance
        The joint prior distribution for the hyperparameters. Used to map
        the values to [0, 1] so that the hyperparameters can all be shown
        on the same axis.
    weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
        The weight for each sample. This is useful for post-processing the
        output from MultiNest sampling, for instance.
    cutoff_weight : float, optional
        If `weights` and `cutoff_weight` are present, points with
        `weights < cutoff_weight * weights.max()` will be excluded. Default
        is to plot all points.
    nbins : int or array of int, (`D`,), optional
        The number of bins dividing [0, 1] to use for each histogram. If a
        single int is given, this is used for all of the hyperparameters.
        If an array of ints is given, these are the numbers of bins for
        each of the hyperparameters. The default is to determine the number
        of bins using the Freedman-Diaconis rule.
    labels : array of str, (`D`,), optional
        The labels for each hyperparameter. Default is to use empty
        strings.
    burn : int, optional
        The number of samples to burn before making the marginal
        histograms. Default is zero (use all samples).
    chain_mask : (index) array, optional
        Mask identifying the chains to keep before plotting, in case there
        are bad chains. Default is to use all chains.
    temp_idx : int, optional
        Index of the temperature to plot when plotting a
        :py:class:`emcee.PTSampler`. Default is 0 (samples from the
        posterior).
    points : array, (`D`,) or (`N`, `D`), optional
        Array of point(s) to plot as horizontal lines. Default is None.
    plot_samples : bool, optional
        If True, the samples are plotted as horizontal lines. Default is
        False.
    sample_color : str, optional
        The color to plot the samples in. Default is 'k', meaning black.
    point_color : str or list of str, optional
        The color to plot the individual points in. Default is to loop
        through matplotlib's default color sequence. If a list is provided,
        it will be cycled through.
    point_lw : float, optional
        Line width to use when plotting the individual points.
    title : str, optional
        Title to use for the plot.
    rot_x_labels : bool, optional
        If True, the labels for the x-axis are rotated 90 degrees. Default
        is False (do not rotate labels).
    figsize : 2-tuple, optional
        The figure size to use. Default is to use the matplotlib default.
    """
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain" attribute.
        k = sampler.shape[-1]

    # Process the samples:
    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
        if cutoff_weight is not None and weights is not None:
            mask = weights >= cutoff_weight * weights.max()
            flat_trace = flat_trace[mask, :]
            weights = weights[mask]
    else:
        raise ValueError("Unknown sampler class: %s" % (type(sampler),))

    if labels is None:
        labels = [''] * k

    u = scipy.asarray([hyperprior.elementwise_cdf(p) for p in flat_trace], dtype=float).T

    if nbins is None:
        lq, uq = scipy.stats.scoreatpercentile(u, [25, 75], axis=1)
        h = 2.0 * (uq - lq) / u.shape[0]**(1.0 / 3.0)
        n = scipy.asarray(scipy.ceil(1.0 / h), dtype=int)
    else:
        try:
            iter(nbins)
            n = nbins
        except TypeError:
            n = nbins * scipy.ones(u.shape[0])
    hist = [
        scipy.stats.histogram(uv, numbins=nv, defaultlimits=[0, 1], weights=weights)
        for uv, nv in zip(u, n)
    ]
    max_ct = max([max(h.count) for h in hist])
    min_ct = min([min(h.count) for h in hist])

    f = plt.figure(figsize=figsize)
    a = f.add_subplot(1, 1, 1)
    for i, (h, pn) in enumerate(zip(hist, labels)):
        a.imshow(
            scipy.atleast_2d(scipy.asarray(h.count[::-1], dtype=float)).T,
            cmap='gray_r',
            interpolation='nearest',
            vmin=min_ct,
            vmax=max_ct,
            extent=(i, i + 1, 0, 1),
            aspect='auto'
        )
    if plot_samples:
        for p in u:
            for i, uv in enumerate(p):
                a.plot([i, i + 1], [uv, uv], sample_color, alpha=0.1)
    if points is not None:
        points = scipy.atleast_2d(scipy.asarray(points, dtype=float))
        u_points = [hyperprior.elementwise_cdf(p) for p in points]
        if point_color is None:
            c_cycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
        else:
            c_cycle = itertools.cycle(scipy.atleast_1d(point_color))
        for p in u_points:
            c = c_cycle.next()
            for i, uv in enumerate(p):
                a.plot([i, i + 1], [uv, uv], color=c, lw=point_lw)
    a.set_xlim(0, len(hist))
    a.set_ylim(0, 1)
    a.set_xticks(0.5 + scipy.arange(0, len(hist), dtype=float))
    a.set_xticklabels(labels)
    if rot_x_labels:
        plt.setp(a.xaxis.get_majorticklabels(), rotation=90)
    a.set_xlabel("parameter")
    a.set_ylabel("$u=F_P(p)$")
    a.set_title(title)
    return f, a
python
def plot_sampler_fingerprint(
    sampler, hyperprior, weights=None, cutoff_weight=None, nbins=None,
    labels=None, burn=0, chain_mask=None, temp_idx=0, points=None,
    plot_samples=False, sample_color='k', point_color=None, point_lw=3,
    title='', rot_x_labels=False, figsize=None
):
    """Make a plot of the sampler's "fingerprint": univariate marginal histograms for all hyperparameters.

    The hyperparameters are mapped to [0, 1] using
    :py:meth:`hyperprior.elementwise_cdf`, so this can only be used with
    prior distributions which implement this function.

    Returns the figure and axis created.

    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
        The sampler to plot the chains/marginals of. Can also be an array
        of samples which matches the shape of the `chain` attribute that
        would be present in a :py:class:`emcee.Sampler` instance.
    hyperprior : :py:class:`~gptools.utils.JointPrior` instance
        The joint prior distribution for the hyperparameters. Used to map
        the values to [0, 1] so that the hyperparameters can all be shown
        on the same axis.
    weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
        The weight for each sample. This is useful for post-processing the
        output from MultiNest sampling, for instance.
    cutoff_weight : float, optional
        If `weights` and `cutoff_weight` are present, points with
        `weights < cutoff_weight * weights.max()` will be excluded. Default
        is to plot all points.
    nbins : int or array of int, (`D`,), optional
        The number of bins dividing [0, 1] to use for each histogram. If a
        single int is given, this is used for all of the hyperparameters.
        If an array of ints is given, these are the numbers of bins for
        each of the hyperparameters. The default is to determine the number
        of bins using the Freedman-Diaconis rule.
    labels : array of str, (`D`,), optional
        The labels for each hyperparameter. Default is to use empty
        strings.
    burn : int, optional
        The number of samples to burn before making the marginal
        histograms. Default is zero (use all samples).
    chain_mask : (index) array, optional
        Mask identifying the chains to keep before plotting, in case there
        are bad chains. Default is to use all chains.
    temp_idx : int, optional
        Index of the temperature to plot when plotting a
        :py:class:`emcee.PTSampler`. Default is 0 (samples from the
        posterior).
    points : array, (`D`,) or (`N`, `D`), optional
        Array of point(s) to plot as horizontal lines. Default is None.
    plot_samples : bool, optional
        If True, the samples are plotted as horizontal lines. Default is
        False.
    sample_color : str, optional
        The color to plot the samples in. Default is 'k', meaning black.
    point_color : str or list of str, optional
        The color to plot the individual points in. Default is to loop
        through matplotlib's default color sequence. If a list is provided,
        it will be cycled through.
    point_lw : float, optional
        Line width to use when plotting the individual points.
    title : str, optional
        Title to use for the plot.
    rot_x_labels : bool, optional
        If True, the labels for the x-axis are rotated 90 degrees. Default
        is False (do not rotate labels).
    figsize : 2-tuple, optional
        The figure size to use. Default is to use the matplotlib default.
    """
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain" attribute.
        k = sampler.shape[-1]

    # Process the samples:
    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
        if cutoff_weight is not None and weights is not None:
            mask = weights >= cutoff_weight * weights.max()
            flat_trace = flat_trace[mask, :]
            weights = weights[mask]
    else:
        raise ValueError("Unknown sampler class: %s" % (type(sampler),))

    if labels is None:
        labels = [''] * k

    u = scipy.asarray([hyperprior.elementwise_cdf(p) for p in flat_trace], dtype=float).T

    if nbins is None:
        lq, uq = scipy.stats.scoreatpercentile(u, [25, 75], axis=1)
        h = 2.0 * (uq - lq) / u.shape[0]**(1.0 / 3.0)
        n = scipy.asarray(scipy.ceil(1.0 / h), dtype=int)
    else:
        try:
            iter(nbins)
            n = nbins
        except TypeError:
            n = nbins * scipy.ones(u.shape[0])
    hist = [
        scipy.stats.histogram(uv, numbins=nv, defaultlimits=[0, 1], weights=weights)
        for uv, nv in zip(u, n)
    ]
    max_ct = max([max(h.count) for h in hist])
    min_ct = min([min(h.count) for h in hist])

    f = plt.figure(figsize=figsize)
    a = f.add_subplot(1, 1, 1)
    for i, (h, pn) in enumerate(zip(hist, labels)):
        a.imshow(
            scipy.atleast_2d(scipy.asarray(h.count[::-1], dtype=float)).T,
            cmap='gray_r',
            interpolation='nearest',
            vmin=min_ct,
            vmax=max_ct,
            extent=(i, i + 1, 0, 1),
            aspect='auto'
        )
    if plot_samples:
        for p in u:
            for i, uv in enumerate(p):
                a.plot([i, i + 1], [uv, uv], sample_color, alpha=0.1)
    if points is not None:
        points = scipy.atleast_2d(scipy.asarray(points, dtype=float))
        u_points = [hyperprior.elementwise_cdf(p) for p in points]
        if point_color is None:
            c_cycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
        else:
            c_cycle = itertools.cycle(scipy.atleast_1d(point_color))
        for p in u_points:
            c = c_cycle.next()
            for i, uv in enumerate(p):
                a.plot([i, i + 1], [uv, uv], color=c, lw=point_lw)
    a.set_xlim(0, len(hist))
    a.set_ylim(0, 1)
    a.set_xticks(0.5 + scipy.arange(0, len(hist), dtype=float))
    a.set_xticklabels(labels)
    if rot_x_labels:
        plt.setp(a.xaxis.get_majorticklabels(), rotation=90)
    a.set_xlabel("parameter")
    a.set_ylabel("$u=F_P(p)$")
    a.set_title(title)
    return f, a
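The automatic binning in the code above is the Freedman-Diaconis rule applied on [0, 1]: the bin width is h = 2*IQR/N**(1/3), and since the mapped samples span the unit interval the bin count is ceil(1/h). A quick numerical check with illustrative values:

import numpy as np

u = np.random.rand(10000)  # stand-in for one row of the mapped samples
lq, uq = np.percentile(u, [25, 75])
h = 2.0 * (uq - lq) / len(u) ** (1.0 / 3.0)
nbins = int(np.ceil(1.0 / h))
print(nbins)  # close to 22 for uniform samples with N = 10000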
[ "def", "plot_sampler_fingerprint", "(", "sampler", ",", "hyperprior", ",", "weights", "=", "None", ",", "cutoff_weight", "=", "None", ",", "nbins", "=", "None", ",", "labels", "=", "None", ",", "burn", "=", "0", ",", "chain_mask", "=", "None", ",", "temp_idx", "=", "0", ",", "points", "=", "None", ",", "plot_samples", "=", "False", ",", "sample_color", "=", "'k'", ",", "point_color", "=", "None", ",", "point_lw", "=", "3", ",", "title", "=", "''", ",", "rot_x_labels", "=", "False", ",", "figsize", "=", "None", ")", ":", "try", ":", "k", "=", "sampler", ".", "flatchain", ".", "shape", "[", "-", "1", "]", "except", "AttributeError", ":", "# Assumes array input is only case where there is no \"flatchain\" attribute.", "k", "=", "sampler", ".", "shape", "[", "-", "1", "]", "# Process the samples:", "if", "isinstance", "(", "sampler", ",", "emcee", ".", "EnsembleSampler", ")", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "chain", ".", "shape", "[", "0", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", ".", "chain", "[", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "elif", "isinstance", "(", "sampler", ",", "emcee", ".", "PTSampler", ")", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "nwalkers", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", ".", "chain", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "elif", "isinstance", "(", "sampler", ",", "scipy", ".", "ndarray", ")", ":", "if", "sampler", ".", "ndim", "==", "4", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "shape", "[", "1", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "elif", "sampler", ".", "ndim", "==", "3", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "shape", "[", "0", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", "[", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "chain_mask", ",", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "elif", "sampler", ".", "ndim", "==", "2", ":", "flat_trace", "=", "sampler", "[", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "if", "cutoff_weight", "is", "not", "None", "and", "weights", "is", "not", "None", ":", "mask", "=", "weights", ">=", "cutoff_weight", "*", "weights", ".", "max", "(", ")", "flat_trace", "=", "flat_trace", "[", "mask", ",", ":", "]", "weights", "=", "weights", "[", "mask", "]", "else", ":", "raise", "ValueError", "(", "\"Unknown sampler class: 
%s\"", "%", "(", "type", "(", "sampler", ")", ",", ")", ")", "if", "labels", "is", "None", ":", "labels", "=", "[", "''", "]", "*", "k", "u", "=", "scipy", ".", "asarray", "(", "[", "hyperprior", ".", "elementwise_cdf", "(", "p", ")", "for", "p", "in", "flat_trace", "]", ",", "dtype", "=", "float", ")", ".", "T", "if", "nbins", "is", "None", ":", "lq", ",", "uq", "=", "scipy", ".", "stats", ".", "scoreatpercentile", "(", "u", ",", "[", "25", ",", "75", "]", ",", "axis", "=", "1", ")", "h", "=", "2.0", "*", "(", "uq", "-", "lq", ")", "/", "u", ".", "shape", "[", "0", "]", "**", "(", "1.0", "/", "3.0", ")", "n", "=", "scipy", ".", "asarray", "(", "scipy", ".", "ceil", "(", "1.0", "/", "h", ")", ",", "dtype", "=", "int", ")", "else", ":", "try", ":", "iter", "(", "nbins", ")", "n", "=", "nbins", "except", "TypeError", ":", "n", "=", "nbins", "*", "scipy", ".", "ones", "(", "u", ".", "shape", "[", "0", "]", ")", "hist", "=", "[", "scipy", ".", "stats", ".", "histogram", "(", "uv", ",", "numbins", "=", "nv", ",", "defaultlimits", "=", "[", "0", ",", "1", "]", ",", "weights", "=", "weights", ")", "for", "uv", ",", "nv", "in", "zip", "(", "u", ",", "n", ")", "]", "max_ct", "=", "max", "(", "[", "max", "(", "h", ".", "count", ")", "for", "h", "in", "hist", "]", ")", "min_ct", "=", "min", "(", "[", "min", "(", "h", ".", "count", ")", "for", "h", "in", "hist", "]", ")", "f", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "a", "=", "f", ".", "add_subplot", "(", "1", ",", "1", ",", "1", ")", "for", "i", ",", "(", "h", ",", "pn", ")", "in", "enumerate", "(", "zip", "(", "hist", ",", "labels", ")", ")", ":", "a", ".", "imshow", "(", "scipy", ".", "atleast_2d", "(", "scipy", ".", "asarray", "(", "h", ".", "count", "[", ":", ":", "-", "1", "]", ",", "dtype", "=", "float", ")", ")", ".", "T", ",", "cmap", "=", "'gray_r'", ",", "interpolation", "=", "'nearest'", ",", "vmin", "=", "min_ct", ",", "vmax", "=", "max_ct", ",", "extent", "=", "(", "i", ",", "i", "+", "1", ",", "0", ",", "1", ")", ",", "aspect", "=", "'auto'", ")", "if", "plot_samples", ":", "for", "p", "in", "u", ":", "for", "i", ",", "uv", "in", "enumerate", "(", "p", ")", ":", "a", ".", "plot", "(", "[", "i", ",", "i", "+", "1", "]", ",", "[", "uv", ",", "uv", "]", ",", "sample_color", ",", "alpha", "=", "0.1", ")", "if", "points", "is", "not", "None", ":", "points", "=", "scipy", ".", "atleast_2d", "(", "scipy", ".", "asarray", "(", "points", ",", "dtype", "=", "float", ")", ")", "u_points", "=", "[", "hyperprior", ".", "elementwise_cdf", "(", "p", ")", "for", "p", "in", "points", "]", "if", "point_color", "is", "None", ":", "c_cycle", "=", "itertools", ".", "cycle", "(", "[", "'b'", ",", "'g'", ",", "'r'", ",", "'c'", ",", "'m'", ",", "'y'", ",", "'k'", "]", ")", "else", ":", "c_cycle", "=", "itertools", ".", "cycle", "(", "scipy", ".", "atleast_1d", "(", "point_color", ")", ")", "for", "p", "in", "u_points", ":", "c", "=", "c_cycle", ".", "next", "(", ")", "for", "i", ",", "uv", "in", "enumerate", "(", "p", ")", ":", "a", ".", "plot", "(", "[", "i", ",", "i", "+", "1", "]", ",", "[", "uv", ",", "uv", "]", ",", "color", "=", "c", ",", "lw", "=", "point_lw", ")", "a", ".", "set_xlim", "(", "0", ",", "len", "(", "hist", ")", ")", "a", ".", "set_ylim", "(", "0", ",", "1", ")", "a", ".", "set_xticks", "(", "0.5", "+", "scipy", ".", "arange", "(", "0", ",", "len", "(", "hist", ")", ",", "dtype", "=", "float", ")", ")", "a", ".", "set_xticklabels", "(", "labels", ")", "if", "rot_x_labels", ":", "plt", ".", "setp", "(", "a", ".", 
"xaxis", ".", "get_majorticklabels", "(", ")", ",", "rotation", "=", "90", ")", "a", ".", "set_xlabel", "(", "\"parameter\"", ")", "a", ".", "set_ylabel", "(", "\"$u=F_P(p)$\"", ")", "a", ".", "set_title", "(", "title", ")", "return", "f", ",", "a" ]
Make a plot of the sampler's "fingerprint": univariate marginal histograms for all hyperparameters.

The hyperparameters are mapped to [0, 1] using
:py:meth:`hyperprior.elementwise_cdf`, so this can only be used with prior
distributions which implement this function.

Returns the figure and axis created.

Parameters
----------
sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
    The sampler to plot the chains/marginals of. Can also be an array of
    samples which matches the shape of the `chain` attribute that would be
    present in a :py:class:`emcee.Sampler` instance.
hyperprior : :py:class:`~gptools.utils.JointPrior` instance
    The joint prior distribution for the hyperparameters. Used to map the
    values to [0, 1] so that the hyperparameters can all be shown on the
    same axis.
weights : array, (`n_temps`, `n_chains`, `n_samp`), (`n_chains`, `n_samp`) or (`n_samp`,), optional
    The weight for each sample. This is useful for post-processing the
    output from MultiNest sampling, for instance.
cutoff_weight : float, optional
    If `weights` and `cutoff_weight` are present, points with
    `weights < cutoff_weight * weights.max()` will be excluded. Default is
    to plot all points.
nbins : int or array of int, (`D`,), optional
    The number of bins dividing [0, 1] to use for each histogram. If a
    single int is given, this is used for all of the hyperparameters. If
    an array of ints is given, these are the numbers of bins for each of
    the hyperparameters. The default is to determine the number of bins
    using the Freedman-Diaconis rule.
labels : array of str, (`D`,), optional
    The labels for each hyperparameter. Default is to use empty strings.
burn : int, optional
    The number of samples to burn before making the marginal histograms.
    Default is zero (use all samples).
chain_mask : (index) array, optional
    Mask identifying the chains to keep before plotting, in case there
    are bad chains. Default is to use all chains.
temp_idx : int, optional
    Index of the temperature to plot when plotting a
    :py:class:`emcee.PTSampler`. Default is 0 (samples from the
    posterior).
points : array, (`D`,) or (`N`, `D`), optional
    Array of point(s) to plot as horizontal lines. Default is None.
plot_samples : bool, optional
    If True, the samples are plotted as horizontal lines. Default is
    False.
sample_color : str, optional
    The color to plot the samples in. Default is 'k', meaning black.
point_color : str or list of str, optional
    The color to plot the individual points in. Default is to loop
    through matplotlib's default color sequence. If a list is provided,
    it will be cycled through.
point_lw : float, optional
    Line width to use when plotting the individual points.
title : str, optional
    Title to use for the plot.
rot_x_labels : bool, optional
    If True, the labels for the x-axis are rotated 90 degrees. Default is
    False (do not rotate labels).
figsize : 2-tuple, optional
    The figure size to use. Default is to use the matplotlib default.
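A minimal usage sketch, with heavy hedging: `hyperprior` must be a gptools JointPrior exposing elementwise_cdf(), and the `kernel.hyperprior` attribute used to obtain it here is an assumption about the caller's setup; the sample array and labels are illustrative. The function also relies on the old scipy.stats.histogram and Python 2 iterator API, so a matching environment is assumed.

import numpy as np
from gptools.utils import plot_sampler_fingerprint

# Assumed: `kernel` is an already-configured gptools kernel whose
# hyperprior implements elementwise_cdf() (hypothetical object):
hyperprior = kernel.hyperprior

# Stand-in for sampler.chain: 32 chains, 500 samples, 3 hyperparameters:
samples = np.random.rand(32, 500, 3)

f, a = plot_sampler_fingerprint(
    samples,
    hyperprior,
    labels=['l1', 'l2', 'sigma_f'],
    burn=100,
    rot_x_labels=True
)
f.savefig('fingerprint.png')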
[ "Make", "a", "plot", "of", "the", "sampler", "s", "fingerprint", ":", "univariate", "marginal", "histograms", "for", "all", "hyperparameters", ".", "The", "hyperparameters", "are", "mapped", "to", "[", "0", "1", "]", "using", ":", "py", ":", "meth", ":", "hyperprior", ".", "elementwise_cdf", "so", "this", "can", "only", "be", "used", "with", "prior", "distributions", "which", "implement", "this", "function", ".", "Returns", "the", "figure", "and", "axis", "created", ".", "Parameters", "----------", "sampler", ":", ":", "py", ":", "class", ":", "emcee", ".", "Sampler", "instance", "or", "array", "(", "n_temps", "n_chains", "n_samp", "n_dim", ")", "(", "n_chains", "n_samp", "n_dim", ")", "or", "(", "n_samp", "n_dim", ")", "The", "sampler", "to", "plot", "the", "chains", "/", "marginals", "of", ".", "Can", "also", "be", "an", "array", "of", "samples", "which", "matches", "the", "shape", "of", "the", "chain", "attribute", "that", "would", "be", "present", "in", "a", ":", "py", ":", "class", ":", "emcee", ".", "Sampler", "instance", ".", "hyperprior", ":", ":", "py", ":", "class", ":", "~gptools", ".", "utils", ".", "JointPrior", "instance", "The", "joint", "prior", "distribution", "for", "the", "hyperparameters", ".", "Used", "to", "map", "the", "values", "to", "[", "0", "1", "]", "so", "that", "the", "hyperparameters", "can", "all", "be", "shown", "on", "the", "same", "axis", ".", "weights", ":", "array", "(", "n_temps", "n_chains", "n_samp", ")", "(", "n_chains", "n_samp", ")", "or", "(", "n_samp", ")", "optional", "The", "weight", "for", "each", "sample", ".", "This", "is", "useful", "for", "post", "-", "processing", "the", "output", "from", "MultiNest", "sampling", "for", "instance", ".", "cutoff_weight", ":", "float", "optional", "If", "weights", "and", "cutoff_weight", "are", "present", "points", "with", "weights", "<", "cutoff_weight", "*", "weights", ".", "max", "()", "will", "be", "excluded", ".", "Default", "is", "to", "plot", "all", "points", ".", "nbins", ":", "int", "or", "array", "of", "int", "(", "D", ")", "optional", "The", "number", "of", "bins", "dividing", "[", "0", "1", "]", "to", "use", "for", "each", "histogram", ".", "If", "a", "single", "int", "is", "given", "this", "is", "used", "for", "all", "of", "the", "hyperparameters", ".", "If", "an", "array", "of", "ints", "is", "given", "these", "are", "the", "numbers", "of", "bins", "for", "each", "of", "the", "hyperparameters", ".", "The", "default", "is", "to", "determine", "the", "number", "of", "bins", "using", "the", "Freedman", "-", "Diaconis", "rule", ".", "labels", ":", "array", "of", "str", "(", "D", ")", "optional", "The", "labels", "for", "each", "hyperparameter", ".", "Default", "is", "to", "use", "empty", "strings", ".", "burn", ":", "int", "optional", "The", "number", "of", "samples", "to", "burn", "before", "making", "the", "marginal", "histograms", ".", "Default", "is", "zero", "(", "use", "all", "samples", ")", ".", "chain_mask", ":", "(", "index", ")", "array", "optional", "Mask", "identifying", "the", "chains", "to", "keep", "before", "plotting", "in", "case", "there", "are", "bad", "chains", ".", "Default", "is", "to", "use", "all", "chains", ".", "temp_idx", ":", "int", "optional", "Index", "of", "the", "temperature", "to", "plot", "when", "plotting", "a", ":", "py", ":", "class", ":", "emcee", ".", "PTSampler", ".", "Default", "is", "0", "(", "samples", "from", "the", "posterior", ")", ".", "points", ":", "array", "(", "D", ")", "or", "(", "N", "D", ")", "optional", "Array", "of", "point", "(", "s", 
")", "to", "plot", "as", "horizontal", "lines", ".", "Default", "is", "None", ".", "plot_samples", ":", "bool", "optional", "If", "True", "the", "samples", "are", "plotted", "as", "horizontal", "lines", ".", "Default", "is", "False", ".", "sample_color", ":", "str", "optional", "The", "color", "to", "plot", "the", "samples", "in", ".", "Default", "is", "k", "meaning", "black", ".", "point_color", ":", "str", "or", "list", "of", "str", "optional", "The", "color", "to", "plot", "the", "individual", "points", "in", ".", "Default", "is", "to", "loop", "through", "matplotlib", "s", "default", "color", "sequence", ".", "If", "a", "list", "is", "provided", "it", "will", "be", "cycled", "through", ".", "point_lw", ":", "float", "optional", "Line", "width", "to", "use", "when", "plotting", "the", "individual", "points", ".", "title", ":", "str", "optional", "Title", "to", "use", "for", "the", "plot", ".", "rot_x_labels", ":", "bool", "optional", "If", "True", "the", "labels", "for", "the", "x", "-", "axis", "are", "rotated", "90", "degrees", ".", "Default", "is", "False", "(", "do", "not", "rotate", "labels", ")", ".", "figsize", ":", "2", "-", "tuple", "optional", "The", "figure", "size", "to", "use", ".", "Default", "is", "to", "use", "the", "matplotlib", "default", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L2441-L2614
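A minimal usage sketch for `plot_sampler_fingerprint`, not taken from the repository: the `UniformJointPrior` constructor taking a list of (lower, upper) bounds is an assumption, and a synthetic array in the documented (`n_chains`, `n_samp`, `n_dim`) layout stands in for a real `emcee` sampler.

# Hypothetical sketch; the bounds, labels and shapes below are made up.
import numpy
from gptools.utils import UniformJointPrior, plot_sampler_fingerprint

prior = UniformJointPrior([(0.0, 5.0), (0.0, 2.0)])  # assumed constructor
# Fake chain array in the documented (n_chains, n_samp, n_dim) layout:
samples = numpy.transpose(prior.random_draw(size=(4, 1000)), (1, 2, 0))
f, a = plot_sampler_fingerprint(
    samples, prior, labels=[r'$\sigma_f$', r'$l$'], burn=100
)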
markchil/gptools
gptools/utils.py
plot_sampler_cov
def plot_sampler_cov(
    sampler, method='corr', weights=None, cutoff_weight=None, labels=None,
    burn=0, chain_mask=None, temp_idx=0, cbar_label=None, title='',
    rot_x_labels=False, figsize=None, xlabel_on_top=True
):
    """Make a plot of the sampler's correlation or covariance matrix.

    Returns the figure and axis created.

    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
        The sampler to plot the chains/marginals of. Can also be an array of
        samples which matches the shape of the `chain` attribute that would
        be present in a :py:class:`emcee.Sampler` instance.
    method : {'corr', 'cov'}
        Whether to plot the correlation matrix ('corr') or the covariance
        matrix ('cov'). The covariance matrix is often not useful because
        different parameters have wildly different scales. Default is to
        plot the correlation matrix.
    labels : array of str, (`D`,), optional
        The labels for each hyperparameter. Default is to use empty strings.
    burn : int, optional
        The number of samples to burn before making the marginal histograms.
        Default is zero (use all samples).
    chain_mask : (index) array, optional
        Mask identifying the chains to keep before plotting, in case there
        are bad chains. Default is to use all chains.
    temp_idx : int, optional
        Index of the temperature to plot when plotting a
        :py:class:`emcee.PTSampler`. Default is 0 (samples from the
        posterior).
    cbar_label : str, optional
        The label to use for the colorbar. The default is chosen based on
        the value of the `method` keyword.
    title : str, optional
        Title to use for the plot.
    rot_x_labels : bool, optional
        If True, the labels for the x-axis are rotated 90 degrees. Default
        is False (do not rotate labels).
    figsize : 2-tuple, optional
        The figure size to use. Default is to use the matplotlib default.
    xlabel_on_top : bool, optional
        If True, the x-axis labels are put on top (the way mathematicians
        present matrices). Default is True.
    """
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain" attribute.
        k = sampler.shape[-1]
    # Process the samples:
    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
        if cutoff_weight is not None and weights is not None:
            mask = weights >= cutoff_weight * weights.max()
            flat_trace = flat_trace[mask, :]
            weights = weights[mask]
    else:
        raise ValueError("Unknown sampler class: %s" % (type(sampler),))
    if labels is None:
        labels = [''] * k
    if cbar_label is None:
        cbar_label = r'$\mathrm{cov}(p_1, p_2)$' if method == 'cov' else r'$\mathrm{corr}(p_1, p_2)$'
    if weights is None:
        if method == 'corr':
            cov = scipy.corrcoef(flat_trace, rowvar=0, ddof=1)
        else:
            cov = scipy.cov(flat_trace, rowvar=0, ddof=1)
    else:
        cov = scipy.cov(flat_trace, rowvar=0, aweights=weights)
        if method == 'corr':
            stds = scipy.sqrt(scipy.diag(cov))
            STD_1, STD_2 = scipy.meshgrid(stds, stds)
            cov = cov / (STD_1 * STD_2)
    f_cov = plt.figure(figsize=figsize)
    a_cov = f_cov.add_subplot(1, 1, 1)
    a_cov.set_title(title)
    if method == 'cov':
        vmax = scipy.absolute(cov).max()
    else:
        vmax = 1.0
    cax = a_cov.pcolor(cov, cmap='seismic', vmin=-1 * vmax, vmax=vmax)
    divider = make_axes_locatable(a_cov)
    a_cb = divider.append_axes("right", size="10%", pad=0.05)
    cbar = f_cov.colorbar(cax, cax=a_cb, label=cbar_label)
    a_cov.set_xlabel('parameter')
    a_cov.set_ylabel('parameter')
    a_cov.axis('square')
    a_cov.invert_yaxis()
    if xlabel_on_top:
        a_cov.xaxis.tick_top()
        a_cov.xaxis.set_label_position('top')
    a_cov.set_xticks(0.5 + scipy.arange(0, flat_trace.shape[1], dtype=float))
    a_cov.set_yticks(0.5 + scipy.arange(0, flat_trace.shape[1], dtype=float))
    a_cov.set_xticklabels(labels)
    if rot_x_labels:
        plt.setp(a_cov.xaxis.get_majorticklabels(), rotation=90)
    a_cov.set_yticklabels(labels)
    a_cov.set_xlim(0, flat_trace.shape[1])
    a_cov.set_ylim(flat_trace.shape[1], 0)
    return f_cov, a_cov
python
def plot_sampler_cov(
    sampler, method='corr', weights=None, cutoff_weight=None, labels=None,
    burn=0, chain_mask=None, temp_idx=0, cbar_label=None, title='',
    rot_x_labels=False, figsize=None, xlabel_on_top=True
):
    """Make a plot of the sampler's correlation or covariance matrix.

    Returns the figure and axis created.

    Parameters
    ----------
    sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
        The sampler to plot the chains/marginals of. Can also be an array of
        samples which matches the shape of the `chain` attribute that would
        be present in a :py:class:`emcee.Sampler` instance.
    method : {'corr', 'cov'}
        Whether to plot the correlation matrix ('corr') or the covariance
        matrix ('cov'). The covariance matrix is often not useful because
        different parameters have wildly different scales. Default is to
        plot the correlation matrix.
    labels : array of str, (`D`,), optional
        The labels for each hyperparameter. Default is to use empty strings.
    burn : int, optional
        The number of samples to burn before making the marginal histograms.
        Default is zero (use all samples).
    chain_mask : (index) array, optional
        Mask identifying the chains to keep before plotting, in case there
        are bad chains. Default is to use all chains.
    temp_idx : int, optional
        Index of the temperature to plot when plotting a
        :py:class:`emcee.PTSampler`. Default is 0 (samples from the
        posterior).
    cbar_label : str, optional
        The label to use for the colorbar. The default is chosen based on
        the value of the `method` keyword.
    title : str, optional
        Title to use for the plot.
    rot_x_labels : bool, optional
        If True, the labels for the x-axis are rotated 90 degrees. Default
        is False (do not rotate labels).
    figsize : 2-tuple, optional
        The figure size to use. Default is to use the matplotlib default.
    xlabel_on_top : bool, optional
        If True, the x-axis labels are put on top (the way mathematicians
        present matrices). Default is True.
    """
    try:
        k = sampler.flatchain.shape[-1]
    except AttributeError:
        # Assumes array input is only case where there is no "flatchain" attribute.
        k = sampler.shape[-1]
    # Process the samples:
    if isinstance(sampler, emcee.EnsembleSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.chain.shape[0], dtype=bool)
        flat_trace = sampler.chain[chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, emcee.PTSampler):
        if chain_mask is None:
            chain_mask = scipy.ones(sampler.nwalkers, dtype=bool)
        flat_trace = sampler.chain[temp_idx, chain_mask, burn:, :]
        flat_trace = flat_trace.reshape((-1, k))
    elif isinstance(sampler, scipy.ndarray):
        if sampler.ndim == 4:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[1], dtype=bool)
            flat_trace = sampler[temp_idx, chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[temp_idx, chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 3:
            if chain_mask is None:
                chain_mask = scipy.ones(sampler.shape[0], dtype=bool)
            flat_trace = sampler[chain_mask, burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[chain_mask, burn:]
                weights = weights.ravel()
        elif sampler.ndim == 2:
            flat_trace = sampler[burn:, :]
            flat_trace = flat_trace.reshape((-1, k))
            if weights is not None:
                weights = weights[burn:]
                weights = weights.ravel()
        if cutoff_weight is not None and weights is not None:
            mask = weights >= cutoff_weight * weights.max()
            flat_trace = flat_trace[mask, :]
            weights = weights[mask]
    else:
        raise ValueError("Unknown sampler class: %s" % (type(sampler),))
    if labels is None:
        labels = [''] * k
    if cbar_label is None:
        cbar_label = r'$\mathrm{cov}(p_1, p_2)$' if method == 'cov' else r'$\mathrm{corr}(p_1, p_2)$'
    if weights is None:
        if method == 'corr':
            cov = scipy.corrcoef(flat_trace, rowvar=0, ddof=1)
        else:
            cov = scipy.cov(flat_trace, rowvar=0, ddof=1)
    else:
        cov = scipy.cov(flat_trace, rowvar=0, aweights=weights)
        if method == 'corr':
            stds = scipy.sqrt(scipy.diag(cov))
            STD_1, STD_2 = scipy.meshgrid(stds, stds)
            cov = cov / (STD_1 * STD_2)
    f_cov = plt.figure(figsize=figsize)
    a_cov = f_cov.add_subplot(1, 1, 1)
    a_cov.set_title(title)
    if method == 'cov':
        vmax = scipy.absolute(cov).max()
    else:
        vmax = 1.0
    cax = a_cov.pcolor(cov, cmap='seismic', vmin=-1 * vmax, vmax=vmax)
    divider = make_axes_locatable(a_cov)
    a_cb = divider.append_axes("right", size="10%", pad=0.05)
    cbar = f_cov.colorbar(cax, cax=a_cb, label=cbar_label)
    a_cov.set_xlabel('parameter')
    a_cov.set_ylabel('parameter')
    a_cov.axis('square')
    a_cov.invert_yaxis()
    if xlabel_on_top:
        a_cov.xaxis.tick_top()
        a_cov.xaxis.set_label_position('top')
    a_cov.set_xticks(0.5 + scipy.arange(0, flat_trace.shape[1], dtype=float))
    a_cov.set_yticks(0.5 + scipy.arange(0, flat_trace.shape[1], dtype=float))
    a_cov.set_xticklabels(labels)
    if rot_x_labels:
        plt.setp(a_cov.xaxis.get_majorticklabels(), rotation=90)
    a_cov.set_yticklabels(labels)
    a_cov.set_xlim(0, flat_trace.shape[1])
    a_cov.set_ylim(flat_trace.shape[1], 0)
    return f_cov, a_cov
[ "def", "plot_sampler_cov", "(", "sampler", ",", "method", "=", "'corr'", ",", "weights", "=", "None", ",", "cutoff_weight", "=", "None", ",", "labels", "=", "None", ",", "burn", "=", "0", ",", "chain_mask", "=", "None", ",", "temp_idx", "=", "0", ",", "cbar_label", "=", "None", ",", "title", "=", "''", ",", "rot_x_labels", "=", "False", ",", "figsize", "=", "None", ",", "xlabel_on_top", "=", "True", ")", ":", "try", ":", "k", "=", "sampler", ".", "flatchain", ".", "shape", "[", "-", "1", "]", "except", "AttributeError", ":", "# Assumes array input is only case where there is no \"flatchain\" attribute.", "k", "=", "sampler", ".", "shape", "[", "-", "1", "]", "# Process the samples:", "if", "isinstance", "(", "sampler", ",", "emcee", ".", "EnsembleSampler", ")", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "chain", ".", "shape", "[", "0", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", ".", "chain", "[", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "elif", "isinstance", "(", "sampler", ",", "emcee", ".", "PTSampler", ")", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "nwalkers", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", ".", "chain", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "elif", "isinstance", "(", "sampler", ",", "scipy", ".", "ndarray", ")", ":", "if", "sampler", ".", "ndim", "==", "4", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "shape", "[", "1", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "temp_idx", ",", "chain_mask", ",", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "elif", "sampler", ".", "ndim", "==", "3", ":", "if", "chain_mask", "is", "None", ":", "chain_mask", "=", "scipy", ".", "ones", "(", "sampler", ".", "shape", "[", "0", "]", ",", "dtype", "=", "bool", ")", "flat_trace", "=", "sampler", "[", "chain_mask", ",", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "chain_mask", ",", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "elif", "sampler", ".", "ndim", "==", "2", ":", "flat_trace", "=", "sampler", "[", "burn", ":", ",", ":", "]", "flat_trace", "=", "flat_trace", ".", "reshape", "(", "(", "-", "1", ",", "k", ")", ")", "if", "weights", "is", "not", "None", ":", "weights", "=", "weights", "[", "burn", ":", "]", "weights", "=", "weights", ".", "ravel", "(", ")", "if", "cutoff_weight", "is", "not", "None", "and", "weights", "is", "not", "None", ":", "mask", "=", "weights", ">=", "cutoff_weight", "*", "weights", ".", "max", "(", ")", "flat_trace", "=", "flat_trace", "[", "mask", ",", ":", "]", "weights", "=", "weights", "[", "mask", "]", "else", ":", "raise", "ValueError", "(", "\"Unknown sampler class: %s\"", "%", "(", "type", "(", "sampler", ")", ",", ")", ")", "if", "labels", "is", "None", ":", "labels", "=", 
"[", "''", "]", "*", "k", "if", "cbar_label", "is", "None", ":", "cbar_label", "=", "r'$\\mathrm{cov}(p_1, p_2)$'", "if", "method", "==", "'cov'", "else", "r'$\\mathrm{corr}(p_1, p_2)$'", "if", "weights", "is", "None", ":", "if", "method", "==", "'corr'", ":", "cov", "=", "scipy", ".", "corrcoef", "(", "flat_trace", ",", "rowvar", "=", "0", ",", "ddof", "=", "1", ")", "else", ":", "cov", "=", "scipy", ".", "cov", "(", "flat_trace", ",", "rowvar", "=", "0", ",", "ddof", "=", "1", ")", "else", ":", "cov", "=", "scipy", ".", "cov", "(", "flat_trace", ",", "rowvar", "=", "0", ",", "aweights", "=", "weights", ")", "if", "method", "==", "'corr'", ":", "stds", "=", "scipy", ".", "sqrt", "(", "scipy", ".", "diag", "(", "cov", ")", ")", "STD_1", ",", "STD_2", "=", "scipy", ".", "meshgrid", "(", "stds", ",", "stds", ")", "cov", "=", "cov", "/", "(", "STD_1", "*", "STD_2", ")", "f_cov", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "a_cov", "=", "f_cov", ".", "add_subplot", "(", "1", ",", "1", ",", "1", ")", "a_cov", ".", "set_title", "(", "title", ")", "if", "method", "==", "'cov'", ":", "vmax", "=", "scipy", ".", "absolute", "(", "cov", ")", ".", "max", "(", ")", "else", ":", "vmax", "=", "1.0", "cax", "=", "a_cov", ".", "pcolor", "(", "cov", ",", "cmap", "=", "'seismic'", ",", "vmin", "=", "-", "1", "*", "vmax", ",", "vmax", "=", "vmax", ")", "divider", "=", "make_axes_locatable", "(", "a_cov", ")", "a_cb", "=", "divider", ".", "append_axes", "(", "\"right\"", ",", "size", "=", "\"10%\"", ",", "pad", "=", "0.05", ")", "cbar", "=", "f_cov", ".", "colorbar", "(", "cax", ",", "cax", "=", "a_cb", ",", "label", "=", "cbar_label", ")", "a_cov", ".", "set_xlabel", "(", "'parameter'", ")", "a_cov", ".", "set_ylabel", "(", "'parameter'", ")", "a_cov", ".", "axis", "(", "'square'", ")", "a_cov", ".", "invert_yaxis", "(", ")", "if", "xlabel_on_top", ":", "a_cov", ".", "xaxis", ".", "tick_top", "(", ")", "a_cov", ".", "xaxis", ".", "set_label_position", "(", "'top'", ")", "a_cov", ".", "set_xticks", "(", "0.5", "+", "scipy", ".", "arange", "(", "0", ",", "flat_trace", ".", "shape", "[", "1", "]", ",", "dtype", "=", "float", ")", ")", "a_cov", ".", "set_yticks", "(", "0.5", "+", "scipy", ".", "arange", "(", "0", ",", "flat_trace", ".", "shape", "[", "1", "]", ",", "dtype", "=", "float", ")", ")", "a_cov", ".", "set_xticklabels", "(", "labels", ")", "if", "rot_x_labels", ":", "plt", ".", "setp", "(", "a_cov", ".", "xaxis", ".", "get_majorticklabels", "(", ")", ",", "rotation", "=", "90", ")", "a_cov", ".", "set_yticklabels", "(", "labels", ")", "a_cov", ".", "set_xlim", "(", "0", ",", "flat_trace", ".", "shape", "[", "1", "]", ")", "a_cov", ".", "set_ylim", "(", "flat_trace", ".", "shape", "[", "1", "]", ",", "0", ")", "return", "f_cov", ",", "a_cov" ]
Make a plot of the sampler's correlation or covariance matrix.

Returns the figure and axis created.

Parameters
----------
sampler : :py:class:`emcee.Sampler` instance or array, (`n_temps`, `n_chains`, `n_samp`, `n_dim`), (`n_chains`, `n_samp`, `n_dim`) or (`n_samp`, `n_dim`)
    The sampler to plot the chains/marginals of. Can also be an array of
    samples which matches the shape of the `chain` attribute that would be
    present in a :py:class:`emcee.Sampler` instance.
method : {'corr', 'cov'}
    Whether to plot the correlation matrix ('corr') or the covariance
    matrix ('cov'). The covariance matrix is often not useful because
    different parameters have wildly different scales. Default is to plot
    the correlation matrix.
labels : array of str, (`D`,), optional
    The labels for each hyperparameter. Default is to use empty strings.
burn : int, optional
    The number of samples to burn before making the marginal histograms.
    Default is zero (use all samples).
chain_mask : (index) array, optional
    Mask identifying the chains to keep before plotting, in case there are
    bad chains. Default is to use all chains.
temp_idx : int, optional
    Index of the temperature to plot when plotting a
    :py:class:`emcee.PTSampler`. Default is 0 (samples from the posterior).
cbar_label : str, optional
    The label to use for the colorbar. The default is chosen based on the
    value of the `method` keyword.
title : str, optional
    Title to use for the plot.
rot_x_labels : bool, optional
    If True, the labels for the x-axis are rotated 90 degrees. Default is
    False (do not rotate labels).
figsize : 2-tuple, optional
    The figure size to use. Default is to use the matplotlib default.
xlabel_on_top : bool, optional
    If True, the x-axis labels are put on top (the way mathematicians
    present matrices). Default is True.
[ "Make", "a", "plot", "of", "the", "sampler", "s", "correlation", "or", "covariance", "matrix", ".", "Returns", "the", "figure", "and", "axis", "created", ".", "Parameters", "----------", "sampler", ":", ":", "py", ":", "class", ":", "emcee", ".", "Sampler", "instance", "or", "array", "(", "n_temps", "n_chains", "n_samp", "n_dim", ")", "(", "n_chains", "n_samp", "n_dim", ")", "or", "(", "n_samp", "n_dim", ")", "The", "sampler", "to", "plot", "the", "chains", "/", "marginals", "of", ".", "Can", "also", "be", "an", "array", "of", "samples", "which", "matches", "the", "shape", "of", "the", "chain", "attribute", "that", "would", "be", "present", "in", "a", ":", "py", ":", "class", ":", "emcee", ".", "Sampler", "instance", ".", "method", ":", "{", "corr", "cov", "}", "Whether", "to", "plot", "the", "correlation", "matrix", "(", "corr", ")", "or", "the", "covariance", "matrix", "(", "cov", ")", ".", "The", "covariance", "matrix", "is", "often", "not", "useful", "because", "different", "parameters", "have", "wildly", "different", "scales", ".", "Default", "is", "to", "plot", "the", "correlation", "matrix", ".", "labels", ":", "array", "of", "str", "(", "D", ")", "optional", "The", "labels", "for", "each", "hyperparameter", ".", "Default", "is", "to", "use", "empty", "strings", ".", "burn", ":", "int", "optional", "The", "number", "of", "samples", "to", "burn", "before", "making", "the", "marginal", "histograms", ".", "Default", "is", "zero", "(", "use", "all", "samples", ")", ".", "chain_mask", ":", "(", "index", ")", "array", "optional", "Mask", "identifying", "the", "chains", "to", "keep", "before", "plotting", "in", "case", "there", "are", "bad", "chains", ".", "Default", "is", "to", "use", "all", "chains", ".", "temp_idx", ":", "int", "optional", "Index", "of", "the", "temperature", "to", "plot", "when", "plotting", "a", ":", "py", ":", "class", ":", "emcee", ".", "PTSampler", ".", "Default", "is", "0", "(", "samples", "from", "the", "posterior", ")", ".", "cbar_label", ":", "str", "optional", "The", "label", "to", "use", "for", "the", "colorbar", ".", "The", "default", "is", "chosen", "based", "on", "the", "value", "of", "the", "method", "keyword", ".", "title", ":", "str", "optional", "Title", "to", "use", "for", "the", "plot", ".", "rot_x_labels", ":", "bool", "optional", "If", "True", "the", "labels", "for", "the", "x", "-", "axis", "are", "rotated", "90", "degrees", ".", "Default", "is", "False", "(", "do", "not", "rotate", "labels", ")", ".", "figsize", ":", "2", "-", "tuple", "optional", "The", "figure", "size", "to", "use", ".", "Default", "is", "to", "use", "the", "matplotlib", "default", ".", "xlabel_on_top", ":", "bool", "optional", "If", "True", "the", "x", "-", "axis", "labels", "are", "put", "on", "top", "(", "the", "way", "mathematicians", "present", "matrices", ")", ".", "Default", "is", "True", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L2616-L2752
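The 2-d array branch above means `plot_sampler_cov` can be fed a plain (`n_samp`, `n_dim`) array of posterior draws directly; a small sketch with synthetic correlated samples (the data and labels are made up):

# Hypothetical sketch; any (n_samp, n_dim) array of posterior draws works.
import numpy
from gptools.utils import plot_sampler_cov

samples = numpy.random.multivariate_normal(
    [0.0, 0.0], [[1.0, 0.8], [0.8, 2.0]], size=5000
)
f, a = plot_sampler_cov(samples, method='corr', labels=[r'$p_1$', r'$p_2$'])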
markchil/gptools
gptools/utils.py
ProductJointPrior.sample_u
def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the inverse
        CDF. To facilitate efficient sampling, this function returns a
        *vector* of PPF values, one value for each variable. Basically, the
        idea is that, given a vector :math:`q` of `num_params` values each of
        which is distributed uniformly on :math:`[0, 1]`, this function will
        return corresponding samples for each variable.

        Parameters
        ----------
        q : array-like, (`num_params`,)
            Values between 0 and 1 to evaluate inverse CDF at.
        """
        p1_num_params = len(self.p1.bounds)
        return scipy.concatenate(
            (
                self.p1.sample_u(q[:p1_num_params]),
                self.p2.sample_u(q[p1_num_params:])
            )
        )
python
def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the inverse
        CDF. To facilitate efficient sampling, this function returns a
        *vector* of PPF values, one value for each variable. Basically, the
        idea is that, given a vector :math:`q` of `num_params` values each of
        which is distributed uniformly on :math:`[0, 1]`, this function will
        return corresponding samples for each variable.

        Parameters
        ----------
        q : array-like, (`num_params`,)
            Values between 0 and 1 to evaluate inverse CDF at.
        """
        p1_num_params = len(self.p1.bounds)
        return scipy.concatenate(
            (
                self.p1.sample_u(q[:p1_num_params]),
                self.p2.sample_u(q[p1_num_params:])
            )
        )
[ "def", "sample_u", "(", "self", ",", "q", ")", ":", "p1_num_params", "=", "len", "(", "self", ".", "p1", ".", "bounds", ")", "return", "scipy", ".", "concatenate", "(", "(", "self", ".", "p1", ".", "sample_u", "(", "q", "[", ":", "p1_num_params", "]", ")", ",", "self", ".", "p2", ".", "sample_u", "(", "q", "[", "p1_num_params", ":", "]", ")", ")", ")" ]
r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array-like, (`num_params`,) Values between 0 and 1 to evaluate inverse CDF at.
[ "r", "Extract", "a", "sample", "from", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "inverse", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "PPF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "uniformly", "on", ":", "math", ":", "[", "0", "1", "]", "this", "function", "will", "return", "corresponding", "samples", "for", "each", "variable", ".", "Parameters", "----------", "q", ":", "array", "-", "like", "(", "num_params", ")", "Values", "between", "0", "and", "1", "to", "evaluate", "inverse", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L285-L306
markchil/gptools
gptools/utils.py
ProductJointPrior.elementwise_cdf
def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the CDF. To
        facilitate efficient sampling, this function returns a *vector* of
        CDF values, one value for each variable. Basically, the idea is that,
        given a vector :math:`q` of `num_params` values each of which is
        distributed according to the prior, this function will return
        variables uniform on :math:`[0, 1]` corresponding to each variable.
        This is the inverse operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.
        """
        p1_num_params = len(self.p1.bounds)
        return scipy.concatenate(
            (
                self.p1.elementwise_cdf(p[:p1_num_params]),
                self.p2.elementwise_cdf(p[p1_num_params:])
            )
        )
python
def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the CDF. To
        facilitate efficient sampling, this function returns a *vector* of
        CDF values, one value for each variable. Basically, the idea is that,
        given a vector :math:`q` of `num_params` values each of which is
        distributed according to the prior, this function will return
        variables uniform on :math:`[0, 1]` corresponding to each variable.
        This is the inverse operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.
        """
        p1_num_params = len(self.p1.bounds)
        return scipy.concatenate(
            (
                self.p1.elementwise_cdf(p[:p1_num_params]),
                self.p2.elementwise_cdf(p[p1_num_params:])
            )
        )
[ "def", "elementwise_cdf", "(", "self", ",", "p", ")", ":", "p1_num_params", "=", "len", "(", "self", ".", "p1", ".", "bounds", ")", "return", "scipy", ".", "concatenate", "(", "(", "self", ".", "p1", ".", "elementwise_cdf", "(", "p", "[", ":", "p1_num_params", "]", ")", ",", "self", ".", "p2", ".", "elementwise_cdf", "(", "p", "[", "p1_num_params", ":", "]", ")", ")", ")" ]
r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at.
[ "r", "Convert", "a", "sample", "to", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "CDF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "according", "to", "the", "prior", "this", "function", "will", "return", "variables", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", "corresponding", "to", "each", "variable", ".", "This", "is", "the", "inverse", "operation", "to", ":", "py", ":", "meth", ":", "sample_u", ".", "Parameters", "----------", "p", ":", "array", "-", "like", "(", "num_params", ")", "Values", "to", "evaluate", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L308-L330
markchil/gptools
gptools/utils.py
ProductJointPrior.random_draw
def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        The outputs of the two priors are stacked vertically.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        draw_1 = self.p1.random_draw(size=size)
        draw_2 = self.p2.random_draw(size=size)
        if draw_1.ndim == 1:
            return scipy.hstack((draw_1, draw_2))
        else:
            return scipy.vstack((draw_1, draw_2))
python
def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        The outputs of the two priors are stacked vertically.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        draw_1 = self.p1.random_draw(size=size)
        draw_2 = self.p2.random_draw(size=size)
        if draw_1.ndim == 1:
            return scipy.hstack((draw_1, draw_2))
        else:
            return scipy.vstack((draw_1, draw_2))
[ "def", "random_draw", "(", "self", ",", "size", "=", "None", ")", ":", "draw_1", "=", "self", ".", "p1", ".", "random_draw", "(", "size", "=", "size", ")", "draw_2", "=", "self", ".", "p2", ".", "random_draw", "(", "size", "=", "size", ")", "if", "draw_1", ".", "ndim", "==", "1", ":", "return", "scipy", ".", "hstack", "(", "(", "draw_1", ",", "draw_2", ")", ")", "else", ":", "return", "scipy", ".", "vstack", "(", "(", "draw_1", ",", "draw_2", ")", ")" ]
Draw random samples of the hyperparameters.

The outputs of the two priors are stacked vertically.

Parameters
----------
size : None, int or array-like, optional
    The number/shape of samples to draw. If None, only one sample is
    returned. Default is None.
[ "Draw", "random", "samples", "of", "the", "hyperparameters", ".", "The", "outputs", "of", "the", "two", "priors", "are", "stacked", "vertically", ".", "Parameters", "----------", "size", ":", "None", "int", "or", "array", "-", "like", "optional", "The", "number", "/", "shape", "of", "samples", "to", "draw", ".", "If", "None", "only", "one", "sample", "is", "returned", ".", "Default", "is", "None", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L332-L349
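The three `ProductJointPrior` methods above share one convention: the first `len(p1.bounds)` entries of any parameter vector belong to `p1` and the remainder to `p2`. A round-trip sketch follows; the `ProductJointPrior(p1, p2)` construction and the `UniformJointPrior` bounds-list constructor are inferred from the `self.p1`/`self.p2` attributes used above, so treat them as assumptions.

# Hypothetical sketch: elementwise_cdf should invert sample_u per component.
import numpy
from gptools.utils import ProductJointPrior, UniformJointPrior

p1 = UniformJointPrior([(0.0, 1.0), (0.0, 5.0)])  # two parameters (assumed ctor)
p2 = UniformJointPrior([(-1.0, 1.0)])             # one parameter
prior = ProductJointPrior(p1, p2)                 # assumed ctor

q = numpy.array([0.25, 0.5, 0.75])  # one uniform variate per parameter
p = prior.sample_u(q)               # first two entries from p1, last from p2
assert numpy.allclose(prior.elementwise_cdf(p), q)
print(prior.random_draw(size=10).shape)  # (3, 10): draws stacked vertically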
markchil/gptools
gptools/utils.py
UniformJointPrior.sample_u
def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the inverse
        CDF. To facilitate efficient sampling, this function returns a
        *vector* of PPF values, one value for each variable. Basically, the
        idea is that, given a vector :math:`q` of `num_params` values each of
        which is distributed uniformly on :math:`[0, 1]`, this function will
        return corresponding samples for each variable.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.
        """
        q = scipy.atleast_1d(q)
        if len(q) != len(self.bounds):
            raise ValueError("length of q must equal the number of parameters!")
        if q.ndim != 1:
            raise ValueError("q must be one-dimensional!")
        if (q < 0).any() or (q > 1).any():
            raise ValueError("q must be within [0, 1]!")
        return scipy.asarray([(b[1] - b[0]) * v + b[0] for v, b in zip(q, self.bounds)])
python
def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the inverse
        CDF. To facilitate efficient sampling, this function returns a
        *vector* of PPF values, one value for each variable. Basically, the
        idea is that, given a vector :math:`q` of `num_params` values each of
        which is distributed uniformly on :math:`[0, 1]`, this function will
        return corresponding samples for each variable.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.
        """
        q = scipy.atleast_1d(q)
        if len(q) != len(self.bounds):
            raise ValueError("length of q must equal the number of parameters!")
        if q.ndim != 1:
            raise ValueError("q must be one-dimensional!")
        if (q < 0).any() or (q > 1).any():
            raise ValueError("q must be within [0, 1]!")
        return scipy.asarray([(b[1] - b[0]) * v + b[0] for v, b in zip(q, self.bounds)])
[ "def", "sample_u", "(", "self", ",", "q", ")", ":", "q", "=", "scipy", ".", "atleast_1d", "(", "q", ")", "if", "len", "(", "q", ")", "!=", "len", "(", "self", ".", "bounds", ")", ":", "raise", "ValueError", "(", "\"length of q must equal the number of parameters!\"", ")", "if", "q", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"q must be one-dimensional!\"", ")", "if", "(", "q", "<", "0", ")", ".", "any", "(", ")", "or", "(", "q", ">", "1", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"q must be within [0, 1]!\"", ")", "return", "scipy", ".", "asarray", "(", "[", "(", "b", "[", "1", "]", "-", "b", "[", "0", "]", ")", "*", "v", "+", "b", "[", "0", "]", "for", "v", ",", "b", "in", "zip", "(", "q", ",", "self", ".", "bounds", ")", "]", ")" ]
r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at.
[ "r", "Extract", "a", "sample", "from", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "inverse", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "PPF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "uniformly", "on", ":", "math", ":", "[", "0", "1", "]", "this", "function", "will", "return", "corresponding", "samples", "for", "each", "variable", ".", "Parameters", "----------", "q", ":", "array", "of", "float", "Values", "between", "0", "and", "1", "to", "evaluate", "inverse", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L392-L414
markchil/gptools
gptools/utils.py
UniformJointPrior.elementwise_cdf
def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the CDF. To
        facilitate efficient sampling, this function returns a *vector* of
        CDF values, one value for each variable. Basically, the idea is that,
        given a vector :math:`q` of `num_params` values each of which is
        distributed according to the prior, this function will return
        variables uniform on :math:`[0, 1]` corresponding to each variable.
        This is the inverse operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.
        """
        p = scipy.atleast_1d(p)
        if len(p) != len(self.bounds):
            raise ValueError("length of p must equal the number of parameters!")
        if p.ndim != 1:
            raise ValueError("p must be one-dimensional!")
        c = scipy.zeros(len(self.bounds))
        for k in xrange(0, len(self.bounds)):
            if p[k] <= self.bounds[k][0]:
                c[k] = 0.0
            elif p[k] >= self.bounds[k][1]:
                c[k] = 1.0
            else:
                c[k] = (p[k] - self.bounds[k][0]) / (self.bounds[k][1] - self.bounds[k][0])
        return c
python
def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the CDF. To
        facilitate efficient sampling, this function returns a *vector* of
        CDF values, one value for each variable. Basically, the idea is that,
        given a vector :math:`q` of `num_params` values each of which is
        distributed according to the prior, this function will return
        variables uniform on :math:`[0, 1]` corresponding to each variable.
        This is the inverse operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.
        """
        p = scipy.atleast_1d(p)
        if len(p) != len(self.bounds):
            raise ValueError("length of p must equal the number of parameters!")
        if p.ndim != 1:
            raise ValueError("p must be one-dimensional!")
        c = scipy.zeros(len(self.bounds))
        for k in xrange(0, len(self.bounds)):
            if p[k] <= self.bounds[k][0]:
                c[k] = 0.0
            elif p[k] >= self.bounds[k][1]:
                c[k] = 1.0
            else:
                c[k] = (p[k] - self.bounds[k][0]) / (self.bounds[k][1] - self.bounds[k][0])
        return c
[ "def", "elementwise_cdf", "(", "self", ",", "p", ")", ":", "p", "=", "scipy", ".", "atleast_1d", "(", "p", ")", "if", "len", "(", "p", ")", "!=", "len", "(", "self", ".", "bounds", ")", ":", "raise", "ValueError", "(", "\"length of p must equal the number of parameters!\"", ")", "if", "p", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"p must be one-dimensional!\"", ")", "c", "=", "scipy", ".", "zeros", "(", "len", "(", "self", ".", "bounds", ")", ")", "for", "k", "in", "xrange", "(", "0", ",", "len", "(", "self", ".", "bounds", ")", ")", ":", "if", "p", "[", "k", "]", "<=", "self", ".", "bounds", "[", "k", "]", "[", "0", "]", ":", "c", "[", "k", "]", "=", "0.0", "elif", "p", "[", "k", "]", ">=", "self", ".", "bounds", "[", "k", "]", "[", "1", "]", ":", "c", "[", "k", "]", "=", "1.0", "else", ":", "c", "[", "k", "]", "=", "(", "p", "[", "k", "]", "-", "self", ".", "bounds", "[", "k", "]", "[", "0", "]", ")", "/", "(", "self", ".", "bounds", "[", "k", "]", "[", "1", "]", "-", "self", ".", "bounds", "[", "k", "]", "[", "0", "]", ")", "return", "c" ]
r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at.
[ "r", "Convert", "a", "sample", "to", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "CDF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "according", "to", "the", "prior", "this", "function", "will", "return", "variables", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", "corresponding", "to", "each", "variable", ".", "This", "is", "the", "inverse", "operation", "to", ":", "py", ":", "meth", ":", "sample_u", ".", "Parameters", "----------", "p", ":", "array", "-", "like", "(", "num_params", ")", "Values", "to", "evaluate", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L416-L445
markchil/gptools
gptools/utils.py
UniformJointPrior.random_draw
def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        return scipy.asarray([numpy.random.uniform(low=b[0], high=b[1], size=size) for b in self.bounds])
python
def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        return scipy.asarray([numpy.random.uniform(low=b[0], high=b[1], size=size) for b in self.bounds])
[ "def", "random_draw", "(", "self", ",", "size", "=", "None", ")", ":", "return", "scipy", ".", "asarray", "(", "[", "numpy", ".", "random", ".", "uniform", "(", "low", "=", "b", "[", "0", "]", ",", "high", "=", "b", "[", "1", "]", ",", "size", "=", "size", ")", "for", "b", "in", "self", ".", "bounds", "]", ")" ]
Draw random samples of the hyperparameters.

Parameters
----------
size : None, int or array-like, optional
    The number/shape of samples to draw. If None, only one sample is
    returned. Default is None.
[ "Draw", "random", "samples", "of", "the", "hyperparameters", ".", "Parameters", "----------", "size", ":", "None", "int", "or", "array", "-", "like", "optional", "The", "number", "/", "shape", "of", "samples", "to", "draw", ".", "If", "None", "only", "one", "sample", "is", "returned", ".", "Default", "is", "None", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L447-L456
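For `UniformJointPrior`, `sample_u` is the per-parameter affine map `u -> (hi - lo) * u + lo`, `elementwise_cdf` is its inverse (clamped to [0, 1] outside the bounds), and `random_draw` stacks parameters along the first axis. A quick sanity check with made-up bounds (the bounds-list constructor is again an assumption):

# Hypothetical sketch of the affine map and its inverse.
import numpy
from gptools.utils import UniformJointPrior

prior = UniformJointPrior([(0.0, 10.0), (2.0, 4.0)])   # assumed ctor
print(prior.sample_u(numpy.array([0.5, 0.5])))         # [5.0, 3.0]
print(prior.elementwise_cdf(numpy.array([5.0, 3.0])))  # [0.5, 0.5]
print(prior.random_draw(size=3).shape)                 # (2, 3)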
markchil/gptools
gptools/utils.py
CoreEdgeJointPrior.random_draw
def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        if size is None:
            size = 1
            single_val = True
        else:
            single_val = False
        out_shape = [len(self.bounds)]
        try:
            out_shape.extend(size)
        except TypeError:
            out_shape.append(size)
        out = scipy.zeros(out_shape)
        for j in xrange(0, len(self.bounds)):
            if j != 2:
                out[j, :] = numpy.random.uniform(low=self.bounds[j][0], high=self.bounds[j][1], size=size)
            else:
                out[j, :] = numpy.random.uniform(low=self.bounds[j][0], high=out[j - 1, :], size=size)
        if not single_val:
            return out
        else:
            return out.ravel()
python
def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.
        """
        if size is None:
            size = 1
            single_val = True
        else:
            single_val = False
        out_shape = [len(self.bounds)]
        try:
            out_shape.extend(size)
        except TypeError:
            out_shape.append(size)
        out = scipy.zeros(out_shape)
        for j in xrange(0, len(self.bounds)):
            if j != 2:
                out[j, :] = numpy.random.uniform(low=self.bounds[j][0], high=self.bounds[j][1], size=size)
            else:
                out[j, :] = numpy.random.uniform(low=self.bounds[j][0], high=out[j - 1, :], size=size)
        if not single_val:
            return out
        else:
            return out.ravel()
[ "def", "random_draw", "(", "self", ",", "size", "=", "None", ")", ":", "if", "size", "is", "None", ":", "size", "=", "1", "single_val", "=", "True", "else", ":", "single_val", "=", "False", "out_shape", "=", "[", "len", "(", "self", ".", "bounds", ")", "]", "try", ":", "out_shape", ".", "extend", "(", "size", ")", "except", "TypeError", ":", "out_shape", ".", "append", "(", "size", ")", "out", "=", "scipy", ".", "zeros", "(", "out_shape", ")", "for", "j", "in", "xrange", "(", "0", ",", "len", "(", "self", ".", "bounds", ")", ")", ":", "if", "j", "!=", "2", ":", "out", "[", "j", ",", ":", "]", "=", "numpy", ".", "random", ".", "uniform", "(", "low", "=", "self", ".", "bounds", "[", "j", "]", "[", "0", "]", ",", "high", "=", "self", ".", "bounds", "[", "j", "]", "[", "1", "]", ",", "size", "=", "size", ")", "else", ":", "out", "[", "j", ",", ":", "]", "=", "numpy", ".", "random", ".", "uniform", "(", "low", "=", "self", ".", "bounds", "[", "j", "]", "[", "0", "]", ",", "high", "=", "out", "[", "j", "-", "1", ",", ":", "]", ",", "size", "=", "size", ")", "if", "not", "single_val", ":", "return", "out", "else", ":", "return", "out", ".", "ravel", "(", ")" ]
Draw random samples of the hyperparameters.

Parameters
----------
size : None, int or array-like, optional
    The number/shape of samples to draw. If None, only one sample is
    returned. Default is None.
[ "Draw", "random", "samples", "of", "the", "hyperparameters", ".", "Parameters", "----------", "size", ":", "None", "int", "or", "array", "-", "like", "optional", "The", "number", "/", "shape", "of", "samples", "to", "draw", ".", "If", "None", "only", "one", "sample", "is", "returned", ".", "Default", "is", "None", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L520-L554
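The `j != 2` special case above makes the draw conditional: the third hyperparameter's upper limit is whatever was just drawn for the second, so every sample satisfies `out[2] <= out[1]` (presumably an edge value that may not exceed the core one, as the class name suggests). The same conditional-draw idea in isolation, independent of the class:

# Standalone sketch of the conditional draw; the bounds are made up.
import numpy

bounds = [(0.0, 5.0), (0.0, 3.0), (0.0, 3.0)]
out = numpy.zeros((3, 1000))
for j in range(3):
    # Parameter 2 is bounded above by the draw for parameter 1:
    high = bounds[j][1] if j != 2 else out[j - 1, :]
    out[j, :] = numpy.random.uniform(low=bounds[j][0], high=high, size=1000)
assert (out[2] <= out[1]).all()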
markchil/gptools
gptools/utils.py
IndependentJointPrior.sample_u
def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the inverse
        CDF. To facilitate efficient sampling, this function returns a
        *vector* of PPF values, one value for each variable. Basically, the
        idea is that, given a vector :math:`q` of `num_params` values each of
        which is distributed uniformly on :math:`[0, 1]`, this function will
        return corresponding samples for each variable.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.
        """
        q = scipy.atleast_1d(q)
        if len(q) != len(self.univariate_priors):
            raise ValueError("length of q must equal the number of parameters!")
        if q.ndim != 1:
            raise ValueError("q must be one-dimensional!")
        if (q < 0).any() or (q > 1).any():
            raise ValueError("q must be within [0, 1]!")
        return scipy.asarray([p.ppf(v) for v, p in zip(q, self.univariate_priors)])
python
def sample_u(self, q):
        r"""Extract a sample from random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the inverse
        CDF. To facilitate efficient sampling, this function returns a
        *vector* of PPF values, one value for each variable. Basically, the
        idea is that, given a vector :math:`q` of `num_params` values each of
        which is distributed uniformly on :math:`[0, 1]`, this function will
        return corresponding samples for each variable.

        Parameters
        ----------
        q : array of float
            Values between 0 and 1 to evaluate inverse CDF at.
        """
        q = scipy.atleast_1d(q)
        if len(q) != len(self.univariate_priors):
            raise ValueError("length of q must equal the number of parameters!")
        if q.ndim != 1:
            raise ValueError("q must be one-dimensional!")
        if (q < 0).any() or (q > 1).any():
            raise ValueError("q must be within [0, 1]!")
        return scipy.asarray([p.ppf(v) for v, p in zip(q, self.univariate_priors)])
[ "def", "sample_u", "(", "self", ",", "q", ")", ":", "q", "=", "scipy", ".", "atleast_1d", "(", "q", ")", "if", "len", "(", "q", ")", "!=", "len", "(", "self", ".", "univariate_priors", ")", ":", "raise", "ValueError", "(", "\"length of q must equal the number of parameters!\"", ")", "if", "q", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"q must be one-dimensional!\"", ")", "if", "(", "q", "<", "0", ")", ".", "any", "(", ")", "or", "(", "q", ">", "1", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"q must be within [0, 1]!\"", ")", "return", "scipy", ".", "asarray", "(", "[", "p", ".", "ppf", "(", "v", ")", "for", "v", ",", "p", "in", "zip", "(", "q", ",", "self", ".", "univariate_priors", ")", "]", ")" ]
r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at.
[ "r", "Extract", "a", "sample", "from", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "inverse", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "PPF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "uniformly", "on", ":", "math", ":", "[", "0", "1", "]", "this", "function", "will", "return", "corresponding", "samples", "for", "each", "variable", ".", "Parameters", "----------", "q", ":", "array", "of", "float", "Values", "between", "0", "and", "1", "to", "evaluate", "inverse", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L708-L730
markchil/gptools
gptools/utils.py
IndependentJointPrior.elementwise_cdf
def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the CDF. To
        facilitate efficient sampling, this function returns a *vector* of
        CDF values, one value for each variable. Basically, the idea is that,
        given a vector :math:`q` of `num_params` values each of which is
        distributed according to the prior, this function will return
        variables uniform on :math:`[0, 1]` corresponding to each variable.
        This is the inverse operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.
        """
        p = scipy.atleast_1d(p)
        if len(p) != len(self.univariate_priors):
            raise ValueError("length of p must equal the number of parameters!")
        if p.ndim != 1:
            raise ValueError("p must be one-dimensional!")
        return scipy.asarray([pr.cdf(v) for v, pr in zip(p, self.univariate_priors)])
python
def elementwise_cdf(self, p):
        r"""Convert a sample to random variates uniform on :math:`[0, 1]`.

        For a univariate distribution, this is simply evaluating the CDF. To
        facilitate efficient sampling, this function returns a *vector* of
        CDF values, one value for each variable. Basically, the idea is that,
        given a vector :math:`q` of `num_params` values each of which is
        distributed according to the prior, this function will return
        variables uniform on :math:`[0, 1]` corresponding to each variable.
        This is the inverse operation to :py:meth:`sample_u`.

        Parameters
        ----------
        p : array-like, (`num_params`,)
            Values to evaluate CDF at.
        """
        p = scipy.atleast_1d(p)
        if len(p) != len(self.univariate_priors):
            raise ValueError("length of p must equal the number of parameters!")
        if p.ndim != 1:
            raise ValueError("p must be one-dimensional!")
        return scipy.asarray([pr.cdf(v) for v, pr in zip(p, self.univariate_priors)])
[ "def", "elementwise_cdf", "(", "self", ",", "p", ")", ":", "p", "=", "scipy", ".", "atleast_1d", "(", "p", ")", "if", "len", "(", "p", ")", "!=", "len", "(", "self", ".", "univariate_priors", ")", ":", "raise", "ValueError", "(", "\"length of p must equal the number of parameters!\"", ")", "if", "p", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"p must be one-dimensional!\"", ")", "return", "scipy", ".", "asarray", "(", "[", "pr", ".", "cdf", "(", "v", ")", "for", "v", ",", "pr", "in", "zip", "(", "p", ",", "self", ".", "univariate_priors", ")", "]", ")" ]
r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at.
[ "r", "Convert", "a", "sample", "to", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "CDF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "according", "to", "the", "prior", "this", "function", "will", "return", "variables", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", "corresponding", "to", "each", "variable", ".", "This", "is", "the", "inverse", "operation", "to", ":", "py", ":", "meth", ":", "sample_u", ".", "Parameters", "----------", "p", ":", "array", "-", "like", "(", "num_params", ")", "Values", "to", "evaluate", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L732-L753
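The cdf/ppf pair in the records above is what lets unit-hypercube samplers (MultiNest-style nested sampling, for instance) work with these priors. A minimal round-trip sketch, assuming `univariate_priors` holds frozen `scipy.stats` distributions (the `cdf` and `rvs` calls in these records suggest that interface); the two priors below are illustrative stand-ins, not taken from gptools:

import numpy as np
import scipy.stats

# Stand-ins for self.univariate_priors: frozen scipy.stats distributions.
priors = [scipy.stats.norm(loc=0.0, scale=2.0),
          scipy.stats.gamma(3.0, scale=0.5)]

p = np.array([1.3, 0.8])                                    # a hyperparameter sample
u = np.array([pr.cdf(v) for v, pr in zip(p, priors)])       # elementwise_cdf
p_back = np.array([pr.ppf(v) for v, pr in zip(u, priors)])  # sample_u inverts it
assert np.allclose(p_back, p)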
markchil/gptools
gptools/utils.py
IndependentJointPrior.random_draw
def random_draw(self, size=None): """Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None. """ return scipy.asarray([p.rvs(size=size) for p in self.univariate_priors])
python
def random_draw(self, size=None): """Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None. """ return scipy.asarray([p.rvs(size=size) for p in self.univariate_priors])
[ "def", "random_draw", "(", "self", ",", "size", "=", "None", ")", ":", "return", "scipy", ".", "asarray", "(", "[", "p", ".", "rvs", "(", "size", "=", "size", ")", "for", "p", "in", "self", ".", "univariate_priors", "]", ")" ]
Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None.
[ "Draw", "random", "samples", "of", "the", "hyperparameters", ".", "Parameters", "----------", "size", ":", "None", "int", "or", "array", "-", "like", "optional", "The", "number", "/", "shape", "of", "samples", "to", "draw", ".", "If", "None", "only", "one", "sample", "is", "returned", ".", "Default", "is", "None", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L755-L764
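One detail worth noting in `random_draw`: parameters stack along axis 0, so `size=None` yields a flat `(num_params,)` vector while `size=k` yields `(num_params, k)`. A quick shape check with the same illustrative priors as in the sketch above:

import numpy as np
import scipy.stats

priors = [scipy.stats.norm(loc=0.0, scale=2.0),
          scipy.stats.gamma(3.0, scale=0.5)]
draws = np.asarray([pr.rvs(size=5) for pr in priors])
assert draws.shape == (2, 5)   # one row per hyperparameter, one column per draw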
markchil/gptools
gptools/utils.py
NormalJointPrior.bounds
def bounds(self): """The bounds of the random variable. Set `self.i=0.95` to return the 95% interval if this is used for setting bounds on optimizers/etc. where infinite bounds may not be useful. """ return [scipy.stats.norm.interval(self.i, loc=m, scale=s) for s, m in zip(self.sigma, self.mu)]
python
def bounds(self): """The bounds of the random variable. Set `self.i=0.95` to return the 95% interval if this is used for setting bounds on optimizers/etc. where infinite bounds may not be useful. """ return [scipy.stats.norm.interval(self.i, loc=m, scale=s) for s, m in zip(self.sigma, self.mu)]
[ "def", "bounds", "(", "self", ")", ":", "return", "[", "scipy", ".", "stats", ".", "norm", ".", "interval", "(", "self", ".", "i", ",", "loc", "=", "m", ",", "scale", "=", "s", ")", "for", "s", ",", "m", "in", "zip", "(", "self", ".", "sigma", ",", "self", ".", "mu", ")", "]" ]
The bounds of the random variable. Set `self.i=0.95` to return the 95% interval if this is used for setting bounds on optimizers/etc. where infinite bounds may not be useful.
[ "The", "bounds", "of", "the", "random", "variable", ".", "Set", "self", ".", "i", "=", "0", ".", "95", "to", "return", "the", "95%", "interval", "if", "this", "is", "used", "for", "setting", "bounds", "on", "optimizers", "/", "etc", ".", "where", "infinite", "bounds", "may", "not", "be", "useful", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L803-L809
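`interval` returns the central probability mass, so `self.i = 0.95` gives the familiar mean plus or minus 1.96 sigma window per parameter: finite bounds that an optimizer can actually box-constrain on. A sketch with a standard normal and illustrative numbers:

import numpy as np
import scipy.stats

lo, hi = scipy.stats.norm.interval(0.95, loc=0.0, scale=1.0)
assert np.isclose(hi, 1.959964, atol=1e-5)   # the 97.5% quantile
assert np.isclose(lo, -hi)                   # symmetric about the mean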
markchil/gptools
gptools/utils.py
NormalJointPrior.sample_u
def sample_u(self, q): r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at. """ q = scipy.atleast_1d(q) if len(q) != len(self.sigma): raise ValueError("length of q must equal the number of parameters!") if q.ndim != 1: raise ValueError("q must be one-dimensional!") if (q < 0).any() or (q > 1).any(): raise ValueError("q must be within [0, 1]!") return scipy.asarray([scipy.stats.norm.ppf(v, loc=m, scale=s) for v, s, m in zip(q, self.sigma, self.mu)])
python
def sample_u(self, q): r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at. """ q = scipy.atleast_1d(q) if len(q) != len(self.sigma): raise ValueError("length of q must equal the number of parameters!") if q.ndim != 1: raise ValueError("q must be one-dimensional!") if (q < 0).any() or (q > 1).any(): raise ValueError("q must be within [0, 1]!") return scipy.asarray([scipy.stats.norm.ppf(v, loc=m, scale=s) for v, s, m in zip(q, self.sigma, self.mu)])
[ "def", "sample_u", "(", "self", ",", "q", ")", ":", "q", "=", "scipy", ".", "atleast_1d", "(", "q", ")", "if", "len", "(", "q", ")", "!=", "len", "(", "self", ".", "sigma", ")", ":", "raise", "ValueError", "(", "\"length of q must equal the number of parameters!\"", ")", "if", "q", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"q must be one-dimensional!\"", ")", "if", "(", "q", "<", "0", ")", ".", "any", "(", ")", "or", "(", "q", ">", "1", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"q must be within [0, 1]!\"", ")", "return", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "norm", ".", "ppf", "(", "v", ",", "loc", "=", "m", ",", "scale", "=", "s", ")", "for", "v", ",", "s", ",", "m", "in", "zip", "(", "q", ",", "self", ".", "sigma", ",", "self", ".", "mu", ")", "]", ")" ]
r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at.
[ "r", "Extract", "a", "sample", "from", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "inverse", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "PPF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "uniformly", "on", ":", "math", ":", "[", "0", "1", "]", "this", "function", "will", "return", "corresponding", "samples", "for", "each", "variable", ".", "Parameters", "----------", "q", ":", "array", "of", "float", "Values", "between", "0", "and", "1", "to", "evaluate", "inverse", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L811-L833
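This is plain inverse-transform sampling. The per-element loop in the record is equivalent to one vectorized `ppf` call, since `scipy.stats.norm` broadcasts `loc` and `scale`; a sketch with illustrative values:

import numpy as np
import scipy.stats

rng = np.random.default_rng(0)
mu = np.array([0.0, 1.0])
sigma = np.array([1.0, 0.5])
q = rng.uniform(size=2)                                # uniform on [0, 1]
s_loop = np.array([scipy.stats.norm.ppf(v, loc=m, scale=s)
                   for v, s, m in zip(q, sigma, mu)])  # the record's loop
s_vec = scipy.stats.norm.ppf(q, loc=mu, scale=sigma)   # broadcast version
assert np.allclose(s_loop, s_vec)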
markchil/gptools
gptools/utils.py
NormalJointPrior.elementwise_cdf
def elementwise_cdf(self, p): r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at. """ p = scipy.atleast_1d(p) if len(p) != len(self.sigma): raise ValueError("length of p must equal the number of parameters!") if p.ndim != 1: raise ValueError("p must be one-dimensional!") return scipy.asarray([scipy.stats.norm.cdf(v, loc=m, scale=s) for v, s, m in zip(p, self.sigma, self.mu)])
python
def elementwise_cdf(self, p): r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at. """ p = scipy.atleast_1d(p) if len(p) != len(self.sigma): raise ValueError("length of p must equal the number of parameters!") if p.ndim != 1: raise ValueError("p must be one-dimensional!") return scipy.asarray([scipy.stats.norm.cdf(v, loc=m, scale=s) for v, s, m in zip(p, self.sigma, self.mu)])
[ "def", "elementwise_cdf", "(", "self", ",", "p", ")", ":", "p", "=", "scipy", ".", "atleast_1d", "(", "p", ")", "if", "len", "(", "p", ")", "!=", "len", "(", "self", ".", "sigma", ")", ":", "raise", "ValueError", "(", "\"length of p must equal the number of parameters!\"", ")", "if", "p", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"p must be one-dimensional!\"", ")", "return", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "norm", ".", "cdf", "(", "v", ",", "loc", "=", "m", ",", "scale", "=", "s", ")", "for", "v", ",", "s", ",", "m", "in", "zip", "(", "p", ",", "self", ".", "sigma", ",", "self", ".", "mu", ")", "]", ")" ]
r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at.
[ "r", "Convert", "a", "sample", "to", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "CDF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "according", "to", "the", "prior", "this", "function", "will", "return", "variables", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", "corresponding", "to", "each", "variable", ".", "This", "is", "the", "inverse", "operation", "to", ":", "py", ":", "meth", ":", "sample_u", ".", "Parameters", "----------", "p", ":", "array", "-", "like", "(", "num_params", ")", "Values", "to", "evaluate", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L835-L856
markchil/gptools
gptools/utils.py
NormalJointPrior.random_draw
def random_draw(self, size=None): """Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None. """ return scipy.asarray([scipy.stats.norm.rvs(loc=m, scale=s, size=size) for s, m in zip(self.sigma, self.mu)])
python
def random_draw(self, size=None): """Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None. """ return scipy.asarray([scipy.stats.norm.rvs(loc=m, scale=s, size=size) for s, m in zip(self.sigma, self.mu)])
[ "def", "random_draw", "(", "self", ",", "size", "=", "None", ")", ":", "return", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "norm", ".", "rvs", "(", "loc", "=", "m", ",", "scale", "=", "s", ",", "size", "=", "size", ")", "for", "s", ",", "m", "in", "zip", "(", "self", ".", "sigma", ",", "self", ".", "mu", ")", "]", ")" ]
Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None.
[ "Draw", "random", "samples", "of", "the", "hyperparameters", ".", "Parameters", "----------", "size", ":", "None", "int", "or", "array", "-", "like", "optional", "The", "number", "/", "shape", "of", "samples", "to", "draw", ".", "If", "None", "only", "one", "sample", "is", "returned", ".", "Default", "is", "None", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L858-L867
markchil/gptools
gptools/utils.py
LogNormalJointPrior.bounds
def bounds(self): """The bounds of the random variable. Set `self.i=0.95` to return the 95% interval if this is used for setting bounds on optimizers/etc. where infinite bounds may not be useful. """ return [scipy.stats.lognorm.interval(self.i, s, loc=0, scale=em) for s, em in zip(self.sigma, self.emu)]
python
def bounds(self): """The bounds of the random variable. Set `self.i=0.95` to return the 95% interval if this is used for setting bounds on optimizers/etc. where infinite bounds may not be useful. """ return [scipy.stats.lognorm.interval(self.i, s, loc=0, scale=em) for s, em in zip(self.sigma, self.emu)]
[ "def", "bounds", "(", "self", ")", ":", "return", "[", "scipy", ".", "stats", ".", "lognorm", ".", "interval", "(", "self", ".", "i", ",", "s", ",", "loc", "=", "0", ",", "scale", "=", "em", ")", "for", "s", ",", "em", "in", "zip", "(", "self", ".", "sigma", ",", "self", ".", "emu", ")", "]" ]
The bounds of the random variable. Set `self.i=0.95` to return the 95% interval if this is used for setting bounds on optimizers/etc. where infinite bounds may not be useful.
[ "The", "bounds", "of", "the", "random", "variable", ".", "Set", "self", ".", "i", "=", "0", ".", "95", "to", "return", "the", "95%", "interval", "if", "this", "is", "used", "for", "setting", "bounds", "on", "optimizers", "/", "etc", ".", "where", "infinite", "bounds", "may", "not", "be", "useful", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L909-L915
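scipy's lognormal parameterization is a common stumbling block: the shape argument `s` is the sigma of the underlying normal, and `scale` must be `exp(mu)`, which is presumably what `self.emu` caches in these records. The bounds are then just the exponentiated normal bounds; a sketch with illustrative `mu` and `sigma`:

import numpy as np
import scipy.stats

mu, sigma = 0.5, 0.25
emu = np.exp(mu)                   # what self.emu appears to store
ln_lo, ln_hi = scipy.stats.lognorm.interval(0.95, sigma, loc=0, scale=emu)
n_lo, n_hi = scipy.stats.norm.interval(0.95, loc=mu, scale=sigma)
assert np.allclose((ln_lo, ln_hi), (np.exp(n_lo), np.exp(n_hi)))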
markchil/gptools
gptools/utils.py
LogNormalJointPrior.sample_u
def sample_u(self, q): r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at. """ q = scipy.atleast_1d(q) if len(q) != len(self.sigma): raise ValueError("length of q must equal the number of parameters!") if q.ndim != 1: raise ValueError("q must be one-dimensional!") if (q < 0).any() or (q > 1).any(): raise ValueError("q must be within [0, 1]!") return scipy.asarray([scipy.stats.lognorm.ppf(v, s, loc=0, scale=em) for v, s, em in zip(q, self.sigma, self.emu)])
python
def sample_u(self, q): r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at. """ q = scipy.atleast_1d(q) if len(q) != len(self.sigma): raise ValueError("length of q must equal the number of parameters!") if q.ndim != 1: raise ValueError("q must be one-dimensional!") if (q < 0).any() or (q > 1).any(): raise ValueError("q must be within [0, 1]!") return scipy.asarray([scipy.stats.lognorm.ppf(v, s, loc=0, scale=em) for v, s, em in zip(q, self.sigma, self.emu)])
[ "def", "sample_u", "(", "self", ",", "q", ")", ":", "q", "=", "scipy", ".", "atleast_1d", "(", "q", ")", "if", "len", "(", "q", ")", "!=", "len", "(", "self", ".", "sigma", ")", ":", "raise", "ValueError", "(", "\"length of q must equal the number of parameters!\"", ")", "if", "q", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"q must be one-dimensional!\"", ")", "if", "(", "q", "<", "0", ")", ".", "any", "(", ")", "or", "(", "q", ">", "1", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"q must be within [0, 1]!\"", ")", "return", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "lognorm", ".", "ppf", "(", "v", ",", "s", ",", "loc", "=", "0", ",", "scale", "=", "em", ")", "for", "v", ",", "s", ",", "em", "in", "zip", "(", "q", ",", "self", ".", "sigma", ",", "self", ".", "emu", ")", "]", ")" ]
r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at.
[ "r", "Extract", "a", "sample", "from", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "inverse", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "PPF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "uniformly", "on", ":", "math", ":", "[", "0", "1", "]", "this", "function", "will", "return", "corresponding", "samples", "for", "each", "variable", ".", "Parameters", "----------", "q", ":", "array", "of", "float", "Values", "between", "0", "and", "1", "to", "evaluate", "inverse", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L917-L939
markchil/gptools
gptools/utils.py
LogNormalJointPrior.elementwise_cdf
def elementwise_cdf(self, p): r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at. """ p = scipy.atleast_1d(p) if len(p) != len(self.sigma): raise ValueError("length of p must equal the number of parameters!") if p.ndim != 1: raise ValueError("p must be one-dimensional!") return scipy.asarray([scipy.stats.lognorm.cdf(v, s, loc=0, scale=em) for v, s, em in zip(p, self.sigma, self.emu)])
python
def elementwise_cdf(self, p): r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at. """ p = scipy.atleast_1d(p) if len(p) != len(self.sigma): raise ValueError("length of p must equal the number of parameters!") if p.ndim != 1: raise ValueError("p must be one-dimensional!") return scipy.asarray([scipy.stats.lognorm.cdf(v, s, loc=0, scale=em) for v, s, em in zip(p, self.sigma, self.emu)])
[ "def", "elementwise_cdf", "(", "self", ",", "p", ")", ":", "p", "=", "scipy", ".", "atleast_1d", "(", "p", ")", "if", "len", "(", "p", ")", "!=", "len", "(", "self", ".", "sigma", ")", ":", "raise", "ValueError", "(", "\"length of p must equal the number of parameters!\"", ")", "if", "p", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"p must be one-dimensional!\"", ")", "return", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "lognorm", ".", "cdf", "(", "v", ",", "s", ",", "loc", "=", "0", ",", "scale", "=", "em", ")", "for", "v", ",", "s", ",", "em", "in", "zip", "(", "p", ",", "self", ".", "sigma", ",", "self", ".", "emu", ")", "]", ")" ]
r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at.
[ "r", "Convert", "a", "sample", "to", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "CDF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "according", "to", "the", "prior", "this", "function", "will", "return", "variables", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", "corresponding", "to", "each", "variable", ".", "This", "is", "the", "inverse", "operation", "to", ":", "py", ":", "meth", ":", "sample_u", ".", "Parameters", "----------", "p", ":", "array", "-", "like", "(", "num_params", ")", "Values", "to", "evaluate", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L941-L962
markchil/gptools
gptools/utils.py
LogNormalJointPrior.random_draw
def random_draw(self, size=None): """Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None. """ return scipy.asarray([scipy.stats.lognorm.rvs(s, loc=0, scale=em, size=size) for s, em in zip(self.sigma, self.emu)])
python
def random_draw(self, size=None): """Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None. """ return scipy.asarray([scipy.stats.lognorm.rvs(s, loc=0, scale=em, size=size) for s, em in zip(self.sigma, self.emu)])
[ "def", "random_draw", "(", "self", ",", "size", "=", "None", ")", ":", "return", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "lognorm", ".", "rvs", "(", "s", ",", "loc", "=", "0", ",", "scale", "=", "em", ",", "size", "=", "size", ")", "for", "s", ",", "em", "in", "zip", "(", "self", ".", "sigma", ",", "self", ".", "emu", ")", "]", ")" ]
Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None.
[ "Draw", "random", "samples", "of", "the", "hyperparameters", ".", "Parameters", "----------", "size", ":", "None", "int", "or", "array", "-", "like", "optional", "The", "number", "/", "shape", "of", "samples", "to", "draw", ".", "If", "None", "only", "one", "sample", "is", "returned", ".", "Default", "is", "None", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L964-L973
markchil/gptools
gptools/utils.py
GammaJointPrior.bounds
def bounds(self): """The bounds of the random variable. Set `self.i=0.95` to return the 95% interval if this is used for setting bounds on optimizers/etc. where infinite bounds may not be useful. """ return [scipy.stats.gamma.interval(self.i, a, loc=0, scale=1.0 / b) for a, b in zip(self.a, self.b)]
python
def bounds(self): """The bounds of the random variable. Set `self.i=0.95` to return the 95% interval if this is used for setting bounds on optimizers/etc. where infinite bounds may not be useful. """ return [scipy.stats.gamma.interval(self.i, a, loc=0, scale=1.0 / b) for a, b in zip(self.a, self.b)]
[ "def", "bounds", "(", "self", ")", ":", "return", "[", "scipy", ".", "stats", ".", "gamma", ".", "interval", "(", "self", ".", "i", ",", "a", ",", "loc", "=", "0", ",", "scale", "=", "1.0", "/", "b", ")", "for", "a", ",", "b", "in", "zip", "(", "self", ".", "a", ",", "self", ".", "b", ")", "]" ]
The bounds of the random variable. Set `self.i=0.95` to return the 95% interval if this is used for setting bounds on optimizers/etc. where infinite bounds may not be useful.
[ "The", "bounds", "of", "the", "random", "variable", ".", "Set", "self", ".", "i", "=", "0", ".", "95", "to", "return", "the", "95%", "interval", "if", "this", "is", "used", "for", "setting", "bounds", "on", "optimizers", "/", "etc", ".", "where", "infinite", "bounds", "may", "not", "be", "useful", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1015-L1021
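Note the rate-versus-scale convention: these records pass `scale=1.0 / b`, so `b` is a rate parameter and the prior mean is `a / b`, while scipy's gamma takes shape `a` and a scale. A sketch with an illustrative shape and rate:

import numpy as np
import scipy.stats

a, b = 3.0, 2.0                                    # shape and rate
dist = scipy.stats.gamma(a, loc=0, scale=1.0 / b)  # scipy wants scale = 1/rate
assert np.isclose(dist.mean(), a / b)              # Gamma(a, rate=b) mean
lo, hi = dist.interval(0.95)
assert 0 < lo < hi                                 # finite, optimizer-friendly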
markchil/gptools
gptools/utils.py
GammaJointPrior.sample_u
def sample_u(self, q): r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at. """ q = scipy.atleast_1d(q) if len(q) != len(self.a): raise ValueError("length of q must equal the number of parameters!") if q.ndim != 1: raise ValueError("q must be one-dimensional!") if (q < 0).any() or (q > 1).any(): raise ValueError("q must be within [0, 1]!") return scipy.asarray([scipy.stats.gamma.ppf(v, a, loc=0, scale=1.0 / b) for v, a, b in zip(q, self.a, self.b)])
python
def sample_u(self, q): r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at. """ q = scipy.atleast_1d(q) if len(q) != len(self.a): raise ValueError("length of q must equal the number of parameters!") if q.ndim != 1: raise ValueError("q must be one-dimensional!") if (q < 0).any() or (q > 1).any(): raise ValueError("q must be within [0, 1]!") return scipy.asarray([scipy.stats.gamma.ppf(v, a, loc=0, scale=1.0 / b) for v, a, b in zip(q, self.a, self.b)])
[ "def", "sample_u", "(", "self", ",", "q", ")", ":", "q", "=", "scipy", ".", "atleast_1d", "(", "q", ")", "if", "len", "(", "q", ")", "!=", "len", "(", "self", ".", "a", ")", ":", "raise", "ValueError", "(", "\"length of q must equal the number of parameters!\"", ")", "if", "q", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"q must be one-dimensional!\"", ")", "if", "(", "q", "<", "0", ")", ".", "any", "(", ")", "or", "(", "q", ">", "1", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"q must be within [0, 1]!\"", ")", "return", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "gamma", ".", "ppf", "(", "v", ",", "a", ",", "loc", "=", "0", ",", "scale", "=", "1.0", "/", "b", ")", "for", "v", ",", "a", ",", "b", "in", "zip", "(", "q", ",", "self", ".", "a", ",", "self", ".", "b", ")", "]", ")" ]
r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at.
[ "r", "Extract", "a", "sample", "from", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "inverse", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "PPF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "uniformly", "on", ":", "math", ":", "[", "0", "1", "]", "this", "function", "will", "return", "corresponding", "samples", "for", "each", "variable", ".", "Parameters", "----------", "q", ":", "array", "of", "float", "Values", "between", "0", "and", "1", "to", "evaluate", "inverse", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1023-L1045
markchil/gptools
gptools/utils.py
GammaJointPrior.elementwise_cdf
def elementwise_cdf(self, p): r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at. """ p = scipy.atleast_1d(p) if len(p) != len(self.a): raise ValueError("length of p must equal the number of parameters!") if p.ndim != 1: raise ValueError("p must be one-dimensional!") return scipy.asarray([scipy.stats.gamma.cdf(v, a, loc=0, scale=1.0 / b) for v, a, b in zip(p, self.a, self.b)])
python
def elementwise_cdf(self, p): r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at. """ p = scipy.atleast_1d(p) if len(p) != len(self.a): raise ValueError("length of p must equal the number of parameters!") if p.ndim != 1: raise ValueError("p must be one-dimensional!") return scipy.asarray([scipy.stats.gamma.cdf(v, a, loc=0, scale=1.0 / b) for v, a, b in zip(p, self.a, self.b)])
[ "def", "elementwise_cdf", "(", "self", ",", "p", ")", ":", "p", "=", "scipy", ".", "atleast_1d", "(", "p", ")", "if", "len", "(", "p", ")", "!=", "len", "(", "self", ".", "a", ")", ":", "raise", "ValueError", "(", "\"length of p must equal the number of parameters!\"", ")", "if", "p", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"p must be one-dimensional!\"", ")", "return", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "gamma", ".", "cdf", "(", "v", ",", "a", ",", "loc", "=", "0", ",", "scale", "=", "1.0", "/", "b", ")", "for", "v", ",", "a", ",", "b", "in", "zip", "(", "p", ",", "self", ".", "a", ",", "self", ".", "b", ")", "]", ")" ]
r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at.
[ "r", "Convert", "a", "sample", "to", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "CDF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "according", "to", "the", "prior", "this", "function", "will", "return", "variables", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", "corresponding", "to", "each", "variable", ".", "This", "is", "the", "inverse", "operation", "to", ":", "py", ":", "meth", ":", "sample_u", ".", "Parameters", "----------", "p", ":", "array", "-", "like", "(", "num_params", ")", "Values", "to", "evaluate", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1047-L1068
markchil/gptools
gptools/utils.py
GammaJointPrior.random_draw
def random_draw(self, size=None): """Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None. """ return scipy.asarray([scipy.stats.gamma.rvs(a, loc=0, scale=1.0 / b, size=size) for a, b in zip(self.a, self.b)])
python
def random_draw(self, size=None): """Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None. """ return scipy.asarray([scipy.stats.gamma.rvs(a, loc=0, scale=1.0 / b, size=size) for a, b in zip(self.a, self.b)])
[ "def", "random_draw", "(", "self", ",", "size", "=", "None", ")", ":", "return", "scipy", ".", "asarray", "(", "[", "scipy", ".", "stats", ".", "gamma", ".", "rvs", "(", "a", ",", "loc", "=", "0", ",", "scale", "=", "1.0", "/", "b", ",", "size", "=", "size", ")", "for", "a", ",", "b", "in", "zip", "(", "self", ".", "a", ",", "self", ".", "b", ")", "]", ")" ]
Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None.
[ "Draw", "random", "samples", "of", "the", "hyperparameters", ".", "Parameters", "----------", "size", ":", "None", "int", "or", "array", "-", "like", "optional", "The", "number", "/", "shape", "of", "samples", "to", "draw", ".", "If", "None", "only", "one", "sample", "is", "returned", ".", "Default", "is", "None", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1070-L1079
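A quick Monte Carlo sanity check that the draws respect the same rate convention (seeded, so the assertion is deterministic):

import scipy.stats

samples = scipy.stats.gamma.rvs(3.0, loc=0, scale=1.0 / 2.0,
                                size=200_000, random_state=42)
assert abs(samples.mean() - 1.5) < 0.01   # Gamma(a=3, rate=2) has mean 1.5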
markchil/gptools
gptools/utils.py
SortedUniformJointPrior.sample_u
def sample_u(self, q): r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at. """ q = scipy.atleast_1d(q) if len(q) != self.num_var: raise ValueError("length of q must equal the number of parameters!") if q.ndim != 1: raise ValueError("q must be one-dimensional!") if (q < 0).any() or (q > 1).any(): raise ValueError("q must be within [0, 1]!") # Old way, not quite correct: # q = scipy.sort(q) # return scipy.asarray([(self.ub - self.lb) * v + self.lb for v in q]) # New way, based on conditional marginals: out = scipy.zeros_like(q, dtype=float) out[0] = self.lb for d in xrange(0, len(out)): out[d] = ( (1.0 - (1.0 - q[d])**(1.0 / (self.num_var - d))) * (self.ub - out[max(d - 1, 0)]) + out[max(d - 1, 0)] ) return out
python
def sample_u(self, q): r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at. """ q = scipy.atleast_1d(q) if len(q) != self.num_var: raise ValueError("length of q must equal the number of parameters!") if q.ndim != 1: raise ValueError("q must be one-dimensional!") if (q < 0).any() or (q > 1).any(): raise ValueError("q must be within [0, 1]!") # Old way, not quite correct: # q = scipy.sort(q) # return scipy.asarray([(self.ub - self.lb) * v + self.lb for v in q]) # New way, based on conditional marginals: out = scipy.zeros_like(q, dtype=float) out[0] = self.lb for d in xrange(0, len(out)): out[d] = ( (1.0 - (1.0 - q[d])**(1.0 / (self.num_var - d))) * (self.ub - out[max(d - 1, 0)]) + out[max(d - 1, 0)] ) return out
[ "def", "sample_u", "(", "self", ",", "q", ")", ":", "q", "=", "scipy", ".", "atleast_1d", "(", "q", ")", "if", "len", "(", "q", ")", "!=", "self", ".", "num_var", ":", "raise", "ValueError", "(", "\"length of q must equal the number of parameters!\"", ")", "if", "q", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"q must be one-dimensional!\"", ")", "if", "(", "q", "<", "0", ")", ".", "any", "(", ")", "or", "(", "q", ">", "1", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"q must be within [0, 1]!\"", ")", "# Old way, not quite correct:", "# q = scipy.sort(q)", "# return scipy.asarray([(self.ub - self.lb) * v + self.lb for v in q])", "# New way, based on conditional marginals:", "out", "=", "scipy", ".", "zeros_like", "(", "q", ",", "dtype", "=", "float", ")", "out", "[", "0", "]", "=", "self", ".", "lb", "for", "d", "in", "xrange", "(", "0", ",", "len", "(", "out", ")", ")", ":", "out", "[", "d", "]", "=", "(", "(", "1.0", "-", "(", "1.0", "-", "q", "[", "d", "]", ")", "**", "(", "1.0", "/", "(", "self", ".", "num_var", "-", "d", ")", ")", ")", "*", "(", "self", ".", "ub", "-", "out", "[", "max", "(", "d", "-", "1", ",", "0", ")", "]", ")", "+", "out", "[", "max", "(", "d", "-", "1", ",", "0", ")", "]", ")", "return", "out" ]
r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at.
[ "r", "Extract", "a", "sample", "from", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "inverse", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "PPF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "uniformly", "on", ":", "math", ":", "[", "0", "1", "]", "this", "function", "will", "return", "corresponding", "samples", "for", "each", "variable", ".", "Parameters", "----------", "q", ":", "array", "of", "float", "Values", "between", "0", "and", "1", "to", "evaluate", "inverse", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1154-L1189
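The "conditional marginals" loop deserves a gloss: after the first `d` sorted values are fixed, the remaining `num_var - d` values are i.i.d. uniform on `[prev, ub]`, and the next sorted value is their minimum, whose CDF is `1 - ((ub - x) / (ub - prev))**(num_var - d)`; the loop inverts exactly that expression. A Monte Carlo check of the `d = 0` step (the minimum of `num_var` uniforms), as a sketch:

import numpy as np

rng = np.random.default_rng(1)
lb, ub, num_var = 0.0, 1.0, 4

q0 = 0.3   # the first component of q
x0 = (1.0 - (1.0 - q0) ** (1.0 / num_var)) * (ub - lb) + lb

# Empirical CDF of the minimum of num_var uniforms, evaluated at x0
mins = rng.uniform(lb, ub, size=(200_000, num_var)).min(axis=1)
assert abs((mins <= x0).mean() - q0) < 0.01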
markchil/gptools
gptools/utils.py
SortedUniformJointPrior.elementwise_cdf
def elementwise_cdf(self, p): r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at. """ p = scipy.atleast_1d(p) if len(p) != len(self.bounds): raise ValueError("length of p must equal the number of parameters!") if p.ndim != 1: raise ValueError("p must be one-dimensional!") c = scipy.zeros(len(self.bounds)) # Old way, based on sorted uniform variables: # for k in xrange(0, len(self.bounds)): # if p[k] <= self.bounds[k][0]: # c[k] = 0.0 # elif p[k] >= self.bounds[k][1]: # c[k] = 1.0 # else: # c[k] = (p[k] - self.bounds[k][0]) / (self.bounds[k][1] - self.bounds[k][0]) # New way, based on conditional marginals: for d in xrange(0, len(c)): pdm1 = p[d - 1] if d > 0 else self.lb if p[d] <= pdm1: c[d] = 0.0 elif p[d] >= self.ub: c[d] = 1.0 else: c[d] = 1.0 - (1.0 - (p[d] - pdm1) / (self.ub - pdm1))**(self.num_var - d) return c
python
def elementwise_cdf(self, p): r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at. """ p = scipy.atleast_1d(p) if len(p) != len(self.bounds): raise ValueError("length of p must equal the number of parameters!") if p.ndim != 1: raise ValueError("p must be one-dimensional!") c = scipy.zeros(len(self.bounds)) # Old way, based on sorted uniform variables: # for k in xrange(0, len(self.bounds)): # if p[k] <= self.bounds[k][0]: # c[k] = 0.0 # elif p[k] >= self.bounds[k][1]: # c[k] = 1.0 # else: # c[k] = (p[k] - self.bounds[k][0]) / (self.bounds[k][1] - self.bounds[k][0]) # New way, based on conditional marginals: for d in xrange(0, len(c)): pdm1 = p[d - 1] if d > 0 else self.lb if p[d] <= pdm1: c[d] = 0.0 elif p[d] >= self.ub: c[d] = 1.0 else: c[d] = 1.0 - (1.0 - (p[d] - pdm1) / (self.ub - pdm1))**(self.num_var - d) return c
[ "def", "elementwise_cdf", "(", "self", ",", "p", ")", ":", "p", "=", "scipy", ".", "atleast_1d", "(", "p", ")", "if", "len", "(", "p", ")", "!=", "len", "(", "self", ".", "bounds", ")", ":", "raise", "ValueError", "(", "\"length of p must equal the number of parameters!\"", ")", "if", "p", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"p must be one-dimensional!\"", ")", "c", "=", "scipy", ".", "zeros", "(", "len", "(", "self", ".", "bounds", ")", ")", "# Old way, based on sorted uniform variables:", "# for k in xrange(0, len(self.bounds)):", "# if p[k] <= self.bounds[k][0]:", "# c[k] = 0.0", "# elif p[k] >= self.bounds[k][1]:", "# c[k] = 1.0", "# else:", "# c[k] = (p[k] - self.bounds[k][0]) / (self.bounds[k][1] - self.bounds[k][0])", "# New way, based on conditional marginals:", "for", "d", "in", "xrange", "(", "0", ",", "len", "(", "c", ")", ")", ":", "pdm1", "=", "p", "[", "d", "-", "1", "]", "if", "d", ">", "0", "else", "self", ".", "lb", "if", "p", "[", "d", "]", "<=", "pdm1", ":", "c", "[", "d", "]", "=", "0.0", "elif", "p", "[", "d", "]", ">=", "self", ".", "ub", ":", "c", "[", "d", "]", "=", "1.0", "else", ":", "c", "[", "d", "]", "=", "1.0", "-", "(", "1.0", "-", "(", "p", "[", "d", "]", "-", "pdm1", ")", "/", "(", "self", ".", "ub", "-", "pdm1", ")", ")", "**", "(", "self", ".", "num_var", "-", "d", ")", "return", "c" ]
r"""Convert a sample to random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the CDF. To facilitate efficient sampling, this function returns a *vector* of CDF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed according to the prior, this function will return variables uniform on :math:`[0, 1]` corresponding to each variable. This is the inverse operation to :py:meth:`sample_u`. Parameters ---------- p : array-like, (`num_params`,) Values to evaluate CDF at.
[ "r", "Convert", "a", "sample", "to", "random", "variates", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", ".", "For", "a", "univariate", "distribution", "this", "is", "simply", "evaluating", "the", "CDF", ".", "To", "facilitate", "efficient", "sampling", "this", "function", "returns", "a", "*", "vector", "*", "of", "CDF", "values", "one", "value", "for", "each", "variable", ".", "Basically", "the", "idea", "is", "that", "given", "a", "vector", ":", "math", ":", "q", "of", "num_params", "values", "each", "of", "which", "is", "distributed", "according", "to", "the", "prior", "this", "function", "will", "return", "variables", "uniform", "on", ":", "math", ":", "[", "0", "1", "]", "corresponding", "to", "each", "variable", ".", "This", "is", "the", "inverse", "operation", "to", ":", "py", ":", "meth", ":", "sample_u", ".", "Parameters", "----------", "p", ":", "array", "-", "like", "(", "num_params", ")", "Values", "to", "evaluate", "CDF", "at", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1191-L1233
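The same conditional-marginal formula makes the cdf/sample_u round trip easy to verify algebraically. A vectorized sketch for an already-sorted `p`; the shortcut of taking the `prev` values from `p` itself is valid precisely because the round trip reproduces `p`:

import numpy as np

lb, ub, num_var = 0.0, 1.0, 3
p = np.array([0.2, 0.5, 0.9])                    # an already-sorted sample
prev = np.concatenate(([lb], p[:-1]))
k = num_var - np.arange(num_var)                 # exponents num_var - d
c = 1.0 - (1.0 - (p - prev) / (ub - prev)) ** k  # elementwise_cdf
back = (1.0 - (1.0 - c) ** (1.0 / k)) * (ub - prev) + prev  # sample_u
assert np.allclose(back, p)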
markchil/gptools
gptools/utils.py
SortedUniformJointPrior.random_draw
def random_draw(self, size=None): """Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None. """ if size is None: size = 1 single_val = True else: single_val = False out_shape = [self.num_var] try: out_shape.extend(size) except TypeError: out_shape.append(size) out = scipy.sort( numpy.random.uniform( low=self.lb, high=self.ub, size=out_shape ), axis=0 ) if not single_val: return out else: return out.ravel()
python
def random_draw(self, size=None): """Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None. """ if size is None: size = 1 single_val = True else: single_val = False out_shape = [self.num_var] try: out_shape.extend(size) except TypeError: out_shape.append(size) out = scipy.sort( numpy.random.uniform( low=self.lb, high=self.ub, size=out_shape ), axis=0 ) if not single_val: return out else: return out.ravel()
[ "def", "random_draw", "(", "self", ",", "size", "=", "None", ")", ":", "if", "size", "is", "None", ":", "size", "=", "1", "single_val", "=", "True", "else", ":", "single_val", "=", "False", "out_shape", "=", "[", "self", ".", "num_var", "]", "try", ":", "out_shape", ".", "extend", "(", "size", ")", "except", "TypeError", ":", "out_shape", ".", "append", "(", "size", ")", "out", "=", "scipy", ".", "sort", "(", "numpy", ".", "random", ".", "uniform", "(", "low", "=", "self", ".", "lb", ",", "high", "=", "self", ".", "ub", ",", "size", "=", "out_shape", ")", ",", "axis", "=", "0", ")", "if", "not", "single_val", ":", "return", "out", "else", ":", "return", "out", ".", "ravel", "(", ")" ]
Draw random samples of the hyperparameters. Parameters ---------- size : None, int or array-like, optional The number/shape of samples to draw. If None, only one sample is returned. Default is None.
[ "Draw", "random", "samples", "of", "the", "hyperparameters", ".", "Parameters", "----------", "size", ":", "None", "int", "or", "array", "-", "like", "optional", "The", "number", "/", "shape", "of", "samples", "to", "draw", ".", "If", "None", "only", "one", "sample", "is", "returned", ".", "Default", "is", "None", "." ]
train
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/utils.py#L1235-L1267
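Drawing is the easy direction: sample i.i.d. uniforms and sort along the parameter axis (axis 0), so every column is a valid sorted sample; with `size=None` the record flattens the result back to a `(num_var,)` vector. A sketch:

import numpy as np

rng = np.random.default_rng(2)
draws = np.sort(rng.uniform(0.0, 1.0, size=(4, 1000)), axis=0)
assert (np.diff(draws, axis=0) >= 0).all()   # each column is sorted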
CTPUG/wafer
wafer/tickets/views.py
zapier_cancel_hook
def zapier_cancel_hook(request): ''' Zapier can post something like this when tickets are cancelled { "ticket_type": "Individual (Regular)", "barcode": "12345678", "email": "demo@example.com" } ''' if request.META.get('HTTP_X_ZAPIER_SECRET', None) != settings.WAFER_TICKETS_SECRET: raise PermissionDenied('Incorrect secret') # This is required for python 3, and in theory fine on python 2 payload = json.loads(request.body.decode('utf8')) ticket = Ticket.objects.filter(barcode=payload['barcode']) if ticket.exists(): # delete the ticket ticket.delete() return HttpResponse("Cancelled\n", content_type='text/plain')
python
def zapier_cancel_hook(request): ''' Zapier can post something like this when tickets are cancelled { "ticket_type": "Individual (Regular)", "barcode": "12345678", "email": "demo@example.com" } ''' if request.META.get('HTTP_X_ZAPIER_SECRET', None) != settings.WAFER_TICKETS_SECRET: raise PermissionDenied('Incorrect secret') # This is required for python 3, and in theory fine on python 2 payload = json.loads(request.body.decode('utf8')) ticket = Ticket.objects.filter(barcode=payload['barcode']) if ticket.exists(): # delete the ticket ticket.delete() return HttpResponse("Cancelled\n", content_type='text/plain')
[ "def", "zapier_cancel_hook", "(", "request", ")", ":", "if", "request", ".", "META", ".", "get", "(", "'HTTP_X_ZAPIER_SECRET'", ",", "None", ")", "!=", "settings", ".", "WAFER_TICKETS_SECRET", ":", "raise", "PermissionDenied", "(", "'Incorrect secret'", ")", "# This is required for python 3, and in theory fine on python 2", "payload", "=", "json", ".", "loads", "(", "request", ".", "body", ".", "decode", "(", "'utf8'", ")", ")", "ticket", "=", "Ticket", ".", "objects", ".", "filter", "(", "barcode", "=", "payload", "[", "'barcode'", "]", ")", "if", "ticket", ".", "exists", "(", ")", ":", "# delete the ticket", "ticket", ".", "delete", "(", ")", "return", "HttpResponse", "(", "\"Cancelled\\n\"", ",", "content_type", "=", "'text/plain'", ")" ]
Zapier can post something like this when tickets are cancelled { "ticket_type": "Individual (Regular)", "barcode": "12345678", "email": "demo@example.com" }
[ "Zapier", "can", "post", "something", "like", "this", "when", "tickets", "are", "cancelled", "{", "ticket_type", ":", "Individual", "(", "Regular", ")", "barcode", ":", "12345678", "email", ":", "demo" ]
train
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/tickets/views.py#L55-L73
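Django surfaces an `X-Zapier-Secret` request header as `request.META['HTTP_X_ZAPIER_SECRET']`, so the caller must send that header with the JSON body. A client-side sketch using `requests`; the URL and secret below are placeholders, not taken from wafer:

import json
import requests

payload = {"ticket_type": "Individual (Regular)",
           "barcode": "12345678",
           "email": "demo@example.com"}
resp = requests.post(
    "https://conf.example.com/tickets/zapier_cancel/",  # hypothetical route
    data=json.dumps(payload),
    headers={"X-Zapier-Secret": "s3cret",  # must equal WAFER_TICKETS_SECRET
             "Content-Type": "application/json"},
)
print(resp.status_code, resp.text)  # expect 200 and "Cancelled" on success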
CTPUG/wafer
wafer/tickets/views.py
zapier_guest_hook
def zapier_guest_hook(request): ''' Zapier can POST something like this when tickets are bought: { "ticket_type": "Individual (Regular)", "barcode": "12345678", "email": "demo@example.com" } ''' if request.META.get('HTTP_X_ZAPIER_SECRET', None) != settings.WAFER_TICKETS_SECRET: raise PermissionDenied('Incorrect secret') # This is required for python 3, and in theory fine on python 2 payload = json.loads(request.body.decode('utf8')) import_ticket(payload['barcode'], payload['ticket_type'], payload['email']) return HttpResponse("Noted\n", content_type='text/plain')
python
def zapier_guest_hook(request): ''' Zapier can POST something like this when tickets are bought: { "ticket_type": "Individual (Regular)", "barcode": "12345678", "email": "demo@example.com" } ''' if request.META.get('HTTP_X_ZAPIER_SECRET', None) != settings.WAFER_TICKETS_SECRET: raise PermissionDenied('Incorrect secret') # This is required for python 3, and in theory fine on python 2 payload = json.loads(request.body.decode('utf8')) import_ticket(payload['barcode'], payload['ticket_type'], payload['email']) return HttpResponse("Noted\n", content_type='text/plain')
[ "def", "zapier_guest_hook", "(", "request", ")", ":", "if", "request", ".", "META", ".", "get", "(", "'HTTP_X_ZAPIER_SECRET'", ",", "None", ")", "!=", "settings", ".", "WAFER_TICKETS_SECRET", ":", "raise", "PermissionDenied", "(", "'Incorrect secret'", ")", "# This is required for python 3, and in theory fine on python 2", "payload", "=", "json", ".", "loads", "(", "request", ".", "body", ".", "decode", "(", "'utf8'", ")", ")", "import_ticket", "(", "payload", "[", "'barcode'", "]", ",", "payload", "[", "'ticket_type'", "]", ",", "payload", "[", "'email'", "]", ")", "return", "HttpResponse", "(", "\"Noted\\n\"", ",", "content_type", "=", "'text/plain'", ")" ]
Zapier can POST something like this when tickets are bought: { "ticket_type": "Individual (Regular)", "barcode": "12345678", "email": "demo@example.com" }
[ "Zapier", "can", "POST", "something", "like", "this", "when", "tickets", "are", "bought", ":", "{" ]
train
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/tickets/views.py#L81-L99
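For unit tests the hook can be called directly with Django's RequestFactory, whose extra keyword arguments become request.META entries; a sketch assuming a configured Django settings module with WAFER_TICKETS_SECRET set to "s3cret", and ignoring any decorators the full source may apply to the view:

    import json
    from django.test import RequestFactory
    from wafer.tickets.views import zapier_guest_hook

    body = json.dumps({"ticket_type": "Individual (Regular)",
                       "barcode": "12345678",
                       "email": "demo@example.com"})
    request = RequestFactory().post(
        "/tickets/zapier/",             # path is arbitrary; the view is called directly
        data=body,
        content_type="application/json",
        HTTP_X_ZAPIER_SECRET="s3cret",  # becomes request.META["HTTP_X_ZAPIER_SECRET"]
    )
    response = zapier_guest_hook(request)
    print(response.content)             # b"Noted\n" on success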
cocaine/cocaine-framework-python
cocaine/detail/secadaptor.py
TVM.fetch_token
def fetch_token(self): """Gains token from secure backend service. :return: Token formatted for Cocaine protocol header. """ grant_type = 'client_credentials' channel = yield self._tvm.ticket_full( self._client_id, self._client_secret, grant_type, {}) ticket = yield channel.rx.get() raise gen.Return(self._make_token(ticket))
python
def fetch_token(self): """Gains token from secure backend service. :return: Token formatted for Cocaine protocol header. """ grant_type = 'client_credentials' channel = yield self._tvm.ticket_full( self._client_id, self._client_secret, grant_type, {}) ticket = yield channel.rx.get() raise gen.Return(self._make_token(ticket))
[ "def", "fetch_token", "(", "self", ")", ":", "grant_type", "=", "'client_credentials'", "channel", "=", "yield", "self", ".", "_tvm", ".", "ticket_full", "(", "self", ".", "_client_id", ",", "self", ".", "_client_secret", ",", "grant_type", ",", "{", "}", ")", "ticket", "=", "yield", "channel", ".", "rx", ".", "get", "(", ")", "raise", "gen", ".", "Return", "(", "self", ".", "_make_token", "(", "ticket", ")", ")" ]
Gains token from secure backend service. :return: Token formatted for Cocaine protocol header.
[ "Gains", "token", "from", "secure", "backend", "service", "." ]
train
https://github.com/cocaine/cocaine-framework-python/blob/d8a30074b6338bac4389eb996e00d404338115e4/cocaine/detail/secadaptor.py#L54-L65
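fetch_token is written in Tornado's generator-coroutine style (raise gen.Return(...) delivers the result), so a caller consumes it with yield inside another coroutine; a sketch assuming the method is decorated as a coroutine in the full source and that tvm is an already-constructed TVM instance:

    from tornado import gen, ioloop

    @gen.coroutine
    def show_token(tvm):
        token = yield tvm.fetch_token()  # resolves to the value passed to gen.Return
        print("token for the Cocaine header:", token)

    # ioloop.IOLoop.current().run_sync(lambda: show_token(tvm))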
cocaine/cocaine-framework-python
cocaine/detail/secadaptor.py
SecureServiceFabric.make_secure_adaptor
def make_secure_adaptor(service, mod, client_id, client_secret, tok_update_sec=None): """ :param service: Service to wrap in. :param mod: Name (type) of token refresh backend. :param client_id: Client identifier. :param client_secret: Client secret. :param tok_update_sec: Token update interval in seconds. """ if mod == 'TVM': return SecureServiceAdaptor(service, TVM(client_id, client_secret), tok_update_sec) return SecureServiceAdaptor(service, Promiscuous(), tok_update_sec)
python
def make_secure_adaptor(service, mod, client_id, client_secret, tok_update_sec=None): """ :param service: Service to wrap in. :param mod: Name (type) of token refresh backend. :param client_id: Client identifier. :param client_secret: Client secret. :param tok_update_sec: Token update interval in seconds. """ if mod == 'TVM': return SecureServiceAdaptor(service, TVM(client_id, client_secret), tok_update_sec) return SecureServiceAdaptor(service, Promiscuous(), tok_update_sec)
[ "def", "make_secure_adaptor", "(", "service", ",", "mod", ",", "client_id", ",", "client_secret", ",", "tok_update_sec", "=", "None", ")", ":", "if", "mod", "==", "'TVM'", ":", "return", "SecureServiceAdaptor", "(", "service", ",", "TVM", "(", "client_id", ",", "client_secret", ")", ",", "tok_update_sec", ")", "return", "SecureServiceAdaptor", "(", "service", ",", "Promiscuous", "(", ")", ",", "tok_update_sec", ")" ]
:param service: Service to wrap in. :param mod: Name (type) of token refresh backend. :param client_id: Client identifier. :param client_secret: Client secret. :param tok_update_sec: Token update interval in seconds.
[ ":", "param", "service", ":", "Service", "to", "wrap", "in", ".", ":", "param", "mod", ":", "Name", "(", "type", ")", "of", "token", "refresh", "backend", ".", ":", "param", "client_id", ":", "Client", "identifier", ".", ":", "param", "client_secret", ":", "Client", "secret", ".", ":", "param", "tok_update_sec", ":", "Token", "update", "interval", "in", "seconds", "." ]
train
https://github.com/cocaine/cocaine-framework-python/blob/d8a30074b6338bac4389eb996e00d404338115e4/cocaine/detail/secadaptor.py#L131-L142
wdecoster/nanoget
nanoget/extraction_functions.py
process_summary
def process_summary(summaryfile, **kwargs): """Extracting information from an albacore summary file. Only reads which have a >0 length are returned. The fields below may or may not exist, depending on the type of sequencing performed. Fields 1-14 are for 1D sequencing. Fields 1-23 for 2D sequencing. Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing Fields 28-38 for barcoded workflows 1 filename 2 read_id 3 run_id 4 channel 5 start_time 6 duration 7 num_events 8 template_start 9 num_events_template 10 template_duration 11 num_called_template 12 sequence_length_template 13 mean_qscore_template 14 strand_score_template 15 complement_start 16 num_events_complement 17 complement_duration 18 num_called_complement 19 sequence_length_complement 20 mean_qscore_complement 21 strand_score_complement 22 sequence_length_2d 23 mean_qscore_2d 24 filename1 25 filename2 26 read_id1 27 read_id2 28 barcode_arrangement 29 barcode_score 30 barcode_full_arrangement 31 front_score 32 rear_score 33 front_begin_index 34 front_foundseq_length 35 rear_end_index 36 rear_foundseq_length 37 kit 38 variant """ logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format( summaryfile, kwargs["readtype"])) ut.check_existance(summaryfile) if kwargs["readtype"] == "1D": cols = ["read_id", "run_id", "channel", "start_time", "duration", "sequence_length_template", "mean_qscore_template"] elif kwargs["readtype"] in ["2D", "1D2"]: cols = ["read_id", "run_id", "channel", "start_time", "duration", "sequence_length_2d", "mean_qscore_2d"] if kwargs["barcoded"]: cols.append("barcode_arrangement") logging.info("Nanoget: Extracting metrics per barcode.") try: datadf = pd.read_csv( filepath_or_buffer=summaryfile, sep="\t", usecols=cols, ) except ValueError: logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format( summaryfile, ', '.join(cols))) sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format( summaryfile, ', '.join(cols))) if kwargs["barcoded"]: datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals", "barcode"] else: datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"] logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile)) return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
python
def process_summary(summaryfile, **kwargs): """Extracting information from an albacore summary file. Only reads which have a >0 length are returned. The fields below may or may not exist, depending on the type of sequencing performed. Fields 1-14 are for 1D sequencing. Fields 1-23 for 2D sequencing. Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing Fields 28-38 for barcoded workflows 1 filename 2 read_id 3 run_id 4 channel 5 start_time 6 duration 7 num_events 8 template_start 9 num_events_template 10 template_duration 11 num_called_template 12 sequence_length_template 13 mean_qscore_template 14 strand_score_template 15 complement_start 16 num_events_complement 17 complement_duration 18 num_called_complement 19 sequence_length_complement 20 mean_qscore_complement 21 strand_score_complement 22 sequence_length_2d 23 mean_qscore_2d 24 filename1 25 filename2 26 read_id1 27 read_id2 28 barcode_arrangement 29 barcode_score 30 barcode_full_arrangement 31 front_score 32 rear_score 33 front_begin_index 34 front_foundseq_length 35 rear_end_index 36 rear_foundseq_length 37 kit 38 variant """ logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format( summaryfile, kwargs["readtype"])) ut.check_existance(summaryfile) if kwargs["readtype"] == "1D": cols = ["read_id", "run_id", "channel", "start_time", "duration", "sequence_length_template", "mean_qscore_template"] elif kwargs["readtype"] in ["2D", "1D2"]: cols = ["read_id", "run_id", "channel", "start_time", "duration", "sequence_length_2d", "mean_qscore_2d"] if kwargs["barcoded"]: cols.append("barcode_arrangement") logging.info("Nanoget: Extracting metrics per barcode.") try: datadf = pd.read_csv( filepath_or_buffer=summaryfile, sep="\t", usecols=cols, ) except ValueError: logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format( summaryfile, ', '.join(cols))) sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format( summaryfile, ', '.join(cols))) if kwargs["barcoded"]: datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals", "barcode"] else: datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"] logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile)) return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
[ "def", "process_summary", "(", "summaryfile", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "info", "(", "\"Nanoget: Collecting metrics from summary file {} for {} sequencing\"", ".", "format", "(", "summaryfile", ",", "kwargs", "[", "\"readtype\"", "]", ")", ")", "ut", ".", "check_existance", "(", "summaryfile", ")", "if", "kwargs", "[", "\"readtype\"", "]", "==", "\"1D\"", ":", "cols", "=", "[", "\"read_id\"", ",", "\"run_id\"", ",", "\"channel\"", ",", "\"start_time\"", ",", "\"duration\"", ",", "\"sequence_length_template\"", ",", "\"mean_qscore_template\"", "]", "elif", "kwargs", "[", "\"readtype\"", "]", "in", "[", "\"2D\"", ",", "\"1D2\"", "]", ":", "cols", "=", "[", "\"read_id\"", ",", "\"run_id\"", ",", "\"channel\"", ",", "\"start_time\"", ",", "\"duration\"", ",", "\"sequence_length_2d\"", ",", "\"mean_qscore_2d\"", "]", "if", "kwargs", "[", "\"barcoded\"", "]", ":", "cols", ".", "append", "(", "\"barcode_arrangement\"", ")", "logging", ".", "info", "(", "\"Nanoget: Extracting metrics per barcode.\"", ")", "try", ":", "datadf", "=", "pd", ".", "read_csv", "(", "filepath_or_buffer", "=", "summaryfile", ",", "sep", "=", "\"\\t\"", ",", "usecols", "=", "cols", ",", ")", "except", "ValueError", ":", "logging", ".", "error", "(", "\"Nanoget: did not find expected columns in summary file {}:\\n {}\"", ".", "format", "(", "summaryfile", ",", "', '", ".", "join", "(", "cols", ")", ")", ")", "sys", ".", "exit", "(", "\"ERROR: expected columns in summary file {} not found:\\n {}\"", ".", "format", "(", "summaryfile", ",", "', '", ".", "join", "(", "cols", ")", ")", ")", "if", "kwargs", "[", "\"barcoded\"", "]", ":", "datadf", ".", "columns", "=", "[", "\"readIDs\"", ",", "\"runIDs\"", ",", "\"channelIDs\"", ",", "\"time\"", ",", "\"duration\"", ",", "\"lengths\"", ",", "\"quals\"", ",", "\"barcode\"", "]", "else", ":", "datadf", ".", "columns", "=", "[", "\"readIDs\"", ",", "\"runIDs\"", ",", "\"channelIDs\"", ",", "\"time\"", ",", "\"duration\"", ",", "\"lengths\"", ",", "\"quals\"", "]", "logging", ".", "info", "(", "\"Nanoget: Finished collecting statistics from summary file {}\"", ".", "format", "(", "summaryfile", ")", ")", "return", "ut", ".", "reduce_memory_usage", "(", "datadf", ".", "loc", "[", "datadf", "[", "\"lengths\"", "]", "!=", "0", "]", ".", "copy", "(", ")", ")" ]
Extracting information from an albacore summary file. Only reads which have a >0 length are returned. The fields below may or may not exist, depending on the type of sequencing performed. Fields 1-14 are for 1D sequencing. Fields 1-23 for 2D sequencing. Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing Fields 28-38 for barcoded workflows 1 filename 2 read_id 3 run_id 4 channel 5 start_time 6 duration 7 num_events 8 template_start 9 num_events_template 10 template_duration 11 num_called_template 12 sequence_length_template 13 mean_qscore_template 14 strand_score_template 15 complement_start 16 num_events_complement 17 complement_duration 18 num_called_complement 19 sequence_length_complement 20 mean_qscore_complement 21 strand_score_complement 22 sequence_length_2d 23 mean_qscore_2d 24 filename1 25 filename2 26 read_id1 27 read_id2 28 barcode_arrangement 29 barcode_score 30 barcode_full_arrangement 31 front_score 32 rear_score 33 front_begin_index 34 front_foundseq_length 35 rear_end_index 36 rear_foundseq_length 37 kit 38 variant
[ "Extracting", "information", "from", "an", "albacore", "summary", "file", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L12-L90
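A usage sketch with an illustrative file name: readtype and barcoded are the two keyword arguments the function actually reads, so a plain 1D summary loads like this and comes back as a pandas DataFrame with the renamed columns shown above.

    import nanoget.extraction_functions as ef

    df = ef.process_summary("sequencing_summary.txt",
                            readtype="1D",    # selects the 1D column set
                            barcoded=False)   # no barcode_arrangement column
    print(df[["lengths", "quals"]].describe())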
wdecoster/nanoget
nanoget/extraction_functions.py
check_bam
def check_bam(bam, samtype="bam"): """Check if bam file is valid. Bam file should: - exists - has an index (create if necessary) - is sorted by coordinate - has at least one mapped read """ ut.check_existance(bam) samfile = pysam.AlignmentFile(bam, "rb") if not samfile.has_index(): pysam.index(bam) samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index logging.info("Nanoget: No index for bam file could be found, created index.") if not samfile.header['HD']['SO'] == 'coordinate': logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam)) sys.exit("Please use a bam file sorted by coordinate.") if samtype == "bam": logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format( bam, samfile.mapped, samfile.unmapped)) if samfile.mapped == 0: logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam)) sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam)) return samfile
python
def check_bam(bam, samtype="bam"): """Check if bam file is valid. Bam file should: - exists - has an index (create if necessary) - is sorted by coordinate - has at least one mapped read """ ut.check_existance(bam) samfile = pysam.AlignmentFile(bam, "rb") if not samfile.has_index(): pysam.index(bam) samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index logging.info("Nanoget: No index for bam file could be found, created index.") if not samfile.header['HD']['SO'] == 'coordinate': logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam)) sys.exit("Please use a bam file sorted by coordinate.") if samtype == "bam": logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format( bam, samfile.mapped, samfile.unmapped)) if samfile.mapped == 0: logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam)) sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam)) return samfile
[ "def", "check_bam", "(", "bam", ",", "samtype", "=", "\"bam\"", ")", ":", "ut", ".", "check_existance", "(", "bam", ")", "samfile", "=", "pysam", ".", "AlignmentFile", "(", "bam", ",", "\"rb\"", ")", "if", "not", "samfile", ".", "has_index", "(", ")", ":", "pysam", ".", "index", "(", "bam", ")", "samfile", "=", "pysam", ".", "AlignmentFile", "(", "bam", ",", "\"rb\"", ")", "# Need to reload the samfile after creating index", "logging", ".", "info", "(", "\"Nanoget: No index for bam file could be found, created index.\"", ")", "if", "not", "samfile", ".", "header", "[", "'HD'", "]", "[", "'SO'", "]", "==", "'coordinate'", ":", "logging", ".", "error", "(", "\"Nanoget: Bam file {} not sorted by coordinate!.\"", ".", "format", "(", "bam", ")", ")", "sys", ".", "exit", "(", "\"Please use a bam file sorted by coordinate.\"", ")", "if", "samtype", "==", "\"bam\"", ":", "logging", ".", "info", "(", "\"Nanoget: Bam file {} contains {} mapped and {} unmapped reads.\"", ".", "format", "(", "bam", ",", "samfile", ".", "mapped", ",", "samfile", ".", "unmapped", ")", ")", "if", "samfile", ".", "mapped", "==", "0", ":", "logging", ".", "error", "(", "\"Nanoget: Bam file {} does not contain aligned reads.\"", ".", "format", "(", "bam", ")", ")", "sys", ".", "exit", "(", "\"FATAL: not a single read was mapped in bam file {}\"", ".", "format", "(", "bam", ")", ")", "return", "samfile" ]
Check if bam file is valid. Bam file should: - exist - have an index (create if necessary) - be sorted by coordinate - have at least one mapped read
[ "Check", "if", "bam", "file", "is", "valid", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L93-L117
wdecoster/nanoget
nanoget/extraction_functions.py
process_ubam
def process_ubam(bam, **kwargs): """Extracting metrics from unaligned bam format Extracting lengths """ logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam)) samfile = pysam.AlignmentFile(bam, "rb", check_sq=False) if not samfile.has_index(): pysam.index(bam) # Need to reload the samfile after creating index samfile = pysam.AlignmentFile(bam, "rb") logging.info("Nanoget: No index for bam file could be found, created index.") datadf = pd.DataFrame( data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length) for read in samfile.fetch(until_eof=True)], columns=["readIDs", "quals", "lengths"]) \ .dropna(axis='columns', how='all') \ .dropna(axis='index', how='any') logging.info("Nanoget: ubam {} contains {} reads.".format( bam, datadf["lengths"].size)) return ut.reduce_memory_usage(datadf)
python
def process_ubam(bam, **kwargs): """Extracting metrics from unaligned bam format Extracting lengths """ logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam)) samfile = pysam.AlignmentFile(bam, "rb", check_sq=False) if not samfile.has_index(): pysam.index(bam) # Need to reload the samfile after creating index samfile = pysam.AlignmentFile(bam, "rb") logging.info("Nanoget: No index for bam file could be found, created index.") datadf = pd.DataFrame( data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length) for read in samfile.fetch(until_eof=True)], columns=["readIDs", "quals", "lengths"]) \ .dropna(axis='columns', how='all') \ .dropna(axis='index', how='any') logging.info("Nanoget: ubam {} contains {} reads.".format( bam, datadf["lengths"].size)) return ut.reduce_memory_usage(datadf)
[ "def", "process_ubam", "(", "bam", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "info", "(", "\"Nanoget: Starting to collect statistics from ubam file {}.\"", ".", "format", "(", "bam", ")", ")", "samfile", "=", "pysam", ".", "AlignmentFile", "(", "bam", ",", "\"rb\"", ",", "check_sq", "=", "False", ")", "if", "not", "samfile", ".", "has_index", "(", ")", ":", "pysam", ".", "index", "(", "bam", ")", "# Need to reload the samfile after creating index", "samfile", "=", "pysam", ".", "AlignmentFile", "(", "bam", ",", "\"rb\"", ")", "logging", ".", "info", "(", "\"Nanoget: No index for bam file could be found, created index.\"", ")", "datadf", "=", "pd", ".", "DataFrame", "(", "data", "=", "[", "(", "read", ".", "query_name", ",", "nanomath", ".", "ave_qual", "(", "read", ".", "query_qualities", ")", ",", "read", ".", "query_length", ")", "for", "read", "in", "samfile", ".", "fetch", "(", "until_eof", "=", "True", ")", "]", ",", "columns", "=", "[", "\"readIDs\"", ",", "\"quals\"", ",", "\"lengths\"", "]", ")", ".", "dropna", "(", "axis", "=", "'columns'", ",", "how", "=", "'all'", ")", ".", "dropna", "(", "axis", "=", "'index'", ",", "how", "=", "'any'", ")", "logging", ".", "info", "(", "\"Nanoget: ubam {} contains {} reads.\"", ".", "format", "(", "bam", ",", "datadf", "[", "\"lengths\"", "]", ".", "size", ")", ")", "return", "ut", ".", "reduce_memory_usage", "(", "datadf", ")" ]
Extracting metrics from unaligned bam format Extracting lengths
[ "Extracting", "metrics", "from", "unaligned", "bam", "format", "Extracting", "lengths" ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L120-L139
wdecoster/nanoget
nanoget/extraction_functions.py
process_bam
def process_bam(bam, **kwargs): """Combines metrics from bam after extraction. Processing function: calls pool of worker functions to extract from a bam file the following metrics: -lengths -aligned lengths -qualities -aligned qualities -mapping qualities -edit distances to the reference genome scaled by read length Returned in a pandas DataFrame """ logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam)) samfile = check_bam(bam) chromosomes = samfile.references params = zip([bam] * len(chromosomes), chromosomes) with cfutures.ProcessPoolExecutor() as executor: datadf = pd.DataFrame( data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist], columns=["readIDs", "quals", "aligned_quals", "lengths", "aligned_lengths", "mapQ", "percentIdentity"]) \ .dropna(axis='columns', how='all') \ .dropna(axis='index', how='any') logging.info("Nanoget: bam {} contains {} primary alignments.".format( bam, datadf["lengths"].size)) return ut.reduce_memory_usage(datadf)
python
def process_bam(bam, **kwargs): """Combines metrics from bam after extraction. Processing function: calls pool of worker functions to extract from a bam file the following metrics: -lengths -aligned lengths -qualities -aligned qualities -mapping qualities -edit distances to the reference genome scaled by read length Returned in a pandas DataFrame """ logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam)) samfile = check_bam(bam) chromosomes = samfile.references params = zip([bam] * len(chromosomes), chromosomes) with cfutures.ProcessPoolExecutor() as executor: datadf = pd.DataFrame( data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist], columns=["readIDs", "quals", "aligned_quals", "lengths", "aligned_lengths", "mapQ", "percentIdentity"]) \ .dropna(axis='columns', how='all') \ .dropna(axis='index', how='any') logging.info("Nanoget: bam {} contains {} primary alignments.".format( bam, datadf["lengths"].size)) return ut.reduce_memory_usage(datadf)
[ "def", "process_bam", "(", "bam", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "info", "(", "\"Nanoget: Starting to collect statistics from bam file {}.\"", ".", "format", "(", "bam", ")", ")", "samfile", "=", "check_bam", "(", "bam", ")", "chromosomes", "=", "samfile", ".", "references", "params", "=", "zip", "(", "[", "bam", "]", "*", "len", "(", "chromosomes", ")", ",", "chromosomes", ")", "with", "cfutures", ".", "ProcessPoolExecutor", "(", ")", "as", "executor", ":", "datadf", "=", "pd", ".", "DataFrame", "(", "data", "=", "[", "res", "for", "sublist", "in", "executor", ".", "map", "(", "extract_from_bam", ",", "params", ")", "for", "res", "in", "sublist", "]", ",", "columns", "=", "[", "\"readIDs\"", ",", "\"quals\"", ",", "\"aligned_quals\"", ",", "\"lengths\"", ",", "\"aligned_lengths\"", ",", "\"mapQ\"", ",", "\"percentIdentity\"", "]", ")", ".", "dropna", "(", "axis", "=", "'columns'", ",", "how", "=", "'all'", ")", ".", "dropna", "(", "axis", "=", "'index'", ",", "how", "=", "'any'", ")", "logging", ".", "info", "(", "\"Nanoget: bam {} contains {} primary alignments.\"", ".", "format", "(", "bam", ",", "datadf", "[", "\"lengths\"", "]", ".", "size", ")", ")", "return", "ut", ".", "reduce_memory_usage", "(", "datadf", ")" ]
Combines metrics from bam after extraction. Processing function: calls pool of worker functions to extract from a bam file the following metrics: -lengths -aligned lengths -qualities -aligned qualities -mapping qualities -edit distances to the reference genome scaled by read length Returned in a pandas DataFrame
[ "Combines", "metrics", "from", "bam", "after", "extraction", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L142-L168
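A usage sketch, assuming a coordinate-sorted BAM with an index (check_bam above creates one if missing); the illustrative file name aside, the column names come straight from the code:

    import nanoget.extraction_functions as ef

    df = ef.process_bam("alignments.sorted.bam")
    # percentIdentity is derived from the NM/MD tags, see get_pID below
    print(df[["lengths", "aligned_lengths", "percentIdentity"]].head())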
wdecoster/nanoget
nanoget/extraction_functions.py
extract_from_bam
def extract_from_bam(params): """Extracts metrics from bam. Worker function per chromosome loop over a bam file and create list with tuples containing metrics: -qualities -aligned qualities -lengths -aligned lengths -mapping qualities -edit distances to the reference genome scaled by read length """ bam, chromosome = params samfile = pysam.AlignmentFile(bam, "rb") return [ (read.query_name, nanomath.ave_qual(read.query_qualities), nanomath.ave_qual(read.query_alignment_qualities), read.query_length, read.query_alignment_length, read.mapping_quality, get_pID(read)) for read in samfile.fetch(reference=chromosome, multiple_iterators=True) if not read.is_secondary]
python
def extract_from_bam(params): """Extracts metrics from bam. Worker function per chromosome loop over a bam file and create list with tuples containing metrics: -qualities -aligned qualities -lengths -aligned lengths -mapping qualities -edit distances to the reference genome scaled by read length """ bam, chromosome = params samfile = pysam.AlignmentFile(bam, "rb") return [ (read.query_name, nanomath.ave_qual(read.query_qualities), nanomath.ave_qual(read.query_alignment_qualities), read.query_length, read.query_alignment_length, read.mapping_quality, get_pID(read)) for read in samfile.fetch(reference=chromosome, multiple_iterators=True) if not read.is_secondary]
[ "def", "extract_from_bam", "(", "params", ")", ":", "bam", ",", "chromosome", "=", "params", "samfile", "=", "pysam", ".", "AlignmentFile", "(", "bam", ",", "\"rb\"", ")", "return", "[", "(", "read", ".", "query_name", ",", "nanomath", ".", "ave_qual", "(", "read", ".", "query_qualities", ")", ",", "nanomath", ".", "ave_qual", "(", "read", ".", "query_alignment_qualities", ")", ",", "read", ".", "query_length", ",", "read", ".", "query_alignment_length", ",", "read", ".", "mapping_quality", ",", "get_pID", "(", "read", ")", ")", "for", "read", "in", "samfile", ".", "fetch", "(", "reference", "=", "chromosome", ",", "multiple_iterators", "=", "True", ")", "if", "not", "read", ".", "is_secondary", "]" ]
Extracts metrics from bam. Worker function per chromosome loop over a bam file and create list with tuples containing metrics: -qualities -aligned qualities -lengths -aligned lengths -mapping qualities -edit distances to the reference genome scaled by read length
[ "Extracts", "metrics", "from", "bam", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L200-L223
wdecoster/nanoget
nanoget/extraction_functions.py
get_pID
def get_pID(read): """Return the percent identity of a read. based on the NM tag if present, if not calculate from MD tag and CIGAR string read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L """ try: return 100 * (1 - read.get_tag("NM") / read.query_alignment_length) except KeyError: try: return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples)) / read.query_alignment_length) except KeyError: return None except ZeroDivisionError: return None
python
def get_pID(read): """Return the percent identity of a read. based on the NM tag if present, if not calculate from MD tag and CIGAR string read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L """ try: return 100 * (1 - read.get_tag("NM") / read.query_alignment_length) except KeyError: try: return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples)) / read.query_alignment_length) except KeyError: return None except ZeroDivisionError: return None
[ "def", "get_pID", "(", "read", ")", ":", "try", ":", "return", "100", "*", "(", "1", "-", "read", ".", "get_tag", "(", "\"NM\"", ")", "/", "read", ".", "query_alignment_length", ")", "except", "KeyError", ":", "try", ":", "return", "100", "*", "(", "1", "-", "(", "parse_MD", "(", "read", ".", "get_tag", "(", "\"MD\"", ")", ")", "+", "parse_CIGAR", "(", "read", ".", "cigartuples", ")", ")", "/", "read", ".", "query_alignment_length", ")", "except", "KeyError", ":", "return", "None", "except", "ZeroDivisionError", ":", "return", "None" ]
Return the percent identity of a read. based on the NM tag if present, if not calculate from MD tag and CIGAR string read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
[ "Return", "the", "percent", "identity", "of", "a", "read", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L226-L243
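A worked example of the primary branch: with an NM tag (edit distance) of 50 over an aligned length of 1000 bases, percent identity is 100 * (1 - 50/1000).

    nm, aligned_len = 50, 1000
    pid = 100 * (1 - nm / aligned_len)
    print(pid)  # 95.0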
wdecoster/nanoget
nanoget/extraction_functions.py
handle_compressed_input
def handle_compressed_input(inputfq, file_type="fastq"): """Return handles from compressed files according to extension. Check for which fastq input is presented and open a handle accordingly Can read from compressed files (gz, bz2, bgz) or uncompressed Relies on file extensions to recognize compression """ ut.check_existance(inputfq) if inputfq.endswith(('.gz', 'bgz')): import gzip logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq)) return gzip.open(inputfq, 'rt') elif inputfq.endswith('.bz2'): import bz2 logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq)) return bz2.open(inputfq, 'rt') elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')): return open(inputfq, 'r') else: logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq)) sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n' 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
python
def handle_compressed_input(inputfq, file_type="fastq"): """Return handles from compressed files according to extension. Check for which fastq input is presented and open a handle accordingly Can read from compressed files (gz, bz2, bgz) or uncompressed Relies on file extensions to recognize compression """ ut.check_existance(inputfq) if inputfq.endswith(('.gz', 'bgz')): import gzip logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq)) return gzip.open(inputfq, 'rt') elif inputfq.endswith('.bz2'): import bz2 logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq)) return bz2.open(inputfq, 'rt') elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')): return open(inputfq, 'r') else: logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq)) sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n' 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
[ "def", "handle_compressed_input", "(", "inputfq", ",", "file_type", "=", "\"fastq\"", ")", ":", "ut", ".", "check_existance", "(", "inputfq", ")", "if", "inputfq", ".", "endswith", "(", "(", "'.gz'", ",", "'bgz'", ")", ")", ":", "import", "gzip", "logging", ".", "info", "(", "\"Nanoget: Decompressing gzipped {} {}\"", ".", "format", "(", "file_type", ",", "inputfq", ")", ")", "return", "gzip", ".", "open", "(", "inputfq", ",", "'rt'", ")", "elif", "inputfq", ".", "endswith", "(", "'.bz2'", ")", ":", "import", "bz2", "logging", ".", "info", "(", "\"Nanoget: Decompressing bz2 compressed {} {}\"", ".", "format", "(", "file_type", ",", "inputfq", ")", ")", "return", "bz2", ".", "open", "(", "inputfq", ",", "'rt'", ")", "elif", "inputfq", ".", "endswith", "(", "(", "'.fastq'", ",", "'.fq'", ",", "'fasta'", ",", "'.fa'", ",", "'.fas'", ")", ")", ":", "return", "open", "(", "inputfq", ",", "'r'", ")", "else", ":", "logging", ".", "error", "(", "\"INPUT ERROR: Unrecognized file extension {}\"", ".", "format", "(", "inputfq", ")", ")", "sys", ".", "exit", "(", "'INPUT ERROR:\\nUnrecognized file extension in {}\\n'", "'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'", ".", "format", "(", "inputfq", ")", ")" ]
Return handles from compressed files according to extension. Check for which fastq input is presented and open a handle accordingly Can read from compressed files (gz, bz2, bgz) or uncompressed Relies on file extensions to recognize compression
[ "Return", "handles", "from", "compressed", "files", "according", "to", "extension", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L256-L277
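A usage sketch with an illustrative file name: because gzip.open and bz2.open are called with mode 'rt', the returned handle yields decoded text lines no matter the compression.

    from nanoget.extraction_functions import handle_compressed_input

    fh = handle_compressed_input("reads.fastq.gz")  # transparently gunzipped
    print(fh.readline().rstrip())                   # first header line, e.g. "@read_id ..."
    fh.close()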
wdecoster/nanoget
nanoget/extraction_functions.py
process_fasta
def process_fasta(fasta, **kwargs): """Combine metrics extracted from a fasta file.""" logging.info("Nanoget: Starting to collect statistics from a fasta file.") inputfasta = handle_compressed_input(fasta, file_type="fasta") return ut.reduce_memory_usage(pd.DataFrame( data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")], columns=["lengths"] ).dropna())
python
def process_fasta(fasta, **kwargs): """Combine metrics extracted from a fasta file.""" logging.info("Nanoget: Starting to collect statistics from a fasta file.") inputfasta = handle_compressed_input(fasta, file_type="fasta") return ut.reduce_memory_usage(pd.DataFrame( data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")], columns=["lengths"] ).dropna())
[ "def", "process_fasta", "(", "fasta", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "info", "(", "\"Nanoget: Starting to collect statistics from a fasta file.\"", ")", "inputfasta", "=", "handle_compressed_input", "(", "fasta", ",", "file_type", "=", "\"fasta\"", ")", "return", "ut", ".", "reduce_memory_usage", "(", "pd", ".", "DataFrame", "(", "data", "=", "[", "len", "(", "rec", ")", "for", "rec", "in", "SeqIO", ".", "parse", "(", "inputfasta", ",", "\"fasta\"", ")", "]", ",", "columns", "=", "[", "\"lengths\"", "]", ")", ".", "dropna", "(", ")", ")" ]
Combine metrics extracted from a fasta file.
[ "Combine", "metrics", "extracted", "from", "a", "fasta", "file", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L280-L287
wdecoster/nanoget
nanoget/extraction_functions.py
process_fastq_plain
def process_fastq_plain(fastq, **kwargs): """Combine metrics extracted from a fastq file.""" logging.info("Nanoget: Starting to collect statistics from plain fastq file.") inputfastq = handle_compressed_input(fastq) return ut.reduce_memory_usage(pd.DataFrame( data=[res for res in extract_from_fastq(inputfastq) if res], columns=["quals", "lengths"] ).dropna())
python
def process_fastq_plain(fastq, **kwargs): """Combine metrics extracted from a fastq file.""" logging.info("Nanoget: Starting to collect statistics from plain fastq file.") inputfastq = handle_compressed_input(fastq) return ut.reduce_memory_usage(pd.DataFrame( data=[res for res in extract_from_fastq(inputfastq) if res], columns=["quals", "lengths"] ).dropna())
[ "def", "process_fastq_plain", "(", "fastq", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "info", "(", "\"Nanoget: Starting to collect statistics from plain fastq file.\"", ")", "inputfastq", "=", "handle_compressed_input", "(", "fastq", ")", "return", "ut", ".", "reduce_memory_usage", "(", "pd", ".", "DataFrame", "(", "data", "=", "[", "res", "for", "res", "in", "extract_from_fastq", "(", "inputfastq", ")", "if", "res", "]", ",", "columns", "=", "[", "\"quals\"", ",", "\"lengths\"", "]", ")", ".", "dropna", "(", ")", ")" ]
Combine metrics extracted from a fastq file.
[ "Combine", "metrics", "extracted", "from", "a", "fastq", "file", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L290-L297
wdecoster/nanoget
nanoget/extraction_functions.py
extract_from_fastq
def extract_from_fastq(fq): """Extract metrics from a fastq file. Return average quality and read length """ for rec in SeqIO.parse(fq, "fastq"): yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
python
def extract_from_fastq(fq): """Extract metrics from a fastq file. Return average quality and read length """ for rec in SeqIO.parse(fq, "fastq"): yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
[ "def", "extract_from_fastq", "(", "fq", ")", ":", "for", "rec", "in", "SeqIO", ".", "parse", "(", "fq", ",", "\"fastq\"", ")", ":", "yield", "nanomath", ".", "ave_qual", "(", "rec", ".", "letter_annotations", "[", "\"phred_quality\"", "]", ")", ",", "len", "(", "rec", ")" ]
Extract metrics from a fastq file. Return average quality and read length
[ "Extract", "metrics", "from", "a", "fastq", "file", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L300-L306
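nanomath.ave_qual itself is not shown in this excerpt; the conventional way to average Phred scores, and presumably what it implements, is to average in error-probability space rather than over the scores directly. A sketch of that definition only:

    from math import log10

    def ave_qual_sketch(quals):
        # Phred q -> error probability 10**(-q/10); average; convert back
        probs = [10 ** (-q / 10) for q in quals]
        return -10 * log10(sum(probs) / len(probs))

    print(round(ave_qual_sketch([10, 20, 30]), 2))  # 14.32, well below the arithmetic mean of 20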
wdecoster/nanoget
nanoget/extraction_functions.py
stream_fastq_full
def stream_fastq_full(fastq, threads): """Generator for returning metrics extracted from fastq. Extract from a fastq file: -readname -average and median quality -read_length """ logging.info("Nanoget: Starting to collect full metrics from plain fastq file.") inputfastq = handle_compressed_input(fastq) with cfutures.ProcessPoolExecutor(max_workers=threads) as executor: for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")): yield results logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
python
def stream_fastq_full(fastq, threads): """Generator for returning metrics extracted from fastq. Extract from a fastq file: -readname -average and median quality -read_length """ logging.info("Nanoget: Starting to collect full metrics from plain fastq file.") inputfastq = handle_compressed_input(fastq) with cfutures.ProcessPoolExecutor(max_workers=threads) as executor: for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")): yield results logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
[ "def", "stream_fastq_full", "(", "fastq", ",", "threads", ")", ":", "logging", ".", "info", "(", "\"Nanoget: Starting to collect full metrics from plain fastq file.\"", ")", "inputfastq", "=", "handle_compressed_input", "(", "fastq", ")", "with", "cfutures", ".", "ProcessPoolExecutor", "(", "max_workers", "=", "threads", ")", "as", "executor", ":", "for", "results", "in", "executor", ".", "map", "(", "extract_all_from_fastq", ",", "SeqIO", ".", "parse", "(", "inputfastq", ",", "\"fastq\"", ")", ")", ":", "yield", "results", "logging", ".", "info", "(", "\"Nanoget: Finished collecting statistics from plain fastq file.\"", ")" ]
Generator for returning metrics extracted from fastq. Extract from a fastq file: -readname -average and median quality -read_length
[ "Generator", "for", "returning", "metrics", "extracted", "from", "fastq", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L309-L322
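Each yielded item is the 4-tuple built by extract_all_from_fastq below (identifier, length, mean quality, median quality), so consuming the generator is a plain loop; the file name is illustrative.

    from nanoget.extraction_functions import stream_fastq_full

    for read_id, length, mean_q, median_q in stream_fastq_full("reads.fastq", threads=4):
        if length > 10000:
            print(read_id, length, mean_q, median_q)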
wdecoster/nanoget
nanoget/extraction_functions.py
extract_all_from_fastq
def extract_all_from_fastq(rec): """Extract metrics from a fastq file. Return identifier, read length, average quality and median quality """ return (rec.id, len(rec), nanomath.ave_qual(rec.letter_annotations["phred_quality"]), nanomath.median_qual(rec.letter_annotations["phred_quality"]))
python
def extract_all_from_fastq(rec): """Extract metrics from a fastq file. Return identifier, read length, average quality and median quality """ return (rec.id, len(rec), nanomath.ave_qual(rec.letter_annotations["phred_quality"]), nanomath.median_qual(rec.letter_annotations["phred_quality"]))
[ "def", "extract_all_from_fastq", "(", "rec", ")", ":", "return", "(", "rec", ".", "id", ",", "len", "(", "rec", ")", ",", "nanomath", ".", "ave_qual", "(", "rec", ".", "letter_annotations", "[", "\"phred_quality\"", "]", ")", ",", "nanomath", ".", "median_qual", "(", "rec", ".", "letter_annotations", "[", "\"phred_quality\"", "]", ")", ")" ]
Extract metrics from a fastq file. Return identifier, read length, average quality and median quality
[ "Extract", "metrics", "from", "a", "fastq", "file", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L325-L333
wdecoster/nanoget
nanoget/extraction_functions.py
process_fastq_rich
def process_fastq_rich(fastq, **kwargs): """Extract metrics from a richer fastq file. Extract information from fastq files generated by albacore or MinKNOW, containing richer information in the header (key-value pairs) read=<int> [72] ch=<int> [159] start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 / RFC 3339 timestamp Z indicates UTC time, T is the delimiter between date expression and time expression dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse -> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc()) """ logging.info("Nanoget: Starting to collect statistics from rich fastq file.") inputfastq = handle_compressed_input(fastq) res = [] for record in SeqIO.parse(inputfastq, "fastq"): try: read_info = info_to_dict(record.description) res.append( (nanomath.ave_qual(record.letter_annotations["phred_quality"]), len(record), read_info["ch"], read_info["start_time"], read_info["runid"])) except KeyError: logging.error("Nanoget: keyerror when processing record {}".format(record.description)) sys.exit("Unexpected fastq identifier:\n{}\n\n \ missing one or more of expected fields 'ch', 'start_time' or 'runid'".format( record.description)) df = pd.DataFrame( data=res, columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna() df["channelIDs"] = df["channelIDs"].astype("int64") return ut.reduce_memory_usage(df)
python
def process_fastq_rich(fastq, **kwargs): """Extract metrics from a richer fastq file. Extract information from fastq files generated by albacore or MinKNOW, containing richer information in the header (key-value pairs) read=<int> [72] ch=<int> [159] start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 / RFC 3339 timestamp Z indicates UTC time, T is the delimiter between date expression and time expression dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse -> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc()) """ logging.info("Nanoget: Starting to collect statistics from rich fastq file.") inputfastq = handle_compressed_input(fastq) res = [] for record in SeqIO.parse(inputfastq, "fastq"): try: read_info = info_to_dict(record.description) res.append( (nanomath.ave_qual(record.letter_annotations["phred_quality"]), len(record), read_info["ch"], read_info["start_time"], read_info["runid"])) except KeyError: logging.error("Nanoget: keyerror when processing record {}".format(record.description)) sys.exit("Unexpected fastq identifier:\n{}\n\n \ missing one or more of expected fields 'ch', 'start_time' or 'runid'".format( record.description)) df = pd.DataFrame( data=res, columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna() df["channelIDs"] = df["channelIDs"].astype("int64") return ut.reduce_memory_usage(df)
[ "def", "process_fastq_rich", "(", "fastq", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "info", "(", "\"Nanoget: Starting to collect statistics from rich fastq file.\"", ")", "inputfastq", "=", "handle_compressed_input", "(", "fastq", ")", "res", "=", "[", "]", "for", "record", "in", "SeqIO", ".", "parse", "(", "inputfastq", ",", "\"fastq\"", ")", ":", "try", ":", "read_info", "=", "info_to_dict", "(", "record", ".", "description", ")", "res", ".", "append", "(", "(", "nanomath", ".", "ave_qual", "(", "record", ".", "letter_annotations", "[", "\"phred_quality\"", "]", ")", ",", "len", "(", "record", ")", ",", "read_info", "[", "\"ch\"", "]", ",", "read_info", "[", "\"start_time\"", "]", ",", "read_info", "[", "\"runid\"", "]", ")", ")", "except", "KeyError", ":", "logging", ".", "error", "(", "\"Nanoget: keyerror when processing record {}\"", ".", "format", "(", "record", ".", "description", ")", ")", "sys", ".", "exit", "(", "\"Unexpected fastq identifier:\\n{}\\n\\n \\\n missing one or more of expected fields 'ch', 'start_time' or 'runid'\"", ".", "format", "(", "record", ".", "description", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "data", "=", "res", ",", "columns", "=", "[", "\"quals\"", ",", "\"lengths\"", ",", "\"channelIDs\"", ",", "\"timestamp\"", ",", "\"runIDs\"", "]", ")", ".", "dropna", "(", ")", "df", "[", "\"channelIDs\"", "]", "=", "df", "[", "\"channelIDs\"", "]", ".", "astype", "(", "\"int64\"", ")", "return", "ut", ".", "reduce_memory_usage", "(", "df", ")" ]
Extract metrics from a richer fastq file. Extract information from fastq files generated by albacore or MinKNOW, containing richer information in the header (key-value pairs) read=<int> [72] ch=<int> [159] start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 / RFC 3339 timestamp Z indicates UTC time, T is the delimiter between date expression and time expression dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse -> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
[ "Extract", "metrics", "from", "a", "richer", "fastq", "file", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L341-L374
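The info_to_dict helper is used above but not shown in this excerpt; given the documented header layout (space-separated key=value pairs after the read id), it presumably reduces to something like this hypothetical stand-in:

    def info_to_dict_sketch(description):
        # "readid runid=a1b2 read=72 ch=159 start_time=2016-07-15T14:23:22Z" -> dict
        return dict(field.split("=", 1)
                    for field in description.split(" ") if "=" in field)

    d = info_to_dict_sketch("readid runid=a1b2 read=72 ch=159 start_time=2016-07-15T14:23:22Z")
    print(d["ch"], d["start_time"])  # 159 2016-07-15T14:23:22Z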
wdecoster/nanoget
nanoget/extraction_functions.py
readfq
def readfq(fp): """Generator function adapted from https://github.com/lh3/readfq.""" last = None # this is a buffer keeping the last unprocessed line while True: # mimic closure; is it a bad idea? if not last: # the first record or a record following a fastq for l in fp: # search for the start of the next record if l[0] in '>@': # fasta/q header line last = l[:-1] # save this line break if not last: break name, seqs, last = last[1:].partition(" ")[0], [], None for l in fp: # read the sequence if l[0] in '@+>': last = l[:-1] break seqs.append(l[:-1]) if not last or last[0] != '+': # this is a fasta record yield name, ''.join(seqs), None # yield a fasta record if not last: break else: # this is a fastq record seq, leng, seqs = ''.join(seqs), 0, [] for l in fp: # read the quality seqs.append(l[:-1]) leng += len(l) - 1 if leng >= len(seq): # have read enough quality last = None yield name, seq, ''.join(seqs) # yield a fastq record break if last: # reach EOF before reading enough quality yield name, seq, None # yield a fasta record instead break
python
def readfq(fp): """Generator function adapted from https://github.com/lh3/readfq.""" last = None # this is a buffer keeping the last unprocessed line while True: # mimic closure; is it a bad idea? if not last: # the first record or a record following a fastq for l in fp: # search for the start of the next record if l[0] in '>@': # fasta/q header line last = l[:-1] # save this line break if not last: break name, seqs, last = last[1:].partition(" ")[0], [], None for l in fp: # read the sequence if l[0] in '@+>': last = l[:-1] break seqs.append(l[:-1]) if not last or last[0] != '+': # this is a fasta record yield name, ''.join(seqs), None # yield a fasta record if not last: break else: # this is a fastq record seq, leng, seqs = ''.join(seqs), 0, [] for l in fp: # read the quality seqs.append(l[:-1]) leng += len(l) - 1 if leng >= len(seq): # have read enough quality last = None yield name, seq, ''.join(seqs) # yield a fastq record break if last: # reach EOF before reading enough quality yield name, seq, None # yield a fasta record instead break
[ "def", "readfq", "(", "fp", ")", ":", "last", "=", "None", "# this is a buffer keeping the last unprocessed line", "while", "True", ":", "# mimic closure; is it a bad idea?", "if", "not", "last", ":", "# the first record or a record following a fastq", "for", "l", "in", "fp", ":", "# search for the start of the next record", "if", "l", "[", "0", "]", "in", "'>@'", ":", "# fasta/q header line", "last", "=", "l", "[", ":", "-", "1", "]", "# save this line", "break", "if", "not", "last", ":", "break", "name", ",", "seqs", ",", "last", "=", "last", "[", "1", ":", "]", ".", "partition", "(", "\" \"", ")", "[", "0", "]", ",", "[", "]", ",", "None", "for", "l", "in", "fp", ":", "# read the sequence", "if", "l", "[", "0", "]", "in", "'@+>'", ":", "last", "=", "l", "[", ":", "-", "1", "]", "break", "seqs", ".", "append", "(", "l", "[", ":", "-", "1", "]", ")", "if", "not", "last", "or", "last", "[", "0", "]", "!=", "'+'", ":", "# this is a fasta record", "yield", "name", ",", "''", ".", "join", "(", "seqs", ")", ",", "None", "# yield a fasta record", "if", "not", "last", ":", "break", "else", ":", "# this is a fastq record", "seq", ",", "leng", ",", "seqs", "=", "''", ".", "join", "(", "seqs", ")", ",", "0", ",", "[", "]", "for", "l", "in", "fp", ":", "# read the quality", "seqs", ".", "append", "(", "l", "[", ":", "-", "1", "]", ")", "leng", "+=", "len", "(", "l", ")", "-", "1", "if", "leng", ">=", "len", "(", "seq", ")", ":", "# have read enough quality", "last", "=", "None", "yield", "name", ",", "seq", ",", "''", ".", "join", "(", "seqs", ")", "# yield a fastq record", "break", "if", "last", ":", "# reach EOF before reading enough quality", "yield", "name", ",", "seq", ",", "None", "# yield a fasta record instead", "break" ]
Generator function adapted from https://github.com/lh3/readfq.
[ "Generator", "function", "adapted", "from", "https", ":", "//", "github", ".", "com", "/", "lh3", "/", "readfq", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L377-L409
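readfq works on any iterable of lines and yields (name, sequence, qualities), with qualities None for FASTA records; a usage sketch with an illustrative file:

    from nanoget.extraction_functions import readfq

    with open("reads.fastq") as fh:
        for name, seq, qual in readfq(fh):
            kind = "fastq" if qual is not None else "fasta"
            print(name, len(seq), kind)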
wdecoster/nanoget
nanoget/extraction_functions.py
fq_minimal
def fq_minimal(fq): """Minimal fastq metrics extractor. Quickly parse a fasta/fastq file - but makes expectations on the file format There will be dragons if unexpected format is used Expects a fastq_rich format, but extracts only timestamp and length """ try: while True: time = next(fq)[1:].split(" ")[4][11:-1] length = len(next(fq)) next(fq) next(fq) yield time, length except StopIteration: yield None
python
def fq_minimal(fq): """Minimal fastq metrics extractor. Quickly parse a fasta/fastq file - but makes expectations on the file format There will be dragons if unexpected format is used Expects a fastq_rich format, but extracts only timestamp and length """ try: while True: time = next(fq)[1:].split(" ")[4][11:-1] length = len(next(fq)) next(fq) next(fq) yield time, length except StopIteration: yield None
[ "def", "fq_minimal", "(", "fq", ")", ":", "try", ":", "while", "True", ":", "time", "=", "next", "(", "fq", ")", "[", "1", ":", "]", ".", "split", "(", "\" \"", ")", "[", "4", "]", "[", "11", ":", "-", "1", "]", "length", "=", "len", "(", "next", "(", "fq", ")", ")", "next", "(", "fq", ")", "next", "(", "fq", ")", "yield", "time", ",", "length", "except", "StopIteration", ":", "yield", "None" ]
Minimal fastq metrics extractor. Quickly parse a fasta/fastq file - but makes expectations on the file format There will be dragons if unexpected format is used Expects a fastq_rich format, but extracts only timestamp and length
[ "Minimal", "fastq", "metrics", "extractor", "." ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L412-L427
wdecoster/nanoget
nanoget/extraction_functions.py
process_fastq_minimal
def process_fastq_minimal(fastq, **kwargs): """Swiftly extract minimal features (length and timestamp) from a rich fastq file""" infastq = handle_compressed_input(fastq) try: df = pd.DataFrame( data=[rec for rec in fq_minimal(infastq) if rec], columns=["timestamp", "lengths"] ) except IndexError: logging.error("Fatal: Incorrect file structure for fastq_minimal") sys.exit("Error: file does not match expected structure for fastq_minimal") return ut.reduce_memory_usage(df)
python
def process_fastq_minimal(fastq, **kwargs): """Swiftly extract minimal features (length and timestamp) from a rich fastq file""" infastq = handle_compressed_input(fastq) try: df = pd.DataFrame( data=[rec for rec in fq_minimal(infastq) if rec], columns=["timestamp", "lengths"] ) except IndexError: logging.error("Fatal: Incorrect file structure for fastq_minimal") sys.exit("Error: file does not match expected structure for fastq_minimal") return ut.reduce_memory_usage(df)
[ "def", "process_fastq_minimal", "(", "fastq", ",", "*", "*", "kwargs", ")", ":", "infastq", "=", "handle_compressed_input", "(", "fastq", ")", "try", ":", "df", "=", "pd", ".", "DataFrame", "(", "data", "=", "[", "rec", "for", "rec", "in", "fq_minimal", "(", "infastq", ")", "if", "rec", "]", ",", "columns", "=", "[", "\"timestamp\"", ",", "\"lengths\"", "]", ")", "except", "IndexError", ":", "logging", ".", "error", "(", "\"Fatal: Incorrect file structure for fastq_minimal\"", ")", "sys", ".", "exit", "(", "\"Error: file does not match expected structure for fastq_minimal\"", ")", "return", "ut", ".", "reduce_memory_usage", "(", "df", ")" ]
Swiftly extract minimal features (length and timestamp) from a rich fastq file
[ "Swiftly", "extract", "minimal", "features", "(", "length", "and", "timestamp", ")", "from", "a", "rich", "fastq", "file" ]
train
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L430-L441
LordDarkula/chess_py
chess_py/core/algebraic/converter.py
_get_piece
def _get_piece(string, index): """ Returns Piece subclass given index of piece. :type: string: str :type: index: int :raise: ValueError """ piece = string[index].strip() piece = piece.upper() piece_dict = {'R': Rook, 'P': Pawn, 'B': Bishop, 'N': Knight, 'Q': Queen, 'K': King} try: return piece_dict[piece] except KeyError: raise ValueError("Piece {} is invalid".format(piece))
python
def _get_piece(string, index): """ Returns Piece subclass given index of piece. :type: string: str :type: index: int :raise: ValueError """ piece = string[index].strip() piece = piece.upper() piece_dict = {'R': Rook, 'P': Pawn, 'B': Bishop, 'N': Knight, 'Q': Queen, 'K': King} try: return piece_dict[piece] except KeyError: raise ValueError("Piece {} is invalid".format(piece))
[ "def", "_get_piece", "(", "string", ",", "index", ")", ":", "piece", "=", "string", "[", "index", "]", ".", "strip", "(", ")", "piece", "=", "piece", ".", "upper", "(", ")", "piece_dict", "=", "{", "'R'", ":", "Rook", ",", "'P'", ":", "Pawn", ",", "'B'", ":", "Bishop", ",", "'N'", ":", "Knight", ",", "'Q'", ":", "Queen", ",", "'K'", ":", "King", "}", "try", ":", "return", "piece_dict", "[", "piece", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Piece {} is invalid\"", ".", "format", "(", "piece", ")", ")" ]
Returns Piece subclass given index of piece. :type: string: str :type: index: int :raise: ValueError
[ "Returns", "Piece", "subclass", "given", "index", "of", "piece", "." ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/converter.py#L24-L44
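A usage sketch: callers pass the raw algebraic string plus the index of the piece letter, and get back a Piece subclass (not an instance); anything outside RPBNQK raises ValueError.

    from chess_py.core.algebraic.converter import _get_piece

    print(_get_piece("Nf3", 0))  # -> the Knight class
    # _get_piece("exd5", 1)      # 'X' is not a piece letter -> ValueError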
LordDarkula/chess_py
chess_py/core/algebraic/converter.py
incomplete_alg
def incomplete_alg(alg_str, input_color, position): """ Converts a string written in short algebraic form into an incomplete move. These incomplete moves do not have the initial location specified and therefore cannot be used to update the board. IN order to fully utilize incomplete move, it must be run through ``make_legal()`` with the corresponding position. It is recommended to use ``short_alg()`` instead of this method because it returns a complete move. Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q :type: alg_str: str :type: input_color: Color """ edge_rank = 0 \ if input_color == color.white \ else 7 if alg_str is None or len(alg_str) <= 1: raise ValueError("algebraic string {} is invalid".format(alg_str)) # King-side castle if alg_str in ["00", "oo", "OO", "0-0", "o-o", "O-O"]: return Move(end_loc=Location(edge_rank, 6), piece=King(input_color, Location(edge_rank, 4)), status=notation_const.KING_SIDE_CASTLE, start_loc=Location(edge_rank, 4)) # Queen-side castle if alg_str in ["000", "ooo", "OOO", "0-0-0", "o-o-o", "O-O-O"]: return Move(end_loc=Location(edge_rank, 2), piece=King(input_color, Location(edge_rank, 4)), status=notation_const.QUEEN_SIDE_CASTLE, start_loc=Location(edge_rank, 4)) try: end_location = Location.from_string(alg_str[-2:]) except ValueError: end_location = Location.from_string(alg_str[-4:-2]) # Pawn movement if len(alg_str) == 2: possible_pawn = position.piece_at_square(end_location.shift_back(input_color)) if type(possible_pawn) is Pawn and \ possible_pawn.color == input_color: start_location = end_location.shift_back(input_color) else: start_location = end_location.shift_back(input_color, times=2) return Move(end_loc=end_location, piece=position.piece_at_square(start_location), status=notation_const.MOVEMENT, start_loc=start_location) # Non-pawn Piece movement if len(alg_str) == 3: possible_piece, start_location = _get_piece_start_location(end_location, input_color, _get_piece(alg_str, 0), position) return Move(end_loc=end_location, piece=possible_piece, status=notation_const.MOVEMENT, start_loc=start_location) # Multiple options (Capture or Piece movement with file specified) if len(alg_str) == 4: # Capture if alg_str[1].upper() == "X": # Pawn capture if not alg_str[0].isupper(): pawn_location = Location(end_location.rank, ord(alg_str[0]) - 97).shift_back(input_color) possible_pawn = position.piece_at_square(pawn_location) if type(possible_pawn) is Pawn and \ possible_pawn.color == input_color: en_passant_pawn = position.piece_at_square(end_location.shift_back(input_color)) if type(en_passant_pawn) is Pawn and \ en_passant_pawn.color != input_color and \ position.is_square_empty(end_location): return Move(end_loc=end_location, piece=position.piece_at_square(pawn_location), status=notation_const.EN_PASSANT, start_loc=pawn_location) else: return Move(end_loc=end_location, piece=position.piece_at_square(pawn_location), status=notation_const.CAPTURE, start_loc=pawn_location) # Piece capture elif alg_str[0].isupper(): possible_piece, start_location = _get_piece_start_location(end_location, input_color, _get_piece(alg_str, 0), position) return Move(end_loc=end_location, piece=possible_piece, status=notation_const.CAPTURE, start_loc=start_location) # Pawn Promotion elif alg_str[2] == "=": promote_end_loc = Location.from_string(alg_str[:2]) if promote_end_loc.rank != 0 and promote_end_loc.rank != 7: raise ValueError("Promotion {} must be on the last rank".format(alg_str)) return Move(end_loc=promote_end_loc, piece=Pawn(input_color, promote_end_loc), status=notation_const.PROMOTE, promoted_to_piece=_get_piece(alg_str, 3), start_loc=promote_end_loc.shift_back(input_color)) # Non-pawn Piece movement with file specified (aRb7) elif alg_str[1].isupper() and not alg_str[0].isdigit(): possible_piece, start_location = _get_piece_start_location(end_location, input_color, _get_piece(alg_str, 1), position, start_file=alg_str[0]) return Move(end_loc=end_location, piece=possible_piece, status=notation_const.MOVEMENT, start_loc=start_location) # (alt) Non-pawn Piece movement with file specified (Rab7) elif alg_str[0].isupper() and not alg_str[1].isdigit(): possible_piece, start_location = _get_piece_start_location(end_location, input_color, _get_piece(alg_str, 0), position, start_file=alg_str[1]) return Move(end_loc=end_location, piece=possible_piece, status=notation_const.MOVEMENT, start_loc=start_location) # Non-pawn Piece movement with rank specified (R1b7) elif alg_str[0].isupper() and alg_str[1].isdigit(): possible_piece, start_location = _get_piece_start_location(end_location, input_color, _get_piece(alg_str, 0), position, start_rank=alg_str[1]) return Move(end_loc=end_location, piece=possible_piece, status=notation_const.MOVEMENT, start_loc=start_location) # Multiple options if len(alg_str) == 5: # Non-pawn Piece movement with rank and file specified (a2Ra1 if not alg_str[0].isdigit() and \ alg_str[1].isdigit() and \ alg_str[2].isupper() and \ not alg_str[3].isdigit() and \ alg_str[4].isdigit: start_loc = Location.from_string(alg_str[:2]) return Move(end_loc=end_location, piece=_get_piece(alg_str, 2)(input_color, end_location), status=notation_const.MOVEMENT, start_loc=start_loc) # Multiple Piece capture options if alg_str[2].upper() == "X": # Piece capture with rank specified (R1xa1) if alg_str[1].isdigit(): possible_piece, start_location = _get_piece_start_location(end_location, input_color, _get_piece(alg_str, 0), position, start_rank=alg_str[1]) return Move(end_loc=end_location, piece=possible_piece, status=notation_const.CAPTURE, start_loc=start_location) # Piece capture with file specified (Rdxd7) else: possible_piece, start_location = _get_piece_start_location(end_location, input_color, _get_piece(alg_str, 0), position, start_file=alg_str[1]) return Move(end_loc=end_location, piece=possible_piece, status=notation_const.CAPTURE, start_loc=start_location) # Pawn promotion with capture if len(alg_str) == 6 and alg_str[4] == "=": start_file = ord(alg_str[0]) - 97 promote_capture_end_loc = Location.from_string(alg_str[2:4]) return Move(end_loc=promote_capture_end_loc, piece=Pawn(input_color, promote_capture_end_loc), status=notation_const.CAPTURE_AND_PROMOTE, promoted_to_piece=_get_piece(alg_str, 5), start_loc=Location(end_location.shift_back(input_color).rank, start_file)) raise ValueError("algebraic string {} is invalid in \n{}".format(alg_str, position))
python
def incomplete_alg(alg_str, input_color, position):
    """
    Converts a string written in short algebraic form into an incomplete move.
    These incomplete moves do not have the initial location specified and
    therefore cannot be used to update the board. In order to fully utilize an
    incomplete move, it must be run through ``make_legal()`` with
    the corresponding position. It is recommended to use ``short_alg()``
    instead of this method because it returns a complete move.

    Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q

    :type: alg_str: str
    :type: input_color: Color
    """
    edge_rank = 0 \
        if input_color == color.white \
        else 7

    if alg_str is None or len(alg_str) <= 1:
        raise ValueError("algebraic string {} is invalid".format(alg_str))

    # King-side castle
    if alg_str in ["00", "oo", "OO", "0-0", "o-o", "O-O"]:
        return Move(end_loc=Location(edge_rank, 6),
                    piece=King(input_color, Location(edge_rank, 4)),
                    status=notation_const.KING_SIDE_CASTLE,
                    start_loc=Location(edge_rank, 4))

    # Queen-side castle
    if alg_str in ["000", "ooo", "OOO", "0-0-0", "o-o-o", "O-O-O"]:
        return Move(end_loc=Location(edge_rank, 2),
                    piece=King(input_color, Location(edge_rank, 4)),
                    status=notation_const.QUEEN_SIDE_CASTLE,
                    start_loc=Location(edge_rank, 4))

    try:
        end_location = Location.from_string(alg_str[-2:])
    except ValueError:
        end_location = Location.from_string(alg_str[-4:-2])

    # Pawn movement
    if len(alg_str) == 2:
        possible_pawn = position.piece_at_square(end_location.shift_back(input_color))
        if type(possible_pawn) is Pawn and \
                possible_pawn.color == input_color:
            start_location = end_location.shift_back(input_color)
        else:
            start_location = end_location.shift_back(input_color, times=2)
        return Move(end_loc=end_location,
                    piece=position.piece_at_square(start_location),
                    status=notation_const.MOVEMENT,
                    start_loc=start_location)

    # Non-pawn Piece movement
    if len(alg_str) == 3:
        possible_piece, start_location = _get_piece_start_location(end_location,
                                                                   input_color,
                                                                   _get_piece(alg_str, 0),
                                                                   position)
        return Move(end_loc=end_location,
                    piece=possible_piece,
                    status=notation_const.MOVEMENT,
                    start_loc=start_location)

    # Multiple options (Capture or Piece movement with file specified)
    if len(alg_str) == 4:

        # Capture
        if alg_str[1].upper() == "X":

            # Pawn capture
            if not alg_str[0].isupper():
                pawn_location = Location(end_location.rank, ord(alg_str[0]) - 97).shift_back(input_color)
                possible_pawn = position.piece_at_square(pawn_location)
                if type(possible_pawn) is Pawn and \
                        possible_pawn.color == input_color:
                    en_passant_pawn = position.piece_at_square(end_location.shift_back(input_color))
                    if type(en_passant_pawn) is Pawn and \
                            en_passant_pawn.color != input_color and \
                            position.is_square_empty(end_location):
                        return Move(end_loc=end_location,
                                    piece=position.piece_at_square(pawn_location),
                                    status=notation_const.EN_PASSANT,
                                    start_loc=pawn_location)
                    else:
                        return Move(end_loc=end_location,
                                    piece=position.piece_at_square(pawn_location),
                                    status=notation_const.CAPTURE,
                                    start_loc=pawn_location)

            # Piece capture
            elif alg_str[0].isupper():
                possible_piece, start_location = _get_piece_start_location(end_location,
                                                                           input_color,
                                                                           _get_piece(alg_str, 0),
                                                                           position)
                return Move(end_loc=end_location,
                            piece=possible_piece,
                            status=notation_const.CAPTURE,
                            start_loc=start_location)

        # Pawn Promotion
        elif alg_str[2] == "=":
            promote_end_loc = Location.from_string(alg_str[:2])
            if promote_end_loc.rank != 0 and promote_end_loc.rank != 7:
                raise ValueError("Promotion {} must be on the last rank".format(alg_str))
            return Move(end_loc=promote_end_loc,
                        piece=Pawn(input_color, promote_end_loc),
                        status=notation_const.PROMOTE,
                        promoted_to_piece=_get_piece(alg_str, 3),
                        start_loc=promote_end_loc.shift_back(input_color))

        # Non-pawn Piece movement with file specified (aRb7)
        elif alg_str[1].isupper() and not alg_str[0].isdigit():
            possible_piece, start_location = _get_piece_start_location(end_location,
                                                                       input_color,
                                                                       _get_piece(alg_str, 1),
                                                                       position,
                                                                       start_file=alg_str[0])
            return Move(end_loc=end_location,
                        piece=possible_piece,
                        status=notation_const.MOVEMENT,
                        start_loc=start_location)

        # (alt) Non-pawn Piece movement with file specified (Rab7)
        elif alg_str[0].isupper() and not alg_str[1].isdigit():
            possible_piece, start_location = _get_piece_start_location(end_location,
                                                                       input_color,
                                                                       _get_piece(alg_str, 0),
                                                                       position,
                                                                       start_file=alg_str[1])
            return Move(end_loc=end_location,
                        piece=possible_piece,
                        status=notation_const.MOVEMENT,
                        start_loc=start_location)

        # Non-pawn Piece movement with rank specified (R1b7)
        elif alg_str[0].isupper() and alg_str[1].isdigit():
            possible_piece, start_location = _get_piece_start_location(end_location,
                                                                       input_color,
                                                                       _get_piece(alg_str, 0),
                                                                       position,
                                                                       start_rank=alg_str[1])
            return Move(end_loc=end_location,
                        piece=possible_piece,
                        status=notation_const.MOVEMENT,
                        start_loc=start_location)

    # Multiple options
    if len(alg_str) == 5:

        # Non-pawn Piece movement with rank and file specified (a2Ra1)
        if not alg_str[0].isdigit() and \
                alg_str[1].isdigit() and \
                alg_str[2].isupper() and \
                not alg_str[3].isdigit() and \
                alg_str[4].isdigit():
            start_loc = Location.from_string(alg_str[:2])
            return Move(end_loc=end_location,
                        piece=_get_piece(alg_str, 2)(input_color, end_location),
                        status=notation_const.MOVEMENT,
                        start_loc=start_loc)

        # Multiple Piece capture options
        if alg_str[2].upper() == "X":

            # Piece capture with rank specified (R1xa1)
            if alg_str[1].isdigit():
                possible_piece, start_location = _get_piece_start_location(end_location,
                                                                           input_color,
                                                                           _get_piece(alg_str, 0),
                                                                           position,
                                                                           start_rank=alg_str[1])
                return Move(end_loc=end_location,
                            piece=possible_piece,
                            status=notation_const.CAPTURE,
                            start_loc=start_location)

            # Piece capture with file specified (Rdxd7)
            else:
                possible_piece, start_location = _get_piece_start_location(end_location,
                                                                           input_color,
                                                                           _get_piece(alg_str, 0),
                                                                           position,
                                                                           start_file=alg_str[1])
                return Move(end_loc=end_location,
                            piece=possible_piece,
                            status=notation_const.CAPTURE,
                            start_loc=start_location)

    # Pawn promotion with capture
    if len(alg_str) == 6 and alg_str[4] == "=":
        start_file = ord(alg_str[0]) - 97
        promote_capture_end_loc = Location.from_string(alg_str[2:4])
        return Move(end_loc=promote_capture_end_loc,
                    piece=Pawn(input_color, promote_capture_end_loc),
                    status=notation_const.CAPTURE_AND_PROMOTE,
                    promoted_to_piece=_get_piece(alg_str, 5),
                    start_loc=Location(end_location.shift_back(input_color).rank, start_file))

    raise ValueError("algebraic string {} is invalid in \n{}".format(alg_str, position))
[ "def", "incomplete_alg", "(", "alg_str", ",", "input_color", ",", "position", ")", ":", "edge_rank", "=", "0", "if", "input_color", "==", "color", ".", "white", "else", "7", "if", "alg_str", "is", "None", "or", "len", "(", "alg_str", ")", "<=", "1", ":", "raise", "ValueError", "(", "\"algebraic string {} is invalid\"", ".", "format", "(", "alg_str", ")", ")", "# King-side castle", "if", "alg_str", "in", "[", "\"00\"", ",", "\"oo\"", ",", "\"OO\"", ",", "\"0-0\"", ",", "\"o-o\"", ",", "\"O-O\"", "]", ":", "return", "Move", "(", "end_loc", "=", "Location", "(", "edge_rank", ",", "6", ")", ",", "piece", "=", "King", "(", "input_color", ",", "Location", "(", "edge_rank", ",", "4", ")", ")", ",", "status", "=", "notation_const", ".", "KING_SIDE_CASTLE", ",", "start_loc", "=", "Location", "(", "edge_rank", ",", "4", ")", ")", "# Queen-side castle", "if", "alg_str", "in", "[", "\"000\"", ",", "\"ooo\"", ",", "\"OOO\"", ",", "\"0-0-0\"", ",", "\"o-o-o\"", ",", "\"O-O-O\"", "]", ":", "return", "Move", "(", "end_loc", "=", "Location", "(", "edge_rank", ",", "2", ")", ",", "piece", "=", "King", "(", "input_color", ",", "Location", "(", "edge_rank", ",", "4", ")", ")", ",", "status", "=", "notation_const", ".", "QUEEN_SIDE_CASTLE", ",", "start_loc", "=", "Location", "(", "edge_rank", ",", "4", ")", ")", "try", ":", "end_location", "=", "Location", ".", "from_string", "(", "alg_str", "[", "-", "2", ":", "]", ")", "except", "ValueError", ":", "end_location", "=", "Location", ".", "from_string", "(", "alg_str", "[", "-", "4", ":", "-", "2", "]", ")", "# Pawn movement", "if", "len", "(", "alg_str", ")", "==", "2", ":", "possible_pawn", "=", "position", ".", "piece_at_square", "(", "end_location", ".", "shift_back", "(", "input_color", ")", ")", "if", "type", "(", "possible_pawn", ")", "is", "Pawn", "and", "possible_pawn", ".", "color", "==", "input_color", ":", "start_location", "=", "end_location", ".", "shift_back", "(", "input_color", ")", "else", ":", "start_location", "=", "end_location", ".", "shift_back", "(", "input_color", ",", "times", "=", "2", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "position", ".", "piece_at_square", "(", "start_location", ")", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_location", ")", "# Non-pawn Piece movement", "if", "len", "(", "alg_str", ")", "==", "3", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_location", ")", "# Multiple options (Capture or Piece movement with file specified)", "if", "len", "(", "alg_str", ")", "==", "4", ":", "# Capture", "if", "alg_str", "[", "1", "]", ".", "upper", "(", ")", "==", "\"X\"", ":", "# Pawn capture", "if", "not", "alg_str", "[", "0", "]", ".", "isupper", "(", ")", ":", "pawn_location", "=", "Location", "(", "end_location", ".", "rank", ",", "ord", "(", "alg_str", "[", "0", "]", ")", "-", "97", ")", ".", "shift_back", "(", "input_color", ")", "possible_pawn", "=", "position", ".", "piece_at_square", "(", "pawn_location", ")", "if", "type", "(", "possible_pawn", ")", "is", "Pawn", "and", "possible_pawn", ".", "color", "==", "input_color", ":", "en_passant_pawn", "=", "position", ".", "piece_at_square", "(", "end_location", ".", 
"shift_back", "(", "input_color", ")", ")", "if", "type", "(", "en_passant_pawn", ")", "is", "Pawn", "and", "en_passant_pawn", ".", "color", "!=", "input_color", "and", "position", ".", "is_square_empty", "(", "end_location", ")", ":", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "position", ".", "piece_at_square", "(", "pawn_location", ")", ",", "status", "=", "notation_const", ".", "EN_PASSANT", ",", "start_loc", "=", "pawn_location", ")", "else", ":", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "position", ".", "piece_at_square", "(", "pawn_location", ")", ",", "status", "=", "notation_const", ".", "CAPTURE", ",", "start_loc", "=", "pawn_location", ")", "# Piece capture", "elif", "alg_str", "[", "0", "]", ".", "isupper", "(", ")", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "CAPTURE", ",", "start_loc", "=", "start_location", ")", "# Pawn Promotion", "elif", "alg_str", "[", "2", "]", "==", "\"=\"", ":", "promote_end_loc", "=", "Location", ".", "from_string", "(", "alg_str", "[", ":", "2", "]", ")", "if", "promote_end_loc", ".", "rank", "!=", "0", "and", "promote_end_loc", ".", "rank", "!=", "7", ":", "raise", "ValueError", "(", "\"Promotion {} must be on the last rank\"", ".", "format", "(", "alg_str", ")", ")", "return", "Move", "(", "end_loc", "=", "promote_end_loc", ",", "piece", "=", "Pawn", "(", "input_color", ",", "promote_end_loc", ")", ",", "status", "=", "notation_const", ".", "PROMOTE", ",", "promoted_to_piece", "=", "_get_piece", "(", "alg_str", ",", "3", ")", ",", "start_loc", "=", "promote_end_loc", ".", "shift_back", "(", "input_color", ")", ")", "# Non-pawn Piece movement with file specified (aRb7)", "elif", "alg_str", "[", "1", "]", ".", "isupper", "(", ")", "and", "not", "alg_str", "[", "0", "]", ".", "isdigit", "(", ")", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "1", ")", ",", "position", ",", "start_file", "=", "alg_str", "[", "0", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_location", ")", "# (alt) Non-pawn Piece movement with file specified (Rab7)", "elif", "alg_str", "[", "0", "]", ".", "isupper", "(", ")", "and", "not", "alg_str", "[", "1", "]", ".", "isdigit", "(", ")", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ",", "start_file", "=", "alg_str", "[", "1", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_location", ")", "# Non-pawn Piece movement with rank specified (R1b7)", "elif", "alg_str", "[", "0", "]", ".", "isupper", "(", ")", "and", "alg_str", "[", "1", "]", ".", "isdigit", "(", ")", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ",", 
"start_rank", "=", "alg_str", "[", "1", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_location", ")", "# Multiple options", "if", "len", "(", "alg_str", ")", "==", "5", ":", "# Non-pawn Piece movement with rank and file specified (a2Ra1", "if", "not", "alg_str", "[", "0", "]", ".", "isdigit", "(", ")", "and", "alg_str", "[", "1", "]", ".", "isdigit", "(", ")", "and", "alg_str", "[", "2", "]", ".", "isupper", "(", ")", "and", "not", "alg_str", "[", "3", "]", ".", "isdigit", "(", ")", "and", "alg_str", "[", "4", "]", ".", "isdigit", ":", "start_loc", "=", "Location", ".", "from_string", "(", "alg_str", "[", ":", "2", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "_get_piece", "(", "alg_str", ",", "2", ")", "(", "input_color", ",", "end_location", ")", ",", "status", "=", "notation_const", ".", "MOVEMENT", ",", "start_loc", "=", "start_loc", ")", "# Multiple Piece capture options", "if", "alg_str", "[", "2", "]", ".", "upper", "(", ")", "==", "\"X\"", ":", "# Piece capture with rank specified (R1xa1)", "if", "alg_str", "[", "1", "]", ".", "isdigit", "(", ")", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ",", "start_rank", "=", "alg_str", "[", "1", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "CAPTURE", ",", "start_loc", "=", "start_location", ")", "# Piece capture with file specified (Rdxd7)", "else", ":", "possible_piece", ",", "start_location", "=", "_get_piece_start_location", "(", "end_location", ",", "input_color", ",", "_get_piece", "(", "alg_str", ",", "0", ")", ",", "position", ",", "start_file", "=", "alg_str", "[", "1", "]", ")", "return", "Move", "(", "end_loc", "=", "end_location", ",", "piece", "=", "possible_piece", ",", "status", "=", "notation_const", ".", "CAPTURE", ",", "start_loc", "=", "start_location", ")", "# Pawn promotion with capture", "if", "len", "(", "alg_str", ")", "==", "6", "and", "alg_str", "[", "4", "]", "==", "\"=\"", ":", "start_file", "=", "ord", "(", "alg_str", "[", "0", "]", ")", "-", "97", "promote_capture_end_loc", "=", "Location", ".", "from_string", "(", "alg_str", "[", "2", ":", "4", "]", ")", "return", "Move", "(", "end_loc", "=", "promote_capture_end_loc", ",", "piece", "=", "Pawn", "(", "input_color", ",", "promote_capture_end_loc", ")", ",", "status", "=", "notation_const", ".", "CAPTURE_AND_PROMOTE", ",", "promoted_to_piece", "=", "_get_piece", "(", "alg_str", ",", "5", ")", ",", "start_loc", "=", "Location", "(", "end_location", ".", "shift_back", "(", "input_color", ")", ".", "rank", ",", "start_file", ")", ")", "raise", "ValueError", "(", "\"algebraic string {} is invalid in \\n{}\"", ".", "format", "(", "alg_str", ",", "position", ")", ")" ]
Converts a string written in short algebraic form into an incomplete move. These incomplete moves do not have the initial location specified and therefore cannot be used to update the board. In order to fully utilize an incomplete move, it must be run through ``make_legal()`` with the corresponding position. It is recommended to use ``short_alg()`` instead of this method because it returns a complete move. Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q :type: alg_str: str :type: input_color: Color
[ "Converts", "a", "string", "written", "in", "short", "algebraic", "form", "into", "an", "incomplete", "move", ".", "These", "incomplete", "moves", "do", "not", "have", "the", "initial", "location", "specified", "and", "therefore", "cannot", "be", "used", "to", "update", "the", "board", ".", "IN", "order", "to", "fully", "utilize", "incomplete", "move", "it", "must", "be", "run", "through", "make_legal", "()", "with", "the", "corresponding", "position", ".", "It", "is", "recommended", "to", "use", "short_alg", "()", "instead", "of", "this", "method", "because", "it", "returns", "a", "complete", "move", "." ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/converter.py#L105-L305
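A minimal usage sketch of the two-step flow this record describes. The converter import path follows func_path_in_repository above; ``Board.init_default()`` is an assumed starting-position factory and is not confirmed by this record.

# Sketch only: Board.init_default() is an assumption; the
# incomplete_alg()/make_legal() signatures come from the source above.
from chess_py import Board, color
from chess_py.core.algebraic import converter

position = Board.init_default()                       # assumed factory
partial = converter.incomplete_alg("e4", color.white, position)
complete = converter.make_legal(partial, position)    # fills in start_loc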
LordDarkula/chess_py
chess_py/core/algebraic/converter.py
make_legal
def make_legal(move, position):
    """
    Converts an incomplete move (initial ``Location`` not specified)
    and the corresponding position into a complete move
    with the most likely starting point specified. If no moves match,
    a ``ValueError`` is raised.

    :type: move: Move
    :type: position: Board
    :rtype: Move
    """
    assert isinstance(move, Move)
    for legal_move in position.all_possible_moves(move.color):

        if move.status == notation_const.LONG_ALG:
            if move.end_loc == legal_move.end_loc and \
                    move.start_loc == legal_move.start_loc:
                return legal_move

        elif move == legal_move:
            return legal_move

    raise ValueError("Move {} not legal in \n{}".format(repr(move), position))
python
def make_legal(move, position):
    """
    Converts an incomplete move (initial ``Location`` not specified)
    and the corresponding position into a complete move
    with the most likely starting point specified. If no moves match,
    a ``ValueError`` is raised.

    :type: move: Move
    :type: position: Board
    :rtype: Move
    """
    assert isinstance(move, Move)
    for legal_move in position.all_possible_moves(move.color):

        if move.status == notation_const.LONG_ALG:
            if move.end_loc == legal_move.end_loc and \
                    move.start_loc == legal_move.start_loc:
                return legal_move

        elif move == legal_move:
            return legal_move

    raise ValueError("Move {} not legal in \n{}".format(repr(move), position))
[ "def", "make_legal", "(", "move", ",", "position", ")", ":", "assert", "isinstance", "(", "move", ",", "Move", ")", "for", "legal_move", "in", "position", ".", "all_possible_moves", "(", "move", ".", "color", ")", ":", "if", "move", ".", "status", "==", "notation_const", ".", "LONG_ALG", ":", "if", "move", ".", "end_loc", "==", "legal_move", ".", "end_loc", "and", "move", ".", "start_loc", "==", "legal_move", ".", "start_loc", ":", "return", "legal_move", "elif", "move", "==", "legal_move", ":", "return", "legal_move", "raise", "ValueError", "(", "\"Move {} not legal in \\n{}\"", ".", "format", "(", "repr", "(", "move", ")", ",", "position", ")", ")" ]
Converts an incomplete move (initial ``Location`` not specified) and the corresponding position into a complete move with the most likely starting point specified. If no moves match, a ``ValueError`` is raised. :type: move: Move :type: position: Board :rtype: Move
[ "Converts", "an", "incomplete", "move", "(", "initial", "Location", "not", "specified", ")", "and", "the", "corresponding", "position", "into", "the", "a", "complete", "move", "with", "the", "most", "likely", "starting", "point", "specified", ".", "If", "no", "moves", "match", "None", "is", "returned", "." ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/converter.py#L308-L330
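Because the body raises ``ValueError`` when nothing matches, callers that prefer a soft failure can wrap it; a small sketch using only the signature shown above:

# Sketch: turn make_legal's ValueError into a None result.
from chess_py.core.algebraic import converter

def try_make_legal(move, position):
    try:
        return converter.make_legal(move, position)
    except ValueError:
        return None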
LordDarkula/chess_py
chess_py/core/algebraic/converter.py
short_alg
def short_alg(algebraic_string, input_color, position):
    """
    Converts a string written in short algebraic form, the color
    of the side whose turn it is, and the corresponding position
    into a complete move that can be played. If no moves match,
    a ValueError is raised.

    Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q

    :type: algebraic_string: str
    :type: input_color: Color
    :type: position: Board
    """
    return make_legal(incomplete_alg(algebraic_string, input_color, position), position)
python
def short_alg(algebraic_string, input_color, position):
    """
    Converts a string written in short algebraic form, the color
    of the side whose turn it is, and the corresponding position
    into a complete move that can be played. If no moves match,
    a ValueError is raised.

    Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q

    :type: algebraic_string: str
    :type: input_color: Color
    :type: position: Board
    """
    return make_legal(incomplete_alg(algebraic_string, input_color, position), position)
[ "def", "short_alg", "(", "algebraic_string", ",", "input_color", ",", "position", ")", ":", "return", "make_legal", "(", "incomplete_alg", "(", "algebraic_string", ",", "input_color", ",", "position", ")", ",", "position", ")" ]
Converts a string written in short algebraic form, the color of the side whose turn it is, and the corresponding position into a complete move that can be played. If no moves match, a ValueError is raised. Examples: e4, Nf3, exd5, Qxf3, 00, 000, e8=Q :type: algebraic_string: str :type: input_color: Color :type: position: Board
[ "Converts", "a", "string", "written", "in", "short", "algebraic", "form", "the", "color", "of", "the", "side", "whose", "turn", "it", "is", "and", "the", "corresponding", "position", "into", "a", "complete", "move", "that", "can", "be", "played", ".", "If", "no", "moves", "match", "None", "is", "returned", "." ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/converter.py#L333-L346
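A one-call sketch; as before, ``Board.init_default()`` is an assumed factory for the standard starting position.

from chess_py import Board, color
from chess_py.core.algebraic import converter

position = Board.init_default()                      # assumed factory
move = converter.short_alg("Nf3", color.white, position)
# move is complete: both start_loc (g1) and end_loc (f3) are filled in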
LordDarkula/chess_py
chess_py/core/algebraic/converter.py
long_alg
def long_alg(alg_str, position):
    """
    Converts a string written in long algebraic form
    and the corresponding position into a complete move
    (initial location specified). Used primarily for
    UCI, but can be used for other purposes.

    :type: alg_str: str
    :type: position: Board
    :rtype: Move
    """
    if alg_str is None or len(alg_str) < 4 or len(alg_str) > 6:
        raise ValueError("Invalid string input {}".format(alg_str))

    end = Location.from_string(alg_str[2:])
    start = Location.from_string(alg_str[:2])
    piece = position.piece_at_square(start)

    if len(alg_str) == 4:
        return make_legal(Move(end_loc=end,
                               piece=piece,
                               status=notation_const.LONG_ALG,
                               start_loc=start), position)

    promoted_to = _get_piece(alg_str, 4)
    if promoted_to is None or \
            promoted_to is King or \
            promoted_to is Pawn:
        raise Exception("Invalid move input")

    return make_legal(Move(end_loc=end,
                           piece=piece,
                           status=notation_const.LONG_ALG,
                           start_loc=start,
                           promoted_to_piece=promoted_to), position)
python
def long_alg(alg_str, position):
    """
    Converts a string written in long algebraic form
    and the corresponding position into a complete move
    (initial location specified). Used primarily for
    UCI, but can be used for other purposes.

    :type: alg_str: str
    :type: position: Board
    :rtype: Move
    """
    if alg_str is None or len(alg_str) < 4 or len(alg_str) > 6:
        raise ValueError("Invalid string input {}".format(alg_str))

    end = Location.from_string(alg_str[2:])
    start = Location.from_string(alg_str[:2])
    piece = position.piece_at_square(start)

    if len(alg_str) == 4:
        return make_legal(Move(end_loc=end,
                               piece=piece,
                               status=notation_const.LONG_ALG,
                               start_loc=start), position)

    promoted_to = _get_piece(alg_str, 4)
    if promoted_to is None or \
            promoted_to is King or \
            promoted_to is Pawn:
        raise Exception("Invalid move input")

    return make_legal(Move(end_loc=end,
                           piece=piece,
                           status=notation_const.LONG_ALG,
                           start_loc=start,
                           promoted_to_piece=promoted_to), position)
[ "def", "long_alg", "(", "alg_str", ",", "position", ")", ":", "if", "alg_str", "is", "None", "or", "len", "(", "alg_str", ")", "<", "4", "or", "len", "(", "alg_str", ")", ">", "6", ":", "raise", "ValueError", "(", "\"Invalid string input {}\"", ".", "format", "(", "alg_str", ")", ")", "end", "=", "Location", ".", "from_string", "(", "alg_str", "[", "2", ":", "]", ")", "start", "=", "Location", ".", "from_string", "(", "alg_str", "[", ":", "2", "]", ")", "piece", "=", "position", ".", "piece_at_square", "(", "start", ")", "if", "len", "(", "alg_str", ")", "==", "4", ":", "return", "make_legal", "(", "Move", "(", "end_loc", "=", "end", ",", "piece", "=", "piece", ",", "status", "=", "notation_const", ".", "LONG_ALG", ",", "start_loc", "=", "start", ")", ",", "position", ")", "promoted_to", "=", "_get_piece", "(", "alg_str", ",", "4", ")", "if", "promoted_to", "is", "None", "or", "promoted_to", "is", "King", "or", "promoted_to", "is", "Pawn", ":", "raise", "Exception", "(", "\"Invalid move input\"", ")", "return", "make_legal", "(", "Move", "(", "end_loc", "=", "end", ",", "piece", "=", "piece", ",", "status", "=", "notation_const", ".", "LONG_ALG", ",", "start_loc", "=", "start", ",", "promoted_to_piece", "=", "promoted_to", ")", ",", "position", ")" ]
Converts a string written in long algebraic form and the corresponding position into a complete move (initial location specified). Used primarily for UCI, but can be used for other purposes. :type: alg_str: str :type: position: Board :rtype: Move
[ "Converts", "a", "string", "written", "in", "long", "algebraic", "form", "and", "the", "corresponding", "position", "into", "a", "complete", "move", "(", "initial", "location", "specified", ")", ".", "Used", "primarily", "for", "UCI", "but", "can", "be", "used", "for", "other", "purposes", "." ]
train
https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/algebraic/converter.py#L349-L383
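A brief UCI-style sketch under the same assumed ``Board.init_default()`` factory; note that the 5-character promotion form appends a piece letter whose accepted case is not shown in this record.

from chess_py import Board
from chess_py.core.algebraic import converter

position = Board.init_default()                  # assumed factory
move = converter.long_alg("e2e4", position)      # 4 chars: plain move
# A 5-char string such as "e7e8Q" routes through _get_piece(alg_str, 4);
# it needs a position where the promotion is actually legal.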
cimm-kzn/CGRtools
CGRtools/containers/molecule.py
MoleculeContainer.reset_query_marks
def reset_query_marks(self):
        """
        set or reset hybridization and neighbors marks to atoms.
        """
        for i, atom in self.atoms():
            neighbors = 0
            hybridization = 1
            # hybridization 1- sp3; 2- sp2; 3- sp1; 4- aromatic
            for j, bond in self._adj[i].items():
                if self._node[j].element != 'H':
                    neighbors += 1

                if hybridization in (3, 4):
                    continue
                order = bond.order
                if order == 4:
                    hybridization = 4
                elif order == 3:
                    hybridization = 3
                elif order == 2:
                    if hybridization == 2:
                        hybridization = 3
                    else:
                        hybridization = 2

            atom._neighbors = neighbors
            atom._hybridization = hybridization
        self.flush_cache()
python
def reset_query_marks(self):
        """
        set or reset hybridization and neighbors marks to atoms.
        """
        for i, atom in self.atoms():
            neighbors = 0
            hybridization = 1
            # hybridization 1- sp3; 2- sp2; 3- sp1; 4- aromatic
            for j, bond in self._adj[i].items():
                if self._node[j].element != 'H':
                    neighbors += 1

                if hybridization in (3, 4):
                    continue
                order = bond.order
                if order == 4:
                    hybridization = 4
                elif order == 3:
                    hybridization = 3
                elif order == 2:
                    if hybridization == 2:
                        hybridization = 3
                    else:
                        hybridization = 2

            atom._neighbors = neighbors
            atom._hybridization = hybridization
        self.flush_cache()
[ "def", "reset_query_marks", "(", "self", ")", ":", "for", "i", ",", "atom", "in", "self", ".", "atoms", "(", ")", ":", "neighbors", "=", "0", "hybridization", "=", "1", "# hybridization 1- sp3; 2- sp2; 3- sp1; 4- aromatic", "for", "j", ",", "bond", "in", "self", ".", "_adj", "[", "i", "]", ".", "items", "(", ")", ":", "if", "self", ".", "_node", "[", "j", "]", ".", "element", "!=", "'H'", ":", "neighbors", "+=", "1", "if", "hybridization", "in", "(", "3", ",", "4", ")", ":", "continue", "order", "=", "bond", ".", "order", "if", "order", "==", "4", ":", "hybridization", "=", "4", "elif", "order", "==", "3", ":", "hybridization", "=", "3", "elif", "order", "==", "2", ":", "if", "hybridization", "==", "2", ":", "hybridization", "=", "3", "else", ":", "hybridization", "=", "2", "atom", ".", "_neighbors", "=", "neighbors", "atom", ".", "_hybridization", "=", "hybridization", "self", ".", "flush_cache", "(", ")" ]
set or reset hybridization and neighbors marks to atoms.
[ "set", "or", "reset", "hyb", "and", "neighbors", "marks", "to", "atoms", "." ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/molecule.py#L38-L65
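A short sketch of the intended call pattern; ``mol`` stands for any populated MoleculeContainer (its construction is outside this record), and the underscore attributes simply mirror the assignments in the body above.

# Sketch: recompute marks after editing the molecular graph.
mol.reset_query_marks()
for n, atom in mol.atoms():
    print(n, atom._neighbors, atom._hybridization)   # 4 marks aromatic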
cimm-kzn/CGRtools
CGRtools/containers/molecule.py
MoleculeContainer.implicify_hydrogens
def implicify_hydrogens(self):
        """
        remove explicit hydrogen if possible

        :return: number of removed hydrogens
        """
        explicit = defaultdict(list)
        c = 0
        for n, atom in self.atoms():
            if atom.element == 'H':
                for m in self.neighbors(n):
                    if self._node[m].element != 'H':
                        explicit[m].append(n)

        for n, h in explicit.items():
            atom = self._node[n]
            len_h = len(h)
            for i in range(len_h, 0, -1):
                hi = h[:i]
                if atom.get_implicit_h([y.order for x, y in self._adj[n].items() if x not in hi]) == i:
                    for x in hi:
                        self.remove_node(x)
                        c += 1
                    break
        self.flush_cache()
        return c
python
def implicify_hydrogens(self):
        """
        remove explicit hydrogen if possible

        :return: number of removed hydrogens
        """
        explicit = defaultdict(list)
        c = 0
        for n, atom in self.atoms():
            if atom.element == 'H':
                for m in self.neighbors(n):
                    if self._node[m].element != 'H':
                        explicit[m].append(n)

        for n, h in explicit.items():
            atom = self._node[n]
            len_h = len(h)
            for i in range(len_h, 0, -1):
                hi = h[:i]
                if atom.get_implicit_h([y.order for x, y in self._adj[n].items() if x not in hi]) == i:
                    for x in hi:
                        self.remove_node(x)
                        c += 1
                    break
        self.flush_cache()
        return c
[ "def", "implicify_hydrogens", "(", "self", ")", ":", "explicit", "=", "defaultdict", "(", "list", ")", "c", "=", "0", "for", "n", ",", "atom", "in", "self", ".", "atoms", "(", ")", ":", "if", "atom", ".", "element", "==", "'H'", ":", "for", "m", "in", "self", ".", "neighbors", "(", "n", ")", ":", "if", "self", ".", "_node", "[", "m", "]", ".", "element", "!=", "'H'", ":", "explicit", "[", "m", "]", ".", "append", "(", "n", ")", "for", "n", ",", "h", "in", "explicit", ".", "items", "(", ")", ":", "atom", "=", "self", ".", "_node", "[", "n", "]", "len_h", "=", "len", "(", "h", ")", "for", "i", "in", "range", "(", "len_h", ",", "0", ",", "-", "1", ")", ":", "hi", "=", "h", "[", ":", "i", "]", "if", "atom", ".", "get_implicit_h", "(", "[", "y", ".", "order", "for", "x", ",", "y", "in", "self", ".", "_adj", "[", "n", "]", ".", "items", "(", ")", "if", "x", "not", "in", "hi", "]", ")", "==", "i", ":", "for", "x", "in", "hi", ":", "self", ".", "remove_node", "(", "x", ")", "c", "+=", "1", "break", "self", ".", "flush_cache", "(", ")", "return", "c" ]
remove explicit hydrogen if possible :return: number of removed hydrogens
[ "remove", "explicit", "hydrogen", "if", "possible" ]
train
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/molecule.py#L67-L93
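A usage sketch under the same assumption of an existing ``mol`` container:

removed = mol.implicify_hydrogens()   # fold explicit H atoms into implicit counts
print('{} explicit hydrogens removed'.format(removed))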