Dataset schema (column: dtype, observed range; "nullable" marks columns that may be null):
- hexsha: string, length 40 to 40
- size: int64, 7 to 1.04M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 4 to 247
- max_stars_repo_name: string, length 4 to 125
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 368k, nullable
- max_stars_repo_stars_event_min_datetime: string, length 24, nullable
- max_stars_repo_stars_event_max_datetime: string, length 24, nullable
- max_issues_repo_path: string, length 4 to 247
- max_issues_repo_name: string, length 4 to 125
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 116k, nullable
- max_issues_repo_issues_event_min_datetime: string, length 24, nullable
- max_issues_repo_issues_event_max_datetime: string, length 24, nullable
- max_forks_repo_path: string, length 4 to 247
- max_forks_repo_name: string, length 4 to 125
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k, nullable
- max_forks_repo_forks_event_min_datetime: string, length 24, nullable
- max_forks_repo_forks_event_max_datetime: string, length 24, nullable
- content: string, length 1 to 1.04M
- avg_line_length: float64, 1.77 to 618k
- max_line_length: int64, 1 to 1.02M
- alphanum_fraction: float64, 0 to 1
- original_content: string, length 7 to 1.04M
- filtered:remove_function_no_docstring: int64, -102 to 942k
- filtered:remove_class_no_docstring: int64, -354 to 977k
- filtered:remove_delete_markers: int64, 0 to 60.1k
hexsha: 91c4421633a0ddab2d35d03049dc7a5c7c016161 | size: 503 | ext: py | lang: Python
max_stars: Algorithm - BOJ/Greedy/9009.py @ kangjunseo/C- (head eafdf57a22b3a794d09cab045d6d60c2842ba347), licenses ["MIT"], count 2, events 2021-08-30T12:37:57.000Z to 2021-11-29T05:42:05.000Z
max_issues: Algorithm - BOJ/Greedy/9009.py @ kangjunseo/C- (head eafdf57a22b3a794d09cab045d6d60c2842ba347), licenses ["MIT"], count null, events null
max_forks: Algorithm - BOJ/Greedy/9009.py @ kangjunseo/C- (head eafdf57a22b3a794d09cab045d6d60c2842ba347), licenses ["MIT"], count null, events null
content:
import sys
input = sys.stdin.readline
T = int(input())
fb = [0 for _ in range(50)]
fb[0], fb[1] = 1,1
for i in range(2,50): fb[i] = fb[i-1]+fb[i-2]
for _ in range(T):
N = int(input())
L = []
while N:
for i in range(50):
if fb[i]>N:
L.append(fb[i-1])
N-=fb[i-1]
break
if fb[i]==N:
L.append(fb[i])
N=0
break
for i in sorted(L): print(i, end=' ')
print()
avg_line_length: 20.958333 | max_line_length: 45 | alphanum_fraction: 0.409543
original_content:
import sys
input = sys.stdin.readline
T = int(input())
fb = [0 for _ in range(50)]
fb[0], fb[1] = 1,1
for i in range(2,50): fb[i] = fb[i-1]+fb[i-2]
for _ in range(T):
N = int(input())
L = []
while N:
for i in range(50):
if fb[i]>N:
L.append(fb[i-1])
N-=fb[i-1]
break
if fb[i]==N:
L.append(fb[i])
N=0
break
for i in sorted(L): print(i, end=' ')
print()
filtered: remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_delete_markers 0
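The solution above is a greedy, Zeckendorf-style decomposition: it repeatedly subtracts the largest Fibonacci number that does not exceed N. A minimal standalone sketch of the same idea (the function name and the assert are ours, not part of the record):

def fib_terms(n, limit=50):
    # build the same table as the record: fb = 1, 1, 2, 3, 5, 8, ...
    fb = [1, 1]
    while len(fb) < limit:
        fb.append(fb[-1] + fb[-2])
    terms = []
    while n:
        # greedily take the largest Fibonacci number <= n
        largest = max(f for f in fb if f <= n)
        terms.append(largest)
        n -= largest
    return sorted(terms)

assert sum(fib_terms(100)) == 100  # e.g. 100 = 3 + 8 + 89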
hexsha: b293cc0e890762ffd29d2a3f9b67a7d12e79d9a6 | size: 2,335 | ext: py | lang: Python
max_stars: pyxel/packager.py @ JUNE-9653/pyxel (head 2d0c828757ea0183cfca526f78d0d72ae4b76753), licenses ["MIT"], count 1, events 2019-08-19T11:43:12.000Z to 2019-08-19T11:43:12.000Z
max_issues: pyxel/packager.py @ JUNE-9653/pyxel (head 2d0c828757ea0183cfca526f78d0d72ae4b76753), licenses ["MIT"], count null, events null
max_forks: pyxel/packager.py @ JUNE-9653/pyxel (head 2d0c828757ea0183cfca526f78d0d72ae4b76753), licenses ["MIT"], count null, events null
content:
import glob
import os
import platform
import shutil
import sys
import pyxel
if __name__ == "__main__":
run()
avg_line_length: 26.83908 | max_line_length: 76 | alphanum_fraction: 0.572591
original_content:
import glob
import os
import platform
import shutil
import sys
import pyxel
def run():
arg = sys.argv[1] if len(sys.argv) >= 2 else ""
name = ""
if not arg or arg.startswith("-"):
if arg == "-v" or arg == "--version":
print("Pyxel Packager {}".format(pyxel.VERSION))
return
else:
print("Usage: pyxelpackager python_file")
print("Options:")
print(" -h, --help This help text")
print(" -v, --version Show version number and quit")
return
dirname = os.path.dirname(arg) or "."
filename = os.path.basename(arg)
name = name or os.path.splitext(filename)[0]
separator = ";" if platform.system() == "Windows" else ":"
os.chdir(dirname)
options = [
"--clean",
"--noconfirm",
"--log-level=WARN",
"--onefile",
"--noconsole",
"--name={}".format(name),
"--hidden-import=numpy.random.bounded_integers",
"--hidden-import=numpy.random.common",
"--hidden-import=numpy.random.entropy",
]
src_lib_dir = os.path.dirname(pyxel.core._get_absolute_libpath())
dst_lib_dir = os.path.dirname(pyxel.core._get_relative_libpath())
libs = filter(os.path.isfile, glob.glob(os.path.join(src_lib_dir, "*")))
for lib in libs:
libname = os.path.basename(lib)
options.append(
"--add-data={}{}{}".format(
os.path.join(src_lib_dir, libname), separator, dst_lib_dir
)
)
assets = filter(os.path.isfile, glob.glob("assets/**", recursive=True))
for asset in assets:
options.append(
"--add-data={}{}{}".format(
os.path.abspath(asset), separator, os.path.dirname(asset)
)
)
try:
shutil.rmtree("dist", ignore_errors=True)
_run_pyinstaller(options + [filename])
finally:
shutil.rmtree("build", ignore_errors=True)
shutil.rmtree("__pycache__", ignore_errors=True)
spec_file = "{}.spec".format(name)
if os.path.exists(spec_file):
os.remove(spec_file)
def _run_pyinstaller(args):
import PyInstaller.__main__
print("pyinstaller {}".format(" ".join(args)))
PyInstaller.__main__.run(args)
if __name__ == "__main__":
run()
filtered: remove_function_no_docstring 2,171 | remove_class_no_docstring 0 | remove_delete_markers 46
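packager.py builds PyInstaller --add-data options whose source/destination separator is platform-dependent: ";" on Windows and ":" elsewhere. A hedged sketch isolating just that rule (the helper name is ours):

import platform

def add_data_option(src, dest):
    # PyInstaller expects "SRC;DEST" on Windows and "SRC:DEST" on other systems
    sep = ";" if platform.system() == "Windows" else ":"
    return "--add-data={}{}{}".format(src, sep, dest)

# e.g. add_data_option("assets/icon.png", "assets") -> "--add-data=assets/icon.png:assets" on Linux/macOS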
hexsha: 199b885044f89ef22f190b6b05b9d8c2d2de93a1 | size: 618 | ext: py | lang: Python
max_stars: api/projeto/model/model.py @ ldynczuki/iris-model-api (head 0519f5d3d8158f7560326219bd2263a234ad0cbe), licenses ["MIT"], count null, events null
max_issues: api/projeto/model/model.py @ ldynczuki/iris-model-api (head 0519f5d3d8158f7560326219bd2263a234ad0cbe), licenses ["MIT"], count null, events null
max_forks: api/projeto/model/model.py @ ldynczuki/iris-model-api (head 0519f5d3d8158f7560326219bd2263a234ad0cbe), licenses ["MIT"], count null, events null
content:
import pickle
import numpy as np
from projeto.settings import WEIGHTS_PATH
from loguru import logger
avg_line_length: 25.75 | max_line_length: 80 | alphanum_fraction: 0.62945
original_content:
import pickle
import numpy as np
from projeto.settings import WEIGHTS_PATH
from loguru import logger
class Classificador():
def __init__(self):
self.model = pickle.load(open(WEIGHTS_PATH, 'rb'))
logger.debug(self.model)
def get_predict(self, sepal_length, sepal_width, petal_length, petal_width):
'''
Method to run inference on the submitted data.
'''
data = np.array([[sepal_length, sepal_width,
petal_length, petal_width]])
prediction = self.model.predict(data)
return prediction[0]
filtered: remove_function_no_docstring 100 | remove_class_no_docstring 396 | remove_delete_markers 23
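Classificador.get_predict wraps the four features in a nested list because scikit-learn style estimators expect a 2-D array of shape (n_samples, n_features). A quick illustration (the feature values here are arbitrary):

import numpy as np

# one sample with four iris features: sepal length/width, petal length/width
data = np.array([[5.1, 3.5, 1.4, 0.2]])
print(data.shape)  # (1, 4) -- the shape model.predict(...) expects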
hexsha: 3183e10ae3187fd9fc4435ab27745eddc0c1ddac | size: 8,584 | ext: py | lang: Python
max_stars: falcon_heavy/core/types/primitive.py @ NotJustAToy/falcon-heavy (head 2e96f649daafc2707a01e38f403f1ce4268f4629), licenses ["Apache-2.0"], count 21, events 2020-01-02T10:44:42.000Z to 2022-02-11T14:27:05.000Z
max_issues: falcon_heavy/core/types/primitive.py @ NotJustAToy/falcon-heavy (head 2e96f649daafc2707a01e38f403f1ce4268f4629), licenses ["Apache-2.0"], count 2, events 2020-02-13T21:06:56.000Z to 2020-09-27T16:47:25.000Z
max_forks: falcon_heavy/core/types/primitive.py @ NotJustAToy/falcon-heavy (head 2e96f649daafc2707a01e38f403f1ce4268f4629), licenses ["Apache-2.0"], count null, events null
content:
# Copyright 2019-2020 Not Just A Toy Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing as ty
from distutils.util import strtobool
from falcon_heavy.utils import force_str, FalconHeavyUnicodeDecodeError
from .base import BaseType, ValidationResult, Messages, Types
from .path import Path
from .exceptions import SchemaError
from .errors import Error
__all__ = (
'StringType',
'Number',
'GenericNumberType',
'NumberType',
'IntegerType',
'BooleanType',
)
class StringType(BaseType[str]):
"""String type
:param min_length: invalidate when value length less than specified
:param max_length: invalidate when value length greater than specified
:param pattern: invalidate when value does not match the specified pattern
"""
MESSAGES: ty.ClassVar[Messages] = {
'type': "Must be a string",
'cast': "Couldn't cast to a string",
'min_length': "Must be no less than {0} characters in length",
'max_length': "Must be no greater than {0} characters in length",
'pattern': "Does not match the pattern"
}
TYPES: ty.ClassVar[Types] = (str, )
__slots__ = (
'min_length',
'max_length',
'pattern'
)
Number = ty.Union[int, float]
T_num = ty.TypeVar('T_num', int, float, Number)
class GenericNumberType(BaseType[T_num]):
"""Generic number type
:param minimum: invalidate when value less than specified minimum
:param maximum: invalidate when value greater than specified maximum
:param exclusive_minimum: when True, it indicates that the range excludes the minimum value.
When False (or not included), it indicates that the range includes the minimum value
:param exclusive_maximum: when True, it indicates that the range excludes the maximum value.
When false (or not included), it indicates that the range includes the maximum value
:param multiple_of: invalidate when value is not multiple of specified
"""
MESSAGES: ty.ClassVar[Messages] = {
'type': "Must be a number",
'cast': "Couldn't cast to a number",
'minimum': "Is less than the minimum of {0}",
'exclusive_minimum': "Is less than or equal to the minimum of {0}",
'maximum': "Is greater than the maximum of {0}",
'exclusive_maximum': "Is greater than or equal to the maximum of {0}",
'multiple_of': "Is not a multiple of {0}"
}
TYPES: ty.ClassVar[Types] = (int, float)
__slots__ = (
'minimum',
'maximum',
'exclusive_minimum',
'exclusive_maximum',
'multiple_of'
)
class NumberType(GenericNumberType[Number]):
"""Number type"""
class IntegerType(GenericNumberType[int]):
"""Integer type"""
MESSAGES: ty.ClassVar[Messages] = {
'type': "Must be an integer",
'cast': "Couldn't cast to an integer"
}
TYPES: ty.ClassVar[Types] = (int, )
__slots__ = ()
class BooleanType(BaseType[bool]):
"""Boolean type"""
MESSAGES: ty.ClassVar[Messages] = {
'type': "Must be a boolean",
'cast': "Couldn't cast to a boolean"
}
TYPES: ty.ClassVar[Types] = (bool, )
__slots__ = ()
avg_line_length: 32.270677 | max_line_length: 112 | alphanum_fraction: 0.632339
original_content:
# Copyright 2019-2020 Not Just A Toy Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing as ty
from distutils.util import strtobool
from falcon_heavy.utils import force_str, FalconHeavyUnicodeDecodeError
from .base import BaseType, ValidationResult, Messages, Types
from .path import Path
from .exceptions import SchemaError
from .errors import Error
__all__ = (
'StringType',
'Number',
'GenericNumberType',
'NumberType',
'IntegerType',
'BooleanType',
)
class StringType(BaseType[str]):
"""String type
:param min_length: invalidate when value length less than specified
:param max_length: invalidate when value length greater than specified
:param pattern: invalidate when value does not match the specified pattern
"""
MESSAGES: ty.ClassVar[Messages] = {
'type': "Must be a string",
'cast': "Couldn't cast to a string",
'min_length': "Must be no less than {0} characters in length",
'max_length': "Must be no greater than {0} characters in length",
'pattern': "Does not match the pattern"
}
TYPES: ty.ClassVar[Types] = (str, )
__slots__ = (
'min_length',
'max_length',
'pattern'
)
def __init__(
self,
min_length: ty.Optional[int] = None,
max_length: ty.Optional[int] = None,
pattern: ty.Optional[ty.Pattern] = None,
**kwargs: ty.Any
) -> None:
self.min_length = min_length
self.max_length = max_length
self.pattern = pattern
super(StringType, self).__init__(**kwargs)
def _cast(self, value: ty.Any, path: Path, *args: ty.Any, strict: bool = True, **context: ty.Any) -> ty.Any:
if isinstance(value, self.TYPES) or strict:
return value
try:
return force_str(value, errors='replace')
except FalconHeavyUnicodeDecodeError:
raise SchemaError(Error(path, self.messages['cast']))
def validate_length(self, value: str, *args: ty.Any, **context: ty.Any) -> ValidationResult:
if self.min_length is not None and len(value) < self.min_length:
return self.messages['min_length'].format(self.min_length)
if self.max_length is not None and len(value) > self.max_length:
return self.messages['max_length'].format(self.max_length)
return None
def validate_pattern(self, value: str, *args: ty.Any, **context: ty.Any) -> ValidationResult:
if self.pattern is not None and self.pattern.match(value) is None:
return self.messages['pattern']
return None
Number = ty.Union[int, float]
T_num = ty.TypeVar('T_num', int, float, Number)
class GenericNumberType(BaseType[T_num]):
"""Generic number type
:param minimum: invalidate when value less than specified minimum
:param maximum: invalidate when value greater than specified maximum
:param exclusive_minimum: when True, it indicates that the range excludes the minimum value.
When False (or not included), it indicates that the range includes the minimum value
:param exclusive_maximum: when True, it indicates that the range excludes the maximum value.
When false (or not included), it indicates that the range includes the maximum value
:param multiple_of: invalidate when value is not multiple of specified
"""
MESSAGES: ty.ClassVar[Messages] = {
'type': "Must be a number",
'cast': "Couldn't cast to a number",
'minimum': "Is less than the minimum of {0}",
'exclusive_minimum': "Is less than or equal to the minimum of {0}",
'maximum': "Is greater than the maximum of {0}",
'exclusive_maximum': "Is greater than or equal to the maximum of {0}",
'multiple_of': "Is not a multiple of {0}"
}
TYPES: ty.ClassVar[Types] = (int, float)
__slots__ = (
'minimum',
'maximum',
'exclusive_minimum',
'exclusive_maximum',
'multiple_of'
)
def __init__(
self,
minimum: ty.Optional[Number] = None,
maximum: ty.Optional[Number] = None,
exclusive_minimum: bool = False,
exclusive_maximum: bool = False,
multiple_of: ty.Optional[Number] = None,
**kwargs: ty.Any
) -> None:
self.minimum = minimum
self.maximum = maximum
self.exclusive_minimum = exclusive_minimum
self.exclusive_maximum = exclusive_maximum
self.multiple_of = multiple_of
super(GenericNumberType, self).__init__(**kwargs)
def _cast(self, value: ty.Any, path: Path, *args: ty.Any, strict: bool = True, **context: ty.Any) -> ty.Any:
if isinstance(value, self.TYPES) or strict:
return value
if isinstance(value, bool):
return int(value)
try:
return float(value)
except (ValueError, TypeError):
raise SchemaError(Error(path, self.messages['cast']))
def _check_type(self, value: ty.Any, path: Path, *args: ty.Any, **context: ty.Any) -> bool:
# bool is subtype of int
if isinstance(value, bool):
return False
return super(GenericNumberType, self)._check_type(value, path, **context)
def validate_minimum(self, value: Number, *args: ty.Any, **context: ty.Any) -> ValidationResult:
if self.minimum is None:
return None
if self.exclusive_minimum and value <= self.minimum:
return self.messages['exclusive_minimum'].format(self.minimum)
if not self.exclusive_minimum and value < self.minimum:
return self.messages['minimum'].format(self.minimum)
return None
def validate_maximum(self, value: Number, *args: ty.Any, **context: ty.Any) -> ValidationResult:
if self.maximum is None:
return None
if self.exclusive_maximum and value >= self.maximum:
return self.messages['exclusive_maximum'].format(self.maximum)
if not self.exclusive_maximum and value > self.maximum:
return self.messages['maximum'].format(self.maximum)
return None
def validate_multiple_of(self, value: Number, *args: ty.Any, **context: ty.Any) -> ValidationResult:
if self.multiple_of is None:
return None
if isinstance(self.multiple_of, float):
quotient = value / self.multiple_of
failed = int(quotient) != quotient
else:
failed = value % self.multiple_of
if failed:
return self.messages['multiple_of'].format(self.multiple_of)
return None
class NumberType(GenericNumberType[Number]):
"""Number type"""
class IntegerType(GenericNumberType[int]):
"""Integer type"""
MESSAGES: ty.ClassVar[Messages] = {
'type': "Must be an integer",
'cast': "Couldn't cast to an integer"
}
TYPES: ty.ClassVar[Types] = (int, )
__slots__ = ()
def _cast(self, value: ty.Any, path: Path, *args: ty.Any, strict: bool = True, **context: ty.Any) -> ty.Any:
if isinstance(value, self.TYPES) or strict:
return value
try:
return int(value)
except (TypeError, ValueError):
raise SchemaError(Error(path, self.messages['cast']))
class BooleanType(BaseType[bool]):
"""Boolean type"""
MESSAGES: ty.ClassVar[Messages] = {
'type': "Must be a boolean",
'cast': "Couldn't cast to a boolean"
}
TYPES: ty.ClassVar[Types] = (bool, )
__slots__ = ()
def _cast(self, value: ty.Any, path: Path, *args: ty.Any, strict: bool = True, **context: ty.Any) -> ty.Any:
if isinstance(value, self.TYPES) or strict:
return value
if isinstance(value, str):
try:
return bool(strtobool(value))
except ValueError:
pass
elif isinstance(value, (int, float)):
return bool(value)
raise SchemaError(Error(path, self.messages['cast']))
filtered: remove_function_no_docstring 4,553 | remove_class_no_docstring 0 | remove_delete_markers 324
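GenericNumberType.validate_multiple_of above uses two different tests: exact division for float divisors and the modulo operator for ints. A standalone sketch of that check (the function name is ours):

def is_multiple_of(value, multiple_of):
    if isinstance(multiple_of, float):
        # float divisor: the quotient must be a whole number
        quotient = value / multiple_of
        return int(quotient) == quotient
    # int divisor: plain modulo
    return value % multiple_of == 0

assert is_multiple_of(1.5, 0.5)
assert not is_multiple_of(7, 2)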
hexsha: 5315b05e1aaa46313b3adc7b163dea8a2e872737 | size: 280 | ext: py | lang: Python
max_stars: dashboard/modules/test/test_utils.py @ firebolt55439/ray (head 215300b070628c06f0106906fc6c03bd70ebf140), licenses ["Apache-2.0"], count 21,382, events 2016-09-26T23:12:52.000Z to 2022-03-31T21:47:45.000Z
max_issues: dashboard/modules/test/test_utils.py @ firebolt55439/ray (head 215300b070628c06f0106906fc6c03bd70ebf140), licenses ["Apache-2.0"], count 19,689, events 2016-09-17T08:21:25.000Z to 2022-03-31T23:59:30.000Z
max_forks: dashboard/modules/test/test_utils.py @ firebolt55439/ray (head 215300b070628c06f0106906fc6c03bd70ebf140), licenses ["Apache-2.0"], count 4,114, events 2016-09-23T18:54:01.000Z to 2022-03-31T15:07:32.000Z
content:
import logging
import async_timeout
logger = logging.getLogger(__name__)
avg_line_length: 23.333333 | max_line_length: 58 | alphanum_fraction: 0.746429
original_content:
import logging
import async_timeout
logger = logging.getLogger(__name__)
async def http_get(http_session, url, timeout_seconds=60):
with async_timeout.timeout(timeout_seconds):
async with http_session.get(url) as response:
return await response.json()
filtered: remove_function_no_docstring 181 | remove_class_no_docstring 0 | remove_delete_markers 23
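A hedged usage sketch for http_get above, assuming an aiohttp.ClientSession as the http_session argument (the URL is a placeholder of ours, not from the record):

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        # http_get awaits response.json(), so the endpoint must return JSON
        data = await http_get(session, "http://localhost:8265/example")
        print(data)

# asyncio.run(main())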
hexsha: 8148a4a8c9845dde838c0081397b352d71916831 | size: 350 | ext: py | lang: Python
max_stars: portal/migrations/versions/1e09e871fb65_.py @ uwcirg/true_nth_usa_portal (head e2434731aed86f1c43f15d428dde8ffc28ac7e5f), licenses ["BSD-3-Clause"], count 3, events 2017-01-15T10:11:57.000Z to 2018-10-02T23:46:44.000Z
max_issues: portal/migrations/versions/1e09e871fb65_.py @ uwcirg/true_nth_usa_portal (head e2434731aed86f1c43f15d428dde8ffc28ac7e5f), licenses ["BSD-3-Clause"], count 876, events 2016-04-04T20:45:11.000Z to 2019-02-28T00:10:36.000Z
max_forks: portal/migrations/versions/1e09e871fb65_.py @ uwcirg/truenth-portal (head 459a0d157982f010175c50b9cccd860a61790370), licenses ["BSD-3-Clause"], count 9, events 2016-04-13T01:18:55.000Z to 2018-09-19T20:44:23.000Z
content:
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 1e09e871fb65
Revises: ('0b4e7a8a7e64', '0701782c564d')
Create Date: 2019-03-28 23:08:21.960495
"""
# revision identifiers, used by Alembic.
revision = '1e09e871fb65'
down_revision = ('0b4e7a8a7e64', '0701782c564d')
avg_line_length: 15.217391 | max_line_length: 48 | alphanum_fraction: 0.722857
original_content:
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 1e09e871fb65
Revises: ('0b4e7a8a7e64', '0701782c564d')
Create Date: 2019-03-28 23:08:21.960495
"""
# revision identifiers, used by Alembic.
revision = '1e09e871fb65'
down_revision = ('0b4e7a8a7e64', '0701782c564d')
def upgrade():
pass
def downgrade():
pass
filtered: remove_function_no_docstring 6 | remove_class_no_docstring 0 | remove_delete_markers 46
hexsha: 24194b04dfeaa229134fca286dd617aacde90e9a | size: 2,915 | ext: py | lang: Python
max_stars: student_gp/toxic_comment.py @ IMsumitkumar/CommOn-Developers-Community (head d15d4c0f725adbf1a7bcbbd7156d3edab3f4e740), licenses ["MIT"], count 1, events 2020-11-04T12:55:59.000Z to 2020-11-04T12:55:59.000Z
max_issues: student_gp/toxic_comment.py @ IMsumitkumar/CommOn-Developers-Community (head d15d4c0f725adbf1a7bcbbd7156d3edab3f4e740), licenses ["MIT"], count null, events null
max_forks: student_gp/toxic_comment.py @ IMsumitkumar/CommOn-Developers-Community (head d15d4c0f725adbf1a7bcbbd7156d3edab3f4e740), licenses ["MIT"], count null, events null
content:
import os
import pickle
from asgiref.sync import sync_to_async
@sync_to_async
avg_line_length: 30.051546 | max_line_length: 81 | alphanum_fraction: 0.677873
original_content:
import os
import pickle
from asgiref.sync import sync_to_async
@sync_to_async
def toxic_classifier(data):
data = [data]
modulePath = os.path.dirname(__file__)
toxic_vect = os.path.join(modulePath, 'pkl/toxic_vect.pkl')
toxic_model = os.path.join(modulePath, 'pkl/toxic_model.pkl')
severe_toxic_vect = os.path.join(modulePath, 'pkl/severe_toxic_vect.pkl')
severe_toxic_model = os.path.join(modulePath, 'pkl/severe_toxic_model.pkl')
obscene_vect = os.path.join(modulePath, 'pkl/obscene_vect.pkl')
obscene_model = os.path.join(modulePath, 'pkl/obscene_model.pkl')
threat_vect = os.path.join(modulePath, 'pkl/threat_vect.pkl')
threat_model = os.path.join(modulePath, 'pkl/threat_model.pkl')
insult_vect = os.path.join(modulePath, 'pkl/insult_vect.pkl')
insult_model = os.path.join(modulePath, 'pkl/insult_model.pkl')
identity_hate_vect = os.path.join(modulePath, 'pkl/identity_hate_vect.pkl')
identity_hate_model = os.path.join(modulePath, 'pkl/identity_hate_model.pkl')
#vector
with open(toxic_vect, "rb") as f:
tox = pickle.load(f)
with open(severe_toxic_vect, "rb") as f:
sev_tox = pickle.load(f)
with open(obscene_vect, "rb") as f:
obs = pickle.load(f)
with open(threat_vect, "rb") as f:
threat = pickle.load(f)
with open(insult_vect, "rb") as f:
insult = pickle.load(f)
with open(identity_hate_vect, "rb") as f:
hate = pickle.load(f)
# module
with open(toxic_model, "rb") as f:
tox_model = pickle.load(f)
with open(severe_toxic_model, "rb") as f:
sev_model = pickle.load(f)
with open(obscene_model, "rb") as f:
obs_model = pickle.load(f)
with open(threat_model, "rb") as f:
threat_model = pickle.load(f)
with open(insult_model, "rb") as f:
insult_model = pickle.load(f)
with open(identity_hate_model, "rb") as f:
hate_model = pickle.load(f)
# transform
vect_tox = tox.transform(data)
pred_tox = tox_model.predict_proba(vect_tox)[:,1]
vect_sev = sev_tox.transform(data)
pred_sev = sev_model.predict_proba(vect_sev)[:,1]
vect_obs = obs.transform(data)
pred_obs = obs_model.predict_proba(vect_obs)[:,1]
vect_threat = threat.transform(data)
pred_threat = threat_model.predict_proba(vect_threat)[:,1]
vect_insult = insult.transform(data)
pred_insult = insult_model.predict_proba(vect_insult)[:,1]
vect_hate = hate.transform(data)
pred_hate = hate_model.predict_proba(vect_hate)[:,1]
out_tox = round(pred_tox[0], 2) * 100
out_sev_tox = round(pred_sev[0], 2) * 100
out_obs = round(pred_obs[0], 2) * 100
out_threat = round(pred_threat[0], 2) * 100
out_insult = round(pred_insult[0], 2) * 100
out_hate = round(pred_hate[0], 2) * 100
return out_tox, out_sev_tox, out_obs, out_threat, out_insult, out_hate
filtered: remove_function_no_docstring 2,814 | remove_class_no_docstring 0 | remove_delete_markers 22
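toxic_classifier re-opens all twelve pickle files on every call. One possible refactoring, sketched under the assumption that the pkl/ layout above is stable (helper names are ours): cache each (vectorizer, model) pair once per process.

import os
import pickle
from functools import lru_cache

@lru_cache(maxsize=None)
def _load_pair(name):
    # load "<name>_vect.pkl" and "<name>_model.pkl" once, then reuse them
    base = os.path.join(os.path.dirname(__file__), 'pkl')
    with open(os.path.join(base, name + '_vect.pkl'), 'rb') as f:
        vect = pickle.load(f)
    with open(os.path.join(base, name + '_model.pkl'), 'rb') as f:
        model = pickle.load(f)
    return vect, model

def score(name, text):
    # same probability-to-percentage computation as the record
    vect, model = _load_pair(name)
    return round(model.predict_proba(vect.transform([text]))[:, 1][0], 2) * 100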
hexsha: 31980498cb54b923b287a4a673e24a9c06cf51b9 | size: 2,751 | ext: py | lang: Python
max_stars: owcsimpy/geoobjects/bases/paramline_py.py @ ardimasp/owcsimpy (head 155b0f26dd5e247cef9a84265256b0d70ba0b139), licenses ["MIT"], count null, events null
max_issues: owcsimpy/geoobjects/bases/paramline_py.py @ ardimasp/owcsimpy (head 155b0f26dd5e247cef9a84265256b0d70ba0b139), licenses ["MIT"], count null, events null
max_forks: owcsimpy/geoobjects/bases/paramline_py.py @ ardimasp/owcsimpy (head 155b0f26dd5e247cef9a84265256b0d70ba0b139), licenses ["MIT"], count null, events null
content:
import numpy as np
class ParamLine_py(object):
""" A 3D parametric line.
Parameters
----------
P0: ndarray(3,)
A tail point.
P1: ndarray(3,)
A head point.
Attributes
----------
P0
P1
u: ndarray(3,)
Show the direction of the parametric line
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from owcsimpy.geoutils.draw import draw
>>> from owcsimpy.geoobjects.bases.paramline_py import ParamLine_py as Line
>>> # Generate a line l
>>> l = Line(np.array([0.5,0.5,0.5]),np.ones(3))
>>> # Draw
>>> fig,ax = draw(lines=l,figsize=(5,6))
>>> # Get a point at t = 0.25
>>> P = l.getPoint(0.25)
>>> print("Point at t=0.25 is {}".format(P))
Point at t=0.25 is [0.625 0.625 0.625]
>>> # Draw
>>> x,y,z = P
>>> ax.scatter(x,y,z)
>>> plt.show()
.. plot::
:format: doctest
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from owcsimpy.geoutils.draw import draw
>>> from owcsimpy.geoobjects.bases.paramline_py import ParamLine_py as Line
>>> # Generate a line l
>>> l = Line(np.array([0.5,0.5,0.5]),np.ones(3))
>>> # Draw
>>> fig,ax = draw(lines=l,figsize=(5,6))
>>> # Get a point at t = 0.25
>>> P = l.getPoint(0.25)
>>> print("Point at t=0.25 is {}".format(P))
>>> # Draw
>>> x,y,z = P
>>> ax.scatter(x,y,z)
>>> plt.show()
Notes
-----
A parametric line, :math:`l(t)`, is defined as:
.. math:: l(t) = P_0 + \mathbf{u} t, \mathrm{where}\ \mathbf{u} = P_1-P_0.
"""
def getPoint(self,t0):
""" Get a point at t=t0.
Returns
-------
ndarray(3,)
Return P0+u t0
"""
return self.P0+self.u*t0
def isValid(self,P):
""" Check wheter the point P is in line.
Returns
-------
bool
"""
up = P-self.P0 # u prime
return True if np.allclose(up,np.zeros(3)) else np.allclose(
up/np.linalg.norm(up),self.u/np.linalg.norm(self.u))
def getParam(self,P):
""" Get t of P. Or, find t s.t. P0+u t = P
Returns
-------
float or None
Return None if P is not in the line.
"""
# Check validity of P
if self.isValid(P):
idxnotzero = np.where(self.u!=0)[0]
return (P-self.P0)[idxnotzero]/self.u[idxnotzero]
else:
return None
avg_line_length: 22.735537 | max_line_length: 83 | alphanum_fraction: 0.48964
original_content:
import numpy as np
class ParamLine_py(object):
""" A 3D parametric line.
Parameters
----------
P0: ndarray(3,)
A tail point.
P1: ndarray(3,)
A head point.
Attributes
----------
P0
P1
u: ndarray(3,)
Show the direction of the parametric line
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from owcsimpy.geoutils.draw import draw
>>> from owcsimpy.geoobjects.bases.paramline_py import ParamLine_py as Line
>>> # Generate a line l
>>> l = Line(np.array([0.5,0.5,0.5]),np.ones(3))
>>> # Draw
>>> fig,ax = draw(lines=l,figsize=(5,6))
>>> # Get a point at t = 0.25
>>> P = l.getPoint(0.25)
>>> print("Point at t=0.25 is {}".format(P))
Point at t=0.25 is [0.625 0.625 0.625]
>>> # Draw
>>> x,y,z = P
>>> ax.scatter(x,y,z)
>>> plt.show()
.. plot::
:format: doctest
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from owcsimpy.geoutils.draw import draw
>>> from owcsimpy.geoobjects.bases.paramline_py import ParamLine_py as Line
>>> # Generate a line l
>>> l = Line(np.array([0.5,0.5,0.5]),np.ones(3))
>>> # Draw
>>> fig,ax = draw(lines=l,figsize=(5,6))
>>> # Get a point at t = 0.25
>>> P = l.getPoint(0.25)
>>> print("Point at t=0.25 is {}".format(P))
>>> # Draw
>>> x,y,z = P
>>> ax.scatter(x,y,z)
>>> plt.show()
Notes
-----
A parametric line, :math:`l(t)`, is defined as:
.. math:: l(t) = P_0 + \mathbf{u} t, \mathrm{where}\ \mathbf{u} = P_1-P_0.
"""
def __init__(self,P0,P1):
assert P0.size == 3 and P1.size == 3
self.P0,self.P1 = P0,P1
self.u = P1-P0
def getPoint(self,t0):
""" Get a point at t=t0.
Returns
-------
ndarray(3,)
Return P0+u t0
"""
return self.P0+self.u*t0
def isValid(self,P):
""" Check wheter the point P is in line.
Returns
-------
bool
"""
up = P-self.P0 # u prime
return True if np.allclose(up,np.zeros(3)) else np.allclose(
up/np.linalg.norm(up),self.u/np.linalg.norm(self.u))
def getParam(self,P):
""" Get t of P. Or, find t s.t. P0+u t = P
Returns
-------
float or None
Return None if P is not in the line.
"""
# Check validity of P
if self.isValid(P):
idxnotzero = np.where(self.u!=0)[0]
return (P-self.P0)[idxnotzero]/self.u[idxnotzero]
else:
return None
filtered: remove_function_no_docstring 114 | remove_class_no_docstring 0 | remove_delete_markers 27
hexsha: 47d37e709d0ad25a0178922ce88835efb815d534 | size: 208 | ext: py | lang: Python
max_stars: tests/classes/cellphone_name.py @ Jesse-Yung/jsonclasses (head d40c52aec42bcb978a80ceb98b93ab38134dc790), licenses ["MIT"], count 50, events 2021-08-18T08:08:04.000Z to 2022-03-20T07:23:26.000Z
max_issues: tests/classes/cellphone_name.py @ Jesse-Yung/jsonclasses (head d40c52aec42bcb978a80ceb98b93ab38134dc790), licenses ["MIT"], count 1, events 2021-11-23T02:12:29.000Z to 2021-11-23T13:35:26.000Z
max_forks: tests/classes/cellphone_name.py @ Jesse-Yung/jsonclasses (head d40c52aec42bcb978a80ceb98b93ab38134dc790), licenses ["MIT"], count 8, events 2021-07-01T02:39:15.000Z to 2021-12-10T02:20:18.000Z
content:
from __future__ import annotations
from jsonclasses import jsonclass, types
@jsonclass
avg_line_length: 20.8 | max_line_length: 50 | alphanum_fraction: 0.793269
original_content:
from __future__ import annotations
from jsonclasses import jsonclass, types
@jsonclass
class CellphoneName:
cellphone_name: str = types.str.tocap.required
cellphone_title: str = types.str.required
filtered: remove_function_no_docstring 0 | remove_class_no_docstring 96 | remove_delete_markers 22
hexsha: 1f9ac96adf2671f27d6dc3e241d464406259f49c | size: 825 | ext: py | lang: Python
max_stars: democelery/network/signals.py @ alexdzul/democelery (head 4627d155ccc3a6daf21ec4e4b8d554891446288d), licenses ["MIT"], count 1, events 2021-12-02T05:29:37.000Z to 2021-12-02T05:29:37.000Z
max_issues: democelery/network/signals.py @ alexdzul/democelery (head 4627d155ccc3a6daf21ec4e4b8d554891446288d), licenses ["MIT"], count null, events null
max_forks: democelery/network/signals.py @ alexdzul/democelery (head 4627d155ccc3a6daf21ec4e4b8d554891446288d), licenses ["MIT"], count null, events null
content:
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, post_save, post_delete
from django.dispatch import receiver
from .models import Post, Subscriber
from .functions import notify_new_content, notify_new_subscriber
@receiver(post_save, sender=Post)
def send_mail_to_subscriber(sender, **kwargs):
"""
Sending message to subscribers when exist a new post.
When a post is created
"""
if kwargs.get('created', False):
post = kwargs.get("instance")
notify_new_content(post.id)
@receiver(post_save, sender=Subscriber)
def new_subscriber(sender, **kwargs):
"""
Sending message staff when exist a new subscriber.
"""
if kwargs.get('created', False):
subscriber = kwargs.get("instance")
notify_new_subscriber(subscriber.id)
avg_line_length: 30.555556 | max_line_length: 69 | alphanum_fraction: 0.724848
original_content:
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, post_save, post_delete
from django.dispatch import receiver
from .models import Post, Subscriber
from .functions import notify_new_content, notify_new_subscriber
@receiver(post_save, sender=Post)
def send_mail_to_subscriber(sender, **kwargs):
"""
Sending message to subscribers when exist a new post.
When a post is created
"""
if kwargs.get('created', False):
post = kwargs.get("instance")
notify_new_content(post.id)
@receiver(post_save, sender=Subscriber)
def new_subscriber(sender, **kwargs):
"""
Sending message staff when exist a new subscriber.
"""
if kwargs.get('created', False):
subscriber = kwargs.get("instance")
notify_new_subscriber(subscriber.id)
filtered: remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_delete_markers 0
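The @receiver decorator used above is shorthand for an explicit Signal.connect call; an equivalent registration, shown for reference:

from django.db.models.signals import post_save

# equivalent to decorating send_mail_to_subscriber with
# @receiver(post_save, sender=Post)
post_save.connect(send_mail_to_subscriber, sender=Post)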
hexsha: aa678430fa7a985978185bacff538e9309826d62 | size: 217 | ext: py | lang: Python
max_stars: web/sso/admin.py @ kisawebkaist/kisaweb (head 05883d4d81394d5055137f24a8a46d7c90191101), licenses ["MIT"], count 2, events 2020-11-06T10:32:52.000Z to 2021-11-05T06:56:08.000Z
max_issues: web/sso/admin.py @ zero-or-one/kisaweb (head d88eca63b50fd2593a7d1aa23916a80437e84925), licenses ["MIT"], count 29, events 2020-10-09T18:29:11.000Z to 2022-03-12T14:23:36.000Z
max_forks: web/sso/admin.py @ kisawebkaist/kisaweb (head 05883d4d81394d5055137f24a8a46d7c90191101), licenses ["MIT"], count 11, events 2020-10-26T03:59:47.000Z to 2021-10-04T07:03:44.000Z
content:
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User, Agreement
# Register your models here.
admin.site.register(User, UserAdmin)
admin.site.register(Agreement)
avg_line_length: 21.7 | max_line_length: 47 | alphanum_fraction: 0.81106
original_content:
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User, Agreement
# Register your models here.
admin.site.register(User, UserAdmin)
admin.site.register(Agreement)
filtered: remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_delete_markers 0
hexsha: a342471396fe69a5d48b3288fe510c7425f390c9 | size: 2,415 | ext: py | lang: Python
max_stars: lifoid/www/api/webhook.py @ romaryd/lifoid (head f0c96d2c58cc7980ef7a3747251928f6da9015f5), licenses ["Apache-2.0"], count 1, events 2018-06-27T07:04:24.000Z to 2018-06-27T07:04:24.000Z
max_issues: lifoid/www/api/webhook.py @ LifoidLabs/lifoid (head f0c96d2c58cc7980ef7a3747251928f6da9015f5), licenses ["Apache-2.0"], count 5, events 2018-05-23T07:06:45.000Z to 2018-10-09T20:08:29.000Z
max_forks: lifoid/www/api/webhook.py @ LifoidLabs/lifoid (head f0c96d2c58cc7980ef7a3747251928f6da9015f5), licenses ["Apache-2.0"], count 1, events 2019-03-25T17:34:28.000Z to 2019-03-25T17:34:28.000Z
content:
import sys
import os
import json
import traceback
import importlib
from flask import request, make_response, Blueprint, abort
from lifoid.config import settings
from lifoid.logging.mixin import ServiceLogger
from lifoid.constants import E_GET, E_POST
from lifoid.events import process_event
from lifoid.exceptions import (LifoidRequestForbiddenError,
LifoidRequestUnknownError)
sys.path.insert(0, os.getcwd())
logger = ServiceLogger()
try:
app_settings_module = importlib.import_module(
settings.lifoid_settings_module
)
logger.debug('Templates path: {}'.format(
app_settings_module.TEMPLATES_PATH))
webhook = Blueprint('webhook', __name__,
template_folder=app_settings_module.TEMPLATES_PATH)
except ImportError:
logger.error('No templates path configured')
webhook = Blueprint('webhook', __name__)
@webhook.route('/webhook', methods=['GET', 'POST'])
def index():
"""
Universal webhook endpoint for all messenger applications.
"""
logger.debug('Webhook blueprint invoked')
try:
if request.method == 'POST':
e_type = E_POST
data = request.get_data()
if data.startswith(b'payload'):
event = json.loads(request.form['payload'])
else:
event = json.loads(request.get_data())
elif request.method == 'GET':
e_type = E_GET
event = request.args
else:
return make_response('Request method not supported', 404)
logger.debug('{} {}'.format(e_type, event))
asynchronous = settings.pasync == 'yes'
resp, perf = process_event(e_type, event, asynchronous)
logger.info('Request processed in {}'.format(perf))
if resp is not None:
logger.debug('Http Response: {}'.format(resp))
return make_response(resp, 200)
return make_response('OK', 200)
except KeyError:
logger.error(traceback.format_exc())
logger.error('Missing key argument')
return make_response('Missing key argument', 404)
except LifoidRequestForbiddenError:
return abort(403)
except LifoidRequestUnknownError:
return make_response('Unknown request', 404)
except:
logger.error(request.get_data())
logger.error(traceback.format_exc())
return make_response('', 200)
avg_line_length: 36.044776 | max_line_length: 75 | alphanum_fraction: 0.654658
original_content:
import sys
import os
import json
import traceback
import importlib
from flask import request, make_response, Blueprint, abort
from lifoid.config import settings
from lifoid.logging.mixin import ServiceLogger
from lifoid.constants import E_GET, E_POST
from lifoid.events import process_event
from lifoid.exceptions import (LifoidRequestForbiddenError,
LifoidRequestUnknownError)
sys.path.insert(0, os.getcwd())
logger = ServiceLogger()
try:
app_settings_module = importlib.import_module(
settings.lifoid_settings_module
)
logger.debug('Templates path: {}'.format(
app_settings_module.TEMPLATES_PATH))
webhook = Blueprint('webhook', __name__,
template_folder=app_settings_module.TEMPLATES_PATH)
except ImportError:
logger.error('No templates path configured')
webhook = Blueprint('webhook', __name__)
@webhook.route('/webhook', methods=['GET', 'POST'])
def index():
"""
Universal webhook endpoint for all messenger applications.
"""
logger.debug('Webhook blueprint invoked')
try:
if request.method == 'POST':
e_type = E_POST
data = request.get_data()
if data.startswith(b'payload'):
event = json.loads(request.form['payload'])
else:
event = json.loads(request.get_data())
elif request.method == 'GET':
e_type = E_GET
event = request.args
else:
return make_response('Request method not supported', 404)
logger.debug('{} {}'.format(e_type, event))
asynchronous = settings.pasync == 'yes'
resp, perf = process_event(e_type, event, asynchronous)
logger.info('Request processed in {}'.format(perf))
if resp is not None:
logger.debug('Http Response: {}'.format(resp))
return make_response(resp, 200)
return make_response('OK', 200)
except KeyError:
logger.error(traceback.format_exc())
logger.error('Missing key argument')
return make_response('Missing key argument', 404)
except LifoidRequestForbiddenError:
return abort(403)
except LifoidRequestUnknownError:
return make_response('Unknown request', 404)
except:
logger.error(request.get_data())
logger.error(traceback.format_exc())
return make_response('', 200)
filtered: remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_delete_markers 0
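The POST branch above accepts two body shapes: a form-encoded payload=<json> field (as some messenger platforms send) and a raw JSON body. A minimal standalone sketch of that dispatch (the function name is ours):

import json

def parse_post_body(raw, form):
    # form-encoded messenger callbacks arrive as payload=<json>
    if raw.startswith(b'payload'):
        return json.loads(form['payload'])
    # everything else is treated as a raw JSON body
    return json.loads(raw)

assert parse_post_body(b'{"a": 1}', {}) == {"a": 1}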
hexsha: 6bae8be3122dbc23198d933ed6ee8ad05824b27f | size: 118 | ext: py | lang: Python
max_stars: service_account_auth/__init__.py @ Wilduck/gclient-service-account-auth (head 26e23a13b33546ff87df92fea387c680fab75e10), licenses ["MIT"], count 3, events 2015-02-11T08:52:35.000Z to 2020-03-08T06:07:39.000Z
max_issues: service_account_auth/__init__.py @ Wilduck/gclient-service-account-auth (head 26e23a13b33546ff87df92fea387c680fab75e10), licenses ["MIT"], count 4, events 2015-01-23T02:33:39.000Z to 2016-03-14T20:52:46.000Z
max_forks: service_account_auth/__init__.py @ Wilduck/gclient-service-account-auth (head 26e23a13b33546ff87df92fea387c680fab75e10), licenses ["MIT"], count 5, events 2015-02-11T08:52:44.000Z to 2018-03-05T00:50:57.000Z
content:
# flake8: noqa
from .version import __version__
from .authorized_service import AuthorizedService, get_email_and_key
avg_line_length: 23.6 | max_line_length: 68 | alphanum_fraction: 0.847458
original_content:
# flake8: noqa
from .version import __version__
from .authorized_service import AuthorizedService, get_email_and_key
filtered: remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_delete_markers 0
hexsha: e7e8c447861dcbf1adf101ce010eed6eb350f9c8 | size: 4,272 | ext: py | lang: Python
max_stars: npc/character/tags/tag_container.py @ Arent128/npc (head c8a1e227a1d4d7c540c4f4427b611ffc290535ee), licenses ["MIT"], count 13, events 2016-02-23T08:15:22.000Z to 2021-07-17T20:54:57.000Z
max_issues: npc/character/tags/tag_container.py @ Arent128/npc (head c8a1e227a1d4d7c540c4f4427b611ffc290535ee), licenses ["MIT"], count 1, events 2017-03-30T08:11:40.000Z to 2017-09-07T15:01:08.000Z
max_forks: npc/character/tags/tag_container.py @ Arent128/npc (head c8a1e227a1d4d7c540c4f4427b611ffc290535ee), licenses ["MIT"], count 1, events 2020-02-21T09:44:40.000Z to 2020-02-21T09:44:40.000Z
content:
from collections import UserDict
from copy import copy
from . import *
class TagContainer(UserDict):
"""
Manages a coherent group of tags
Instances are callable. That syntax is the preferred way to get a tag, since
it will always return a tag object. Accessing an undeclared tag in this way
will return an UnknownTag object instead of raising an error.
This object can also be accessed like a normal dict.
"""
def __init__(self):
"""
Create a new tag container
All tag containers start with a special DescriptionTag element for
consistency.
"""
super().__init__()
self._add_taglike(DescriptionTag)
self.problems = []
def __call__(self, tag_name: str):
"""
Get a tag by its name
Tags which have already been defined will be returned as-is. Undefined
tag names are assigned an UnknownTag object, which is then returned.
Args:
tag_name (str): Name of the tag to fetch
Returns:
A Tag or UnknownTag object
"""
tag = self.data.get(tag_name)
if tag is None:
tag = UnknownTag(tag_name)
self.append(tag)
return tag
def append(self, tag):
"""
Add a tag to this container
The tag will be indexed by its name
Args:
tag (Tag): A tag object
"""
if tag.name in self.data:
return
self.data[tag.name] = tag
def update(self, values: dict):
"""
Update multiple tags using data from the values dict
Args:
values (dict): Dictionary of lists whose keys are tag names
"""
for key, data in values.items():
self(key).update(data)
def all(self):
"""
Iterate over the stored tag objects
Returns:
Iterator for the stored tag objects
"""
return iter(self.data.values())
def names(self):
"""
Get a view of the tag names
Returns:
Dictionary view of the saved tag names
"""
return self.data.keys()
def present(self):
"""
Create a new container with only the tags marked as present
Returns:
TagContainer object with only present tags
"""
new_container = copy(self)
new_container.data = {k: v for k, v in self.data.items() if v.present}
return new_container
def _add_taglike(self, klass, *args, **kwargs):
"""
Internal method to create and append a new tag
Args:
klass (object): Tag class to create
args, kwargs: Other arguments as appropriate for the tag to create
"""
self.append(klass(*args, **kwargs))
def add_tag(self, *args, **kwargs):
"""
Add a new tag
"""
self._add_taglike(Tag, *args, **kwargs)
def add_flag(self, *args, **kwargs):
"""
Add a new flag
"""
self._add_taglike(Flag, *args, **kwargs)
def add_group(self, *args, **kwargs):
"""
Add a new group tag
"""
self._add_taglike(GroupTag, *args, **kwargs)
@property
def valid(self):
"""
bool: Whether this container is internally valid
This property is only meaningful after calling validate()
"""
return len(self.problems) == 0
def validate(self, strict: bool=False):
"""
Validate all of the tags in this container
Args:
strict (bool): Whether to report non-critical errors and omissions
Returns:
True if this tag has no validation problems, false if not
"""
self.problems = []
for key, tag in self.data.items():
tag.validate(strict=strict)
self.problems.extend(tag.problems)
if key != tag.name:
self.problems.append("Tag '{}' has wrong key: '{}'".format(tag.name, key))
return self.valid
def sanitize(self):
"""
Ask all tags to remove their hidden values
Calls sanitize on each tag
"""
for tag in self.values():
tag.sanitize()
avg_line_length: 26.04878 | max_line_length: 90 | alphanum_fraction: 0.562734
original_content:
from collections import UserDict
from copy import copy
from . import *
class TagContainer(UserDict):
"""
Manages a coherent group of tags
Instances are callable. That syntax is the preferred way to get a tag, since
it will always return a tag object. Accessing an undeclared tag in this way
will return an UnknownTag object instead of raising an error.
This object can also be accessed like a normal dict.
"""
def __init__(self):
"""
Create a new tag container
All tag containers start with a special DescriptionTag element for
consistency.
"""
super().__init__()
self._add_taglike(DescriptionTag)
self.problems = []
def __call__(self, tag_name: str):
"""
Get a tag by its name
Tags which have already been defined will be returned as-is. Undefined
tag names are assigned an UnknownTag object, which is then returned.
Args:
tag_name (str): Name of the tag to fetch
Returns:
A Tag or UnknownTag object
"""
tag = self.data.get(tag_name)
if tag is None:
tag = UnknownTag(tag_name)
self.append(tag)
return tag
def append(self, tag):
"""
Add a tag to this container
The tag will be indexed by its name
Args:
tag (Tag): A tag object
"""
if tag.name in self.data:
return
self.data[tag.name] = tag
def update(self, values: dict):
"""
Update multiple tags using data from the values dict
Args:
values (dict): Dictionary of lists whose keys are tag names
"""
for key, data in values.items():
self(key).update(data)
def all(self):
"""
Iterate over the stored tag objects
Returns:
Iterator for the stored tag objects
"""
return iter(self.data.values())
def names(self):
"""
Get a view of the tag names
Returns:
Dictionary view of the saved tag names
"""
return self.data.keys()
def present(self):
"""
Create a new container with only the tags marked as present
Returns:
TagContainer object with only present tags
"""
new_container = copy(self)
new_container.data = {k: v for k, v in self.data.items() if v.present}
return new_container
def _add_taglike(self, klass, *args, **kwargs):
"""
Internal method to create and append a new tag
Args:
klass (object): Tag class to create
args, kwargs: Other arguments as appropriate for the tag to create
"""
self.append(klass(*args, **kwargs))
def add_tag(self, *args, **kwargs):
"""
Add a new tag
"""
self._add_taglike(Tag, *args, **kwargs)
def add_flag(self, *args, **kwargs):
"""
Add a new flag
"""
self._add_taglike(Flag, *args, **kwargs)
def add_group(self, *args, **kwargs):
"""
Add a new group tag
"""
self._add_taglike(GroupTag, *args, **kwargs)
@property
def valid(self):
"""
bool: Whether this container is internally valid
This property is only meaningful after calling validate()
"""
return len(self.problems) == 0
def validate(self, strict: bool=False):
"""
Validate all of the tags in this container
Args:
strict (bool): Whether to report non-critical errors and omissions
Returns:
True if this tag has no validation problems, false if not
"""
self.problems = []
for key, tag in self.data.items():
tag.validate(strict=strict)
self.problems.extend(tag.problems)
if key != tag.name:
self.problems.append("Tag '{}' has wrong key: '{}'".format(tag.name, key))
return self.valid
def sanitize(self):
"""
Ask all tags to remove their hidden values
Calls sanitize on each tag
"""
for tag in self.values():
tag.sanitize()
filtered: remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_delete_markers 0
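A short usage sketch of the callable access documented on TagContainer above; only the behaviour described in the docstrings is assumed, the tag name is ours:

tags = TagContainer()
unknown = tags('totally-new-tag')   # undeclared names come back as UnknownTag
assert 'totally-new-tag' in tags.names()  # ...and are stored under that name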
hexsha: 889649b4ec4b2639ffc6d441632f708612e41430 | size: 6,096 | ext: py | lang: Python
max_stars: ingv/get-ingv-data.py @ danja/elfquake (head 0c42a32ccc1d7008febf120eabe666fbdccff781), licenses ["Apache-2.0"], count 2, events 2019-04-24T01:57:04.000Z to 2020-05-26T22:28:55.000Z
max_issues: ingv/get-ingv-data.py @ danja/elfquake (head 0c42a32ccc1d7008febf120eabe666fbdccff781), licenses ["Apache-2.0"], count null, events null
max_forks: ingv/get-ingv-data.py @ danja/elfquake (head 0c42a32ccc1d7008febf120eabe666fbdccff781), licenses ["Apache-2.0"], count null, events null
content:
#
# apologies for the camelCase - the previous version was in JS
import dateutil.parser
import datetime
from datetime import date, time
from time import sleep
import http.client
import xml.etree.ElementTree as ET
## l'Aquila 6 April 2009 > 5 mag
# 42.3476°N 13.3800°ECoordinates: 42.3476°N 13.3800°E[1]
# http://webservices.ingv.it/fdsnws/event/1/query?starttime=2009-04-01T00:00:00&endtime=2009-04-10T00:00:00
# curl -i "http://webservices.ingv.it/fdsnws/event/1/query?starttime=2010-01-01T00:00:00&endtime=2010-01-01T06:00:00"
# using low-level version to log connection issues
if __name__ == "__main__":
INGV().main()
avg_line_length: 39.329032 | max_line_length: 138 | alphanum_fraction: 0.598753
original_content:
#
# apologies for the camelCase - the previous version was in JS
import dateutil.parser
import datetime
from datetime import date, time
from time import sleep
import http.client
import xml.etree.ElementTree as ET
## l'Aquila 6 April 2009 > 5 mag
# 42.3476°N 13.3800°ECoordinates: 42.3476°N 13.3800°E[1]
# http://webservices.ingv.it/fdsnws/event/1/query?starttime=2009-04-01T00:00:00&endtime=2009-04-10T00:00:00
# curl -i "http://webservices.ingv.it/fdsnws/event/1/query?starttime=2010-01-01T00:00:00&endtime=2010-01-01T06:00:00"
class INGV():
def __init__(self):
self.errors = ""
self.domain = "webservices.ingv.it"
self.endpoint = "/fdsnws/event/1/query"
# service dates are UTC, though it shouldn't matter here
# self.startDate = dateutil.parser.parse("2009-04-01T00:00:00Z") l'Aquila
# self.startDate = dateutil.parser.parse("1997-01-01T00:00:00Z") proper
self.startDate = dateutil.parser.parse("2010-03-10T18:00:00Z") # temp
self.endDate = dateutil.parser.parse("2017-08-30T00:00:00Z")
minlat = "40"
maxlat = "47"
minlon = "7"
maxlon = "15"
#geographic-constraints=boundaries-rect
self.region_string = "geographic-constraints=boundaries-rect&minlat="+minlat+"&maxlat="+maxlat+"&minlon="+minlon+"&maxlon="+maxlon
self.windowHours = 1 #
self.windows_per_file = 24*30 # 30 day blocks
self.data_dir = "./csv_data/raw/"
self.sample_domain = "webservices.ingv.it"
self.sample_path = "/fdsnws/event/1/query?starttime=2010-01-01T00:00:00&endtime=2010-01-01T06:00:00"
self.pause = 0.1 # delay between GETs to be kinder to the service
self.timeout = 300 # for the GET 5 mins - sometimes it takes a long time
self.csv = ""
def main(self):
window_count = 0
startWindow = self.startDate
# step through dates
while(startWindow < self.endDate):
startString = datetime.datetime.isoformat(startWindow)
# crop to 2010-01-01T00:00:00 - timespec=seconds didn't work for me
startString = startString[0:19]
startWindow = startWindow + datetime.timedelta(hours=self.windowHours)
endString = datetime.datetime.isoformat(startWindow)
endString = endString[0:19]
query = "?"+self.region_string
query = query + '&starttime=' + startString + '&endtime=' + endString
path = self.endpoint + query
sleep(self.pause) # don't hammer the service
try:
xml = self.get_xml(self.domain, path)
except ValueError as err:
print(err)
print()
xml = ""  # reset so a failed GET does not reuse the previous window's XML (or hit an unbound name)
if xml != "":
self.csv = self.csv + self.extract_data(xml,query)
window_count = window_count + 1
if(window_count == self.windows_per_file):
window_count = 0
filename = self.data_dir+"ingv_"+startString+".csv"
print("Saving "+filename)
self.save_text(filename,self.csv)
self.csv = ""
self.save_text(self.data_dir+"error.log", self.errors)
# using low-level version to log connection issues
def get_xml(self, domain, path):
# print("PATH = "+path)
connection = http.client.HTTPConnection(domain, timeout=self.timeout) # , timeout=self.timeout
connection.request('GET', path)
response = connection.getresponse()
url = "http://"+domain+path
print("Query: "+url+"\n")
if(response.status == 204): # no content
return ""
if(response.status == 200):
content = response.read().decode('utf-8')
else:
http_error = "HTTP "+str(response.status)+" "+response.reason
message = "Unexpected response from \n"+url+"\n"+http_error
content = ""
if response.status < 200 or response.status >= 300:
self.errors = self.errors + message
raise ValueError(http_error)
return content
# xmlns="http://quakeml.org/xmlns/bed/1.2"
# xmlns:q="http://quakeml.org/xmlns/quakeml/1.2"
#
# q:quakeml/eventParameters/event/origin/time/value
# q:quakeml/eventParameters/event/origin/latitude/value
# q:quakeml/eventParameters/event/origin/longitude/value
# q:quakeml/eventParameters/event/origin/depth/value
# q:quakeml/eventParameters/event/magnitude/mag/value
def extract_data(self,xml,query): # query is for debugging bad results
ns = "{http://quakeml.org/xmlns/bed/1.2}"
# print(xml+"\n\n\n")
try:
root = ET.fromstring(xml)
except ET.ParseError as err:
url = "http://"+self.domain+self.endpoint+query
message = "Error in XML from \n"+url+"\n--------\n"+xml+"\n---------\n"+err.msg+"\n"
self.errors = self.errors + message
print(message)
return ""
eventParameters = root.find(ns+'eventParameters')
current = ""
count = 0
for event in eventParameters.findall(ns+'event'):
origin_element = event.find(ns+'origin')
time = origin_element.find(ns+'time').find(ns+'value').text
latitude = origin_element.find(ns+'latitude').find(ns+'value').text
longitude = origin_element.find(ns+'longitude').find(ns+'value').text
depth = origin_element.find(ns+'depth').find(ns+'value').text
magnitude_element = event.find(ns+'magnitude')
magnitude = magnitude_element.find(ns+'mag').find(ns+'value').text
count = count + 1
current = current + time+", "+latitude+", "+longitude+", "+depth+", "+magnitude+"\n"
print("\nEvents from query = "+str(count)+"\n")
return current
def save_text(self, filename, text):
with open(filename, 'w') as file:
file.write(text)
if __name__ == "__main__":
INGV().main()
filtered: remove_function_no_docstring 5,285 | remove_class_no_docstring -8 | remove_delete_markers 156
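The main loop above steps a start timestamp through fixed-size windows until the end date. A hedged standalone sketch of that chunking pattern (the generator name is ours):

import datetime

def time_windows(start, end, hours):
    # yield consecutive (window_start, window_end) pairs covering [start, end)
    step = datetime.timedelta(hours=hours)
    while start < end:
        yield start, min(start + step, end)
        start = start + step

for s, e in time_windows(datetime.datetime(2010, 3, 10, 18),
                         datetime.datetime(2010, 3, 11, 0), hours=2):
    print(s.isoformat()[0:19], e.isoformat()[0:19])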
hexsha: 1bdd22b9e8687c784908307d90517dd91dd44dff | size: 23,507 | ext: py | lang: Python
max_stars: test_model.py @ IulianEmilTampu/OCT_SPLIT_PROPERLY_YOUR_DATA (head 40708f54c2502b85b2acbfec35a7bcff5f031c92), licenses ["MIT"], count null, events null
max_issues: test_model.py @ IulianEmilTampu/OCT_SPLIT_PROPERLY_YOUR_DATA (head 40708f54c2502b85b2acbfec35a7bcff5f031c92), licenses ["MIT"], count null, events null
max_forks: test_model.py @ IulianEmilTampu/OCT_SPLIT_PROPERLY_YOUR_DATA (head 40708f54c2502b85b2acbfec35a7bcff5f031c92), licenses ["MIT"], count null, events null
content:
'''
Script that tests a trained model on its training dataset. It does the same
testing routine as the one in the overall utilities_models_tf.py script.
It saves
¤ the information about the test for easy later plotting
¤ ROC (per-class and overall using micro and macro average)
¤ PP curve (per-class and overall using micro and macro average)
¤ summary of performance for easy read of the final scores
Steps
1 - get paths and models to test
2 - load testing dataset
3 - get predictions using the test function in the utilities_models_tf.py
4 - plot and save confusion matrix
5 - plot and save ROC curve
6 - save detailed info of the testing and summary
'''
import os
import sys
import cv2
import glob
import json
import time
import pickle
import random
import pathlib
import argparse
import importlib
import numpy as np
from datetime import datetime
from collections import OrderedDict
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import load_model
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
# local imports
import utilities
import utilities_models_tf
## 1 - get models information and additional files
parser = argparse.ArgumentParser(description='Script that prints a summary of the model performance.')
parser.add_argument('-m','--model_path' ,required=True, help='Specify the folder where the trained model is located')
parser.add_argument('-d','--dataset_path' ,required=False, help='Specify where the dataset is located', default=False)
parser.add_argument('-mv','--model_version' ,required=False, help='Specify if to run the training on the best model (best) or the last (last)', default="best")
args = parser.parse_args()
model_path = args.model_path
dataset_path = args.dataset_path
model_version = args.model_version
# # # DEBUG
# model_path = '/flush/iulta54/Research/P3_OCT_SPLIT_PROPERLY_YOUR_DATA/trained_models/LightOCT_per_image_split_5_folds_rkf_10_lr0.0001_batch64_AIIMS_rls_True'
# dataset_path = "/flush/iulta54/Research/Data/OCT/AIIMS_Dataset/original"
# model_version = "best"
title="Testing script"
print(f'\n{"-"*len(title)}')
print(f'{title}')
print(f'{"-"*len(title)}\n')
# check folders
if not os.path.isdir(model_path):
raise ValueError(f'Model not found. Given {model_path}')
else:
# check that the configuration file is in place
if not os.path.isfile(os.path.join(model_path,'config.json')):
raise ValueError(f'Configuration file not found for the given model. Check that the model was configured and trained. Given {os.path.join(model_path,"config.json")}')
else:
print("Model and config file found.")
# check that the model_version setting is a correct one (best, last, ensamble - not yet implemented)
if not any([model_version==s for s in ["best", "last", "ensamble"]]):
raise ValueError(f'The given model version for the testing is unknown. Given {model_version}, expected best, last or ensamble')
print(f'Working on model {os.path.basename(model_path)}')
print(f'Model configuration set for testing: {model_version}')
## 2 - load testing dataset
importlib.reload(utilities)
# load configuration file
with open(os.path.join(model_path,'config.json')) as json_file:
config = json.load(json_file)
config['label_description'] = config['unique_labels']
if dataset_path is False:
dataset_path = config['dataset_folder']
# check if dataset folder
if not os.path.isdir(dataset_path):
raise ValueError(f'Dataset path not found. Given {dataset_path}')
else:
print('Dataset path found.')
# take one testing
# make sure that the files point to this system dataset
# fix names based on the given dataset path
if any([config['dataset_type'] == 'retinal', config['dataset_type'] == 'Kermany']):
if config['dataset_split_strategy'] == 'original':
idx = 4
else:
idx = 3
elif config['dataset_type'] == 'AIIMS':
idx = 4
elif config['dataset_type'] == 'Srinivas':
idx = 5
test_img= []
# build file names to point to this given dataset
for f in config['test']:
aus = [pathlib.Path(f).parts[-i] for i in reversed(range(idx))][0:-1]
aus.insert(0, dataset_path)
test_img.append(os.path.join(*aus))
# create generator based on model specifications and dataset
if any([config['dataset_type'] == 'retinal', config['dataset_type'] == 'Kermany']):
data_gen = utilities.Kermany_data_gen
elif config['dataset_type'] == 'AIIMS':
data_gen = utilities.AIIMS_data_gen
elif config['dataset_type'] == 'Srinivas':
data_gen = utilities.Srinivas_data_gen
test_dataset = data_gen(test_img,
unique_labels=config['unique_labels'],
batch_size=16,
training=False,
channels=config['n_channels'],
input_size=config['input_size'],
random_label_experiment=config['random_label_experiment'],
random_label_experiment_seed=291209)
## perform testing for each fold the model was trained on
importlib.reload(utilities_models_tf)
test_fold_summary = {}
folds = glob.glob(os.path.join(model_path,"fold_*"))
# get the right model based on the model_version_specification
if model_version=="best":
model_name_version = "model.tf"
elif model_version=="last":
model_name_version = "last_model.tf"
for idx, f in enumerate(folds):
print(f'Working on fold {idx+1}/{len(folds)}')
# load model
if os.path.exists(os.path.join(f, model_name_version)):
model = tf.keras.models.load_model(os.path.join(f, model_name_version), compile=False)
else:
raise Exception('Model not found')
test_gt, test_prediction, test_time = utilities_models_tf.test_independent(model, config, test_dataset)
test_fold_summary[idx]={
'ground_truth':np.argmax(test_gt.numpy(), axis=-1),
'prediction':test_prediction.numpy(),
'test_time':float(test_time)
}
## save and plot
from collections import OrderedDict
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
'''
Saving overall cross-validation test results and images:
- Confusion matrix
- ROC curve
- Precision-Recall curve
- test summary file with the prediction for every test image (test_summary.txt)
Here we also add the information needed to re-plot the ROC and PP curves (fpr,
tpr, roc_auc, precision and recall - micro and macro average)
The test_summary.txt file is a dictionary with the following entries:
- model_name: string
- labels: list of the true values for the tested images
- fold_test_values: list containing the predictions for every fold (list of lists)
- test_time: string
- test_date: string
- accuracy: float
- false_positive_rate: list containing the fpr for every class (list of lists)
- false_positive_rate_micro_avg: list containing the micro average fpr (used for the overall ROC plot)
- false_positive_rate_macro_avg: list containing the macro average fpr (used for the overall ROC plot)
- true_positive_rate: list containing the tpr for every class (list of lists)
- true_positive_rate_micro_avg: list containing the micro average tpr (used for the overall ROC plot)
- true_positive_rate_macro_avg: list containing the macro average tpr (used for the overall ROC plot)
- precision: list containing the precision values for every class to plot the PP curve (list of lists)
- precision_micro_avg: list of overall micro average of precision
- average_precision: average precision value computed using micro average
- recall: list containing the recall values for every class to plot the PP curve (list of lists)
- recall_micro_avg: list of overall micro average of recall
- F1: list of micro and macro average f1-scores
Since the full test_summary file is slow to open, the scores are also saved in a
separate file (scores_test_summary.txt) for easy access.
'''
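# Illustrative sketch (hypothetical values, not produced by this run): for a
# 2-class, 2-fold model the saved dictionary would look roughly like
#   {"model_name": "LightOCT", "labels": [0, 1],
#    "folds_test_logits_values": [[[0.9, 0.1], [0.2, 0.8]],
#                                 [[0.8, 0.2], [0.3, 0.7]]],
#    "accuracy": 1.0, ...}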
print(f'Saving information...')
# ############# save the information that is already available
test_summary = OrderedDict()
# for backwards compatibility: old configuration files may lack the repetitions key
if config.get('number_crossvalidation_repetitions'):
    n_cv = config['N_FOLDS']*config['number_crossvalidation_repetitions']
else:
    n_cv = config['N_FOLDS']
test_summary['model_name'] = config['model_save_name']
test_summary['labels'] = [int(i) for i in test_fold_summary[0]['ground_truth']]
test_summary['folds_test_logits_values'] = [test_fold_summary[cv]['prediction'].tolist() for cv in range(n_cv)]
test_summary['test_time'] = utilities.tictoc_from_time(np.sum([test_fold_summary[cv]['test_time'] for cv in range(n_cv)]))
test_summary['test_model_version'] = model_version
test_summary['test_date'] = time.strftime("%Y%m%d-%H%M%S")
# ############ plot and save confusion matrix
# compute ensemble
# compute the logits mean along the folds
ensemble_pred_logits = np.array(test_summary['folds_test_logits_values']).mean(axis=0)
# compute argmax prediction
ensemble_pred_argmax = np.argmax(ensemble_pred_logits, axis=1)
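# e.g. (hypothetical numbers) two folds predicting [0.8, 0.2] and [0.6, 0.4]
# for the same image average to [0.7, 0.3], whose argmax selects class 0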
acc = utilities.plotConfusionMatrix(test_summary['labels'], ensemble_pred_argmax,
classes=config['label_description'],
savePath=model_path,
saveName=f'ConfusionMatrix_{model_version}_model',
draw=False)
# ############ plot and save ROC curve
fpr, tpr, roc_auc = utilities.plotROC(test_summary['labels'], ensemble_pred_logits,
classes=config['label_description'],
savePath=model_path,
saveName=f'Multiclass_ROC_{model_version}_model',
draw=False)
# convert the dictionary values to lists so they can be JSON-serialized
for key, value in fpr.items():
fpr[key]=value.tolist()
for key, value in tpr.items():
tpr[key]=value.tolist()
for key, value in roc_auc.items():
roc_auc[key]=value.tolist()
# ############ plot and save PR curve
precision, recall, average_precision, F1 = utilities.plotPR(test_summary['labels'],
ensemble_pred_logits,
classes=config['label_description'],
savePath=model_path,
saveName=f'Multiclass_PR_{model_version}_model',
draw=False)
# convert the dictionary values to lists so they can be JSON-serialized
for key, value in precision.items():
precision[key]=value.tolist()
for key, value in recall.items():
recall[key]=value.tolist()
# save all the information in the test summary
test_summary['accuracy'] = acc
# test_summary['false_positive_rate'] = [fpr[i].tolist() for i in range(len(class_labels))]
test_summary['false_positive_rate'] = fpr
# test_summary['false_positive_rate_micro_avg'] = fpr['micro'].tolist()
# test_summary['false_positive_rate_macro_avg'] = fpr['macro'].tolist()
test_summary['true_positive_rate'] = tpr
# test_summary['true_positive_rate'] = [tpr[i].tolist() for i in range(len(class_labels))]
# test_summary['true_positive_rate_micro_avg'] = tpr['micro'].tolist()
# test_summary['true_positive_rate_macro_avg'] = tpr['macro'].tolist()
test_summary['roc_auc'] = roc_auc
test_summary['precision'] = precision
# test_summary['precision'] = [precision[i].tolist() for i in range(len(class_labels))]
# test_summary['precision_micro_avg'] = precision['micro'].tolist()
test_summary['recall'] = recall
# test_summary['recall'] = [recall[i].tolist() for i in range(len(class_labels))]
# test_summary['recall_micro_avg'] = recall['micro'].tolist()
test_summary['average_precision'] = average_precision
test_summary['F1'] = F1
# save summary file
with open(os.path.join(model_path,f'{model_version}_model_version_test_summary.txt'), 'w') as fp:
json.dump(test_summary, fp)
## save summary (can be improved, but using the routine from print_model_performance)
from sklearn.metrics import average_precision_score, recall_score, roc_auc_score, f1_score, confusion_matrix, accuracy_score, matthews_corrcoef
def get_metrics(true_logits, pred_logits, average='macro'):
    '''
    Utility that, given the one-hot true labels and the predicted logits,
    returns a dictionary containing:
    precision (specificity) : for each of the classes
    recall (sensitivity) : for each of the classes
    accuracy : for each of the classes
    f1-score : for each of the classes
    auc : for each of the classes
    overall_precision : over all classes
    overall_recall : over all classes
    overall_accuracy : over all classes
    overall_f1-score : over all classes
    overall_auc : over all classes
    matthews_correlation_coef : over all classes
    '''
# compute confusion matrix
cnf_matrix = confusion_matrix(np.argmax(true_logits,-1), np.argmax(pred_logits, -1))
# compute TP, TN, FP, FN
FP = (cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)).astype(float)
FN = (cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)).astype(float)
TP = (np.diag(cnf_matrix)).astype(float)
TN = (cnf_matrix.sum() - (FP + FN + TP)).astype(float)
# compute per class metrics
summary_dict = {
'precision': TN / (FP+TN),
'recall': TP / (TP+FN),
'accuracy': (TP+TN) / (TP+TN+FP+FN),
'f1-score': TP / (TP + 0.5*(FP+FN)),
'auc': roc_auc_score(true_logits, pred_logits, average=None),
}
    # compute overall metrics
    # note that these metrics, especially those that depend on a threshold,
    # will be the average metrics over all the thresholds and may differ from
    # the single-threshold values computed above (threshold=0.5)
summary_dict['overall_precision']=average_precision_score(true_logits,
pred_logits,
average=average)
summary_dict['overall_recall']=recall_score(np.argmax(true_logits,-1),
np.argmax(pred_logits,-1),
average=average)
summary_dict['overall_accuracy']=accuracy_score(np.argmax(true_logits,-1),
np.argmax(pred_logits,-1))
summary_dict['overall_f1-score']=f1_score(np.argmax(true_logits,-1),
np.argmax(pred_logits,-1),
average=average)
summary_dict['overall_auc']=roc_auc_score(true_logits,
pred_logits,
multi_class='ovr',
average=average)
summary_dict['matthews_correlation_coef']=matthews_corrcoef(np.argmax(true_logits,-1),
np.argmax(pred_logits,-1),)
return summary_dict
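# Quick sanity sketch (hypothetical values): with one-hot labels
# [[1, 0], [0, 1]] and predicted logits [[0.9, 0.1], [0.2, 0.8]] both samples
# are classified correctly, so get_metrics would report per-class and overall
# accuracy of 1.0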
n_folds = len(folds)
print()
labels = np.eye(np.unique(test_summary['labels']).shape[0])[test_summary['labels']]
pred_logits = test_summary['folds_test_logits_values']
# compute per-fold performance
per_fold_performance = []
for f in range(len(folds)):
per_fold_performance.append(get_metrics(labels, pred_logits[f]))
# compute ensemble performance
# compute the logits mean along the folds
ensemble_pred_logits = np.array(pred_logits).mean(axis=0)
# compute argmax prediction
ensemble_pred_argmax = np.argmax(ensemble_pred_logits, axis=1)
performance_ensamble = get_metrics(labels, ensemble_pred_logits)
# ######################### printing to file
summary = open(os.path.join(model_path,f'{model_version}_model_version_short_test_summary.txt'), 'w')
summary.write(f'\nModel Name: {os.path.basename(model_path)}\n\n')
# add test time overall and per image
average_test_time = np.mean([test_fold_summary[cv]['test_time'] for cv in range(n_cv)])
average_test_time_per_image = np.mean([test_fold_summary[cv]['test_time'] for cv in range(n_cv)])/labels.shape[0]
summary.write(f'Overall model test time (average over folds): {utilities.tictoc_from_time(average_test_time)}\n')
summary.write(f'Average test time per image (average over folds): {utilities.tictoc_from_time(average_test_time_per_image)}\n\n')
# print a summary of the testing per fold, class and ensemble
keys = ['precision','recall', 'accuracy', 'f1-score', 'auc']
max_len_k=max([len(k) for k in keys])
classes = config['label_description']
max_len_c=max(max([len(k) for k in classes]),len('Overall'))
max_len_f=max([len(s) for s in ["Fold", "Average","STD","Ensemble"]])
# build dict that holds the avg of all metrics
avg_dict = {k:[] for k in keys}
# build header strings
fold_str = f'{"Fold":^{max_len_f}}'
class_str = f'{"Class":^{max_len_c}}'
keys_str = ''.join([f'{k.capitalize():^{max_len_k+2}}' for k in keys])
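# e.g. f'{"Fold":^6}' centers the word in a 6-character field, giving ' Fold '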
# start printing
summary.write(f'\n{"¤"*(max_len_f+max_len_c+len(keys_str))}'+'\n')
summary.write(f'{"¤ Per-fold metrics ¤":^{(max_len_f+max_len_c+len(keys_str))}}'+'\n')
summary.write(f'{"¤"*(max_len_f+max_len_c+len(keys_str))}\n')
# print header
summary.write(fold_str+class_str+keys_str+'\n')
# print per fold and class metrics
for idx, fp in enumerate(per_fold_performance):
fold_num_str = f'{str(idx+1):^{max_len_f}}'
# print per class performance
for idc, c in enumerate(classes):
class_metrics = ''.join([f'{str(round(fp[k][idc],3)):^{max_len_k+2}}' for k in keys])
if idc == 0:
summary.write(fold_num_str+f'{c:^{max_len_c}}'+class_metrics+'\n')
else:
summary.write(f'{" ":^{max_len_f}}'+f'{c:^{max_len_c}}'+class_metrics+'\n')
# print overall performance
summary.write(f'{"-"*(max_len_f+max_len_c+len(keys_str))}'+'\n')
overall_metric_str = ''.join([f'{str(round(fp["overall_"+k],3)):^{max_len_k+2}}' for k in keys])
summary.write(fold_num_str+f'{"Overall":^{max_len_c}}'+overall_metric_str+'\n')
summary.write(f'{"-"*(max_len_f+max_len_c+len(keys_str))}'+'\n\n')
# save overall metrics for later
    for k in keys:
        avg_dict[k].append(fp['overall_'+k])
# print average overall metrics for the folds
avg_overall_metric_str = ''.join([f'{str(round(np.mean(avg_dict[k]),3)):^{max_len_k+2}}' for k in keys])
std_overall_metric_str = ''.join([f'{str(round(np.std(avg_dict[k]),3)):^{max_len_k+2}}' for k in keys])
summary.write(f'{"="*(max_len_f+max_len_c+len(keys_str))}'+'\n')
summary.write(fold_str+class_str+keys_str+'\n')
summary.write(f'{"Average":^{max_len_f}}'+f'{"":^{len(class_str)}}'+avg_overall_metric_str+'\n')
summary.write(f'{"STD":^{max_len_f}}'+f'{"":^{len(class_str)}}'+std_overall_metric_str+'\n')
# print ensemble metrics
summary.write(f'\n{"¤"*(max_len_f+max_len_c+len(keys_str))}'+'\n')
summary.write(f'{"¤ Ensemble metrics ¤":^{(max_len_f+max_len_c+len(keys_str))}}'+'\n')
summary.write(f'{"¤"*(max_len_f+max_len_c+len(keys_str))}\n')
# print header
summary.write(f'{"Ensemble":^{max_len_f}}'+class_str+keys_str+'\n')
# print per class performance
fp = performance_ensamble
for idc, c in enumerate(classes):
class_metrics = ''.join([f'{str(round(fp[k][idc],3)):^{max_len_k+2}}' for k in keys])
summary.write(f'{" ":^{max_len_f}}'+f'{c:^{max_len_c}}'+class_metrics+'\n')
# print overall performance
summary.write(f'{"-"*(max_len_f+max_len_c+len(keys_str))}'+'\n')
overall_metric_str = ''.join([f'{str(round(fp["overall_"+k],3)):^{max_len_k+2}}' for k in keys])
summary.write(f'{" ":^{max_len_f}}'+f'{"Overall":^{max_len_c}}'+overall_metric_str+'\n')
summary.close()
## print on terminal
print('Ensemble performance\n')
max_len = max([len(key) for key in performance_ensamble.keys()])
[print(f'{key:{max_len}s}: {value}') for key, value in performance_ensamble.items()]
## save also the information in a .csv file useful for plotting (and hypothesis testing in case)
'''
The file should allow for easy plotting of the model performance based on
classification type, model, model_version (best, last, ensemble_best, ensemble_last),
metrics, training time and inference time.
'''
def get_time_from_string(time_string):
    '''
    Utility that, given a time string formatted as 0d:0h:0m:0s:0ms,
    returns the number of hours
    '''
    splits = time_string.split(':')
    # get the values for d, h, m, s, ms
    time_values = [ [] for _ in range(len(splits))]
    [[time_values[idx].append(c) for c in v if c.isnumeric()] for idx, v in enumerate(splits)]
    time_values = [int(''.join(v)) for v in time_values]
    # per-unit conversion factors to hours (note: ms -> hours is 1/(1000*60**2))
    hours = sum([tv*hv for tv, hv in zip(time_values, [24, 1, 1/60, 1/60**2, 1/(1000*60**2)])])
    return hours
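# e.g. get_time_from_string('1d:2h:30m:0s:0ms') -> 24 + 2 + 0.5 = 26.5 hours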
def get_mean_training_time(model_path, model_version='best', debug=False):
    '''
    Utility that returns the training time (in hours) of every fold
    '''
fold_paths = glob.glob(os.path.join(model_path, 'fold_*',''))
per_fold_training_time = []
for idx, fp in enumerate(fold_paths):
# open summary training
with open(os.path.join(fp,'model_summary_json.txt')) as json_file:
training_summary = json.load(json_file)
# get number of hours for the best model
training_time = get_time_from_string(training_summary['TRAINING_TIME'])
        # adjust the training time for the last model if needed (scales the
        # per-epoch time to the full 250-epoch training budget)
        if model_version == 'last':
            epoch_training_time = training_time/(training_summary['EPOCHS']+1e-6)
            training_time = epoch_training_time*250
if debug:
print(f'Fold {idx+1} ({model_version}): {training_time:0.2f}')
per_fold_training_time.append(training_time)
return per_fold_training_time
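# Illustrative usage (hypothetical folder): get_mean_training_time('trained_models/LightOCT')
# might return [5.2, 4.9, 5.1], i.e. the hours spent training each of three folds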
## work on saving
import csv
summary_file = os.path.join(model_path,f'{model_version}_tabular_test_summary.csv')
csv_file = open(summary_file, "w")
writer = csv.writer(csv_file)
csv_header = ['classification_type',
'nbr_classes',
'model_type',
'model_version',
'fold',
'precision',
'recall',
'accuracy',
'f1-score',
'auc',
'training_time',
'matthews_correlation_coef',
]
writer.writerow(csv_header)
per_fold_training_time = get_mean_training_time(model_path, model_version=model_version)
classification_type = 'per-disease' if config['dataset_type']=='retinal' else 'normal-vs-cancer'
nbr_classes = 4 if config['dataset_type']=='retinal' else 2
# loop through all the folds and save information
rows_to_write = []
for idx, fp in enumerate(per_fold_performance):
rows_to_write.append([classification_type,
nbr_classes,
config['model_configuration'],
model_version,
idx+1,
fp['overall_precision'],
fp['overall_recall'],
fp['overall_accuracy'],
fp['overall_f1-score'],
fp['overall_auc'],
per_fold_training_time[idx],
fp['matthews_correlation_coef'],
])
# add ensemble information (the ensemble uses every fold, so the total
# training time over the folds is recorded instead of a single fold's time)
rows_to_write.append([classification_type,
                nbr_classes,
                config['model_configuration'],
                'ensemble',
                'ensemble',
                performance_ensamble['overall_precision'],
                performance_ensamble['overall_recall'],
                performance_ensamble['overall_accuracy'],
                performance_ensamble['overall_f1-score'],
                performance_ensamble['overall_auc'],
                np.sum(per_fold_training_time),
                performance_ensamble['matthews_correlation_coef'],
                ])
writer.writerows(rows_to_write)
csv_file.close()
| 40.599309
| 174
| 0.691836
|
| 0
| 0
| 0
|
82f29cec15ebbe4486ef10820f6f24ee0639831a
| 300
|
py
|
Python
|
yunionclient/api/cloudpermissions.py
|
yunionyun/python_yunionsdk
|
40a567b80f6fb3ebc72d8cc6313b334a201b2f00
|
[
"Apache-2.0"
] | 3
|
2021-09-22T11:34:08.000Z
|
2022-03-13T04:55:17.000Z
|
yunionclient/api/cloudpermissions.py
|
yunionyun/python_yunionsdk
|
40a567b80f6fb3ebc72d8cc6313b334a201b2f00
|
[
"Apache-2.0"
] | 13
|
2019-06-06T08:25:41.000Z
|
2021-07-16T07:26:10.000Z
|
yunionclient/api/cloudpermissions.py
|
yunionyun/python_yunionsdk
|
40a567b80f6fb3ebc72d8cc6313b334a201b2f00
|
[
"Apache-2.0"
] | 7
|
2019-03-31T05:43:36.000Z
|
2021-03-04T09:59:05.000Z
|
from yunionclient.common import base
| 23.076923
| 53
| 0.753333
|
from yunionclient.common import base
class Cloudpermission(base.ResourceBase):
pass
class CloudpermissionManager(base.StandaloneManager):
resource_class = Cloudpermission
keyword = 'cloudpermission'
keyword_plural = 'cloudpermissions'
_columns = ["Id", "Name", "Description"]
| 0
| 215
| 46
|
38fd0d779b303c1216b7bddcf77f090e5523b5f4
| 3,399
|
py
|
Python
|
linguistic_features/connectives.py
|
iverinaivanova/Linguistic-Mechanisms-in-the-Section-Types-of-a-Research-Article
|
b9aad26d66a1b550f1734a76dd94b440e58b6125
|
[
"Apache-2.0"
] | null | null | null |
linguistic_features/connectives.py
|
iverinaivanova/Linguistic-Mechanisms-in-the-Section-Types-of-a-Research-Article
|
b9aad26d66a1b550f1734a76dd94b440e58b6125
|
[
"Apache-2.0"
] | null | null | null |
linguistic_features/connectives.py
|
iverinaivanova/Linguistic-Mechanisms-in-the-Section-Types-of-a-Research-Article
|
b9aad26d66a1b550f1734a76dd94b440e58b6125
|
[
"Apache-2.0"
] | null | null | null |
"""The script retrieves explicit connectives marking temporal, comparison, contingency, and expansion relations
and prints the total number of these connectives per article section."""
import sys
import spacy
from collections import Counter
nlp = spacy.load("en_core_web_sm")
# The script below retrieves the connectives from each article abstract
# and prints their total number.
# To change the section type, simply replace "abstracts.txt" with
# "introductions.txt" -- for introductions,
# "relatedwork.txt" -- for related works,
# "discussions.txt" -- for discussions,
# "conclusions.txt" -- for conclusions.
f = open("abstracts.txt", "r", encoding="utf-8")
for line in f:
if len(line) > 0:
        # split only on the first space so the full section text is kept
        items = line.split(" ", 1)
        if len(items) > 1:
            file_name = items[0]
            file_text = items[1]
low_case = file_text.lower()
doc = nlp(low_case)
temporal = {"after", "afterwards", "before",
"earlier", "later", "meanwhile",
"next", "previously", "simultaneously", "thereafter",
"till", "until", "ultimately"
}
comparison = {"although", "but", "conversely", "however",
"instead", "nevertheless", "nonetheless", "rather",
"though", "whereas", "yet", "regardless", "despite", "though"
}
contingency = {"as", "because", "consequently", "hence", "if",
"thereby", "therefore", "thus", "so", "indeed", "accordingly"}
expansion = {"also", "alternatively", "besides", "else", "except",
"finally", "further", "furthermore", "likewise",
"moreover", "neither", "nor", "or", "otherwise", "overall", "plus",
"separately", "similarly", "specifically",
"especially", "first", "second", "firstly", "secondly"}
temporal_relations = []
comparison_relations = []
contingency_relations = []
expansion_relations = []
            for token in doc:
                # plain membership tests: the original chained comparisons of the
                # form `token.text == token.text in temporal` worked only by accident,
                # and the last branch mistakenly re-checked the comparison set instead
                # of the expansion set, so expansion connectives were never counted
                if token.text in temporal:
                    temporal_relations.append(token.text)
                elif token.text in comparison:
                    comparison_relations.append(token.text)
                elif token.text in contingency:
                    contingency_relations.append(token.text)
                elif token.text in expansion:
                    expansion_relations.append(token.text)
print(file_name)
print("Temporal connectives found in the section:", temporal_relations)
print("Total number of connectives:", len(temporal_relations))
print("Comparison connectives found in the section:", comparison_relations)
print("Total number of connectives:", len(comparison_relations))
print("Contingency connectives found in the section:", contingency_relations)
print("Total number of connectives:", len(contingency_relations))
print("Expansion connectives found in the section:", expansion_relations)
print("Total number of connectives:", len(expansion_relations))
| 53.952381
| 112
| 0.568108
|
"""The script retrieves explicit connectives marking temporal, comparison, contingency, and expansion relations
and prints the total number of these connectives per article section."""
import sys
import spacy
from collections import Counter
nlp = spacy.load("en_core_web_sm")
# The script below retrieves the connectives from each article abstract
# and prints their total number.
# To change the section type, simply replace "abstracts.txt" with
# "introductions.txt" -- for introductions,
# "relatedwork.txt" -- for related works,
# "discussions.txt" -- for discussions,
# "conclusions.txt" -- for conclusions.
f = open("abstracts.txt", "r", encoding="utf-8")
for line in f:
if len(line) > 0:
items = line.split(" ")
if len(items) > 1:
file_name = items[0]
file_text = items[1]
low_case = file_text.lower()
doc = nlp(low_case)
temporal = {"after", "afterwards", "before",
"earlier", "later", "meanwhile",
"next", "previously", "simultaneously", "thereafter",
"till", "until", "ultimately"
}
comparison = {"although", "but", "conversely", "however",
"instead", "nevertheless", "nonetheless", "rather",
"though", "whereas", "yet", "regardless", "despite", "though"
}
contingency = {"as", "because", "consequently", "hence", "if",
"thereby", "therefore", "thus", "so", "indeed", "accordingly"}
expansion = {"also", "alternatively", "besides", "else", "except",
"finally", "further", "furthermore", "likewise",
"moreover", "neither", "nor", "or", "otherwise", "overall", "plus",
"separately", "similarly", "specifically",
"especially", "first", "second", "firstly", "secondly"}
temporal_relations = []
comparison_relations = []
contingency_relations = []
expansion_relations = []
for token in doc:
if token.text == token.text in temporal:
temporal_relations.append(token.text)
elif token.text == token.text in comparison:
comparison_relations.append(token.text)
elif token.text == token.text in contingency:
contingency_relations.append(token.text)
elif token.text == token.text in comparison:
expansion_relations.append(token.text)
print(file_name)
print("Temporal connectives found in the section:", temporal_relations)
print("Total number of connectives:", len(temporal_relations))
print("Comparison connectives found in the section:", comparison_relations)
print("Total number of connectives:", len(comparison_relations))
print("Contingency connectives found in the section:", contingency_relations)
print("Total number of connectives:", len(contingency_relations))
print("Expansion connectives found in the section:", expansion_relations)
print("Total number of connectives:", len(expansion_relations))
| 0
| 0
| 0
|
c3eb3f79de238630fd3623659ca0acabe1a18ed6
| 7,079
|
py
|
Python
|
apps/reports/metadata/basic.py
|
commtrack/commtrack-old-to-del
|
cc9c22754ac192a45483cef609bdcf09aa990340
|
[
"BSD-3-Clause"
] | 1
|
2017-05-19T07:23:00.000Z
|
2017-05-19T07:23:00.000Z
|
apps/reports/metadata/basic.py
|
commtrack/commtrack-old-to-del
|
cc9c22754ac192a45483cef609bdcf09aa990340
|
[
"BSD-3-Clause"
] | null | null | null |
apps/reports/metadata/basic.py
|
commtrack/commtrack-old-to-del
|
cc9c22754ac192a45483cef609bdcf09aa990340
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Basic metadata reports/queries
@author: dan myung (dmyung@dimagi.com)
create: 10/19/2009
Notes:
A place where oft reused queries off xformmanager.metadata can be referenced.
This is somewhat redundant with functionality already existent in the apps/reports/util.py file which already does
a basic metadata query.
"""
from hq.models import Domain, Organization, ReporterProfile, BlacklistedUser
from receiver.models import Submission
from reporters.models import Reporter, ReporterGroup
from datetime import datetime, timedelta
from xformmanager.models import Metadata
def build_filtered_metadataquery(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Simple report function to prepare the metadata query for the eventual magic you will do to it.
The required arguments are a timespan, and at least one of the following:
Domain
A single reporter group
A single reporter profile
or an array of formdatadef
Returns the filtered Metadata queryset
"""
    if domain is None and reportergroup is None and reporterprofile is None and len(formdefs) == 0:
raise Exception("Insufficient arguments to run query")
filtered = Metadata.objects.filter(timeend__gte=intervalstart).filter(timeend__lte=intervalend)
exclude_usernames = []
    if domain is not None:
#next, get the blacklisted users from this domain and exclude them
blist = BlacklistedUser.objects.filter(domains=domain, active=True)
exclude_usernames = blist.values_list('username',flat=True)
filtered = filtered.filter(formdefmodel__domain=domain).exclude(username__in=exclude_usernames)
    if reportergroup is not None:
        raise Exception("Reportergroup filtration not implemented yet")
    if reporterprofile is not None:
        # note: consistency on chw_id vs. chw_username still needs to be worked out;
        # for this usage we'll stick to chw_username (chw_id should probably be used long term)
        filtered = filtered.filter(username=reporterprofile.chw_username).exclude(username__in=exclude_usernames)
if len(formdefs) > 0:
filtered = filtered.filter(formdefmodel__in=formdefs).exclude(username__in=exclude_usernames)
return filtered
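# Illustrative usage sketch (hypothetical objects, not part of the original module):
#   end = datetime.now()
#   start = end - timedelta(days=7)
#   qs = build_filtered_metadataquery(start, end, domain=some_domain)
#   print(qs.count())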
def timeend_by_hour_of_day(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Simple report function to get a histogram of the timeend by hour.
The required arguments are a timespan, and at least one of the following:
Domain
A single reporter group
A single reporter profile
or an array of formdatadef
This will return an array of counts, 24 in length for each hour with integers in it [1,4,1,2,4,2,...]
"""
filtered = build_filtered_metadataquery(intervalstart, intervalend, domain=domain, reportergroup=reportergroup, reporterprofile=reporterprofile, formdefs=formdefs)
hourcounts = []
for hour in range(0,24):
hourcounts.append(filtered.extra(where=['hour(timeend)=%d' % hour ]).count())
return hourcounts
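# e.g. returns a 24-element list such as [0, 2, 5, ...] where index i counts
# the forms whose timeend falls in hour i (hypothetical numbers)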
def timestart_by_hour_of_day(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Simple report function to get a histogram of the timestart by hour.
The required arguments are a timespan, and at least one of the following:
Domain
A single reporter group
A single reporter profile
or an array of formdatadef
This will return an array of counts, 24 in length for each hour with integers in it [1,4,1,2,4,2,...]
"""
filtered = build_filtered_metadataquery(intervalstart, intervalend, domain=domain, reportergroup=reportergroup, reporterprofile=reporterprofile, formdefs=formdefs)
hourcounts = []
for hour in range(0,24):
hourcounts.append(filtered.extra(where=['hour(timestart)=%d' % hour ]).count())
return hourcounts
def timedelta_by_hour_of_day(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Simple report function to get a histogram of the average time delta of the timestart and timeeend, and show the results
by hour of the timeend.
The required arguments are a timespan, and at least one of the following:
Domain
A single reporter group
A single reporter profile
or an array of formdatadef
This will return an array of counts, 24 in length for each hour with integers in it [1,4,1,2,4,2,...]
"""
filtered = build_filtered_metadataquery(intervalstart, intervalend, domain=domain, reportergroup=reportergroup, reporterprofile=reporterprofile, formdefs=formdefs)
hourcounts = []
for hour in range(0,24):
filterhour_timeend = filtered.extra(where=['hour(timeend)=%d' % hour ])
totalseconds = 0
for filt in filterhour_timeend:
totalseconds = totalseconds + (filt.timeend - filt.timestart).seconds
if filterhour_timeend.count() > 0:
avg = (totalseconds/filterhour_timeend.count())/60
else:
avg = -1
hourcounts.append(avg)
return hourcounts
def receivetime_by_hour_of_day(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Simple report function to get a histogram of the time received by the server
The required arguments are a timespan, and at least one of the following:
Domain
A single reporter group
A single reporter profile
or an array of formdatadef
This will return an array of counts, 24 in length for each hour with integers in it [1,4,1,2,4,2,...]
"""
filtered = build_filtered_metadataquery(intervalstart, intervalend, domain=domain, reportergroup=reportergroup, reporterprofile=reporterprofile, formdefs=formdefs)
#ok, so we got the filtered results, now we need to cross link it with the submissions to get the submit_time
submission_ids = filtered.values_list('attachment__submission__id', flat=True)
submissions = Submission.objects.filter(id__in=submission_ids)
hourcounts = []
for hour in range(0,24):
        hourshift = (hour + 17)%24 # shift by 17 hours, presumably a timezone adjustment
#hourshift = hour
hourcounts.append(submissions.extra(where=['hour(submit_time)=%d' % hourshift ]).count())
return hourcounts
def metadata_submission_stats(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Using the same metadata filtration, establish stats on Metadata
"""
filtered = build_filtered_metadataquery(intervalstart, intervalend, domain=domain, reportergroup=reportergroup, reporterprofile=reporterprofile, formdefs=formdefs)
    # TODO: total by deviceid
    # TODO: duplicates
    # TODO: metadata/users
    pass
| 41.156977
| 167
| 0.703772
|
"""
Basic metadata reports/queries
@author: dan myung (dmyung@dimagi.com)
create: 10/19/2009
Notes:
A place where oft reused queries off xformmanager.metadata can be referenced.
This is somewhat redundant with functionality already existent in the apps/reports/util.py file which already does
a basic metadata query.
"""
from hq.models import Domain, Organization, ReporterProfile, BlacklistedUser
from receiver.models import Submission
from reporters.models import Reporter, ReporterGroup
from datetime import datetime, timedelta
from xformmanager.models import Metadata
def build_filtered_metadataquery(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Simple report function to prepare the metadata query for the eventual magic you will do to it.
The required arguments are a timespan, and at least one of the following:
Domain
A single reporter group
A single reporter profile
or an array of formdatadef
Returns the filtered Metadata queryset
"""
if domain is None and reportergroup is None and reporterprofile is None and len(formdefs)== 0:
raise Exception("Insufficient arguments to run query")
filtered = Metadata.objects.filter(timeend__gte=intervalstart).filter(timeend__lte=intervalend)
exclude_usernames = []
if domain != None:
#next, get the blacklisted users from this domain and exclude them
blist = BlacklistedUser.objects.filter(domains=domain, active=True)
exclude_usernames = blist.values_list('username',flat=True)
filtered = filtered.filter(formdefmodel__domain=domain).exclude(username__in=exclude_usernames)
if reportergroup != None:
raise Exception ("Reportergroup filtration not implemented yet")
if reporterprofile != None:
#note, consistency on chw_id vs. chw_username still needs to be worked out. for this usage, we'll stick to chw_username
#chw_id should probably used long term
filtered = filtered.filter(username=reporterprofile.chw_username).exclude(username__in=exclude_usernames)
if len(formdefs) > 0:
filtered = filtered.filter(formdefmodel__in=formdefs).exclude(username__in=exclude_usernames)
return filtered
def timeend_by_hour_of_day(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Simple report function to get a histogram of the timeend by hour.
The required arguments are a timespan, and at least one of the following:
Domain
A single reporter group
A single reporter profile
or an array of formdatadef
This will return an array of counts, 24 in length for each hour with integers in it [1,4,1,2,4,2,...]
"""
filtered = build_filtered_metadataquery(intervalstart, intervalend, domain=domain, reportergroup=reportergroup, reporterprofile=reporterprofile, formdefs=formdefs)
hourcounts = []
for hour in range(0,24):
hourcounts.append(filtered.extra(where=['hour(timeend)=%d' % hour ]).count())
return hourcounts
def timestart_by_hour_of_day(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Simple report function to get a histogram of the timestart by hour.
The required arguments are a timespan, and at least one of the following:
Domain
A single reporter group
A single reporter profile
or an array of formdatadef
This will return an array of counts, 24 in length for each hour with integers in it [1,4,1,2,4,2,...]
"""
filtered = build_filtered_metadataquery(intervalstart, intervalend, domain=domain, reportergroup=reportergroup, reporterprofile=reporterprofile, formdefs=formdefs)
hourcounts = []
for hour in range(0,24):
hourcounts.append(filtered.extra(where=['hour(timestart)=%d' % hour ]).count())
return hourcounts
def timedelta_by_hour_of_day(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Simple report function to get a histogram of the average time delta of the timestart and timeeend, and show the results
by hour of the timeend.
The required arguments are a timespan, and at least one of the following:
Domain
A single reporter group
A single reporter profile
or an array of formdatadef
This will return an array of counts, 24 in length for each hour with integers in it [1,4,1,2,4,2,...]
"""
filtered = build_filtered_metadataquery(intervalstart, intervalend, domain=domain, reportergroup=reportergroup, reporterprofile=reporterprofile, formdefs=formdefs)
hourcounts = []
for hour in range(0,24):
filterhour_timeend = filtered.extra(where=['hour(timeend)=%d' % hour ])
totalseconds = 0
for filt in filterhour_timeend:
totalseconds = totalseconds + (filt.timeend - filt.timestart).seconds
if filterhour_timeend.count() > 0:
avg = (totalseconds/filterhour_timeend.count())/60
else:
avg = -1
hourcounts.append(avg)
return hourcounts
def receivetime_by_hour_of_day(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Simple report function to get a histogram of the time received by the server
The required arguments are a timespan, and at least one of the following:
Domain
A single reporter group
A single reporter profile
or an array of formdatadef
This will return an array of counts, 24 in length for each hour with integers in it [1,4,1,2,4,2,...]
"""
filtered = build_filtered_metadataquery(intervalstart, intervalend, domain=domain, reportergroup=reportergroup, reporterprofile=reporterprofile, formdefs=formdefs)
#ok, so we got the filtered results, now we need to cross link it with the submissions to get the submit_time
submission_ids = filtered.values_list('attachment__submission__id', flat=True)
submissions = Submission.objects.filter(id__in=submission_ids)
hourcounts = []
for hour in range(0,24):
hourshift = (hour + 17)%24
#hourshift = hour
hourcounts.append(submissions.extra(where=['hour(submit_time)=%d' % hourshift ]).count())
return hourcounts
def metadata_submission_stats(intervalstart, intervalend, domain=None, reportergroup=None, reporterprofile=None, formdefs=[]):
"""
Using the same metadata filtration, establish stats on Metadata
"""
filtered = build_filtered_metadataquery(intervalstart, intervalend, domain=domain, reportergroup=reportergroup, reporterprofile=reporterprofile, formdefs=formdefs)
#total by deviceid
#duplicates
#metadata/users
pass
| 0
| 0
| 0
|
7adf46f4dc4fd50dd8b98f3b3816828778eed8e7
| 7,795
|
py
|
Python
|
src/skills/switch_skill.py
|
winkste/mp32_generic
|
4231ac4a1c63a7479b79a5693516e3e86a1b8c69
|
[
"MIT"
] | null | null | null |
src/skills/switch_skill.py
|
winkste/mp32_generic
|
4231ac4a1c63a7479b79a5693516e3e86a1b8c69
|
[
"MIT"
] | null | null | null |
src/skills/switch_skill.py
|
winkste/mp32_generic
|
4231ac4a1c63a7479b79a5693516e3e86a1b8c69
|
[
"MIT"
] | 2
|
2020-11-03T08:54:28.000Z
|
2021-05-27T13:28:17.000Z
|
################################################################################
# filename: switch_skill.py
# date: 07. Apr. 2021
# username: winkste
# name: Stephan Wink
# description: This module handles the input signal of a switch.
#              In the first implementation it only supports polling of an
#              input pin. Future implementations shall also support reacting
#              to interrupts; therefore a mode variable is defined and
#              handed over to the object during construction.
#
#
#
################################################################################
################################################################################
# Imports
import time
from src.skills.abs_skill import AbstractSkill
from src.mqtt.user_subs import UserSubs
from src.mqtt.user_pubs import UserPubs
import machine
import src.utils.trace as T
################################################################################
# Variables
_NO_VALUE = 0xff
_SWITCH_OFF_TIME = 1000
SWITCH_SKILL_MODE_POLL = 0
SWITCH_SKILL_MODE_ISR = 1
_SWITCH_STATE_LOW = 0
_SWITCH_STATE_HIGH = 1
_SWITCH_STATE_INIT = 0xff
_SWITCH_STATE_DICT_INV = {
_SWITCH_STATE_LOW: _SWITCH_STATE_HIGH,
_SWITCH_STATE_HIGH: _SWITCH_STATE_LOW,
}
################################################################################
# Functions
################################################################################
# Classes
################################################################################
# @brief This is the switch skill, handling a switch input signal
################################################################################
################################################################################
# Scripts
T.configure(__name__, T.INFO)
if __name__ == "__main__":
# execute only if run as a script
T.trace(__name__, T.WARNING, 'no main script defined ')
| 38.975
| 107
| 0.493906
|
################################################################################
# filename: switch_skill.py
# date: 07. Apr. 2021
# username: winkste
# name: Stephan Wink
# description: This module handles the input signal of a switch.
#              In the first implementation it only supports polling of an
#              input pin. Future implementations shall also support reacting
#              to interrupts; therefore a mode variable is defined and
#              handed over to the object during construction.
#
#
#
################################################################################
################################################################################
# Imports
import time
from src.skills.abs_skill import AbstractSkill
from src.mqtt.user_subs import UserSubs
from src.mqtt.user_pubs import UserPubs
import machine
import src.utils.trace as T
################################################################################
# Variables
_NO_VALUE = 0xff
_SWITCH_OFF_TIME = 1000
SWITCH_SKILL_MODE_POLL = 0
SWITCH_SKILL_MODE_ISR = 1
_SWITCH_STATE_LOW = 0
_SWITCH_STATE_HIGH = 1
_SWITCH_STATE_INIT = 0xff
_SWITCH_STATE_DICT_INV = {
_SWITCH_STATE_LOW: _SWITCH_STATE_HIGH,
_SWITCH_STATE_HIGH: _SWITCH_STATE_LOW,
}
################################################################################
# Functions
################################################################################
# Classes
################################################################################
# @brief This is the switch skill, handling a switch input signal
################################################################################
class SwitchSkill(AbstractSkill):
############################################################################
# Member Attributes
_pub_state = None
_publish_state = True
_current_state = _SWITCH_STATE_INIT
_switch_pin = _NO_VALUE
_led_pin = _NO_VALUE
_switch_gpio = None
_led_gpio = None
_led_inf = False
    _switch_mode = SWITCH_SKILL_MODE_POLL
_switch_trigger = _SWITCH_STATE_HIGH
_switch_state_published = True
############################################################################
# Member Functions
############################################################################
# @brief constructor of the switch skill object
# @param dev_id device identification
# @param skill_entity skill entity if multiple skills are generated
# @param switch_pin switch input pin
# @param switch_mode switch detection mode, currently only poll supported
# @param led_pin led pin displaying the switch state
# @param led_inv led inverse state displaying
# @return none
############################################################################
def __init__(self, dev_id, skill_entity, switch_pin,
switch_mode=SWITCH_SKILL_MODE_POLL, led_pin=_NO_VALUE,
led_inv=False):
super().__init__(dev_id, skill_entity)
self._skill_name = "SWITCH skill"
self._pub_state = UserPubs("switch/triggered", dev_id, "std", skill_entity)
self._switch_pin = switch_pin
self._led_pin = led_pin
self._led_inf = led_inv
self._switch_mode = switch_mode
self._switch_gpio = None
self._led_gpio = None
        self._switch_trigger = _SWITCH_STATE_LOW
        self._last_time = time.ticks_ms()  # initialise so execute_skill() can compute a tick diff before the first trigger
############################################################################
# @brief starts the skill
# @return none
############################################################################
def start_skill(self):
global _NO_VALUE
if self._switch_pin != _NO_VALUE:
self._switch_gpio = machine.Pin(self._switch_pin, machine.Pin.IN)
if self._led_pin != _NO_VALUE:
self._led_gpio = machine.Pin(self._led_pin, machine.Pin.OUT)
T.trace(__name__, T.DEBUG, 'led pin configured: ' + str(self._led_pin))
############################################################################
# @brief checks the switch state transition
# @return none
############################################################################
def _check_switch_state_transition(self):
if self._switch_gpio != None:
new_switch_state = self._switch_gpio.value()
T.trace(__name__, T.DEBUG, 'SWITCH signal:' + str(new_switch_state))
if new_switch_state != self._current_state:
self._current_state = new_switch_state
T.trace(__name__, T.DEBUG, 'state transition detected...')
if new_switch_state == self._switch_trigger:
self._publish_state = True
if self._led_gpio != None:
if self._led_inf == False:
self._led_gpio.value(self._current_state)
T.trace(__name__, T.DEBUG, 'led state:' + str(self._current_state))
else:
self._led_gpio.value(_SWITCH_STATE_DICT_INV[self._current_state])
T.trace(__name__, T.DEBUG, 'led state:' + str(_SWITCH_STATE_DICT_INV[self._current_state]))
############################################################################
# @brief executes the skill cyclic task
# @return none
############################################################################
def execute_skill(self):
current_time = time.ticks_ms()
        # check whether the published 'ON' state needs to fall back to 'OFF' after _SWITCH_OFF_TIME ms
if self._switch_state_published:
if abs(time.ticks_diff(current_time, self._last_time)) > _SWITCH_OFF_TIME:
self._pub_state.publish('OFF')
self._switch_state_published = False
self._check_switch_state_transition()
if self._publish_state == True:
self._publish_state = False
self._pub_state.publish('ON')
self._last_time = current_time
self._switch_state_published = True
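    # Timing sketch for the cycle above: a transition to the trigger level
    # publishes 'ON' and records the tick count; on a later cycle, once more
    # than _SWITCH_OFF_TIME (1000 ms) has elapsed, 'OFF' is published, so each
    # actuation yields roughly a one second 'ON' pulse on the MQTT topic.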
############################################################################
# @brief executes the incoming subscription callback handler
# @param topic topic identifier of the messsage
# @param payload payload of the message
# @return none
############################################################################
def execute_subscription(self, topic, data):
T.trace(__name__, T.ERROR, 'unexpected subscription')
T.trace(__name__, T.DEBUG, 'topic: ' + topic)
T.trace(__name__, T.DEBUG, 'data: ' + data)
############################################################################
# @brief stopps the skill
# @return none
############################################################################
def stop_skill(self):
super().stop_skill()
self._switch_gpio = None
self._current_state = _SWITCH_STATE_LOW
if self._led_gpio != None:
if self._led_inf == False:
self._led_gpio.value(self._current_state)
T.trace(__name__, T.DEBUG, 'led state:' + str(self._current_state))
else:
self._led_gpio.value(_SWITCH_STATE_DICT_INV[self._current_state])
T.trace(__name__, T.DEBUG, 'led state:' + str(_SWITCH_STATE_DICT_INV[self._current_state]))
self._led_gpio = None
################################################################################
# Scripts
T.configure(__name__, T.INFO)
if __name__ == "__main__":
# execute only if run as a script
T.trace(__name__, T.WARNING, 'no main script defined ')
| 3,229
| 2,603
| 22
|
def7e890c66fcbef5ecfc97dcc5848e24c252ed5
| 14,062
|
py
|
Python
|
vyper/parser/parser_utils.py
|
mpcnat/vyper
|
731263c9bea826a167639989350688833f68c182
|
[
"MIT"
] | null | null | null |
vyper/parser/parser_utils.py
|
mpcnat/vyper
|
731263c9bea826a167639989350688833f68c182
|
[
"MIT"
] | null | null | null |
vyper/parser/parser_utils.py
|
mpcnat/vyper
|
731263c9bea826a167639989350688833f68c182
|
[
"MIT"
] | null | null | null |
from vyper.utils import GAS_IDENTITY, GAS_IDENTITYWORD
from vyper.exceptions import (
InvalidLiteralException,
TypeMismatchException
)
from vyper.parser.lll_node import (
LLLnode
)
from vyper.types import (
BaseType,
ByteArrayType,
ContractType,
NullType,
StructType,
MappingType,
TupleType,
ListType,
)
from vyper.types import (
is_base_type,
are_units_compatible,
get_size_of_type,
ceil32
)
from vyper.utils import (
SizeLimits,
MemoryPositions,
DECIMAL_DIVISOR
)
# Get a decimal number as a fraction with denominator multiple of 10
# Is a number of decimal form (e.g. 65281) or 0x form (e.g. 0xff01) or 0b binary form (e.g. 0b0001)
# Copies byte array
# Copy bytes
# Accepts 4 arguments:
# (i) an LLL node for the start position of the source
# (ii) an LLL node for the start position of the destination
# (iii) an LLL node for the length
# (iv) a constant for the max length
# Takes a <32 byte array as input, and outputs a number.
# Take a value representing a memory or storage location, and descend down to an element or member variable
# Convert from one base type to another
# Unwrap location
| 46.562914
| 148
| 0.59323
|
from vyper.utils import GAS_IDENTITY, GAS_IDENTITYWORD
from vyper.exceptions import (
InvalidLiteralException,
TypeMismatchException
)
from vyper.parser.lll_node import (
LLLnode
)
from vyper.types import (
BaseType,
ByteArrayType,
ContractType,
NullType,
StructType,
MappingType,
TupleType,
ListType,
)
from vyper.types import (
is_base_type,
are_units_compatible,
get_size_of_type,
ceil32
)
from vyper.utils import (
SizeLimits,
MemoryPositions,
DECIMAL_DIVISOR
)
# Get a decimal number as a fraction with denominator multiple of 10
def get_number_as_fraction(expr, context):
context_slice = context.origcode.splitlines()[expr.lineno - 1][expr.col_offset:]
t = 0
while t < len(context_slice) and context_slice[t] in '0123456789.':
t += 1
top = int(context_slice[:t].replace('.', ''))
bottom = 1 if '.' not in context_slice[:t] else 10**(t - context_slice[:t].index('.') - 1)
if expr.n < 0:
top *= -1
return context_slice[:t], top, bottom
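# Worked example (a sketch of the behaviour above): for the literal 65.281,
# t stops after "65.281", top = 65281 and bottom = 10**(6 - 2 - 1) = 1000,
# so the function returns ("65.281", 65281, 1000).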
# Is a number of decimal form (e.g. 65281) or 0x form (e.g. 0xff01) or 0b binary form (e.g. 0b0001)
def get_original_if_0_prefixed(expr, context):
context_slice = context.origcode.splitlines()[expr.lineno - 1][expr.col_offset:]
type_prefix = context_slice[:2]
if type_prefix not in ('0x', '0b'):
return None
if type_prefix == '0x':
t = 0
while t + 2 < len(context_slice) and context_slice[t + 2] in '0123456789abcdefABCDEF':
t += 1
return context_slice[:t + 2]
elif type_prefix == '0b':
t = 0
while t + 2 < len(context_slice) and context_slice[t + 2] in '01':
t += 1
return context_slice[:t + 2]
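# Worked example: for a source slice beginning "0xff01", t advances over the
# four hex digits and the function returns "0xff01"; a slice that starts with
# neither "0x" nor "0b" returns None.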
# Copies byte array
def make_byte_array_copier(destination, source):
if not isinstance(source.typ, (ByteArrayType, NullType)):
raise TypeMismatchException("Can only set a byte array to another byte array")
if isinstance(source.typ, ByteArrayType) and source.typ.maxlen > destination.typ.maxlen:
raise TypeMismatchException("Cannot cast from greater max-length %d to shorter max-length %d" % (source.typ.maxlen, destination.typ.maxlen))
# Special case: memory to memory
if source.location == "memory" and destination.location == "memory":
gas_calculation = GAS_IDENTITY + GAS_IDENTITYWORD * (ceil32(source.typ.maxlen) // 32)
o = LLLnode.from_list(
['with', '_source', source,
['with', '_sz', ['add', 32, ['mload', '_source']],
['assert', ['call', ['add', 18, ['div', '_sz', 10]], 4, 0, '_source', '_sz', destination, '_sz']]]],
typ=None, add_gas_estimate=gas_calculation, annotation='Memory copy'
)
return o
pos_node = LLLnode.from_list('_pos', typ=source.typ, location=source.location)
# Get the length
if isinstance(source.typ, NullType):
length = 1
elif source.location == "memory":
length = ['add', ['mload', '_pos'], 32]
elif source.location == "storage":
length = ['add', ['sload', '_pos'], 32]
pos_node = LLLnode.from_list(['sha3_32', pos_node], typ=source.typ, location=source.location)
else:
raise Exception("Unsupported location:" + source.location)
if destination.location == "storage":
destination = LLLnode.from_list(['sha3_32', destination], typ=destination.typ, location=destination.location)
# Maximum theoretical length
max_length = 32 if isinstance(source.typ, NullType) else source.typ.maxlen + 32
return LLLnode.from_list(['with', '_pos', 0 if isinstance(source.typ, NullType) else source,
make_byte_slice_copier(destination, pos_node, length, max_length)], typ=None)
# Copy bytes
# Accepts 4 arguments:
# (i) an LLL node for the start position of the source
# (ii) an LLL node for the start position of the destination
# (iii) an LLL node for the length
# (iv) a constant for the max length
def make_byte_slice_copier(destination, source, length, max_length):
# Special case: memory to memory
if source.location == "memory" and destination.location == "memory":
return LLLnode.from_list(['with', '_l', max_length,
['pop', ['call', 18 + max_length // 10, 4, 0, source,
'_l', destination, '_l']]], typ=None, annotation='copy byte slice dest: %s' % str(destination))
# Copy over data
if isinstance(source.typ, NullType):
loader = 0
elif source.location == "memory":
loader = ['mload', ['add', '_pos', ['mul', 32, ['mload', MemoryPositions.FREE_LOOP_INDEX]]]]
elif source.location == "storage":
loader = ['sload', ['add', '_pos', ['mload', MemoryPositions.FREE_LOOP_INDEX]]]
else:
raise Exception("Unsupported location:" + source.location)
# Where to paste it?
if destination.location == "memory":
setter = ['mstore', ['add', '_opos', ['mul', 32, ['mload', MemoryPositions.FREE_LOOP_INDEX]]], loader]
elif destination.location == "storage":
setter = ['sstore', ['add', '_opos', ['mload', MemoryPositions.FREE_LOOP_INDEX]], loader]
else:
raise Exception("Unsupported location:" + destination.location)
# Check to see if we hit the length
checker = ['if', ['gt', ['mul', 32, ['mload', MemoryPositions.FREE_LOOP_INDEX]], '_actual_len'], 'break']
# Make a loop to do the copying
o = ['with', '_pos', source,
['with', '_opos', destination,
['with', '_actual_len', length,
['repeat', MemoryPositions.FREE_LOOP_INDEX, 0, (max_length + 31) // 32,
['seq', checker, setter]]]]]
return LLLnode.from_list(o, typ=None, annotation='copy byte slice src: %s dst: %s' % (source, destination))
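# Loop mechanics of the copier above: data moves one 32-byte word per
# iteration, the repeat runs (max_length + 31) // 32 times, and the checker
# breaks out early once 32 * loop_index exceeds the actual runtime length.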
# Takes a <32 byte array as input, and outputs a number.
def byte_array_to_num(arg, expr, out_type, offset=32,):
if arg.location == "memory":
lengetter = LLLnode.from_list(['mload', '_sub'], typ=BaseType('int128'))
first_el_getter = LLLnode.from_list(['mload', ['add', 32, '_sub']], typ=BaseType('int128'))
elif arg.location == "storage":
lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))
first_el_getter = LLLnode.from_list(['sload', ['add', 1, ['sha3_32', '_sub']]], typ=BaseType('int128'))
if out_type == 'int128':
result = ['clamp',
['mload', MemoryPositions.MINNUM],
['div', '_el1', ['exp', 256, ['sub', 32, '_len']]],
['mload', MemoryPositions.MAXNUM]]
elif out_type == 'uint256':
result = ['div', '_el1', ['exp', 256, ['sub', offset, '_len']]]
return LLLnode.from_list(['with', '_sub', arg,
['with', '_el1', first_el_getter,
['with', '_len', ['clamp', 0, lengetter, 32],
result
]]],
typ=BaseType(out_type), annotation='bytearray to number (%s)' % out_type)
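# Worked example: a 2-byte array holding 0x6162 ("ab") loads the first word as
# 0x6162 followed by 30 zero bytes; dividing by 256**(32 - 2) right-aligns it,
# yielding 24930 (0x6162), clamped to the int128 range for out_type 'int128'.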
def get_length(arg):
if arg.location == "memory":
return LLLnode.from_list(['mload', arg], typ=BaseType('int128'))
elif arg.location == "storage":
return LLLnode.from_list(['sload', ['sha3_32', arg]], typ=BaseType('int128'))
def getpos(node):
return (node.lineno, node.col_offset)
# Take a value representing a memory or storage location, and descend down to an element or member variable
def add_variable_offset(parent, key, pos):
typ, location = parent.typ, parent.location
if isinstance(typ, (StructType, TupleType)):
if isinstance(typ, StructType):
if not isinstance(key, str):
raise TypeMismatchException("Expecting a member variable access; cannot access element %r" % key, pos)
if key not in typ.members:
raise TypeMismatchException("Object does not have member variable %s" % key, pos)
subtype = typ.members[key]
attrs = sorted(typ.members.keys())
if key not in attrs:
raise TypeMismatchException("Member %s not found. Only the following available: %s" % (key, " ".join(attrs)), pos)
index = attrs.index(key)
annotation = key
else:
if not isinstance(key, int):
raise TypeMismatchException("Expecting a static index; cannot access element %r" % key, pos)
attrs = list(range(len(typ.members)))
index = key
annotation = None
if location == 'storage':
return LLLnode.from_list(['add', ['sha3_32', parent], LLLnode.from_list(index, annotation=annotation)],
typ=subtype,
location='storage')
elif location == 'storage_prehashed':
return LLLnode.from_list(['add', parent, LLLnode.from_list(index, annotation=annotation)],
typ=subtype,
location='storage')
elif location == 'memory':
offset = 0
for i in range(index):
offset += 32 * get_size_of_type(typ.members[attrs[i]])
return LLLnode.from_list(['add', offset, parent],
typ=typ.members[key],
location='memory',
annotation=annotation)
else:
raise TypeMismatchException("Not expecting a member variable access")
elif isinstance(typ, MappingType):
if isinstance(key.typ, ByteArrayType):
if not isinstance(typ.keytype, ByteArrayType) or (typ.keytype.maxlen < key.typ.maxlen):
raise TypeMismatchException(
'Mapping keys of bytes cannot be cast, use exact same bytes type of: %s' % str(typ.keytype), pos
)
subtype = typ.valuetype
if len(key.args[0].args) >= 3: # handle bytes literal.
sub = LLLnode.from_list([
'seq',
key,
['sha3', ['add', key.args[0].args[-1], 32], ['mload', key.args[0].args[-1]]]
])
else:
sub = LLLnode.from_list(['sha3', ['add', key.args[0].value, 32], ['mload', key.args[0].value]])
else:
subtype = typ.valuetype
sub = base_type_conversion(key, key.typ, typ.keytype, pos=pos)
if location == 'storage':
return LLLnode.from_list(['sha3_64', parent, sub],
typ=subtype,
location='storage')
elif location == 'memory':
raise TypeMismatchException("Can only have fixed-side arrays in memory, not mappings", pos)
elif isinstance(typ, ListType):
subtype = typ.subtype
sub = ['uclamplt', base_type_conversion(key, key.typ, BaseType('int128'), pos=pos), typ.count]
if location == 'storage':
return LLLnode.from_list(['add', ['sha3_32', parent], sub],
typ=subtype,
location='storage')
elif location == 'storage_prehashed':
return LLLnode.from_list(['add', parent, sub],
typ=subtype,
location='storage')
elif location == 'memory':
offset = 32 * get_size_of_type(subtype)
return LLLnode.from_list(['add', ['mul', offset, sub], parent],
typ=subtype,
location='memory')
else:
raise TypeMismatchException("Not expecting an array access ", pos)
else:
raise TypeMismatchException("Cannot access the child of a constant variable! %r" % typ, pos)
# Convert from one base type to another
def base_type_conversion(orig, frm, to, pos):
orig = unwrap_location(orig)
if getattr(frm, 'is_literal', False) and frm.typ in ('int128', 'uint256') and not SizeLimits.in_bounds(frm.typ, orig.value):
raise InvalidLiteralException("Number out of range: " + str(orig.value), pos)
if not isinstance(frm, (BaseType, NullType)) or not isinstance(to, BaseType):
raise TypeMismatchException("Base type conversion from or to non-base type: %r %r" % (frm, to), pos)
elif is_base_type(frm, to.typ) and are_units_compatible(frm, to):
return LLLnode(orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate)
elif is_base_type(frm, 'int128') and is_base_type(to, 'decimal') and are_units_compatible(frm, to):
return LLLnode.from_list(['mul', orig, DECIMAL_DIVISOR], typ=BaseType('decimal', to.unit, to.positional))
elif isinstance(frm, NullType):
if to.typ not in ('int128', 'bool', 'uint256', 'address', 'bytes32', 'decimal'):
# This is only to future proof the use of base_type_conversion.
raise TypeMismatchException("Cannot convert null-type object to type %r" % to, pos) # pragma: no cover
return LLLnode.from_list(0, typ=to)
elif isinstance(to, ContractType) and frm.typ == 'address':
return LLLnode(orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate)
# Integer literal conversion.
elif (frm.typ, to.typ, frm.is_literal) == ('int128', 'uint256', True):
return LLLnode(orig.value, orig.args, typ=to, add_gas_estimate=orig.add_gas_estimate)
else:
raise TypeMismatchException("Typecasting from base type %r to %r unavailable" % (frm, to), pos)
# Unwrap location
def unwrap_location(orig):
if orig.location == 'memory':
return LLLnode.from_list(['mload', orig], typ=orig.typ)
elif orig.location == 'storage':
return LLLnode.from_list(['sload', orig], typ=orig.typ)
else:
return orig
| 12,647
| 0
| 222
|
29ffcd6a05a2485e18fc7df508229e0ccd5838db
| 180
|
py
|
Python
|
st2common/tests/resources/loadableplugin/plugin/util/randomutil.py
|
machao19902/st2
|
6768a529af1b3c12109cbfeae19d3cf7fdb71bb7
|
[
"Apache-2.0"
] | 1
|
2020-11-09T21:05:33.000Z
|
2020-11-09T21:05:33.000Z
|
st2common/tests/resources/loadableplugin/plugin/util/randomutil.py
|
machao19902/st2
|
6768a529af1b3c12109cbfeae19d3cf7fdb71bb7
|
[
"Apache-2.0"
] | 3
|
2021-03-25T23:57:10.000Z
|
2021-03-26T00:01:05.000Z
|
st2common/tests/resources/loadableplugin/plugin/util/randomutil.py
|
machao19902/st2
|
6768a529af1b3c12109cbfeae19d3cf7fdb71bb7
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import random
from six.moves import range
| 22.5
| 65
| 0.783333
|
from __future__ import absolute_import
import random
from six.moves import range
def get_random_numbers(count):
return [random.randrange(0, 1000) for idx in range(0, count)]
| 75
| 0
| 23
|
9ae424fea148a73e82ace7484199b2bc53457d2d
| 367
|
py
|
Python
|
rpi/kibawe_v2/routine.py
|
updewsprado/updews-datalogger
|
862e39504119c9f6f8a4988ff2cd5ad39ed46863
|
[
"Unlicense"
] | null | null | null |
rpi/kibawe_v2/routine.py
|
updewsprado/updews-datalogger
|
862e39504119c9f6f8a4988ff2cd5ad39ed46863
|
[
"Unlicense"
] | null | null | null |
rpi/kibawe_v2/routine.py
|
updewsprado/updews-datalogger
|
862e39504119c9f6f8a4988ff2cd5ad39ed46863
|
[
"Unlicense"
] | null | null | null |
from xbeecoord import *
from time import sleep
import multiprocessing
reset()
sleep(5)
getRssi()
sleep(2)
wakeup()
startwait()
'''
def main():
reset()
sleep(5)
getRssi()
sleep(2)
wakeup()
startwait()
if __name__=='__main__':
p = multiprocessing.Process(target=main,name="routine")
p.start()
sleep(300)
print "terminating routine"
p.terminate()
'''
| 11.46875
| 56
| 0.689373
|
from xbeecoord import *
from time import sleep
import multiprocessing
reset()
sleep(5)
getRssi()
sleep(2)
wakeup()
startwait()
'''
def main():
reset()
sleep(5)
getRssi()
sleep(2)
wakeup()
startwait()
if __name__=='__main__':
p = multiprocessing.Process(target=main,name="routine")
p.start()
sleep(300)
print "terminating routine"
p.terminate()
'''
| 0
| 0
| 0
|
95ae4165b2b8de19eb116db0c51a0d1f8417d350
| 14,185
|
py
|
Python
|
rulesanml.py
|
asrivast28/FastSNAP
|
3240484d863ae68d18e7198c8ad1e211463715a9
|
[
"Apache-2.0"
] | null | null | null |
rulesanml.py
|
asrivast28/FastSNAP
|
3240484d863ae68d18e7198c8ad1e211463715a9
|
[
"Apache-2.0"
] | null | null | null |
rulesanml.py
|
asrivast28/FastSNAP
|
3240484d863ae68d18e7198c8ad1e211463715a9
|
[
"Apache-2.0"
] | null | null | null |
##
# @file rulesanml.py
# @author Ankit Srivastava <asrivast@gatech.edu>
#
# Copyright 2018 Georgia Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import micronap.sdk as ap
import exceptions
import os
import re
import sys
from regexparser import RegexParser
class RulesAnml(object):
"""
Class for storing ANML-NFAs corresponding to the Snort rules.
"""
def add(self, keyword, sid, patterns):
"""
Add the given patterns, identified by the sid, to the bucket corresponding to the keyword.
"""
# try to add the pattern to a dummy anml object first
# this will throw an error, if there are any issues with patterns
anml = ap.Anml()
network = anml.CreateAutomataNetwork()
self._add_patterns(network, sid, patterns)
# check if the rule satisfies the maximum STEs limit
automaton, emap = anml.CompileAnml()
info = automaton.GetInfo()
if info.ste_count > 49152 / 2:
raise AnmlException, '\nAdding patterns for rule with SID %d failed.\nRequired resources exceeded those in one half-core.\n'%sid
bucket = keyword
if self._maxStes > 0:
if info.ste_count > self._maxStes:
bucket = '%s_%d'%(keyword, sid)
if info.clock_divisor > 1:
bucket = '%s_%d'%(keyword, info.clock_divisor)
#print keyword, sid, info.clock_divisor
# create a new network if it doesn't exist
if bucket not in self._anmlNetworks:
anml = ap.Anml()
network = anml.CreateAutomataNetwork(anmlId = bucket)
self._anmlNetworks[bucket] = (anml, network)
else:
network = self._anmlNetworks[bucket][1]
# now add pattern to the network
self._add_patterns(network, sid, patterns)
def export(self, directory):
"""
Write out all the ANML-NFAs to the given directory.
"""
for bucket, anmlNetwork in self._anmlNetworks.iteritems():
anmlNetwork[1].ExportAnml(os.path.join(directory, bucket + '.anml'))
def compile(self, directory):
"""
Compile all the ANML-NFAs and write the AP-FSMs to the given directory.
"""
for bucket, anmlNetwork in self._anmlNetworks.iteritems():
#if 'general' not in keyword:
#continue
print '\nCompiling %s\n'%bucket
try:
automata, emap = anmlNetwork[0].CompileAnml()
info = automata.GetInfo()
print 'Clock divisor', info.clock_divisor
automata.Save(os.path.join(directory, bucket + '.fsm'))
except ap.ApError, e:
sys.stderr.write('\nCompilation failed with the following error message.\n%s\n'%(str(e)))
sys.stderr.flush()
print '\nDone.\n'
| 46.661184
| 159
| 0.596052
|
##
# @file rulesanml.py
# @author Ankit Srivastava <asrivast@gatech.edu>
#
# Copyright 2018 Georgia Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import micronap.sdk as ap
import exceptions
import os
import re
import sys
from regexparser import RegexParser
class AnmlException(exceptions.Exception):
pass
class RulesAnml(object):
"""
Class for storing ANML-NFAs corresponding to the Snort rules.
"""
def __init__(self, directory, maxStes = 0, maxRepeats = 0, backreferences = False):
self._maxStes = maxStes
self._maxRepeats = maxRepeats
self._backreferences = backreferences
self._anmlNetworks = {}
self._counter = 0
if self._maxRepeats > 0:
self._repetitionSids = set()
self._repetitionFile = open(os.path.join(directory, 'repetitions.txt'), 'wb')
if self._backreferences:
self._backreferenceSids = set()
self._backreferenceFile = open(os.path.join(directory, 'backreferences.txt'), 'wb')
self._orAnchorPattern = re.compile(r'^\/(?P<before>.*)(?P<start>\(|\(.*?\|)\$(?P<end>\|.*?\)|\))(?P<after>(?:\)*))\/(?P<modifiers>\w*)$')
self._anchorPattern = re.compile(r'^\/(?P<open>(?:\(\?\w*:)?)(?P<start>\^?)(?P<pattern>.*?)(?<!\\)(?P<end>\$?)(?P<close>(?:\)*))\/(?P<modifiers>\w*)$')
self._genericPattern = re.compile(r'^\/(?P<pattern>.*)\/(?P<modifiers>[ismexADSUXuJ]*)$')
def _next_boolean_id(self):
self._counter += 1
return '__boolean_%d__'%(self._counter)
def _latch_with_boolean(self, network, element, boolean):
ste = network.AddSTE('*')
network.AddAnmlEdge(ste, boolean, ap.AnmlDefs.PORT_IN)
network.AddAnmlEdge(element, boolean, ap.AnmlDefs.PORT_IN)
network.AddAnmlEdge(element, ste, ap.AnmlDefs.PORT_IN)
network.AddAnmlEdge(ste, ste, ap.AnmlDefs.PORT_IN)
def _replace_back_references(self, pattern):
matched = self._genericPattern.match(pattern)
changed = None
try:
changed = RegexParser(matched.group('pattern')).replace_groups()
changed = '/' + changed + '/' + matched.group('modifiers')
except:
changedPattern, subCount = re.subn(r'\(\?<(\w+)>', lambda x : r'(?P<%s>'%x.group(1), pattern)
if subCount > 0:
return self._replace_back_references(changedPattern)
raise
else:
return changed
def _replace_bounded_repetitions(self, pattern, maxRepeats):
matched = self._genericPattern.match(pattern)
changed = RegexParser(matched.group('pattern')).replace_repeats(maxRepeats)
if changed is not None:
changed = '/' + changed + '/' + matched.group('modifiers')
return changed
def _add_negative_dependent(self, network, regex, dependent, reportCode):
expression, depth = dependent
exprRegex = network.AddRegex(expression)
depthRegex = network.AddRegex('/.{%d}/'%depth)
rangeRegex = network.AddRegex('/.{1,%d}/'%(depth - 1))
network.AddAnmlEdge(regex, exprRegex)
network.AddAnmlEdge(regex, depthRegex)
network.AddAnmlEdge(regex, rangeRegex)
counter = network.AddCounter(1, mode = ap.CounterMode.STOP_HOLD)
network.AddAnmlEdgeEx(exprRegex, 0, counter, ap.AnmlDefs.COUNT_ONE_PORT)
network.AddAnmlEdgeEx(depthRegex, 0, counter, ap.AnmlDefs.COUNT_ONE_PORT)
network.AddAnmlEdgeEx(regex, 0, counter, ap.AnmlDefs.RESET_PORT)
kwargs = {'mode' : ap.BooleanMode.AND, 'anmlId' : self._next_boolean_id()}
if reportCode is not None:
kwargs.update({'match' : True, 'reportCode' : reportCode})
mainAnd = network.AddBoolean(**kwargs)
network.AddAnmlEdge(depthRegex, mainAnd)
network.AddAnmlEdge(counter, mainAnd)
booleanOr = network.AddBoolean(mode = ap.BooleanMode.OR, anmlId = self._next_boolean_id())
network.AddAnmlEdge(regex, booleanOr)
network.AddAnmlEdge(rangeRegex, booleanOr)
booleanNot = network.AddBoolean(mode = ap.BooleanMode.NOT, anmlId = self._next_boolean_id())
network.AddAnmlEdge(counter, booleanNot)
kwargs = {'mode' : ap.BooleanMode.AND, 'anmlId' : self._next_boolean_id()}
if reportCode is not None:
kwargs.update({'eod' : True, 'match' : True, 'reportCode' : reportCode})
eodAnd = network.AddBoolean(**kwargs)
network.AddAnmlEdge(booleanNot, eodAnd)
network.AddAnmlEdge(booleanOr, eodAnd)
return mainAnd, eodAnd
def _add_single_pattern(self, network, pattern, negation, dependent, sid, reportCode = None):
matched = self._anchorPattern.match(pattern)
kwargs = {'startType' : ap.AnmlDefs.START_OF_DATA if matched.group('start') else ap.AnmlDefs.ALL_INPUT}
if not negation and reportCode is not None and not matched.group('end') and not dependent:
kwargs.update({'reportCode' : reportCode, 'match' : True})
pattern = '/' + matched.group('open') + matched.group('pattern') + matched.group('close') + '/' + matched.group('modifiers')
if self._backreferences and sid in self._backreferenceSids:
try:
changed = self._replace_back_references(pattern)
except re.sre_parse.error:
pass
else:
pattern = changed
if self._maxRepeats > 0:
try:
changed = self._replace_bounded_repetitions(pattern, self._maxRepeats)
if changed is not None:
if sid not in self._repetitionSids:
self._repetitionFile.write('%d: %s\n'%(sid, pattern))
self._repetitionSids.add(sid)
pattern = changed
except:
pass
try:
regex = network.AddRegex(pattern, **kwargs)
except ap.ApError, e:
error = True
msg = str(e)
if self._backreferences and e.code == -112:
try:
changed = self._replace_back_references(pattern)
except re.sre_parse.error, f:
msg = 'RegexParser Error: %s'%str(f)
else:
try:
regex = network.AddRegex(changed, **kwargs)
except ap.ApError, f:
msg = str(f)
else:
self._backreferenceFile.write('%d: %s\n'%(sid, pattern))
self._backreferenceSids.add(sid)
error = False
if error:
raise AnmlException, '\nAdding pattern "%s" for rule with SID %d failed.\n%s\n'%(pattern, sid, msg)
if matched.group('end') and reportCode is not None:
kwargs = {'mode' : ap.BooleanMode.OR, 'anmlId' : self._next_boolean_id(), 'eod' : True}
if reportCode is not None:
kwargs.update({'reportCode' : reportCode, 'match' : True})
boolean = network.AddBoolean(**kwargs)
network.AddAnmlEdge(regex, boolean, ap.AnmlDefs.PORT_IN)
return (boolean, False)
if dependent:
main, eod = self._add_negative_dependent(network, regex, dependent, reportCode)
return [(main, True), (eod, False)]
if not negation:
if matched.group('end'):
return (regex, reportCode is not None)
else:
return (regex, True)
else:
kwargs = {'mode' : ap.BooleanMode.NOR, 'anmlId' : self._next_boolean_id()}
if reportCode is not None:
kwargs.update({'reportCode' : reportCode, 'match' : True, 'eod' : True})
boolean = network.AddBoolean(**kwargs)
self._latch_with_boolean(network, regex, boolean)
return (boolean, True)
def _add_multiple_patterns(self, network, patterns, sid):
elements = []
for pattern, negation, dependent in patterns:
returned = self._add_single_pattern(network, pattern, negation, dependent, sid)
returned = [returned] if not isinstance(returned, list) else returned
for element, latch in returned:
if negation or not latch:
elements.append(element)
else:
boolean = network.AddBoolean(mode = ap.BooleanMode.OR, anmlId = self._next_boolean_id())
self._latch_with_boolean(network, element, boolean)
elements.append(boolean)
return elements
def _match_or_anchor(self, pattern):
matched = self._orAnchorPattern.match(pattern)
if matched is not None:
altPattern = []
for first, second in re.findall(r'(.*?)(\||\))', matched.group('start') + matched.group('end')):
alt = first[1:] if first.startswith('(') else first
if alt:
altPattern.append(alt)
altPattern = altPattern[0] if len(altPattern) == 1 else '(' + '|'.join(altPattern) + ')'
return matched.group('before'), altPattern, matched.group('after'), matched.group('modifiers')
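    # Worked example for the helper above (a sketch): the pattern '/abc($|x)/i'
    # matches with before='abc', start='(', end='|x)' and after='', so it
    # returns ('abc', 'x', '', 'i'), splitting the '$' alternative out from 'x'.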
def _add_patterns(self, network, sid, patterns):
if len(patterns) == 1:
pattern, negation, dependent = patterns[0]
matched = self._match_or_anchor(pattern)
if matched is not None:
before, altPattern, after, modifiers = matched
pattern = '/' + before + after + '/' + modifiers
regex, latch = self._add_single_pattern(network, pattern, negation, dependent, sid)
boolean = network.AddBoolean(mode = ap.BooleanMode.OR, anmlId = self._next_boolean_id(),
match = True, reportCode = sid, eod = True)
network.AddAnmlEdge(regex, boolean, ap.AnmlDefs.PORT_IN)
pattern = '/' + before + altPattern + after + '/' + modifiers
self._add_single_pattern(network, pattern, negation, dependent, sid, reportCode = sid)
else:
for index in xrange(len(patterns)):
pattern, negation, dependent = patterns[index]
matched = self._match_or_anchor(pattern)
if matched is not None:
before, altPattern, after, modifiers = matched
patterns[index] = ('/' + before + '$' + after + '/' + modifiers, negation, dependent)
self._add_patterns(network, sid, patterns)
patterns[index] = ('/' + before + altPattern + after + '/' + modifiers, negation, dependent)
self._add_patterns(network, sid, patterns)
break
else:
elements = self._add_multiple_patterns(network, patterns, sid)
boolean = network.AddBoolean(mode = ap.BooleanMode.AND, reportCode = sid, match = True, eod = True, anmlId = self._next_boolean_id())
for element in elements:
network.AddAnmlEdge(element, boolean, ap.AnmlDefs.PORT_IN)
def add(self, keyword, sid, patterns):
"""
Add the given patterns, identified by the sid, to the bucket corresponding to the keyword.
"""
# try to add the pattern to a dummy anml object first
# this will throw an error, if there are any issues with patterns
anml = ap.Anml()
network = anml.CreateAutomataNetwork()
self._add_patterns(network, sid, patterns)
# check if the rule satisfies the maximum STEs limit
automaton, emap = anml.CompileAnml()
info = automaton.GetInfo()
if info.ste_count > 49152 / 2:
raise AnmlException, '\nAdding patterns for rule with SID %d failed.\nRequired resources exceeded those in one half-core.\n'%sid
bucket = keyword
if self._maxStes > 0:
if info.ste_count > self._maxStes:
bucket = '%s_%d'%(keyword, sid)
if info.clock_divisor > 1:
bucket = '%s_%d'%(keyword, info.clock_divisor)
#print keyword, sid, info.clock_divisor
# create a new network if it doesn't exist
if bucket not in self._anmlNetworks:
anml = ap.Anml()
network = anml.CreateAutomataNetwork(anmlId = bucket)
self._anmlNetworks[bucket] = (anml, network)
else:
network = self._anmlNetworks[bucket][1]
# now add pattern to the network
self._add_patterns(network, sid, patterns)
def export(self, directory):
"""
Write out all the ANML-NFAs to the given directory.
"""
for bucket, anmlNetwork in self._anmlNetworks.iteritems():
anmlNetwork[1].ExportAnml(os.path.join(directory, bucket + '.anml'))
def compile(self, directory):
"""
Compile all the ANML-NFAs and write the AP-FSMs to the given directory.
"""
for bucket, anmlNetwork in self._anmlNetworks.iteritems():
#if 'general' not in keyword:
#continue
print '\nCompiling %s\n'%bucket
try:
automata, emap = anmlNetwork[0].CompileAnml()
info = automata.GetInfo()
print 'Clock divisor', info.clock_divisor
automata.Save(os.path.join(directory, bucket + '.fsm'))
except ap.ApError, e:
sys.stderr.write('\nCompilation failed with the following error message.\n%s\n'%(str(e)))
sys.stderr.flush()
print '\nDone.\n'
| 10,482
| 30
| 292
|
c319caa266be2ae157a2924b02ace1fdbaf862de
| 16,853
|
py
|
Python
|
tracformatter/trac/test.py
|
kbower/tracwikiconv
|
6db4c856c8ab64beac666226519ae2e8d513c428
|
[
"BSD-3-Clause"
] | 1
|
2017-02-09T15:55:16.000Z
|
2017-02-09T15:55:16.000Z
|
tracformatter/trac/test.py
|
kbower/tracwikiconv
|
6db4c856c8ab64beac666226519ae2e8d513c428
|
[
"BSD-3-Clause"
] | null | null | null |
tracformatter/trac/test.py
|
kbower/tracwikiconv
|
6db4c856c8ab64beac666226519ae2e8d513c428
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
import doctest
import os
import unittest
import sys
try:
from babel import Locale
locale_en = Locale.parse('en_US')
except ImportError:
locale_en = None
from trac.config import Configuration
from trac.core import Component, ComponentManager
from trac.env import Environment
from trac.db.api import _parse_db_str, DatabaseManager
from trac.db.sqlite_backend import SQLiteConnection
from trac.db.util import ConnectionWrapper
import trac.db.postgres_backend
import trac.db.mysql_backend
from trac.ticket.default_workflow import load_workflow_config_snippet
from trac.util import translation
def Mock(bases=(), *initargs, **kw):
"""
Simple factory for dummy classes that can be used as replacement for the
real implementation in tests.
Base classes for the mock can be specified using the first parameter, which
must be either a tuple of class objects or a single class object. If the
bases parameter is omitted, the base class of the mock will be object.
So to create a mock that is derived from the builtin dict type, you can do:
>>> mock = Mock(dict)
>>> mock['foo'] = 'bar'
>>> mock['foo']
'bar'
Attributes of the class are provided by any additional keyword parameters.
>>> mock = Mock(foo='bar')
>>> mock.foo
'bar'
    Objects produced by this function have the special feature of not requiring
the 'self' parameter on methods, because you should keep data at the scope
of the test function. So you can just do:
>>> mock = Mock(add=lambda x,y: x+y)
>>> mock.add(1, 1)
2
To access attributes from the mock object from inside a lambda function,
just access the mock itself:
>>> mock = Mock(dict, do=lambda x: 'going to the %s' % mock[x])
>>> mock['foo'] = 'bar'
>>> mock.do('foo')
'going to the bar'
Because assignments or other types of statements don't work in lambda
functions, assigning to a local variable from a mock function requires some
extra work:
>>> myvar = [None]
>>> mock = Mock(set=lambda x: myvar.__setitem__(0, x))
>>> mock.set(1)
>>> myvar[0]
1
"""
if not isinstance(bases, tuple):
bases = (bases,)
cls = type('Mock', bases, {})
mock = cls(*initargs)
for k, v in kw.items():
setattr(mock, k, v)
return mock
class MockPerm(object):
"""Fake permission class. Necessary as Mock can not be used with operator
overloading."""
username = ''
__contains__ = has_permission
assert_permission = require
class TestSetup(unittest.TestSuite):
"""
Test suite decorator that allows a fixture to be setup for a complete
suite of test cases.
"""
def setUp(self):
"""Sets up the fixture, and sets self.fixture if needed"""
pass
def tearDown(self):
"""Tears down the fixture"""
pass
def run(self, result):
"""Setup the fixture (self.setUp), call .setFixture on all the tests,
and tear down the fixture (self.tearDown)."""
self.setUp()
if hasattr(self, 'fixture'):
for test in self._tests:
if hasattr(test, 'setFixture'):
test.setFixture(self.fixture)
unittest.TestSuite.run(self, result)
self.tearDown()
return result
def _wrapped_run(self, *args, **kwargs):
"Python 2.7 / unittest2 compatibility - there must be a better way..."
self.setUp()
if hasattr(self, 'fixture'):
for test in self._tests:
if hasattr(test, 'setFixture'):
test.setFixture(self.fixture)
unittest.TestSuite._wrapped_run(self, *args, **kwargs)
self.tearDown()
# -- Database utilities
# -- Environment stub
class EnvironmentStub(Environment):
"""A stub of the trac.env.Environment object for testing."""
href = abs_href = None
global_databasemanager = None
def __init__(self, default_data=False, enable=None, disable=None,
path=None, destroying=False):
"""Construct a new Environment stub object.
:param default_data: If True, populate the database with some
defaults.
:param enable: A list of component classes or name globs to
activate in the stub environment.
"""
ComponentManager.__init__(self)
Component.__init__(self)
self.systeminfo = []
import trac
self.path = path
if self.path is None:
self.path = os.path.dirname(trac.__file__)
if not os.path.isabs(self.path):
self.path = os.path.join(os.getcwd(), self.path)
# -- configuration
self.config = Configuration(None)
# We have to have a ticket-workflow config for ''lots'' of things to
# work. So insert the basic-workflow config here. There may be a
# better solution than this.
load_workflow_config_snippet(self.config, 'basic-workflow.ini')
self.config.set('logging', 'log_level', 'DEBUG')
self.config.set('logging', 'log_type', 'stderr')
if enable is not None:
self.config.set('components', 'trac.*', 'disabled')
else:
self.config.set('components', 'tracopt.versioncontrol.svn.*',
'enabled')
for name_or_class in enable or ():
config_key = self._component_name(name_or_class)
self.config.set('components', config_key, 'enabled')
for name_or_class in disable or ():
config_key = self._component_name(name_or_class)
self.config.set('components', config_key, 'disabled')
# -- logging
from trac.log import logger_handler_factory
self.log, self._log_handler = logger_handler_factory('test')
# -- database
self.config.set('components', 'trac.db.*', 'enabled')
self.dburi = get_dburi()
init_global = False
if self.global_databasemanager:
            self.components[DatabaseManager] = self.global_databasemanager
else:
self.config.set('trac', 'database', self.dburi)
self.global_databasemanager = DatabaseManager(self)
self.config.set('trac', 'debug_sql', True)
self.config.set('logging', 'log_type', 'stderr')
self.config.set('logging', 'log_level', 'DEBUG')
init_global = not destroying
if default_data or init_global:
self.reset_db(default_data)
from trac.web.href import Href
self.href = Href('/trac.cgi')
self.abs_href = Href('http://example.org/trac.cgi')
self.known_users = []
translation.activate(locale_en)
def reset_db(self, default_data=None):
"""Remove all data from Trac tables, keeping the tables themselves.
:param default_data: after clean-up, initialize with default data
:return: True upon success
"""
from trac import db_default
scheme, db_prop = _parse_db_str(self.dburi)
tables = []
remove_sqlite_db = False
try:
with self.db_transaction as db:
db.rollback() # make sure there's no transaction in progress
# check the database version
database_version = db(
"SELECT value FROM system WHERE name='database_version'")
if database_version:
database_version = int(database_version[0][0])
if database_version == db_default.db_version:
# same version, simply clear the tables (faster)
m = sys.modules[__name__]
reset_fn = 'reset_%s_db' % scheme
if hasattr(m, reset_fn):
tables = getattr(m, reset_fn)(self, db_prop)
else:
# different version or version unknown, drop the tables
remove_sqlite_db = True
self.destroy_db(scheme, db_prop)
except Exception, e:
# "Database not found ...",
# "OperationalError: no such table: system" or the like
pass
db = None # as we might shutdown the pool FIXME no longer needed!
if scheme == 'sqlite' and remove_sqlite_db:
path = db_prop['path']
if path != ':memory:':
if not os.path.isabs(path):
path = os.path.join(self.path, path)
self.global_databasemanager.shutdown()
os.remove(path)
if not tables:
self.global_databasemanager.init_db()
# we need to make sure the next get_db_cnx() will re-create
# a new connection aware of the new data model - see #8518.
if self.dburi != 'sqlite::memory:':
self.global_databasemanager.shutdown()
with self.db_transaction as db:
if default_data:
for table, cols, vals in db_default.get_data(db):
db.executemany("INSERT INTO %s (%s) VALUES (%s)"
% (table, ','.join(cols),
','.join(['%s' for c in cols])),
vals)
else:
db("INSERT INTO system (name, value) VALUES (%s, %s)",
('database_version', str(db_default.db_version)))
    # overridden
def locate(fn):
"""Locates a binary on the path.
Returns the fully-qualified path, or None.
"""
exec_suffix = '.exe' if os.name == 'nt' else ''
for p in ["."] + os.environ['PATH'].split(os.pathsep):
f = os.path.join(p, fn + exec_suffix)
if os.path.exists(f):
return f
return None
INCLUDE_FUNCTIONAL_TESTS = True
if __name__ == '__main__':
#FIXME: this is a bit inelegant
if '--skip-functional-tests' in sys.argv:
sys.argv.remove('--skip-functional-tests')
INCLUDE_FUNCTIONAL_TESTS = False
unittest.main(defaultTest='suite')
| 36.958333
| 81
| 0.587314
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
import doctest
import os
import unittest
import sys
try:
from babel import Locale
locale_en = Locale.parse('en_US')
except ImportError:
locale_en = None
from trac.config import Configuration
from trac.core import Component, ComponentManager
from trac.env import Environment
from trac.db.api import _parse_db_str, DatabaseManager
from trac.db.sqlite_backend import SQLiteConnection
from trac.db.util import ConnectionWrapper
import trac.db.postgres_backend
import trac.db.mysql_backend
from trac.ticket.default_workflow import load_workflow_config_snippet
from trac.util import translation
def Mock(bases=(), *initargs, **kw):
"""
Simple factory for dummy classes that can be used as replacement for the
real implementation in tests.
Base classes for the mock can be specified using the first parameter, which
must be either a tuple of class objects or a single class object. If the
bases parameter is omitted, the base class of the mock will be object.
So to create a mock that is derived from the builtin dict type, you can do:
>>> mock = Mock(dict)
>>> mock['foo'] = 'bar'
>>> mock['foo']
'bar'
Attributes of the class are provided by any additional keyword parameters.
>>> mock = Mock(foo='bar')
>>> mock.foo
'bar'
    Objects produced by this function have the special feature of not requiring
the 'self' parameter on methods, because you should keep data at the scope
of the test function. So you can just do:
>>> mock = Mock(add=lambda x,y: x+y)
>>> mock.add(1, 1)
2
To access attributes from the mock object from inside a lambda function,
just access the mock itself:
>>> mock = Mock(dict, do=lambda x: 'going to the %s' % mock[x])
>>> mock['foo'] = 'bar'
>>> mock.do('foo')
'going to the bar'
Because assignments or other types of statements don't work in lambda
functions, assigning to a local variable from a mock function requires some
extra work:
>>> myvar = [None]
>>> mock = Mock(set=lambda x: myvar.__setitem__(0, x))
>>> mock.set(1)
>>> myvar[0]
1
"""
if not isinstance(bases, tuple):
bases = (bases,)
cls = type('Mock', bases, {})
mock = cls(*initargs)
for k, v in kw.items():
setattr(mock, k, v)
return mock
class MockPerm(object):
"""Fake permission class. Necessary as Mock can not be used with operator
overloading."""
username = ''
def has_permission(self, action, realm_or_resource=None, id=False,
version=False):
return True
__contains__ = has_permission
def __call__(self, realm_or_resource, id=False, version=False):
return self
def require(self, action, realm_or_resource=None, id=False, version=False):
pass
assert_permission = require
class TestSetup(unittest.TestSuite):
"""
Test suite decorator that allows a fixture to be setup for a complete
suite of test cases.
"""
def setUp(self):
"""Sets up the fixture, and sets self.fixture if needed"""
pass
def tearDown(self):
"""Tears down the fixture"""
pass
def run(self, result):
"""Setup the fixture (self.setUp), call .setFixture on all the tests,
and tear down the fixture (self.tearDown)."""
self.setUp()
if hasattr(self, 'fixture'):
for test in self._tests:
if hasattr(test, 'setFixture'):
test.setFixture(self.fixture)
unittest.TestSuite.run(self, result)
self.tearDown()
return result
def _wrapped_run(self, *args, **kwargs):
"Python 2.7 / unittest2 compatibility - there must be a better way..."
self.setUp()
if hasattr(self, 'fixture'):
for test in self._tests:
if hasattr(test, 'setFixture'):
test.setFixture(self.fixture)
unittest.TestSuite._wrapped_run(self, *args, **kwargs)
self.tearDown()
class TestCaseSetup(unittest.TestCase):
def setFixture(self, fixture):
self.fixture = fixture
# -- Database utilities
def get_dburi():
dburi = os.environ.get('TRAC_TEST_DB_URI')
if dburi:
scheme, db_prop = _parse_db_str(dburi)
# Assume the schema 'tractest' for Postgres
if scheme == 'postgres' and \
not db_prop.get('params', {}).get('schema'):
if '?' in dburi:
dburi += "&schema=tractest"
else:
dburi += "?schema=tractest"
return dburi
return 'sqlite::memory:'
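# Example (hypothetical URI): with TRAC_TEST_DB_URI set to
# 'postgres://tracuser@localhost/trac', get_dburi() returns
# 'postgres://tracuser@localhost/trac?schema=tractest'; when the variable is
# unset, it falls back to the in-memory SQLite URI.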
def reset_sqlite_db(env, db_prop):
dbname = os.path.basename(db_prop['path'])
with env.db_transaction as db:
tables = db("SELECT name FROM sqlite_master WHERE type='table'")
for table in tables:
db("DELETE FROM %s" % table)
return tables
def reset_postgres_db(env, db_prop):
with env.db_transaction as db:
dbname = db.schema
if dbname:
# reset sequences
# information_schema.sequences view is available in PostgreSQL 8.2+
# however Trac supports PostgreSQL 8.0+, uses
# pg_get_serial_sequence()
for seq in db("""
SELECT sequence_name FROM (
SELECT pg_get_serial_sequence(%s||table_name,
column_name)
AS sequence_name
FROM information_schema.columns
WHERE table_schema=%s) AS tab
WHERE sequence_name IS NOT NULL""",
(dbname + '.', dbname)):
db("ALTER SEQUENCE %s RESTART WITH 1" % seq)
# clear tables
tables = db("""SELECT table_name FROM information_schema.tables
WHERE table_schema=%s""", (dbname,))
for table in tables:
db("DELETE FROM %s" % table)
# PostgreSQL supports TRUNCATE TABLE as well
# (see http://www.postgresql.org/docs/8.1/static/sql-truncate.html)
# but on the small tables used here, DELETE is actually much faster
return tables
def reset_mysql_db(env, db_prop):
dbname = os.path.basename(db_prop['path'])
if dbname:
with env.db_transaction as db:
tables = db("""SELECT table_name FROM information_schema.tables
WHERE table_schema=%s""", (dbname,))
for table in tables:
                # TRUNCATE TABLE is preferred to DELETE FROM, as we need to reset
# the auto_increment in MySQL.
db("TRUNCATE TABLE %s" % table)
return tables
# -- Environment stub
class EnvironmentStub(Environment):
"""A stub of the trac.env.Environment object for testing."""
href = abs_href = None
global_databasemanager = None
def __init__(self, default_data=False, enable=None, disable=None,
path=None, destroying=False):
"""Construct a new Environment stub object.
:param default_data: If True, populate the database with some
defaults.
:param enable: A list of component classes or name globs to
activate in the stub environment.
"""
ComponentManager.__init__(self)
Component.__init__(self)
self.systeminfo = []
import trac
self.path = path
if self.path is None:
self.path = os.path.dirname(trac.__file__)
if not os.path.isabs(self.path):
self.path = os.path.join(os.getcwd(), self.path)
# -- configuration
self.config = Configuration(None)
# We have to have a ticket-workflow config for ''lots'' of things to
# work. So insert the basic-workflow config here. There may be a
# better solution than this.
load_workflow_config_snippet(self.config, 'basic-workflow.ini')
self.config.set('logging', 'log_level', 'DEBUG')
self.config.set('logging', 'log_type', 'stderr')
if enable is not None:
self.config.set('components', 'trac.*', 'disabled')
else:
self.config.set('components', 'tracopt.versioncontrol.svn.*',
'enabled')
for name_or_class in enable or ():
config_key = self._component_name(name_or_class)
self.config.set('components', config_key, 'enabled')
for name_or_class in disable or ():
config_key = self._component_name(name_or_class)
self.config.set('components', config_key, 'disabled')
# -- logging
from trac.log import logger_handler_factory
self.log, self._log_handler = logger_handler_factory('test')
# -- database
self.config.set('components', 'trac.db.*', 'enabled')
self.dburi = get_dburi()
init_global = False
if self.global_databasemanager:
            self.components[DatabaseManager] = self.global_databasemanager
else:
self.config.set('trac', 'database', self.dburi)
self.global_databasemanager = DatabaseManager(self)
self.config.set('trac', 'debug_sql', True)
self.config.set('logging', 'log_type', 'stderr')
self.config.set('logging', 'log_level', 'DEBUG')
init_global = not destroying
if default_data or init_global:
self.reset_db(default_data)
from trac.web.href import Href
self.href = Href('/trac.cgi')
self.abs_href = Href('http://example.org/trac.cgi')
self.known_users = []
translation.activate(locale_en)
def reset_db(self, default_data=None):
"""Remove all data from Trac tables, keeping the tables themselves.
:param default_data: after clean-up, initialize with default data
:return: True upon success
"""
from trac import db_default
scheme, db_prop = _parse_db_str(self.dburi)
tables = []
remove_sqlite_db = False
try:
with self.db_transaction as db:
db.rollback() # make sure there's no transaction in progress
# check the database version
database_version = db(
"SELECT value FROM system WHERE name='database_version'")
if database_version:
database_version = int(database_version[0][0])
if database_version == db_default.db_version:
# same version, simply clear the tables (faster)
m = sys.modules[__name__]
reset_fn = 'reset_%s_db' % scheme
if hasattr(m, reset_fn):
tables = getattr(m, reset_fn)(self, db_prop)
else:
# different version or version unknown, drop the tables
remove_sqlite_db = True
self.destroy_db(scheme, db_prop)
        except Exception:
# "Database not found ...",
# "OperationalError: no such table: system" or the like
pass
db = None # as we might shutdown the pool FIXME no longer needed!
if scheme == 'sqlite' and remove_sqlite_db:
path = db_prop['path']
if path != ':memory:':
if not os.path.isabs(path):
path = os.path.join(self.path, path)
self.global_databasemanager.shutdown()
os.remove(path)
if not tables:
self.global_databasemanager.init_db()
# we need to make sure the next get_db_cnx() will re-create
# a new connection aware of the new data model - see #8518.
if self.dburi != 'sqlite::memory:':
self.global_databasemanager.shutdown()
with self.db_transaction as db:
if default_data:
for table, cols, vals in db_default.get_data(db):
db.executemany("INSERT INTO %s (%s) VALUES (%s)"
% (table, ','.join(cols),
','.join(['%s' for c in cols])),
vals)
else:
db("INSERT INTO system (name, value) VALUES (%s, %s)",
('database_version', str(db_default.db_version)))
def destroy_db(self, scheme=None, db_prop=None):
if not (scheme and db_prop):
scheme, db_prop = _parse_db_str(self.dburi)
try:
with self.db_transaction as db:
if scheme == 'postgres' and db.schema:
db('DROP SCHEMA "%s" CASCADE' % db.schema)
elif scheme == 'mysql':
dbname = os.path.basename(db_prop['path'])
for table in db("""
SELECT table_name FROM information_schema.tables
WHERE table_schema=%s""", (dbname,)):
db("DROP TABLE IF EXISTS `%s`" % table)
except Exception:
# "TracError: Database not found...",
# psycopg2.ProgrammingError: schema "tractest" does not exist
pass
return False
    # overridden
def is_component_enabled(self, cls):
if self._component_name(cls).startswith('__main__.'):
return True
return Environment.is_component_enabled(self, cls)
def get_known_users(self, cnx=None):
return self.known_users
def locate(fn):
"""Locates a binary on the path.
Returns the fully-qualified path, or None.
"""
exec_suffix = '.exe' if os.name == 'nt' else ''
for p in ["."] + os.environ['PATH'].split(os.pathsep):
f = os.path.join(p, fn + exec_suffix)
if os.path.exists(f):
return f
return None
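# Usage sketch (the binary name and resulting path are illustrative
# assumptions):
#   svn = locate('svn')   # e.g. '/usr/bin/svn' if found on PATH, else None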
INCLUDE_FUNCTIONAL_TESTS = True
def suite():
import trac.tests
import trac.admin.tests
import trac.db.tests
import trac.mimeview.tests
import trac.ticket.tests
import trac.util.tests
import trac.versioncontrol.tests
import trac.versioncontrol.web_ui.tests
import trac.web.tests
import trac.wiki.tests
import tracopt.mimeview.tests
import tracopt.perm.tests
import tracopt.versioncontrol.git.tests
import tracopt.versioncontrol.svn.tests
suite = unittest.TestSuite()
suite.addTest(trac.tests.basicSuite())
if INCLUDE_FUNCTIONAL_TESTS:
suite.addTest(trac.tests.functionalSuite())
suite.addTest(trac.admin.tests.suite())
suite.addTest(trac.db.tests.suite())
suite.addTest(trac.mimeview.tests.suite())
suite.addTest(trac.ticket.tests.suite())
suite.addTest(trac.util.tests.suite())
suite.addTest(trac.versioncontrol.tests.suite())
suite.addTest(trac.versioncontrol.web_ui.tests.suite())
suite.addTest(trac.web.tests.suite())
suite.addTest(trac.wiki.tests.suite())
suite.addTest(tracopt.mimeview.tests.suite())
suite.addTest(tracopt.perm.tests.suite())
suite.addTest(tracopt.versioncontrol.git.tests.suite())
suite.addTest(tracopt.versioncontrol.svn.tests.suite())
suite.addTest(doctest.DocTestSuite(sys.modules[__name__]))
return suite
if __name__ == '__main__':
#FIXME: this is a bit inelegant
if '--skip-functional-tests' in sys.argv:
sys.argv.remove('--skip-functional-tests')
INCLUDE_FUNCTIONAL_TESTS = False
unittest.main(defaultTest='suite')
| 5,256
| 18
| 355
|
e389c227339afdf6dc2a373f24acbfb741a5fdfe
| 287
|
py
|
Python
|
Lecture 18/Lecture18HWAssignment3.py
|
AtharvaJoshi21/PythonPOC
|
6b95eb5bab7b28e9811e43b39e863faf2ee7565b
|
[
"MIT"
] | 1
|
2019-04-27T15:37:04.000Z
|
2019-04-27T15:37:04.000Z
|
Lecture 18/Lecture18HWAssignment3.py
|
AtharvaJoshi21/PythonPOC
|
6b95eb5bab7b28e9811e43b39e863faf2ee7565b
|
[
"MIT"
] | null | null | null |
Lecture 18/Lecture18HWAssignment3.py
|
AtharvaJoshi21/PythonPOC
|
6b95eb5bab7b28e9811e43b39e863faf2ee7565b
|
[
"MIT"
] | 1
|
2020-08-14T06:57:08.000Z
|
2020-08-14T06:57:08.000Z
|
# WAP (write a program) to accept a folder name from the user and create a zip file from its contents.
import shutil
if __name__ == "__main__":
main()
| 28.7
| 82
| 0.696864
|
# WAP (write a program) to accept a folder name from the user and create a zip file from its contents.
import shutil
def main():
inputDirPath = input("Please enter directory name: ")
shutil.make_archive("outputZip", "zip", inputDirPath)
print("Archived!")
if __name__ == "__main__":
main()
| 129
| 0
| 23
|
ed2fbd10346eec764cf27d2e4a1be3653c1425d8
| 2,174
|
py
|
Python
|
pagarmecoreapi/models/create_price_bracket_request.py
|
pagarme/pagarme-core-api-python
|
c7b11ca78ab3e7e896e5b75048e6f72b511db00e
|
[
"MIT"
] | 6
|
2021-09-02T19:55:04.000Z
|
2022-03-16T14:06:15.000Z
|
pagarmecoreapi/models/create_price_bracket_request.py
|
pagarme/pagarme-core-api-python
|
c7b11ca78ab3e7e896e5b75048e6f72b511db00e
|
[
"MIT"
] | 2
|
2021-10-11T22:48:15.000Z
|
2022-01-24T18:24:23.000Z
|
pagarmecoreapi/models/create_price_bracket_request.py
|
pagarme/pagarme-core-api-python
|
c7b11ca78ab3e7e896e5b75048e6f72b511db00e
|
[
"MIT"
] | 2
|
2021-09-12T21:43:32.000Z
|
2022-03-07T16:58:54.000Z
|
# -*- coding: utf-8 -*-
"""
pagarmecoreapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class CreatePriceBracketRequest(object):
"""Implementation of the 'CreatePriceBracketRequest' model.
Request for creating a price bracket
Attributes:
start_quantity (int): Start quantity
price (int): Price
end_quantity (int): End quantity
overage_price (int): Overage price
"""
# Create a mapping from Model property names to API property names
_names = {
"start_quantity":'start_quantity',
"price":'price',
"end_quantity":'end_quantity',
"overage_price":'overage_price'
}
def __init__(self,
start_quantity=None,
price=None,
end_quantity=None,
overage_price=None):
"""Constructor for the CreatePriceBracketRequest class"""
# Initialize members of the class
self.start_quantity = start_quantity
self.price = price
self.end_quantity = end_quantity
self.overage_price = overage_price
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
start_quantity = dictionary.get('start_quantity')
price = dictionary.get('price')
end_quantity = dictionary.get('end_quantity')
overage_price = dictionary.get('overage_price')
# Return an object of this model
return cls(start_quantity,
price,
end_quantity,
overage_price)
| 28.605263
| 84
| 0.586937
|
# -*- coding: utf-8 -*-
"""
pagarmecoreapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class CreatePriceBracketRequest(object):
"""Implementation of the 'CreatePriceBracketRequest' model.
Request for creating a price bracket
Attributes:
start_quantity (int): Start quantity
price (int): Price
end_quantity (int): End quantity
overage_price (int): Overage price
"""
# Create a mapping from Model property names to API property names
_names = {
"start_quantity":'start_quantity',
"price":'price',
"end_quantity":'end_quantity',
"overage_price":'overage_price'
}
def __init__(self,
start_quantity=None,
price=None,
end_quantity=None,
overage_price=None):
"""Constructor for the CreatePriceBracketRequest class"""
# Initialize members of the class
self.start_quantity = start_quantity
self.price = price
self.end_quantity = end_quantity
self.overage_price = overage_price
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
start_quantity = dictionary.get('start_quantity')
price = dictionary.get('price')
end_quantity = dictionary.get('end_quantity')
overage_price = dictionary.get('overage_price')
# Return an object of this model
return cls(start_quantity,
price,
end_quantity,
overage_price)
| 0
| 0
| 0
|
985de2c940ad894eae35688847c4e07a0615981e
| 1,987
|
py
|
Python
|
rulelist/datastructure/attribute/attribute.py
|
HMProenca/RuleList
|
4e500c3a9813aadb149286458f11460904fd15d4
|
[
"MIT"
] | 8
|
2021-02-23T10:57:29.000Z
|
2022-03-15T10:29:08.000Z
|
rulelist/datastructure/attribute/attribute.py
|
HMProenca/RuleList
|
4e500c3a9813aadb149286458f11460904fd15d4
|
[
"MIT"
] | 3
|
2021-02-26T21:54:24.000Z
|
2021-06-09T13:28:10.000Z
|
rulelist/datastructure/attribute/attribute.py
|
HMProenca/RuleList
|
4e500c3a9813aadb149286458f11460904fd15d4
|
[
"MIT"
] | 2
|
2021-02-26T21:32:22.000Z
|
2021-11-01T09:32:38.000Z
|
from dataclasses import dataclass, field
from typing import List, AnyStr, Iterator, Any, Dict
import numpy as np
import pandas as pd
from gmpy2 import mpz
@dataclass
class Item:
""" Describes an item of an attribute.
The item is the lowest level representation of the attribute. It is defined by at least one condition and,
for example, in the case of a NominalAttribute it can be given by the condition: x = blue_eyes; and in the
NumericAttribute by: x < 3;
Attributes
----------
    bitarray : gmpy2.mpz
Bit representation of the indexes covered by the item's condition.
description : str
Text describing the item.
    number_operators : int
Number of operators necessary to describe the item.
activation_function : object
Partial function applied to DataFrame that returns boolean vector of instances where item is "present".
"""
bitarray: mpz
parent_variable: AnyStr
description: AnyStr
number_operators: int
activation_function: object
@dataclass
class Attribute:
""" Describes an explainable variable.
Contains all information regarding a certain attribute. This is the parent class for NumericAttribute and
NominalAttribute, which add extra specific information to this.
Attributes
----------
name : str
Name of the attribute.
values : np.ndarray
Vector of values associated with attribute.
max_operators : int
Maximum number of operators allowed for this variable.
min_support : int
        Minimum support: the number of instances that must be covered by an item or pattern.
"""
name: AnyStr
values: np.ndarray
max_operators: int
min_support: Any # it can be an int or a float
    cardinality_operator: Dict[int, int] = field(init=False)
    items: List[Item] = field(default_factory=list, init=False)
| 32.57377
| 111
| 0.700554
|
from dataclasses import dataclass, field
from typing import List, AnyStr, Iterator, Any, Dict
import numpy as np
import pandas as pd
from gmpy2 import mpz
@dataclass
class Item:
""" Describes an item of an attribute.
The item is the lowest level representation of the attribute. It is defined by at least one condition and,
for example, in the case of a NominalAttribute it can be given by the condition: x = blue_eyes; and in the
NumericAttribute by: x < 3;
Attributes
----------
    bitarray : gmpy2.mpz
Bit representation of the indexes covered by the item's condition.
description : str
Text describing the item.
    number_operators : int
Number of operators necessary to describe the item.
activation_function : object
Partial function applied to DataFrame that returns boolean vector of instances where item is "present".
"""
bitarray: mpz
parent_variable: AnyStr
description: AnyStr
number_operators: int
activation_function: object
@dataclass
class Attribute:
""" Describes an explainable variable.
Contains all information regarding a certain attribute. This is the parent class for NumericAttribute and
NominalAttribute, which add extra specific information to this.
Attributes
----------
name : str
Name of the attribute.
values : np.ndarray
Vector of values associated with attribute.
max_operators : int
Maximum number of operators allowed for this variable.
min_support : int
        Minimum support: the number of instances that must be covered by an item or pattern.
"""
name: AnyStr
values: np.ndarray
max_operators: int
min_support: Any # it can be an int or a float
    cardinality_operator: Dict[int, int] = field(init=False)
    items: List[Item] = field(default_factory=list, init=False)
def generate_items(self,candidate) -> Iterator[Item]:
for item in self.items:
yield item
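# Usage sketch (field values are illustrative assumptions):
#   attr = Attribute(name='age', values=np.array([20, 35, 50]),
#                    max_operators=2, min_support=0.1)
# items defaults to an empty list; cardinality_operator (init=False, no
# default) is expected to be filled in by subclasses after construction.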
| 87
| 0
| 27
|
2e818e22823029570921c6b84b3442ba2ae1d7e7
| 3,803
|
py
|
Python
|
src/codexdb/prep/spider.py
|
itrummer/CodexDB
|
15ab6268c95e8a283b69e17d5fa4cb7589580a27
|
[
"MIT"
] | null | null | null |
src/codexdb/prep/spider.py
|
itrummer/CodexDB
|
15ab6268c95e8a283b69e17d5fa4cb7589580a27
|
[
"MIT"
] | null | null | null |
src/codexdb/prep/spider.py
|
itrummer/CodexDB
|
15ab6268c95e8a283b69e17d5fa4cb7589580a27
|
[
"MIT"
] | null | null | null |
'''
Created on Sep 19, 2021
@author: immanueltrummer
'''
import argparse
import collections
import json
import pandas as pd
import sqlite3
def get_db_path(spider_dir, db_id):
""" Return path to SQLite database file.
Args:
spider_dir: path to SPIDER benchmark
db_id: database identifier
Returns:
path to SQLite database file
"""
return f'{spider_dir}/database/{db_id}/{db_id}.sqlite'
def extract(spider_dir, db_json):
""" Extract data from database into .csv files.
Args:
spider_dir: path to SPIDER main directory
db_json: JSON description of database
"""
db_id = db_json['db_id']
db_dir = f'{spider_dir}/database/{db_id}'
db_path = f'{db_dir}/{db_id}.sqlite'
print(f'Path to DB: {db_path}')
with sqlite3.connect(db_path) as con:
#con.text_factory = bytes
        con.text_factory = lambda b: b.decode(errors='ignore')
for tbl in db_json['table_names_original']:
query = f'select * from {tbl}'
df = pd.read_sql_query(query, con)
out_path = f'{db_dir}/{tbl}.csv'
df.to_csv(out_path, index=False)
def get_result(spider_dir, query_json):
""" Execute query and return result.
Args:
spider_dir: path to SPIDER benchmark
query_json: describes query by JSON
Returns:
query result
"""
db_id = query_json['db_id']
db_path = get_db_path(spider_dir, db_id)
sql = query_json['query']
with sqlite3.connect(db_path) as con:
cur = con.cursor()
cur.execute(sql)
result = cur.fetchall()
print(f'Query: {sql}; Result: {result}')
return result
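# Path construction sketch (the directory and db_id are illustrative
# assumptions):
#   get_db_path('/data/spider', 'concert_singer')
#   -> '/data/spider/database/concert_singer/concert_singer.sqlite'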
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('spider', type=str, help='Path to SPIDER benchmark')
args = parser.parse_args()
tables_path = f'{args.spider}/tables.json'
db_to_s = {}
with open(tables_path) as file:
tables = json.load(file)
nr_dbs = len(tables)
for db_idx, db in enumerate(tables):
db_id = db['db_id']
db_to_s[db_id] = db
print(f'Extracting {db_id} ({db_idx+1}/{nr_dbs})')
extract(args.spider, db)
db_path = f'{args.spider}/schemata.json'
with open(db_path, 'w') as file:
json.dump(db_to_s, file)
for in_file in ['train_spider', 'dev']:
db_to_q = collections.defaultdict(lambda:[])
all_results = []
train_path = f'{args.spider}/{in_file}.json'
with open(train_path) as file:
queries = json.load(file)
nr_queries = len(queries)
nr_valid = 0
for q_idx, q_json in enumerate(queries):
query = q_json['query']
question = q_json['question']
db_id = q_json['db_id']
print(f'"{query}" on "{db_id}" ({q_idx+1}/{nr_queries})')
db_to_q[db_id].append(q_json)
try:
result = get_result(args.spider, q_json)
row = {
'db_id':db_id, 'question':question,
'query':query, 'results':result}
all_results.append(row)
nr_valid += 1
        except Exception:
print(f'Invalid Query: {query} on {db_id}')
print(f'Processed {nr_valid}/{nr_queries} queries')
results_path = f'{args.spider}/results_{in_file}.json'
with open(results_path, 'w') as file:
json.dump(all_results, file)
q_path = f'{args.spider}/{in_file}_queries.json'
with open(q_path, 'w') as file:
json.dump(db_to_q, file)
| 31.429752
| 76
| 0.563765
|
'''
Created on Sep 19, 2021
@author: immanueltrummer
'''
import argparse
import collections
import json
import pandas as pd
import sqlite3
def get_db_path(spider_dir, db_id):
""" Return path to SQLite database file.
Args:
spider_dir: path to SPIDER benchmark
db_id: database identifier
Returns:
path to SQLite database file
"""
return f'{spider_dir}/database/{db_id}/{db_id}.sqlite'
def extract(spider_dir, db_json):
""" Extract data from database into .csv files.
Args:
spider_dir: path to SPIDER main directory
db_json: JSON description of database
"""
db_id = db_json['db_id']
db_dir = f'{spider_dir}/database/{db_id}'
db_path = f'{db_dir}/{db_id}.sqlite'
print(f'Path to DB: {db_path}')
with sqlite3.connect(db_path) as con:
#con.text_factory = bytes
        con.text_factory = lambda b: b.decode(errors='ignore')
for tbl in db_json['table_names_original']:
query = f'select * from {tbl}'
df = pd.read_sql_query(query, con)
out_path = f'{db_dir}/{tbl}.csv'
df.to_csv(out_path, index=False)
def get_result(spider_dir, query_json):
""" Execute query and return result.
Args:
spider_dir: path to SPIDER benchmark
query_json: describes query by JSON
Returns:
query result
"""
db_id = query_json['db_id']
db_path = get_db_path(spider_dir, db_id)
sql = query_json['query']
with sqlite3.connect(db_path) as con:
cur = con.cursor()
cur.execute(sql)
result = cur.fetchall()
print(f'Query: {sql}; Result: {result}')
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('spider', type=str, help='Path to SPIDER benchmark')
args = parser.parse_args()
tables_path = f'{args.spider}/tables.json'
db_to_s = {}
with open(tables_path) as file:
tables = json.load(file)
nr_dbs = len(tables)
for db_idx, db in enumerate(tables):
db_id = db['db_id']
db_to_s[db_id] = db
print(f'Extracting {db_id} ({db_idx+1}/{nr_dbs})')
extract(args.spider, db)
db_path = f'{args.spider}/schemata.json'
with open(db_path, 'w') as file:
json.dump(db_to_s, file)
for in_file in ['train_spider', 'dev']:
db_to_q = collections.defaultdict(lambda:[])
all_results = []
train_path = f'{args.spider}/{in_file}.json'
with open(train_path) as file:
queries = json.load(file)
nr_queries = len(queries)
nr_valid = 0
for q_idx, q_json in enumerate(queries):
query = q_json['query']
question = q_json['question']
db_id = q_json['db_id']
print(f'"{query}" on "{db_id}" ({q_idx+1}/{nr_queries})')
db_to_q[db_id].append(q_json)
try:
result = get_result(args.spider, q_json)
row = {
'db_id':db_id, 'question':question,
'query':query, 'results':result}
all_results.append(row)
nr_valid += 1
        except Exception:
print(f'Invalid Query: {query} on {db_id}')
print(f'Processed {nr_valid}/{nr_queries} queries')
results_path = f'{args.spider}/results_{in_file}.json'
with open(results_path, 'w') as file:
json.dump(all_results, file)
q_path = f'{args.spider}/{in_file}_queries.json'
with open(q_path, 'w') as file:
json.dump(db_to_q, file)
| 0
| 0
| 0
|
8bcf8d4d1836ae3eb763677be00b8c2f9e1f2594
| 625
|
py
|
Python
|
tests/test_layers/test_acc.py
|
wkcn/mobula
|
4eec938d6477776f5f2d68bcf41de83fb8da5195
|
[
"MIT"
] | 47
|
2017-07-15T02:13:18.000Z
|
2022-01-01T09:37:59.000Z
|
tests/test_layers/test_acc.py
|
wkcn/mobula
|
4eec938d6477776f5f2d68bcf41de83fb8da5195
|
[
"MIT"
] | 3
|
2018-06-22T13:55:12.000Z
|
2020-01-29T01:41:13.000Z
|
tests/test_layers/test_acc.py
|
wkcn/mobula
|
4eec938d6477776f5f2d68bcf41de83fb8da5195
|
[
"MIT"
] | 8
|
2017-09-03T12:42:54.000Z
|
2020-09-27T03:38:59.000Z
|
import mobula.layers as L
import numpy as np
| 27.173913
| 87
| 0.4448
|
import mobula.layers as L
import numpy as np
def test_acc():
X = np.array([[0, 1, 2],
[1, 2, 0],
[0, 1, 2],
[1, 2, 0]])
Y = np.array([1, 0, 2, 1]).reshape((-1, 1))
# top-k
# 1 [False, False, True, True]
# 2 [True, True, True, True]
target = [np.array([False, False, True, True]), np.array([True, True, True, True])]
[data, label] = L.Data([X, Y])
for i in range(2):
l = L.Accuracy(data, label = label, top_k = 1 + i)
l.reshape()
l.forward()
assert l.Y == np.mean(target[i])
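    # With these inputs the expected accuracy works out to 0.5 for top_k=1
    # and 1.0 for top_k=2 (the means of the boolean target arrays above).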
| 557
| 0
| 23
|
eb3850cf0a144d90bc658ea248d321d83bef0449
| 628
|
py
|
Python
|
Webllistoerp/chat/migrations/0004_auto_20210601_0844.py
|
Prakhar-100/DjangoProject
|
5030fabe038bd67af50364431705837aa0bde91b
|
[
"MIT"
] | null | null | null |
Webllistoerp/chat/migrations/0004_auto_20210601_0844.py
|
Prakhar-100/DjangoProject
|
5030fabe038bd67af50364431705837aa0bde91b
|
[
"MIT"
] | null | null | null |
Webllistoerp/chat/migrations/0004_auto_20210601_0844.py
|
Prakhar-100/DjangoProject
|
5030fabe038bd67af50364431705837aa0bde91b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-06-01 08:44
from django.conf import settings
from django.db import migrations, models
| 25.12
| 70
| 0.632166
|
# Generated by Django 3.1.7 on 2021-06-01 08:44
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('chat', '0003_auto_20210531_1404'),
]
operations = [
migrations.RemoveField(
model_name='chatgrouplist',
name='member_name',
),
migrations.AddField(
model_name='chatgrouplist',
name='member_name',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
]
| 0
| 481
| 23
|
544879006a299eb76867b4e97ce8c0e45d8e4d68
| 235
|
py
|
Python
|
python/ext_c.py
|
forember/lambd
|
5550b741083e463cee60ce76bb181b4c063879a1
|
[
"MIT"
] | null | null | null |
python/ext_c.py
|
forember/lambd
|
5550b741083e463cee60ce76bb181b4c063879a1
|
[
"MIT"
] | null | null | null |
python/ext_c.py
|
forember/lambd
|
5550b741083e463cee60ce76bb181b4c063879a1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# vim: set sw=2 ts=2 sts=2 et :
'''Defines a comment
(@@ c.(This is a comment!) x z) -> (x z)
'''
from lambd import *
| 16.785714
| 42
| 0.595745
|
# -*- coding: utf-8 -*-
# vim: set sw=2 ts=2 sts=2 et :
'''Defines a comment
(@@ c.(This is a comment!) x z) -> (x z)
'''
from lambd import *
def ext_parexpr(tree, extra_params, body):
body.pop(1)
tree[:] = parse_parexpr(body)
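# A sketch of the mechanics: body.pop(1) discards the comment payload from
# the parenthesised expression before re-parsing, which is how
# (@@ c.(This is a comment!) x z) reduces to (x z) per the docstring above.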
| 67
| 0
| 23
|
06ae4fcc8678e2badcda395c709576597bbf2d8c
| 1,675
|
py
|
Python
|
setup.py
|
martindevora/SHERLOCK
|
5e7492552cbce29e960684a44fd6ad875c8cf60e
|
[
"MIT"
] | 1
|
2021-01-14T16:44:48.000Z
|
2021-01-14T16:44:48.000Z
|
setup.py
|
martindevora/SHERLOCK
|
5e7492552cbce29e960684a44fd6ad875c8cf60e
|
[
"MIT"
] | null | null | null |
setup.py
|
martindevora/SHERLOCK
|
5e7492552cbce29e960684a44fd6ad875c8cf60e
|
[
"MIT"
] | null | null | null |
import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
version = os.popen('git tag -l --sort -version:refname | head -n 1').read().split('\n', 1)[0]
setuptools.setup(
name="sherlockpipe", # Replace with your own username
version=version,
author="F.J. Pozuelos & M. Dévora",
author_email="fjpozuelos@uliege.be",
description="Search for Hints of Exoplanets fRom Lightcurves Of spaCe based seeKers",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/franpoz/SHERLOCK",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=["numpy",
"cython",
"pandas",
"lightkurve",
"transitleastsquares",
"requests",
"eleanor",
"wotan",
"matplotlib",
"pyyaml",
"allesfitter",
"seaborn",
"bokeh",
"astroplan",
"astroquery",
"sklearn",
"scipy",
"tess-point",
"reproject==0.4",
"reportlab",
"astropy",
"mock > 2.0.0"
]
)
| 34.183673
| 93
| 0.46806
|
import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
version = os.popen('git tag -l --sort -version:refname | head -n 1').read().split('\n', 1)[0]
setuptools.setup(
name="sherlockpipe", # Replace with your own username
version=version,
author="F.J. Pozuelos & M. Dévora",
author_email="fjpozuelos@uliege.be",
description="Search for Hints of Exoplanets fRom Lightcurves Of spaCe based seeKers",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/franpoz/SHERLOCK",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=["numpy",
"cython",
"pandas",
"lightkurve",
"transitleastsquares",
"requests",
"eleanor",
"wotan",
"matplotlib",
"pyyaml",
"allesfitter",
"seaborn",
"bokeh",
"astroplan",
"astroquery",
"sklearn",
"scipy",
"tess-point",
"reproject==0.4",
"reportlab",
"astropy",
"mock > 2.0.0"
]
)
| 0
| 0
| 0
|
26142f2ab17a6f041b7cf830d3dcd1a97d2eab8f
| 96
|
py
|
Python
|
views/__init__.py
|
brenns10/love
|
1a2a60c510327d91eff9caf32a252141ae00a9eb
|
[
"MIT"
] | 4
|
2017-02-16T02:18:39.000Z
|
2018-01-14T01:56:21.000Z
|
views/__init__.py
|
brenns10/love
|
1a2a60c510327d91eff9caf32a252141ae00a9eb
|
[
"MIT"
] | 17
|
2017-02-16T17:19:53.000Z
|
2018-01-08T01:43:05.000Z
|
views/__init__.py
|
brenns10/love
|
1a2a60c510327d91eff9caf32a252141ae00a9eb
|
[
"MIT"
] | 4
|
2017-02-16T18:48:18.000Z
|
2018-01-08T02:34:07.000Z
|
# -*- coding: utf-8 -*-
# flake8: noqa
from . import api
from . import tasks
from . import web
| 13.714286
| 23
| 0.635417
|
# -*- coding: utf-8 -*-
# flake8: noqa
from . import api
from . import tasks
from . import web
| 0
| 0
| 0
|
22e987fdaffc1da25e1f0e351d6a5f530f8a1785
| 48
|
py
|
Python
|
sensor/__init__.py
|
Boquete/smoger
|
ceb25137684334cba0625ca669c6e786c27bbb06
|
[
"Apache-2.0"
] | 2
|
2018-03-13T18:22:18.000Z
|
2018-03-24T16:48:26.000Z
|
sensor/__init__.py
|
Boquete/smoger
|
ceb25137684334cba0625ca669c6e786c27bbb06
|
[
"Apache-2.0"
] | null | null | null |
sensor/__init__.py
|
Boquete/smoger
|
ceb25137684334cba0625ca669c6e786c27bbb06
|
[
"Apache-2.0"
] | null | null | null |
default_app_config = 'sensor.apps.SmogerConfig'
| 24
| 47
| 0.833333
|
default_app_config = 'sensor.apps.SmogerConfig'
| 0
| 0
| 0
|
cd97b5225dc818019efa0aa136acafab5f25c92c
| 1,748
|
py
|
Python
|
app/models.py
|
livra-ar/backend
|
eb052611bb9b2cfa360fa422ce059984b8d295fa
|
[
"BSD-2-Clause"
] | 1
|
2020-09-05T12:18:06.000Z
|
2020-09-05T12:18:06.000Z
|
app/models.py
|
thamidurm/ar-content-platform-backend
|
eb052611bb9b2cfa360fa422ce059984b8d295fa
|
[
"BSD-2-Clause"
] | 3
|
2021-06-09T17:46:46.000Z
|
2021-09-22T18:54:57.000Z
|
app/models.py
|
livra-ar/backend
|
eb052611bb9b2cfa360fa422ce059984b8d295fa
|
[
"BSD-2-Clause"
] | null | null | null |
import mongoengine
from mongoengine import fields, Document, ImproperlyConfigured
from creators.models import Creator
| 34.27451
| 91
| 0.732838
|
import mongoengine
from mongoengine import fields, Document, ImproperlyConfigured
from creators.models import Creator
def make_ngrams(word, min_size=2):
length = len(word)
size_range = range(min_size, max(length, min_size) + 1)
return list(set(
word[i:i + size]
for size in size_range
for i in range(0, max(0, length - size) + 1)
))
def ngrams_to_representation(phrase):
ngrams = []
for word in phrase.split():
ngrams.extend(make_ngrams(word))
return ' '.join(ngrams)
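# Worked examples (values follow directly from the definitions above):
#   make_ngrams('abc')             -> {'ab', 'bc', 'abc'} in some order
#                                     (2- and 3-grams; the set drops duplicates)
#   ngrams_to_representation('ab') -> 'ab' (one word, one 2-gram)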
class Book(Document):
title = fields.StringField(required=True)
isbns = fields.ListField(fields.StringField(unique=True), required=True)
authors = fields.ListField(fields.StringField(), required=True)
covers = fields.ListField(fields.StringField(), required=True)
publisher = fields.ReferenceField(Creator, reverse_delete_rule=mongoengine.CASCADE,
required=False, read_only=True)
active = fields.BooleanField(default=True)
ngrams = fields.StringField()
meta = {'indexes': [
{
'fields': ['$ngrams'],
'default_language': 'english',
}
]}
def save(self, *args, **kwargs):
self.ngrams = ngrams_to_representation(self.title)
return super(Book, self).save(*args, **kwargs)
class Content(Document):
title = fields.StringField(required=True)
    description = fields.StringField(required=True)
images = fields.ListField(fields.StringField(required=True), required=True)
file = fields.StringField(required=True)
creator = fields.ReferenceField(Creator, read_only=True)
book = fields.ReferenceField(Book, required=True, reverse_delete_rule=mongoengine.CASCADE)
active = fields.BooleanField(default=True)
size = fields.IntField(default=4000)
animated = fields.BooleanField(default=False)
| 464
| 1,074
| 92
|
63a39dd2b74d99d3ae37afd2f983746c46666c93
| 67
|
py
|
Python
|
lectures/code/list_filter.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | 4
|
2015-08-10T17:46:55.000Z
|
2020-04-18T21:09:03.000Z
|
lectures/code/list_filter.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | null | null | null |
lectures/code/list_filter.py
|
naskoch/python_course
|
84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3
|
[
"MIT"
] | 2
|
2019-04-24T03:31:02.000Z
|
2019-05-13T07:36:06.000Z
|
l = range(8)
print filter(lambda x: x % 2 == 0, l)
# [0, 2, 4, 6]
| 13.4
| 37
| 0.492537
|
l = range(8)
print filter(lambda x: x % 2 == 0, l)
# [0, 2, 4, 6]
| 0
| 0
| 0
|
5df99fe4deec0c6b7e42e4e7c61cbe46113104c4
| 3,014
|
py
|
Python
|
tests/pystrings/test_lineup_students.py
|
BrianLusina/PyCharm
|
144dd4f6b2d254507237f46c8ee175c407fe053d
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/pystrings/test_lineup_students.py
|
BrianLusina/PyCharm
|
144dd4f6b2d254507237f46c8ee175c407fe053d
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/pystrings/test_lineup_students.py
|
BrianLusina/PyCharm
|
144dd4f6b2d254507237f46c8ee175c407fe053d
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import unittest
from pystrings.lineup_students import lineup_students
s1 = "Tadashi Takahiro Takao Takashi Takayuki Takehiko Takeo Takeshi Takeshi"
lst1 = [
"Takehiko",
"Takayuki",
"Takahiro",
"Takeshi",
"Takeshi",
"Takashi",
"Tadashi",
"Takeo",
"Takao",
]
s2 = "Michio Miki Mikio Minori Minoru Mitsuo Mitsuru Nao Naoki Naoko Noboru Nobu Nobuo ,Nobuyuki Nori Norio Osamu Rafu Raiden Ringo Rokuro Ronin Ryo Ryoichi Ryota Ryozo Ryuichi Ryuu Saburo Sadao Samuru Satoru Satoshi Seiichi Seiji Senichi Shichiro Shig Shigekazu Shigeo Shigeru Shima Shin Shinichi Shinji Shiro Shoichi Shoji Shuichi Shuji Shunichi Susumu Tadao Tadashi Takahiro Takao Takashi Takayuki Takehiko Takeo Takeshi Takeshi Takumi Tama Tamotsu Taro Tatsuo Tatsuya Teruo Tetsip Tetsuya Tomi Tomio Toru Toshi Toshiaki Toshihiro Toshio Toshiyuki Toyo Tsuneo Tsutomu Tsuyoshi Uyeda Yasahiro Yasuhiro Yasuo Yasushi Yemon Yogi Yoichi Yori Yoshi Yoshiaki Yoshihiro Yoshikazu Yoshimitsu Yoshinori Yoshio Yoshiro Yoshito Yoshiyuki Yuichi Yuji Yuki"
lst2 = [
"Yoshimitsu",
"Yoshiyuki",
"Yoshinori",
"Yoshikazu",
"Yoshihiro",
"Toshiyuki",
"Toshihiro",
"Shigekazu",
",Nobuyuki",
"Yoshiaki",
"Yasuhiro",
"Yasahiro",
"Tsuyoshi",
"Toshiaki",
"Takehiko",
"Takayuki",
"Takahiro",
"Shunichi",
"Shinichi",
"Shichiro",
"Yoshito",
"Yoshiro",
"Yasushi",
"Tsutomu",
"Tetsuya",
"Tatsuya",
"Tamotsu",
"Takeshi",
"Takeshi",
"Takashi",
"Tadashi",
"Shuichi",
"Shoichi",
"Shigeru",
"Senichi",
"Seiichi",
"Satoshi",
"Ryuichi",
"Ryoichi",
"Mitsuru",
"Yuichi",
"Yoshio",
"Yoichi",
"Tsuneo",
"Toshio",
"Tetsip",
"Tatsuo",
"Takumi",
"Susumu",
"Shinji",
"Shigeo",
"Satoru",
"Samuru",
"Saburo",
"Rokuro",
"Raiden",
"Noboru",
"Mitsuo",
"Minoru",
"Minori",
"Michio",
"Yoshi",
"Yemon",
"Yasuo",
"Uyeda",
"Toshi",
"Tomio",
"Teruo",
"Takeo",
"Takao",
"Tadao",
"Shuji",
"Shoji",
"Shiro",
"Shima",
"Seiji",
"Sadao",
"Ryozo",
"Ryota",
"Ronin",
"Ringo",
"Osamu",
"Norio",
"Nobuo",
"Naoko",
"Naoki",
"Mikio",
"Yuki",
"Yuji",
"Yori",
"Yogi",
"Toyo",
"Toru",
"Tomi",
"Taro",
"Tama",
"Shin",
"Shig",
"Ryuu",
"Rafu",
"Nori",
"Nobu",
"Miki",
"Ryo",
"Nao",
]
s3 = "Yoshiro Yoshiro Tsuyoshi Shoichi Naoko Yori Takayuki Tsutomu Shigeo"
lst3 = [
"Tsuyoshi",
"Takayuki",
"Yoshiro",
"Yoshiro",
"Tsutomu",
"Shoichi",
"Shigeo",
"Naoko",
"Yori",
]
| 19.828947
| 747
| 0.585601
|
import unittest
from pystrings.lineup_students import lineup_students
s1 = "Tadashi Takahiro Takao Takashi Takayuki Takehiko Takeo Takeshi Takeshi"
lst1 = [
"Takehiko",
"Takayuki",
"Takahiro",
"Takeshi",
"Takeshi",
"Takashi",
"Tadashi",
"Takeo",
"Takao",
]
s2 = "Michio Miki Mikio Minori Minoru Mitsuo Mitsuru Nao Naoki Naoko Noboru Nobu Nobuo ,Nobuyuki Nori Norio Osamu Rafu Raiden Ringo Rokuro Ronin Ryo Ryoichi Ryota Ryozo Ryuichi Ryuu Saburo Sadao Samuru Satoru Satoshi Seiichi Seiji Senichi Shichiro Shig Shigekazu Shigeo Shigeru Shima Shin Shinichi Shinji Shiro Shoichi Shoji Shuichi Shuji Shunichi Susumu Tadao Tadashi Takahiro Takao Takashi Takayuki Takehiko Takeo Takeshi Takeshi Takumi Tama Tamotsu Taro Tatsuo Tatsuya Teruo Tetsip Tetsuya Tomi Tomio Toru Toshi Toshiaki Toshihiro Toshio Toshiyuki Toyo Tsuneo Tsutomu Tsuyoshi Uyeda Yasahiro Yasuhiro Yasuo Yasushi Yemon Yogi Yoichi Yori Yoshi Yoshiaki Yoshihiro Yoshikazu Yoshimitsu Yoshinori Yoshio Yoshiro Yoshito Yoshiyuki Yuichi Yuji Yuki"
lst2 = [
"Yoshimitsu",
"Yoshiyuki",
"Yoshinori",
"Yoshikazu",
"Yoshihiro",
"Toshiyuki",
"Toshihiro",
"Shigekazu",
",Nobuyuki",
"Yoshiaki",
"Yasuhiro",
"Yasahiro",
"Tsuyoshi",
"Toshiaki",
"Takehiko",
"Takayuki",
"Takahiro",
"Shunichi",
"Shinichi",
"Shichiro",
"Yoshito",
"Yoshiro",
"Yasushi",
"Tsutomu",
"Tetsuya",
"Tatsuya",
"Tamotsu",
"Takeshi",
"Takeshi",
"Takashi",
"Tadashi",
"Shuichi",
"Shoichi",
"Shigeru",
"Senichi",
"Seiichi",
"Satoshi",
"Ryuichi",
"Ryoichi",
"Mitsuru",
"Yuichi",
"Yoshio",
"Yoichi",
"Tsuneo",
"Toshio",
"Tetsip",
"Tatsuo",
"Takumi",
"Susumu",
"Shinji",
"Shigeo",
"Satoru",
"Samuru",
"Saburo",
"Rokuro",
"Raiden",
"Noboru",
"Mitsuo",
"Minoru",
"Minori",
"Michio",
"Yoshi",
"Yemon",
"Yasuo",
"Uyeda",
"Toshi",
"Tomio",
"Teruo",
"Takeo",
"Takao",
"Tadao",
"Shuji",
"Shoji",
"Shiro",
"Shima",
"Seiji",
"Sadao",
"Ryozo",
"Ryota",
"Ronin",
"Ringo",
"Osamu",
"Norio",
"Nobuo",
"Naoko",
"Naoki",
"Mikio",
"Yuki",
"Yuji",
"Yori",
"Yogi",
"Toyo",
"Toru",
"Tomi",
"Taro",
"Tama",
"Shin",
"Shig",
"Ryuu",
"Rafu",
"Nori",
"Nobu",
"Miki",
"Ryo",
"Nao",
]
s3 = "Yoshiro Yoshiro Tsuyoshi Shoichi Naoko Yori Takayuki Tsutomu Shigeo"
lst3 = [
"Tsuyoshi",
"Takayuki",
"Yoshiro",
"Yoshiro",
"Tsutomu",
"Shoichi",
"Shigeo",
"Naoko",
"Yori",
]
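# The expected lists above imply longest-name-first ordering, with ties
# broken reverse-alphabetically; a plausible reference implementation (a
# sketch, not necessarily the tested module's actual code) would be:
#   def lineup_students(s):
#       return sorted(s.split(), key=lambda w: (len(w), w), reverse=True)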
class Tests(unittest.TestCase):
def test1(self):
self.assertEqual(lineup_students(s1), (lst1))
def test_2(self):
self.assertEqual(lineup_students(s2), (lst2))
def test_3(self):
self.assertEqual(lineup_students(s3), (lst3))
| 149
| 10
| 103
|
2a7726202ca6d690fb3d53d3381336f8576ffc4e
| 4,179
|
py
|
Python
|
unittest/scripts/auto/py_shell/validation/plugin_cli_integration_norecord.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | null | null | null |
unittest/scripts/auto/py_shell/validation/plugin_cli_integration_norecord.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | 1
|
2021-09-12T22:07:06.000Z
|
2021-09-12T22:07:06.000Z
|
unittest/scripts/auto/py_shell/validation/plugin_cli_integration_norecord.py
|
mueller/mysql-shell
|
29bafc5692bd536a12c4e41c54cb587375fe52cf
|
[
"Apache-2.0"
] | null | null | null |
#@<OUT> CLI --help
The following objects provide command line operations:
cli_tester
CLI Integration Testing Plugin
cluster
Represents an InnoDB cluster.
dba
InnoDB cluster and replicaset management functions.
rs
Represents an InnoDB ReplicaSet.
shell
Gives access to general purpose functions and properties.
util
Global object that groups miscellaneous tools like upgrade checker and
JSON import.
#@<OUT> CLI plugin --help
The following object provides command line operations at 'cli_tester':
emptyChild
Empty object, exposes no functions but child does.
The following operations are available at 'cli_tester':
test
Testing cmd line args.
#@<OUT> CLI plugin function --help
NAME
test - Testing cmd line args.
SYNTAX
cli_tester test <stritem> <strlist> --namedstr=<str> <options>
WHERE
stritem: String - string parameter.
strlist: Array - string list parameter.
OPTIONS
--namedstr=<str>
String named parameter (for being after a list parameter).
--str=<str>
String option.
--strlist=<str list>
String list option.
#@<OUT> Interactive plugin function --help
ERROR: Invalid operation for cli_tester object: test-interactive
#@<OUT> Interactive plugin function help
NAME
testInteractive - Testing interactive function.
SYNTAX
cli_tester.testInteractive()
#@<OUT> CLI plugin nested child --help
The following object provides command line operations at 'cli_tester emptyChild':
grandChild
Grand child object exposing a function.
#@<OUT> CLI plugin nested grand child --help
The following operations are available at 'cli_tester emptyChild grandChild':
grand-child-function
Testing cmd line args in nested objects.
#@<OUT> CLI plugin nested grand child function --help
The following operations are available at 'cli_tester emptyChild grandChild':
grand-child-function
Testing cmd line args in nested objects.
#@<OUT> Test string list handling
String Parameter: 1,2,3,4,5
String List Parameter Item: 1
String List Parameter Item: 2
String List Parameter Item: 3
String List Parameter Item: 4
String List Parameter Item: 5
String Named Item: 1,2,3,4,5
String Option: 1,2,3,4,5
String List Option Item: 1
String List Option Item: 2
String List Option Item: 3
String List Option Item: 4
String List Option Item: 5
#@<OUT> Test string list quoted args for interpreter
String Parameter: 1,2,3,4,5
String List Parameter Item: 1,2
String List Parameter Item: 3
String List Parameter Item: 4,5
String Named Item: 1,2,3,4,5
String Option: 1,2,3,4,5
String List Option Item: 1,2
String List Option Item: 3
String List Option Item: 4,5
#@<OUT> Escaped comma in list parsing
String Parameter: 1,2,3,4,5
String List Parameter Item: 1,2
String List Parameter Item: 3
String List Parameter Item: 4,5
String Named Item: 1,2,3,4,5
String Option: 1,2,3,4,5
String List Option Item: 1,2
String List Option Item: 3
String List Option Item: 4,5
#@<OUT> Escaped quoting: \", \'
String Parameter: "1",2,3,4,5
String List Parameter Item: 1
String List Parameter Item: 2
String List Parameter Item: 3
String List Parameter Item: 4
String List Parameter Item: 5
String Named Item: 1,2,"3",4,5
String Option: 1,2,"3",4,5
String List Option Item: 1
String List Option Item: 2
String List Option Item: 3
String List Option Item: 4
String List Option Item: 5
String Parameter: '1',2,3,4,5
String List Parameter Item: 1
String List Parameter Item: 2
String List Parameter Item: 3
String List Parameter Item: 4
String List Parameter Item: 5
String Named Item: 1,2,'3',4,5
String Option: 1,2,'3',4,5
String List Option Item: 1
String List Option Item: 2
String List Option Item: 3
String List Option Item: 4
String List Option Item: 5
#@<OUT> Escaped equal: \=
String Parameter: =1,2,3,4,5
String List Parameter Item: 1=2
String List Parameter Item: 3
String List Parameter Item: 4=5
String Named Item: =1,2,3,4=5
String Option: =1,2,3,4=5
String List Option Item: 1=2
String List Option Item: 3
String List Option Item: 4=5
#@<OUT> CLI calling plugin nested grand child function
Unique Parameter: Success!!
| 25.796296
| 81
| 0.734865
|
#@<OUT> CLI --help
The following objects provide command line operations:
cli_tester
CLI Integration Testing Plugin
cluster
Represents an InnoDB cluster.
dba
InnoDB cluster and replicaset management functions.
rs
Represents an InnoDB ReplicaSet.
shell
Gives access to general purpose functions and properties.
util
Global object that groups miscellaneous tools like upgrade checker and
JSON import.
#@<OUT> CLI plugin --help
The following object provides command line operations at 'cli_tester':
emptyChild
Empty object, exposes no functions but child does.
The following operations are available at 'cli_tester':
test
Testing cmd line args.
#@<OUT> CLI plugin function --help
NAME
test - Testing cmd line args.
SYNTAX
cli_tester test <stritem> <strlist> --namedstr=<str> <options>
WHERE
stritem: String - string parameter.
strlist: Array - string list parameter.
OPTIONS
--namedstr=<str>
String named parameter (for being after a list parameter).
--str=<str>
String option.
--strlist=<str list>
String list option.
#@<OUT> Interactive plugin function --help
ERROR: Invalid operation for cli_tester object: test-interactive
#@<OUT> Interactive plugin function help
NAME
testInteractive - Testing interactive function.
SYNTAX
cli_tester.testInteractive()
#@<OUT> CLI plugin nested child --help
The following object provides command line operations at 'cli_tester emptyChild':
grandChild
Grand child object exposing a function.
#@<OUT> CLI plugin nested grand child --help
The following operations are available at 'cli_tester emptyChild grandChild':
grand-child-function
Testing cmd line args in nested objects.
#@<OUT> CLI plugin nested grand child function --help
The following operations are available at 'cli_tester emptyChild grandChild':
grand-child-function
Testing cmd line args in nested objects.
#@<OUT> Test string list handling
String Parameter: 1,2,3,4,5
String List Parameter Item: 1
String List Parameter Item: 2
String List Parameter Item: 3
String List Parameter Item: 4
String List Parameter Item: 5
String Named Item: 1,2,3,4,5
String Option: 1,2,3,4,5
String List Option Item: 1
String List Option Item: 2
String List Option Item: 3
String List Option Item: 4
String List Option Item: 5
#@<OUT> Test string list quoted args for interpreter
String Parameter: 1,2,3,4,5
String List Parameter Item: 1,2
String List Parameter Item: 3
String List Parameter Item: 4,5
String Named Item: 1,2,3,4,5
String Option: 1,2,3,4,5
String List Option Item: 1,2
String List Option Item: 3
String List Option Item: 4,5
#@<OUT> Escaped comma in list parsing
String Parameter: 1,2,3,4,5
String List Parameter Item: 1,2
String List Parameter Item: 3
String List Parameter Item: 4,5
String Named Item: 1,2,3,4,5
String Option: 1,2,3,4,5
String List Option Item: 1,2
String List Option Item: 3
String List Option Item: 4,5
#@<OUT> Escaped quoting: \", \'
String Parameter: "1",2,3,4,5
String List Parameter Item: 1
String List Parameter Item: 2
String List Parameter Item: 3
String List Parameter Item: 4
String List Parameter Item: 5
String Named Item: 1,2,"3",4,5
String Option: 1,2,"3",4,5
String List Option Item: 1
String List Option Item: 2
String List Option Item: 3
String List Option Item: 4
String List Option Item: 5
String Parameter: '1',2,3,4,5
String List Parameter Item: 1
String List Parameter Item: 2
String List Parameter Item: 3
String List Parameter Item: 4
String List Parameter Item: 5
String Named Item: 1,2,'3',4,5
String Option: 1,2,'3',4,5
String List Option Item: 1
String List Option Item: 2
String List Option Item: 3
String List Option Item: 4
String List Option Item: 5
#@<OUT> Escaped equal: \=
String Parameter: =1,2,3,4,5
String List Parameter Item: 1=2
String List Parameter Item: 3
String List Parameter Item: 4=5
String Named Item: =1,2,3,4=5
String Option: =1,2,3,4=5
String List Option Item: 1=2
String List Option Item: 3
String List Option Item: 4=5
#@<OUT> CLI calling plugin nested grand child function
Unique Parameter: Success!!
| 0
| 0
| 0
|
36840bcc724e35743cfcc58a05740a503f98cad9
| 2,507
|
py
|
Python
|
neurons/form/test/__init__.py
|
plq/neurons
|
f3627975daa3662e3a14265dca1150799ca0d27a
|
[
"BSD-3-Clause"
] | 4
|
2015-07-14T09:37:11.000Z
|
2017-01-07T09:55:59.000Z
|
neurons/form/test/__init__.py
|
arskom/neurons
|
d969dbac6c10a5cc1da32031740883e450889414
|
[
"BSD-3-Clause"
] | 3
|
2016-06-02T13:52:38.000Z
|
2018-11-22T08:55:20.000Z
|
neurons/form/test/__init__.py
|
plq/neurons
|
f3627975daa3662e3a14265dca1150799ca0d27a
|
[
"BSD-3-Clause"
] | 7
|
2015-02-01T14:02:33.000Z
|
2016-07-21T20:14:23.000Z
|
# encoding: utf8
#
# This file is part of the Neurons project.
# Copyright (c), Arskom Ltd. (arskom.com.tr),
# Burak Arslan <burak.arslan@arskom.com.tr>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Arskom Ltd., the neurons project nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from lxml import etree
| 38.569231
| 80
| 0.69645
|
# encoding: utf8
#
# This file is part of the Neurons project.
# Copyright (c), Arskom Ltd. (arskom.com.tr),
# Burak Arslan <burak.arslan@arskom.com.tr>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Arskom Ltd., the neurons project nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from lxml import etree
def strip_ns(par):
par.tag = par.tag.split('}', 1)[-1]
if len(par.nsmap) > 0:
par2 = etree.Element(par.tag, par.attrib)
par2.text = par.text
par2.tail = par.tail
par2.extend(par.getchildren())
par.getparent().insert(par.getparent().index(par), par2)
par.getparent().remove(par)
par = par2
for elt in par:
elt.tag = elt.tag.split('}', 1)[-1]
if len(elt.nsmap) > 0:
elt2 = etree.Element(elt.tag, elt.attrib)
elt2.text = elt.text
elt2.tail = elt.tail
elt2.extend(elt.getchildren())
elt.getparent().insert(elt.getparent().index(elt), elt2)
elt.getparent().remove(elt)
elt = elt2
strip_ns(elt)
return par
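# Usage sketch (the XML literal is an illustrative assumption; note that
# strip_ns expects the element passed in to either have a parent or carry
# an empty nsmap, so a namespace-free wrapper root is used here):
#   doc = etree.fromstring('<r><a xmlns="urn:x"><b/></a></r>')
#   strip_ns(doc)   # descendant tags become plain 'a' and 'b'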
| 763
| 0
| 23
|
64ebb7f5b6f18386008165ea0eb4a0ab3cd8a833
| 17,729
|
py
|
Python
|
Fluid2d/core/operators.py
|
chenyg1119/fluid-simulation
|
f504042602fab54855177f20be2dfe1c7ee4f2d9
|
[
"MIT"
] | null | null | null |
Fluid2d/core/operators.py
|
chenyg1119/fluid-simulation
|
f504042602fab54855177f20be2dfe1c7ee4f2d9
|
[
"MIT"
] | null | null | null |
Fluid2d/core/operators.py
|
chenyg1119/fluid-simulation
|
f504042602fab54855177f20be2dfe1c7ee4f2d9
|
[
"MIT"
] | null | null | null |
from param import Param
from numpy import zeros, zeros_like, roll
from gmg.hierarchy import Gmg
import fortran_advection as fa
import fortran_operators as fo
import fortran_diag as fd
from fourier import Fourier
| 35.529058
| 79
| 0.504371
|
from param import Param
from numpy import zeros, zeros_like, roll
from gmg.hierarchy import Gmg
import fortran_advection as fa
import fortran_operators as fo
import fortran_diag as fd
from fourier import Fourier
class Operators(Param):
def __init__(self, param, grid):
self.list_param = ['varname_list', 'tracer_list',
'whosetspsi', 'mpi', 'npx', 'npy',
'nh', 'gravity', 'f0', 'beta', 'Rd',
'qgoperator', 'order', 'Kdiff', 'diffusion',
'enforce_momentum', 'isisland', 'aparab',
'flux_splitting_method', 'hydroepsilon',
'myrank', 'geometry', 'sqgoperator']
param.copy(self, self.list_param)
self.list_grid = ['msk', 'nxl', 'nyl', 'dx', 'dy',
'bcarea', 'mpitools', 'msknoslip',
'mskbc', 'domain_integration',
'nh', 'xr0', 'yr0', 'i0', 'j0', 'area']
grid.copy(self, self.list_grid)
self.first_time = True
# internal work array for the inversion
self.work = zeros((self.nyl, self.nxl))
self.work2 = zeros((self.nyl, self.nxl))
pp = {'np': param.npx, 'mp': param.npy, 'nh': param.nh,
'n': param.nx//param.npx, 'm': param.ny//param.npy,
'omega': 8./9., 'npmpmax': 1, 'verbose': False,
'dx': grid.dx, 'dy': grid.dy, 'n1': 32, 'n0': 4,
'method': 'deep', 'nagglo': 2,
'hydroepsilon': param.hydroepsilon,
'relaxation': param.relaxation}
# load the multigrid solver
#
# WARNING: the multigrid needs the mask at cell corners!!!
# not at cell centers
mskr = self.msk*1.
# this piece is a bit awkward: to initialize gmg, we need
# a mask with a halo properly filled but the fill_halo method
# belongs to gmg. We have a circular definition.
# the trick: define a dummy gmg first a msk=1 everywhere
# then grab the fill_halo method and redefine once again the
# multigrid, this time with the proper mask
# self.gmg = Gmg(pp,mskr)
# borrow the fill_halo from the multigrid
# self.fill_halo = self.gmg.grid[0].halo.fill
fo.celltocorner(mskr, self.work)
# self.fill_halo(self.work)
# del self.gmg
# del self.fill_halo
self.work[self.work < 1.] = 0.
self.mskp = self.msk*0
self.mskp[self.work == 1.] = 1
pp['verbose'] = True
if self.myrank == 0:
print('-'*50)
print(' Multigrid hierarchy')
print('-'*50)
if hasattr(self, 'qgoperator'):
pp['qgoperator'] = True
pp['Rd'] = self.Rd
self.gmg = Gmg(pp, self.work)
else:
self.gmg = Gmg(pp, self.work)
if hasattr(self, 'sqgoperator'):
self.fourier = Fourier(param, grid)
# borrow the fill_halo from the multigrid
self.fill_halo = self.gmg.grid[0].halo.fill
grid.fill_halo = self.gmg.grid[0].halo.fill
self.blwidth = param.Lx*0.05
# tentative for a regularized no-slip source term
coef = 0.*zeros_like(self.mskp)
coef[1:, 1:] = (self.mskp[:-1, 1:]+self.mskp[:-1, :-1]
+ self.mskp[1:, 1:]+self.mskp[1:, :-1])
# nbpsibc is the number of land psi-points surrounding a fluid cell
self.nbpsibc = (4.-coef)*self.msk
self.nbpsibc[self.nbpsibc > 0] = 1.
self.set_boundary_msk()
self.cst = zeros(5,)
# select the proper flux discretization
if self.order % 2 == 0:
self.fortran_adv = fa.adv_centered
self.cst[0] = grid.dx
self.cst[1] = grid.dy
self.cst[2] = 0.05
self.cst[3] = 0 # umax
self.cst[4] = 0 # unused
# should be updated at each timestep
# self.cst[3]=param.umax
else:
self.fortran_adv = fa.adv_upwind
self.cst[0] = grid.dx
self.cst[1] = grid.dy
self.cst[2] = 0.05
self.cst[3] = 0 # umax
self.cst[4] = self.aparab
# should be updated at each timestep
# self.cst[3]=param.umax
# controls the flux splitting method
# 0 = min/max
# 1 = parabolic
list_fs_method = ['minmax', 'parabolic']
if self.flux_splitting_method in list_fs_method:
self.fs_method = list_fs_method.index(
self.flux_splitting_method)
else:
print('Warning: %s does not exist' % self.flux_splitting_method)
print('replaced with the default: parabolic')
self.fs_method = list_fs_method.index('parabolic')
# these coefficients below are used for the thermalwind model
coef = 0.*zeros_like(self.msk)
coef[1:-1, 1:-1] = self.msk[1:-1, 2:]+self.msk[1:-1, 0:-2]
coef[coef < 2] = 0.
coef[coef == 2] = 0.5
self.fill_halo(coef)
self.coefb = coef*1.
coef = 0.*zeros_like(self.msk)
coef[1:-1, 1:-1] = self.msk[2:, 1:-1]+self.msk[0:-2, 1:-1]
coef[coef < 2] = 0.
coef[coef == 2] = 0.5
self.fill_halo(coef)
self.coefV = coef*1.
if type(self.Kdiff) != dict:
K = self.Kdiff
self.Kdiff = {}
for trac in self.tracer_list:
self.Kdiff[trac] = K
if self.diffusion:
print('diffusion coefficients')
print(' => ', self.Kdiff)
def set_boundary_msk(self):
""" for the no slip boundary source term """
# nh = self.nh
msk = self.msknoslip
z = (roll(msk, -1, axis=1)+roll(msk, -1, axis=0)
+ roll(msk, +1, axis=1)+roll(msk, +1, axis=0)-4*msk)
z = z*msk
self.mskbc = self.msk*0
self.mskbc[z < 0] = 1
# the halo will be filled later in operator.py
# when fill_halo will become available
# we can now fix the boundary mask
# to go with the new definition for the source term
# self.mskbc = self.nbpsibc.copy()
self.mskbc *= self.msknoslip
self.fill_halo(self.mskbc)
# idx in the 2D array where boundary terms are computed
# used for storage and i/o
# self.idxbc = where(self.mskbc==1)
self.bcarea = self.domain_integration(self.mskbc)
self.x2bc = self.domain_integration((self.xr0)**2
* self.mskbc*self.msknoslip)
self.y2bc = self.domain_integration((self.yr0)**2
* self.mskbc*self.msknoslip)
return
def smooth(msk, msk0, k):
y = (+roll(msk, -1, axis=1)+roll(msk, -1, axis=0)
+ roll(msk, +1, axis=1)+roll(msk, +1, axis=0))
z = msk*1.
z[y > 0] = k+1
z[msk > 0] = msk[msk > 0]
z[msk0 == 0] = 0
self.fill_halo(z)
return z
z0 = 1-msk*1.
nk = int(round((self.blwidth/self.dx)))
nk = 1
for k in range(nk):
z = z0*1.
z0 = smooth(z, msk, k)
z0[z0 == 0] = nk
z0 = z0/nk
z0 *= msk
z0 = (1.-z0)**(nk/2.)
z0[msk == 0] = 1
self.cv = (roll(z0, -1, axis=1)+z0)*.5
self.cu = (roll(z0, -1, axis=0)+z0)*.5
# self.mskbc = z0
# self.bcarea = self.domain_integration(z0)
def rhs_adv(self, x, t, dxdt):
""" compute -div(u*tracer) using finite volume flux discretization
the flux is computed at edge cells using p-th order interpolation
for p even, the flux is centered
for p odd, the flux is upwinded (more points on the upwind side) """
iu = self.varname_list.index('u')
iv = self.varname_list.index('v')
u = x[iu]
v = x[iv]
for trac in self.tracer_list:
ik = self.varname_list.index(trac)
y = dxdt[ik]
self.fortran_adv(self.msk, x[ik], y, u, v,
self.cst, self.nh,
self.fs_method,
self.order)
self.fill_halo(y)
            # for an unknown reason dxdt[ik] is not updated in place by the
            # Fortran routine; it has to be copied back manually
            # (this incurs an extra data movement)
dxdt[ik][:, :] = y
def wallshear(self, x, shear):
# ip = self.varname_list.index('psi')
# meansource = fo.computewallshear(self.msk, x[ip],
# shear, self.dx, self.nh)
return
def rhs_noslip(self, x, source):
""" add the vorticity source term along the boundary to enforce
zero tangential velocity (=no-slip) """
ip = self.varname_list.index('psi')
# iu = self.varname_list.index('u')
# iv = self.varname_list.index('v')
iw = self.varname_list.index(self.whosetspsi)
fo.cornertocell(x[ip], self.work)
meansource = fo.computenoslipsourceterm(
self.msknoslip, x[ip], self.work, self.dx, self.dy, self.nh)
# K = self.dx*self.dy * 0.25
# self.work2[:, :] = self.work[:, :]
# for kt in range(2):
# fo.add_diffusion(self.msk, self.work, self.dx,
# self.nh, K, self.work2)
# self.fill_halo(self.work2)
# fo.add_diffusion(self.msk, self.work2, self.dx,
# self.nh, K, self.work)
# self.fill_halo(self.work)
# # self.work = self.work/(self.dx**2)*self.mskbc
source[:, :] = self.work
# this step is SUPER important to ensure GLOBAL vorticity conservation
meansource = self.domain_integration(source) / self.bcarea
source -= meansource*self.mskbc
if self.enforce_momentum:
xr = self.xr0
yr = self.yr0
# this step ensures the zero momentum
px = fd.computedotprod(self.msk, source, xr, self.nh)
py = fd.computedotprod(self.msk, source, yr, self.nh)
cst = self.mpitools.local_to_global([(px, 'sum'), (py, 'sum')])
px, py = cst[0]/self.x2bc, cst[1]/self.y2bc
source -= (px*xr+py*yr)*self.mskbc
self.fill_halo(source)
x[iw] -= source
def rhs_diffusion(self, x, t, dxdt, coef=1.):
""" add a diffusion term on the tracer variables """
for trac in self.tracer_list:
ik = self.varname_list.index(trac)
y = dxdt[ik]
fo.add_diffusion(self.msk, x[ik], self.dx, self.nh,
coef*self.Kdiff[trac], y)
self.fill_halo(y)
dxdt[ik] = y
def rhs_torque(self, x, t, dxdt):
""" compute g*db/dx for the Boussinesq model """
ib = self.varname_list.index('buoyancy')
iw = self.varname_list.index('vorticity')
y = dxdt[iw]
b = x[ib]
#y[1:-1, 1:-1] += self.gravity*self.diffx(b)
y *= self.msk
fo.add_torque(self.msk, b, self.dx, self.nh, self.gravity, y)
self.fill_halo(y)
dxdt[iw][:, :] = y
def rhs_torque_density(self, x, t, dxdt):
""" compute g*db/dx for the Boussinesq model """
ib = self.varname_list.index('density')
iw = self.varname_list.index('vorticity')
y = dxdt[iw]
b = x[ib]
#y[1:-1, 1:-1] += self.gravity*self.diffx(b)
# y *= self.msk
# trick: use -gravity to account for density being of opposite sign to buoyancy
fo.add_torque(self.msk, b, self.dx, self.nh, -self.gravity, y)
self.fill_halo(y)
dxdt[iw][:, :] = y
def diffx(self, x):
nh = self.nh
if self.i0 == self.npx-1:
x[:, -nh] = 2*x[:, -nh-1]-x[:, -nh-2]
if self.i0 == 0:
x[:, nh-1] = 2*x[:, nh]-x[:, nh+1]
return 0.5*(x[1:-1, 2:]-x[1:-1, :-2])/self.dx
def diff1x(self, x):
nh = self.nh
if self.i0 == self.npx-1:
x[:, -nh] = 2*x[:, -nh-1]-x[:, -nh-2]
if self.i0 == 0:
x[:, nh-1] = 2*x[:, nh]-x[:, nh+1]
return (x[:, 1:]-x[:, :-1])/self.dx
def diffz(self, x):
nh = self.nh
if self.j0 == self.npy-1:
x[-nh, :] = 2*x[-nh-1, :]-x[-nh-2, :]
if self.j0 == 0:
x[nh-1, :] = 2*x[nh, :]-x[nh+1, :]
return 0.5*(x[2:, 1:-1]-x[:-2, 1:-1])/self.dy
def jacobian(self, x, y):
return self.diffx(x)*self.diffz(y)-self.diffz(x)*self.diffx(y)
def rhs_thermalwind(self, x, t, dxdt):
iu = self.varname_list.index('u')
ib = self.varname_list.index('buoyancy')
iw = self.varname_list.index('vorticity')
iV = self.varname_list.index('V')
nh = self.nh
# add the thermal wind balance
# g*db/dx + f0*dV/dz
# to domega/dt
b = x[ib]
V = x[iV]
dw = dxdt[iw]
y = self.work
# dw[1:-1, 1:-1] += self.diffx(b)*self.gravity
# dw[1:-1, 1:-1] -= self.diffz(V)*self.f0
y[1:-1, 1:-1] = self.diffx(b)*self.gravity
y[1:-1, 1:-1] -= self.diffz(V)*self.f0
# dw[1:-1, 1:-1] += self.coefb[1:-1, 1:-1]*self.diffx(b)*self.gravity
# dw[1:-1, 1:-1] -= self.coefV[1:-1, 1:-1]*self.diffz(V)*self.f0
# dw[:, :nh+1] = 0
# dw[:, -nh-1:] = 0
y *= self.msk
self.fill_halo(y)
dw[:, :] += y
u = x[iu]
# dxdt[iV][:, 1:] -= 0.5*self.f0*(u[:, :-1]+u[:, 1:])
# dxdt[iV] *= self.msk
# self.fill_halo(dxdt[iV])
y[:, 1:] = - 0.5*self.f0*(u[:, :-1]+u[:, 1:])
y *= self.msk
self.fill_halo(y)
dxdt[iV][:, :] += y
def fourier_invert_vorticity(self, x, flag='full'):
""" invert using Fourier transform """
iu = self.varname_list.index('u')
iv = self.varname_list.index('v')
ip = self.varname_list.index('psi')
ivor = self.varname_list.index('vorticity')
ipv = self.varname_list.index('pv')
u = x[iu]
v = x[iv]
psi = x[ip]
pv = x[ipv]
vor = x[ivor]
self.fourier.invert(pv, psi, vor)
self.fill_halo(psi)
self.fill_halo(vor)
self.first_time = False
# compute (u,v) @ U,V points from psi @ cell corner
fo.computeorthogradient(self.msk, psi, self.dx, self.dy, self.nh, u, v)
x[iu] = u
x[iv] = v
def invert_vorticity(self, x, flag='full', island=False):
""" compute psi from vorticity (or 'whosetspsi' in general)
this routine interpolates the vorticity from cell centers to
cell corners (where psi is defined)
it then solves div*grad psi = omega with psi=0 along the boundary
(Dirichlet condition) using a multigrid
the non-divergent velocity is computed from psi"""
iu = self.varname_list.index('u')
iv = self.varname_list.index('v')
ip = self.varname_list.index('psi')
iw = self.varname_list.index(self.whosetspsi)
u = x[iu]
v = x[iv]
psi = x[ip]
fo.celltocorner(x[iw], self.work)
#fo.celltocornerbicubic(x[iw], self.work)
if island:
# correcting RHS for islands
self.work[:, :] -= self.rhsp
if flag == 'fast':
ite, res = self.gmg.twoVcycle(psi,
self.work,
{'maxite': 1,
'tol': 1e-6,
'verbose': True})
# ite, res = self.gmg.solve(psi, self.work,
# {'maxite': 2,
# 'tol': 1e-8,
# 'verbose': False})
else:
# compute to machine accuracy
if self.first_time:
verbose = True
else:
verbose = False
if (self.myrank == 0) and verbose:
print('-'*50)
print(' Convergence of the vorticity inversion')
print(' the residual should decrease by several orders')
print(' of magnitude otherwise something is wrong')
print('-'*50)
ite, res = self.gmg.solve(psi, self.work,
{'maxite': 4,
'tol': 1e-11,
'verbose': verbose})
if self.geometry == 'perio':
# make sure psi has zero mean (to avoid the drift)
psim = self.domain_integration(psi) / self.area
psi -= psim
# don't apply fill_halo to it
# [because fill_halo, as written, applies periodic BCs]
psi = psi*self.mskp
if island:
# we set psi on the boundary values by adding
# self.psi (defined in island module)
# before that line, psi=0 along all boundaries
psi += self.psi
# it should be added only if we invert for the total psi
# it should not be added if we compute the increment of psi
self.first_time = False
# compute (u,v) @ U,V points from psi @ cell corner
fo.computeorthogradient(self.msk, psi, self.dx, self.dy, self.nh, u, v)
# self.fill_halo(u)
# self.fill_halo(v)
x[iu] = u
x[iv] = v
x[ip] = psi
| 7,809
| 9,684
| 23
|
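A standalone numpy sketch of the centered-difference stencil used by diffx in the fluid model above; halo and MPI-rank handling (self.i0, self.nh) are omitted and the grid spacing is a placeholder:

import numpy as np

x = np.arange(25, dtype=float).reshape(5, 5)  # toy field with slope 1 along axis=1
dx = 1.0
# interior-point centered difference, the same stencil as diffx
ddx = 0.5 * (x[1:-1, 2:] - x[1:-1, :-2]) / dx
# every entry of ddx is 1.0, the slope of np.arange along axis=1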
c1ed7b55dbc27e47f041d53b097d4e35886d4daf
| 779
|
py
|
Python
|
storagedef/tests/test_session.py
|
gjo/storagedef
|
f5574378220369441c5480cb10d77f7b739e25be
|
[
"BSD-3-Clause"
] | null | null | null |
storagedef/tests/test_session.py
|
gjo/storagedef
|
f5574378220369441c5480cb10d77f7b739e25be
|
[
"BSD-3-Clause"
] | null | null | null |
storagedef/tests/test_session.py
|
gjo/storagedef
|
f5574378220369441c5480cb10d77f7b739e25be
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
| 31.16
| 74
| 0.657253
|
# -*- coding: utf-8 -*-
import unittest
class SessionTestCase(unittest.TestCase):
def test_load_localmemory(self):
from ..session import Session
from ..engines.localmemory import LocalMemoryStorageEngine
session = Session.from_config({
'provider':
'storagedef.engines.localmemory:LocalMemoryStorageEngine',
})
self.assertIsInstance(session.engine, LocalMemoryStorageEngine)
def test_load_s3(self):
from ..session import Session
from ..engines.s3 import S3StorageEngine
session = Session.from_config({
'provider': 'storagedef.engines.s3:S3StorageEngine',
'bucket': 'mybucket',
})
self.assertIsInstance(session.engine, S3StorageEngine)
| 640
| 20
| 77
|
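The tests above only exercise Session.from_config with a 'provider' entry of the form "module:ClassName". A minimal sketch of how such a resolver could work (an illustration under assumptions, not storagedef's actual implementation):

import importlib

def from_config_sketch(config):
    # hypothetical resolver: split "module:ClassName", import, instantiate
    module_name, class_name = config.pop('provider').split(':')
    engine_cls = getattr(importlib.import_module(module_name), class_name)
    return engine_cls(**config)  # remaining keys (e.g. 'bucket') become kwargs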
658e89ebe4dc3efb089e3dfff25af7b188ed2930
| 1,922
|
py
|
Python
|
python/gini.py
|
bpalmer4/code-snippets
|
e87f7baade69ff25e062f54d58b3f6f611c4c283
|
[
"MIT"
] | null | null | null |
python/gini.py
|
bpalmer4/code-snippets
|
e87f7baade69ff25e062f54d58b3f6f611c4c283
|
[
"MIT"
] | null | null | null |
python/gini.py
|
bpalmer4/code-snippets
|
e87f7baade69ff25e062f54d58b3f6f611c4c283
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
def gini(x):
"""Calculate the Gini coefficient for a series of observed values (x).
where:
* x is the array of observed values (and all x are positive, non-zero values)
Note: x must be a numpy array or a pandas series
See: https://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
"""
# - sanity checks
if not isinstance(x, pd.Series) and not isinstance(x, np.ndarray):
raise TypeError('input series must be a pandas Series or a numpy ndarray')
if x.min() <= 0:
raise ValueError('all input values in series must be positive and non-zero')
# let's work with numpy arrays
if isinstance(x, pd.Series):
x = x.to_numpy()
# sort values in ascending order
x = np.sort(x)
# n is the number of values observed
n = len(x)
# i is the rank of the x-values when sorted in ascending order
i = np.arange(1, n+1)
# - calculate the Gini coefficient
return ((2 * i - n - 1) * x).sum() / (n * x.sum())
# An alternate approach ...
def gini2(x):
""" Calculate Gini from observed values using summation-as-integration """
if not isinstance(x, pd.Series) and not isinstance(x, np.ndarray):
raise TypeError('input series must be a pandas Series or a numpy array')
if x.min() <= 0:
raise ValueError('all input values in series must be positive and non-zero')
# let's work with numpy arrays
if isinstance(x, pd.Series):
x = x.to_numpy()
# calculate the line-of-equality and the lorenz curve
lorenz = np.sort(x).cumsum()
height = lorenz[-1]
n = len(lorenz)
equality = np.arange(1, n+1) * height / n
# calculate Gini
A_area = (equality - lorenz).sum() # area between lorenz and equality
AB_area = n * height / 2 # area of triangle
return A_area / AB_area
| 33.137931
| 84
| 0.631113
|
import pandas as pd
import numpy as np
def gini(x):
"""Calculate the Gini coefficient for a series of observed values (x).
where:
* x is the array of observed values (and all x are positive, non-zero values)
Note: x must be a numpy array or a pandas series
See: https://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
"""
# - sanity checks
if not isinstance(x, pd.Series) and not isinstance(x, np.ndarray):
raise TypeError('input series must be a pandas Series or a numpy ndarray')
if x.min() <= 0:
raise ValueError('all input values in series must be positive and non-zero')
# let's work with numpy arrays
if isinstance(x, pd.Series):
x = x.to_numpy()
# sort values in ascending order
x = np.sort(x)
# n is the number of values observed
n = len(x)
# i is the rank of the x-values when sorted in ascending order
i = np.arange(1, n+1)
# - calculate the Gini coefficient
return ((2 * i - n - 1) * x).sum() / (n * x.sum())
# An alternate approach ...
def gini2(x):
""" Calculate Gini from observed values using summation-as-integration """
if not isinstance(x, pd.Series) and not isinstance(x, np.ndarray):
raise TypeError('input series must be a pandas Series or a numpy array')
if x.min() <= 0:
raise ValueError('all input values in series must be positive and non-zero')
# let's work with numpy arrays
if isinstance(x, pd.Series):
x = x.to_numpy()
# calculate the line-of-equality and the lorenz curve
lorenz = np.sort(x).cumsum()
height = lorenz[-1]
n = len(lorenz)
equality = np.arange(1, n+1) * height / n
# calculate Gini
A_area = (equality - lorenz).sum() # area between lorenz and equality
AB_area = n * height / 2 # area of triangle
return A_area / AB_area
| 0
| 0
| 0
|
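A quick sanity check of the two Gini implementations above, assuming the module is importable; the expected values are worked out by hand:

import numpy as np

equal = np.array([1.0, 1.0, 1.0, 1.0])
skewed = np.array([1.0, 1.0, 1.0, 97.0])
print(gini(equal))                  # 0.0 -- perfect equality
print(gini(skewed), gini2(skewed))  # both print 0.72 -- the two formulas agree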
c9a2aaa41b86faa1fb98953ab14ca22b8ca32369
| 3,160
|
py
|
Python
|
plugins/data/extract/link/referer-parser/sync_data.py
|
sniperkit/colly
|
4d1ea7dfb014b9ef4857bd9f24cad223a723938f
|
[
"Apache-2.0"
] | 2
|
2020-11-07T14:45:16.000Z
|
2020-12-19T20:26:15.000Z
|
plugins/data/extract/link/referer-parser/sync_data.py
|
sniperkit/colly
|
4d1ea7dfb014b9ef4857bd9f24cad223a723938f
|
[
"Apache-2.0"
] | 5
|
2021-03-10T09:06:43.000Z
|
2022-02-27T00:15:52.000Z
|
vendor/snowplow/referer-parser/sync_data.py
|
Jihun0109/marketing
|
c34f3a67cfbb2f7ea004cef9ac210c0eb5e4893c
|
[
"MIT"
] | 1
|
2019-09-20T20:44:02.000Z
|
2019-09-20T20:44:02.000Z
|
#!/usr/bin/env python
# Copyright (c) 2013 Martin Katrenik, Snowplow Analytics Ltd. All rights reserved.
#
# This program is licensed to you under the Apache License Version 2.0,
# and you may not use this file except in compliance with the Apache
# License Version 2.0.
# You may obtain a copy of the Apache License Version 2.0 at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the Apache License Version 2.0 is
# distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Apache License Version 2.0 for the specific language
# governing permissions and limitations there under.
# Authors:: Martin Katrenik, Alex Dean (mailto:support@snowplowanalytics.com)
# Copyright:: Copyright (c) 2013 Martin Katrenik, Snowplow Analytics Ltd
# License:: Apache License Version 2.0
# Syncs common referer-parser resources to the
# language-specific sub projects.
#
# Syncs:
# 1. The referers.yml, plus a generated JSON equivalent
# 2. The referer-tests.json
#
# Finishes by committing the synchronized resources.
import os
import shutil
import json
import yaml
import subprocess
root_path = os.path.dirname(__file__)
# Source paths
REFERER_SOURCE = os.path.join(root_path, 'resources', 'referers.yml')
REFERER_JSON_OUT = 'referers.json'
TEST_SOURCE = os.path.join(root_path, 'resources', 'referer-tests.json')
# Target paths
REFERER_TARGETS = [
os.path.join(root_path, "ruby","data"),
os.path.join(root_path, "java-scala","src","main","resources"),
os.path.join(root_path, "python","referer_parser","data"),
os.path.join(root_path, "nodejs","data"),
os.path.join(root_path, "dotnet","RefererParser","Resources"),
os.path.join(root_path, "php","data"),
os.path.join(root_path, "go", "data")
]
TEST_TARGETS = [
os.path.join(root_path, "java-scala","src","test","resources"),
# Add the remainder as the paths are determined
]
# JSON builder
JSON = build_json()
# File ops
# Sync process
for dest in REFERER_TARGETS:
sync_referers_to(dest)
for dest in TEST_TARGETS:
sync_tests_to(dest)
# Commit on current branch
commit = "git commit {0}".format(" ".join(REFERER_TARGETS + TEST_TARGETS))
msg = "\"Updated {0}, {1} and {2} in sub-folder following update(s) to master copy\"".format(REFERER_SOURCE, REFERER_JSON_OUT, TEST_SOURCE)
subprocess.call(commit + ' -m' + msg, shell=True)
| 30.384615
| 139
| 0.713291
|
#!/usr/bin/env python
# Copyright (c) 2013 Martin Katrenik, Snowplow Analytics Ltd. All rights reserved.
#
# This program is licensed to you under the Apache License Version 2.0,
# and you may not use this file except in compliance with the Apache
# License Version 2.0.
# You may obtain a copy of the Apache License Version 2.0 at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the Apache License Version 2.0 is
# distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Apache License Version 2.0 for the specific language
# governing permissions and limitations there under.
# Authors:: Martin Katrenik, Alex Dean (mailto:support@snowplowanalytics.com)
# Copyright:: Copyright (c) 2013 Martin Katrenik, Snowplow Analytics Ltd
# License:: Apache License Version 2.0
# Syncs common referer-parser resources to the
# language-specific sub projects.
#
# Syncs:
# 1. The referers.yml, plus a generated JSON equivalent
# 2. The referer-tests.json
#
# Finishes by committing the synchronized resources.
import os
import shutil
import json
import yaml
import subprocess
root_path = os.path.dirname(__file__)
# Source paths
REFERER_SOURCE = os.path.join(root_path, 'resources', 'referers.yml')
REFERER_JSON_OUT = 'referers.json'
TEST_SOURCE = os.path.join(root_path, 'resources', 'referer-tests.json')
# Target paths
REFERER_TARGETS = [
os.path.join(root_path, "ruby","data"),
os.path.join(root_path, "java-scala","src","main","resources"),
os.path.join(root_path, "python","referer_parser","data"),
os.path.join(root_path, "nodejs","data"),
os.path.join(root_path, "dotnet","RefererParser","Resources"),
os.path.join(root_path, "php","data"),
os.path.join(root_path, "go", "data")
]
TEST_TARGETS = [
os.path.join(root_path, "java-scala","src","test","resources"),
# Add the remainder as the paths are determined
]
# JSON builder
def build_json():
searches = yaml.load(open(REFERER_SOURCE))
return json.dumps(searches, sort_keys = False, indent = 4)
JSON = build_json()
# File ops
def copy_file(src, dest):
try:
print "copying {0} to {1} ".format(src, dest)
shutil.copy(src, dest)
except shutil.Error as e:
print('Error: %s' % e)
except IOError as e:
print('IOError: %s' % e.strerror)
def write_file(content, dest):
print "writing to {0} ".format(dest)
with open(dest, 'w') as f:
f.write(content)
# Sync process
def sync_referers_to(dest):
copy_file(REFERER_SOURCE, dest)
write_file(JSON, os.path.join(dest, REFERER_JSON_OUT))
def sync_tests_to(dest):
copy_file(TEST_SOURCE, dest)
for dest in REFERER_TARGETS:
sync_referers_to(dest)
for dest in TEST_TARGETS:
sync_tests_to(dest)
# Commit on current branch
commit = "git commit {0}".format(" ".join(REFERER_TARGETS + TEST_TARGETS))
msg = "\"Updated {0}, {1} and {2} in sub-folder following update(s) to master copy\"".format(REFERER_SOURCE, REFERER_JSON_OUT, TEST_SOURCE)
subprocess.call(commit + ' -m' + msg, shell=True)
| 575
| 0
| 112
|
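sync_data.py above is Python 2 (bare print statements). A Python 3-compatible rewrite of the two file helpers, assuming the behaviour should stay identical, would be:

def copy_file(src, dest):
    try:
        print("copying {0} to {1}".format(src, dest))
        shutil.copy(src, dest)
    except shutil.Error as e:
        print('Error: %s' % e)
    except IOError as e:
        print('IOError: %s' % e.strerror)

def write_file(content, dest):
    print("writing to {0}".format(dest))
    with open(dest, 'w') as f:
        f.write(content)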
b273dda16cf9a97de8724449d86597cd23dc6400
| 14,414
|
py
|
Python
|
equation.py
|
MoZhou1995/DeepPDE_ActorCritic
|
9cc84b3f3c24318c72e9ac9b6fe8506f476c81bf
|
[
"MIT"
] | null | null | null |
equation.py
|
MoZhou1995/DeepPDE_ActorCritic
|
9cc84b3f3c24318c72e9ac9b6fe8506f476c81bf
|
[
"MIT"
] | null | null | null |
equation.py
|
MoZhou1995/DeepPDE_ActorCritic
|
9cc84b3f3c24318c72e9ac9b6fe8506f476c81bf
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from scipy.stats import multivariate_normal as normal
class Equation(object):
"""Base class for defining PDE related function."""
def w_tf(self, x, u):
"""Running cost in control problems."""
raise NotImplementedError
def Z_tf(self, x):
"""Terminal cost in control problems."""
raise NotImplementedError
def b_np(self, x): #num_sample * 1
"""a function whose level set is the boundary, in numpy"""
return np.sum(x**2, 1, keepdims=True) - (self.R ** 2)
def b_tf(self, x): #num_sample * 1
"""a function whose level set is the boundary, in tensorflow"""
return tf.reduce_sum(x**2, 1, keepdims=True) - (self.R ** 2)
def V_true(self, x):
"""True value function"""
raise NotImplementedError
def u_true(self, x):
"""Optimal control"""
raise NotImplementedError
def sigma(self, x, u, num_sample): #num_sample x dim x dim_w
"""diffusion coefficient"""
raise NotImplementedError
def drift(self, x, u):
"""drift in the SDE"""
raise NotImplementedError
def diffusion(self, x, u, dw, num_sample):
"""diffusion in the SDE"""
raise NotImplementedError
class LQR(Equation):
"""linear quadratic regulator"""
class VDP(Equation):
"""Van Der Pol oscillator"""
class ekn(Equation):
"""Diffusive Eikonal equation"""
class LQR_var(Equation):
"""linear quadratic regulator"""
| 46.198718
| 180
| 0.580477
|
import numpy as np
import tensorflow as tf
from scipy.stats import multivariate_normal as normal
class Equation(object):
"""Base class for defining PDE related function."""
def __init__(self, eqn_config):
self.dim = eqn_config.dim
self.gamma = eqn_config.discount
self.R = eqn_config.R
self.control_dim = eqn_config.control_dim
def sample_normal(self, num_sample, N): #normal sample for BM
r_Sample = np.random.uniform(low=0, high=self.R, size=[num_sample,1])
r = r_Sample**(1 / self.dim) * (self.R**((self.dim- 1) / self.dim ))
angle = normal.rvs(size=[num_sample, self.dim])
norm = np.sqrt(np.sum(angle**2, 1, keepdims=True))
x0 = r * angle / norm
dw_sample = normal.rvs(size=[num_sample, self.dim, N])# * sqrt_delta_t
x_bdry = normal.rvs(size=[num_sample, self.dim])
norm = np.sqrt(np.sum(np.square(x_bdry), 1, keepdims=True))
x_bdry = self.R * x_bdry / norm
return x0, dw_sample, x_bdry
def sample_bounded(self, num_sample, N): #bdd sample for BM
r_Sample = np.random.uniform(low=0, high=self.R, size=[num_sample,1])
r = r_Sample**(1 / self.dim) * (self.R**((self.dim- 1) / self.dim ))
angle = normal.rvs(size=[num_sample, self.dim])
norm = np.sqrt(np.sum(angle**2, 1, keepdims=True))
x0 = r * angle / norm
dw_sample = np.random.randint(6,size=[num_sample, self.dim, N])
dw_sample = np.floor((dw_sample - 1)/4) * np.sqrt(3.0)
x_bdry = normal.rvs(size=[num_sample, self.dim])
norm = np.sqrt(np.sum(np.square(x_bdry), 1, keepdims=True))
x_bdry = self.R * x_bdry / norm
return x0, dw_sample, x_bdry
def sample0(self, num_sample, N):
x0 = np.zeros(shape=[num_sample, self.dim]) + 0.01
dw_sample = normal.rvs(size=[num_sample, self.dim, N])
x_bdry = normal.rvs(size=[num_sample, self.dim])
norm = np.sqrt(np.sum(np.square(x_bdry), 1, keepdims=True))
x_bdry = self.R * x_bdry / norm
return x0, dw_sample, x_bdry
def propagate_naive(self, num_sample, x0, dw_sample, NN_control, training, T, N, cheat):
# the most naive scheme: just stop where the next step would exit the domain
delta_t = T / N
sqrt_delta_t = np.sqrt(delta_t)
x_smp = tf.reshape(x0, [num_sample, self.dim, 1])
x_i = x0
flag = np.ones([num_sample])
for i in range(N):
if cheat:
u_i = self.u_true(x_i)
else:
u_i = NN_control(x_i, training, need_grad=False)
delta_x = self.drift(x_i, u_i) * delta_t + self.diffusion(x_i, u_i, dw_sample[:, :, i], num_sample) * sqrt_delta_t
x_iPlus1_temp = x_i + delta_x
Exit = self.b_tf(x_iPlus1_temp) #Exit>=0 means out
Exit = tf.reshape(tf.math.ceil((tf.math.sign(Exit)+1)/2), [num_sample]) #1 for Exit>=0, 0 for Exit<0
coef_i = flag * (1 - Exit)
if i==0:
coef = tf.reshape(coef_i, [num_sample, 1])
else:
coef = tf.concat([coef, tf.reshape(coef_i, [num_sample, 1])], axis=1)
x_i = x_i + delta_x * tf.reshape(coef_i, [num_sample,1])
x_smp = tf.concat([x_smp, tf.reshape(x_i, [num_sample, self.dim, 1])], axis=2)
flag = flag * (1 - Exit)
dt = np.ones([num_sample,N]) * delta_t
return x_smp, dt, coef
def propagate_adaptive(self, num_sample, x0, dw_sample, NN_control, training, T, N, cheat):
# the new scheme
delta_t = T / N
x_smp = tf.reshape(x0, [num_sample, self.dim, 1])
x_i = x0
x0_norm = tf.sqrt(tf.reduce_sum(x0**2,1))
#temp: 2 for inside (inner); 0 (and 1) for boundary layer; -2 (and -1) for outside
temp = tf.sign(self.R - x0_norm - self.sigma_Up*np.sqrt(3 * self.dim * delta_t)) + tf.sign(self.R - x0_norm)
#flag: 2 for inside; 1 means the step size needs modification; 0 means on the boundary, but we still move for at least a first step.
flag = np.ones([num_sample]) + tf.math.floor(temp/2)
for i in range(N):
xi_norm = tf.sqrt(tf.reduce_sum(x_i**2,1))
dt_i = (2*flag - (flag**2)) * ((self.R - xi_norm)**2) / (3 * self.dim * self.sigma_Up**2) + (flag**2 - 2*flag + 1) * delta_t
dt_i = tf.maximum(dt_i, delta_t*1e-4)
if cheat:
u_i = self.u_true(x_i)
else:
u_i = NN_control(x_i, training, need_grad=False)
delta_x = self.drift(x_i, u_i) * tf.reshape(dt_i, [num_sample,1]) + self.diffusion(x_i, u_i, dw_sample[:, :, i], num_sample) * tf.reshape(tf.sqrt(dt_i), [num_sample,1])
x_iPlus1_temp = x_i + delta_x
x_iPlus1_temp_norm = tf.sqrt(tf.reduce_sum(x_iPlus1_temp**2,1,keepdims=False))
temp = tf.sign(self.R - x_iPlus1_temp_norm - self.sigma_Up*np.sqrt(3 * self.dim * delta_t)) + tf.sign(self.R - x_iPlus1_temp_norm)
new_flag = (np.ones([num_sample]) + tf.math.floor(temp/2)) * tf.sign(flag)
coef_i = tf.sign(flag) * tf.sign(new_flag)
if i==0:
coef = tf.reshape(coef_i, [num_sample, 1])
dt = tf.reshape(dt_i, [num_sample, 1])
else:
coef = tf.concat([coef, tf.reshape(coef_i, [num_sample, 1])], axis=1)
dt = tf.concat([dt, tf.reshape(dt_i, [num_sample, 1])], axis=1)
x_i = x_i + delta_x * tf.reshape(coef_i, [num_sample,1])
x_smp = tf.concat([x_smp, tf.reshape(x_i, [num_sample, self.dim, 1])], axis=2)
flag = new_flag
return x_smp, dt, coef
def w_tf(self, x, u):
"""Running cost in control problems."""
raise NotImplementedError
def Z_tf(self, x):
"""Terminal cost in control problems."""
raise NotImplementedError
def b_np(self, x): #num_sample * 1
"""a function whose level set is the boundary, in numpy"""
return np.sum(x**2, 1, keepdims=True) - (self.R ** 2)
def b_tf(self, x): #num_sample * 1
"""a function whose level set is the boundary, in tensorflow"""
return tf.reduce_sum(x**2, 1, keepdims=True) - (self.R ** 2)
def V_true(self, x):
"""True value function"""
raise NotImplementedError
def u_true(self, x):
"""Optimal control"""
raise NotImplementedError
def sigma(self, x, u, num_sample): #num_sample x dim x dim_w
"""diffusion coefficient"""
raise NotImplementedError
def drift(self, x, u):
"""drift in the SDE"""
raise NotImplementedError
def diffusion(self, x, u, dw, num_sample):
"""diffusion in the SDE"""
raise NotImplementedError
class LQR(Equation):
"""linear quadratic regulator"""
def __init__(self, eqn_config):
super(LQR, self).__init__(eqn_config)
self.p = eqn_config.p
self.q = eqn_config.q
self.beta = eqn_config.beta
self.k = ( ((self.gamma**2) * (self.q**2) + 4 * self.p * self.q * (self.beta**2))**0.5 - self.q*self.gamma )/ (self.beta**2) / 2
self.sigma_Up = np.sqrt(2.0) #upper bound for sigma
def w_tf(self, x, u): #num_sample * 1
return tf.reduce_sum(self.p * tf.square(x) + self.q * tf.square(u), 1, keepdims=True) - 2*self.k*self.dim
def Z_tf(self, x): #num_sample * 1
return 0 * tf.reduce_sum(x, 1, keepdims=True) + self.k * (self.R ** 2)
def V_true(self, x): #num_sample * 1
return tf.reduce_sum(tf.square(x), 1, keepdims=True) * self.k
def u_true(self, x): #num_sample * dim
return -self.beta * self.k / self.q * x
def V_grad_true(self, x): #num_sample * dim
return 2 * self.k * x
def sigma(self, x, u, num_sample): # x is num_sample x dim, u is num_sample x dim_u, sigma is num_sample x dim x dim_w
return np.sqrt(2.0) * np.ones([num_sample,1,1]) * np.identity(self.dim)
def drift(self, x, u):
return self.beta * u
def diffusion(self, x, u, dw, num_sample): #sigma num_sample x dim x dim_w, dw is num_sample x dim_w
return tf.linalg.matvec(self.sigma(x, u, num_sample), dw) # num_sample x dim
class VDP(Equation):
"""Van Der Pol oscillator"""
def __init__(self, eqn_config):
super(VDP, self).__init__(eqn_config)
self.a = eqn_config.a
self.epsl = eqn_config.epsilon
self.q = eqn_config.q
self.sigma_Up = np.sqrt(2.0) #upper bound for sigma
def w_tf(self, x, u): #num_sample * 1
d = self.control_dim # dim/2
x1 = x[:,0:d] #num_sample * d
x2 = x[:,d:self.dim] #num_sample * d
px1 = tf.concat([x1[:,1:d],x1[:,0:1]],1) #num_sample * d
px2 = tf.concat([x2[:,1:d],x2[:,0:1]],1) #num_sample * d
nx1 = tf.concat([x1[:,d-1:d],x1[:,0:d-1]],1)
nx2 = tf.concat([x2[:,d-1:d],x2[:,0:d-1]],1)
dv1 = 2*self.a*x1 - self.epsl*(px1 + nx1) #num_sample * d
dv2 = 2*self.a*x2 - self.epsl*(px2 + nx2) #num_sample * d
temp = -self.gamma*self.epsl*(x1*px1 + x2*px2) + (dv2**2)/4/self.q - x2*dv1 - ((1-x1**2)*x2 - x1)*dv2
return tf.reduce_sum(temp + self.q*(u**2), 1, keepdims=True) + self.gamma*self.a*tf.reduce_sum(x**2, 1, keepdims=True) - 2*self.a*self.dim
def Z_tf(self, x): #num_sample * 1
return self.V_true(x)
def V_true(self, x): #num_sample * 1
d = self.control_dim # dim/2
x1 = x[:,0:d] #num_sample * d
x2 = x[:,d:self.dim] #num_sample * d
px1 = tf.concat([x1[:,1:d],x1[:,0:1]],1) #num_sample * d
px2 = tf.concat([x2[:,1:d],x2[:,0:1]],1) #num_sample * d
return self.a*tf.reduce_sum(x**2, 1, keepdims=True) - self.epsl*tf.reduce_sum(x1*px1 + x2*px2, 1, keepdims=True)
def u_true(self, x): #num_sample * 1
d = self.control_dim
x2 = x[:,d:self.dim] #num_sample * d
px2 = tf.concat([x2[:,1:d],x2[:,0:1]],1)
nx2 = tf.concat([x2[:,d-1:d],x2[:,0:d-1]],1)
return -(2*self.a*x2 - self.epsl*(px2 + nx2))/2/self.q
def V_grad_true(self, x): #num_sample * dim
d = self.control_dim # dim/2
x1 = x[:,0:d] #num_sample * d
x2 = x[:,d:self.dim] #num_sample * d
px1 = tf.concat([x1[:,1:d],x1[:,0:1]],1) #num_sample * d
px2 = tf.concat([x2[:,1:d],x2[:,0:1]],1) #num_sample * d
nx1 = tf.concat([x1[:,d-1:d],x1[:,0:d-1]],1)
nx2 = tf.concat([x2[:,d-1:d],x2[:,0:d-1]],1)
return tf.concat([2*self.a*x1 - self.epsl*(px1+nx1), 2*self.a*x2 - self.epsl*(px2+nx2)],axis=1)
def sigma(self, x, u, num_sample): # x is num_sample x dim, u is num_sample x dim_u, sigma is num_sample x dim x dim_w
return np.sqrt(2.0) * np.ones([num_sample,1,1]) * np.identity(self.dim)
def drift(self, x, u):
x_1 = x[:,0:self.control_dim] #num_sample * d
x_2 = x[:,self.control_dim:self.dim]
return tf.concat([x_2, (1 - x_1**2)*x_2 - x_1 + u],axis=1)
def diffusion(self, x, u, dw, num_sample): #sigma num_sample x dim x dim_w, dw is num_sample x dim_w
return tf.linalg.matvec(self.sigma(x, u, num_sample), dw) # num_sample x dim
class ekn(Equation):
"""Diffusive Eikonal equation"""
def __init__(self, eqn_config):
super(ekn, self).__init__(eqn_config)
self.a2 = eqn_config.a2
self.a3 = eqn_config.a3
self.epsl = 1/2/self.a2/self.dim
self.sigma_Up = np.sqrt(2.0) #upper bound for sigma
def w_tf(self, x, u): #num_sample * 1
return 0*tf.reduce_sum(x, 1, keepdims=True) + 1
def Z_tf(self, x): #num_sample * 1
return self.V_true(x)
def V_true(self, x): #num_sample * 1
x_norm = tf.reduce_sum(x**2, axis=1, keepdims=True)**0.5
return self.a3*x_norm**3 - self.a2 * x_norm**2# + self.a2 - self.a3
def u_true(self, x): #num_sample * 1
x_norm = tf.reduce_sum(x**2, axis=1, keepdims=True)**0.5
return x/x_norm
def V_grad_true(self, x):
x_norm = tf.reduce_sum(x**2, axis=1, keepdims=True)**0.5
return (3*self.a3*x_norm - 2*self.a2) * x
def sigma(self, x, u, num_sample): # x is num_sample x dim, u is num_sample x dim_u, sigma is num_sample x dim x dim_w
return np.sqrt(2.0) * np.ones([num_sample,1,1]) * np.identity(self.dim)
def drift(self, x, u):
x_norm = tf.reduce_sum(x**2, axis=1, keepdims=True)**0.5
c = 3 * (self.dim+1) * self.a3 / 2/self.a2 / self.dim / (2*self.a2 - 3*self.a3*x_norm)
return c * u
def diffusion(self, x, u, dw, num_sample): #sigma num_sample x dim x dim_w, dw is num_sample x dim_w
return tf.linalg.matvec(self.sigma(x, u, num_sample), dw) # num_sample x dim
class LQR_var(Equation):
"""linear quadratic regulator"""
def __init__(self, eqn_config):
super(LQR_var, self).__init__(eqn_config)
self.k = (np.sqrt(5)-1)/2
self.q = eqn_config.q
self.beta = eqn_config.beta
self.epsilon = eqn_config.epsilon
self.sigma_Up = np.sqrt(2.0) #upper bound for sigma
def w_tf(self, x, u): #num_sample * 1
temp = tf.reduce_sum(self.k**2 * (self.beta + 2*self.epsilon)**2 * x**2 / (self.q + 2*self.k * self.epsilon**2 * x**2), 1, keepdims=True)
return temp + tf.reduce_sum(self.gamma * self.k * tf.square(x) + self.q * tf.square(u), 1, keepdims=True) - 2*self.k*self.dim
def Z_tf(self, x): #num_sample * 1
return 0 * tf.reduce_sum(x, 1, keepdims=True) + self.k * (self.R ** 2)
def V_true(self, x): #num_sample * 1
return tf.reduce_sum(tf.square(x), 1, keepdims=True) * self.k
def u_true(self, x): #num_sample * dim
return - (self.beta + 2 * self.epsilon) * x / (self.q / self.k + 2 * self.epsilon**2 * x**2)
def V_grad_true(self, x): #num_sample * dim
return 2 * self.k * x
def sigma(self, x, u, num_sample): # x is num_sample x dim, u is num_sample x dim_u, sigma is num_sample x dim x dim_w
return np.sqrt(2.0) * tf.linalg.diag(1 + self.epsilon * x * u)
def drift(self, x, u):
return self.beta * u
def diffusion(self, x, u, dw, num_sample): #sigma num_sample x dim x dim_w, dw is num_sample x dim_w
return tf.linalg.matvec(self.sigma(x, u, num_sample), dw) # num_sample x dim
| 11,610
| 0
| 1,241
|
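A minimal usage sketch for the LQR equation above. The config values are placeholders, and only the numpy-based pieces (sampling and the true control) are exercised:

from types import SimpleNamespace

cfg = SimpleNamespace(dim=2, discount=0.1, R=1.0, control_dim=2,
                      p=1.0, q=1.0, beta=1.0)  # hypothetical settings
lqr = LQR(cfg)
x0, dw, x_bdry = lqr.sample_normal(num_sample=4, N=10)
# x0: (4, 2) start points inside the ball of radius R
# dw: (4, 2, 10) Brownian increments; x_bdry: (4, 2) boundary points
u = lqr.u_true(x0)  # optimal feedback control, -beta*k/q * x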
1aad48197290a7a10b69c9b0eba6aad4a26e0194
| 476
|
py
|
Python
|
core/handle_config.py
|
willdx/sensor_agent
|
28eebc8cb17dc53b5657d3898a196724da882ac8
|
[
"MIT"
] | null | null | null |
core/handle_config.py
|
willdx/sensor_agent
|
28eebc8cb17dc53b5657d3898a196724da882ac8
|
[
"MIT"
] | null | null | null |
core/handle_config.py
|
willdx/sensor_agent
|
28eebc8cb17dc53b5657d3898a196724da882ac8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
File Name: handle_config.py
Author: WillDX
mail: xiang.dai@shuyun.com
Created Time: Tue, 2016-04-19 09:36:58
'''
import json
from ConfigParser import ConfigParser
| 23.8
| 61
| 0.701681
|
# -*- coding: utf-8 -*-
'''
File Name: handle_config.py
Author: WillDX
mail: xiang.dai@shuyun.com
Created Time: Tue, 2016-04-19 09:36:58
'''
import json
from ConfigParser import ConfigParser
def handle(configfile,item_name):
#print "[configfile]:%s" % configfile
cp = ConfigParser()
cp.read(configfile)
res = cp.items(item_name)
#print "Command dict for sensor resource item [%s]:" % item_name
#print(json.dumps(dict(res),indent=2,ensure_ascii=False))
return dict(res)
| 288
| 0
| 23
|
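Example use of handle() above with a hypothetical ini file (the section and key names are placeholders):

# config.ini:
#   [cpu]
#   usage = mpstat 1 1
cmds = handle('config.ini', 'cpu')
# -> {'usage': 'mpstat 1 1'}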
010a24de4b223fa7a4bf486d182055d2f5cfb459
| 88,207
|
py
|
Python
|
main.py
|
JunweiLiang/Object_Detection_Tracking
|
f86caaec97669a6da56f1b402cca4e179a85d2f0
|
[
"MIT"
] | 328
|
2019-05-27T03:09:02.000Z
|
2022-03-31T05:12:04.000Z
|
main.py
|
AnjaliPC/Object_Detection_Tracking
|
f86caaec97669a6da56f1b402cca4e179a85d2f0
|
[
"MIT"
] | 43
|
2019-06-05T14:04:09.000Z
|
2022-01-25T03:16:39.000Z
|
main.py
|
AnjaliPC/Object_Detection_Tracking
|
f86caaec97669a6da56f1b402cca4e179a85d2f0
|
[
"MIT"
] | 107
|
2019-05-27T06:26:38.000Z
|
2022-03-25T03:32:58.000Z
|
# coding=utf-8
"""
main script for training and testing mask rcnn on MSCOCO/DIVA/MEVA dataset
multi gpu version
"""
import argparse
import cv2
import math
import json
import random
import operator
import time
import os
import pickle
import sys
import threading
# so here we won't have pool allocator info
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# solve the issue of a bug in while loop, when you import the graph in
# multi-gpu, prefix is not added in while loop op [tf 1.14]
# https://github.com/tensorflow/tensorflow/issues/26526
os.environ["TF_ENABLE_CONTROL_FLOW_V2"] = "1"
# remove all the annoying warnings from tf v1.10 to v1.13
import logging
logging.getLogger("tensorflow").disabled = True
import matplotlib
# avoid the warning "gdk_cursor_new_for_display:
# assertion 'GDK_IS_DISPLAY (display)' failed" with Python 3
matplotlib.use('Agg')
import tensorflow as tf
import numpy as np
import pycocotools.mask as cocomask
from pycocotools.coco import COCO
from tqdm import tqdm
from glob import glob
from models import get_model
from models import pack
from models import initialize
from trainer import Trainer
from tester import Tester
from nn import resizeImage
from nn import fill_full_mask
from utils import evalcoco
from utils import match_detection
from utils import computeAP
from utils import computeAR_2
from utils import grouper
from utils import gather_dt
from utils import gather_gt
from utils import match_dt_gt
from utils import gather_act_singles
from utils import aggregate_eval
from utils import weighted_average
from utils import parse_nvidia_smi
from utils import sec2time
from utils import Dataset
from utils import Summary
from utils import nms_wrapper
from utils import FIFO_ME
# for fine-tuning a COCO model with DIVA data.
from class_ids import targetClass2id
from class_ids import targetAct2id
from class_ids import targetSingleAct2id
from class_ids import targetClass2id_mergeProp
from class_ids import targetClass2id_new
from class_ids import targetClass2id_new_nopo
from class_ids import targetAct2id_bupt
from class_ids import bupt_act_mapping
from class_ids import targetAct2id_meva
from class_ids import meva_act_mapping
from class_ids import coco_obj_class_to_id
from class_ids import coco_obj_id_to_class
from class_ids import coco_obj_to_actev_obj
targetid2class = {targetClass2id[one]:one for one in targetClass2id}
targetactid2class = {targetAct2id[one]:one for one in targetAct2id}
targetsingleactid2class = {
targetSingleAct2id[one]:one for one in targetSingleAct2id}
# coco class to DIVA class
eval_target = {
"Vehicle": ["car", "motorcycle", "bus", "truck", "vehicle"],
"Person": "person",
}
eval_best = "Person" # not used anymore, we use average as the best metric
# load all ground truth into memory
# given the gen_gt_diva
# train on diva dataset
# given a list of images, do the forward, save each image result separately
# for testing, dataset -> {"imgs":[],"ids":[]}, imgs are the image file paths
# test on coco dataset
gpu_util_logs = []
gpu_temp_logs = []
# use nvidia-smi to log GPU utilization and temperature in a background thread
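# log_gpu_util is referenced in __main__ below but its definition was filtered
# out of this copy; the sketch below is reconstructed from that call site and
# is an assumption (parse_nvidia_smi's exact return format is not shown here):
def log_gpu_util(interval, gpuid_range):
    # poll nvidia-smi every `interval` seconds; record utilization and
    # temperature for the gpus in `gpuid_range`
    while True:
        time.sleep(interval)
        gpu_utils, gpu_temps = parse_nvidia_smi(gpuid_range)
        gpu_util_logs.extend(gpu_utils)
        gpu_temp_logs.extend(gpu_temps)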
if __name__ == "__main__":
config = get_args()
if config.mode == "pack":
config.is_pack_model = True
if config.is_pack_model:
pack(config)
else:
if config.log_time_and_gpu:
gpu_log_interval = 10 # every k seconds
start_time = time.time()
gpu_check_thread = threading.Thread(
target=log_gpu_util,
args=[gpu_log_interval, (config.gpuid_start, config.gpu)])
gpu_check_thread.daemon = True
gpu_check_thread.start()
if config.mode == "train":
train_diva(config)
elif config.mode == "test":
test(config)
elif config.mode == "forward":
forward(config)
else:
raise Exception("mode %s not supported"%(config.mode))
if config.log_time_and_gpu:
end_time = time.time()
print("total run time %s (%s), logged gpu utilization every %s seconds; "
"median %.2f%% and average %.2f%%. GPU temperature median "
"%.2f and average %.2f (C)" % (
sec2time(end_time - start_time),
end_time - start_time,
gpu_log_interval,
np.median(gpu_util_logs)*100,
np.mean(gpu_util_logs)*100,
np.median(gpu_temp_logs),
np.mean(gpu_temp_logs),
))
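# Example invocation (a sketch; annolist.json, images/ and model.npz are
# placeholder paths, not files shipped with the repo):
#   python main.py annolist.json images/ --mode forward --is_coco_model \
#       --outbasepath out --modelname maskrcnn --runId 1 --load_from model.npz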
| 38.704256
| 131
| 0.612831
|
# coding=utf-8
"""
main script for training and testing mask rcnn on MSCOCO/DIVA/MEVA dataset
multi gpu version
"""
import argparse
import cv2
import math
import json
import random
import operator
import time
import os
import pickle
import sys
import threading
# so here we won't have pool allocator info
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# solve the issue of a bug in while loop, when you import the graph in
# multi-gpu, prefix is not added in while loop op [tf 1.14]
# https://github.com/tensorflow/tensorflow/issues/26526
os.environ["TF_ENABLE_CONTROL_FLOW_V2"] = "1"
# remove all the annoying warnings from tf v1.10 to v1.13
import logging
logging.getLogger("tensorflow").disabled = True
import matplotlib
# avoid the warning "gdk_cursor_new_for_display:
# assertion 'GDK_IS_DISPLAY (display)' failed" with Python 3
matplotlib.use('Agg')
import tensorflow as tf
import numpy as np
import pycocotools.mask as cocomask
from pycocotools.coco import COCO
from tqdm import tqdm
from glob import glob
from models import get_model
from models import pack
from models import initialize
from trainer import Trainer
from tester import Tester
from nn import resizeImage
from nn import fill_full_mask
from utils import evalcoco
from utils import match_detection
from utils import computeAP
from utils import computeAR_2
from utils import grouper
from utils import gather_dt
from utils import gather_gt
from utils import match_dt_gt
from utils import gather_act_singles
from utils import aggregate_eval
from utils import weighted_average
from utils import parse_nvidia_smi
from utils import sec2time
from utils import Dataset
from utils import Summary
from utils import nms_wrapper
from utils import FIFO_ME
# for fine-tuning a COCO model with DIVA data.
from class_ids import targetClass2id
from class_ids import targetAct2id
from class_ids import targetSingleAct2id
from class_ids import targetClass2id_mergeProp
from class_ids import targetClass2id_new
from class_ids import targetClass2id_new_nopo
from class_ids import targetAct2id_bupt
from class_ids import bupt_act_mapping
from class_ids import targetAct2id_meva
from class_ids import meva_act_mapping
from class_ids import coco_obj_class_to_id
from class_ids import coco_obj_id_to_class
from class_ids import coco_obj_to_actev_obj
targetid2class = {targetClass2id[one]:one for one in targetClass2id}
targetactid2class = {targetAct2id[one]:one for one in targetAct2id}
targetsingleactid2class = {
targetSingleAct2id[one]:one for one in targetSingleAct2id}
# coco class to DIVA class
eval_target = {
"Vehicle": ["car", "motorcycle", "bus", "truck", "vehicle"],
"Person": "person",
}
eval_best = "Person" # not used anymore, we use average as the best metric
def get_args():
global targetClass2id, targetid2class
parser = argparse.ArgumentParser()
parser.add_argument("datajson")
parser.add_argument("imgpath")
parser.add_argument("--log_time_and_gpu", action="store_true")
parser.add_argument("--outbasepath", type=str, default=None,
help="full path will be outbasepath/modelname/runId")
parser.add_argument("--actoutbasepath", type=str, default=None,
help="for activity box forward only")
parser.add_argument("--train_skip", type=int, default=1,
help="when load diva train set, skip how many.")
parser.add_argument("--train_skip_offset", type=int, default=0,
help="when load diva train set, offset before skip")
parser.add_argument("--val_skip", type=int, default=1,
help="when load diva val set, skip how many.")
parser.add_argument("--val_skip_offset", type=int, default=0,
help="when load diva train set, offset before skip")
parser.add_argument("--exit_after_val", action="store_true")
parser.add_argument("--forward_skip", type=int, default=1,
help="forward, skip how many.")
parser.add_argument("--use_two_level_outpath", action="store_true")
parser.add_argument("--start_from", type=int, default=0,
help="forward, start from which batch")
parser.add_argument("--modelname", type=str, default=None)
parser.add_argument("--num_class", type=int, default=81,
help="num categories + 1 background")
# ---- for training, show losses' moving average
parser.add_argument("--show_loss_period", type=int, default=1000)
parser.add_argument("--loss_me_step", type=int, default=100,
help="moving average queue size")
# ------ extract fpn feature of the whole image
parser.add_argument("--extract_feat", action="store_true")
parser.add_argument("--feat_path", default=None)
parser.add_argument("--just_feat", action="store_true",
help="only extract full-image features, no bounding boxes")
# ------ do object detection and extract the fpn feature for each *final*boxes
parser.add_argument("--get_box_feat", action="store_true")
parser.add_argument("--box_feat_path", default=None)
# ---different from above, only feat no object detection
parser.add_argument("--videolst", default=None)
parser.add_argument("--skip", action="store_true", help="skip existing npy")
parser.add_argument("--tococo", action="store_true",
help="for training in diva using a coco model, map diva"
" classes 1-to-1 to coco")
parser.add_argument("--diva_class", action="store_true",
help="the last layer outputs the 16 (full) "
"diva object classes")
parser.add_argument("--diva_class2", action="store_true",
help="the last layer is new classes with person_object"
" boxes")
parser.add_argument("--diva_class3", action="store_true",
help="the last layer is new classes without person_object"
" boxes")
parser.add_argument("--is_coco_model", action="store_true")
parser.add_argument("--person_only", action="store_true")
parser.add_argument("--merge_prop", action="store_true",
help="use annotation that merged prop and "
"Push_Pulled_Object and train")
parser.add_argument("--use_bg_score", action="store_true")
# ------------activity detection
parser.add_argument("--act_as_obj", action="store_true",
help="activity box as obj box")
parser.add_argument("--add_act", action="store_true",
help="add activitiy model")
# 07/2019
parser.add_argument("--bupt_exp", action="store_true",
help="bupt activity box exp")
parser.add_argument("--meva_exp", action="store_true",
help="meva activity box exp")
parser.add_argument("--check_img_exist", action="store_true",
help="check image exists when load data")
parser.add_argument("--fix_obj_model", action="store_true",
help="fix the object detection part including rpn")
# v1:
parser.add_argument("--num_act_class", type=int, default=36,
help="num categories + 1 background")
parser.add_argument("--fastrcnn_act_fg_ratio", default=0.25, type=float)
parser.add_argument("--act_relation_nn", action="store_true",
help="add relation link in activity fastrnn head")
parser.add_argument("--act_loss_weight", default=1.0, type=float)
# ----- activity detection version 2
parser.add_argument("--act_v2", action="store_true")
parser.add_argument("--act_single_topk", type=int, default=5,
help="each box topk classes are output")
parser.add_argument("--num_act_single_class", default=36, type=int)
parser.add_argument("--num_act_pair_class", default=21, type=int)
# ---------------------------------------------
parser.add_argument("--debug", action="store_true",
help="load fewer image for debug in training")
parser.add_argument("--runId", type=int, default=1)
# forward mode: imgpath is the list of images
# will output result to outbasepath
# forward still needs a coco validation json to get the category names
parser.add_argument("--mode", type=str, default="forward",
help="train | test | forward | boxfeat | givenbox")
parser.add_argument("--avg_feat", action="store_true",
help="for boxfeat mode, output 7x7x2048 or just "
"2048 for each box")
parser.add_argument("--boxjsonpath", default=None,
help="json contain a dict for all the boxes, imageId"
" -> boxes")
parser.add_argument("--boxfeatpath", default=None,
help="where to save the box feat path, will be a npy"
" for each image")
parser.add_argument("--boxclass", action="store_true",
help="do box classification as well")
parser.add_argument("--resnet152", action="store_true", help="")
parser.add_argument("--resnet50", action="store_true", help="")
parser.add_argument("--resnet34", action="store_true", help="")
parser.add_argument("--resnet18", action="store_true", help="")
parser.add_argument("--use_se", action="store_true",
help="use squeeze and excitation in backbone")
parser.add_argument("--use_resnext", action="store_true")
parser.add_argument("--is_fpn", action="store_true")
parser.add_argument("--use_gn", action="store_true",
help="whether to use group normalization")
parser.add_argument("--ignore_gn_vars", action="store_true",
help="add gn to previous model, will ignore loading "
"the gn var first")
parser.add_argument("--use_conv_frcnn_head", action="store_true",
help="use conv in fastrcnn head")
parser.add_argument("--use_att_frcnn_head", action="store_true",
help="use attention to sum [K, 7, 7, C] feature "
"into [K, C]")
parser.add_argument("--use_frcnn_class_agnostic", action="store_true",
help="use class agnostic fc head")
parser.add_argument("--conv_frcnn_head_dim", default=256, type=int)
parser.add_argument("--get_rpn_out", action="store_true")
parser.add_argument("--rpn_out_path", default=None)
parser.add_argument("--use_cpu_nms", action="store_true")
parser.add_argument("--no_nms", action="store_true",
help="not using nms in the end, "
"save all pre_nms_topk boxes;")
parser.add_argument("--save_all_box", action="store_true",
help="for DCR experiment, save all boxes "
"and scores in npz file")
parser.add_argument("--use_small_object_head", action="store_true")
parser.add_argument("--use_so_score_thres", action="store_true",
help="use score threshold before final nms")
parser.add_argument("--oversample_so_img", action="store_true")
parser.add_argument("--oversample_x", type=int, default=1, help="x + 1 times")
parser.add_argument("--skip_no_so_img", action="store_true")
parser.add_argument("--skip_no_object", default=None,
help="Bike, single object annotation filter")
parser.add_argument("--so_outpath", default=None)
parser.add_argument("--use_so_association", action="store_true")
parser.add_argument("--so_person_topk", type=int, default=10)
parser.add_argument("--freeze_rpn", action="store_true")
parser.add_argument("--freeze_fastrcnn", action="store_true")
parser.add_argument("--use_dilations", action="store_true",
help="use dilations=2 in res5")
parser.add_argument("--use_deformable", action="store_true",
help="use dilations=2 in res5")
parser.add_argument("--fpn_frcnn_fc_head_dim", type=int, default=1024)
parser.add_argument("--fpn_num_channel", type=int, default=256)
parser.add_argument("--freeze", type=int, default=0,
help="freeze backbone resnet until group 0|2")
parser.add_argument("--finer_resolution", action="store_true",
help="fpn use finer resolution conv")
parser.add_argument("--add_relation_nn", action="store_true",
help="add relation network feature")
parser.add_argument("--focal_loss", action="store_true",
help="use focal loss for RPN and FasterRCNN loss, "
"instead of cross entropy")
# for test mode on testing on the MSCOCO dataset, if not set this,
# will use our evaluation script
parser.add_argument("--use_coco_eval", action="store_true")
parser.add_argument("--coco2014_to_2017", action="store_true",
help="if using the cocoval 2014 json with val2017"
" file paths, set this option to get the correct"
" file path")
parser.add_argument("--trainlst", type=str, default=None,
help="training frame name list,")
parser.add_argument("--valframepath", type=str, default=None,
help="path to top frame path")
parser.add_argument("--annopath", type=str, default=None,
help="path to annotation, each frame.npz")
parser.add_argument("--valannopath", type=str, default=None,
help="path to annotation, each frame.npz")
parser.add_argument("--one_level_framepath", action="store_true")
parser.add_argument("--flip_image", action="store_true",
help="for training, whether to random horizontal "
"flipping for input image, maybe not for "
"surveillance video")
parser.add_argument("--add_mask", action="store_true")
parser.add_argument("--vallst", type=str, default=None,
help="validation for training")
parser.add_argument("--load", action="store_true")
parser.add_argument("--load_best", action="store_true")
parser.add_argument("--skip_first_eval", action="store_true")
parser.add_argument("--best_first", type=float, default=None)
parser.add_argument("--force_first_eval", action="store_true")
parser.add_argument("--no_skip_error", action="store_true")
parser.add_argument("--show_stat", action="store_true",
help="show data distribution only")
# use for pre-trained model
parser.add_argument("--load_from", type=str, default=None)
parser.add_argument("--ignore_vars", type=str, default=None,
help="variables to ignore; separate multiple with ':', "
"e.g. logits/W:logits/b; each entry only needs to "
"be a substring of the variable name to ignore it")
parser.add_argument("--print_params", action="store_true",
help="print params and then exit")
parser.add_argument("--show_restore", action="store_true",
help="load from existing model (npz), show the"
" weight that is restored")
# -------------------- save model for deployment
parser.add_argument("--is_pack_model", action="store_true", default=False,
help="with is_test, this will pack the model to a path"
" instead of testing")
parser.add_argument("--pack_model_path", type=str, default=None,
help="path to save model, a .pb file")
parser.add_argument("--note", type=str, default=None,
help="leave a note for this packed model for"
" future reference")
parser.add_argument("--pack_modelconfig_path", type=str, default=None,
help="json file to save the config and note")
# forward with frozen graph
parser.add_argument("--is_load_from_pb", action="store_true")
# for efficientdet
parser.add_argument("--is_efficientdet", action="store_true")
parser.add_argument("--efficientdet_modelname", default="efficientdet-d0")
parser.add_argument("--efficientdet_max_detection_topk", type=int,
default=5000, help="#topk boxes before NMS")
parser.add_argument("--efficientdet_min_level", type=int, default=3)
parser.add_argument("--efficientdet_max_level", type=int, default=7)
# ------------------------------------ model specifics
# ----------------------------------training detail
parser.add_argument("--use_all_mem", action="store_true")
parser.add_argument("--im_batch_size", type=int, default=1)
parser.add_argument("--rpn_batch_size", type=int, default=256,
help="num roi per image for RPN training")
parser.add_argument("--frcnn_batch_size", type=int, default=512,
help="num roi per image for fastRCNN training")
parser.add_argument("--rpn_test_post_nms_topk", type=int, default=1000,
help="test post nms, input to fast rcnn")
# fastrcnn output NMS suppresses iou >= this thres
parser.add_argument("--fastrcnn_nms_iou_thres", type=float, default=0.5)
parser.add_argument("--max_size", type=int, default=1333,
help="max image size (longest side) after resizing")
parser.add_argument("--short_edge_size", type=int, default=800,
help="target short-edge size after resizing")
parser.add_argument("--scale_jitter", action="store_true",
help="if set, a random int between min and max is"
" used to resize the image; the original params"
" are still used in testing")
parser.add_argument("--short_edge_size_min", type=int, default=640,
help="min short-edge size when scale_jitter is set")
parser.add_argument("--short_edge_size_max", type=int, default=800,
help="max short-edge size when scale_jitter is set")
# ------------------------------mixup training
parser.add_argument("--use_mixup", action="store_true")
parser.add_argument("--use_constant_mixup_weight", action="store_true")
parser.add_argument("--mixup_constant_weight", type=float, default=0.5)
parser.add_argument("--mixup_chance", type=float, default=0.5,
help="the probability of using mixup")
parser.add_argument("--max_mixup_per_frame", type=int, default=15)
# not used for fpn
parser.add_argument("--small_anchor_exp", action="store_true")
parser.add_argument("--positive_anchor_thres", default=0.7, type=float)
parser.add_argument("--negative_anchor_thres", default=0.3, type=float)
parser.add_argument("--fastrcnn_fg_ratio", default=0.25, type=float)
parser.add_argument("--gpu", default=1, type=int, help="number of gpu")
parser.add_argument("--gpuid_start", default=0, type=int,
help="start of gpu id")
parser.add_argument("--model_per_gpu", default=1, type=int,
help="it will be set as a /task:k in device")
parser.add_argument("--controller", default="/cpu:0",
help="controller for multigpu training")
#parser.add_argument("--num_step",type=int,default=360000)
parser.add_argument("--num_epochs", type=int, default=12)
parser.add_argument("--save_period", type=int, default=5000,
help="num steps to save model and eval")
# drop out rate
parser.add_argument("--keep_prob", default=1.0, type=float,
help="1.0 - dropout rate; remember to set it to 1.0 "
"in eval")
# l2 weight decay
parser.add_argument("--wd", default=None, type=float) # 0.0001
parser.add_argument("--init_lr", default=0.1, type=float,
help=("start learning rate"))
parser.add_argument("--use_lr_decay", action="store_true")
parser.add_argument("--learning_rate_decay", default=0.94, type=float,
help=("learning rate decay"))
parser.add_argument("--num_epoch_per_decay", default=2.0, type=float,
help=("number of epochs after which lr decays"))
parser.add_argument("--use_cosine_schedule", action="store_true")
parser.add_argument("--use_exp_schedule", action="store_true")
parser.add_argument("--warm_up_steps", default=3000, type=int,
help=("warm up steps not epochs"))
parser.add_argument("--same_lr_steps", default=0, type=int,
help=("after warm up, keep the init_lr for k steps"))
parser.add_argument("--optimizer", default="adam", type=str,
help="optimizer: adam/adadelta")
parser.add_argument("--momentum", default=0.9, type=float)
parser.add_argument("--result_score_thres", default=0.0001, type=float)
parser.add_argument("--result_per_im", default=100, type=int)
# clipping, suggest 100.0
parser.add_argument("--clip_gradient_norm", default=None, type=float,
help=("norm to clip gradient to"))
# for debug
parser.add_argument("--vis_pre", action="store_true",
help="visualize preprocess images")
parser.add_argument("--vis_path", default=None)
# for efficient use of COCO model classes
parser.add_argument("--use_partial_classes", action="store_true")
parser.add_argument("--is_multi", action="store_true",
help="use multi-img batch model")
args = parser.parse_args()
if args.use_cosine_schedule:
args.use_lr_decay = True
if args.use_exp_schedule:
args.use_lr_decay = True
args.use_cosine_schedule = False
if args.save_all_box:
args.no_nms = True
if args.no_nms:
args.use_cpu_nms = True # so to avoid using TF nms in the graph
#assert args.model_per_gpu == 1, "not work yet!"
#assert args.gpu*args.model_per_gpu == args.im_batch_size # one gpu one image
#args.controller = "/cpu:0" # parameter server
targetid2class = targetid2class
targetClass2id = targetClass2id
args.small_objects = ["Prop", "Push_Pulled_Object",
"Prop_plus_Push_Pulled_Object", "Bike"]
if args.use_small_object_head:
assert args.merge_prop
args.so_eval_target = {c:1 for c in args.small_objects}
args.small_objects_targetClass2id = {
c: i for i, c in enumerate(["BG"] + args.small_objects)}
args.small_objects_targetid2class = {
args.small_objects_targetClass2id[one]: one
for one in args.small_objects_targetClass2id}
if args.merge_prop:
targetClass2id = targetClass2id_mergeProp
targetid2class = {targetClass2id_mergeProp[one]:one
for one in targetClass2id_mergeProp}
if args.diva_class2:
targetClass2id = targetClass2id_new
targetid2class = {targetClass2id_new[one]:one for one in targetClass2id_new}
if args.diva_class3:
targetClass2id = targetClass2id_new_nopo
targetid2class = {targetClass2id_new_nopo[one]:one
for one in targetClass2id_new_nopo}
args.classname2id = targetClass2id
args.classid2name = targetid2class
if args.act_as_obj:
# replace the obj classes with activity classes
targetClass2id = targetAct2id
targetid2class = {targetAct2id[one]:one for one in targetAct2id}
if args.bupt_exp:
args.diva_class = True
args.act_as_obj = True
targetClass2id = targetAct2id_bupt
targetid2class = {targetAct2id_bupt[one]:one for one in targetAct2id_bupt}
if args.meva_exp:
args.diva_class = True
args.act_as_obj = True
targetClass2id = targetAct2id_meva
targetid2class = {targetAct2id_meva[one]:one for one in targetAct2id_meva}
if args.is_coco_model:
#assert args.mode == "forward" or args.mode == "pack"
args.diva_class = False
targetClass2id = coco_obj_class_to_id
targetid2class = coco_obj_id_to_class
if args.person_only:
targetid2class = {0: "BG", 1: "person"}
targetClass2id = {"BG": 0, "person": 1}
if args.use_partial_classes:
assert args.is_coco_model
args.partial_classes = [classname for classname in coco_obj_to_actev_obj]
args.classname2id = targetClass2id
args.classid2name = targetid2class
if not args.tococo:
assert len(targetid2class) == args.num_class
if not args.tococo and ((args.mode == "train") or (args.mode == "test")):
assert args.num_class == len(targetid2class.keys())
args.class_names = targetClass2id.keys()
if args.vis_pre:
assert args.vis_path is not None
if not os.path.exists(args.vis_path):
os.makedirs(args.vis_path)
if args.add_act and (args.mode == "forward"):
assert args.actoutbasepath is not None
mkdir(args.actoutbasepath)
if args.outbasepath is not None:
mkdir(args.outbasepath)
if args.skip_first_eval:
assert args.best_first is not None
if (args.outbasepath is not None) and (args.modelname is not None):
args.outpath = os.path.join(args.outbasepath,
args.modelname,
str(args.runId).zfill(2))
args.save_dir = os.path.join(args.outpath, "save")
args.save_dir_best = os.path.join(args.outpath, "save-best")
args.write_self_sum = True
args.self_summary_path = os.path.join(args.outpath, "train_sum.txt")
# path to save each validation step"s performance and loss
args.stats_path = os.path.join(args.outpath, "stats.json")
args.mrcnn_head_dim = 256
args.no_obj_detect = False
if args.mode == "videofeat":
args.no_obj_detect = True
args.anchor_stride = 16 # has to be 16 to match the image feature total stride
args.anchor_sizes = (32, 64, 128, 256, 512)
if args.small_anchor_exp:
args.anchor_sizes = (16, 32, 64, 96, 128, 256) # not used for fpn
if args.is_fpn:
args.anchor_strides = (4, 8, 16, 32, 64)
# we will pad H,W to be a multiplier of 32
# [3] is 32, since there is a total pixel reduce of 2x2x2x2x2
args.fpn_resolution_requirement = float(args.anchor_strides[3])
if args.is_efficientdet:
args.fpn_resolution_requirement = 128.0 # 2 ** max_level
args.short_edge_size = np.ceil(
args.short_edge_size / args.fpn_resolution_requirement) * \
args.fpn_resolution_requirement
args.max_size = np.ceil(args.max_size / args.fpn_resolution_requirement) \
* args.fpn_resolution_requirement
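    # e.g. with fpn_resolution_requirement = 32.0, a max_size of 1333
    # rounds up to ceil(1333 / 32) * 32 = 1344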
#args.fpn_num_channel = 256
#args.fpn_frcnn_fc_head_dim = 1024
if args.load_best:
args.load = True
if args.load_from is not None:
args.load = True
if args.mode == "train":
assert args.outbasepath is not None
assert args.modelname is not None
args.is_train = True
mkdir(args.save_dir)
mkdir(args.save_dir_best)
else:
args.is_train = False
args.num_epochs = 1
if args.get_rpn_out:
if not os.path.exists(args.rpn_out_path):
os.makedirs(args.rpn_out_path)
# ---- all the mask rcnn config
args.resnet_num_block = [3, 4, 23, 3] # resnet 101
args.use_basic_block = False # for resnet-34 and resnet-18
if args.resnet152:
args.resnet_num_block = [3, 8, 36, 3]
if args.resnet50:
args.resnet_num_block = [3, 4, 6, 3]
if args.resnet34:
args.resnet_num_block = [3, 4, 6, 3]
args.use_basic_block = True
if args.resnet18:
args.resnet_num_block = [2, 2, 2, 2]
args.use_basic_block = True
#args.short_edge_size = 800
#args.max_size = 1333
args.anchor_ratios = (0.5, 1, 2)
args.num_anchors = len(args.anchor_sizes) * len(args.anchor_ratios)
# iou thres to determine anchor label
#args.positive_anchor_thres = 0.7
#args.negative_anchor_thres = 0.3
# when getting region proposal, avoid getting too large boxes
args.bbox_decode_clip = np.log(args.max_size / 16.0)
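  # note: box decoding computes w = anchor_w * exp(dw); clipping dw at
  # log(max_size / 16) bounds exp(dw) by max_size / 16, preventing
  # inf/overflow when a regression output is very large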
# RPN training
args.rpn_fg_ratio = 0.5
args.rpn_batch_per_im = args.rpn_batch_size
args.rpn_min_size = 0 # 8?
args.rpn_proposal_nms_thres = 0.7
args.rpn_train_pre_nms_topk = 12000 # not used in fpn
  args.rpn_train_post_nms_topk = 2000  # this is used for fpn_nms_pre
# fastrcnn
args.fastrcnn_batch_per_im = args.frcnn_batch_size
args.fastrcnn_bbox_reg_weights = np.array([10, 10, 5, 5], dtype="float32")
#args.fastrcnn_bbox_reg_weights = np.array([20, 20, 10, 10], dtype="float32")
args.fastrcnn_fg_thres = 0.5 # iou thres
#args.fastrcnn_fg_ratio = 0.25 # 1:3 -> pos:neg
# testing
args.rpn_test_pre_nms_topk = 6000
#args.rpn_test_post_nms_topk = 700 #1300 # 700 takes 40 hours, # OOM at 1722,28,28,1024 # 800 OOM for gpu4
#args.fastrcnn_nms_thres = 0.5
#args.fastrcnn_nms_iou_thres = 0.5 # 0.3 is worse
#args.result_score_thres = 0.0001
#args.result_per_im = 100 # 400 # 100
if args.focal_loss and args.clip_gradient_norm is None:
print("Focal loss needs gradient clipping or will have NaN loss")
sys.exit()
return args
def add_coco(config, datajson):
coco = COCO(datajson)
cat_ids = coco.getCatIds() #[80], each is 1-90
cat_names = [c["name"] for c in coco.loadCats(cat_ids)] # [80]
config.classId_to_cocoId = {(i+1): v for i, v in enumerate(cat_ids)}
config.class_names = ["BG"] + cat_names
# 0-80
config.class_to_classId = {c:i for i, c in enumerate(config.class_names)}
config.classId_to_class = {i:c for i, c in enumerate(config.class_names)}
# load all ground truth into memory
def read_data_diva(config, idlst, framepath, annopath, tococo=False,
randp=None, is_train=False, one_level_framepath=False):
assert idlst is not None
assert framepath is not None
assert annopath is not None
assert len(targetid2class.keys()) == config.num_class
# load the coco class name to classId so we could convert the label name
#to label classId
if tococo:
add_coco(config, config.datajson)
imgs = [os.path.splitext(os.path.basename(line.strip()))[0]
for line in open(idlst, "r").readlines()]
if randp is not None:
imgs = random.sample(imgs, int(len(imgs)*randp))
data = {"imgs":[], "gt":[]}
if config.use_mixup and is_train:
data["mixup_weights"] = []
print("loading data..")
if config.print_params:
imgs = imgs[:100]
# in diva dataset, some class may be ignored
ignored_classes = {}
targetClass2exist = {classname:0 for classname in targetClass2id}
num_empty_actboxes = 0
targetAct2exist = {classname:0 for classname in targetAct2id}
ignored_act_classes = {}
num_empty_single_actboxes = 0
ignored_single_act_classes = {}
targetAct2exist_single = {classname:0 for classname in targetSingleAct2id}
act_single_fgratio = []
if config.debug:
imgs = imgs[:1000]
if (config.train_skip > 1) and is_train:
imgs.sort()
ori_num = len(imgs)
imgs = imgs[config.train_skip_offset::config.train_skip]
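    # e.g. train_skip=5, train_skip_offset=2 keeps imgs[2], imgs[7], imgs[12], ...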
print("skipping [%s::%s], got %s/%s" % (
config.train_skip_offset, config.train_skip, len(imgs), ori_num))
if (config.val_skip > 1) and not is_train:
imgs.sort()
ori_num = len(imgs)
imgs = imgs[config.val_skip_offset::config.val_skip]
print("skipping [%s::%s], got %s/%s" % (
config.val_skip_offset, config.val_skip, len(imgs), ori_num))
# get starts for each img, the label distribution
# class -> [] num_box in each image
label_dist = {classname:[] for classname in targetClass2id}
label_dist_all = []
for img in tqdm(imgs, ascii=True, smoothing=0.5):
anno = os.path.join(annopath, "%s.npz"%img)
videoname = img.strip().split("_F_")[0]
if not os.path.exists(anno):
continue
if config.check_img_exist:
if not os.path.exists(os.path.join(framepath, videoname, "%s.jpg"%img)):
continue
anno = dict(np.load(anno, allow_pickle=True)) # "boxes" -> [K,4]
# boxes are x1,y1,x2,y2
original_box_num = len(anno["boxes"])
# feed act box as object boxes
if config.act_as_obj:
anno["labels"] = anno["actlabels"]
anno["boxes"] = anno["actboxes"]
# labels are one word, diva classname
labels = []
boxes = []
no_so_box = True
no_object = True
for i, classname in enumerate(list(anno["labels"])):
if classname in targetClass2id or (
config.bupt_exp and classname in bupt_act_mapping) or (
config.meva_exp and classname in meva_act_mapping):
if config.bupt_exp and classname in bupt_act_mapping:
classname = bupt_act_mapping[classname]
if config.meva_exp and classname in meva_act_mapping:
classname = meva_act_mapping[classname]
targetClass2exist[classname] = 1
labels.append(targetClass2id[classname])
boxes.append(anno["boxes"][i])
else:
ignored_classes[classname] = 1
if classname in config.small_objects:
no_so_box = False
if config.skip_no_object is not None:
if classname == config.skip_no_object:
no_object = False
if config.use_mixup and is_train:
mixup_boxes = []
mixup_labels = []
for i, classname in enumerate(
list(anno["mixup_labels"])[:config.max_mixup_per_frame]):
if classname in targetClass2id:
# not adding now, during run time will maybe add them
#labels.append(targetClass2id[classname])
#boxes.append(anno["mixup_boxes"][i])
mixup_boxes.append(anno["mixup_boxes"][i])
mixup_labels.append(targetClass2id[classname])
anno["mixup_boxes"] = np.array(mixup_boxes, dtype="float32")
anno["mixup_labels"] = mixup_labels
anno["boxes"] = np.array(boxes, dtype="float32")
anno["labels"] = labels
#assert len(anno["boxes"]) > 0
if len(anno["boxes"]) == 0:
continue
if config.skip_no_so_img and is_train:
if no_so_box:
continue
if config.skip_no_object and is_train:
if no_object:
continue
assert len(anno["labels"]) == len(anno["boxes"]), (
anno["labels"], anno["boxes"])
assert anno["boxes"].dtype == np.float32
if config.oversample_so_img and is_train and not no_so_box:
for i in range(config.oversample_x):
data["imgs"].append(os.path.join(framepath, videoname, "%s.jpg"%img))
data["gt"].append(anno)
# statics
if config.show_stat:
for classname in label_dist:
num_box_this_img = len(
[l for l in labels if l == targetClass2id[classname]])
label_dist[classname].append(num_box_this_img)
label_dist_all.append(len(labels))
if config.add_act:
# for activity anno, we couldn"t remove any of the boxes
assert len(anno["boxes"]) == original_box_num
if config.act_v2:
# make multi class labels
# BG class is at index 0
K = len(anno["boxes"])
actSingleLabels = np.zeros((K, config.num_act_single_class),
dtype="uint8")
# use this to mark BG
hasClass = np.zeros((K), dtype="bool")
for i, classname in enumerate(list(anno["actSingleLabels"])):
if classname in targetSingleAct2id:
targetAct2exist_single[classname] = 1
act_id = targetSingleAct2id[classname]
box_id = anno["actSingleIdxs"][i]
assert box_id >= 0 and box_id < K
actSingleLabels[box_id, act_id] = 1
hasClass[box_id] = True
else:
ignored_single_act_classes[classname] = 1
# mark the BG for boxes that has not activity annotation
actSingleLabels[np.logical_not(hasClass), 0] = 1
anno["actSingleLabels_npy"] = actSingleLabels
# compute the BG vs FG ratio for the activity boxes
act_single_fgratio.append(sum(hasClass)/float(K))
if sum(hasClass) == 0:
num_empty_single_actboxes += 1
continue
else:
act_labels = []
act_good_ids = []
for i, classname in enumerate(list(anno["actlabels"])):
if classname in targetAct2id:
targetAct2exist[classname] = 1
act_labels.append(targetAct2id[classname])
act_good_ids.append(i)
else:
ignored_act_classes[classname] = 1
#print anno["actboxes"].shape
if anno["actboxes"].shape[0] == 0:# ignore this image
num_empty_actboxes += 1
continue
anno["actboxes"] = anno["actboxes"][act_good_ids]
# it is a npy array of python list, so no :
anno["actboxidxs"] = anno["actboxidxs"][act_good_ids]
anno["actlabels"] = act_labels
assert len(anno["actboxes"]) == len(anno["actlabels"])
if config.use_mixup and is_train:
# the training lst and annotation is framename_M_framename.npz files
framename1, framename2 = img.strip().split("_M_")
videoname1 = framename1.strip().split("_F_")[0]
videoname2 = framename2.strip().split("_F_")[0]
data["imgs"].append(
(os.path.join(framepath, videoname1, "%s.jpg"%framename1),
os.path.join(framepath, videoname2, "%s.jpg"%framename2)))
data["gt"].append(anno)
weight = np.random.beta(1.5, 1.5)
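      # Beta(1.5, 1.5) is symmetric around 0.5, so mixup weights favor
      # mid-range mixes over near-0/1 ones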
if config.use_constant_mixup_weight:
weight = config.mixup_constant_weight
data["mixup_weights"].append(weight)
else:
if one_level_framepath:
data["imgs"].append(os.path.join(framepath, "%s.jpg"%img))
else:
data["imgs"].append(os.path.join(framepath, videoname, "%s.jpg"%img))
data["gt"].append(anno)
print("loaded %s/%s data" % (len(data["imgs"]), len(imgs)))
if config.show_stat:
for classname in label_dist:
d = label_dist[classname]
ratios = [a/float(b) for a, b in zip(d, label_dist_all)]
print("%s, [%s - %s], median %s per img, ratio:[%.3f - %.3f], "
"median %.3f, no label %s/%s [%.3f]" % (
classname, min(d), max(d), np.median(d), min(ratios),
max(ratios),
np.median(ratios), len([i for i in d if i == 0]), len(d),
len([i for i in d if i == 0])/float(len(d))))
print("each img has boxes: [%s - %s], median %s" % (
min(label_dist_all), max(label_dist_all), np.median(label_dist_all)))
if ignored_classes:
print("ignored %s " % (ignored_classes.keys()))
noDataClasses = [classname for classname in targetClass2exist
if targetClass2exist[classname] == 0]
if noDataClasses:
print("warning: class data not exists: %s, AR will be 1.0 for these" % (
noDataClasses))
if config.add_act:
if config.act_v2:
print(" each frame positive act box percentage min %.4f, max %.4f, "
"mean %.4f" % (
min(act_single_fgratio), max(act_single_fgratio),
np.mean(act_single_fgratio)))
if ignored_single_act_classes:
print("ignored activity %s" % (ignored_single_act_classes.keys()))
print("%s/%s has no single activity boxes" % (
num_empty_single_actboxes, len(data["imgs"])))
noDataClasses = [classname for classname in targetAct2exist_single
if targetAct2exist_single[classname] == 0]
if noDataClasses:
print("warning: single activity class data not exists: %s, " % (
noDataClasses))
else:
if ignored_act_classes:
print("ignored activity %s" % (ignored_act_classes.keys()))
print("%s/%s has no activity boxes" % (
num_empty_actboxes, len(data["imgs"])))
noDataClasses = [classname for classname in targetAct2exist
if targetAct2exist[classname] == 0]
if noDataClasses:
print("warning: activity class data not exists: %s, " % (noDataClasses))
return Dataset(data, add_gt=True)
# given the gen_gt_diva
# train on diva dataset
def train_diva(config):
global eval_target, targetid2class, targetClass2id
eval_target_weight = None
if config.diva_class:
# only care certain classes
eval_target = ["Vehicle", "Person", "Prop", "Push_Pulled_Object", "Bike"]
eval_target = {one:1 for one in eval_target}
eval_target_weight = {
"Person":0.15,
"Vehicle":0.15,
"Prop":0.15,
"Push_Pulled_Object":0.15,
"Bike":0.15,
}
if config.merge_prop:
eval_target = ["Vehicle", "Person", "Prop", "Push_Pulled_Object",
"Bike", "Prop_plus_Push_Pulled_Object"]
eval_target = {one:1 for one in eval_target}
eval_target_weight = {
"Person":0.15,
"Vehicle":0.15,
"Prop_plus_Push_Pulled_Object":0.2,
"Bike":0.2,
"Prop":0.15,
"Push_Pulled_Object":0.15,
}
if config.diva_class2:
# only care certain classes
eval_target = ["Vehicle", "Person", "Prop", "Push_Pulled_Object", "Bike",
"Construction_Vehicle", "Bike_Person", "Prop_Person",
"Skateboard_Person"]
eval_target = {one:1 for one in eval_target}
eval_target_weight = {one:1.0/len(eval_target) for one in eval_target}
if config.diva_class3:
# only care certain classes
# removed construction vehicle 03/2019
eval_target = ["Vehicle", "Person", "Prop", "Push_Pulled_Object", "Bike"]
eval_target = {one:1 for one in eval_target}
eval_target_weight = {one:1.0/len(eval_target) for one in eval_target}
if config.add_act:
# same for single box act
# "vehicle_u_turn" is not used since not exists in val set
act_eval_target = ["vehicle_turning_right", "vehicle_turning_left",
"Unloading", "Transport_HeavyCarry", "Opening",
"Open_Trunk", "Loading", "Exiting", "Entering",
"Closing_Trunk", "Closing", "Interacts", "Pull",
"Riding", "Talking", "activity_carrying",
"specialized_talking_phone", "specialized_texting_phone"]
act_eval_target = {one:1 for one in act_eval_target}
act_eval_target_weight = {one:1.0/len(act_eval_target)
for one in act_eval_target}
if config.act_as_obj:
# "vehicle_u_turn" is not used since not exists in val set
eval_target = ["vehicle_turning_right", "vehicle_turning_left", "Unloading",
"Transport_HeavyCarry", "Opening", "Open_Trunk", "Loading",
"Exiting", "Entering", "Closing_Trunk", "Closing",
"Interacts", "Pull", "Riding", "Talking",
"activity_carrying", "specialized_talking_phone",
"specialized_texting_phone"]
if config.bupt_exp:
eval_target = ["Person-Vehicle", "Vehicle-Turning",
"Transport_HeavyCarry", "Pull", "Riding", "Talking",
"activity_carrying", "specialized_talking_phone",
"specialized_texting_phone"]
if config.meva_exp:
eval_target = ["Person-Vehicle", "Vehicle-Turning", "Person-Structure",
"Person_Heavy_Carry", "People_Talking", "Riding",
"Person_Sitting_Down", "Person_Sets_Down_Object"]
eval_target = {one:1 for one in eval_target}
eval_target_weight = {one:1.0/len(eval_target) for one in eval_target}
if config.is_coco_model:
# finetuning person boxes for AVA
eval_target = ["person"]
eval_target = {one:1 for one in eval_target}
eval_target_weight = {one:1.0/len(eval_target) for one in eval_target}
self_summary_strs = Summary()
stats = [] # tuples with {"metrics":,"step":,}
# load the frame count data first
train_data = read_data_diva(config, config.trainlst, config.imgpath,
config.annopath, tococo=False, is_train=True,
one_level_framepath=config.one_level_framepath)
val_data = read_data_diva(config, config.vallst, config.valframepath,
config.valannopath, tococo=False,
one_level_framepath=config.one_level_framepath)
config.train_num_examples = train_data.num_examples
if config.show_stat:
sys.exit()
# the total step (iteration) the model will run
num_steps = int(math.ceil(
train_data.num_examples/float(config.im_batch_size))) * config.num_epochs
num_val_steps = int(math.ceil(
val_data.num_examples/float(config.im_batch_size))) * 1
#config_vars = vars(config)
# model_per_gpu > 1 not work yet, need to set distributed computing
#model = get_model(config) # input is image paths
models = []
gpuids = list(range(config.gpuid_start, config.gpuid_start+config.gpu))
gpuids = gpuids * config.model_per_gpu
# example, model_per_gpu=2, gpu=2, gpuid_start=0
  gpuids.sort()  # [0,0,1,1]
taskids = list(range(config.model_per_gpu)) * config.gpu # [0,1,0,1]
for i, j in zip(gpuids, taskids):
models.append(get_model(config, gpuid=i, task=j,
controller=config.controller))
config.is_train = False
models_eval = []
for i, j in zip(gpuids, taskids):
models_eval.append(get_model(config, gpuid=i, task=j,
controller=config.controller))
config.is_train = True
trainer = Trainer(models, config)
tester = Tester(models_eval, config, add_mask=config.add_mask)
saver = tf.train.Saver(max_to_keep=5) # how many model to keep
bestsaver = tf.train.Saver(max_to_keep=5) # just for saving the best model
# start training!
tfconfig = tf.ConfigProto(allow_soft_placement=True)
if not config.use_all_mem:
tfconfig.gpu_options.allow_growth = True
# so only this gpu will be used
tfconfig.gpu_options.visible_device_list = "%s" % (",".join(
["%s" % i
for i in range(config.gpuid_start, config.gpuid_start+config.gpu)]))
with tf.Session(config=tfconfig) as sess:
self_summary_strs.add("total parameters: %s" % (cal_total_param()))
initialize(load=config.load, load_best=config.load_best, config=config,
sess=sess)
if config.print_params:
for var in tf.global_variables():
not_show = False
for c in ["Adam", "beta1_power", "beta2_power", "Adam_1", "Adadelta_1",
"Adadelta", "Momentum"]:
if c in var.name:
not_show = True
if not_show:
continue
shape = var.get_shape()
print("%s %s\n" % (var.name, shape))
sys.exit()
isStart = True
best = (-1.0, 1)
loss_me, wd_me, rpn_label_loss_me, rpn_box_loss_me, \
fastrcnn_label_loss_me, fastrcnn_box_loss_me, so_label_loss_me, \
act_loss_me, lr_me = \
[FIFO_ME(config.loss_me_step) for i in range(9)]
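    # FIFO_ME appears to be a fixed-length FIFO used as a moving-average
    # window over the last config.loss_me_step values (.put() pushes a
    # value, .me() reads the current mean)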
for batch in tqdm(train_data.get_batches(
config.im_batch_size, num_batches=num_steps),
total=num_steps, ascii=True, smoothing=1):
# start from 0 or the previous step
global_step = sess.run(models[0].global_step) + 1
validation_performance = None
if (global_step % config.save_period == 0) or \
(config.load and isStart and ((config.ignore_vars is None) or \
config.force_first_eval)): # time to save model
tqdm.write("step:%s/%s (epoch:%.3f)" % (
global_step, num_steps,
(config.num_epochs*global_step/float(num_steps))))
tqdm.write("\tsaving model %s..." % global_step)
saver.save(sess, os.path.join(config.save_dir, "model"),
global_step=global_step)
tqdm.write("\tdone")
if config.skip_first_eval and isStart:
tqdm.write("skipped first eval...")
validation_performance = config.best_first
else:
# cat_id -> imgid -> {"dm","dscores"}
e = {one:{} for one in eval_target.keys()}
if config.add_act:
e_act = {one:{} for one in act_eval_target.keys()}
if config.use_small_object_head:
e_so = {one:{} for one in config.so_eval_target.keys()}
for val_batch_ in tqdm(val_data.get_batches(
config.im_batch_size, num_batches=num_val_steps, shuffle=False),
total=num_val_steps, ascii=True, smoothing=1):
batch_idx, val_batches = val_batch_
this_batch_num = len(val_batches)
# multiple image at a time for parallel inferencing with
# multiple gpu
scales = []
imgids = []
for val_batch in val_batches:
# load the image here and resize
image = cv2.imread(val_batch.data["imgs"][0], cv2.IMREAD_COLOR)
imgid = os.path.splitext(
os.path.basename(val_batch.data["imgs"][0]))[0]
imgids.append(imgid)
assert image is not None, image
image = image.astype("float32")
val_batch.data["imgdata"] = [image]
resized_image = resizeImage(image, config.short_edge_size,
config.max_size)
              # remember the scale and original image
              ori_shape = image.shape[:2]
              #print(image.shape, resized_image.shape)
              # scale = average of the H and W resize ratios
scale = (resized_image.shape[0]*1.0/image.shape[0] + \
resized_image.shape[1]*1.0/image.shape[1])/2.0
val_batch.data["resized_image"] = [resized_image]
scales.append(scale)
outputs = tester.step(sess, val_batch_)
# post process this batch, also remember the ground truth
for i in range(this_batch_num): # num gpu
imgid = imgids[i]
scale = scales[i]
if config.add_act:
if config.act_v2:
boxes, labels, probs, actsingleboxes, actsinglelabels = \
outputs[i]
actsingleboxes = actsingleboxes / scale
else:
boxes, labels, probs, actboxes, actlabels, actprobs = \
outputs[i]
actboxes = actboxes / scale
else:
if config.add_mask:
boxes, labels, probs, masks = outputs[i]
else:
if config.use_small_object_head:
boxes, labels, probs, so_boxes, so_labels, so_probs = \
outputs[i]
so_boxes = so_boxes / scale
else:
boxes, labels, probs = outputs[i]
if config.use_cpu_nms:
boxes, labels, probs = nms_wrapper(boxes, probs, config)
val_batch = val_batches[i]
boxes = boxes / scale
# each class"s detection box and prob
target_dt_boxes = gather_dt(boxes, probs, labels, eval_target,
targetid2class,
tococo=config.tococo,
coco_class_names=config.class_names)
# gt
anno = val_batch.data["gt"][0] # one val_batch is single image
gt_boxes = gather_gt(anno["boxes"], anno["labels"], eval_target,
targetid2class)
# gt_boxes and target_dt_boxes for this image
# eval on one single image
match_dt_gt(e, imgid, target_dt_boxes, gt_boxes, eval_target)
if config.use_small_object_head:
target_so_dt_boxes = gather_dt(
so_boxes, so_probs, so_labels, config.so_eval_target,
config.small_objects_targetid2class)
anno = val_batch.data["gt"][0] # one val_batch is single image
small_object_classids = [targetClass2id[one]
for one in config.small_objects]
idxs = [i for i in range(len(anno["labels"]))
if anno["labels"][i] in small_object_classids]
gt_so_boxes = [anno["boxes"][i] for i in idxs]
# convert the original classid to the small object class id
gt_so_labels = [
small_object_classids.index(anno["labels"][i])+1
for i in idxs]
gt_so_boxes = gather_gt(gt_so_boxes, gt_so_labels,
config.so_eval_target,
config.small_objects_targetid2class)
match_dt_gt(e_so, imgid, target_so_dt_boxes, gt_so_boxes,
config.so_eval_target)
# eval the act box as well, put stuff in e_act
if config.add_act and config.act_v2:
# for v2, we have the single and pair boxes
# actsingleboxes [K,4]
# actsinglelabels [K,num_act_class]
# first we filter the BG boxes
# we select topk act class for each box
topk = config.act_single_topk
single_act_boxes, single_act_labels, single_act_probs = \
gather_act_singles(actsingleboxes, actsinglelabels, topk)
target_act_dt_boxes = gather_dt(
single_act_boxes, single_act_probs, single_act_labels,
act_eval_target, targetsingleactid2class)
# to collect the ground truth, each label will be a stand
# alone boxes
anno = val_batch.data["gt"][0] # one val_batch is single image
gt_single_act_boxes = []
gt_single_act_labels = []
gt_obj_boxes = anno["boxes"]
for bid, label in zip(
anno["actSingleIdxs"], anno["actSingleLabels"]):
if label in act_eval_target:
gt_single_act_boxes.append(gt_obj_boxes[bid])
gt_single_act_labels.append(targetSingleAct2id[label])
gt_act_boxes = gather_gt(
gt_single_act_boxes, gt_single_act_labels,
act_eval_target, targetsingleactid2class)
match_dt_gt(e_act, imgid, target_act_dt_boxes,
gt_act_boxes, act_eval_target)
if config.add_act and not config.act_v2:
target_act_dt_boxes = gather_dt(actboxes, actprobs, actlabels,
act_eval_target,
targetactid2class)
#gt
anno = val_batch.data["gt"][0] # one val_batch is single image
gt_act_boxes = gather_gt(
anno["actboxes"], anno["actlabels"],
act_eval_target, targetactid2class)
# gt_boxes and target_dt_boxes for this image
match_dt_gt(e_act, imgid, target_act_dt_boxes,
gt_act_boxes, act_eval_target)
# we have the dm and g matching for each image in e & e_act
# max detection per image per category
aps, ars = aggregate_eval(e, maxDet=100)
aps_str = "|".join(["%s:%.5f" % (class_, aps[class_])
for class_ in aps])
ars_str = "|".join(["%s:%.5f" % (class_, ars[class_])
for class_ in ars])
#validation_performance = ars[eval_best]
# now we use average AR and average AP or weighted
average_ap, average_ar = weighted_average(
aps, ars, eval_target_weight)
ap_weight = 1.0
ar_weight = 0.0
validation_performance = average_ap*ap_weight + average_ar*ar_weight
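          # with ap_weight=1.0 and ar_weight=0.0 this currently reduces to
          # the weighted average AP; the AR term is kept for easy re-weighting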
if config.add_act:
obj_validation_performance = validation_performance
aps, ars = aggregate_eval(e_act, maxDet=100)
act_aps_str = "|".join(["%s:%.5f"%(class_, aps[class_])
for class_ in aps])
act_ars_str = "|".join(["%s:%.5f"%(class_, ars[class_])
for class_ in ars])
average_ap, average_ar = weighted_average(
aps, ars, act_eval_target_weight)
ap_weight = 0.9
ar_weight = 0.1
act_validation_performance = average_ap*ap_weight + \
average_ar*ar_weight
act_perf_weight = 0.5
obj_perf_weight = 0.5
validation_performance = obj_perf_weight \
* obj_validation_performance + \
act_perf_weight*act_validation_performance
tqdm.write("\tval in %s at step %s, Obj AP:%s, AR:%s, obj "
"performance %s" % (
num_val_steps, global_step, aps_str, ars_str,
obj_validation_performance))
tqdm.write("\tAct AP:%s, AR:%s, this step val:%.5f, previous"
" best val at %s is %.5f" % (
act_aps_str, act_ars_str, validation_performance,
best[1], best[0]))
else:
if config.use_small_object_head:
so_aps, so_ars = aggregate_eval(e_so, maxDet=100)
so_average_ap, so_average_ar = weighted_average(so_aps, so_ars)
so_val = so_average_ap*0.5 + so_average_ar*0.5
so_weight = 0.5
validation_performance = (1 - so_weight)*validation_performance \
+ so_weight*so_val
so_aps_str = "|".join(["%s:%.5f"%(class_, so_aps[class_])
for class_ in so_aps])
so_ars_str = "|".join(["%s:%.5f"%(class_, so_ars[class_])
for class_ in so_ars])
tqdm.write("\tval in %s at step %s, AP:%s, AR:%s, so_AP:%s, "
"so_AR:%s, this step val:%.5f, previous best val "
"at %s is %.5f" % (
num_val_steps, global_step, aps_str, ars_str, so_aps_str,
so_ars_str, validation_performance, best[1], best[0]))
else:
tqdm.write("\tval in %s at step %s, AP:%s, AR:%s, this step "
"val:%.5f, previous best val at %s is %.5f" % (
num_val_steps, global_step, aps_str, ars_str,
validation_performance, best[1], best[0]))
if validation_performance > best[0]:
tqdm.write("\tsaving best model %s..." % global_step)
bestsaver.save(sess, os.path.join(config.save_dir_best, "model"),
global_step=global_step)
tqdm.write("\tdone")
best = (validation_performance, global_step)
isStart = False
if config.exit_after_val:
print("exit after eval.")
break
# skip if the batch is not complete, usually the last few ones
if len(batch[1]) != config.gpu:
continue
try:
#loss, rpn_label_loss, rpn_box_loss, fastrcnn_label_loss, fastrcnn_box_loss, train_op,act_losses = trainer.step(sess,batch)
loss, wds, rpn_label_losses, rpn_box_losses, fastrcnn_label_losses, \
fastrcnn_box_losses, so_label_losses, act_losses, lr = \
trainer.step(sess, batch)
except Exception as e:
print(e)
bs = batch[1]
print("trainer error, batch files:%s"%([b.data["imgs"] for b in bs]))
sys.exit()
if math.isnan(loss):
tqdm.write("warning, nan loss: loss:%s,rpn_label_loss:%s, "
"rpn_box_loss:%s, fastrcnn_label_loss:%s, "
"fastrcnn_box_loss:%s" % (
loss, rpn_label_losses, rpn_box_losses, fastrcnn_label_losses,
fastrcnn_box_losses))
if config.add_act:
tqdm.write("\tact_losses:%s" % (act_losses))
print("batch:%s" % (batch[1][0].data["imgs"]))
sys.exit()
# use moving average to compute loss
loss_me.put(loss)
lr_me.put(lr)
for wd, rpn_label_loss, rpn_box_loss, fastrcnn_label_loss, \
fastrcnn_box_loss, so_label_loss, act_loss in zip(
wds, rpn_label_losses, rpn_box_losses, fastrcnn_label_losses,
fastrcnn_box_losses, so_label_losses, act_losses):
wd_me.put(wd)
rpn_label_loss_me.put(rpn_label_loss)
rpn_box_loss_me.put(rpn_box_loss)
fastrcnn_label_loss_me.put(fastrcnn_label_loss)
fastrcnn_box_loss_me.put(fastrcnn_box_loss)
so_label_loss_me.put(so_label_loss)
act_loss_me.put(act_loss)
if global_step % config.show_loss_period == 0:
tqdm.write("step %s, moving average: learning_rate %.6f, loss %.6f,"
" weight decay loss %.6f, rpn_label_loss %.6f, rpn_box_loss"
" %.6f, fastrcnn_label_loss %.6f, fastrcnn_box_loss %.6f, "
"so_label_loss %.6f, act_loss %.6f" % (
global_step, lr_me.me(), loss_me.me(), wd_me.me(),
rpn_label_loss_me.me(), rpn_box_loss_me.me(),
fastrcnn_label_loss_me.me(), fastrcnn_box_loss_me.me(),
so_label_loss_me.me(), act_loss_me.me()))
# save these for ploting later
stats.append({
"s":float(global_step),
"l":float(loss),
"val":validation_performance
})
isStart = False
    # save the last model if training ended between save_period boundaries
    if global_step % config.save_period != 0:
      print("saved last model without evaluation.")
saver.save(sess, os.path.join(config.save_dir, "model"),
global_step=global_step)
if config.write_self_sum:
self_summary_strs.writeTo(config.self_summary_path)
with open(config.stats_path, "w") as f:
json.dump(stats, f)
# given a list of images, do the forward, save each image result separately
def forward(config):
imagelist = config.imgpath
if config.extract_feat:
assert config.feat_path is not None
assert config.is_fpn
if not os.path.exists(config.feat_path):
os.makedirs(config.feat_path)
print("also extracting fpn features")
all_images = [line.strip() for line in open(config.imgpath, "r").readlines()]
if config.forward_skip > 1:
all_images.sort()
ori_num = len(all_images)
all_images = all_images[::config.forward_skip]
print("skiiping %s, got %s/%s" % (
config.forward_skip, len(all_images), ori_num))
if config.check_img_exist:
exist_imgs = []
for image in all_images:
if os.path.exists(image):
exist_imgs.append(image)
print("%s/%s image exists" % (len(exist_imgs), len(all_images)))
all_images = exist_imgs
print("total images to test:%s"%len(all_images))
if config.use_small_object_head:
if not os.path.exists(config.so_outpath):
os.makedirs(config.so_outpath)
models = []
for i in range(config.gpuid_start, config.gpuid_start+config.gpu):
models.append(get_model(config, i, controller=config.controller))
model_final_boxes = [model.final_boxes for model in models]
# [R]
model_final_labels = [model.final_labels for model in models]
model_final_probs = [model.final_probs for model in models]
if config.extract_feat:
model_feats = [model.fpn_feature for model in models]
if config.add_mask:
# [R,14,14]
model_final_masks = [model.final_masks for model in models]
if config.add_act:
if config.act_v2:
model_act_single_boxes = [model.act_single_boxes for model in models]
model_act_single_label_logits = [model.act_single_label_logits
for model in models]
else:
model_act_final_boxes = [model.act_final_boxes for model in models]
# [R]
model_act_final_labels = [model.act_final_labels for model in models]
model_act_final_probs = [model.act_final_probs for model in models]
tfconfig = tf.ConfigProto(allow_soft_placement=True,
log_device_placement=False)
if not config.use_all_mem:
tfconfig.gpu_options.allow_growth = True
tfconfig.gpu_options.visible_device_list = "%s" % (
",".join(["%s"%i
for i in range(
config.gpuid_start, config.gpuid_start+config.gpu)]))
with tf.Session(config=tfconfig) as sess:
# for packing model, the weights are already loaded
if not config.is_load_from_pb:
initialize(load=True, load_best=config.load_best, config=config,
sess=sess)
# num_epoch should be 1
assert config.num_epochs == 1
count = 0
for images in tqdm(grouper(all_images, config.im_batch_size), ascii=True):
count += 1
if config.start_from > 0:
if count <= config.start_from:
continue
images = [im for im in images if im is not None]
      # multi-gpu needs a full batch of images as input
this_batch_len = len(images)
if this_batch_len != config.im_batch_size:
need = config.im_batch_size - this_batch_len
images.extend(all_images[:need]) # redo some images
scales = []
resized_images = []
ori_shapes = []
imagenames = []
# the folder the image is in, for when we want a two-level output
pathnames = []
feed_dict = {}
for i, image in enumerate(images):
im = cv2.imread(image, cv2.IMREAD_COLOR)
imagename = os.path.splitext(os.path.basename(image))[0]
pathnames.append(image.split("/")[-2])
imagenames.append(imagename)
ori_shape = im.shape[:2]
# need to resize here, otherwise
# InvalidArgumentError (see above for traceback):
#Expected size[1] in [0, 83], but got 120
#[[Node: anchors/fm_anchors = Slice[Index=DT_INT32, T=DT_FLOAT,
#_device="/job:localhost/replica:0/task:0/device:GPU:0"]
#(anchors/all_anchors, anchors/fm_anchors/begin, anchors/stack)]]
resized_image = resizeImage(im, config.short_edge_size, config.max_size)
scale = (resized_image.shape[0]*1.0/im.shape[0] + \
resized_image.shape[1]*1.0/im.shape[1])/2.0
resized_images.append(resized_image)
scales.append(scale)
ori_shapes.append(ori_shape)
feed_dict.update(models[i].get_feed_dict_forward(resized_image))
sess_input = []
if config.just_feat:
outputs = sess.run(model_feats, feed_dict=feed_dict)
for i, feat in enumerate(outputs):
imagename = imagenames[i]
featfile = os.path.join(config.feat_path, "%s.npy"%imagename)
np.save(featfile, feat)
continue # no bounding boxes
if config.add_mask:
for _, boxes, labels, probs, masks in zip(
range(len(images)), model_final_boxes, model_final_labels,
model_final_probs, model_final_masks):
sess_input += [boxes, labels, probs, masks]
else:
if config.add_act:
if config.act_v2:
for _, boxes, labels, probs, actboxes, actlabels in zip(
range(len(images)), model_final_boxes, model_final_labels,
model_final_probs, model_act_single_boxes,
model_act_single_label_logits):
sess_input += [boxes, labels, probs, actboxes, actlabels]
else:
for _, boxes, labels, probs, actboxes, actlabels, actprobs in zip(
range(len(images)), model_final_boxes, model_final_labels,
model_final_probs, model_act_final_boxes,
model_act_final_labels, model_act_final_probs):
sess_input += [boxes, labels, probs, actboxes, actlabels,
actprobs]
else:
if config.extract_feat:
for _, boxes, labels, probs, feats in zip(
range(len(images)), model_final_boxes, model_final_labels,
model_final_probs, model_feats):
sess_input += [boxes, labels, probs, feats]
else:
if config.get_rpn_out:
model_proposal_boxes = [model.proposal_boxes for model in models]
model_proposal_scores = [model.proposal_scores
for model in models]
for _, boxes, labels, probs, prop_boxes, prop_scores in zip(
range(len(images)), model_final_boxes, model_final_labels,
model_final_probs, model_proposal_boxes,
model_proposal_scores):
sess_input += [boxes, labels, probs, prop_boxes, prop_scores]
else:
if config.use_small_object_head:
model_so_boxes = [model.so_final_boxes for model in models]
model_so_probs = [model.so_final_probs for model in models]
model_so_labels = [model.so_final_labels for model in models]
for _, boxes, labels, probs, so_boxes, so_labels, \
so_probs in zip(
range(len(images)), model_final_boxes,
model_final_labels, model_final_probs, model_so_boxes,
model_so_labels, model_so_probs):
sess_input += [boxes, labels, probs, so_boxes, so_labels,
so_probs]
else:
for _, boxes, labels, probs in zip(
range(len(images)), model_final_boxes, model_final_labels,
model_final_probs):
sess_input += [boxes, labels, probs]
outputs = sess.run(sess_input, feed_dict=feed_dict)
if config.add_mask:
pn = 4
else:
pn = 3
if config.add_act:
pn = 6
if config.act_v2:
pn = 5
else:
if config.extract_feat:
pn = 4
elif config.get_rpn_out:
pn = 5
elif config.use_small_object_head:
pn = 6
outputs = [outputs[i*pn:(i*pn+pn)] for i in range(len(images))]
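      # sess_input was built as pn tensors per image, so slice the flat
      # sess.run outputs back into per-image groups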
for i, output in enumerate(outputs):
scale = scales[i]
ori_shape = ori_shapes[i]
imagename = imagenames[i]
if config.add_mask:
final_boxes, final_labels, final_probs, final_masks = output
final_boxes = final_boxes / scale
final_masks = [fill_full_mask(box, mask, ori_shape)
for box, mask in zip(final_boxes, final_masks)]
else:
if config.add_act:
if config.act_v2:
final_boxes, final_labels, final_probs, actsingleboxes, \
actsinglelabels = output
actsingleboxes = actsingleboxes / scale
else:
final_boxes, final_labels, final_probs, actboxes,\
actlabels, actprobs = output
actboxes = actboxes / scale
else:
if config.extract_feat:
final_boxes, final_labels, final_probs, final_feat = output
#print(final_feats.shape# [1,7,7,256]
# save the features
featfile = os.path.join(config.feat_path, "%s.npy"%imagename)
np.save(featfile, final_feat)
else:
if config.get_rpn_out:
final_boxes, final_labels, final_probs, prop_boxes, \
prop_scores = output
prop_boxes = prop_boxes / scale
props = np.concatenate(
[prop_boxes, np.expand_dims(prop_scores, axis=-1)],
axis=-1) # [K, 5]
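                # each row: (x1, y1, x2, y2, objectness score)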
# save the proposal boxes,
prop_file = os.path.join(config.rpn_out_path,
"%s.npy" % imagename)
np.save(prop_file, props)
else:
if config.use_small_object_head:
final_boxes, final_labels, final_probs, final_so_boxes, \
final_so_labels, final_so_probs = output
else:
final_boxes, final_labels, final_probs = output
if config.use_cpu_nms:
if not config.no_nms:
final_boxes, final_labels, final_probs = nms_wrapper(
final_boxes, final_probs, config)
final_boxes = final_boxes / scale
final_masks = [None for one in final_boxes]
if config.no_nms:
# will leave all K boxes, each box class is the max prob class
# final_boxes would be [num_class-1, K, 4]
# final_probs would be [num_class-1, K]
# final_labels is actually rcnn_boxes, [K, 4]
if config.save_all_box: # save all output as npz file instead
rcnn_boxes = final_labels
rcnn_boxes = rcnn_boxes / scale
# boxes are [x1, y1, x2, y2]
if config.use_frcnn_class_agnostic:
                      if final_boxes.size > 0:
assert final_boxes[0, 1, 2] == final_boxes[1, 1, 2]
final_boxes = final_boxes[0, :, :] # [K, 4]
data = {
"rcnn_boxes": rcnn_boxes, # [K, 4]
"frcnn_boxes": final_boxes, # [C, K, 4] / [K, 4]
"frcnn_probs": final_probs, # [C, K] # C is num_class -1
}
target_file = os.path.join(config.outbasepath, "%s.npz"%imagename)
np.savez(target_file, **data)
continue # next image
else:
num_cat, num_box = final_boxes.shape[:2]
# [K]
best_cat = np.argmax(final_probs, axis=0)
# get the final labels first
final_labels = best_cat + 1
# use the final boxes, select the best cat for each box
final_boxes2 = np.zeros([num_box, 4], dtype="float")
                    for bid in range(num_box):
                      final_boxes2[bid, :] = final_boxes[best_cat[bid], bid, :]
final_boxes = final_boxes2
final_probs = np.amax(final_probs, axis=0) # [K]
final_masks = [None for one in final_boxes]
pred = []
for j, (box, prob, label, mask) in enumerate(
zip(final_boxes, final_probs, final_labels, final_masks)):
box[2] -= box[0]
box[3] -= box[1] # produce x,y,w,h output
cat_id = int(label)
cat_name = targetid2class[cat_id]
# encode mask
rle = None
if config.add_mask:
rle = cocomask.encode(np.array(mask[:, :, None], order="F"))[0]
rle["counts"] = rle["counts"].decode("ascii")
res = {
"category_id":cat_id,
"cat_name":cat_name, # [0-80]
"score":float(round(prob, 4)),
#"bbox": list(map(lambda x:float(round(x,1)),box)),
"bbox": [float(round(x, 1)) for x in box],
"segmentation":rle,
}
pred.append(res)
# save the data
outbasepath = config.outbasepath
if config.use_two_level_outpath:
pathname = pathnames[i]
outbasepath = os.path.join(config.outbasepath, pathname)
if not os.path.exists(outbasepath):
os.makedirs(outbasepath)
resultfile = os.path.join(outbasepath, "%s.json"%imagename)
with open(resultfile, "w") as f:
json.dump(pred, f)
if config.use_small_object_head:
so_pred = []
for j, (so_box, so_prob, so_label) in enumerate(
zip(final_so_boxes, final_so_probs, final_so_labels)):
so_box[2] -= so_box[0]
so_box[3] -= so_box[1] # produce x,y,w,h output
# so_label is the class id in the small objects,
# here the cat_id should follow the original class
cat_name = config.small_objects_targetid2class[so_label]
cat_id = targetClass2id[cat_name]
res = {
"category_id": cat_id,
"cat_name": cat_name,
"score": float(round(so_prob, 4)),
#"bbox": list(map(lambda x:float(round(x,1)), so_box)),
"bbox": [float(round(x, 1)) for x in so_box],
"segmentation": None,
}
so_pred.append(res)
resultfile = os.path.join(config.so_outpath, "%s.json" % imagename)
with open(resultfile, "w") as f:
json.dump(so_pred, f)
if config.add_act:
act_pred = []
if config.act_v2:
# assemble the single boxes and pair boxes?
topk = config.act_single_topk
single_act_boxes, single_act_labels, single_act_probs = \
gather_act_singles(actsingleboxes, actsinglelabels, topk)
for j, (act_box, act_prob, act_label) in enumerate(
zip(single_act_boxes, single_act_probs, single_act_labels)):
act_box[2] -= act_box[0]
act_box[3] -= act_box[1]
act_name = targetsingleactid2class[act_label]
res = {
"category_id":act_label,
"cat_name":act_name,
"score":float(round(act_prob, 4)),
#"bbox": list(map(lambda x:float(round(x,1)),act_box)),
"bbox": [float(round(x, 1)) for x in act_box],
"segmentation":None,
"v2":1,
"single":1,
}
act_pred.append(res)
else:
for j, (act_box, act_prob, act_label) in enumerate(
zip(actboxes, actprobs, actlabels)):
act_box[2] -= act_box[0]
act_box[3] -= act_box[1]
act_name = targetactid2class[act_label]
res = {
"category_id":act_label,
"cat_name":act_name,
"score":float(round(act_prob, 4)),
#"bbox": list(map(lambda x:float(round(x,1)),act_box)),
"bbox": [float(round(x, 1)) for x in act_box],
"segmentation":None,
"v2":0,
}
act_pred.append(res)
# save the act data
resultfile = os.path.join(config.actoutbasepath, "%s.json"%imagename)
with open(resultfile, "w") as f:
json.dump(act_pred, f)
def read_data_coco(datajson, config, add_gt=False, load_coco_class=False):
with open(datajson, "r") as f:
dj = json.load(f)
if load_coco_class:
add_coco(config, datajson)
data = {"imgs":[], "ids":[]}
if add_gt:
data = {"imgs":[], "ids":[], "gt":[]}
# read coco annotation file
for one in dj["images"]:
imgid = int(one["id"])
imgfile = os.path.join(config.imgpath, one["file_name"])
if config.coco2014_to_2017:
imgfile = os.path.join(config.imgpath, one["file_name"].split("_")[-1])
data["imgs"].append(imgfile)
data["ids"].append(imgid)
if add_gt:
# load the bounding box and so on
pass
return Dataset(data, add_gt=add_gt)
# for testing, dataset -> {"imgs":[],"ids":[]}, imgs is the image file path,
def forward_coco(dataset, num_batches, config, sess, tester, resize=True):
assert not config.diva_class # not working for this yet
# "id" -> (boxes, probs, labels, masks)
#pred = {}
# each is (image_id,cat_id,bbox,score,segmentation)
pred = []
for evalbatch in tqdm(
dataset.get_batches(config.im_batch_size, num_batches=num_batches,
          shuffle=False, cap=True), total=num_batches):
_, batches = evalbatch
scales = []
ori_shapes = []
image_ids = []
for batch in batches:
# load the image here and resize
image = cv2.imread(batch.data["imgs"][0], cv2.IMREAD_COLOR)
assert image is not None, batch.data["imgs"][0]
image = image.astype("float32")
imageId = batch.data["ids"][0]
image_ids.append(imageId)
batch.data["imgdata"] = [image]
#if imageId != 139:
# continue
# resize image
# ppwwyyxx"s code do resizing in eval
if resize:
resized_image = resizeImage(image, config.short_edge_size,
config.max_size)
else:
resized_image = image
      # remember the scale and original image
      ori_shape = image.shape[:2]
      #print(image.shape, resized_image.shape)
      # scale = average of the H and W resize ratios
scale = (resized_image.shape[0]*1.0/image.shape[0] + \
resized_image.shape[1]*1.0/image.shape[1])/2.0
batch.data["resized_image"] = [resized_image]
scales.append(scale)
ori_shapes.append(ori_shape)
outputs = tester.step(sess, evalbatch)
for i, output in enumerate(outputs):
scale = scales[i]
ori_shape = ori_shapes[i]
imgid = image_ids[i]
if config.add_mask:
final_boxes, final_labels, final_probs, final_masks = output
final_boxes = final_boxes / scale
final_masks = [fill_full_mask(box, mask, ori_shape)
for box, mask in zip(final_boxes, final_masks)]
else:
final_boxes, final_labels, final_probs = output
final_boxes = final_boxes / scale
final_masks = [None for one in final_boxes]
for box, prob, label, mask in zip(final_boxes, final_probs,
final_labels, final_masks):
box[2] -= box[0]
box[3] -= box[1]
cat_id = config.classId_to_cocoId[label]
# encode mask
rle = None
if config.add_mask:
rle = cocomask.encode(np.array(mask[:, :, None], order="F"))[0]
rle["counts"] = rle["counts"].decode("ascii")
res = {
"image_id":imgid,#int
"category_id":cat_id,
"cat_name":config.class_names[label], #[0-80]
"score":float(round(prob, 4)),
#"bbox": list(map(lambda x:float(round(x,1)),box)),
"bbox": [float(round(x, 1)) for x in box],
"segmentation":rle
}
pred.append(res)
#print([(one["category_id"],one["score"],one["bbox"]) for one in pred]
#print(imageId
#sys.exit()
return pred
# test on coco dataset
def test(config):
test_data = read_data_coco(
config.datajson, config=config, add_gt=False, load_coco_class=True)
print("total testing samples:%s" % test_data.num_examples)
models = []
for i in range(config.gpuid_start, config.gpuid_start+config.gpu):
models.append(get_model(config, i, controller=config.controller))
tester = Tester(models, config, add_mask=config.add_mask)
tfconfig = tf.ConfigProto(allow_soft_placement=True)
if not config.use_all_mem:
tfconfig.gpu_options.allow_growth = True
with tf.Session(config=tfconfig) as sess:
initialize(load=True, load_best=config.load_best, config=config, sess=sess)
# num_epoch should be 1
assert config.num_epochs == 1
num_steps = int(math.ceil(
test_data.num_examples/float(config.im_batch_size)))*config.num_epochs
# a list of imageids
pred = forward_coco(test_data, num_steps, config, sess, tester, resize=True)
#with open("coco.json","w") as f:
# json.dump(pred,f)
if config.use_coco_eval:
evalcoco(pred, config.datajson, add_mask=config.add_mask)
else:
# check our AP implementation, use our map implementation
# load the annotation first
all_cat_ids = {}
with open(config.datajson, "r") as f:
data = json.load(f)
gt = {} # imageid -> boxes:[], catids
for one in data["annotations"]:
cat_id = one["category_id"]
all_cat_ids[cat_id] = 1
imageid = int(one["image_id"])
if imageid not in gt:
gt[imageid] = {} # cat_ids -> boxes[]
#gt[imageid]["boxes"].append(one["bbox"]) # (x,y,w,h), float
#gt[imageid]["cat_ids"].append(one["category_id"])
if cat_id not in gt[imageid]:
gt[imageid][cat_id] = []
gt[imageid][cat_id].append(one["bbox"])
print("total category:%s" % len(all_cat_ids))
# get the aps/ars for each frame
dt = {} # imageid -> cat_id -> {boxes,scores}
for one in pred:
imageid = one["image_id"]
dt_bbox = one["bbox"]
score = one["score"]
cat_id = one["category_id"]
if imageid not in dt:
dt[imageid] = {}
if cat_id not in dt[imageid]:
dt[imageid][cat_id] = []
dt[imageid][cat_id].append((dt_bbox, score))
# accumulate all detection and compute AP once
e = {} # imageid -> catid
start = time.time()
for imageid in gt:
e[imageid] = {}
for cat_id in gt[imageid]:
g = gt[imageid][cat_id]
e[imageid][cat_id] = {
"dscores":[],
"dm":[],
"gt_num":len(g),
}
d = []
dscores = []
if imageid in dt and cat_id in dt[imageid]:
# sort the boxes based on the score first
dt[imageid][cat_id].sort(key=operator.itemgetter(1), reverse=True)
for boxes, score in dt[imageid][cat_id]:
d.append(boxes)
dscores.append(score)
dm, gm = match_detection(
d, g, cocomask.iou(d, g, [0 for _ in range(len(g))]),
iou_thres=0.5)
e[imageid][cat_id]["dscores"] = dscores
e[imageid][cat_id]["dm"] = dm
# accumulate results
maxDet = 100 # max detection per image per category
aps = {}
ars = {}
for catId in all_cat_ids:
# put all detection scores from all image together
dscores = np.concatenate(
[e[imageid][catId]["dscores"][:maxDet]
for imageid in e if catId in e[imageid]])
# sort
inds = np.argsort(-dscores, kind="mergesort")
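        # mergesort is stable, so detections with equal scores keep their
        # original (per-image) ordering, matching pycocotools' evaluation sort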
dscores_sorted = dscores[inds]
# put all detection annotation together based on the score sorting
dm = np.concatenate(
[e[imageid][catId]["dm"][:maxDet]
for imageid in e if catId in e[imageid]])[inds]
num_gt = np.sum([e[imageid][catId]["gt_num"]
for imageid in e if catId in e[imageid]])
aps[catId] = computeAP(dm)
ars[catId] = computeAR_2(dm, num_gt)
mean_ap = np.mean([aps[catId] for catId in aps])
mean_ar = np.mean([ars[catId] for catId in ars])
took = time.time() - start
print("total dt image:%s, gt image:%s" % (len(dt), len(gt)))
print("mean AP with IoU 0.5:%s, mean AR with max detection %s:%s, "
"took %s seconds" % (mean_ap, maxDet, mean_ar, took))
def cal_total_param():
total = 0
for var in tf.trainable_variables():
shape = var.get_shape()
var_num = 1
for dim in shape:
var_num *= dim.value
total += var_num
return total
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
gpu_util_logs = []
gpu_temp_logs = []
# use nvidia-smi to log gpu utilization and temperature at fixed intervals
def log_gpu_util(interval, gpuid_range):
global gpu_util_logs
while True:
time.sleep(interval)
gpu_temps, gpu_utils = parse_nvidia_smi(gpuid_range)
gpu_util_logs.extend(gpu_utils)
gpu_temp_logs.extend(gpu_temps)
if __name__ == "__main__":
config = get_args()
if config.mode == "pack":
config.is_pack_model = True
if config.is_pack_model:
pack(config)
else:
if config.log_time_and_gpu:
gpu_log_interval = 10 # every k seconds
start_time = time.time()
gpu_check_thread = threading.Thread(
target=log_gpu_util,
args=[gpu_log_interval, (config.gpuid_start, config.gpu)])
gpu_check_thread.daemon = True
gpu_check_thread.start()
if config.mode == "train":
train_diva(config)
elif config.mode == "test":
test(config)
elif config.mode == "forward":
forward(config)
else:
raise Exception("mode %s not supported"%(config.mode))
if config.log_time_and_gpu:
end_time = time.time()
print("total run time %s (%s), log gpu utilize every %s seconds and "
"get median %.2f%% and average %.2f%%. GPU temperature median "
"%.2f and average %.2f (C)" % (
sec2time(end_time - start_time),
end_time - start_time,
gpu_log_interval,
np.median(gpu_util_logs)*100,
np.mean(gpu_util_logs)*100,
np.median(gpu_temp_logs),
np.mean(gpu_temp_logs),
))
| 83,635
| 0
| 247
|
9c4d73291d6906245b28999c70a3f800826072e4
| 1,889
|
py
|
Python
|
ingest/currency/currency.py
|
ourresearch/journalsdb
|
169feb9be684eac59f3294dccdb319eb10fe1958
|
[
"MIT"
] | 8
|
2021-02-01T21:00:20.000Z
|
2022-01-25T09:51:24.000Z
|
ingest/currency/currency.py
|
ourresearch/journalsdb
|
169feb9be684eac59f3294dccdb319eb10fe1958
|
[
"MIT"
] | 43
|
2021-04-28T00:20:53.000Z
|
2022-03-09T00:39:56.000Z
|
ingest/currency/currency.py
|
ourresearch/journalsdb
|
169feb9be684eac59f3294dccdb319eb10fe1958
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
from app import app, db
from models.price import Currency
@app.cli.command("import_currency")
def import_currency():
"""
Loads up a provided CSV file of world currencies and extracts the unicode symbol
and text label associated with each currency. Loads these into the Currency table
in the price model.
CSV to use: https://gist.github.com/Chintan7027/fc4708d8b5c8d1639a7c#file-currency-symbols-csv
The Serbia Dinar has a '.' at the end which is removed by this script.
INR value should be updated manually with the _unicode-decimal value 8377
Run with 'flask import_currency'
"""
csv_file = os.path.join(app.root_path, "ingest/currency/currency.csv")
df = pd.read_csv(csv_file)
df = df.dropna()
MAX_SYMBOL_LENGTH = 3
for index, row in df.iterrows():
unicode_decimal = str(row["_unicode-decimal"])
unicode_as_array = unicode_decimal.split(", ")[:MAX_SYMBOL_LENGTH]
symbol = "".join([chr(int(u)) for u in unicode_as_array])
acronym = str(row["_code"])
text = row["__text"]
currency = db.session.query(Currency).filter_by(acronym=acronym).one_or_none()
if not currency:
print("adding currency: ", acronym + " " + text + " " + symbol)
currency = Currency(symbol=symbol, acronym=acronym, text=text)
db.session.add(currency)
else:
currency.symbol = symbol
currency.acronym = acronym
currency.text = text
db.session.commit()
@app.cli.command("delete_currency_table_values")
def delete_currency():
"""
Deletes all Currency entries from the database.
Run with 'flask delete_currency_table_values'
"""
currencies = db.session.query(Currency).all()
for currency in currencies:
db.session.delete(currency)
db.session.commit()
| 32.568966
| 98
| 0.670725
|
| 0
| 0
| 0
|
e161d2751528bb56774b08bbf120664eaeffc03a
| 1,929
|
py
|
Python
|
estimators/single_ix_stratification.py
|
saadlabyad/aslsd
|
95a1cc660079972b45a77ec6dc587d9225489212
|
[
"BSD-3-Clause"
] | null | null | null |
estimators/single_ix_stratification.py
|
saadlabyad/aslsd
|
95a1cc660079972b45a77ec6dc587d9225489212
|
[
"BSD-3-Clause"
] | null | null | null |
estimators/single_ix_stratification.py
|
saadlabyad/aslsd
|
95a1cc660079972b45a77ec6dc587d9225489212
|
[
"BSD-3-Clause"
] | null | null | null |
# License: BSD 3 clause
import numpy as np
| 35.072727
| 75
| 0.567652
|
# License: BSD 3 clause
import numpy as np
class SingleIxStratification:
def __init__(self, n_events_i, **kwargs):
self.n_events_i = n_events_i
self.n_exact = kwargs.get('n_exact', 10**4)
self.strata = kwargs.get('strata', None)
self.n_strata = kwargs.get('n_strata', None)
self.abs_alloc = kwargs.get('abs_alloc', None)
self.n_exact = min(self.n_exact, self.n_events_i-1)
        # m_spec is the index of the term starting from which we evaluate
        # functions at all indices
self.m_spec = self.n_events_i-self.n_exact
if self.strata is None:
self.strata = self.get_default_strata()
self.n_strata = len(self.strata)
strata_sizes = self.get_stratification_size()
self.strata_sizes = strata_sizes
        if self.abs_alloc is None:
            self.abs_alloc = self.get_default_abs_alloc()
        elif len(self.abs_alloc) != self.n_strata:
            raise ValueError("Incompatible lengths between "
                             "stratification and absolute allocation")
def get_default_strata(self):
if self.m_spec <= 100:
return [[0, self.m_spec-1]]
else:
L = np.logspace(0, np.log10(self.m_spec), num=10)
# Correct for roundoff errors
L[-1] = self.m_spec
L = list(L)
L = [int(x) for x in L]
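            # e.g. m_spec=1000 -> L = [1, 2, 4, 10, 21, 46, 100, 215, 464, 1000]
            # -> strata [[1, 1], [2, 3], [4, 9], ..., [464, 999]]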
return [[L[i], L[i+1]-1] for i in range(len(L)-1)]
def get_default_abs_alloc(self):
return np.array([min(10**3, self.strata_sizes[x])
for x in range(self.n_strata)])
def get_stratification_size(self):
strata_sizes = np.array([L[1]-L[0]+1 for L in self.strata])
return strata_sizes
| 1,733
| 8
| 139
|
040a6ffedbd2b9090e96d29e4bad69ccbc0fc6f9
| 2,305
|
py
|
Python
|
match/management/commands/rebuild_rankings.py
|
klobucar/statkeeper
|
6083d686fd04ef628ef1fffbb3154374d886a3da
|
[
"MIT"
] | 1
|
2015-10-15T22:29:48.000Z
|
2015-10-15T22:29:48.000Z
|
match/management/commands/rebuild_rankings.py
|
klobucar/statkeeper
|
6083d686fd04ef628ef1fffbb3154374d886a3da
|
[
"MIT"
] | null | null | null |
match/management/commands/rebuild_rankings.py
|
klobucar/statkeeper
|
6083d686fd04ef628ef1fffbb3154374d886a3da
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from match.models import Ranking, Match, Game, ParticipantRole, t
| 40.438596
| 90
| 0.658568
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from match.models import Ranking, Match, Game, ParticipantRole, t
class Command(BaseCommand):
help = 'Rebuilds all of the ratings from scratch.'
def handle(self, *args, **options):
# Clear out all of the current ratings
#Ranking.objects.all().delete()
for game in Game.objects.all():
ratings_cache = {}
for match in Match.objects.filter(game=game).order_by('timestamp'):
# Find all the participants for this match and split 'em out
participants = list(match.participant_set.all())
winners = [ p for p in participants if p.role == ParticipantRole.Win ]
losers = [ p for p in participants if p.role == ParticipantRole.Loss ]
drawers = [ p for p in participants if p.role == ParticipantRole.Draw ]
if drawers:
raise CommandError('Does not handle draws yet.')
if not winners or not losers:
raise CommandError('Ehh? Missing participants on match id %d' % match.id)
# Get the results
wins, losses = match.parse_results()
# Get the ids for the participants (in order)
winner_ids = [ w.user_id for w in winners ]
loser_ids = [ l.user_id for l in losers ]
# Get the ratings for those ids (also in order)
winner_ratings = [ ratings_cache.get(id, t.create_rating()) for id in winner_ids ]
loser_ratings = [ ratings_cache.get(id, t.create_rating()) for id in loser_ids ]
# Compute the ratings
for i in xrange(losses):
loser_ratings, winner_ratings = t.rate([loser_ratings, winner_ratings])
for i in xrange(wins):
winner_ratings, loser_ratings = t.rate([winner_ratings, loser_ratings])
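                # Note: t.rate (a TrueSkill-style rater imported from match.models)
                # takes rating groups with the round's winning group first and
                # returns them updated in the same order, which is why the two
                # loops above simply swap the argument order.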
# Use the fact that things are ordered to update the caches
for id, rating in zip(loser_ids, loser_ratings):
ratings_cache[id] = rating
for id, rating in zip(winner_ids, winner_ratings):
ratings_cache[id] = rating
users = User.objects.in_bulk(ratings_cache.keys())
for id, user in users.iteritems():
r, _ = Ranking.objects.get_or_create(user=user, game=game)
r.from_rating(ratings_cache[id])
r.save()
print r
| 2,021
| 84
| 23
|
7cdbc9bb02cf3f232c29d81fee332bc8cb2ff426
| 10,611
|
py
|
Python
|
tests/integration/resources_permissions/test_modules_resources.py
|
aavcc/taiga-openshift
|
7c33284573ceed38f755b8159ad83f3f68d2f7cb
|
[
"MIT"
] | null | null | null |
tests/integration/resources_permissions/test_modules_resources.py
|
aavcc/taiga-openshift
|
7c33284573ceed38f755b8159ad83f3f68d2f7cb
|
[
"MIT"
] | 12
|
2019-11-25T14:08:32.000Z
|
2021-06-24T10:35:51.000Z
|
tests/integration/resources_permissions/test_modules_resources.py
|
threefoldtech/Threefold-Circles
|
cbc433796b25cf7af9a295af65d665a4a279e2d6
|
[
"Apache-2.0"
] | 1
|
2018-06-07T10:58:15.000Z
|
2018-06-07T10:58:15.000Z
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2017 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid
from django.core.urlresolvers import reverse
from taiga.permissions.choices import MEMBERS_PERMISSIONS, ANON_PERMISSIONS
from taiga.base.utils import json
from tests import factories as f
from tests.utils import helper_test_http_method, disconnect_signals, reconnect_signals
from taiga.projects import choices as project_choices
from taiga.projects.votes.services import add_vote
from taiga.projects.notifications.services import add_watcher
from taiga.projects.occ import OCCResourceMixin
from unittest import mock
import pytest
pytestmark = pytest.mark.django_db
@pytest.fixture
| 46.336245
| 113
| 0.628499
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2017 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid
from django.core.urlresolvers import reverse
from taiga.permissions.choices import MEMBERS_PERMISSIONS, ANON_PERMISSIONS
from taiga.base.utils import json
from tests import factories as f
from tests.utils import helper_test_http_method, disconnect_signals, reconnect_signals
from taiga.projects import choices as project_choices
from taiga.projects.votes.services import add_vote
from taiga.projects.notifications.services import add_watcher
from taiga.projects.occ import OCCResourceMixin
from unittest import mock
import pytest
pytestmark = pytest.mark.django_db
def setup_module(module):
disconnect_signals()
def teardown_module(module):
reconnect_signals()
@pytest.fixture
def data():
m = type("Models", (object,), {})
m.registered_user = f.UserFactory.create()
m.project_member_with_perms = f.UserFactory.create()
m.project_member_without_perms = f.UserFactory.create()
m.project_owner = f.UserFactory.create()
m.other_user = f.UserFactory.create()
m.public_project = f.ProjectFactory(is_private=False,
anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
public_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
owner=m.project_owner)
m.private_project1 = f.ProjectFactory(is_private=True,
anon_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
public_permissions=list(map(lambda x: x[0], ANON_PERMISSIONS)),
owner=m.project_owner)
m.private_project2 = f.ProjectFactory(is_private=True,
anon_permissions=[],
public_permissions=[],
owner=m.project_owner)
m.blocked_project = f.ProjectFactory(is_private=True,
anon_permissions=[],
public_permissions=[],
owner=m.project_owner,
blocked_code=project_choices.BLOCKED_BY_STAFF)
m.public_membership = f.MembershipFactory(project=m.public_project,
user=m.project_member_with_perms,
role__project=m.public_project,
role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
m.private_membership1 = f.MembershipFactory(project=m.private_project1,
user=m.project_member_with_perms,
role__project=m.private_project1,
role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
f.MembershipFactory(project=m.private_project1,
user=m.project_member_without_perms,
role__project=m.private_project1,
role__permissions=[])
m.private_membership2 = f.MembershipFactory(project=m.private_project2,
user=m.project_member_with_perms,
role__project=m.private_project2,
role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
f.MembershipFactory(project=m.private_project2,
user=m.project_member_without_perms,
role__project=m.private_project2,
role__permissions=[])
m.blocked_membership = f.MembershipFactory(project=m.blocked_project,
user=m.project_member_with_perms,
role__project=m.blocked_project,
role__permissions=list(map(lambda x: x[0], MEMBERS_PERMISSIONS)))
f.MembershipFactory(project=m.blocked_project,
user=m.project_member_without_perms,
role__project=m.blocked_project,
role__permissions=[])
f.MembershipFactory(project=m.public_project,
user=m.project_owner,
is_admin=True)
f.MembershipFactory(project=m.private_project1,
user=m.project_owner,
is_admin=True)
f.MembershipFactory(project=m.private_project2,
user=m.project_owner,
is_admin=True)
f.MembershipFactory(project=m.blocked_project,
user=m.project_owner,
is_admin=True)
return m
def test_modules_retrieve(client, data):
public_url = reverse('projects-modules', kwargs={"pk": data.public_project.pk})
private_url1 = reverse('projects-modules', kwargs={"pk": data.private_project1.pk})
private_url2 = reverse('projects-modules', kwargs={"pk": data.private_project2.pk})
blocked_url = reverse('projects-modules', kwargs={"pk": data.blocked_project.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
results = helper_test_http_method(client, 'get', public_url, None, users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'get', private_url1, None, users)
assert results == [401, 403, 403, 403, 200]
results = helper_test_http_method(client, 'get', private_url2, None, users)
assert results == [404, 404, 404, 403, 200]
results = helper_test_http_method(client, 'get', blocked_url, None, users)
assert results == [404, 404, 404, 403, 200]
def test_modules_update(client, data):
public_url = reverse('projects-modules', kwargs={"pk": data.public_project.pk})
private_url1 = reverse('projects-modules', kwargs={"pk": data.private_project1.pk})
private_url2 = reverse('projects-modules', kwargs={"pk": data.private_project2.pk})
blocked_url = reverse('projects-modules', kwargs={"pk": data.blocked_project.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
results = helper_test_http_method(client, 'put', public_url, {"att": "test"}, users)
assert results == [405, 405, 405, 405, 405]
results = helper_test_http_method(client, 'put', private_url1, {"att": "test"}, users)
assert results == [405, 405, 405, 405, 405]
results = helper_test_http_method(client, 'put', private_url2, {"att": "test"}, users)
assert results == [405, 405, 405, 405, 405]
results = helper_test_http_method(client, 'put', blocked_url, {"att": "test"}, users)
assert results == [405, 405, 405, 405, 405]
def test_modules_delete(client, data):
public_url = reverse('projects-modules', kwargs={"pk": data.public_project.pk})
private_url1 = reverse('projects-modules', kwargs={"pk": data.private_project1.pk})
private_url2 = reverse('projects-modules', kwargs={"pk": data.private_project2.pk})
blocked_url = reverse('projects-modules', kwargs={"pk": data.blocked_project.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
]
results = helper_test_http_method(client, 'delete', public_url, None, users)
assert results == [405, 405, 405, 405]
results = helper_test_http_method(client, 'delete', private_url1, None, users)
assert results == [405, 405, 405, 405]
results = helper_test_http_method(client, 'delete', private_url2, None, users)
assert results == [405, 405, 405, 405]
results = helper_test_http_method(client, 'delete', blocked_url, None, users)
assert results == [405, 405, 405, 405]
def test_modules_patch(client, data):
public_url = reverse('projects-modules', kwargs={"pk": data.public_project.pk})
private_url1 = reverse('projects-modules', kwargs={"pk": data.private_project1.pk})
private_url2 = reverse('projects-modules', kwargs={"pk": data.private_project2.pk})
blocked_url = reverse('projects-modules', kwargs={"pk": data.blocked_project.pk})
users = [
None,
data.registered_user,
data.project_member_without_perms,
data.project_member_with_perms,
data.project_owner
]
with mock.patch.object(OCCResourceMixin, "_validate_and_update_version"):
patch_data = json.dumps({"att": "test"})
results = helper_test_http_method(client, 'patch', public_url, patch_data, users)
assert results == [401, 403, 403, 403, 204]
patch_data = json.dumps({"att": "test"})
results = helper_test_http_method(client, 'patch', private_url1, patch_data, users)
assert results == [401, 403, 403, 403, 204]
patch_data = json.dumps({"att": "test"})
results = helper_test_http_method(client, 'patch', private_url2, patch_data, users)
assert results == [404, 404, 404, 403, 204]
patch_data = json.dumps({"att": "test"})
results = helper_test_http_method(client, 'patch', blocked_url, patch_data, users)
assert results == [404, 404, 404, 403, 451]
| 8,852
| 0
| 160
|
cd2800c70b3b2e622fe1914ac86a58e45bb8462d
| 5,068
|
py
|
Python
|
rssdldmng/rssdldapi.py
|
alexpayne482/rssdldmng
|
4428f10171902861702fc0f528d3d9576923541a
|
[
"MIT"
] | null | null | null |
rssdldmng/rssdldapi.py
|
alexpayne482/rssdldmng
|
4428f10171902861702fc0f528d3d9576923541a
|
[
"MIT"
] | 1
|
2019-11-25T15:54:02.000Z
|
2019-11-25T15:54:02.000Z
|
rssdldmng/rssdldapi.py
|
alexpayne482/rssdldmng
|
4428f10171902861702fc0f528d3d9576923541a
|
[
"MIT"
] | null | null | null |
import logging
from rssdldmng.utils.restserver import RESTHttpServer
_LOGGER = logging.getLogger(__name__)
| 38.687023
| 112
| 0.545185
|
import logging
from rssdldmng.utils.restserver import RESTHttpServer
_LOGGER = logging.getLogger(__name__)
class ApiServer(RESTHttpServer):
def __init__(self, port, mng):
self.routes = {
r'^/$': {'file': '/index.html', 'media_type': 'text/html'},
r'^\/(?!api\/).*$': {'file': '/', 'media_type': 'text/html'},
r'^/api/config$': {'GET': self.get_config, 'media_type': 'application/json'},
r'^/api/shows$': {'GET': self.get_shows, 'media_type': 'application/json'},
r'^/api/latest$': {'GET': self.get_latest, 'media_type': 'application/json'},
r'^/api/status$': {'GET': self.get_status, 'media_type': 'application/json'},
r'^/api/checkfeed.*$': {'PUT': self.put_checkfeed, 'media_type': 'application/json'},
r'^/api/db/.*$': {'GET': self.get_db, 'PUT': self.put_db, 'media_type': 'application/json'},
# r'^/api/setshows$': {'PUT': self.put_shows, 'media_type': 'application/json'},
r'^/api/trakt/list.*$': {'GET': self.get_traktlist, 'media_type': 'application/json'},
r'^/api/test.*$': {'GET': self.test, 'media_type': 'application/json'},
}
self.manager = mng
self.servedir = '.' # os.path.join(self.manager.config['cfgdir'], 'www')
RESTHttpServer.__init__(self, '', port, self.routes, self.servedir)
def get_config(self, handler):
return self.manager.config
def get_shows(self, handler):
if not self.manager.downloader:
return 'internal error'
return self.manager.downloader.series
def put_shows(self, handler):
if type(self.manager.config['downloader']['filters']['series']) is str:
return 'FAIL'
shows = handler.get_payload()
_LOGGER.debug("set_shows: {0}".format(shows))
self.manager.config['downloader']['filters']['series'] = shows
self.manager.save_config(self.manager.config)
return 'OK'
def get_latest(self, handler):
return self.manager.get_latest(21)
def get_status(self, handler):
return self.manager.get_status(21)
def get_traktlist(self, handler):
args, params = self.get_args(handler.path, 4)
if len(args) < 1:
return 'no trakt username provided'
from rssdldmng.rssdld.trakt import Trakt
return Trakt(args[0], args[1] if len(args) >= 2 else 'watchlist').getShows()
def put_checkfeed(self, handler):
if not self.manager.downloader:
return 'internal error'
args, params = self.get_args(handler.path, 2)
if 'feed' not in params:
return 'no feed provided'
res = self.manager.downloader.parseFeed(params['feed'])
return res
def get_db(self, handler):
if not self.manager.downloader:
return 'internal error'
args, params = self.get_args(handler.path, 3)
if len(args) < 1:
return 'no action provided'
if args[0] == 'dump' or args[0] == 'dumpall':
state = -1
if len(args) >= 2:
state = int(args[1])
if args[0] == 'dump':
return [e.cleaned() for e in self.manager.downloader.getEpisodes(state=state)]
else:
return self.manager.downloader.getEpisodes(state=state)
return 'invalid action'
def put_db(self, handler):
if not self.manager.downloader:
return 'internal error'
args, params = self.get_args(handler.path, 3)
if len(args) < 1:
return 'no action provided'
if args[0] == 'set':
if len(args) < 3:
return 'no hash provided or state'
if not self.manager.downloader.updateEpisode(args[1], int(args[2])):
return 'FAIL'
return 'OK'
# hash = params['hash'] if 'hash' in params else None
# showname = params['show'] if 'show' in params else None
# season = params['season'] if 'season' in params else -1
# episode = params['episode'] if 'episode' in params else -1
# return [e.cleaned() for e in self.manager.downloader.getEpisodes(state=state)]
return 'invalid action'
def test(self, handler):
# self.manager.downloader.tk.setCollected(None, "elementary", 1, 1)
return 'OK'
def get_args(self, path, index):
args = path.split('/')[index:]
lastarg = args[-1].split('?')
args[-1] = lastarg[0]
paramlist = lastarg[1] if len(lastarg) > 1 else None
params = {}
if paramlist:
for p in paramlist.split('&'):
if '=' in p:
params[p.split('=')[0]] = p.split('=')[1]
else:
params[p] = True
return (args, params)
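    # For reference (hypothetical request): get_args('/api/db/dump/1?feed=x&raw', 3)
    # returns (['dump', '1'], {'feed': 'x', 'raw': True}).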
| 4,570
| 11
| 371
|
c67749db26476426cc9820a06907a762ef3a4bc1
| 4,477
|
py
|
Python
|
vimeoct/settings.py
|
nkawa/vimeo-coursetool
|
729215fe23b1bf05918a38d21e585a7b8862e75a
|
[
"Apache-2.0"
] | null | null | null |
vimeoct/settings.py
|
nkawa/vimeo-coursetool
|
729215fe23b1bf05918a38d21e585a7b8862e75a
|
[
"Apache-2.0"
] | null | null | null |
vimeoct/settings.py
|
nkawa/vimeo-coursetool
|
729215fe23b1bf05918a38d21e585a7b8862e75a
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for vimeoct project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from dotenv import load_dotenv, find_dotenv
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-6jcn$qgw^3cv7zp81m04%eni^mv@ter=u54uwppqg34czm45u#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
#DEBUG = False
ALLOWED_HOSTS = ["online.tmi.mirai.nagoya-u.ac.jp"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'auth0login'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vimeoct.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vimeoct.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'ja-jp'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
ENV_FILE = find_dotenv()
if ENV_FILE:
load_dotenv(ENV_FILE)
# SOCIAL AUTH AUTH0 BACKEND CONFIG
SOCIAL_AUTH_TRAILING_SLASH = False
SOCIAL_AUTH_AUTH0_KEY = os.environ.get('AUTH0_CLIENT_ID')
SOCIAL_AUTH_AUTH0_SECRET = os.environ.get('AUTH0_CLIENT_SECRET')
SOCIAL_AUTH_AUTH0_SCOPE = [
'openid',
'profile',
'email'
]
SOCIAL_AUTH_AUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN')
AUDIENCE = None
if os.environ.get('AUTH0_AUDIENCE'):
AUDIENCE = os.environ.get('AUTH0_AUDIENCE')
else:
if SOCIAL_AUTH_AUTH0_DOMAIN:
AUDIENCE = 'https://' + SOCIAL_AUTH_AUTH0_DOMAIN + '/userinfo'
if AUDIENCE:
SOCIAL_AUTH_AUTH0_AUTH_EXTRA_ARGUMENTS = {'audience': AUDIENCE}
AUTHENTICATION_BACKENDS = {
'auth0login.auth0backend.Auth0',
'django.contrib.auth.backends.ModelBackend'
}
LOGIN_URL = '/login/auth0'
LOGIN_REDIRECT_URL = '/dashboard'
# This is for static file serving.
# You have to set STATIC_ROOT in .env.
# If you change static files, run "python3 manage.py collectstatic".
if os.environ.get('STATIC_ROOT'):
STATIC_ROOT = os.environ.get('STATIC_ROOT')
| 26.181287
| 91
| 0.715881
|
"""
Django settings for vimeoct project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from dotenv import load_dotenv, find_dotenv
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-6jcn$qgw^3cv7zp81m04%eni^mv@ter=u54uwppqg34czm45u#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
#DEBUG = False
ALLOWED_HOSTS = ["online.tmi.mirai.nagoya-u.ac.jp"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'auth0login'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vimeoct.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vimeoct.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'ja-jp'
TIME_ZONE = 'Asia/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
ENV_FILE = find_dotenv()
if ENV_FILE:
load_dotenv(ENV_FILE)
# SOCIAL AUTH AUTH0 BACKEND CONFIG
SOCIAL_AUTH_TRAILING_SLASH = False
SOCIAL_AUTH_AUTH0_KEY = os.environ.get('AUTH0_CLIENT_ID')
SOCIAL_AUTH_AUTH0_SECRET = os.environ.get('AUTH0_CLIENT_SECRET')
SOCIAL_AUTH_AUTH0_SCOPE = [
'openid',
'profile',
'email'
]
SOCIAL_AUTH_AUTH0_DOMAIN = os.environ.get('AUTH0_DOMAIN')
AUDIENCE = None
if os.environ.get('AUTH0_AUDIENCE'):
AUDIENCE = os.environ.get('AUTH0_AUDIENCE')
else:
if SOCIAL_AUTH_AUTH0_DOMAIN:
AUDIENCE = 'https://' + SOCIAL_AUTH_AUTH0_DOMAIN + '/userinfo'
if AUDIENCE:
SOCIAL_AUTH_AUTH0_AUTH_EXTRA_ARGUMENTS = {'audience': AUDIENCE}
AUTHENTICATION_BACKENDS = {
'auth0login.auth0backend.Auth0',
'django.contrib.auth.backends.ModelBackend'
}
LOGIN_URL = '/login/auth0'
LOGIN_REDIRECT_URL = '/dashboard'
# This is for static file serving.
# You have to set STATIC_ROOT in .env.
# If you change static files, run "python3 manage.py collectstatic".
if os.environ.get('STATIC_ROOT'):
STATIC_ROOT = os.environ.get('STATIC_ROOT')
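# For reference, a sketch of the .env file this module reads (placeholder
# values, not real credentials):
#   AUTH0_CLIENT_ID=your-client-id
#   AUTH0_CLIENT_SECRET=your-client-secret
#   AUTH0_DOMAIN=your-tenant.auth0.com
#   AUTH0_AUDIENCE=https://your-tenant.auth0.com/userinfo
#   STATIC_ROOT=/var/www/vimeoct/static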
| 0
| 0
| 0
|
170b9d642252f24e9587f0ac465d2676a5e83e22
| 478
|
py
|
Python
|
discord/ext/wizard/__init__.py
|
Makiyu-py/discord-ext-wizard
|
59bd066ab451ee41d267aefe5f71e8485f6bc4a0
|
[
"MIT"
] | 4
|
2021-04-02T16:16:50.000Z
|
2021-08-03T13:21:45.000Z
|
discord/ext/wizard/__init__.py
|
Makiyu-py/discord-ext-wizard
|
59bd066ab451ee41d267aefe5f71e8485f6bc4a0
|
[
"MIT"
] | 7
|
2021-05-14T11:31:50.000Z
|
2021-05-27T09:06:45.000Z
|
discord/ext/wizard/__init__.py
|
Makiyu-py/discord-ext-wizard
|
59bd066ab451ee41d267aefe5f71e8485f6bc4a0
|
[
"MIT"
] | 1
|
2021-05-18T23:58:23.000Z
|
2021-05-18T23:58:23.000Z
|
from collections import namedtuple
_VersionInfo = namedtuple("_VersionInfo", "major minor patch")
version_info = _VersionInfo(0, 3, 0)
__version__ = ".".join(str(v) for v in version_info)
__license__ = "MIT"
__author__ = "FalseDev"
__title__ = "discord-ext-wizard"
__copyright__ = "Copyright 2021 {}".format(__author__)
__uri__ = "https://github.com/{}/{}".format(__author__, __title__)
from .checks import *
from .mixins import *
from .prompt import *
from .wizard import *
| 28.117647
| 66
| 0.738494
|
from collections import namedtuple
_VersionInfo = namedtuple("_VersionInfo", "major minor patch")
version_info = _VersionInfo(0, 3, 0)
__version__ = ".".join(str(v) for v in version_info)
__license__ = "MIT"
__author__ = "FalseDev"
__title__ = "discord-ext-wizard"
__copyright__ = "Copyright 2021 {}".format(__author__)
__uri__ = "https://github.com/{}/{}".format(__author__, __title__)
from .checks import *
from .mixins import *
from .prompt import *
from .wizard import *
| 0
| 0
| 0
|
d2a1e5d665881867fbf743ab4d526c8f32e55e2d
| 14,088
|
py
|
Python
|
trans_ms/transport_management/doctype/transportation_order/transportation_order.py
|
mohsinalimat/transport
|
3d32bd27f505f64b948f48d0bfc5c7ccaf61c4a2
|
[
"MIT"
] | null | null | null |
trans_ms/transport_management/doctype/transportation_order/transportation_order.py
|
mohsinalimat/transport
|
3d32bd27f505f64b948f48d0bfc5c7ccaf61c4a2
|
[
"MIT"
] | null | null | null |
trans_ms/transport_management/doctype/transportation_order/transportation_order.py
|
mohsinalimat/transport
|
3d32bd27f505f64b948f48d0bfc5c7ccaf61c4a2
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, Aakvatech Limited and contributors
# For license information, please see license.txt
# import frappe
from __future__ import unicode_literals
import frappe
import time
import datetime
from frappe.model.document import Document
from frappe import _
import json
from frappe.utils import nowdate
from trans_ms.utlis.dimension import set_dimension
class TransportationOrder(Document):
'''def set_parent_in_children(self):
"""Updates `parent` and `parenttype` property in all children."""
#If reference doctype is set
if self.get('reference_docname'):
for d in self.get_all_children():
d.parent = self.get('reference_docname')
d.parenttype = self.get('reference_doctype')
if self.get('reference_doctype') == "Import":
d.parentfield = "assign_transport"
else:
for d in self.get_all_children():
d.parent = self.name
d.parenttype = self.doctype'''
# Custom load method for loading child tables data from imports and exports request
def load_from_db(self):
"""Load document and children from database and create properties
from fields"""
if not getattr(self, "_metaclass", False) and self.meta.issingle:
single_doc = frappe.db.get_singles_dict(self.doctype)
if not single_doc:
single_doc = frappe.new_doc(self.doctype).as_dict()
single_doc["name"] = self.doctype
del single_doc["__islocal"]
super(Document, self).__init__(single_doc)
self.init_valid_columns()
self._fix_numeric_types()
else:
d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1)
if not d:
frappe.throw(
_("{0} {1} not found").format(_(self.doctype), self.name),
frappe.DoesNotExistError,
)
super(Document, self).__init__(d)
if self.name == "DocType" and self.doctype == "DocType":
from frappe.model.meta import doctype_table_fields
table_fields = doctype_table_fields
else:
table_fields = self.meta.get_table_fields()
#
            # For table fields, load from the request origin (if there is one), else load normally.
            # Also adds back-compatibility for when transport assignments were loaded from Import.
#
for df in table_fields:
if d.reference_doctype and d.reference_docname:
# Fieldname depending on if it's Export or Import
if d.reference_doctype == "Import" and df.fieldname == "cargo":
fieldname = "cargo_information"
elif (
d.reference_doctype == "Import"
and df.fieldname == "assign_transport"
):
fieldname = "assign_transport"
if df.fieldname == "assign_transport" and self.get("version") == 2:
children = frappe.db.get_values(
df.options,
{
"parent": self.name,
"parenttype": self.doctype,
"parentfield": "assign_transport",
},
"*",
as_dict=True,
order_by="idx asc",
)
else:
children = frappe.db.get_values(
df.options,
{
"parent": d.reference_docname,
"parenttype": d.reference_doctype,
"parentfield": fieldname,
},
"*",
as_dict=True,
order_by="idx asc",
)
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
else:
children = frappe.db.get_values(
df.options,
{
"parent": self.name,
"parenttype": self.doctype,
"parentfield": df.fieldname,
},
"*",
as_dict=True,
order_by="idx asc",
)
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
# sometimes __setup__ can depend on child values, hence calling again at the end
if hasattr(self, "__setup__"):
self.__setup__()
@frappe.whitelist(allow_guest=True)
@frappe.whitelist(allow_guest=True)
@frappe.whitelist(allow_guest=True)
@frappe.whitelist()
| 37.870968
| 158
| 0.565233
|
# Copyright (c) 2021, Aakvatech Limited and contributors
# For license information, please see license.txt
# import frappe
from __future__ import unicode_literals
import frappe
import time
import datetime
from frappe.model.document import Document
from frappe import _
import json
from frappe.utils import nowdate
from trans_ms.utlis.dimension import set_dimension
class TransportationOrder(Document):
def validate(self):
if self.customer:
currency = frappe.get_value("Customer", self.customer, "default_currency")
if currency:
for row in self.assign_transport:
row.currency = currency
for row in self.assign_transport:
if not row.assigned_vehicle:
continue
vehicle_status = frappe.get_value("Vehicle", row.assigned_vehicle, "status")
if vehicle_status == "In Trip":
existing_vehicle_trip = frappe.db.get_value(
"Vehicle Trip",
{
"reference_doctype": row.doctype,
"reference_docname": row.name,
},
)
if not existing_vehicle_trip:
frappe.throw(
_("Vehicle {0} is in trip").format(row.assigned_vehicle)
)
def before_save(self):
# For assignment status
if not self.assign_transport:
self.set("assignment_status", "Waiting Assignment")
elif self.cargo_type == "Container":
assigned_containers = []
for row in self.assign_transport:
assigned_containers.append(row.container_number)
for row in self.cargo:
if row.container_number not in assigned_containers:
self.set("assignment_status", "Partially Assigned")
else:
self.set("assignment_status", "Fully Assigned")
elif self.cargo_type == "Loose Cargo":
total_assigned = 0
for row in self.assign_transport:
total_assigned = total_assigned + row.get("amount", 0)
if self.amount > total_assigned:
self.set("assignment_status", "Partially Assigned")
else:
self.set("assignment_status", "Fully Assigned")
def get_all_children(self, parenttype=None):
# If reference doctype is set
if self.get("reference_docname"):
return self.get("assign_transport")
else:
"""Returns all children documents from **Table** type field in a list."""
ret = []
for df in self.meta.get("fields", {"fieldtype": "Table"}):
if parenttype:
if df.options == parenttype:
return self.get(df.fieldname)
value = self.get(df.fieldname)
if isinstance(value, list):
ret.extend(value)
return ret
def update_children(self):
# update child tables
# If reference doctype is set
if self.get("reference_docname"):
self.update_child_table("assign_transport")
else:
for df in self.meta.get_table_fields():
self.update_child_table(df.fieldname, df)
'''def set_parent_in_children(self):
"""Updates `parent` and `parenttype` property in all children."""
#If reference doctype is set
if self.get('reference_docname'):
for d in self.get_all_children():
d.parent = self.get('reference_docname')
d.parenttype = self.get('reference_doctype')
if self.get('reference_doctype') == "Import":
d.parentfield = "assign_transport"
else:
for d in self.get_all_children():
d.parent = self.name
d.parenttype = self.doctype'''
# Custom load method for loading child tables data from imports and exports request
def load_from_db(self):
"""Load document and children from database and create properties
from fields"""
if not getattr(self, "_metaclass", False) and self.meta.issingle:
single_doc = frappe.db.get_singles_dict(self.doctype)
if not single_doc:
single_doc = frappe.new_doc(self.doctype).as_dict()
single_doc["name"] = self.doctype
del single_doc["__islocal"]
super(Document, self).__init__(single_doc)
self.init_valid_columns()
self._fix_numeric_types()
else:
d = frappe.db.get_value(self.doctype, self.name, "*", as_dict=1)
if not d:
frappe.throw(
_("{0} {1} not found").format(_(self.doctype), self.name),
frappe.DoesNotExistError,
)
super(Document, self).__init__(d)
if self.name == "DocType" and self.doctype == "DocType":
from frappe.model.meta import doctype_table_fields
table_fields = doctype_table_fields
else:
table_fields = self.meta.get_table_fields()
#
            # For table fields, load from the request origin (if there is one), else load normally.
            # Also adds back-compatibility for when transport assignments were loaded from Import.
#
for df in table_fields:
if d.reference_doctype and d.reference_docname:
# Fieldname depending on if it's Export or Import
if d.reference_doctype == "Import" and df.fieldname == "cargo":
fieldname = "cargo_information"
elif (
d.reference_doctype == "Import"
and df.fieldname == "assign_transport"
):
fieldname = "assign_transport"
if df.fieldname == "assign_transport" and self.get("version") == 2:
children = frappe.db.get_values(
df.options,
{
"parent": self.name,
"parenttype": self.doctype,
"parentfield": "assign_transport",
},
"*",
as_dict=True,
order_by="idx asc",
)
else:
children = frappe.db.get_values(
df.options,
{
"parent": d.reference_docname,
"parenttype": d.reference_doctype,
"parentfield": fieldname,
},
"*",
as_dict=True,
order_by="idx asc",
)
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
else:
children = frappe.db.get_values(
df.options,
{
"parent": self.name,
"parenttype": self.doctype,
"parentfield": df.fieldname,
},
"*",
as_dict=True,
order_by="idx asc",
)
if children:
self.set(df.fieldname, children)
else:
self.set(df.fieldname, [])
# sometimes __setup__ can depend on child values, hence calling again at the end
if hasattr(self, "__setup__"):
self.__setup__()
@frappe.whitelist(allow_guest=True)
def transport_order_scheduler():
# Create requests for imports less than 10 days to eta
for row in frappe.db.sql(
"""SELECT name, eta, reference_file_number FROM `tabImport` WHERE (status <> 'Closed' OR status IS NULL) AND `eta` < timestampadd(day, -10, now())""",
as_dict=1,
):
create_transport_order(
reference_doctype="Import",
reference_docname=row.name,
file_number=row.reference_file_number,
)
@frappe.whitelist(allow_guest=True)
def create_transport_order(**args):
args = frappe._dict(args)
existing_transport_order = frappe.db.get_value(
"Transport Order", {"file_number": args.file_number}
)
# Timestamp
ts = time.time()
timestamp = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
if not existing_transport_order:
request = frappe.new_doc("Transportion Order")
request.update(
{
"reference_doctype": args.reference_doctype,
"reference_docname": args.reference_docname,
"file_number": args.file_number,
"request_received": args.request_received,
"customer": args.customer,
"consignee": args.consignee,
"shipper": args.shipper,
"cargo_location_country": args.cargo_location_country,
"cargo_location_city": args.cargo_location_city,
"cargo_destination_country": args.cargo_destination_country,
"cargo_destination_city": args.cargo_destination_city,
"transport_type": args.transport_type,
"version": 2,
}
)
request.insert(ignore_permissions=True)
return request.name
else:
return existing_transport_order
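# A sketch of a server-side call (field values are illustrative):
#   create_transport_order(
#       reference_doctype="Import",
#       reference_docname="IMP-0001",
#       file_number="FILE-0001",
#   )
# Returns the new Transportation Order name, or the existing one for that file number.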
@frappe.whitelist(allow_guest=True)
def assign_vehicle(**args):
args = frappe._dict(args)
# Change cargo status to assigned (1)
# doc = frappe.get_doc("Cargo Details", args.cargo_docname)
# doc.db_set("transport_status", "0", False)
# doc.db_set("idx", args.cargo_idx, False)
# Add/Update assigned transport details
existing_transport_details = frappe.db.get_value(
"Transport Assignment", {"cargo": args.cargo_docname}
)
if existing_transport_details:
# Update the transport details
doc = frappe.get_doc("Transport Assignment", existing_transport_details)
doc.assigned_vehicle = args.assigned_vehicle
doc.assigned_trailer = args.assigned_trailer
doc.assigned_driver = args.assigned_driver
doc.cargo = args.cargo_docname
doc.amount = args.amount
doc.expected_loading_date = args.expected_loading_date
doc.container_number = args.container_number
doc.units = args.units
doc.transporter_type = args.transporter_type
doc.sub_contractor = args.sub_contractor
doc.vehicle_plate_number = args.vehicle_plate_number
doc.trailer_plate_number = args.trailer_plate_number
doc.driver_name = args.driver_name
doc.passport_number = args.passport_number
doc.route = args.route
doc.idx = args.assigned_idx
doc.save()
else:
request = frappe.new_doc("Transport Assignment")
request.update(
{
"cargo": args.cargo_docname,
"amount": args.amount,
"expected_loading_date": args.expected_loading_date,
"container_number": args.container_number,
"units": args.units,
"transporter_type": args.transporter_type,
"sub_contractor": args.sub_contractor,
"vehicle_plate_number": args.vehicle_plate_number,
"trailer_plate_number": args.trailer_plate_number,
"driver_name": args.driver_name,
"passport_number": args.passport_number,
"route": args.route,
"parent": args.reference_docname,
"parenttype": args.reference_doctype,
"parentfield": "assign_transport",
"assigned_vehicle": args.assigned_vehicle,
"assigned_trailer": args.assigned_trailer,
"assigned_driver": args.assigned_driver,
"idx": args.assigned_idx,
}
)
request.insert(ignore_permissions=True)
# Edit vehicle status
"""vehicle = frappe.get_doc("Vehicle", args.assigned_vehicle)
if vehicle.status == 'Available':
vehicle.status = "Booked"
vehicle.save()"""
return "success"
@frappe.whitelist()
def create_sales_invoice(doc, rows):
doc = frappe.get_doc(json.loads(doc))
rows = json.loads(rows)
if not rows:
return
items = []
for row in rows:
description = ""
if row["assigned_vehicle"]:
description += "<b>VEHICLE NUMBER: " + row["assigned_vehicle"]
if row["route"]:
description += "<BR>ROUTE: " + row["route"]
if frappe.db.get_value("Cargo Details", row["cargo"], "bl_number"):
description += "<BR>BL NUMBER: " + frappe.db.get_value("Cargo Details", row["cargo"], "bl_number")
items.append(
{
"item_code": row["item"],
"qty": 1,
"uom": frappe.get_value("Item", row["item"], "stock_uom"),
"rate": row["rate"],
"description": description,
}
)
invoice = frappe.get_doc(
dict(
doctype="Sales Invoice",
customer=doc.customer,
currency=row["currency"],
posting_date=nowdate(),
company=doc.company,
items=items,
),
)
set_dimension(doc, invoice)
for row in invoice.items:
set_dimension(doc, invoice, tr_child=row)
frappe.flags.ignore_account_permission = True
invoice.set_taxes()
invoice.set_missing_values()
invoice.flags.ignore_mandatory = True
invoice.calculate_taxes_and_totals()
invoice.insert(ignore_permissions=True)
for item in doc.assign_transport:
if item.name in [i["name"] for i in rows]:
item.invoice = invoice.name
# doc.save()
frappe.msgprint(_("Sales Inoice {0} Created").format(invoice.name), alert=True)
return invoice
| 9,018
| 0
| 195
|
c94324dc07f3d22abb00e60ed1680dcaf4d026ca
| 2,149
|
py
|
Python
|
vc3/preprocess.py
|
ta1fukawa/vc-beta
|
c3a80c44be90f72bfe0bcac08f27c4882700c9b6
|
[
"MIT"
] | null | null | null |
vc3/preprocess.py
|
ta1fukawa/vc-beta
|
c3a80c44be90f72bfe0bcac08f27c4882700c9b6
|
[
"MIT"
] | null | null | null |
vc3/preprocess.py
|
ta1fukawa/vc-beta
|
c3a80c44be90f72bfe0bcac08f27c4882700c9b6
|
[
"MIT"
] | null | null | null |
import argparse
import pathlib
import sys
import torch
import torchaudio
import yaml
import common
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert wav to mel spectrogram')
parser.add_argument('wav_dir', type=pathlib.Path, help='path to directory of speaker directories containing wav files')
parser.add_argument('mel_dir', type=pathlib.Path, help='path to directory to save mel spectrograms')
parser.add_argument('embed_dir', type=pathlib.Path, help='path to directory to save embeddings')
parser.add_argument('encoder_path', type=pathlib.Path, help='path to speaker encoder')
parser.add_argument('config_path', type=pathlib.Path, help='path to config')
if 'debugpy' in sys.modules:
args = parser.parse_args([
'autovc/wavs-jvs',
'vc3/mel-jvs',
'vc3/embed-jvs',
'autovc2/dvector.pt',
'vc3/preprocess.yaml',
])
else:
        args = parser.parse_args()
main(**vars(args))
| 33.578125
| 128
| 0.642624
|
import argparse
import pathlib
import sys
import torch
import torchaudio
import yaml
import common
def main(wav_dir, mel_dir, embed_dir, encoder_path, config_path):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
encoder = torch.load(encoder_path).to(device).eval()
config = yaml.load(config_path.read_text(), Loader=yaml.FullLoader)
embed_dir.mkdir(exist_ok=True, parents=True)
for speaker in wav_dir.iterdir():
if not speaker.is_dir():
continue
speaker_name = speaker.name
(mel_dir / speaker_name).mkdir(exist_ok=True, parents=True)
mels = []
for wav in speaker.iterdir():
if not wav.is_file() or wav.suffix != '.wav':
continue
wave, sample_rate = torchaudio.load(wav)
wave, sample_rate = common.norm_wave(wave, **config['norm_wave'])
mel, _ = common.wave2mel(wave, sample_rate, **config['mel'])
torch.save(mel, (mel_dir / speaker_name / f'{wav.stem}.pt'))
mels.append(mel)
embed = common.mel2embed(mels, encoder, device, **config['mel2embed'])
torch.save(embed, (embed_dir / f'{speaker_name}.pt'))
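# Expected layout (inferred from the loop above): wav_dir/<speaker>/<utt>.wav
# produces mel_dir/<speaker>/<utt>.pt, plus one embed_dir/<speaker>.pt per speaker.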
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert wav to mel spectrogram')
parser.add_argument('wav_dir', type=pathlib.Path, help='path to directory of speaker directories containing wav files')
parser.add_argument('mel_dir', type=pathlib.Path, help='path to directory to save mel spectrograms')
parser.add_argument('embed_dir', type=pathlib.Path, help='path to directory to save embeddings')
parser.add_argument('encoder_path', type=pathlib.Path, help='path to speaker encoder')
parser.add_argument('config_path', type=pathlib.Path, help='path to config')
if 'debugpy' in sys.modules:
args = parser.parse_args([
'autovc/wavs-jvs',
'vc3/mel-jvs',
'vc3/embed-jvs',
'autovc2/dvector.pt',
'vc3/preprocess.yaml',
])
else:
        args = parser.parse_args()
main(**vars(args))
| 1,091
| 0
| 23
|
6d53251e78bb7ec25c3252e40a239ccdb6143e6e
| 5,948
|
py
|
Python
|
docweb/views_wiki.py
|
pv/pydocweb
|
05c7b69c3903e2bb90cca511f18f9c10d7926cc6
|
[
"BSD-3-Clause"
] | 2
|
2016-07-26T17:13:00.000Z
|
2017-12-13T12:46:46.000Z
|
docweb/views_wiki.py
|
pv/pydocweb
|
05c7b69c3903e2bb90cca511f18f9c10d7926cc6
|
[
"BSD-3-Clause"
] | null | null | null |
docweb/views_wiki.py
|
pv/pydocweb
|
05c7b69c3903e2bb90cca511f18f9c10d7926cc6
|
[
"BSD-3-Clause"
] | null | null | null |
import pydocweb.docweb.rst as rst
from pydocweb.docweb.utils import *
from pydocweb.docweb.models import *
from views_docstring import EditForm
#------------------------------------------------------------------------------
# Wiki
#------------------------------------------------------------------------------
WIKI_CACHE_AGE = 60
@permission_required('docweb.change_wikipage')
| 37.408805
| 118
| 0.535138
|
import pydocweb.docweb.rst as rst
from pydocweb.docweb.utils import *
from pydocweb.docweb.models import *
from views_docstring import EditForm
#------------------------------------------------------------------------------
# Wiki
#------------------------------------------------------------------------------
WIKI_CACHE_AGE = 60
def frontpage(request):
return HttpResponsePermanentRedirect(reverse(view, args=['Front Page']))
def view(request, name):
try:
page = WikiPage.on_site.get(name=name)
revision = request.GET.get('revision')
try:
revision = int(revision)
rev = page.revisions.get(revno=revision)
except (TypeError, ValueError, WikiPageRevision.DoesNotExist):
rev = page
if not rev.text and revision is None:
raise WikiPage.DoesNotExist()
body = rst.render_html(rev.text, cache_max_age=WIKI_CACHE_AGE)
if body is None:
raise WikiPage.DoesNotExist()
return render_template(request, 'wiki/page.html',
dict(name=name, body_html=body,
revision=revision))
except WikiPage.DoesNotExist:
return render_template(request, 'wiki/not_found.html',
dict(name=name))
@permission_required('docweb.change_wikipage')
def edit(request, name):
site = Site.objects.get_current()
if request.method == 'POST':
if request.POST.get('button_cancel'):
return HttpResponseRedirect(reverse(view, args=[name]))
revision = None
form = EditForm(request.POST)
if form.is_valid():
data = form.cleaned_data
if request.POST.get('button_preview'):
preview = rst.render_html(data['text'],
cache_max_age=WIKI_CACHE_AGE)
try:
prev_text = WikiPage.on_site.get(name=name).text
if not isinstance(prev_text, unicode):
prev_text = prev_text.decode('utf-8')
except WikiPage.DoesNotExist:
prev_text = ""
diff_html = html_diff_text(prev_text, data['text'],
'previous revision',
'current text')
return render_template(
request, 'wiki/edit.html',
dict(form=form, name=name,
revision=revision,
diff_html=diff_html,
preview_html=preview))
else:
page, created = WikiPage.on_site.get_or_create(name=name,
site=site)
page.edit(data['text'],
request.user.username,
data['comment'])
return HttpResponseRedirect(reverse(view, args=[name]))
else:
try:
revision = request.GET.get('revision')
page = WikiPage.on_site.get(name=name)
try:
revision = int(revision)
rev = page.revisions.get(revno=revision)
comment = "Reverted"
except (TypeError, ValueError, WikiPageRevision.DoesNotExist):
rev = page.revisions.all()[0]
comment = ""
data = dict(text=rev.text, comment=comment)
except (WikiPage.DoesNotExist, IndexError):
data = {}
revision = None
form = EditForm(initial=data)
return render_template(request, 'wiki/edit.html',
dict(form=form, name=name, revision=revision))
def log(request, name):
page = get_object_or_404(WikiPage, name=name)
if request.method == "POST":
if request.POST.get('button_diff'):
try:
rev1 = int(request.POST.get('rev1'))
rev2 = int(request.POST.get('rev2'))
return HttpResponseRedirect(reverse(diff,
args=[name, rev1, rev2]))
except (ValueError, TypeError):
pass
author_map = get_author_map()
revisions = []
for rev in page.revisions.all():
revisions.append(dict(
id=rev.revno,
author=author_map.get(rev.author, rev.author),
comment=rev.comment,
timestamp=rev.timestamp,
))
return render_template(request, 'wiki/log.html',
dict(name=name, revisions=revisions))
def diff(request, name, rev1, rev2):
page = get_object_or_404(WikiPage, name=name)
try:
if str(rev1).lower() == "cur":
rev1 = page.revisions.all()[0]
else:
rev1 = get_object_or_404(WikiPageRevision, revno=int(rev1))
if str(rev2).lower() == "cur":
rev2 = page.revisions.all()[0]
else:
rev2 = get_object_or_404(WikiPageRevision, revno=int(rev2))
except (ValueError, TypeError):
raise Http404()
name1 = str(rev1.revno)
name2 = str(rev2.revno)
diff = html_diff_text(rev1.text, rev2.text, label_a=name1, label_b=name2)
return render_template(request, 'wiki/diff.html',
dict(name=name, name1=name1, name2=name2,
diff_html=diff))
def diff_prev(request, name, rev2):
site = Site.objects.get_current()
page = get_object_or_404(WikiPage, name=name)
try:
rev2 = get_object_or_404(WikiPageRevision, revno=int(rev2)).revno
except (ValueError, TypeError):
raise Http404()
try:
rev1 = WikiPageRevision.objects.filter(page__site=site, page=page, revno__lt=rev2).order_by('-revno')[0].revno
except (IndexError, AttributeError):
rev1 = "cur"
return diff(request, name, rev1, rev2)
| 5,429
| 0
| 137
|
503a9f4c8fd00e62e81e2c3830c78b86686ae2d1
| 5,344
|
py
|
Python
|
gym/searchingCoin.py
|
GodWriter/RL-Pytorch
|
26f5b3ec4c12bd5c7c33d0a9b0952165c86330ae
|
[
"Apache-2.0"
] | null | null | null |
gym/searchingCoin.py
|
GodWriter/RL-Pytorch
|
26f5b3ec4c12bd5c7c33d0a9b0952165c86330ae
|
[
"Apache-2.0"
] | null | null | null |
gym/searchingCoin.py
|
GodWriter/RL-Pytorch
|
26f5b3ec4c12bd5c7c33d0a9b0952165c86330ae
|
[
"Apache-2.0"
] | null | null | null |
import gym
import numpy
import random
import logging
from gym import spaces
from gym.utils import seeding
| 33.610063
| 87
| 0.549214
|
import gym
import numpy
import random
import logging
from gym import spaces
from gym.utils import seeding
class GridEnv(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 2}
def __init__(self):
        self.states = [1, 2, 3, 4, 5, 6, 7, 8]  # state space
        self.actions = ['n', 'e', 's', 'w']  # action space
        # Possible robot screen positions, precomputed
self.x = [140, 220, 300, 380, 460, 140, 300, 460]
self.y = [250, 250, 250, 250, 250, 150, 150, 150]
        # Terminal states
self.terminate_states = dict()
self.terminate_states[6] = 1
self.terminate_states[7] = 1
self.terminate_states[8] = 1
        # Rewards
self.rewards = dict()
self.rewards['1_s'] = -1.0
self.rewards['3_s'] = 1.0
self.rewards['5_s'] = -1.0
        # State transitions
self.t = dict()
self.t['1_s'] = 6
self.t['1_e'] = 2
self.t['2_w'] = 1
self.t['2_e'] = 3
self.t['3_s'] = 7
self.t['3_w'] = 2
self.t['3_e'] = 4
self.t['4_w'] = 3
self.t['4_e'] = 5
self.t['5_s'] = 8
self.t['5_w'] = 4
self.seed()
self.viewer = None
self.state = None
def step(self, action):
        state = self.state  # current state of the system
        if state in self.terminate_states:
            return state, 0, True, {}  # next state, reward, done flag, debug info
        key = "%d_%s" % (state, action)  # state and action combined as the transition-dict key
        # State transition
        next_state = self.t[key] if key in self.t else state
self.state = next_state
is_terminal = True if next_state in self.terminate_states else False
r = 0.0 if key not in self.rewards else self.rewards[key]
return next_state, r, is_terminal, {}
def reset(self):
self.state = self.states[int(random.random() * len(self.states))]
print("Current State:", self.state)
return self.state
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(width=600, height=400)
            # Build the grid world
self.line1 = rendering.Line((100, 300), (500, 300))
self.line2 = rendering.Line((100, 200), (500, 200))
self.line3 = rendering.Line((100, 300), (100, 100))
self.line4 = rendering.Line((180, 300), (180, 100))
self.line5 = rendering.Line((260, 300), (260, 100))
self.line6 = rendering.Line((340, 300), (340, 100))
self.line7 = rendering.Line((420, 300), (420, 100))
self.line8 = rendering.Line((500, 300), (500, 100))
self.line9 = rendering.Line((100, 100), (180, 100))
self.line10 = rendering.Line((260, 100), (340, 100))
self.line11 = rendering.Line((420, 100), (500, 100))
            # First skull
self.kulo1 = rendering.make_circle(40)
self.kulo1.add_attr(rendering.Transform(translation=(140, 150)))
self.kulo1.set_color(0, 0, 0)
            # Second skull
self.kulo2 = rendering.make_circle(40)
self.kulo2.add_attr(rendering.Transform(translation=(460, 150)))
self.kulo2.set_color(0, 0, 0)
            # Gold bar
self.gold = rendering.make_circle(40)
self.gold.add_attr(rendering.Transform(translation=(300, 150)))
self.gold.set_color(1, 0.9, 0)
            # Robot
self.robot = rendering.make_circle(30)
self.robottrans = rendering.Transform()
self.robot.add_attr(self.robottrans)
self.robot.set_color(0.8, 0.6, 0.4)
self.line1.set_color(0, 0, 0)
self.line2.set_color(0, 0, 0)
self.line3.set_color(0, 0, 0)
self.line4.set_color(0, 0, 0)
self.line5.set_color(0, 0, 0)
self.line6.set_color(0, 0, 0)
self.line7.set_color(0, 0, 0)
self.line8.set_color(0, 0, 0)
self.line9.set_color(0, 0, 0)
self.line10.set_color(0, 0, 0)
self.line11.set_color(0, 0, 0)
self.viewer.add_geom(self.line1)
self.viewer.add_geom(self.line2)
self.viewer.add_geom(self.line3)
self.viewer.add_geom(self.line4)
self.viewer.add_geom(self.line5)
self.viewer.add_geom(self.line6)
self.viewer.add_geom(self.line7)
self.viewer.add_geom(self.line8)
self.viewer.add_geom(self.line9)
self.viewer.add_geom(self.line10)
self.viewer.add_geom(self.line11)
self.viewer.add_geom(self.kulo1)
self.viewer.add_geom(self.kulo2)
self.viewer.add_geom(self.gold)
self.viewer.add_geom(self.robot)
if self.state is None:
return None
        # self.state ranges over [1, 8] while self.x / self.y are 0-indexed, hence the -1
self.robottrans.set_translation(self.x[self.state - 1], self.y[self.state - 1])
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
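# A minimal interaction sketch (illustrative; assumes direct use of the class
# rather than gym.make registration):
if __name__ == '__main__':
    env = GridEnv()
    state = env.reset()
    next_state, reward, done, _ = env.step('e')  # try moving east
    print(state, next_state, reward, done)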
| 5,224
| 239
| 23
|
31ed9358d47167d47cc2a9ef7b971f57e974edfd
| 257
|
py
|
Python
|
scheduled_bots/geneprotein/test_ChromosomeBot.py
|
turoger/scheduled-bots
|
23fd30ccc242391151af3a1727f9fbf9dc95d433
|
[
"MIT"
] | 6
|
2017-05-04T01:04:26.000Z
|
2022-03-04T12:22:17.000Z
|
scheduled_bots/geneprotein/test_ChromosomeBot.py
|
turoger/scheduled-bots
|
23fd30ccc242391151af3a1727f9fbf9dc95d433
|
[
"MIT"
] | 55
|
2017-03-14T21:16:44.000Z
|
2022-03-02T12:39:14.000Z
|
scheduled_bots/geneprotein/test_ChromosomeBot.py
|
turoger/scheduled-bots
|
23fd30ccc242391151af3a1727f9fbf9dc95d433
|
[
"MIT"
] | 13
|
2017-02-10T21:40:06.000Z
|
2022-01-18T01:27:52.000Z
|
from datetime import datetime
from .ChromosomeBot import get_or_create
from ..geneprotein import organism_info
| 25.7
| 85
| 0.762646
|
from datetime import datetime
from .ChromosomeBot import get_or_create
from ..geneprotein import organism_info
def test_get_or_create():
wdid = get_or_create("XII", "NC_001144.5", organism_info[559292], datetime.now())
assert wdid == 'Q27525657'
| 121
| 0
| 23
|
e30e6d26d59427ddc87d2537e60dbfb3b11c8932
| 569
|
py
|
Python
|
neuronit/load/views.py
|
neuronit/pfa
|
6483f23de3ac43ae1121760ab44a2cae1f2cc901
|
[
"MIT"
] | null | null | null |
neuronit/load/views.py
|
neuronit/pfa
|
6483f23de3ac43ae1121760ab44a2cae1f2cc901
|
[
"MIT"
] | null | null | null |
neuronit/load/views.py
|
neuronit/pfa
|
6483f23de3ac43ae1121760ab44a2cae1f2cc901
|
[
"MIT"
] | null | null | null |
from django.http import Http404
from django.shortcuts import render
from play.models import Game, Save
| 31.611111
| 92
| 0.657293
|
from django.http import Http404
from django.shortcuts import render
from play.models import Game, Save
def load(request):
if request.method == "POST":
save = Save.objects.filter(name=request.POST.get("sel1")).first()
return render(request, 'play/play.html')
if request.user.is_authenticated():
save_list = Save.objects.filter(player_name=request.user.username).order_by('-date')
context = {
'save_list': save_list,
}
return render(request, 'load/load.html', context)
else:
raise Http404
| 442
| 0
| 23
|
32b11fcdda72235ee222465e07bdb3c68d9c7cb7
| 3,006
|
py
|
Python
|
src/app.py
|
colin-chang/md2html
|
9fba39ee309c61515955090690b801fc37845e91
|
[
"MIT"
] | 1
|
2020-01-06T10:07:17.000Z
|
2020-01-06T10:07:17.000Z
|
src/app.py
|
colin-chang/md2html
|
9fba39ee309c61515955090690b801fc37845e91
|
[
"MIT"
] | null | null | null |
src/app.py
|
colin-chang/md2html
|
9fba39ee309c61515955090690b801fc37845e91
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import shutil
import os
import sys
import subprocess
from platform import system
input_arg = output_arg = None
if __name__ == "__main__":
if system() != 'Linux' and system() != 'Darwin':
print('only linux is supported currently')
else:
main()
| 31.978723
| 86
| 0.57352
|
# -*- coding:utf-8 -*-
import shutil
import os
import sys
import subprocess
from platform import system
input_arg = output_arg = None
def main():
args = sys.argv
if len(args) < 3:
print('missing 2 required positional argument:"input","output"')
return
global input_arg, output_arg
input_arg, output_arg = args[1], args[2]
if not os.path.isdir(output_arg) or not os.path.exists(output_arg):
print('{} must be a directory and exists'.format(output_arg))
return
mds = []
if not os.path.exists(input_arg):
print('{} does not exist'.format(input_arg))
return
elif os.path.isfile(input_arg):
mds.append(input_arg)
elif os.path.isdir(input_arg):
for root, _, files in os.walk(input_arg):
for file in files:
if file.endswith('.md'):
mds.append(os.path.join(root, file))
else:
print('{} must be a file or directory'.format(input_arg))
return
__run_command(
'chmod -R 777 static {} {}'.format(input_arg, output_arg))
__build(mds, output_arg)
def __build(mds, output):
input_path = input_arg if os.path.isdir(input_arg) else os.path.dirname(input_arg)
for md in mds:
if os.path.getsize(md) <= 0:
print('ignore empty file {}.'.format(md))
continue
__run_command('rm -rf static/*')
_root = os.path.dirname(md)
for root, _, files in os.walk(_root):
for file in files:
if file.endswith('.md'):
continue
src = os.path.join(root, file)
tar = src.replace(_root, 'static')
if not os.path.exists(os.path.dirname(tar)):
__run_command('mkdir -p {}'.format(os.path.dirname(tar)))
__run_command('cp {} {}'.format(src, tar))
__run_command('cp {} static/README.md'.format(md))
os.mkdir('dist')
__run_command('npm run build')
os.rename(os.path.join('dist', 'index.html'), os.path.join(
'dist', '{}.html'.format(os.path.splitext(os.path.basename(md))[0])))
des_path = os.path.dirname(md).replace(input_path, output_arg)
__run_command('mkdir -p {}'.format(des_path))
# mv dist/* dest
for root, _, files in os.walk('dist'):
for file in files:
cur_file = os.path.join(root, file)
des_file = cur_file.replace('dist', des_path)
if not os.path.exists(os.path.dirname(des_file)):
__run_command('mkdir -p {}'.format(os.path.dirname(des_file)))
__run_command('mv {} {}'.format(cur_file, des_file))
shutil.rmtree('dist')
def __run_command(cmd):
if not cmd:
return
subprocess.run(cmd.split(' '))
if __name__ == "__main__":
if system() != 'Linux' and system() != 'Darwin':
print('only linux is supported currently')
else:
main()
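# Editor's hedged sketch: __run_command above splits on whitespace and runs
# without a shell, so glob arguments such as 'rm -rf static/*' reach rm
# literally. A glob-expanding variant (an assumption, not the author's code):
import glob
def __run_command_globsafe(cmd):
    if not cmd:
        return
    args = []
    for token in cmd.split(' '):
        matches = glob.glob(token)
        args.extend(matches if matches else [token])
    subprocess.run(args)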
| 2,641
| 0
| 69
|
a61f85fa9ce56be8d28765166b20414be4530278
| 2,571
|
py
|
Python
|
church/cli.py
|
mdickinson/alonzo
|
a66d4c253c9f4338ebf1177a0b0e84bf7cc9c0f6
|
[
"Apache-2.0"
] | null | null | null |
church/cli.py
|
mdickinson/alonzo
|
a66d4c253c9f4338ebf1177a0b0e84bf7cc9c0f6
|
[
"Apache-2.0"
] | null | null | null |
church/cli.py
|
mdickinson/alonzo
|
a66d4c253c9f4338ebf1177a0b0e84bf7cc9c0f6
|
[
"Apache-2.0"
] | null | null | null |
"""
To do:
- catch KeyboardInterrupt while evaluating
- show all?
- show statistics
- fix up exception handling; decorator?
"""
import cmd
from church.ast import ParseError
from church.environment import (
UndefinedNameError,
)
from church.eval import (
environment,
reduce,
Suspension,
)
from church.expr import (
definition,
expr,
name,
unexpr,
)
from church.token import (
TokenError,
)
INTRO_TEXT = """\
Welcome to the interactive lambda calculus interpreter.
Type 'help' to see supported commands.
"""
| 23.587156
| 65
| 0.580319
|
"""
To do:
- catch KeyboardInterrupt while evaluating
- show all?
- show statistics
- fix up exception handling; decorator?
"""
import cmd
from church.ast import ParseError
from church.environment import (
UndefinedNameError,
)
from church.eval import (
environment,
reduce,
Suspension,
)
from church.expr import (
definition,
expr,
name,
unexpr,
)
from church.token import (
TokenError,
)
INTRO_TEXT = """\
Welcome to the interactive lambda calculus interpreter.
Type 'help' to see supported commands.
"""
class LambdaCmd(cmd.Cmd):
prompt = "(church) "
intro = INTRO_TEXT
def __init__(self, *args, **kwargs):
super(LambdaCmd, self).__init__(*args, **kwargs)
self.environment = environment()
def emptyline(self):
pass
def precmd(self, line):
# Strip off any comment.
line, *_ = line.partition('#')
return line.strip()
def do_exit(self, arg):
r"""Leave the interpreter."""
return True
def do_let(self, arg):
r"""Define a name for a lambda term.
Examples
--------
let two = \f x.f(f x)
let add m n = \f x.m f(n f x)
let four = add two two
"""
try:
name, body = definition(arg, self.environment)
except (UndefinedNameError, TokenError, ParseError) as e:
self.stdout.write("{}\n".format(e))
return
self.environment = self.environment.append(
name,
Suspension(body, self.environment),
)
def do_eval(self, arg):
r"""Evaluate a lambda term, reducing to normal form."""
try:
term = expr(arg, self.environment)
except (UndefinedNameError, ParseError, TokenError) as e:
self.stdout.write("{}\n".format(e))
return
result = reduce(term, self.environment)
self.stdout.write("{}\n".format(unexpr(result)))
def do_show(self, arg):
r"""Show the definition of a previously defined name."""
try:
_, suspension = name(arg, self.environment)
except (TokenError, ParseError):
self.stdout.write("Usage: show <identifier>\n")
return
except UndefinedNameError as e:
self.stdout.write("{}\n".format(e))
return
replacements = {
parameter: parameter.name
for parameter, _ in suspension.env
}
self.stdout.write("{}\n".format(unexpr(
suspension.term, replacements)))
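# Editor's hedged sketch: a minimal way to launch the REPL defined above
# (the package may ship its own entry point; this is only an illustration).
if __name__ == "__main__":
    LambdaCmd().cmdloop()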
| 227
| 1,772
| 23
|
0fcac02991080919964d4007bc5b9504cc40cc8b
| 730
|
py
|
Python
|
aydin/it/classic_denoisers/test/test_dictionary.py
|
AhmetCanSolak/aydin
|
e8bc81ee88c96e0f34986df30a63c96468a45f70
|
[
"BSD-3-Clause"
] | 78
|
2021-11-08T16:11:23.000Z
|
2022-03-27T17:51:04.000Z
|
aydin/it/classic_denoisers/test/test_dictionary.py
|
AhmetCanSolak/aydin
|
e8bc81ee88c96e0f34986df30a63c96468a45f70
|
[
"BSD-3-Clause"
] | 19
|
2021-11-08T17:15:40.000Z
|
2022-03-30T17:46:55.000Z
|
aydin/it/classic_denoisers/test/test_dictionary.py
|
AhmetCanSolak/aydin
|
e8bc81ee88c96e0f34986df30a63c96468a45f70
|
[
"BSD-3-Clause"
] | 7
|
2021-11-09T17:42:32.000Z
|
2022-03-09T00:37:57.000Z
|
import pytest
from aydin.io.datasets import cropped_newyork
from aydin.it.classic_denoisers.demo.demo_2D_dictionary_fixed import (
demo_dictionary_fixed,
)
from aydin.it.classic_denoisers.demo.demo_2D_dictionary_learned import (
demo_dictionary_learned,
)
from aydin.it.classic_denoisers.dictionary_fixed import denoise_dictionary_fixed
from aydin.it.classic_denoisers.test.util_test_nd import check_nd
@pytest.mark.heavy
| 29.2
| 84
| 0.816438
|
import pytest
from aydin.io.datasets import cropped_newyork
from aydin.it.classic_denoisers.demo.demo_2D_dictionary_fixed import (
demo_dictionary_fixed,
)
from aydin.it.classic_denoisers.demo.demo_2D_dictionary_learned import (
demo_dictionary_learned,
)
from aydin.it.classic_denoisers.dictionary_fixed import denoise_dictionary_fixed
from aydin.it.classic_denoisers.test.util_test_nd import check_nd
def test_dictionary_learned():
assert demo_dictionary_learned(cropped_newyork(), display=False) >= 0.636 - 0.02
@pytest.mark.heavy
def test_dictionary_fixed():
assert demo_dictionary_fixed(cropped_newyork(), display=False) >= 0.636 - 0.02
def test_dictionary_nd():
check_nd(denoise_dictionary_fixed)
| 227
| 0
| 68
|
d1fae01ff815b39362b9f115804b8421474f2b1b
| 600
|
py
|
Python
|
big_data/maps/convert_rates_tsv_to_json.py
|
paulhtremblay/big-data
|
dfa2aa9877300a57e7a9368af59c07fcc5841b4f
|
[
"MIT"
] | null | null | null |
big_data/maps/convert_rates_tsv_to_json.py
|
paulhtremblay/big-data
|
dfa2aa9877300a57e7a9368af59c07fcc5841b4f
|
[
"MIT"
] | 7
|
2020-06-05T18:13:25.000Z
|
2022-03-11T23:19:48.000Z
|
big_data/maps/convert_rates_tsv_to_json.py
|
paulhtremblay/big-data
|
dfa2aa9877300a57e7a9368af59c07fcc5841b4f
|
[
"MIT"
] | 1
|
2020-11-25T18:24:37.000Z
|
2020-11-25T18:24:37.000Z
|
import pprint
import json
pp = pprint.PrettyPrinter(indent = 4)
if __name__ == '__main__':
pp.pprint(main())
| 23.076923
| 71
| 0.506667
|
import pprint
import json
pp = pprint.PrettyPrinter(indent = 4)
def convert(path):
with open(path, 'r') as read_obj:
line = 'init'
counter = 0
l = []
while line:
line = read_obj.readline()
counter += 1
if counter == 1:
continue
fields = line.split('\t')
if len(fields) != 2:
continue
l.append({'id':fields[0], 'rate':float(fields[1].strip())})
return l
def main():
return convert('unemployment.tsv')
if __name__ == '__main__':
pp.pprint(main())
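# Editor's hedged sketch: the same header-skipping conversion expressed with
# the csv module (convert_with_csv is an illustrative name, not the author's).
import csv
def convert_with_csv(path):
    with open(path, 'r') as f:
        rows = csv.reader(f, delimiter='\t')
        next(rows, None)  # skip the header line
        return [{'id': r[0], 'rate': float(r[1].strip())}
                for r in rows if len(r) == 2]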
| 440
| 0
| 46
|
4330ac10e234b0ed1e230392a489121eb2e4a3a4
| 610
|
py
|
Python
|
9/9-1-2.py
|
liuhanyu200/pygame
|
38a68e779e6b0a63edb1758fca98ebbf40bb0444
|
[
"BSD-3-Clause"
] | null | null | null |
9/9-1-2.py
|
liuhanyu200/pygame
|
38a68e779e6b0a63edb1758fca98ebbf40bb0444
|
[
"BSD-3-Clause"
] | null | null | null |
9/9-1-2.py
|
liuhanyu200/pygame
|
38a68e779e6b0a63edb1758fca98ebbf40bb0444
|
[
"BSD-3-Clause"
] | null | null | null |
# coding:utf-8
restaurant = Restaurant('丽水云泉大酒店', 'duck')
print(restaurant.restaurant_name)
print(restaurant.cuisine_type)
restaurant.describe_restaurant()
restaurant.open_restaurant()
| 27.727273
| 59
| 0.72623
|
# coding:utf-8
class Restaurant(object):
def __init__(self, restaurant_name, cuisine_type):
self.restaurant_name = restaurant_name
self.cuisine_type = cuisine_type
def describe_restaurant(self):
print("restaurant name is " + self.restaurant_name)
print("cuisine_type is " + self.cuisine_type)
@staticmethod
def open_restaurant() -> None:
print("restaurant is now business ing!")
restaurant = Restaurant('丽水云泉大酒店', 'duck')
print(restaurant.restaurant_name)
print(restaurant.cuisine_type)
restaurant.describe_restaurant()
restaurant.open_restaurant()
| 298
| 102
| 23
|
534ab4de75347d1cad8852248bb5bb829a73b542
| 2,351
|
py
|
Python
|
sprocket/util/hdf5.py
|
zhouming-hfut/sprocket
|
68d4005284b72a891d0c0f81afabea087fc45960
|
[
"MIT"
] | null | null | null |
sprocket/util/hdf5.py
|
zhouming-hfut/sprocket
|
68d4005284b72a891d0c0f81afabea087fc45960
|
[
"MIT"
] | null | null | null |
sprocket/util/hdf5.py
|
zhouming-hfut/sprocket
|
68d4005284b72a891d0c0f81afabea087fc45960
|
[
"MIT"
] | 1
|
2020-06-29T03:09:23.000Z
|
2020-06-29T03:09:23.000Z
|
# -*- coding: utf-8 -*-
import os
import h5py
class HDF5(object):
"""HDF5 handler
Offer the hdf5 format file handler
Parameters
---------
fpath : str,
Path of hdf5 file
mode : str,
Open h5 as write and/or read mode
`a` : open as read/write (Default)
`w` : open as write only
`r` : open as read only
Attributes
---------
h5 : hdf5 class
"""
def read(self, ext=None):
"""Read vector or array from h5 file
Parameters
---------
ext : str
File extention including h5 file
Returns
-------
array : array,
Array of hdf5 packed data
"""
if ext is None:
raise ValueError("Please specify an existing extention.")
return self.h5[ext].value
def save(self, data, ext=None):
"""Write vector or array into h5 file
Parameters
---------
data :
Vector or array will be wrote into h5 file
ext: str
File label of saved file
"""
# remove if 'ext' already exist
if ext in self.h5.keys():
del self.h5[ext]
self.h5.create_dataset(ext, data=data)
self.h5.flush()
return
| 22.825243
| 69
| 0.518503
|
# -*- coding: utf-8 -*-
import os
import h5py
class HDF5(object):
"""HDF5 handler
Offer the hdf5 format file handler
Parameters
---------
fpath : str,
Path of hdf5 file
mode : str,
Open h5 as write and/or read mode
`a` : open as read/write (Default)
`w` : open as write only
`r` : open as read only
Attributes
---------
h5 : hdf5 class
"""
def __init__(self, fpath, mode='a'):
self.fpath = fpath
self.dirname, self.filename = os.path.split(self.fpath)
self.flbl, _ = os.path.splitext(self.filename)
if mode is None:
raise ValueError("Please specify the mode.")
else:
self.mode = mode
# create directory if not exist
if self.mode == 'w' or self.mode == 'a':
# create directory if not exist
if not os.path.exists(self.dirname):
os.makedirs(self.dirname)
# warn overwriting
if self.mode == 'w':
if os.path.exists(self.fpath):
print("overwrite: " + self.fpath)
# check file existing for reading
if self.mode == 'r':
if not os.path.exists(self.fpath):
raise FileNotFoundError(
"h5 file does not exist in " + self.fpath)
# open hdf5 file to fpath
self.h5 = h5py.File(self.fpath, self.mode)
def read(self, ext=None):
"""Read vector or array from h5 file
Parameters
---------
ext : str
File extention including h5 file
Returns
-------
array : array,
Array of hdf5 packed data
"""
if ext is None:
raise ValueError("Please specify an existing extention.")
return self.h5[ext].value
def save(self, data, ext=None):
"""Write vector or array into h5 file
Parameters
---------
data :
Vector or array will be wrote into h5 file
ext: str
File label of saved file
"""
# remove if 'ext' already exist
if ext in self.h5.keys():
del self.h5[ext]
self.h5.create_dataset(ext, data=data)
self.h5.flush()
return
def close(self):
self.h5.close()
return
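# Editor's hedged usage sketch for the handler above (assumes h5py < 3.0,
# where Dataset.value still exists, and numpy imported as np):
# h5 = HDF5('out/data.h5', mode='w')
# h5.save(np.arange(10), ext='arr')
# arr = h5.read(ext='arr')
# h5.close()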
| 1,006
| 0
| 54
|
24b126d6fe31cd3452fad67f497ed45acbd26584
| 2,370
|
py
|
Python
|
neuronit/play/migrations/0019_auto_20170406_1114.py
|
neuronit/pfa
|
6483f23de3ac43ae1121760ab44a2cae1f2cc901
|
[
"MIT"
] | null | null | null |
neuronit/play/migrations/0019_auto_20170406_1114.py
|
neuronit/pfa
|
6483f23de3ac43ae1121760ab44a2cae1f2cc901
|
[
"MIT"
] | null | null | null |
neuronit/play/migrations/0019_auto_20170406_1114.py
|
neuronit/pfa
|
6483f23de3ac43ae1121760ab44a2cae1f2cc901
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-04-06 09:14
from __future__ import unicode_literals
from django.db import migrations, models
| 40.862069
| 410
| 0.596624
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-04-06 09:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('play', '0018_auto_20170331_1014'),
]
operations = [
migrations.DeleteModel(
name='Save',
),
migrations.RemoveField(
model_name='reseau',
name='info',
),
migrations.AlterField(
model_name='reseau',
name='defeat',
field=models.FloatField(blank=True, help_text='punishement for each lost game', null=True, verbose_name='defeat punishement'),
),
migrations.AlterField(
model_name='reseau',
name='network_layers',
field=models.CharField(blank=True, help_text='number of hidden layers for a given network, format: *,*,...,*, . Unexpecting behavior if incorrect form', max_length=16, null=True, verbose_name='hidden layers'),
),
migrations.AlterField(
model_name='reseau',
name='type',
field=models.CharField(choices=[('select_type', 'select'), ('mlp', 'MLP'), ('elman', 'Elman'), ('jordan', 'Jordan'), ('op_tempo', 'Temporizer'), ('op_plus', 'Add'), ('op_minus', 'Substract'), ('op_divide', 'Divide'), ('op_avg', 'Average'), ('op_recomp', 'Recomposite'), ('op_decomp', 'Decomposite')], default='select', help_text='type involving different behavior of your network', max_length=100),
),
migrations.AlterField(
model_name='reseau',
name='victory',
field=models.FloatField(blank=True, help_text='reward at each game won', null=True, verbose_name='victory reward'),
),
migrations.AlterField(
model_name='reseau',
name='weight',
field=models.FloatField(blank=True, help_text='weight influence for each iteration', null=True, verbose_name='weight scaling'),
),
migrations.AlterField(
model_name='reseau_info',
name='info',
field=models.CharField(default='toto', max_length=300),
),
migrations.AlterField(
model_name='reseau_info',
name='network_name',
field=models.CharField(default='', max_length=100),
),
]
| 0
| 2,192
| 23
|
3ddda6b61449da34e0f8e8b290c8f652cbcfea85
| 187
|
py
|
Python
|
example.py
|
RangeHighGuestTeacher/Hello-World-Template
|
34e786df22b29c92d0a44f79405e689f66c7d244
|
[
"MIT"
] | null | null | null |
example.py
|
RangeHighGuestTeacher/Hello-World-Template
|
34e786df22b29c92d0a44f79405e689f66c7d244
|
[
"MIT"
] | null | null | null |
example.py
|
RangeHighGuestTeacher/Hello-World-Template
|
34e786df22b29c92d0a44f79405e689f66c7d244
|
[
"MIT"
] | null | null | null |
print("Hello World")
########## Edit below this line
# print your name to the console
########## Edit above this line
# Once you've finished save and commit your changes to GitHub
| 15.583333
| 61
| 0.657754
|
print("Hello World")
########## Edit below this line
# print your name to the console
########## Edit above this line
# Once you've finished save and commit your changes to GitHub
| 0
| 0
| 0
|
ca3eed6733efd71c4cd494c8c7e0014f4a43becf
| 7,720
|
py
|
Python
|
CBHelper-evaluateCallBack/lambda_function.py
|
aws-samples/amazon-connect-callback-helper
|
9aff8bb69ae03d948d38cfa9b128d80c847b3a13
|
[
"MIT-0"
] | 2
|
2021-12-18T00:15:09.000Z
|
2022-03-26T09:49:25.000Z
|
CBHelper-evaluateCallBack/lambda_function.py
|
aws-samples/amazon-connect-callback-helper
|
9aff8bb69ae03d948d38cfa9b128d80c847b3a13
|
[
"MIT-0"
] | 1
|
2021-12-30T13:44:44.000Z
|
2021-12-30T13:44:44.000Z
|
CBHelper-evaluateCallBack/lambda_function.py
|
aws-samples/amazon-connect-callback-helper
|
9aff8bb69ae03d948d38cfa9b128d80c847b3a13
|
[
"MIT-0"
] | null | null | null |
##CBWitness-EvaluateCallBack Function
import json
import boto3
import os
import datetime
import pytz
import base64
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key
#===============================
| 34.774775
| 163
| 0.630829
|
##CBWitness-EvaluateCallBack Function
import json
import boto3
import os
import datetime
import pytz
import base64
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key
def lambda_handler(event, context):
print(event)
CONTACTS_TABLE= os.environ['CONTACTS_TABLE']
contactPhone = str(event['Details']['ContactData']['CustomerEndpoint']['Address'])
CONFIG_SECRET= os.environ['CONFIG_SECRET']
connect_config=json.loads(get_config(CONFIG_SECRET))
instanceId = connect_config['CONNECT_INSTANCE_ID']
queueId =connect_config['CONNECT_QUEUE_ID']
timezone = str(event['Details']['ContactData']['Attributes']['timezone'])
if('minimumperiod' in event['Details']['ContactData']['Attributes']): MinTimeWindow = int(event['Details']['ContactData']['Attributes']['minimumperiod'])
else: MinTimeWindow = 60
if('maximumcbcontacts' in event['Details']['ContactData']['Attributes']): MaxContacts = int(event['Details']['ContactData']['Attributes']['maximumcbcontacts'])
else: MaxContacts = 10
if('slopedperiod' in event['Details']['ContactData']['Attributes']): slopedPeriod = int(event['Details']['ContactData']['Attributes']['slopedperiod'])
else: slopedPeriod = True
print("MaxContacts:" + str(MaxContacts))
print("MinTimeWindow:" + str(MinTimeWindow))
print("MinTimeWindow:" + str(slopedPeriod))
queuedContacts=get_queued_contacts(instanceId,queueId)
print("Contacts in Queue:" + str(queuedContacts))
if(phone_lookup(contactPhone, CONTACTS_TABLE)):
alreadyQueued=True
else:
alreadyQueued=False
closing_time=get_closing_time(instanceId, queueId)
if(not(alreadyQueued) and queuedContacts <= MaxContacts):
response ={"cbTier" : get_allowed_tier(MinTimeWindow,closing_time['Hours'],closing_time['Minutes'],timezone, slopedPeriod )}
else:
response ={"cbTier" : "notvalid"}
return response
#===============================
def get_allowed_tier(MinTimeWindow,EndTimeHours,EndTimeMins, time_zone, slopedPeriod):
now = datetime.datetime.now(pytz.timezone(time_zone))
utcNow=now.astimezone(pytz.timezone('UTC'))
closingTime = utcNow.replace(hour=EndTimeHours, minute=EndTimeMins,second=00)
availTime = closingTime-now
availMins = availTime.seconds/60
if (slopedPeriod and availMins >= 0):
availRatio = availMins / MinTimeWindow
if (availRatio == 0): availTier = "notvalid"
if (availRatio < .25): availTier = "below25"
if (availRatio < .5 and availRatio >=.25): availTier = "below50"
if (availRatio < .75 and availRatio >=.5): availTier = "below75"
if (availRatio >=.75 and availRatio <1): availTier = "below100"
if (availRatio >= 1): availTier = "valid"
elif (availMins >= 0): availTier = "valid"
else: availTier = "notvalid"
return (availTier)
def get_queued_contacts(instanceId,queueId):
connect = boto3.client('connect')
response = connect.get_current_metric_data(
InstanceId=instanceId,
Filters={
'Queues': [
queueId,
],
'Channels': [
'VOICE',
]
},
Groupings=[
'QUEUE',
],
CurrentMetrics=[
{
'Name': 'CONTACTS_IN_QUEUE',
'Unit': 'COUNT'
},
]
)
print(response['MetricResults'])
return int(response['MetricResults'][0]['Collections'][0]['Value'])
def get_closing_time(instanceId, queueId):
connect = boto3.client('connect')
queue = connect.describe_queue(
InstanceId=instanceId,
QueueId=queueId
)
hours = connect.describe_hours_of_operation(
InstanceId=instanceId,
HoursOfOperationId=queue['Queue']['HoursOfOperationId']
)
now = datetime.datetime.now()
today=now.strftime("%A")
closingTime = list(filter(lambda x:x["Day"]==today.upper(),hours['HoursOfOperation']['Config']))
return closingTime[0]['EndTime']
def get_config(secret_name):
# Create a Secrets Manager client
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager'
)
# In this sample we only handle the specific exceptions for the 'GetSecretValue' API.
# See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
# We rethrow the exception by default.
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'DecryptionFailureException':
# Secrets Manager can't decrypt the protected secret text using the provided KMS key.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InternalServiceErrorException':
# An error occurred on the server side.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidParameterException':
# You provided an invalid value for a parameter.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidRequestException':
# You provided a parameter value that is not valid for the current state of the resource.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'ResourceNotFoundException':
# We can't find the resource that you asked for.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
else:
# Decrypts secret using the associated KMS CMK.
# Depending on whether the secret is a string or binary, one of these fields will be populated.
if 'SecretString' in get_secret_value_response:
    secret = get_secret_value_response['SecretString']
    return secret
else:
    # Binary secrets come back base64-encoded; decode before returning.
    decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
    return decoded_binary_secret
def remove_contactId(phone,table):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(table)
try:
response = table.delete_item(
Key={
'contactId': phone
}
)
except Exception as e:
print (e)
else:
return response
def phone_lookup(phone, table):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(table)
response = table.query(
KeyConditionExpression=Key('phoneNumber').eq(phone)
)
if (response['Count']):
print("Found contact:" + str(response['Items'][0]))
contactId = response['Items'][0]['contactId']
else:
print("Not contact found")
contactId = False
return contactId
def update_contact(phone, contactId, table):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(table)
try:
response = table.update_item(
Key={
'phone': phone
},
UpdateExpression='SET #item = :newState',
ExpressionAttributeNames={
'#item': 'contactId'
},
ExpressionAttributeValues={
':newState': contactId
},
ReturnValues="UPDATED_NEW")
print (response)
except Exception as e:
print (e)
else:
return response
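# Editor's hedged restatement of the sloped-tier mapping in get_allowed_tier,
# written as a standalone helper for clarity (hypothetical, not deployed code).
# Note: in the original, a ratio of exactly 0 is first marked "notvalid" and
# then overwritten to "below25" by the `< .25` branch.
def tier_for_ratio(avail_ratio):
    if avail_ratio >= 1:
        return "valid"
    if avail_ratio >= .75:
        return "below100"
    if avail_ratio >= .5:
        return "below75"
    if avail_ratio >= .25:
        return "below50"
    return "below25"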
| 7,289
| 0
| 188
|
946672f5a6656cc681f0ef3ce0dcf3df218f6871
| 284
|
py
|
Python
|
passenger_wsgi.py
|
pilnujemy/foundation-manager
|
1f1d6afcbb408c87a171bcbe3f9e58570eb478b6
|
[
"BSD-3-Clause"
] | 1
|
2016-01-04T06:30:24.000Z
|
2016-01-04T06:30:24.000Z
|
passenger_wsgi.py
|
pilnujemy/foundation-manager
|
1f1d6afcbb408c87a171bcbe3f9e58570eb478b6
|
[
"BSD-3-Clause"
] | 36
|
2015-11-27T14:17:34.000Z
|
2016-07-14T10:23:52.000Z
|
passenger_wsgi.py
|
pilnujemy/foundation-manager
|
1f1d6afcbb408c87a171bcbe3f9e58570eb478b6
|
[
"BSD-3-Clause"
] | 1
|
2016-05-14T01:11:28.000Z
|
2016-05-14T01:11:28.000Z
|
import sys, os
sys.path.append(os.getcwd())
import ConfigParser
cp = ConfigParser.ConfigParser()
cp.read('../.environ')
os.environ.update({key.upper():value for key, value in cp.items('env')})
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 25.818182
| 73
| 0.757042
|
import sys, os
sys.path.append(os.getcwd())
import ConfigParser
cp = ConfigParser.ConfigParser()
cp.read('../.environ')
os.environ.update({key.upper():value for key, value in cp.items('env')})
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 0
| 0
| 0
|
4278806ab1065f5c0bd76d9bd9e691ed2d94f3c0
| 1,113
|
py
|
Python
|
src/osyris/core/base.py
|
osyris-project/osyris
|
bff42d864a7d5d248f7023216e32fe97bc06dca6
|
[
"BSD-3-Clause"
] | 2
|
2022-02-08T14:41:19.000Z
|
2022-02-08T14:41:51.000Z
|
src/osyris/core/base.py
|
osyris-project/osyris
|
bff42d864a7d5d248f7023216e32fe97bc06dca6
|
[
"BSD-3-Clause"
] | 20
|
2022-01-24T09:34:14.000Z
|
2022-03-30T20:01:39.000Z
|
src/osyris/core/base.py
|
osyris-project/osyris
|
bff42d864a7d5d248f7023216e32fe97bc06dca6
|
[
"BSD-3-Clause"
] | null | null | null |
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Osyris contributors (https://github.com/osyris-project/osyris)
from .tools import make_label
import numpy as np
| 25.883721
| 83
| 0.625337
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Osyris contributors (https://github.com/osyris-project/osyris)
from .tools import make_label
import numpy as np
class Base:
def __repr__(self):
return str(self)
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
return self.copy()
@property
def label(self):
return make_label(name=self.name, unit=self.unit)
def min(self):
return np.amin(self)
def max(self):
return np.amax(self)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
"""
Numpy array_ufunc protocol to allow Array to work with numpy ufuncs.
"""
if method != "__call__":
# Only handle ufuncs as callables
return NotImplemented
return self._wrap_numpy(ufunc, *inputs, **kwargs)
def __array_function__(self, func, types, args, kwargs):
"""
Numpy array_function protocol to allow Array to work with numpy
functions.
"""
return self._wrap_numpy(func, *args, **kwargs)
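# Editor's hedged note: _wrap_numpy is referenced above but defined elsewhere
# in the package; subclasses are expected to route numpy ufuncs/functions onto
# their own data. A toy illustration of the protocol (hypothetical, not osyris):
# class Wrapped(Base):
#     def _wrap_numpy(self, func, *args, **kwargs):
#         return func(*[getattr(a, "values", a) for a in args], **kwargs)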
| 180
| 735
| 23
|
4032a6cd71d1138bf43cdaa8fa18e442760dd786
| 492
|
py
|
Python
|
Fourteen/verify.py
|
tbjoern/adventofcode
|
3fb8b4e18f03825c448d8935ab39b8bf08202d90
|
[
"MIT"
] | null | null | null |
Fourteen/verify.py
|
tbjoern/adventofcode
|
3fb8b4e18f03825c448d8935ab39b8bf08202d90
|
[
"MIT"
] | null | null | null |
Fourteen/verify.py
|
tbjoern/adventofcode
|
3fb8b4e18f03825c448d8935ab39b8bf08202d90
|
[
"MIT"
] | null | null | null |
muster = open("muster.txt", "r")
output = open("output.txt", "r")
mustervals = []
outputvals = []
for line in muster:
a,b,val = line.split(" ")
mustervals.append(int(val))
for line in output:
outputvals.append(int(line))
print len(mustervals)
print len(outputvals)
length = min(len(mustervals), len(outputvals))
for i in range(length):
if mustervals[i] != outputvals[i]:
print i
print "muster: " + str(mustervals[i])
print "output: " + str(outputvals[i])
| 21.391304
| 47
| 0.644309
|
muster = open("muster.txt", "r")
output = open("output.txt", "r")
mustervals = []
outputvals = []
for line in muster:
a,b,val = line.split(" ")
mustervals.append(int(val))
for line in output:
outputvals.append(int(line))
print len(mustervals)
print len(outputvals)
length = min(len(mustervals), len(outputvals))
for i in range(length):
if mustervals[i] != outputvals[i]:
print i
print "muster: " + str(mustervals[i])
print "output: " + str(outputvals[i])
| 0
| 0
| 0
|
de7d0dd2eb661fd9591ad6ff497fdd859aff26f0
| 2,014
|
py
|
Python
|
python_src/032.py
|
BurnySc2/leetcode-solutions
|
db86cc945f81d2b84891c5446f4887253011890e
|
[
"MIT"
] | null | null | null |
python_src/032.py
|
BurnySc2/leetcode-solutions
|
db86cc945f81d2b84891c5446f4887253011890e
|
[
"MIT"
] | 1
|
2021-08-01T09:56:31.000Z
|
2021-08-01T09:56:31.000Z
|
python_src/032.py
|
BurnySc2/leetcode-solutions
|
db86cc945f81d2b84891c5446f4887253011890e
|
[
"MIT"
] | null | null | null |
"""
Given a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.
https://leetcode.com/problems/longest-valid-parentheses/
"""
test_cases = ["(()", ")()())", "())()()()", "())((())()", "())((()))", "()(()", "))(()))()"]
results = [2, 4, 6, 6, 6, 2, 4]
if __name__ == "__main__":
app = Solution()
for test_case, correct_result in zip(test_cases, results):
assert (
app.longestValidParentheses(test_case) == correct_result
), f"My result: {app.longestValidParentheses(test_case)}, correct result: {correct_result}\nTest Case: {test_case}"
| 35.964286
| 132
| 0.551639
|
"""
Given a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.
https://leetcode.com/problems/longest-valid-parentheses/
"""
class Solution:
def validParentheses(self, s: str) -> bool:
open_parentheses = 0
for i in s:
if i == "(":
open_parentheses += 1
elif open_parentheses <= 0:
return False
else:
open_parentheses -= 1
if open_parentheses > 0:
return False
return True
def longestValidParentheses(self, s: str) -> int:
# TODO Too slow but should work
"""
Cases:
- More closed brackets than open brackets encountered
- No valid pairs at first (lots of open brackets in a row)
"""
results = []
length = len(s)
for start_index in range(0, length, 1):
if s[start_index] == ")":
continue
offset = length - start_index
start_range = offset + offset % 2
min_window_size = max(results, default=2)
for offset_index in range(start_range, min_window_size - 1, -2):
end_index = start_index + offset_index
if end_index - start_index < min_window_size:
break
substring = s[start_index:end_index]
if self.validParentheses(substring):
results.append(len(substring))
return max(results, default=0)
test_cases = ["(()", ")()())", "())()()()", "())((())()", "())((()))", "()(()", "))(()))()"]
results = [2, 4, 6, 6, 6, 2, 4]
if __name__ == "__main__":
app = Solution()
for test_case, correct_result in zip(test_cases, results):
assert (
app.longestValidParentheses(test_case) == correct_result
), f"My result: {app.longestValidParentheses(test_case)}, correct result: {correct_result}\nTest Case: {test_case}"
| 337
| 1,005
| 23
|
81c4df0259303ed60ceeedb1ad263c6f1e994a54
| 1,294
|
py
|
Python
|
setup.py
|
racitup/djangoshop-subscribe
|
ecc4ca1656a74cae279fa5af5fe1d6864d935cc1
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
racitup/djangoshop-subscribe
|
ecc4ca1656a74cae279fa5af5fe1d6864d935cc1
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
racitup/djangoshop-subscribe
|
ecc4ca1656a74cae279fa5af5fe1d6864d935cc1
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from setuptools import setup, find_packages
import shop_subscribe
with open('README.rst') as fd:
README = fd.read()
CLASSIFIERS = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
setup(
author="Richard Case",
author_email="rich@racitup.com",
name="djangoshop-subscribe",
packages=find_packages(exclude=['doc']),
version=shop_subscribe.__version__,
description="An email subscription plugin for Django-SHOP",
long_description=README,
url='https://github.com/racitup/djangoshop-subscribe',
license='BSD License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
keywords= ['Django', 'Django-SHOP'],
include_package_data=True,
zip_safe=False,
install_requires=[
'Django>=1.10.0,<1.11',
'django-shop>=0.11',
'django-post-office>3.0.0',
],
)
| 29.409091
| 75
| 0.66306
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from setuptools import setup, find_packages
import shop_subscribe
with open('README.rst') as fd:
README = fd.read()
CLASSIFIERS = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
setup(
author="Richard Case",
author_email="rich@racitup.com",
name="djangoshop-subscribe",
packages=find_packages(exclude=['doc']),
version=shop_subscribe.__version__,
description="An email subscription plugin for Django-SHOP",
long_description=README,
url='https://github.com/racitup/djangoshop-subscribe',
license='BSD License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
keywords= ['Django', 'Django-SHOP'],
include_package_data=True,
zip_safe=False,
install_requires=[
'Django>=1.10.0,<1.11',
'django-shop>=0.11',
'django-post-office>3.0.0',
],
)
| 0
| 0
| 0
|
80311d17a08a43ab8a1c3dd590cb3604fb754b7a
| 588
|
py
|
Python
|
hardhat/recipes/perl/file_basedir.py
|
stangelandcl/hardhat
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
[
"MIT"
] | null | null | null |
hardhat/recipes/perl/file_basedir.py
|
stangelandcl/hardhat
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
[
"MIT"
] | null | null | null |
hardhat/recipes/perl/file_basedir.py
|
stangelandcl/hardhat
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
[
"MIT"
] | null | null | null |
from ..base import GnuRecipe
| 34.588235
| 75
| 0.619048
|
from ..base import GnuRecipe
class FileBaseDirRecipe(GnuRecipe):
def __init__(self, *args, **kwargs):
super(FileBaseDirRecipe, self).__init__(*args, **kwargs)
self.sha256 = '120a57ef78535e13e1465717b4056aff' \
'4ce69af1e31c67c65d1177a52169082b'
self.name = 'perl5-file-basedir'
self.depends = ['perl5-module-build']
self.version = '0.07'
self.url = 'http://search.cpan.org/CPAN/authors/id/K/KI/KIMRYAN/' \
'File-BaseDir-$version.tar.gz'
self.configure_args = ['perl', 'Makefile.PL']
| 495
| 14
| 49
|
1c8c055cc9d8cafce62eefcecb12eaad504ef9b2
| 13,934
|
py
|
Python
|
pyathena/sqlalchemy_athena.py
|
l1x/PyAthena
|
7aa94a85bf194b1c0c08048a1562bfb43ef6b3fb
|
[
"MIT"
] | null | null | null |
pyathena/sqlalchemy_athena.py
|
l1x/PyAthena
|
7aa94a85bf194b1c0c08048a1562bfb43ef6b3fb
|
[
"MIT"
] | null | null | null |
pyathena/sqlalchemy_athena.py
|
l1x/PyAthena
|
7aa94a85bf194b1c0c08048a1562bfb43ef6b3fb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import math
import numbers
import re
import tenacity
from sqlalchemy import exc, util
from sqlalchemy.engine import Engine, reflection
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.exc import NoSuchTableError, OperationalError
from sqlalchemy.sql.compiler import (
DDLCompiler,
GenericTypeCompiler,
IdentifierPreparer,
SQLCompiler,
)
from sqlalchemy.sql.sqltypes import (
BIGINT,
BINARY,
BOOLEAN,
DATE,
DECIMAL,
FLOAT,
INTEGER,
NULLTYPE,
STRINGTYPE,
TIMESTAMP,
)
from tenacity import retry_if_exception, stop_after_attempt, wait_exponential
import pyathena
class UniversalSet(object):
"""UniversalSet
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py"""
class AthenaDMLIdentifierPreparer(IdentifierPreparer):
"""PrestoIdentifierPreparer
https://github.com/dropbox/PyHive/blob/master/pyhive/sqlalchemy_presto.py"""
reserved_words = UniversalSet()
class AthenaStatementCompiler(SQLCompiler):
"""PrestoCompiler
https://github.com/dropbox/PyHive/blob/master/pyhive/sqlalchemy_presto.py"""
_TYPE_MAPPINGS = {
"boolean": BOOLEAN,
"real": FLOAT,
"float": FLOAT,
"double": FLOAT,
"tinyint": INTEGER,
"smallint": INTEGER,
"integer": INTEGER,
"bigint": BIGINT,
"decimal": DECIMAL,
"char": STRINGTYPE,
"varchar": STRINGTYPE,
"array": STRINGTYPE,
"row": STRINGTYPE, # StructType
"varbinary": BINARY,
"map": STRINGTYPE,
"date": DATE,
"timestamp": TIMESTAMP,
}
| 31.453725
| 95
| 0.600617
|
# -*- coding: utf-8 -*-
import math
import numbers
import re
import tenacity
from sqlalchemy import exc, util
from sqlalchemy.engine import Engine, reflection
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.exc import NoSuchTableError, OperationalError
from sqlalchemy.sql.compiler import (
DDLCompiler,
GenericTypeCompiler,
IdentifierPreparer,
SQLCompiler,
)
from sqlalchemy.sql.sqltypes import (
BIGINT,
BINARY,
BOOLEAN,
DATE,
DECIMAL,
FLOAT,
INTEGER,
NULLTYPE,
STRINGTYPE,
TIMESTAMP,
)
from tenacity import retry_if_exception, stop_after_attempt, wait_exponential
import pyathena
class UniversalSet(object):
"""UniversalSet
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py"""
def __contains__(self, item):
return True
class AthenaDMLIdentifierPreparer(IdentifierPreparer):
"""PrestoIdentifierPreparer
https://github.com/dropbox/PyHive/blob/master/pyhive/sqlalchemy_presto.py"""
reserved_words = UniversalSet()
class AthenaDDLIdentifierPreparer(IdentifierPreparer):
def __init__(
self,
dialect,
initial_quote="`",
final_quote=None,
escape_quote="`",
quote_case_sensitive_collations=True,
omit_schema=False,
):
super(AthenaDDLIdentifierPreparer, self).__init__(
dialect=dialect,
initial_quote=initial_quote,
final_quote=final_quote,
escape_quote=escape_quote,
quote_case_sensitive_collations=quote_case_sensitive_collations,
omit_schema=omit_schema,
)
class AthenaStatementCompiler(SQLCompiler):
"""PrestoCompiler
https://github.com/dropbox/PyHive/blob/master/pyhive/sqlalchemy_presto.py"""
def visit_char_length_func(self, fn, **kw):
return "length{0}".format(self.function_argspec(fn, **kw))
class AthenaTypeCompiler(GenericTypeCompiler):
def visit_FLOAT(self, type_, **kw):
return self.visit_REAL(type_, **kw)
def visit_REAL(self, type_, **kw):
return "DOUBLE"
def visit_NUMERIC(self, type_, **kw):
return self.visit_DECIMAL(type_, **kw)
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return "DECIMAL"
elif type_.scale is None:
return "DECIMAL(%(precision)s)" % {"precision": type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % {
"precision": type_.precision,
"scale": type_.scale,
}
def visit_INTEGER(self, type_, **kw):
return "INTEGER"
def visit_SMALLINT(self, type_, **kw):
return "SMALLINT"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP"
def visit_DATETIME(self, type_, **kw):
return self.visit_TIMESTAMP(type_, **kw)
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
raise exc.CompileError("Data type `{0}` is not supported".format(type_))
def visit_CLOB(self, type_, **kw):
return self.visit_BINARY(type_, **kw)
def visit_NCLOB(self, type_, **kw):
return self.visit_BINARY(type_, **kw)
def visit_CHAR(self, type_, **kw):
return self._render_string_type(type_, "CHAR")
def visit_NCHAR(self, type_, **kw):
return self._render_string_type(type_, "CHAR")
def visit_VARCHAR(self, type_, **kw):
return self._render_string_type(type_, "VARCHAR")
def visit_NVARCHAR(self, type_, **kw):
return self._render_string_type(type_, "VARCHAR")
def visit_TEXT(self, type_, **kw):
return "STRING"
def visit_BLOB(self, type_, **kw):
return self.visit_BINARY(type_, **kw)
def visit_BINARY(self, type_, **kw):
return "BINARY"
def visit_VARBINARY(self, type_, **kw):
return self.visit_BINARY(type_, **kw)
def visit_BOOLEAN(self, type_, **kw):
return "BOOLEAN"
class AthenaDDLCompiler(DDLCompiler):
@property
def preparer(self):
return self._preparer
@preparer.setter
def preparer(self, value):
pass
def __init__(
self,
dialect,
statement,
bind=None,
schema_translate_map=None,
compile_kwargs=util.immutabledict(),
):
self._preparer = AthenaDDLIdentifierPreparer(dialect)
super(AthenaDDLCompiler, self).__init__(
dialect=dialect,
statement=statement,
bind=bind,
schema_translate_map=schema_translate_map,
compile_kwargs=compile_kwargs,
)
def visit_create_table(self, create):
table = create.element
preparer = self.preparer
text = "\nCREATE EXTERNAL "
text += "TABLE " + preparer.format_table(table) + " "
text += "("
separator = "\n"
for create_column in create.columns:
column = create_column.element
try:
processed = self.process(create_column)
if processed is not None:
text += separator
separator = ", \n"
text += "\t" + processed
except exc.CompileError as ce:
util.raise_from_cause(
exc.CompileError(
util.u("(in table '{0}', column '{1}'): {2}").format(
table.description, column.name, ce.args[0]
)
)
)
const = self.create_table_constraints(
table,
_include_foreign_key_constraints=create.include_foreign_key_constraints,
)
if const:
text += separator + "\t" + const
text += "\n)\n%s\n\n" % self.post_create_table(table)
return text
def post_create_table(self, table):
raw_connection = table.bind.raw_connection()
# TODO Supports orc, avro, json, csv or tsv format
text = "STORED AS PARQUET\n"
location = (
raw_connection._kwargs["s3_dir"]
if "s3_dir" in raw_connection._kwargs
else raw_connection.s3_staging_dir
)
if not location:
raise exc.CompileError(
"`s3_dir` or `s3_staging_dir` parameter is required"
" in the connection string."
)
schema = table.schema if table.schema else raw_connection.schema_name
text += "LOCATION '{0}{1}/{2}/'\n".format(location, schema, table.name)
compression = raw_connection._kwargs.get("compression")
if compression:
text += "TBLPROPERTIES ('parquet.compress'='{0}')\n".format(
compression.upper()
)
return text
_TYPE_MAPPINGS = {
"boolean": BOOLEAN,
"real": FLOAT,
"float": FLOAT,
"double": FLOAT,
"tinyint": INTEGER,
"smallint": INTEGER,
"integer": INTEGER,
"bigint": BIGINT,
"decimal": DECIMAL,
"char": STRINGTYPE,
"varchar": STRINGTYPE,
"array": STRINGTYPE,
"row": STRINGTYPE, # StructType
"varbinary": BINARY,
"map": STRINGTYPE,
"date": DATE,
"timestamp": TIMESTAMP,
}
class AthenaDialect(DefaultDialect):
name = "awsathena"
driver = "rest"
preparer = AthenaDMLIdentifierPreparer
statement_compiler = AthenaStatementCompiler
ddl_compiler = AthenaDDLCompiler
type_compiler = AthenaTypeCompiler
default_paramstyle = pyathena.paramstyle
supports_alter = False
supports_pk_autoincrement = False
supports_default_values = False
supports_empty_insert = False
supports_multivalues_insert = True
supports_unicode_statements = True
supports_unicode_binds = True
returns_unicode_strings = True
description_encoding = None
supports_native_boolean = True
postfetch_lastrowid = False
_pattern_data_catlog_exception = re.compile(
r"(((Database|Namespace)\ (?P<schema>.+))|(Table\ (?P<table>.+)))\ not\ found\."
)
_pattern_column_type = re.compile(r"^([a-zA-Z]+)($|\(.+\)$)")
@classmethod
def dbapi(cls):
return pyathena
def _raw_connection(self, connection):
if isinstance(connection, Engine):
return connection.raw_connection()
return connection.connection
def create_connect_args(self, url):
# Connection string format:
# awsathena+rest://
# {aws_access_key_id}:{aws_secret_access_key}@athena.{region_name}.amazonaws.com:443/
# {schema_name}?s3_staging_dir={s3_staging_dir}&...
opts = {
"aws_access_key_id": url.username if url.username else None,
"aws_secret_access_key": url.password if url.password else None,
"region_name": re.sub(
r"^athena\.([a-z0-9-]+)\.amazonaws\.(com|com.cn)$", r"\1", url.host
),
"schema_name": url.database if url.database else "default",
}
opts.update(url.query)
return [[], opts]
@reflection.cache
def get_schema_names(self, connection, **kw):
query = """
SELECT schema_name
FROM information_schema.schemata
WHERE schema_name NOT IN ('information_schema')
"""
return [row.schema_name for row in connection.execute(query).fetchall()]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
raw_connection = self._raw_connection(connection)
schema = schema if schema else raw_connection.schema_name
query = """
SELECT table_name
FROM information_schema.tables
WHERE table_schema = '{schema}'
""".format(
schema=schema
)
return [row.table_name for row in connection.execute(query).fetchall()]
def has_table(self, connection, table_name, schema=None):
try:
columns = self.get_columns(connection, table_name, schema)
return True if columns else False
except NoSuchTableError:
return False
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
raw_connection = self._raw_connection(connection)
schema = schema if schema else raw_connection.schema_name
query = """
SELECT
table_schema,
table_name,
column_name,
data_type,
is_nullable,
column_default,
ordinal_position,
comment
FROM information_schema.columns
WHERE table_schema = '{schema}'
AND table_name = '{table}'
""".format(
schema=schema, table=table_name
)
retry_config = raw_connection.retry_config
retry = tenacity.Retrying(
retry=retry_if_exception(
lambda exc: self._retry_if_data_catalog_exception(
exc, schema, table_name
)
),
stop=stop_after_attempt(retry_config.attempt),
wait=wait_exponential(
multiplier=retry_config.multiplier,
max=retry_config.max_delay,
exp_base=retry_config.exponential_base,
),
reraise=True,
)
try:
return [
{
"name": row.column_name,
"type": _TYPE_MAPPINGS.get(
self._get_column_type(row.data_type), NULLTYPE
),
"nullable": True if row.is_nullable == "YES" else False,
"default": row.column_default
if not self._is_nan(row.column_default)
else None,
"ordinal_position": row.ordinal_position,
"comment": row.comment,
}
for row in retry(connection.execute, query).fetchall()
]
except OperationalError as e:
if not self._retry_if_data_catalog_exception(e, schema, table_name):
raise NoSuchTableError(table_name) from e
else:
raise e
def _retry_if_data_catalog_exception(self, exc, schema, table_name):
if not isinstance(exc, OperationalError):
return False
match = self._pattern_data_catlog_exception.search(str(exc))
if match and (
match.group("schema") == schema or match.group("table") == table_name
):
return False
return True
def _get_column_type(self, type_):
return self._pattern_column_type.sub(r"\1", type_)
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# Athena has no support for foreign keys.
return [] # pragma: no cover
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
# Athena has no support for primary keys.
return [] # pragma: no cover
def get_indexes(self, connection, table_name, schema=None, **kw):
# Athena has no support for indexes.
return [] # pragma: no cover
def do_rollback(self, dbapi_connection):
# No transactions for Athena
pass # pragma: no cover
def _check_unicode_returns(self, connection, additional_tests=None):
# Requests gives back Unicode strings
return True # pragma: no cover
def _check_unicode_description(self, connection):
# Requests gives back Unicode strings
return True # pragma: no cover
def _is_nan(self, column_default):
return isinstance(column_default, numbers.Real) and math.isnan(column_default)
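# Editor's hedged usage sketch, following the connection-string format that
# create_connect_args documents above (region, schema and bucket are
# placeholders, not real values):
# from sqlalchemy.engine import create_engine
# engine = create_engine(
#     "awsathena+rest://:@athena.us-east-1.amazonaws.com:443/default"
#     "?s3_staging_dir=s3://my-staging-bucket/path/"
# )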
| 9,969
| 1,622
| 765
|
d3da38f2d666dcc506c3bf9f7ca5edfd4290743f
| 279
|
py
|
Python
|
test.py
|
callumfrance/ThisPersonDoesNotExistAPI
|
8aaf83a65f3d527be087610ee3758d665babf657
|
[
"Apache-2.0",
"MIT"
] | 2
|
2020-11-05T19:01:03.000Z
|
2021-01-19T19:58:48.000Z
|
test.py
|
callumfrance/ThisPersonDoesNotExistAPI
|
8aaf83a65f3d527be087610ee3758d665babf657
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
test.py
|
callumfrance/ThisPersonDoesNotExistAPI
|
8aaf83a65f3d527be087610ee3758d665babf657
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-03-16T11:02:26.000Z
|
2021-03-16T11:02:26.000Z
|
import asyncio
from thispersondoesnotexist import get_online_person, save_online_person
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 19.928571
| 72
| 0.767025
|
import asyncio
from thispersondoesnotexist import get_online_person, save_online_person
async def main():
# picture = await get_online_person()
# print(picture)
await save_online_person('test.png')
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 101
| 0
| 23
|
bf99192e6c6314c0c50f380e59c48b9b0a88a055
| 1,502
|
py
|
Python
|
python/lad/tests/test_lad.py
|
mirca/least-absolute-deviations
|
d6e8463e0bfd38000e4696c7048f5935df6b304e
|
[
"MIT"
] | 4
|
2018-07-14T02:56:25.000Z
|
2019-10-12T10:12:30.000Z
|
python/lad/tests/test_lad.py
|
mirca/least-absolute-deviations
|
d6e8463e0bfd38000e4696c7048f5935df6b304e
|
[
"MIT"
] | 5
|
2018-06-10T13:11:39.000Z
|
2019-03-19T16:14:59.000Z
|
python/lad/tests/test_lad.py
|
mirca/least-absolute-deviations
|
d6e8463e0bfd38000e4696c7048f5935df6b304e
|
[
"MIT"
] | 2
|
2019-03-19T16:11:17.000Z
|
2019-07-01T08:46:53.000Z
|
import tensorflow as tf
import numpy as np
import pytest
from ..lad import lad, lad_polyfit
sess = tf.Session()
true_params = np.array([3, 10])
x = np.linspace(-1, 1, 10000)
y = np.random.laplace(loc=x * true_params[0] + true_params[1], scale=1.)
@pytest.mark.parametrize("yerr", [(None), (np.ones(len(y))), (np.std(y))])
@pytest.mark.parametrize("yerr", [(None), (np.ones(len(y))), (np.std(y))])
@pytest.mark.parametrize("order", [(1), (2), (3)])
@pytest.mark.parametrize("beta_true", [(np.arange(20).reshape(1, -1)),
(np.concatenate([np.zeros(10), np.arange(10)]).reshape(1, -1))])
| 38.512821
| 103
| 0.627164
|
import tensorflow as tf
import numpy as np
import pytest
from ..lad import lad, lad_polyfit
sess = tf.Session()
true_params = np.array([3, 10])
x = np.linspace(-1, 1, 10000)
y = np.random.laplace(loc=x * true_params[0] + true_params[1], scale=1.)
@pytest.mark.parametrize("yerr", [(None), (np.ones(len(y))), (np.std(y))])
def test_lad(yerr):
X = np.vstack([x, np.ones(len(x))])
coeffs = sess.run(lad(X.T, y, yerr=yerr))
assert ((abs(coeffs.flatten() - true_params) / true_params) < 5e-2).all()
@pytest.mark.parametrize("yerr", [(None), (np.ones(len(y))), (np.std(y))])
def test_lad_polyfit(yerr):
coeffs = sess.run(lad_polyfit(x, y, yerr=yerr))
assert ((abs(coeffs.flatten() - true_params) / true_params) < 5e-2).all()
@pytest.mark.parametrize("order", [(1), (2), (3)])
def test_lad_polyfit_order(order):
coeffs = sess.run(lad_polyfit(x, y, order=order))
assert ((abs(coeffs.flatten()[-2:] - true_params) / true_params) < 5e-2).all()
if order > 1:
assert (abs(coeffs[:-2]) < 1e-1).all()
@pytest.mark.parametrize("beta_true", [(np.arange(20).reshape(1, -1)),
(np.concatenate([np.zeros(10), np.arange(10)]).reshape(1, -1))])
def test_lad_noise_free(beta_true):
X = np.vander(x, N=np.shape(beta_true)[-1])
y_true = np.dot(beta_true, X.T).reshape(-1)
beta_est = sess.run(lad_polyfit(x, y_true, order=np.shape(beta_true)[-1]-1))
assert (np.abs((beta_true.T - beta_est)) / (1 + beta_true.T) < 2e-2).all()
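# Editor's hedged usage note (TF1 session style, mirroring the tests above):
# coeffs = sess.run(lad_polyfit(x, y))  # expected to approach [3, 10]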
| 784
| 0
| 88
|
ba31972842ede1fcd2bfaff44d561f3756b6c72a
| 2,319
|
py
|
Python
|
venv/lib/python3.9/site-packages/pydata_google_auth/_webserver.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 32
|
2018-09-14T23:31:00.000Z
|
2022-03-28T16:02:37.000Z
|
venv/lib/python3.9/site-packages/pydata_google_auth/_webserver.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 38
|
2018-08-14T20:52:52.000Z
|
2022-03-10T21:02:35.000Z
|
venv/lib/python3.9/site-packages/pydata_google_auth/_webserver.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 10
|
2018-08-19T01:39:34.000Z
|
2022-02-28T14:26:33.000Z
|
"""Helpers for running a local webserver to receive authorization code."""
import socket
from contextlib import closing
from pydata_google_auth import exceptions
LOCALHOST = "localhost"
DEFAULT_PORTS_TO_TRY = 100
def is_port_open(port):
"""Check if a port is open on localhost.
Based on StackOverflow answer: https://stackoverflow.com/a/43238489/101923
Parameters
----------
port : int
A port to check on localhost.
Returns
-------
is_open : bool
True if a socket can be opened at the requested port.
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
try:
sock.bind((LOCALHOST, port))
sock.listen(1)
except socket.error:
is_open = False
else:
is_open = True
return is_open
def find_open_port(start=8080, stop=None):
"""Find an open port between ``start`` and ``stop``.
Parameters
----------
start : Optional[int]
Beginning of range of ports to try. Defaults to 8080.
stop : Optional[int]
End of range of ports to try (not including exactly equals ``stop``).
This function tries 100 possible ports if no ``stop`` is specified.
Returns
-------
Optional[int]
``None`` if no open port is found, otherwise an integer indicating an
open port.
"""
if not stop:
stop = start + DEFAULT_PORTS_TO_TRY
for port in range(start, stop):
if is_port_open(port):
return port
# No open ports found.
return None
def run_local_server(app_flow):
"""Run local webserver installed app flow on some open port.
Parameters
----------
app_flow : google_auth_oauthlib.flow.InstalledAppFlow
Installed application flow to fetch user credentials.
Returns
-------
google.auth.credentials.Credentials
User credentials from installed application flow.
Raises
------
pydata_google_auth.exceptions.PyDataConnectionError
If no open port can be found in the range from 8080 to 8089,
inclusive.
"""
port = find_open_port()
if not port:
raise exceptions.PyDataConnectionError("Could not find open port.")
return app_flow.run_local_server(host=LOCALHOST, port=port)
| 25.766667
| 78
| 0.641656
|
"""Helpers for running a local webserver to receive authorization code."""
import socket
from contextlib import closing
from pydata_google_auth import exceptions
LOCALHOST = "localhost"
DEFAULT_PORTS_TO_TRY = 100
def is_port_open(port):
"""Check if a port is open on localhost.
Based on StackOverflow answer: https://stackoverflow.com/a/43238489/101923
Parameters
----------
port : int
A port to check on localhost.
Returns
-------
is_open : bool
True if a socket can be opened at the requested port.
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
try:
sock.bind((LOCALHOST, port))
sock.listen(1)
except socket.error:
is_open = False
else:
is_open = True
return is_open
def find_open_port(start=8080, stop=None):
"""Find an open port between ``start`` and ``stop``.
Parameters
----------
start : Optional[int]
Beginning of range of ports to try. Defaults to 8080.
stop : Optional[int]
End of range of ports to try (not including exactly equals ``stop``).
This function tries 100 possible ports if no ``stop`` is specified.
Returns
-------
Optional[int]
``None`` if no open port is found, otherwise an integer indicating an
open port.
"""
if not stop:
stop = start + DEFAULT_PORTS_TO_TRY
for port in range(start, stop):
if is_port_open(port):
return port
# No open ports found.
return None
def run_local_server(app_flow):
"""Run local webserver installed app flow on some open port.
Parameters
----------
app_flow : google_auth_oauthlib.flow.InstalledAppFlow
Installed application flow to fetch user credentials.
Returns
-------
google.auth.credentials.Credentials
User credentials from installed application flow.
Raises
------
pydata_google_auth.exceptions.PyDataConnectionError
If no open port can be found in the range from 8080 to 8089,
inclusive.
"""
port = find_open_port()
if not port:
raise exceptions.PyDataConnectionError("Could not find open port.")
return app_flow.run_local_server(host=LOCALHOST, port=port)
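# Editor's hedged usage sketch for the helpers above (the flow object comes
# from google_auth_oauthlib and is assumed, not shown here):
# port = find_open_port()            # e.g. 8080 if nothing is bound there
# assert port is None or 8080 <= port < 8180
# credentials = run_local_server(app_flow)  # app_flow: InstalledAppFlow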
| 0
| 0
| 0
|
e16a1165292bbf1bedad6c78d031716220a41e24
| 5,380
|
py
|
Python
|
campaigns/models.py
|
alimahdiyar/Developing-Community-Web
|
a663a687e0f286f197d4a7bf347f67cd130275f7
|
[
"MIT"
] | 2
|
2018-06-02T12:30:00.000Z
|
2018-07-19T14:41:39.000Z
|
campaigns/models.py
|
Developing-Community/Developing-Community-Web
|
a663a687e0f286f197d4a7bf347f67cd130275f7
|
[
"MIT"
] | 5
|
2021-06-08T19:09:00.000Z
|
2022-03-11T23:25:14.000Z
|
campaigns/models.py
|
Developing-Community/web
|
a663a687e0f286f197d4a7bf347f67cd130275f7
|
[
"MIT"
] | 2
|
2018-05-27T14:58:34.000Z
|
2018-05-27T15:03:04.000Z
|
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils import timezone
from enumfields import Enum # Uses Ethan Furman's "enum34" backport
from enumfields import EnumField
from sorl.thumbnail import ImageField
from content.models import Content
from taxonomy.models import Term
from team.models import Team
# Create your models here.
from web import settings
# Sales Campaign
| 35.163399
| 115
| 0.686803
|
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils import timezone
from enumfields import Enum # Uses Ethan Furman's "enum34" backport
from enumfields import EnumField
from sorl.thumbnail import ImageField
from content.models import Content
from taxonomy.models import Term
from team.models import Team
# Create your models here.
from web import settings
def campaign_image_upload_location(instance, filename):
x = timezone.now()
return "%s/%s/%s/%s" % (x.year, x.month, x.day, filename)
class CampaignType(Enum):
WORK_OLD = "WORK"
MENTORING_OLD = "MENTORING"
STUDY_OLD = "STUDY"
SALES_OLD = "SALES"
WORKSHOP_OLD = "WORKSHOP"
PRESENTATION_OLD = "PRESENTATION"
EVENT_OLD = "EVENT"
MENTORING = "mentoring"
STUDY = "study"
SALES = "sales"
WORKSHOP = "workshop"
PRESENTATION = "presentation"
EVENT = "event"
class Campaign(models.Model):  # One model covers every campaign kind, so related records can key against a single table
title = models.CharField(max_length=10000)
type = EnumField(CampaignType, max_length=1000)
slug = models.SlugField(blank=True, null=True)
# application = models.ForeignKey(Application, default=1, null=True, on_delete=models.CASCADE)
image = ImageField(upload_to=campaign_image_upload_location,
null=True,
blank=True,
width_field="width_field",
height_field="height_field")
banner_image = models.ImageField(upload_to=campaign_image_upload_location,
null=True,
blank=True,
width_field="banner_width_field",
height_field="banner_height_field")
width_field = models.IntegerField(default=0)
height_field = models.IntegerField(default=0)
banner_width_field = models.IntegerField(default=0)
banner_height_field = models.IntegerField(default=0)
description = models.TextField()
start_time = models.DateField(auto_now=False, auto_now_add=False, null=True, blank=True)
end_time = models.DateField(auto_now=False, auto_now_add=False, null=True, blank=True)
duration_days = models.IntegerField(blank=True, null=True)
@property
def name(self):
return self.title
def __str__(self):
return self.name
class Meta:
unique_together = ("slug", "type")
class CampaignPartyRelationType(Enum): # A subclass of Enum
CREATOR = "CREATOR"
MANAGER = "MANAGER"
MEMBER = "MEMBER"
class CampaignPartyRelation(models.Model):
campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE)
# party
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
# type = models.CharField(
# max_length=30,
# choices=[(tag.value, tag.name) for tag in CampaignPartyRelationType]
# )
type = EnumField(CampaignPartyRelationType, max_length=1000)
def __str__(self):
return str(self.content_object) + " | " + self.campaign.title
class CampaignContentRelationType(Enum): # A subclass of Enum
CREATED_ON = "CREATED_ON"
class CampaignContentRelation(models.Model):
campaign = models.ForeignKey(Campaign, related_name='rel_campaign', on_delete=models.CASCADE)
content = models.ForeignKey(Content, related_name='rel_content', on_delete=models.CASCADE)
type = EnumField(CampaignContentRelationType, default=CampaignContentRelationType.CREATED_ON, max_length=1000)
def __str__(self):
return str(self.content) + " | " + self.campaign.title
class CampaignTermRealtionType(Enum):
SUBJECT = "SUBJECT"
class CampaignTermRelation(models.Model):
campaign = models.ForeignKey(Campaign, related_name='campaign', on_delete=models.CASCADE)
term = models.ForeignKey(Term, related_name='term', on_delete=models.CASCADE)
# type = models.CharField(
# max_length=30,
# choices=[(tag.value, tag.name) for tag in CampaignTermRealtionType]
# )
type = EnumField(CampaignTermRealtionType, max_length=1000)
class CampaignEnrollmentRequest(models.Model):
# TODO: convert to profile
user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1, on_delete=models.CASCADE)
campaign = models.ForeignKey(Campaign, related_name='request_campaign', on_delete=models.CASCADE)
note = models.TextField(blank=True, null=True)
# Sales Campaign
class Product(models.Model):
seller = models.ForeignKey(Team, on_delete=models.CASCADE, blank=True, related_name="seller")
name = models.CharField(blank=True, null=True, max_length=255)
description = models.TextField(blank=True, null=True)
price = models.IntegerField()
profile_image = models.ImageField(null=True,
blank=True,
width_field="width_field",
height_field="height_field")
height_field = models.IntegerField(default=0, null=True)
width_field = models.IntegerField(default=0, null=True)
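# A minimal usage sketch of the generic party relation (illustrative only:
# ``team`` stands in for any saved Team instance and is not defined here):
#
#     from django.contrib.contenttypes.models import ContentType
#
#     campaign = Campaign.objects.create(
#         title="Study circle",
#         type=CampaignType.STUDY,
#         description="Weekly sessions",
#     )
#     CampaignPartyRelation.objects.create(
#         campaign=campaign,
#         content_type=ContentType.objects.get_for_model(team),
#         object_id=team.pk,
#         type=CampaignPartyRelationType.CREATOR,
#     )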
| 288
| 4333
| 253
|
3b3e51bbf94b1d39d45c67a57ee1414456ec670b
| 181,187
|
py
|
Python
|
src/cplex/_internal/_pycplex.py
|
tfang94/paql-project
|
0848d13a0f2489349d196a596cc1a1a1f2ee1bfe
|
[
"MIT"
] | 1
|
2021-04-28T21:30:39.000Z
|
2021-04-28T21:30:39.000Z
|
src/cplex/_internal/_pycplex.py
|
tfang94/paql-project
|
0848d13a0f2489349d196a596cc1a1a1f2ee1bfe
|
[
"MIT"
] | null | null | null |
src/cplex/_internal/_pycplex.py
|
tfang94/paql-project
|
0848d13a0f2489349d196a596cc1a1a1f2ee1bfe
|
[
"MIT"
] | 3
|
2021-04-25T16:51:47.000Z
|
2022-02-03T21:04:34.000Z
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.0
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError('Python 2.7 or later required')
# Import the low-level C/C++ module
if __package__ or '.' in __name__:
from . import _pycplex_platform
else:
import _pycplex_platform
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_add_metaclass(metaclass):
    """Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
    # Rebuild the decorated class under the requested metaclass (stock
    # SWIG 4.0 template; the inner wrapper was dropped in this dump).
    def wrapper(cls):
        return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
    return wrapper
def _swig_setattr_nondynamic_class_variable(f):
    # Stock SWIG 4.0 helper (absent from this dump): only existing,
    # non-property class attributes may be reassigned; anything else raises.
    def set_class_attr(cls, name, value):
        if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
            f(cls, name, value)
        else:
            raise AttributeError("You cannot add class attributes to %s" % cls)
    return set_class_attr
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
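# How the two helpers above combine in SWIG-generated proxies (illustrative
# sketch; ``_ExampleProxy`` is not a class from this module):
#
#     @_swig_add_metaclass(_SwigNonDynamicMeta)
#     class _ExampleProxy(object):
#         pass
#
#     _ExampleProxy.attr = 1  # AttributeError: the metaclass rejects new
#                             # class attributes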
CPX_FEATURES_H = _pycplex_platform.CPX_FEATURES_H
CPX_FEATURE_REMOTE_OBJECT = _pycplex_platform.CPX_FEATURE_REMOTE_OBJECT
CPX_FEATURE_DISTRIBUTED_MIP = _pycplex_platform.CPX_FEATURE_DISTRIBUTED_MIP
CPX_CPXAUTOINTTYPES_H_H = _pycplex_platform.CPX_CPXAUTOINTTYPES_H_H
CPXBYTE_DEFINED = _pycplex_platform.CPXBYTE_DEFINED
CPXINT_DEFINED = _pycplex_platform.CPXINT_DEFINED
CPXLONG_DEFINED = _pycplex_platform.CPXLONG_DEFINED
CPXSHORT_DEFINED = _pycplex_platform.CPXSHORT_DEFINED
CPXULONG_DEFINED = _pycplex_platform.CPXULONG_DEFINED
CPX_STR_PARAM_MAX = _pycplex_platform.CPX_STR_PARAM_MAX
# Register cpxiodevice in _pycplex_platform:
_pycplex_platform.cpxiodevice_swigregister(cpxiodevice)
cvar = _pycplex_platform.cvar
CPX_NULL = cvar.CPX_NULL
ext_name = cvar.ext_name
CPX_VERSION = _pycplex_platform.CPX_VERSION
CPX_VERSION_VERSION = _pycplex_platform.CPX_VERSION_VERSION
CPX_VERSION_RELEASE = _pycplex_platform.CPX_VERSION_RELEASE
CPX_VERSION_MODIFICATION = _pycplex_platform.CPX_VERSION_MODIFICATION
CPX_VERSION_FIX = _pycplex_platform.CPX_VERSION_FIX
CPX_INFBOUND = _pycplex_platform.CPX_INFBOUND
CPX_MINBOUND = _pycplex_platform.CPX_MINBOUND
CPX_PWL_MAXSLOPE = _pycplex_platform.CPX_PWL_MAXSLOPE
CPX_PWL_MINSLOPE = _pycplex_platform.CPX_PWL_MINSLOPE
CPX_PARAMTYPE_NONE = _pycplex_platform.CPX_PARAMTYPE_NONE
CPX_PARAMTYPE_INT = _pycplex_platform.CPX_PARAMTYPE_INT
CPX_PARAMTYPE_DOUBLE = _pycplex_platform.CPX_PARAMTYPE_DOUBLE
CPX_PARAMTYPE_STRING = _pycplex_platform.CPX_PARAMTYPE_STRING
CPX_PARAMTYPE_LONG = _pycplex_platform.CPX_PARAMTYPE_LONG
CPX_NO_SOLN = _pycplex_platform.CPX_NO_SOLN
CPX_AUTO_SOLN = _pycplex_platform.CPX_AUTO_SOLN
CPX_BASIC_SOLN = _pycplex_platform.CPX_BASIC_SOLN
CPX_NONBASIC_SOLN = _pycplex_platform.CPX_NONBASIC_SOLN
CPX_PRIMAL_SOLN = _pycplex_platform.CPX_PRIMAL_SOLN
CPX_PRECOL_LOW = _pycplex_platform.CPX_PRECOL_LOW
CPX_PRECOL_UP = _pycplex_platform.CPX_PRECOL_UP
CPX_PRECOL_FIX = _pycplex_platform.CPX_PRECOL_FIX
CPX_PRECOL_AGG = _pycplex_platform.CPX_PRECOL_AGG
CPX_PRECOL_OTHER = _pycplex_platform.CPX_PRECOL_OTHER
CPX_PREROW_RED = _pycplex_platform.CPX_PREROW_RED
CPX_PREROW_AGG = _pycplex_platform.CPX_PREROW_AGG
CPX_PREROW_OTHER = _pycplex_platform.CPX_PREROW_OTHER
CPX_AUTO = _pycplex_platform.CPX_AUTO
CPX_ON = _pycplex_platform.CPX_ON
CPX_OFF = _pycplex_platform.CPX_OFF
CPX_MAX = _pycplex_platform.CPX_MAX
CPX_MIN = _pycplex_platform.CPX_MIN
CPX_DATACHECK_OFF = _pycplex_platform.CPX_DATACHECK_OFF
CPX_DATACHECK_WARN = _pycplex_platform.CPX_DATACHECK_WARN
CPX_DATACHECK_ASSIST = _pycplex_platform.CPX_DATACHECK_ASSIST
CPX_PPRIIND_PARTIAL = _pycplex_platform.CPX_PPRIIND_PARTIAL
CPX_PPRIIND_AUTO = _pycplex_platform.CPX_PPRIIND_AUTO
CPX_PPRIIND_DEVEX = _pycplex_platform.CPX_PPRIIND_DEVEX
CPX_PPRIIND_STEEP = _pycplex_platform.CPX_PPRIIND_STEEP
CPX_PPRIIND_STEEPQSTART = _pycplex_platform.CPX_PPRIIND_STEEPQSTART
CPX_PPRIIND_FULL = _pycplex_platform.CPX_PPRIIND_FULL
CPX_DPRIIND_AUTO = _pycplex_platform.CPX_DPRIIND_AUTO
CPX_DPRIIND_FULL = _pycplex_platform.CPX_DPRIIND_FULL
CPX_DPRIIND_STEEP = _pycplex_platform.CPX_DPRIIND_STEEP
CPX_DPRIIND_FULLSTEEP = _pycplex_platform.CPX_DPRIIND_FULLSTEEP
CPX_DPRIIND_STEEPQSTART = _pycplex_platform.CPX_DPRIIND_STEEPQSTART
CPX_DPRIIND_DEVEX = _pycplex_platform.CPX_DPRIIND_DEVEX
CPX_PARALLEL_DETERMINISTIC = _pycplex_platform.CPX_PARALLEL_DETERMINISTIC
CPX_PARALLEL_AUTO = _pycplex_platform.CPX_PARALLEL_AUTO
CPX_PARALLEL_OPPORTUNISTIC = _pycplex_platform.CPX_PARALLEL_OPPORTUNISTIC
CPX_WRITELEVEL_AUTO = _pycplex_platform.CPX_WRITELEVEL_AUTO
CPX_WRITELEVEL_ALLVARS = _pycplex_platform.CPX_WRITELEVEL_ALLVARS
CPX_WRITELEVEL_DISCRETEVARS = _pycplex_platform.CPX_WRITELEVEL_DISCRETEVARS
CPX_WRITELEVEL_NONZEROVARS = _pycplex_platform.CPX_WRITELEVEL_NONZEROVARS
CPX_WRITELEVEL_NONZERODISCRETEVARS = _pycplex_platform.CPX_WRITELEVEL_NONZERODISCRETEVARS
CPX_OPTIMALITYTARGET_AUTO = _pycplex_platform.CPX_OPTIMALITYTARGET_AUTO
CPX_OPTIMALITYTARGET_OPTIMALCONVEX = _pycplex_platform.CPX_OPTIMALITYTARGET_OPTIMALCONVEX
CPX_OPTIMALITYTARGET_FIRSTORDER = _pycplex_platform.CPX_OPTIMALITYTARGET_FIRSTORDER
CPX_OPTIMALITYTARGET_OPTIMALGLOBAL = _pycplex_platform.CPX_OPTIMALITYTARGET_OPTIMALGLOBAL
CPX_ALG_NONE = _pycplex_platform.CPX_ALG_NONE
CPX_ALG_AUTOMATIC = _pycplex_platform.CPX_ALG_AUTOMATIC
CPX_ALG_PRIMAL = _pycplex_platform.CPX_ALG_PRIMAL
CPX_ALG_DUAL = _pycplex_platform.CPX_ALG_DUAL
CPX_ALG_NET = _pycplex_platform.CPX_ALG_NET
CPX_ALG_BARRIER = _pycplex_platform.CPX_ALG_BARRIER
CPX_ALG_SIFTING = _pycplex_platform.CPX_ALG_SIFTING
CPX_ALG_CONCURRENT = _pycplex_platform.CPX_ALG_CONCURRENT
CPX_ALG_BAROPT = _pycplex_platform.CPX_ALG_BAROPT
CPX_ALG_PIVOTIN = _pycplex_platform.CPX_ALG_PIVOTIN
CPX_ALG_PIVOTOUT = _pycplex_platform.CPX_ALG_PIVOTOUT
CPX_ALG_PIVOT = _pycplex_platform.CPX_ALG_PIVOT
CPX_ALG_FEASOPT = _pycplex_platform.CPX_ALG_FEASOPT
CPX_ALG_MIP = _pycplex_platform.CPX_ALG_MIP
CPX_ALG_BENDERS = _pycplex_platform.CPX_ALG_BENDERS
CPX_ALG_MULTIOBJ = _pycplex_platform.CPX_ALG_MULTIOBJ
CPX_ALG_ROBUST = _pycplex_platform.CPX_ALG_ROBUST
CPX_AT_LOWER = _pycplex_platform.CPX_AT_LOWER
CPX_BASIC = _pycplex_platform.CPX_BASIC
CPX_AT_UPPER = _pycplex_platform.CPX_AT_UPPER
CPX_FREE_SUPER = _pycplex_platform.CPX_FREE_SUPER
CPX_NO_VARIABLE = _pycplex_platform.CPX_NO_VARIABLE
CPX_CONTINUOUS = _pycplex_platform.CPX_CONTINUOUS
CPX_BINARY = _pycplex_platform.CPX_BINARY
CPX_INTEGER = _pycplex_platform.CPX_INTEGER
CPX_SEMICONT = _pycplex_platform.CPX_SEMICONT
CPX_SEMIINT = _pycplex_platform.CPX_SEMIINT
CPX_PREREDUCE_PRIMALANDDUAL = _pycplex_platform.CPX_PREREDUCE_PRIMALANDDUAL
CPX_PREREDUCE_DUALONLY = _pycplex_platform.CPX_PREREDUCE_DUALONLY
CPX_PREREDUCE_PRIMALONLY = _pycplex_platform.CPX_PREREDUCE_PRIMALONLY
CPX_PREREDUCE_NOPRIMALORDUAL = _pycplex_platform.CPX_PREREDUCE_NOPRIMALORDUAL
CPX_PREREFORM_ALL = _pycplex_platform.CPX_PREREFORM_ALL
CPX_PREREFORM_INTERFERE_CRUSH = _pycplex_platform.CPX_PREREFORM_INTERFERE_CRUSH
CPX_PREREFORM_INTERFERE_UNCRUSH = _pycplex_platform.CPX_PREREFORM_INTERFERE_UNCRUSH
CPX_PREREFORM_NONE = _pycplex_platform.CPX_PREREFORM_NONE
CPX_CONFLICT_EXCLUDED = _pycplex_platform.CPX_CONFLICT_EXCLUDED
CPX_CONFLICT_POSSIBLE_MEMBER = _pycplex_platform.CPX_CONFLICT_POSSIBLE_MEMBER
CPX_CONFLICT_POSSIBLE_LB = _pycplex_platform.CPX_CONFLICT_POSSIBLE_LB
CPX_CONFLICT_POSSIBLE_UB = _pycplex_platform.CPX_CONFLICT_POSSIBLE_UB
CPX_CONFLICT_MEMBER = _pycplex_platform.CPX_CONFLICT_MEMBER
CPX_CONFLICT_LB = _pycplex_platform.CPX_CONFLICT_LB
CPX_CONFLICT_UB = _pycplex_platform.CPX_CONFLICT_UB
CPX_CONFLICTALG_AUTO = _pycplex_platform.CPX_CONFLICTALG_AUTO
CPX_CONFLICTALG_FAST = _pycplex_platform.CPX_CONFLICTALG_FAST
CPX_CONFLICTALG_PROPAGATE = _pycplex_platform.CPX_CONFLICTALG_PROPAGATE
CPX_CONFLICTALG_PRESOLVE = _pycplex_platform.CPX_CONFLICTALG_PRESOLVE
CPX_CONFLICTALG_IIS = _pycplex_platform.CPX_CONFLICTALG_IIS
CPX_CONFLICTALG_LIMITSOLVE = _pycplex_platform.CPX_CONFLICTALG_LIMITSOLVE
CPX_CONFLICTALG_SOLVE = _pycplex_platform.CPX_CONFLICTALG_SOLVE
CPXPROB_LP = _pycplex_platform.CPXPROB_LP
CPXPROB_MILP = _pycplex_platform.CPXPROB_MILP
CPXPROB_FIXEDMILP = _pycplex_platform.CPXPROB_FIXEDMILP
CPXPROB_NODELP = _pycplex_platform.CPXPROB_NODELP
CPXPROB_QP = _pycplex_platform.CPXPROB_QP
CPXPROB_MIQP = _pycplex_platform.CPXPROB_MIQP
CPXPROB_FIXEDMIQP = _pycplex_platform.CPXPROB_FIXEDMIQP
CPXPROB_NODEQP = _pycplex_platform.CPXPROB_NODEQP
CPXPROB_QCP = _pycplex_platform.CPXPROB_QCP
CPXPROB_MIQCP = _pycplex_platform.CPXPROB_MIQCP
CPXPROB_NODEQCP = _pycplex_platform.CPXPROB_NODEQCP
CPX_LPREADER_LEGACY = _pycplex_platform.CPX_LPREADER_LEGACY
CPX_LPREADER_NEW = _pycplex_platform.CPX_LPREADER_NEW
CPX_PARAM_ALL_MIN = _pycplex_platform.CPX_PARAM_ALL_MIN
CPX_PARAM_ALL_MAX = _pycplex_platform.CPX_PARAM_ALL_MAX
CPX_CALLBACK_PRIMAL = _pycplex_platform.CPX_CALLBACK_PRIMAL
CPX_CALLBACK_DUAL = _pycplex_platform.CPX_CALLBACK_DUAL
CPX_CALLBACK_NETWORK = _pycplex_platform.CPX_CALLBACK_NETWORK
CPX_CALLBACK_PRIMAL_CROSSOVER = _pycplex_platform.CPX_CALLBACK_PRIMAL_CROSSOVER
CPX_CALLBACK_DUAL_CROSSOVER = _pycplex_platform.CPX_CALLBACK_DUAL_CROSSOVER
CPX_CALLBACK_BARRIER = _pycplex_platform.CPX_CALLBACK_BARRIER
CPX_CALLBACK_PRESOLVE = _pycplex_platform.CPX_CALLBACK_PRESOLVE
CPX_CALLBACK_QPBARRIER = _pycplex_platform.CPX_CALLBACK_QPBARRIER
CPX_CALLBACK_QPSIMPLEX = _pycplex_platform.CPX_CALLBACK_QPSIMPLEX
CPX_CALLBACK_TUNING = _pycplex_platform.CPX_CALLBACK_TUNING
CPX_CALLBACK_INFO_PRIMAL_OBJ = _pycplex_platform.CPX_CALLBACK_INFO_PRIMAL_OBJ
CPX_CALLBACK_INFO_DUAL_OBJ = _pycplex_platform.CPX_CALLBACK_INFO_DUAL_OBJ
CPX_CALLBACK_INFO_PRIMAL_INFMEAS = _pycplex_platform.CPX_CALLBACK_INFO_PRIMAL_INFMEAS
CPX_CALLBACK_INFO_DUAL_INFMEAS = _pycplex_platform.CPX_CALLBACK_INFO_DUAL_INFMEAS
CPX_CALLBACK_INFO_PRIMAL_FEAS = _pycplex_platform.CPX_CALLBACK_INFO_PRIMAL_FEAS
CPX_CALLBACK_INFO_DUAL_FEAS = _pycplex_platform.CPX_CALLBACK_INFO_DUAL_FEAS
CPX_CALLBACK_INFO_ITCOUNT = _pycplex_platform.CPX_CALLBACK_INFO_ITCOUNT
CPX_CALLBACK_INFO_CROSSOVER_PPUSH = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_PPUSH
CPX_CALLBACK_INFO_CROSSOVER_PEXCH = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_PEXCH
CPX_CALLBACK_INFO_CROSSOVER_DPUSH = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_DPUSH
CPX_CALLBACK_INFO_CROSSOVER_DEXCH = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_DEXCH
CPX_CALLBACK_INFO_CROSSOVER_SBCNT = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_SBCNT
CPX_CALLBACK_INFO_PRESOLVE_ROWSGONE = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_ROWSGONE
CPX_CALLBACK_INFO_PRESOLVE_COLSGONE = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_COLSGONE
CPX_CALLBACK_INFO_PRESOLVE_AGGSUBST = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_AGGSUBST
CPX_CALLBACK_INFO_PRESOLVE_COEFFS = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_COEFFS
CPX_CALLBACK_INFO_USER_PROBLEM = _pycplex_platform.CPX_CALLBACK_INFO_USER_PROBLEM
CPX_CALLBACK_INFO_TUNING_PROGRESS = _pycplex_platform.CPX_CALLBACK_INFO_TUNING_PROGRESS
CPX_CALLBACK_INFO_ENDTIME = _pycplex_platform.CPX_CALLBACK_INFO_ENDTIME
CPX_CALLBACK_INFO_ITCOUNT_LONG = _pycplex_platform.CPX_CALLBACK_INFO_ITCOUNT_LONG
CPX_CALLBACK_INFO_CROSSOVER_PPUSH_LONG = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_PPUSH_LONG
CPX_CALLBACK_INFO_CROSSOVER_PEXCH_LONG = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_PEXCH_LONG
CPX_CALLBACK_INFO_CROSSOVER_DPUSH_LONG = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_DPUSH_LONG
CPX_CALLBACK_INFO_CROSSOVER_DEXCH_LONG = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_DEXCH_LONG
CPX_CALLBACK_INFO_PRESOLVE_AGGSUBST_LONG = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_AGGSUBST_LONG
CPX_CALLBACK_INFO_PRESOLVE_COEFFS_LONG = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_COEFFS_LONG
CPX_CALLBACK_INFO_ENDDETTIME = _pycplex_platform.CPX_CALLBACK_INFO_ENDDETTIME
CPX_CALLBACK_INFO_STARTTIME = _pycplex_platform.CPX_CALLBACK_INFO_STARTTIME
CPX_CALLBACK_INFO_STARTDETTIME = _pycplex_platform.CPX_CALLBACK_INFO_STARTDETTIME
CPX_TUNE_AVERAGE = _pycplex_platform.CPX_TUNE_AVERAGE
CPX_TUNE_MINMAX = _pycplex_platform.CPX_TUNE_MINMAX
CPX_TUNE_ABORT = _pycplex_platform.CPX_TUNE_ABORT
CPX_TUNE_TILIM = _pycplex_platform.CPX_TUNE_TILIM
CPX_TUNE_DETTILIM = _pycplex_platform.CPX_TUNE_DETTILIM
CPX_FEASOPT_MIN_SUM = _pycplex_platform.CPX_FEASOPT_MIN_SUM
CPX_FEASOPT_OPT_SUM = _pycplex_platform.CPX_FEASOPT_OPT_SUM
CPX_FEASOPT_MIN_INF = _pycplex_platform.CPX_FEASOPT_MIN_INF
CPX_FEASOPT_OPT_INF = _pycplex_platform.CPX_FEASOPT_OPT_INF
CPX_FEASOPT_MIN_QUAD = _pycplex_platform.CPX_FEASOPT_MIN_QUAD
CPX_FEASOPT_OPT_QUAD = _pycplex_platform.CPX_FEASOPT_OPT_QUAD
CPX_BENDERSSTRATEGY_OFF = _pycplex_platform.CPX_BENDERSSTRATEGY_OFF
CPX_BENDERSSTRATEGY_AUTO = _pycplex_platform.CPX_BENDERSSTRATEGY_AUTO
CPX_BENDERSSTRATEGY_USER = _pycplex_platform.CPX_BENDERSSTRATEGY_USER
CPX_BENDERSSTRATEGY_WORKERS = _pycplex_platform.CPX_BENDERSSTRATEGY_WORKERS
CPX_BENDERSSTRATEGY_FULL = _pycplex_platform.CPX_BENDERSSTRATEGY_FULL
CPX_ANNOTATIONDATA_LONG = _pycplex_platform.CPX_ANNOTATIONDATA_LONG
CPX_ANNOTATIONDATA_DOUBLE = _pycplex_platform.CPX_ANNOTATIONDATA_DOUBLE
CPX_ANNOTATIONOBJ_OBJ = _pycplex_platform.CPX_ANNOTATIONOBJ_OBJ
CPX_ANNOTATIONOBJ_COL = _pycplex_platform.CPX_ANNOTATIONOBJ_COL
CPX_ANNOTATIONOBJ_ROW = _pycplex_platform.CPX_ANNOTATIONOBJ_ROW
CPX_ANNOTATIONOBJ_SOS = _pycplex_platform.CPX_ANNOTATIONOBJ_SOS
CPX_ANNOTATIONOBJ_IND = _pycplex_platform.CPX_ANNOTATIONOBJ_IND
CPX_ANNOTATIONOBJ_QC = _pycplex_platform.CPX_ANNOTATIONOBJ_QC
CPX_ANNOTATIONOBJ_LAST = _pycplex_platform.CPX_ANNOTATIONOBJ_LAST
CPXIIS_COMPLETE = _pycplex_platform.CPXIIS_COMPLETE
CPXIIS_PARTIAL = _pycplex_platform.CPXIIS_PARTIAL
CPXIIS_AT_LOWER = _pycplex_platform.CPXIIS_AT_LOWER
CPXIIS_FIXED = _pycplex_platform.CPXIIS_FIXED
CPXIIS_AT_UPPER = _pycplex_platform.CPXIIS_AT_UPPER
CPX_BARORDER_AUTO = _pycplex_platform.CPX_BARORDER_AUTO
CPX_BARORDER_AMD = _pycplex_platform.CPX_BARORDER_AMD
CPX_BARORDER_AMF = _pycplex_platform.CPX_BARORDER_AMF
CPX_BARORDER_ND = _pycplex_platform.CPX_BARORDER_ND
CPX_MIPEMPHASIS_BALANCED = _pycplex_platform.CPX_MIPEMPHASIS_BALANCED
CPX_MIPEMPHASIS_FEASIBILITY = _pycplex_platform.CPX_MIPEMPHASIS_FEASIBILITY
CPX_MIPEMPHASIS_OPTIMALITY = _pycplex_platform.CPX_MIPEMPHASIS_OPTIMALITY
CPX_MIPEMPHASIS_BESTBOUND = _pycplex_platform.CPX_MIPEMPHASIS_BESTBOUND
CPX_MIPEMPHASIS_HIDDENFEAS = _pycplex_platform.CPX_MIPEMPHASIS_HIDDENFEAS
CPX_MIPEMPHASIS_HEURISTIC = _pycplex_platform.CPX_MIPEMPHASIS_HEURISTIC
CPX_TYPE_VAR = _pycplex_platform.CPX_TYPE_VAR
CPX_TYPE_SOS1 = _pycplex_platform.CPX_TYPE_SOS1
CPX_TYPE_SOS2 = _pycplex_platform.CPX_TYPE_SOS2
CPX_TYPE_USER = _pycplex_platform.CPX_TYPE_USER
CPX_TYPE_ANY = _pycplex_platform.CPX_TYPE_ANY
CPX_VARSEL_MININFEAS = _pycplex_platform.CPX_VARSEL_MININFEAS
CPX_VARSEL_DEFAULT = _pycplex_platform.CPX_VARSEL_DEFAULT
CPX_VARSEL_MAXINFEAS = _pycplex_platform.CPX_VARSEL_MAXINFEAS
CPX_VARSEL_PSEUDO = _pycplex_platform.CPX_VARSEL_PSEUDO
CPX_VARSEL_STRONG = _pycplex_platform.CPX_VARSEL_STRONG
CPX_VARSEL_PSEUDOREDUCED = _pycplex_platform.CPX_VARSEL_PSEUDOREDUCED
CPX_NODESEL_DFS = _pycplex_platform.CPX_NODESEL_DFS
CPX_NODESEL_BESTBOUND = _pycplex_platform.CPX_NODESEL_BESTBOUND
CPX_NODESEL_BESTEST = _pycplex_platform.CPX_NODESEL_BESTEST
CPX_NODESEL_BESTEST_ALT = _pycplex_platform.CPX_NODESEL_BESTEST_ALT
CPX_MIPORDER_COST = _pycplex_platform.CPX_MIPORDER_COST
CPX_MIPORDER_BOUNDS = _pycplex_platform.CPX_MIPORDER_BOUNDS
CPX_MIPORDER_SCALEDCOST = _pycplex_platform.CPX_MIPORDER_SCALEDCOST
CPX_BRANCH_GLOBAL = _pycplex_platform.CPX_BRANCH_GLOBAL
CPX_BRANCH_DOWN = _pycplex_platform.CPX_BRANCH_DOWN
CPX_BRANCH_UP = _pycplex_platform.CPX_BRANCH_UP
CPX_BRDIR_DOWN = _pycplex_platform.CPX_BRDIR_DOWN
CPX_BRDIR_AUTO = _pycplex_platform.CPX_BRDIR_AUTO
CPX_BRDIR_UP = _pycplex_platform.CPX_BRDIR_UP
CPX_CUT_COVER = _pycplex_platform.CPX_CUT_COVER
CPX_CUT_GUBCOVER = _pycplex_platform.CPX_CUT_GUBCOVER
CPX_CUT_FLOWCOVER = _pycplex_platform.CPX_CUT_FLOWCOVER
CPX_CUT_CLIQUE = _pycplex_platform.CPX_CUT_CLIQUE
CPX_CUT_FRAC = _pycplex_platform.CPX_CUT_FRAC
CPX_CUT_MIR = _pycplex_platform.CPX_CUT_MIR
CPX_CUT_FLOWPATH = _pycplex_platform.CPX_CUT_FLOWPATH
CPX_CUT_DISJ = _pycplex_platform.CPX_CUT_DISJ
CPX_CUT_IMPLBD = _pycplex_platform.CPX_CUT_IMPLBD
CPX_CUT_ZEROHALF = _pycplex_platform.CPX_CUT_ZEROHALF
CPX_CUT_MCF = _pycplex_platform.CPX_CUT_MCF
CPX_CUT_LOCALCOVER = _pycplex_platform.CPX_CUT_LOCALCOVER
CPX_CUT_TIGHTEN = _pycplex_platform.CPX_CUT_TIGHTEN
CPX_CUT_OBJDISJ = _pycplex_platform.CPX_CUT_OBJDISJ
CPX_CUT_LANDP = _pycplex_platform.CPX_CUT_LANDP
CPX_CUT_USER = _pycplex_platform.CPX_CUT_USER
CPX_CUT_TABLE = _pycplex_platform.CPX_CUT_TABLE
CPX_CUT_SOLNPOOL = _pycplex_platform.CPX_CUT_SOLNPOOL
CPX_CUT_LOCALIMPLBD = _pycplex_platform.CPX_CUT_LOCALIMPLBD
CPX_CUT_BQP = _pycplex_platform.CPX_CUT_BQP
CPX_CUT_RLT = _pycplex_platform.CPX_CUT_RLT
CPX_CUT_BENDERS = _pycplex_platform.CPX_CUT_BENDERS
CPX_CUT_NUM_TYPES = _pycplex_platform.CPX_CUT_NUM_TYPES
CPX_MIPSEARCH_AUTO = _pycplex_platform.CPX_MIPSEARCH_AUTO
CPX_MIPSEARCH_TRADITIONAL = _pycplex_platform.CPX_MIPSEARCH_TRADITIONAL
CPX_MIPSEARCH_DYNAMIC = _pycplex_platform.CPX_MIPSEARCH_DYNAMIC
CPX_MIPKAPPA_OFF = _pycplex_platform.CPX_MIPKAPPA_OFF
CPX_MIPKAPPA_AUTO = _pycplex_platform.CPX_MIPKAPPA_AUTO
CPX_MIPKAPPA_SAMPLE = _pycplex_platform.CPX_MIPKAPPA_SAMPLE
CPX_MIPKAPPA_FULL = _pycplex_platform.CPX_MIPKAPPA_FULL
CPX_MIPSTART_AUTO = _pycplex_platform.CPX_MIPSTART_AUTO
CPX_MIPSTART_CHECKFEAS = _pycplex_platform.CPX_MIPSTART_CHECKFEAS
CPX_MIPSTART_SOLVEFIXED = _pycplex_platform.CPX_MIPSTART_SOLVEFIXED
CPX_MIPSTART_SOLVEMIP = _pycplex_platform.CPX_MIPSTART_SOLVEMIP
CPX_MIPSTART_REPAIR = _pycplex_platform.CPX_MIPSTART_REPAIR
CPX_MIPSTART_NOCHECK = _pycplex_platform.CPX_MIPSTART_NOCHECK
CPX_CALLBACK_MIP = _pycplex_platform.CPX_CALLBACK_MIP
CPX_CALLBACK_MIP_BRANCH = _pycplex_platform.CPX_CALLBACK_MIP_BRANCH
CPX_CALLBACK_MIP_NODE = _pycplex_platform.CPX_CALLBACK_MIP_NODE
CPX_CALLBACK_MIP_HEURISTIC = _pycplex_platform.CPX_CALLBACK_MIP_HEURISTIC
CPX_CALLBACK_MIP_SOLVE = _pycplex_platform.CPX_CALLBACK_MIP_SOLVE
CPX_CALLBACK_MIP_CUT_LOOP = _pycplex_platform.CPX_CALLBACK_MIP_CUT_LOOP
CPX_CALLBACK_MIP_PROBE = _pycplex_platform.CPX_CALLBACK_MIP_PROBE
CPX_CALLBACK_MIP_FRACCUT = _pycplex_platform.CPX_CALLBACK_MIP_FRACCUT
CPX_CALLBACK_MIP_DISJCUT = _pycplex_platform.CPX_CALLBACK_MIP_DISJCUT
CPX_CALLBACK_MIP_FLOWMIR = _pycplex_platform.CPX_CALLBACK_MIP_FLOWMIR
CPX_CALLBACK_MIP_INCUMBENT_NODESOLN = _pycplex_platform.CPX_CALLBACK_MIP_INCUMBENT_NODESOLN
CPX_CALLBACK_MIP_DELETENODE = _pycplex_platform.CPX_CALLBACK_MIP_DELETENODE
CPX_CALLBACK_MIP_BRANCH_NOSOLN = _pycplex_platform.CPX_CALLBACK_MIP_BRANCH_NOSOLN
CPX_CALLBACK_MIP_CUT_LAST = _pycplex_platform.CPX_CALLBACK_MIP_CUT_LAST
CPX_CALLBACK_MIP_CUT_FEAS = _pycplex_platform.CPX_CALLBACK_MIP_CUT_FEAS
CPX_CALLBACK_MIP_CUT_UNBD = _pycplex_platform.CPX_CALLBACK_MIP_CUT_UNBD
CPX_CALLBACK_MIP_INCUMBENT_HEURSOLN = _pycplex_platform.CPX_CALLBACK_MIP_INCUMBENT_HEURSOLN
CPX_CALLBACK_MIP_INCUMBENT_USERSOLN = _pycplex_platform.CPX_CALLBACK_MIP_INCUMBENT_USERSOLN
CPX_CALLBACK_MIP_INCUMBENT_MIPSTART = _pycplex_platform.CPX_CALLBACK_MIP_INCUMBENT_MIPSTART
CPX_CALLBACK_INFO_BEST_INTEGER = _pycplex_platform.CPX_CALLBACK_INFO_BEST_INTEGER
CPX_CALLBACK_INFO_BEST_REMAINING = _pycplex_platform.CPX_CALLBACK_INFO_BEST_REMAINING
CPX_CALLBACK_INFO_NODE_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_NODE_COUNT
CPX_CALLBACK_INFO_NODES_LEFT = _pycplex_platform.CPX_CALLBACK_INFO_NODES_LEFT
CPX_CALLBACK_INFO_MIP_ITERATIONS = _pycplex_platform.CPX_CALLBACK_INFO_MIP_ITERATIONS
CPX_CALLBACK_INFO_CUTOFF = _pycplex_platform.CPX_CALLBACK_INFO_CUTOFF
CPX_CALLBACK_INFO_CLIQUE_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_CLIQUE_COUNT
CPX_CALLBACK_INFO_COVER_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_COVER_COUNT
CPX_CALLBACK_INFO_MIP_FEAS = _pycplex_platform.CPX_CALLBACK_INFO_MIP_FEAS
CPX_CALLBACK_INFO_FLOWCOVER_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_FLOWCOVER_COUNT
CPX_CALLBACK_INFO_GUBCOVER_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_GUBCOVER_COUNT
CPX_CALLBACK_INFO_IMPLBD_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_IMPLBD_COUNT
CPX_CALLBACK_INFO_PROBE_PHASE = _pycplex_platform.CPX_CALLBACK_INFO_PROBE_PHASE
CPX_CALLBACK_INFO_PROBE_PROGRESS = _pycplex_platform.CPX_CALLBACK_INFO_PROBE_PROGRESS
CPX_CALLBACK_INFO_FRACCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_FRACCUT_COUNT
CPX_CALLBACK_INFO_FRACCUT_PROGRESS = _pycplex_platform.CPX_CALLBACK_INFO_FRACCUT_PROGRESS
CPX_CALLBACK_INFO_DISJCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_DISJCUT_COUNT
CPX_CALLBACK_INFO_DISJCUT_PROGRESS = _pycplex_platform.CPX_CALLBACK_INFO_DISJCUT_PROGRESS
CPX_CALLBACK_INFO_FLOWPATH_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_FLOWPATH_COUNT
CPX_CALLBACK_INFO_MIRCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_MIRCUT_COUNT
CPX_CALLBACK_INFO_FLOWMIR_PROGRESS = _pycplex_platform.CPX_CALLBACK_INFO_FLOWMIR_PROGRESS
CPX_CALLBACK_INFO_ZEROHALFCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_ZEROHALFCUT_COUNT
CPX_CALLBACK_INFO_MY_THREAD_NUM = _pycplex_platform.CPX_CALLBACK_INFO_MY_THREAD_NUM
CPX_CALLBACK_INFO_USER_THREADS = _pycplex_platform.CPX_CALLBACK_INFO_USER_THREADS
CPX_CALLBACK_INFO_MIP_REL_GAP = _pycplex_platform.CPX_CALLBACK_INFO_MIP_REL_GAP
CPX_CALLBACK_INFO_MCFCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_MCFCUT_COUNT
CPX_CALLBACK_INFO_KAPPA_STABLE = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_STABLE
CPX_CALLBACK_INFO_KAPPA_SUSPICIOUS = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_SUSPICIOUS
CPX_CALLBACK_INFO_KAPPA_UNSTABLE = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_UNSTABLE
CPX_CALLBACK_INFO_KAPPA_ILLPOSED = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_ILLPOSED
CPX_CALLBACK_INFO_KAPPA_MAX = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_MAX
CPX_CALLBACK_INFO_KAPPA_ATTENTION = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_ATTENTION
CPX_CALLBACK_INFO_LANDPCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_LANDPCUT_COUNT
CPX_CALLBACK_INFO_USERCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_USERCUT_COUNT
CPX_CALLBACK_INFO_TABLECUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_TABLECUT_COUNT
CPX_CALLBACK_INFO_SOLNPOOLCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_SOLNPOOLCUT_COUNT
CPX_CALLBACK_INFO_BENDERS_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_BENDERS_COUNT
CPX_CALLBACK_INFO_NODE_COUNT_LONG = _pycplex_platform.CPX_CALLBACK_INFO_NODE_COUNT_LONG
CPX_CALLBACK_INFO_NODES_LEFT_LONG = _pycplex_platform.CPX_CALLBACK_INFO_NODES_LEFT_LONG
CPX_CALLBACK_INFO_MIP_ITERATIONS_LONG = _pycplex_platform.CPX_CALLBACK_INFO_MIP_ITERATIONS_LONG
CPX_CALLBACK_INFO_LAZY_SOURCE = _pycplex_platform.CPX_CALLBACK_INFO_LAZY_SOURCE
CPX_CALLBACK_INFO_NODE_SIINF = _pycplex_platform.CPX_CALLBACK_INFO_NODE_SIINF
CPX_CALLBACK_INFO_NODE_NIINF = _pycplex_platform.CPX_CALLBACK_INFO_NODE_NIINF
CPX_CALLBACK_INFO_NODE_ESTIMATE = _pycplex_platform.CPX_CALLBACK_INFO_NODE_ESTIMATE
CPX_CALLBACK_INFO_NODE_DEPTH = _pycplex_platform.CPX_CALLBACK_INFO_NODE_DEPTH
CPX_CALLBACK_INFO_NODE_OBJVAL = _pycplex_platform.CPX_CALLBACK_INFO_NODE_OBJVAL
CPX_CALLBACK_INFO_NODE_TYPE = _pycplex_platform.CPX_CALLBACK_INFO_NODE_TYPE
CPX_CALLBACK_INFO_NODE_VAR = _pycplex_platform.CPX_CALLBACK_INFO_NODE_VAR
CPX_CALLBACK_INFO_NODE_SOS = _pycplex_platform.CPX_CALLBACK_INFO_NODE_SOS
CPX_CALLBACK_INFO_NODE_SEQNUM = _pycplex_platform.CPX_CALLBACK_INFO_NODE_SEQNUM
CPX_CALLBACK_INFO_NODE_USERHANDLE = _pycplex_platform.CPX_CALLBACK_INFO_NODE_USERHANDLE
CPX_CALLBACK_INFO_NODE_NODENUM = _pycplex_platform.CPX_CALLBACK_INFO_NODE_NODENUM
CPX_CALLBACK_INFO_NODE_SEQNUM_LONG = _pycplex_platform.CPX_CALLBACK_INFO_NODE_SEQNUM_LONG
CPX_CALLBACK_INFO_NODE_NODENUM_LONG = _pycplex_platform.CPX_CALLBACK_INFO_NODE_NODENUM_LONG
CPX_CALLBACK_INFO_NODE_DEPTH_LONG = _pycplex_platform.CPX_CALLBACK_INFO_NODE_DEPTH_LONG
CPX_CALLBACK_INFO_SOS_TYPE = _pycplex_platform.CPX_CALLBACK_INFO_SOS_TYPE
CPX_CALLBACK_INFO_SOS_SIZE = _pycplex_platform.CPX_CALLBACK_INFO_SOS_SIZE
CPX_CALLBACK_INFO_SOS_IS_FEASIBLE = _pycplex_platform.CPX_CALLBACK_INFO_SOS_IS_FEASIBLE
CPX_CALLBACK_INFO_SOS_MEMBER_INDEX = _pycplex_platform.CPX_CALLBACK_INFO_SOS_MEMBER_INDEX
CPX_CALLBACK_INFO_SOS_MEMBER_REFVAL = _pycplex_platform.CPX_CALLBACK_INFO_SOS_MEMBER_REFVAL
CPX_CALLBACK_INFO_SOS_NUM = _pycplex_platform.CPX_CALLBACK_INFO_SOS_NUM
CPX_CALLBACK_INFO_IC_NUM = _pycplex_platform.CPX_CALLBACK_INFO_IC_NUM
CPX_CALLBACK_INFO_IC_IMPLYING_VAR = _pycplex_platform.CPX_CALLBACK_INFO_IC_IMPLYING_VAR
CPX_CALLBACK_INFO_IC_IMPLIED_VAR = _pycplex_platform.CPX_CALLBACK_INFO_IC_IMPLIED_VAR
CPX_CALLBACK_INFO_IC_SENSE = _pycplex_platform.CPX_CALLBACK_INFO_IC_SENSE
CPX_CALLBACK_INFO_IC_COMPL = _pycplex_platform.CPX_CALLBACK_INFO_IC_COMPL
CPX_CALLBACK_INFO_IC_RHS = _pycplex_platform.CPX_CALLBACK_INFO_IC_RHS
CPX_CALLBACK_INFO_IC_IS_FEASIBLE = _pycplex_platform.CPX_CALLBACK_INFO_IC_IS_FEASIBLE
CPX_INCUMBENT_ID = _pycplex_platform.CPX_INCUMBENT_ID
CPX_RAMPUP_DISABLED = _pycplex_platform.CPX_RAMPUP_DISABLED
CPX_RAMPUP_AUTO = _pycplex_platform.CPX_RAMPUP_AUTO
CPX_RAMPUP_DYNAMIC = _pycplex_platform.CPX_RAMPUP_DYNAMIC
CPX_RAMPUP_INFINITE = _pycplex_platform.CPX_RAMPUP_INFINITE
CPX_CALLBACK_DEFAULT = _pycplex_platform.CPX_CALLBACK_DEFAULT
CPX_CALLBACK_FAIL = _pycplex_platform.CPX_CALLBACK_FAIL
CPX_CALLBACK_SET = _pycplex_platform.CPX_CALLBACK_SET
CPX_CALLBACK_ABORT_CUT_LOOP = _pycplex_platform.CPX_CALLBACK_ABORT_CUT_LOOP
CPX_USECUT_FORCE = _pycplex_platform.CPX_USECUT_FORCE
CPX_USECUT_PURGE = _pycplex_platform.CPX_USECUT_PURGE
CPX_USECUT_FILTER = _pycplex_platform.CPX_USECUT_FILTER
CPX_INTEGER_FEASIBLE = _pycplex_platform.CPX_INTEGER_FEASIBLE
CPX_INTEGER_INFEASIBLE = _pycplex_platform.CPX_INTEGER_INFEASIBLE
CPX_IMPLIED_INTEGER_FEASIBLE = _pycplex_platform.CPX_IMPLIED_INTEGER_FEASIBLE
CPX_CON_LOWER_BOUND = _pycplex_platform.CPX_CON_LOWER_BOUND
CPX_CON_UPPER_BOUND = _pycplex_platform.CPX_CON_UPPER_BOUND
CPX_CON_LINEAR = _pycplex_platform.CPX_CON_LINEAR
CPX_CON_QUADRATIC = _pycplex_platform.CPX_CON_QUADRATIC
CPX_CON_SOS = _pycplex_platform.CPX_CON_SOS
CPX_CON_INDICATOR = _pycplex_platform.CPX_CON_INDICATOR
CPX_CON_PWL = _pycplex_platform.CPX_CON_PWL
CPX_CON_ABS = _pycplex_platform.CPX_CON_ABS
CPX_CON_MINEXPR = _pycplex_platform.CPX_CON_MINEXPR
CPX_CON_MAXEXPR = _pycplex_platform.CPX_CON_MAXEXPR
CPX_CON_LAST_CONTYPE = _pycplex_platform.CPX_CON_LAST_CONTYPE
CPX_INDICATOR_IF = _pycplex_platform.CPX_INDICATOR_IF
CPX_INDICATOR_ONLYIF = _pycplex_platform.CPX_INDICATOR_ONLYIF
CPX_INDICATOR_IFANDONLYIF = _pycplex_platform.CPX_INDICATOR_IFANDONLYIF
CPXNET_NO_DISPLAY_OBJECTIVE = _pycplex_platform.CPXNET_NO_DISPLAY_OBJECTIVE
CPXNET_TRUE_OBJECTIVE = _pycplex_platform.CPXNET_TRUE_OBJECTIVE
CPXNET_PENALIZED_OBJECTIVE = _pycplex_platform.CPXNET_PENALIZED_OBJECTIVE
CPXNET_PRICE_AUTO = _pycplex_platform.CPXNET_PRICE_AUTO
CPXNET_PRICE_PARTIAL = _pycplex_platform.CPXNET_PRICE_PARTIAL
CPXNET_PRICE_MULT_PART = _pycplex_platform.CPXNET_PRICE_MULT_PART
CPXNET_PRICE_SORT_MULT_PART = _pycplex_platform.CPXNET_PRICE_SORT_MULT_PART
CPX_NETFIND_PURE = _pycplex_platform.CPX_NETFIND_PURE
CPX_NETFIND_REFLECT = _pycplex_platform.CPX_NETFIND_REFLECT
CPX_NETFIND_SCALE = _pycplex_platform.CPX_NETFIND_SCALE
CPX_QCPDUALS_NO = _pycplex_platform.CPX_QCPDUALS_NO
CPX_QCPDUALS_IFPOSSIBLE = _pycplex_platform.CPX_QCPDUALS_IFPOSSIBLE
CPX_QCPDUALS_FORCE = _pycplex_platform.CPX_QCPDUALS_FORCE
CPX_CPXAUTOCONSTANTS_H_H = _pycplex_platform.CPX_CPXAUTOCONSTANTS_H_H
CPX_BENDERS_ANNOTATION = _pycplex_platform.CPX_BENDERS_ANNOTATION
CPX_BENDERS_MASTERVALUE = _pycplex_platform.CPX_BENDERS_MASTERVALUE
CPX_BIGINT = _pycplex_platform.CPX_BIGINT
CPX_BIGLONG = _pycplex_platform.CPX_BIGLONG
CPX_CALLBACKCONTEXT_BRANCHING = _pycplex_platform.CPX_CALLBACKCONTEXT_BRANCHING
CPX_CALLBACKCONTEXT_CANDIDATE = _pycplex_platform.CPX_CALLBACKCONTEXT_CANDIDATE
CPX_CALLBACKCONTEXT_GLOBAL_PROGRESS = _pycplex_platform.CPX_CALLBACKCONTEXT_GLOBAL_PROGRESS
CPX_CALLBACKCONTEXT_LOCAL_PROGRESS = _pycplex_platform.CPX_CALLBACKCONTEXT_LOCAL_PROGRESS
CPX_CALLBACKCONTEXT_RELAXATION = _pycplex_platform.CPX_CALLBACKCONTEXT_RELAXATION
CPX_CALLBACKCONTEXT_THREAD_DOWN = _pycplex_platform.CPX_CALLBACKCONTEXT_THREAD_DOWN
CPX_CALLBACKCONTEXT_THREAD_UP = _pycplex_platform.CPX_CALLBACKCONTEXT_THREAD_UP
CPX_DUAL_OBJ = _pycplex_platform.CPX_DUAL_OBJ
CPX_EXACT_KAPPA = _pycplex_platform.CPX_EXACT_KAPPA
CPX_KAPPA = _pycplex_platform.CPX_KAPPA
CPX_KAPPA_ATTENTION = _pycplex_platform.CPX_KAPPA_ATTENTION
CPX_KAPPA_ILLPOSED = _pycplex_platform.CPX_KAPPA_ILLPOSED
CPX_KAPPA_MAX = _pycplex_platform.CPX_KAPPA_MAX
CPX_KAPPA_STABLE = _pycplex_platform.CPX_KAPPA_STABLE
CPX_KAPPA_SUSPICIOUS = _pycplex_platform.CPX_KAPPA_SUSPICIOUS
CPX_KAPPA_UNSTABLE = _pycplex_platform.CPX_KAPPA_UNSTABLE
CPX_LAZYCONSTRAINTCALLBACK_HEUR = _pycplex_platform.CPX_LAZYCONSTRAINTCALLBACK_HEUR
CPX_LAZYCONSTRAINTCALLBACK_MIPSTART = _pycplex_platform.CPX_LAZYCONSTRAINTCALLBACK_MIPSTART
CPX_LAZYCONSTRAINTCALLBACK_NODE = _pycplex_platform.CPX_LAZYCONSTRAINTCALLBACK_NODE
CPX_LAZYCONSTRAINTCALLBACK_USER = _pycplex_platform.CPX_LAZYCONSTRAINTCALLBACK_USER
CPX_MAX_COMP_SLACK = _pycplex_platform.CPX_MAX_COMP_SLACK
CPX_MAX_DUAL_INFEAS = _pycplex_platform.CPX_MAX_DUAL_INFEAS
CPX_MAX_DUAL_RESIDUAL = _pycplex_platform.CPX_MAX_DUAL_RESIDUAL
CPX_MAX_INDSLACK_INFEAS = _pycplex_platform.CPX_MAX_INDSLACK_INFEAS
CPX_MAX_INT_INFEAS = _pycplex_platform.CPX_MAX_INT_INFEAS
CPX_MAX_PI = _pycplex_platform.CPX_MAX_PI
CPX_MAX_PRIMAL_INFEAS = _pycplex_platform.CPX_MAX_PRIMAL_INFEAS
CPX_MAX_PRIMAL_RESIDUAL = _pycplex_platform.CPX_MAX_PRIMAL_RESIDUAL
CPX_MAX_PWLSLACK_INFEAS = _pycplex_platform.CPX_MAX_PWLSLACK_INFEAS
CPX_MAX_QCPRIMAL_RESIDUAL = _pycplex_platform.CPX_MAX_QCPRIMAL_RESIDUAL
CPX_MAX_QCSLACK = _pycplex_platform.CPX_MAX_QCSLACK
CPX_MAX_QCSLACK_INFEAS = _pycplex_platform.CPX_MAX_QCSLACK_INFEAS
CPX_MAX_RED_COST = _pycplex_platform.CPX_MAX_RED_COST
CPX_MAX_SCALED_DUAL_INFEAS = _pycplex_platform.CPX_MAX_SCALED_DUAL_INFEAS
CPX_MAX_SCALED_DUAL_RESIDUAL = _pycplex_platform.CPX_MAX_SCALED_DUAL_RESIDUAL
CPX_MAX_SCALED_PI = _pycplex_platform.CPX_MAX_SCALED_PI
CPX_MAX_SCALED_PRIMAL_INFEAS = _pycplex_platform.CPX_MAX_SCALED_PRIMAL_INFEAS
CPX_MAX_SCALED_PRIMAL_RESIDUAL = _pycplex_platform.CPX_MAX_SCALED_PRIMAL_RESIDUAL
CPX_MAX_SCALED_RED_COST = _pycplex_platform.CPX_MAX_SCALED_RED_COST
CPX_MAX_SCALED_SLACK = _pycplex_platform.CPX_MAX_SCALED_SLACK
CPX_MAX_SCALED_X = _pycplex_platform.CPX_MAX_SCALED_X
CPX_MAX_SLACK = _pycplex_platform.CPX_MAX_SLACK
CPX_MAX_X = _pycplex_platform.CPX_MAX_X
CPX_MULTIOBJ_BARITCNT = _pycplex_platform.CPX_MULTIOBJ_BARITCNT
CPX_MULTIOBJ_BESTOBJVAL = _pycplex_platform.CPX_MULTIOBJ_BESTOBJVAL
CPX_MULTIOBJ_BLEND = _pycplex_platform.CPX_MULTIOBJ_BLEND
CPX_MULTIOBJ_DEGCNT = _pycplex_platform.CPX_MULTIOBJ_DEGCNT
CPX_MULTIOBJ_DETTIME = _pycplex_platform.CPX_MULTIOBJ_DETTIME
CPX_MULTIOBJ_DEXCH = _pycplex_platform.CPX_MULTIOBJ_DEXCH
CPX_MULTIOBJ_DPUSH = _pycplex_platform.CPX_MULTIOBJ_DPUSH
CPX_MULTIOBJ_ERROR = _pycplex_platform.CPX_MULTIOBJ_ERROR
CPX_MULTIOBJ_ITCNT = _pycplex_platform.CPX_MULTIOBJ_ITCNT
CPX_MULTIOBJ_METHOD = _pycplex_platform.CPX_MULTIOBJ_METHOD
CPX_MULTIOBJ_NODECNT = _pycplex_platform.CPX_MULTIOBJ_NODECNT
CPX_MULTIOBJ_NODELEFTCNT = _pycplex_platform.CPX_MULTIOBJ_NODELEFTCNT
CPX_MULTIOBJ_OBJVAL = _pycplex_platform.CPX_MULTIOBJ_OBJVAL
CPX_MULTIOBJ_PEXCH = _pycplex_platform.CPX_MULTIOBJ_PEXCH
CPX_MULTIOBJ_PHASE1CNT = _pycplex_platform.CPX_MULTIOBJ_PHASE1CNT
CPX_MULTIOBJ_PPUSH = _pycplex_platform.CPX_MULTIOBJ_PPUSH
CPX_MULTIOBJ_PRIORITY = _pycplex_platform.CPX_MULTIOBJ_PRIORITY
CPX_MULTIOBJ_SIFTITCNT = _pycplex_platform.CPX_MULTIOBJ_SIFTITCNT
CPX_MULTIOBJ_SIFTPHASE1CNT = _pycplex_platform.CPX_MULTIOBJ_SIFTPHASE1CNT
CPX_MULTIOBJ_STATUS = _pycplex_platform.CPX_MULTIOBJ_STATUS
CPX_MULTIOBJ_TIME = _pycplex_platform.CPX_MULTIOBJ_TIME
CPX_NO_PRIORITY_CHANGE = _pycplex_platform.CPX_NO_PRIORITY_CHANGE
CPX_OBJ_GAP = _pycplex_platform.CPX_OBJ_GAP
CPX_PRIMAL_OBJ = _pycplex_platform.CPX_PRIMAL_OBJ
CPX_RELAXATION_FLAG_NOSOLVE = _pycplex_platform.CPX_RELAXATION_FLAG_NOSOLVE
CPX_SOLNPOOL_DIV = _pycplex_platform.CPX_SOLNPOOL_DIV
CPX_SOLNPOOL_FIFO = _pycplex_platform.CPX_SOLNPOOL_FIFO
CPX_SOLNPOOL_FILTER_DIVERSITY = _pycplex_platform.CPX_SOLNPOOL_FILTER_DIVERSITY
CPX_SOLNPOOL_FILTER_RANGE = _pycplex_platform.CPX_SOLNPOOL_FILTER_RANGE
CPX_SOLNPOOL_OBJ = _pycplex_platform.CPX_SOLNPOOL_OBJ
CPX_STAT_ABORT_DETTIME_LIM = _pycplex_platform.CPX_STAT_ABORT_DETTIME_LIM
CPX_STAT_ABORT_DUAL_OBJ_LIM = _pycplex_platform.CPX_STAT_ABORT_DUAL_OBJ_LIM
CPX_STAT_ABORT_IT_LIM = _pycplex_platform.CPX_STAT_ABORT_IT_LIM
CPX_STAT_ABORT_OBJ_LIM = _pycplex_platform.CPX_STAT_ABORT_OBJ_LIM
CPX_STAT_ABORT_PRIM_OBJ_LIM = _pycplex_platform.CPX_STAT_ABORT_PRIM_OBJ_LIM
CPX_STAT_ABORT_TIME_LIM = _pycplex_platform.CPX_STAT_ABORT_TIME_LIM
CPX_STAT_ABORT_USER = _pycplex_platform.CPX_STAT_ABORT_USER
CPX_STAT_BENDERS_NUM_BEST = _pycplex_platform.CPX_STAT_BENDERS_NUM_BEST
CPX_STAT_CONFLICT_ABORT_CONTRADICTION = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_CONTRADICTION
CPX_STAT_CONFLICT_ABORT_DETTIME_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_DETTIME_LIM
CPX_STAT_CONFLICT_ABORT_IT_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_IT_LIM
CPX_STAT_CONFLICT_ABORT_MEM_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_MEM_LIM
CPX_STAT_CONFLICT_ABORT_NODE_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_NODE_LIM
CPX_STAT_CONFLICT_ABORT_OBJ_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_OBJ_LIM
CPX_STAT_CONFLICT_ABORT_TIME_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_TIME_LIM
CPX_STAT_CONFLICT_ABORT_USER = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_USER
CPX_STAT_CONFLICT_FEASIBLE = _pycplex_platform.CPX_STAT_CONFLICT_FEASIBLE
CPX_STAT_CONFLICT_MINIMAL = _pycplex_platform.CPX_STAT_CONFLICT_MINIMAL
CPX_STAT_FEASIBLE = _pycplex_platform.CPX_STAT_FEASIBLE
CPX_STAT_FEASIBLE_RELAXED_INF = _pycplex_platform.CPX_STAT_FEASIBLE_RELAXED_INF
CPX_STAT_FEASIBLE_RELAXED_QUAD = _pycplex_platform.CPX_STAT_FEASIBLE_RELAXED_QUAD
CPX_STAT_FEASIBLE_RELAXED_SUM = _pycplex_platform.CPX_STAT_FEASIBLE_RELAXED_SUM
CPX_STAT_FIRSTORDER = _pycplex_platform.CPX_STAT_FIRSTORDER
CPX_STAT_INFEASIBLE = _pycplex_platform.CPX_STAT_INFEASIBLE
CPX_STAT_INForUNBD = _pycplex_platform.CPX_STAT_INForUNBD
CPX_STAT_MULTIOBJ_INFEASIBLE = _pycplex_platform.CPX_STAT_MULTIOBJ_INFEASIBLE
CPX_STAT_MULTIOBJ_INForUNBD = _pycplex_platform.CPX_STAT_MULTIOBJ_INForUNBD
CPX_STAT_MULTIOBJ_NON_OPTIMAL = _pycplex_platform.CPX_STAT_MULTIOBJ_NON_OPTIMAL
CPX_STAT_MULTIOBJ_OPTIMAL = _pycplex_platform.CPX_STAT_MULTIOBJ_OPTIMAL
CPX_STAT_MULTIOBJ_STOPPED = _pycplex_platform.CPX_STAT_MULTIOBJ_STOPPED
CPX_STAT_MULTIOBJ_UNBOUNDED = _pycplex_platform.CPX_STAT_MULTIOBJ_UNBOUNDED
CPX_STAT_NUM_BEST = _pycplex_platform.CPX_STAT_NUM_BEST
CPX_STAT_OPTIMAL = _pycplex_platform.CPX_STAT_OPTIMAL
CPX_STAT_OPTIMAL_FACE_UNBOUNDED = _pycplex_platform.CPX_STAT_OPTIMAL_FACE_UNBOUNDED
CPX_STAT_OPTIMAL_INFEAS = _pycplex_platform.CPX_STAT_OPTIMAL_INFEAS
CPX_STAT_OPTIMAL_RELAXED_INF = _pycplex_platform.CPX_STAT_OPTIMAL_RELAXED_INF
CPX_STAT_OPTIMAL_RELAXED_QUAD = _pycplex_platform.CPX_STAT_OPTIMAL_RELAXED_QUAD
CPX_STAT_OPTIMAL_RELAXED_SUM = _pycplex_platform.CPX_STAT_OPTIMAL_RELAXED_SUM
CPX_STAT_UNBOUNDED = _pycplex_platform.CPX_STAT_UNBOUNDED
CPX_SUM_COMP_SLACK = _pycplex_platform.CPX_SUM_COMP_SLACK
CPX_SUM_DUAL_INFEAS = _pycplex_platform.CPX_SUM_DUAL_INFEAS
CPX_SUM_DUAL_RESIDUAL = _pycplex_platform.CPX_SUM_DUAL_RESIDUAL
CPX_SUM_INDSLACK_INFEAS = _pycplex_platform.CPX_SUM_INDSLACK_INFEAS
CPX_SUM_INT_INFEAS = _pycplex_platform.CPX_SUM_INT_INFEAS
CPX_SUM_PI = _pycplex_platform.CPX_SUM_PI
CPX_SUM_PRIMAL_INFEAS = _pycplex_platform.CPX_SUM_PRIMAL_INFEAS
CPX_SUM_PRIMAL_RESIDUAL = _pycplex_platform.CPX_SUM_PRIMAL_RESIDUAL
CPX_SUM_PWLSLACK_INFEAS = _pycplex_platform.CPX_SUM_PWLSLACK_INFEAS
CPX_SUM_QCPRIMAL_RESIDUAL = _pycplex_platform.CPX_SUM_QCPRIMAL_RESIDUAL
CPX_SUM_QCSLACK = _pycplex_platform.CPX_SUM_QCSLACK
CPX_SUM_QCSLACK_INFEAS = _pycplex_platform.CPX_SUM_QCSLACK_INFEAS
CPX_SUM_RED_COST = _pycplex_platform.CPX_SUM_RED_COST
CPX_SUM_SCALED_DUAL_INFEAS = _pycplex_platform.CPX_SUM_SCALED_DUAL_INFEAS
CPX_SUM_SCALED_DUAL_RESIDUAL = _pycplex_platform.CPX_SUM_SCALED_DUAL_RESIDUAL
CPX_SUM_SCALED_PI = _pycplex_platform.CPX_SUM_SCALED_PI
CPX_SUM_SCALED_PRIMAL_INFEAS = _pycplex_platform.CPX_SUM_SCALED_PRIMAL_INFEAS
CPX_SUM_SCALED_PRIMAL_RESIDUAL = _pycplex_platform.CPX_SUM_SCALED_PRIMAL_RESIDUAL
CPX_SUM_SCALED_RED_COST = _pycplex_platform.CPX_SUM_SCALED_RED_COST
CPX_SUM_SCALED_SLACK = _pycplex_platform.CPX_SUM_SCALED_SLACK
CPX_SUM_SCALED_X = _pycplex_platform.CPX_SUM_SCALED_X
CPX_SUM_SLACK = _pycplex_platform.CPX_SUM_SLACK
CPX_SUM_X = _pycplex_platform.CPX_SUM_X
CPXERR_ABORT_STRONGBRANCH = _pycplex_platform.CPXERR_ABORT_STRONGBRANCH
CPXERR_ADJ_SIGN_QUAD = _pycplex_platform.CPXERR_ADJ_SIGN_QUAD
CPXERR_ADJ_SIGN_SENSE = _pycplex_platform.CPXERR_ADJ_SIGN_SENSE
CPXERR_ADJ_SIGNS = _pycplex_platform.CPXERR_ADJ_SIGNS
CPXERR_ARC_INDEX_RANGE = _pycplex_platform.CPXERR_ARC_INDEX_RANGE
CPXERR_ARRAY_BAD_SOS_TYPE = _pycplex_platform.CPXERR_ARRAY_BAD_SOS_TYPE
CPXERR_ARRAY_NOT_ASCENDING = _pycplex_platform.CPXERR_ARRAY_NOT_ASCENDING
CPXERR_ARRAY_TOO_LONG = _pycplex_platform.CPXERR_ARRAY_TOO_LONG
CPXERR_BAD_ARGUMENT = _pycplex_platform.CPXERR_BAD_ARGUMENT
CPXERR_BAD_BOUND_SENSE = _pycplex_platform.CPXERR_BAD_BOUND_SENSE
CPXERR_BAD_BOUND_TYPE = _pycplex_platform.CPXERR_BAD_BOUND_TYPE
CPXERR_BAD_CHAR = _pycplex_platform.CPXERR_BAD_CHAR
CPXERR_BAD_CTYPE = _pycplex_platform.CPXERR_BAD_CTYPE
CPXERR_BAD_DECOMPOSITION = _pycplex_platform.CPXERR_BAD_DECOMPOSITION
CPXERR_BAD_DIRECTION = _pycplex_platform.CPXERR_BAD_DIRECTION
CPXERR_BAD_EXPO_RANGE = _pycplex_platform.CPXERR_BAD_EXPO_RANGE
CPXERR_BAD_EXPONENT = _pycplex_platform.CPXERR_BAD_EXPONENT
CPXERR_BAD_FILETYPE = _pycplex_platform.CPXERR_BAD_FILETYPE
CPXERR_BAD_ID = _pycplex_platform.CPXERR_BAD_ID
CPXERR_BAD_INDCONSTR = _pycplex_platform.CPXERR_BAD_INDCONSTR
CPXERR_BAD_INDICATOR = _pycplex_platform.CPXERR_BAD_INDICATOR
CPXERR_BAD_INDTYPE = _pycplex_platform.CPXERR_BAD_INDTYPE
CPXERR_BAD_LAZY_UCUT = _pycplex_platform.CPXERR_BAD_LAZY_UCUT
CPXERR_BAD_LUB = _pycplex_platform.CPXERR_BAD_LUB
CPXERR_BAD_METHOD = _pycplex_platform.CPXERR_BAD_METHOD
CPXERR_BAD_MULTIOBJ_ATTR = _pycplex_platform.CPXERR_BAD_MULTIOBJ_ATTR
CPXERR_BAD_NAME = _pycplex_platform.CPXERR_BAD_NAME
CPXERR_BAD_NUMBER = _pycplex_platform.CPXERR_BAD_NUMBER
CPXERR_BAD_OBJ_SENSE = _pycplex_platform.CPXERR_BAD_OBJ_SENSE
CPXERR_BAD_PARAM_NAME = _pycplex_platform.CPXERR_BAD_PARAM_NAME
CPXERR_BAD_PARAM_NUM = _pycplex_platform.CPXERR_BAD_PARAM_NUM
CPXERR_BAD_PIVOT = _pycplex_platform.CPXERR_BAD_PIVOT
CPXERR_BAD_PRIORITY = _pycplex_platform.CPXERR_BAD_PRIORITY
CPXERR_BAD_PROB_TYPE = _pycplex_platform.CPXERR_BAD_PROB_TYPE
CPXERR_BAD_ROW_ID = _pycplex_platform.CPXERR_BAD_ROW_ID
CPXERR_BAD_SECTION_BOUNDS = _pycplex_platform.CPXERR_BAD_SECTION_BOUNDS
CPXERR_BAD_SECTION_ENDATA = _pycplex_platform.CPXERR_BAD_SECTION_ENDATA
CPXERR_BAD_SECTION_QMATRIX = _pycplex_platform.CPXERR_BAD_SECTION_QMATRIX
CPXERR_BAD_SENSE = _pycplex_platform.CPXERR_BAD_SENSE
CPXERR_BAD_SOS_TYPE = _pycplex_platform.CPXERR_BAD_SOS_TYPE
CPXERR_BAD_STATUS = _pycplex_platform.CPXERR_BAD_STATUS
CPXERR_BAS_FILE_SHORT = _pycplex_platform.CPXERR_BAS_FILE_SHORT
CPXERR_BAS_FILE_SIZE = _pycplex_platform.CPXERR_BAS_FILE_SIZE
CPXERR_BENDERS_MASTER_SOLVE = _pycplex_platform.CPXERR_BENDERS_MASTER_SOLVE
CPXERR_CALLBACK = _pycplex_platform.CPXERR_CALLBACK
CPXERR_CALLBACK_INCONSISTENT = _pycplex_platform.CPXERR_CALLBACK_INCONSISTENT
CPXERR_CAND_NOT_POINT = _pycplex_platform.CPXERR_CAND_NOT_POINT
CPXERR_CAND_NOT_RAY = _pycplex_platform.CPXERR_CAND_NOT_RAY
CPXERR_CNTRL_IN_NAME = _pycplex_platform.CPXERR_CNTRL_IN_NAME
CPXERR_COL_INDEX_RANGE = _pycplex_platform.CPXERR_COL_INDEX_RANGE
CPXERR_COL_REPEAT_PRINT = _pycplex_platform.CPXERR_COL_REPEAT_PRINT
CPXERR_COL_REPEATS = _pycplex_platform.CPXERR_COL_REPEATS
CPXERR_COL_ROW_REPEATS = _pycplex_platform.CPXERR_COL_ROW_REPEATS
CPXERR_COL_UNKNOWN = _pycplex_platform.CPXERR_COL_UNKNOWN
CPXERR_CONFLICT_UNSTABLE = _pycplex_platform.CPXERR_CONFLICT_UNSTABLE
CPXERR_COUNT_OVERLAP = _pycplex_platform.CPXERR_COUNT_OVERLAP
CPXERR_COUNT_RANGE = _pycplex_platform.CPXERR_COUNT_RANGE
CPXERR_CPUBINDING_FAILURE = _pycplex_platform.CPXERR_CPUBINDING_FAILURE
CPXERR_DBL_MAX = _pycplex_platform.CPXERR_DBL_MAX
CPXERR_DECOMPRESSION = _pycplex_platform.CPXERR_DECOMPRESSION
CPXERR_DETTILIM_STRONGBRANCH = _pycplex_platform.CPXERR_DETTILIM_STRONGBRANCH
CPXERR_DUP_ENTRY = _pycplex_platform.CPXERR_DUP_ENTRY
CPXERR_DYNFUNC = _pycplex_platform.CPXERR_DYNFUNC
CPXERR_DYNLOAD = _pycplex_platform.CPXERR_DYNLOAD
CPXERR_ENCODING_CONVERSION = _pycplex_platform.CPXERR_ENCODING_CONVERSION
CPXERR_EXTRA_BV_BOUND = _pycplex_platform.CPXERR_EXTRA_BV_BOUND
CPXERR_EXTRA_FR_BOUND = _pycplex_platform.CPXERR_EXTRA_FR_BOUND
CPXERR_EXTRA_FX_BOUND = _pycplex_platform.CPXERR_EXTRA_FX_BOUND
CPXERR_EXTRA_INTEND = _pycplex_platform.CPXERR_EXTRA_INTEND
CPXERR_EXTRA_INTORG = _pycplex_platform.CPXERR_EXTRA_INTORG
CPXERR_EXTRA_SOSEND = _pycplex_platform.CPXERR_EXTRA_SOSEND
CPXERR_EXTRA_SOSORG = _pycplex_platform.CPXERR_EXTRA_SOSORG
CPXERR_FAIL_OPEN_READ = _pycplex_platform.CPXERR_FAIL_OPEN_READ
CPXERR_FAIL_OPEN_WRITE = _pycplex_platform.CPXERR_FAIL_OPEN_WRITE
CPXERR_FILE_ENTRIES = _pycplex_platform.CPXERR_FILE_ENTRIES
CPXERR_FILE_FORMAT = _pycplex_platform.CPXERR_FILE_FORMAT
CPXERR_FILE_IO = _pycplex_platform.CPXERR_FILE_IO
CPXERR_FILTER_VARIABLE_TYPE = _pycplex_platform.CPXERR_FILTER_VARIABLE_TYPE
CPXERR_ILL_DEFINED_PWL = _pycplex_platform.CPXERR_ILL_DEFINED_PWL
CPXERR_IN_INFOCALLBACK = _pycplex_platform.CPXERR_IN_INFOCALLBACK
CPXERR_INDEX_NOT_BASIC = _pycplex_platform.CPXERR_INDEX_NOT_BASIC
CPXERR_INDEX_RANGE = _pycplex_platform.CPXERR_INDEX_RANGE
CPXERR_INDEX_RANGE_HIGH = _pycplex_platform.CPXERR_INDEX_RANGE_HIGH
CPXERR_INDEX_RANGE_LOW = _pycplex_platform.CPXERR_INDEX_RANGE_LOW
CPXERR_INT_TOO_BIG = _pycplex_platform.CPXERR_INT_TOO_BIG
CPXERR_INT_TOO_BIG_INPUT = _pycplex_platform.CPXERR_INT_TOO_BIG_INPUT
CPXERR_INVALID_NUMBER = _pycplex_platform.CPXERR_INVALID_NUMBER
CPXERR_LIMITS_TOO_BIG = _pycplex_platform.CPXERR_LIMITS_TOO_BIG
CPXERR_LINE_TOO_LONG = _pycplex_platform.CPXERR_LINE_TOO_LONG
CPXERR_LO_BOUND_REPEATS = _pycplex_platform.CPXERR_LO_BOUND_REPEATS
CPXERR_LOCK_CREATE = _pycplex_platform.CPXERR_LOCK_CREATE
CPXERR_LP_NOT_IN_ENVIRONMENT = _pycplex_platform.CPXERR_LP_NOT_IN_ENVIRONMENT
CPXERR_LP_PARSE = _pycplex_platform.CPXERR_LP_PARSE
CPXERR_MASTER_SOLVE = _pycplex_platform.CPXERR_MASTER_SOLVE
CPXERR_MIPSEARCH_WITH_CALLBACKS = _pycplex_platform.CPXERR_MIPSEARCH_WITH_CALLBACKS
CPXERR_MISS_SOS_TYPE = _pycplex_platform.CPXERR_MISS_SOS_TYPE
CPXERR_MSG_NO_CHANNEL = _pycplex_platform.CPXERR_MSG_NO_CHANNEL
CPXERR_MSG_NO_FILEPTR = _pycplex_platform.CPXERR_MSG_NO_FILEPTR
CPXERR_MSG_NO_FUNCTION = _pycplex_platform.CPXERR_MSG_NO_FUNCTION
CPXERR_MULTIOBJ_SUBPROB_SOLVE = _pycplex_platform.CPXERR_MULTIOBJ_SUBPROB_SOLVE
CPXERR_MULTIPLE_PROBS_IN_REMOTE_ENVIRONMENT = _pycplex_platform.CPXERR_MULTIPLE_PROBS_IN_REMOTE_ENVIRONMENT
CPXERR_NAME_CREATION = _pycplex_platform.CPXERR_NAME_CREATION
CPXERR_NAME_NOT_FOUND = _pycplex_platform.CPXERR_NAME_NOT_FOUND
CPXERR_NAME_TOO_LONG = _pycplex_platform.CPXERR_NAME_TOO_LONG
CPXERR_NAN = _pycplex_platform.CPXERR_NAN
CPXERR_NEED_OPT_SOLN = _pycplex_platform.CPXERR_NEED_OPT_SOLN
CPXERR_NEGATIVE_SURPLUS = _pycplex_platform.CPXERR_NEGATIVE_SURPLUS
CPXERR_NET_DATA = _pycplex_platform.CPXERR_NET_DATA
CPXERR_NET_FILE_SHORT = _pycplex_platform.CPXERR_NET_FILE_SHORT
CPXERR_NO_BARRIER_SOLN = _pycplex_platform.CPXERR_NO_BARRIER_SOLN
CPXERR_NO_BASIC_SOLN = _pycplex_platform.CPXERR_NO_BASIC_SOLN
CPXERR_NO_BASIS = _pycplex_platform.CPXERR_NO_BASIS
CPXERR_NO_BOUND_SENSE = _pycplex_platform.CPXERR_NO_BOUND_SENSE
CPXERR_NO_BOUND_TYPE = _pycplex_platform.CPXERR_NO_BOUND_TYPE
CPXERR_NO_COLUMNS_SECTION = _pycplex_platform.CPXERR_NO_COLUMNS_SECTION
CPXERR_NO_CONFLICT = _pycplex_platform.CPXERR_NO_CONFLICT
CPXERR_NO_DECOMPOSITION = _pycplex_platform.CPXERR_NO_DECOMPOSITION
CPXERR_NO_DUAL_SOLN = _pycplex_platform.CPXERR_NO_DUAL_SOLN
CPXERR_NO_ENDATA = _pycplex_platform.CPXERR_NO_ENDATA
CPXERR_NO_ENVIRONMENT = _pycplex_platform.CPXERR_NO_ENVIRONMENT
CPXERR_NO_FILENAME = _pycplex_platform.CPXERR_NO_FILENAME
CPXERR_NO_ID = _pycplex_platform.CPXERR_NO_ID
CPXERR_NO_ID_FIRST = _pycplex_platform.CPXERR_NO_ID_FIRST
CPXERR_NO_INT_X = _pycplex_platform.CPXERR_NO_INT_X
CPXERR_NO_KAPPASTATS = _pycplex_platform.CPXERR_NO_KAPPASTATS
CPXERR_NO_LU_FACTOR = _pycplex_platform.CPXERR_NO_LU_FACTOR
CPXERR_NO_MEMORY = _pycplex_platform.CPXERR_NO_MEMORY
CPXERR_NO_MIPSTART = _pycplex_platform.CPXERR_NO_MIPSTART
CPXERR_NO_NAME_SECTION = _pycplex_platform.CPXERR_NO_NAME_SECTION
CPXERR_NO_NAMES = _pycplex_platform.CPXERR_NO_NAMES
CPXERR_NO_NORMS = _pycplex_platform.CPXERR_NO_NORMS
CPXERR_NO_NUMBER = _pycplex_platform.CPXERR_NO_NUMBER
CPXERR_NO_NUMBER_BOUND = _pycplex_platform.CPXERR_NO_NUMBER_BOUND
CPXERR_NO_NUMBER_FIRST = _pycplex_platform.CPXERR_NO_NUMBER_FIRST
CPXERR_NO_OBJ_NAME = _pycplex_platform.CPXERR_NO_OBJ_NAME
CPXERR_NO_OBJ_SENSE = _pycplex_platform.CPXERR_NO_OBJ_SENSE
CPXERR_NO_OBJECTIVE = _pycplex_platform.CPXERR_NO_OBJECTIVE
CPXERR_NO_OP_OR_SENSE = _pycplex_platform.CPXERR_NO_OP_OR_SENSE
CPXERR_NO_OPERATOR = _pycplex_platform.CPXERR_NO_OPERATOR
CPXERR_NO_ORDER = _pycplex_platform.CPXERR_NO_ORDER
CPXERR_NO_PROBLEM = _pycplex_platform.CPXERR_NO_PROBLEM
CPXERR_NO_QP_OPERATOR = _pycplex_platform.CPXERR_NO_QP_OPERATOR
CPXERR_NO_QUAD_EXP = _pycplex_platform.CPXERR_NO_QUAD_EXP
CPXERR_NO_RHS_COEFF = _pycplex_platform.CPXERR_NO_RHS_COEFF
CPXERR_NO_RHS_IN_OBJ = _pycplex_platform.CPXERR_NO_RHS_IN_OBJ
CPXERR_NO_ROW_NAME = _pycplex_platform.CPXERR_NO_ROW_NAME
CPXERR_NO_ROW_SENSE = _pycplex_platform.CPXERR_NO_ROW_SENSE
CPXERR_NO_ROWS_SECTION = _pycplex_platform.CPXERR_NO_ROWS_SECTION
CPXERR_NO_SENSIT = _pycplex_platform.CPXERR_NO_SENSIT
CPXERR_NO_SOLN = _pycplex_platform.CPXERR_NO_SOLN
CPXERR_NO_SOLNPOOL = _pycplex_platform.CPXERR_NO_SOLNPOOL
CPXERR_NO_SOS = _pycplex_platform.CPXERR_NO_SOS
CPXERR_NO_TREE = _pycplex_platform.CPXERR_NO_TREE
CPXERR_NO_VECTOR_SOLN = _pycplex_platform.CPXERR_NO_VECTOR_SOLN
CPXERR_NODE_INDEX_RANGE = _pycplex_platform.CPXERR_NODE_INDEX_RANGE
CPXERR_NODE_ON_DISK = _pycplex_platform.CPXERR_NODE_ON_DISK
CPXERR_NOT_DUAL_UNBOUNDED = _pycplex_platform.CPXERR_NOT_DUAL_UNBOUNDED
CPXERR_NOT_FIXED = _pycplex_platform.CPXERR_NOT_FIXED
CPXERR_NOT_FOR_BENDERS = _pycplex_platform.CPXERR_NOT_FOR_BENDERS
CPXERR_NOT_FOR_DISTMIP = _pycplex_platform.CPXERR_NOT_FOR_DISTMIP
CPXERR_NOT_FOR_MIP = _pycplex_platform.CPXERR_NOT_FOR_MIP
CPXERR_NOT_FOR_MULTIOBJ = _pycplex_platform.CPXERR_NOT_FOR_MULTIOBJ
CPXERR_NOT_FOR_QCP = _pycplex_platform.CPXERR_NOT_FOR_QCP
CPXERR_NOT_FOR_QP = _pycplex_platform.CPXERR_NOT_FOR_QP
CPXERR_NOT_MILPCLASS = _pycplex_platform.CPXERR_NOT_MILPCLASS
CPXERR_NOT_MIN_COST_FLOW = _pycplex_platform.CPXERR_NOT_MIN_COST_FLOW
CPXERR_NOT_MIP = _pycplex_platform.CPXERR_NOT_MIP
CPXERR_NOT_MIQPCLASS = _pycplex_platform.CPXERR_NOT_MIQPCLASS
CPXERR_NOT_ONE_PROBLEM = _pycplex_platform.CPXERR_NOT_ONE_PROBLEM
CPXERR_NOT_QP = _pycplex_platform.CPXERR_NOT_QP
CPXERR_NOT_SAV_FILE = _pycplex_platform.CPXERR_NOT_SAV_FILE
CPXERR_NOT_UNBOUNDED = _pycplex_platform.CPXERR_NOT_UNBOUNDED
CPXERR_NULL_POINTER = _pycplex_platform.CPXERR_NULL_POINTER
CPXERR_ORDER_BAD_DIRECTION = _pycplex_platform.CPXERR_ORDER_BAD_DIRECTION
CPXERR_OVERFLOW = _pycplex_platform.CPXERR_OVERFLOW
CPXERR_PARAM_INCOMPATIBLE = _pycplex_platform.CPXERR_PARAM_INCOMPATIBLE
CPXERR_PARAM_TOO_BIG = _pycplex_platform.CPXERR_PARAM_TOO_BIG
CPXERR_PARAM_TOO_SMALL = _pycplex_platform.CPXERR_PARAM_TOO_SMALL
CPXERR_PRESLV_ABORT = _pycplex_platform.CPXERR_PRESLV_ABORT
CPXERR_PRESLV_BAD_PARAM = _pycplex_platform.CPXERR_PRESLV_BAD_PARAM
CPXERR_PRESLV_BASIS_MEM = _pycplex_platform.CPXERR_PRESLV_BASIS_MEM
CPXERR_PRESLV_COPYORDER = _pycplex_platform.CPXERR_PRESLV_COPYORDER
CPXERR_PRESLV_COPYSOS = _pycplex_platform.CPXERR_PRESLV_COPYSOS
CPXERR_PRESLV_CRUSHFORM = _pycplex_platform.CPXERR_PRESLV_CRUSHFORM
CPXERR_PRESLV_DETTIME_LIM = _pycplex_platform.CPXERR_PRESLV_DETTIME_LIM
CPXERR_PRESLV_DUAL = _pycplex_platform.CPXERR_PRESLV_DUAL
CPXERR_PRESLV_FAIL_BASIS = _pycplex_platform.CPXERR_PRESLV_FAIL_BASIS
CPXERR_PRESLV_INF = _pycplex_platform.CPXERR_PRESLV_INF
CPXERR_PRESLV_INForUNBD = _pycplex_platform.CPXERR_PRESLV_INForUNBD
CPXERR_PRESLV_NO_BASIS = _pycplex_platform.CPXERR_PRESLV_NO_BASIS
CPXERR_PRESLV_NO_PROB = _pycplex_platform.CPXERR_PRESLV_NO_PROB
CPXERR_PRESLV_SOLN_MIP = _pycplex_platform.CPXERR_PRESLV_SOLN_MIP
CPXERR_PRESLV_SOLN_QP = _pycplex_platform.CPXERR_PRESLV_SOLN_QP
CPXERR_PRESLV_START_LP = _pycplex_platform.CPXERR_PRESLV_START_LP
CPXERR_PRESLV_TIME_LIM = _pycplex_platform.CPXERR_PRESLV_TIME_LIM
CPXERR_PRESLV_UNBD = _pycplex_platform.CPXERR_PRESLV_UNBD
CPXERR_PRESLV_UNCRUSHFORM = _pycplex_platform.CPXERR_PRESLV_UNCRUSHFORM
CPXERR_PRIIND = _pycplex_platform.CPXERR_PRIIND
CPXERR_PRM_DATA = _pycplex_platform.CPXERR_PRM_DATA
CPXERR_PROTOCOL = _pycplex_platform.CPXERR_PROTOCOL
CPXERR_Q_DIVISOR = _pycplex_platform.CPXERR_Q_DIVISOR
CPXERR_Q_DUP_ENTRY = _pycplex_platform.CPXERR_Q_DUP_ENTRY
CPXERR_Q_NOT_INDEF = _pycplex_platform.CPXERR_Q_NOT_INDEF
CPXERR_Q_NOT_POS_DEF = _pycplex_platform.CPXERR_Q_NOT_POS_DEF
CPXERR_Q_NOT_SYMMETRIC = _pycplex_platform.CPXERR_Q_NOT_SYMMETRIC
CPXERR_QCP_SENSE = _pycplex_platform.CPXERR_QCP_SENSE
CPXERR_QCP_SENSE_FILE = _pycplex_platform.CPXERR_QCP_SENSE_FILE
CPXERR_QUAD_EXP_NOT_2 = _pycplex_platform.CPXERR_QUAD_EXP_NOT_2
CPXERR_QUAD_IN_ROW = _pycplex_platform.CPXERR_QUAD_IN_ROW
CPXERR_RANGE_SECTION_ORDER = _pycplex_platform.CPXERR_RANGE_SECTION_ORDER
CPXERR_RESTRICTED_VERSION = _pycplex_platform.CPXERR_RESTRICTED_VERSION
CPXERR_RHS_IN_OBJ = _pycplex_platform.CPXERR_RHS_IN_OBJ
CPXERR_RIM_REPEATS = _pycplex_platform.CPXERR_RIM_REPEATS
CPXERR_RIM_ROW_REPEATS = _pycplex_platform.CPXERR_RIM_ROW_REPEATS
CPXERR_RIMNZ_REPEATS = _pycplex_platform.CPXERR_RIMNZ_REPEATS
CPXERR_ROW_INDEX_RANGE = _pycplex_platform.CPXERR_ROW_INDEX_RANGE
CPXERR_ROW_REPEAT_PRINT = _pycplex_platform.CPXERR_ROW_REPEAT_PRINT
CPXERR_ROW_REPEATS = _pycplex_platform.CPXERR_ROW_REPEATS
CPXERR_ROW_UNKNOWN = _pycplex_platform.CPXERR_ROW_UNKNOWN
CPXERR_SAV_FILE_DATA = _pycplex_platform.CPXERR_SAV_FILE_DATA
CPXERR_SAV_FILE_VALUE = _pycplex_platform.CPXERR_SAV_FILE_VALUE
CPXERR_SAV_FILE_WRITE = _pycplex_platform.CPXERR_SAV_FILE_WRITE
CPXERR_SBASE_ILLEGAL = _pycplex_platform.CPXERR_SBASE_ILLEGAL
CPXERR_SBASE_INCOMPAT = _pycplex_platform.CPXERR_SBASE_INCOMPAT
CPXERR_SINGULAR = _pycplex_platform.CPXERR_SINGULAR
CPXERR_STR_PARAM_TOO_LONG = _pycplex_platform.CPXERR_STR_PARAM_TOO_LONG
CPXERR_SUBPROB_SOLVE = _pycplex_platform.CPXERR_SUBPROB_SOLVE
CPXERR_SYNCPRIM_CREATE = _pycplex_platform.CPXERR_SYNCPRIM_CREATE
CPXERR_SYSCALL = _pycplex_platform.CPXERR_SYSCALL
CPXERR_THREAD_FAILED = _pycplex_platform.CPXERR_THREAD_FAILED
CPXERR_TILIM_CONDITION_NO = _pycplex_platform.CPXERR_TILIM_CONDITION_NO
CPXERR_TILIM_STRONGBRANCH = _pycplex_platform.CPXERR_TILIM_STRONGBRANCH
CPXERR_TOO_MANY_COEFFS = _pycplex_platform.CPXERR_TOO_MANY_COEFFS
CPXERR_TOO_MANY_COLS = _pycplex_platform.CPXERR_TOO_MANY_COLS
CPXERR_TOO_MANY_RIMNZ = _pycplex_platform.CPXERR_TOO_MANY_RIMNZ
CPXERR_TOO_MANY_RIMS = _pycplex_platform.CPXERR_TOO_MANY_RIMS
CPXERR_TOO_MANY_ROWS = _pycplex_platform.CPXERR_TOO_MANY_ROWS
CPXERR_TOO_MANY_THREADS = _pycplex_platform.CPXERR_TOO_MANY_THREADS
CPXERR_TREE_MEMORY_LIMIT = _pycplex_platform.CPXERR_TREE_MEMORY_LIMIT
CPXERR_TUNE_MIXED = _pycplex_platform.CPXERR_TUNE_MIXED
CPXERR_UNIQUE_WEIGHTS = _pycplex_platform.CPXERR_UNIQUE_WEIGHTS
CPXERR_UNSUPPORTED_CONSTRAINT_TYPE = _pycplex_platform.CPXERR_UNSUPPORTED_CONSTRAINT_TYPE
CPXERR_UNSUPPORTED_OPERATION = _pycplex_platform.CPXERR_UNSUPPORTED_OPERATION
CPXERR_UP_BOUND_REPEATS = _pycplex_platform.CPXERR_UP_BOUND_REPEATS
CPXERR_WORK_FILE_OPEN = _pycplex_platform.CPXERR_WORK_FILE_OPEN
CPXERR_WORK_FILE_READ = _pycplex_platform.CPXERR_WORK_FILE_READ
CPXERR_WORK_FILE_WRITE = _pycplex_platform.CPXERR_WORK_FILE_WRITE
CPXERR_XMLPARSE = _pycplex_platform.CPXERR_XMLPARSE
CPXMESSAGEBUFSIZE = _pycplex_platform.CPXMESSAGEBUFSIZE
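# The CPXMI_* constants below identify modeling-issue warnings emitted by
# CPLEX's data checking assistance (CPX_PARAM_DATACHECK set to
# CPX_DATACHECK_ASSIST).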
CPXMI_BIGM_COEF = _pycplex_platform.CPXMI_BIGM_COEF
CPXMI_BIGM_TO_IND = _pycplex_platform.CPXMI_BIGM_TO_IND
CPXMI_BIGM_VARBOUND = _pycplex_platform.CPXMI_BIGM_VARBOUND
CPXMI_CANCEL_TOL = _pycplex_platform.CPXMI_CANCEL_TOL
CPXMI_EPGAP_LARGE = _pycplex_platform.CPXMI_EPGAP_LARGE
CPXMI_EPGAP_OBJOFFSET = _pycplex_platform.CPXMI_EPGAP_OBJOFFSET
CPXMI_FEAS_TOL = _pycplex_platform.CPXMI_FEAS_TOL
CPXMI_FRACTION_SCALING = _pycplex_platform.CPXMI_FRACTION_SCALING
CPXMI_IND_NZ_LARGE_NUM = _pycplex_platform.CPXMI_IND_NZ_LARGE_NUM
CPXMI_IND_NZ_SMALL_NUM = _pycplex_platform.CPXMI_IND_NZ_SMALL_NUM
CPXMI_IND_RHS_LARGE_NUM = _pycplex_platform.CPXMI_IND_RHS_LARGE_NUM
CPXMI_IND_RHS_SMALL_NUM = _pycplex_platform.CPXMI_IND_RHS_SMALL_NUM
CPXMI_KAPPA_ILLPOSED = _pycplex_platform.CPXMI_KAPPA_ILLPOSED
CPXMI_KAPPA_SUSPICIOUS = _pycplex_platform.CPXMI_KAPPA_SUSPICIOUS
CPXMI_KAPPA_UNSTABLE = _pycplex_platform.CPXMI_KAPPA_UNSTABLE
CPXMI_LB_LARGE_NUM = _pycplex_platform.CPXMI_LB_LARGE_NUM
CPXMI_LB_SMALL_NUM = _pycplex_platform.CPXMI_LB_SMALL_NUM
CPXMI_LC_NZ_LARGE_NUM = _pycplex_platform.CPXMI_LC_NZ_LARGE_NUM
CPXMI_LC_NZ_SMALL_NUM = _pycplex_platform.CPXMI_LC_NZ_SMALL_NUM
CPXMI_LC_RHS_LARGE_NUM = _pycplex_platform.CPXMI_LC_RHS_LARGE_NUM
CPXMI_LC_RHS_SMALL_NUM = _pycplex_platform.CPXMI_LC_RHS_SMALL_NUM
CPXMI_MULTIOBJ_COEFFS = _pycplex_platform.CPXMI_MULTIOBJ_COEFFS
CPXMI_MULTIOBJ_LARGE_NUM = _pycplex_platform.CPXMI_MULTIOBJ_LARGE_NUM
CPXMI_MULTIOBJ_MIX = _pycplex_platform.CPXMI_MULTIOBJ_MIX
CPXMI_MULTIOBJ_OPT_TOL = _pycplex_platform.CPXMI_MULTIOBJ_OPT_TOL
CPXMI_MULTIOBJ_SMALL_NUM = _pycplex_platform.CPXMI_MULTIOBJ_SMALL_NUM
CPXMI_NZ_LARGE_NUM = _pycplex_platform.CPXMI_NZ_LARGE_NUM
CPXMI_NZ_SMALL_NUM = _pycplex_platform.CPXMI_NZ_SMALL_NUM
CPXMI_OBJ_LARGE_NUM = _pycplex_platform.CPXMI_OBJ_LARGE_NUM
CPXMI_OBJ_SMALL_NUM = _pycplex_platform.CPXMI_OBJ_SMALL_NUM
CPXMI_OPT_TOL = _pycplex_platform.CPXMI_OPT_TOL
CPXMI_PWL_SLOPE_LARGE_NUM = _pycplex_platform.CPXMI_PWL_SLOPE_LARGE_NUM
CPXMI_PWL_SLOPE_SMALL_NUM = _pycplex_platform.CPXMI_PWL_SLOPE_SMALL_NUM
CPXMI_QC_LINNZ_LARGE_NUM = _pycplex_platform.CPXMI_QC_LINNZ_LARGE_NUM
CPXMI_QC_LINNZ_SMALL_NUM = _pycplex_platform.CPXMI_QC_LINNZ_SMALL_NUM
CPXMI_QC_QNZ_LARGE_NUM = _pycplex_platform.CPXMI_QC_QNZ_LARGE_NUM
CPXMI_QC_QNZ_SMALL_NUM = _pycplex_platform.CPXMI_QC_QNZ_SMALL_NUM
CPXMI_QC_RHS_LARGE_NUM = _pycplex_platform.CPXMI_QC_RHS_LARGE_NUM
CPXMI_QC_RHS_SMALL_NUM = _pycplex_platform.CPXMI_QC_RHS_SMALL_NUM
CPXMI_QOBJ_LARGE_NUM = _pycplex_platform.CPXMI_QOBJ_LARGE_NUM
CPXMI_QOBJ_SMALL_NUM = _pycplex_platform.CPXMI_QOBJ_SMALL_NUM
CPXMI_QOPT_TOL = _pycplex_platform.CPXMI_QOPT_TOL
CPXMI_RHS_LARGE_NUM = _pycplex_platform.CPXMI_RHS_LARGE_NUM
CPXMI_RHS_SMALL_NUM = _pycplex_platform.CPXMI_RHS_SMALL_NUM
CPXMI_SAMECOEFF_COL = _pycplex_platform.CPXMI_SAMECOEFF_COL
CPXMI_SAMECOEFF_IND = _pycplex_platform.CPXMI_SAMECOEFF_IND
CPXMI_SAMECOEFF_LAZY = _pycplex_platform.CPXMI_SAMECOEFF_LAZY
CPXMI_SAMECOEFF_MULTIOBJ = _pycplex_platform.CPXMI_SAMECOEFF_MULTIOBJ
CPXMI_SAMECOEFF_OBJ = _pycplex_platform.CPXMI_SAMECOEFF_OBJ
CPXMI_SAMECOEFF_QLIN = _pycplex_platform.CPXMI_SAMECOEFF_QLIN
CPXMI_SAMECOEFF_QUAD = _pycplex_platform.CPXMI_SAMECOEFF_QUAD
CPXMI_SAMECOEFF_RHS = _pycplex_platform.CPXMI_SAMECOEFF_RHS
CPXMI_SAMECOEFF_ROW = _pycplex_platform.CPXMI_SAMECOEFF_ROW
CPXMI_SAMECOEFF_UCUT = _pycplex_platform.CPXMI_SAMECOEFF_UCUT
CPXMI_SINGLE_PRECISION = _pycplex_platform.CPXMI_SINGLE_PRECISION
CPXMI_SYMMETRY_BREAKING_INEQ = _pycplex_platform.CPXMI_SYMMETRY_BREAKING_INEQ
CPXMI_UB_LARGE_NUM = _pycplex_platform.CPXMI_UB_LARGE_NUM
CPXMI_UB_SMALL_NUM = _pycplex_platform.CPXMI_UB_SMALL_NUM
CPXMI_UC_NZ_LARGE_NUM = _pycplex_platform.CPXMI_UC_NZ_LARGE_NUM
CPXMI_UC_NZ_SMALL_NUM = _pycplex_platform.CPXMI_UC_NZ_SMALL_NUM
CPXMI_UC_RHS_LARGE_NUM = _pycplex_platform.CPXMI_UC_RHS_LARGE_NUM
CPXMI_UC_RHS_SMALL_NUM = _pycplex_platform.CPXMI_UC_RHS_SMALL_NUM
CPXMI_WIDE_COEFF_RANGE = _pycplex_platform.CPXMI_WIDE_COEFF_RANGE
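# The CPXMIP_* constants below are MIP solution status codes, e.g. as
# returned by CPXXgetstat after CPXXmipopt.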
CPXMIP_ABORT_FEAS = _pycplex_platform.CPXMIP_ABORT_FEAS
CPXMIP_ABORT_INFEAS = _pycplex_platform.CPXMIP_ABORT_INFEAS
CPXMIP_ABORT_RELAXATION_UNBOUNDED = _pycplex_platform.CPXMIP_ABORT_RELAXATION_UNBOUNDED
CPXMIP_ABORT_RELAXED = _pycplex_platform.CPXMIP_ABORT_RELAXED
CPXMIP_DETTIME_LIM_FEAS = _pycplex_platform.CPXMIP_DETTIME_LIM_FEAS
CPXMIP_DETTIME_LIM_INFEAS = _pycplex_platform.CPXMIP_DETTIME_LIM_INFEAS
CPXMIP_FAIL_FEAS = _pycplex_platform.CPXMIP_FAIL_FEAS
CPXMIP_FAIL_FEAS_NO_TREE = _pycplex_platform.CPXMIP_FAIL_FEAS_NO_TREE
CPXMIP_FAIL_INFEAS = _pycplex_platform.CPXMIP_FAIL_INFEAS
CPXMIP_FAIL_INFEAS_NO_TREE = _pycplex_platform.CPXMIP_FAIL_INFEAS_NO_TREE
CPXMIP_FEASIBLE = _pycplex_platform.CPXMIP_FEASIBLE
CPXMIP_FEASIBLE_RELAXED_INF = _pycplex_platform.CPXMIP_FEASIBLE_RELAXED_INF
CPXMIP_FEASIBLE_RELAXED_QUAD = _pycplex_platform.CPXMIP_FEASIBLE_RELAXED_QUAD
CPXMIP_FEASIBLE_RELAXED_SUM = _pycplex_platform.CPXMIP_FEASIBLE_RELAXED_SUM
CPXMIP_INFEASIBLE = _pycplex_platform.CPXMIP_INFEASIBLE
CPXMIP_INForUNBD = _pycplex_platform.CPXMIP_INForUNBD
CPXMIP_MEM_LIM_FEAS = _pycplex_platform.CPXMIP_MEM_LIM_FEAS
CPXMIP_MEM_LIM_INFEAS = _pycplex_platform.CPXMIP_MEM_LIM_INFEAS
CPXMIP_NODE_LIM_FEAS = _pycplex_platform.CPXMIP_NODE_LIM_FEAS
CPXMIP_NODE_LIM_INFEAS = _pycplex_platform.CPXMIP_NODE_LIM_INFEAS
CPXMIP_OPTIMAL = _pycplex_platform.CPXMIP_OPTIMAL
CPXMIP_OPTIMAL_INFEAS = _pycplex_platform.CPXMIP_OPTIMAL_INFEAS
CPXMIP_OPTIMAL_POPULATED = _pycplex_platform.CPXMIP_OPTIMAL_POPULATED
CPXMIP_OPTIMAL_POPULATED_TOL = _pycplex_platform.CPXMIP_OPTIMAL_POPULATED_TOL
CPXMIP_OPTIMAL_RELAXED_INF = _pycplex_platform.CPXMIP_OPTIMAL_RELAXED_INF
CPXMIP_OPTIMAL_RELAXED_QUAD = _pycplex_platform.CPXMIP_OPTIMAL_RELAXED_QUAD
CPXMIP_OPTIMAL_RELAXED_SUM = _pycplex_platform.CPXMIP_OPTIMAL_RELAXED_SUM
CPXMIP_OPTIMAL_TOL = _pycplex_platform.CPXMIP_OPTIMAL_TOL
CPXMIP_POPULATESOL_LIM = _pycplex_platform.CPXMIP_POPULATESOL_LIM
CPXMIP_SOL_LIM = _pycplex_platform.CPXMIP_SOL_LIM
CPXMIP_TIME_LIM_FEAS = _pycplex_platform.CPXMIP_TIME_LIM_FEAS
CPXMIP_TIME_LIM_INFEAS = _pycplex_platform.CPXMIP_TIME_LIM_INFEAS
CPXMIP_UNBOUNDED = _pycplex_platform.CPXMIP_UNBOUNDED
CPX_CPXAUTOENUMS_H_H = _pycplex_platform.CPX_CPXAUTOENUMS_H_H
CPXCALLBACKINFO_THREADID = _pycplex_platform.CPXCALLBACKINFO_THREADID
CPXCALLBACKINFO_NODECOUNT = _pycplex_platform.CPXCALLBACKINFO_NODECOUNT
CPXCALLBACKINFO_ITCOUNT = _pycplex_platform.CPXCALLBACKINFO_ITCOUNT
CPXCALLBACKINFO_BEST_SOL = _pycplex_platform.CPXCALLBACKINFO_BEST_SOL
CPXCALLBACKINFO_BEST_BND = _pycplex_platform.CPXCALLBACKINFO_BEST_BND
CPXCALLBACKINFO_THREADS = _pycplex_platform.CPXCALLBACKINFO_THREADS
CPXCALLBACKINFO_FEASIBLE = _pycplex_platform.CPXCALLBACKINFO_FEASIBLE
CPXCALLBACKINFO_TIME = _pycplex_platform.CPXCALLBACKINFO_TIME
CPXCALLBACKINFO_DETTIME = _pycplex_platform.CPXCALLBACKINFO_DETTIME
CPXCALLBACKINFO_NODEUID = _pycplex_platform.CPXCALLBACKINFO_NODEUID
CPXCALLBACKINFO_NODEDEPTH = _pycplex_platform.CPXCALLBACKINFO_NODEDEPTH
CPXCALLBACKINFO_CANDIDATE_SOURCE = _pycplex_platform.CPXCALLBACKINFO_CANDIDATE_SOURCE
CPXCALLBACKINFO_RESTARTS = _pycplex_platform.CPXCALLBACKINFO_RESTARTS
CPXCALLBACKINFO_AFTERCUTLOOP = _pycplex_platform.CPXCALLBACKINFO_AFTERCUTLOOP
CPXCALLBACKINFO_NODESLEFT = _pycplex_platform.CPXCALLBACKINFO_NODESLEFT
CPXCALLBACKSOLUTION_NOCHECK = _pycplex_platform.CPXCALLBACKSOLUTION_NOCHECK
CPXCALLBACKSOLUTION_CHECKFEAS = _pycplex_platform.CPXCALLBACKSOLUTION_CHECKFEAS
CPXCALLBACKSOLUTION_PROPAGATE = _pycplex_platform.CPXCALLBACKSOLUTION_PROPAGATE
CPXCALLBACKSOLUTION_SOLVE = _pycplex_platform.CPXCALLBACKSOLUTION_SOLVE
CPXINFO_BYTE = _pycplex_platform.CPXINFO_BYTE
CPXINFO_SHORT = _pycplex_platform.CPXINFO_SHORT
CPXINFO_INT = _pycplex_platform.CPXINFO_INT
CPXINFO_LONG = _pycplex_platform.CPXINFO_LONG
CPXINFO_DOUBLE = _pycplex_platform.CPXINFO_DOUBLE
CPXPUBLICPARAMS_H = _pycplex_platform.CPXPUBLICPARAMS_H
CPX_PARAM_ADVIND = _pycplex_platform.CPX_PARAM_ADVIND
CPX_PARAM_AGGFILL = _pycplex_platform.CPX_PARAM_AGGFILL
CPX_PARAM_AGGIND = _pycplex_platform.CPX_PARAM_AGGIND
CPX_PARAM_CLOCKTYPE = _pycplex_platform.CPX_PARAM_CLOCKTYPE
CPX_PARAM_CRAIND = _pycplex_platform.CPX_PARAM_CRAIND
CPX_PARAM_DEPIND = _pycplex_platform.CPX_PARAM_DEPIND
CPX_PARAM_DPRIIND = _pycplex_platform.CPX_PARAM_DPRIIND
CPX_PARAM_PRICELIM = _pycplex_platform.CPX_PARAM_PRICELIM
CPX_PARAM_EPMRK = _pycplex_platform.CPX_PARAM_EPMRK
CPX_PARAM_EPOPT = _pycplex_platform.CPX_PARAM_EPOPT
CPX_PARAM_EPPER = _pycplex_platform.CPX_PARAM_EPPER
CPX_PARAM_EPRHS = _pycplex_platform.CPX_PARAM_EPRHS
CPX_PARAM_SIMDISPLAY = _pycplex_platform.CPX_PARAM_SIMDISPLAY
CPX_PARAM_ITLIM = _pycplex_platform.CPX_PARAM_ITLIM
CPX_PARAM_ROWREADLIM = _pycplex_platform.CPX_PARAM_ROWREADLIM
CPX_PARAM_NETFIND = _pycplex_platform.CPX_PARAM_NETFIND
CPX_PARAM_COLREADLIM = _pycplex_platform.CPX_PARAM_COLREADLIM
CPX_PARAM_NZREADLIM = _pycplex_platform.CPX_PARAM_NZREADLIM
CPX_PARAM_OBJLLIM = _pycplex_platform.CPX_PARAM_OBJLLIM
CPX_PARAM_OBJULIM = _pycplex_platform.CPX_PARAM_OBJULIM
CPX_PARAM_PERIND = _pycplex_platform.CPX_PARAM_PERIND
CPX_PARAM_PERLIM = _pycplex_platform.CPX_PARAM_PERLIM
CPX_PARAM_PPRIIND = _pycplex_platform.CPX_PARAM_PPRIIND
CPX_PARAM_PREIND = _pycplex_platform.CPX_PARAM_PREIND
CPX_PARAM_REINV = _pycplex_platform.CPX_PARAM_REINV
CPX_PARAM_SCAIND = _pycplex_platform.CPX_PARAM_SCAIND
CPX_PARAM_SCRIND = _pycplex_platform.CPX_PARAM_SCRIND
CPX_PARAM_SINGLIM = _pycplex_platform.CPX_PARAM_SINGLIM
CPX_PARAM_TILIM = _pycplex_platform.CPX_PARAM_TILIM
CPX_PARAM_PREDUAL = _pycplex_platform.CPX_PARAM_PREDUAL
CPX_PARAM_PREPASS = _pycplex_platform.CPX_PARAM_PREPASS
CPX_PARAM_DATACHECK = _pycplex_platform.CPX_PARAM_DATACHECK
CPX_PARAM_REDUCE = _pycplex_platform.CPX_PARAM_REDUCE
CPX_PARAM_PRELINEAR = _pycplex_platform.CPX_PARAM_PRELINEAR
CPX_PARAM_LPMETHOD = _pycplex_platform.CPX_PARAM_LPMETHOD
CPX_PARAM_QPMETHOD = _pycplex_platform.CPX_PARAM_QPMETHOD
CPX_PARAM_WORKDIR = _pycplex_platform.CPX_PARAM_WORKDIR
CPX_PARAM_WORKMEM = _pycplex_platform.CPX_PARAM_WORKMEM
CPX_PARAM_THREADS = _pycplex_platform.CPX_PARAM_THREADS
CPX_PARAM_CONFLICTALG = _pycplex_platform.CPX_PARAM_CONFLICTALG
CPX_PARAM_CONFLICTDISPLAY = _pycplex_platform.CPX_PARAM_CONFLICTDISPLAY
CPX_PARAM_SIFTDISPLAY = _pycplex_platform.CPX_PARAM_SIFTDISPLAY
CPX_PARAM_SIFTALG = _pycplex_platform.CPX_PARAM_SIFTALG
CPX_PARAM_SIFTITLIM = _pycplex_platform.CPX_PARAM_SIFTITLIM
CPX_PARAM_MPSLONGNUM = _pycplex_platform.CPX_PARAM_MPSLONGNUM
CPX_PARAM_MEMORYEMPHASIS = _pycplex_platform.CPX_PARAM_MEMORYEMPHASIS
CPX_PARAM_NUMERICALEMPHASIS = _pycplex_platform.CPX_PARAM_NUMERICALEMPHASIS
CPX_PARAM_FEASOPTMODE = _pycplex_platform.CPX_PARAM_FEASOPTMODE
CPX_PARAM_PARALLELMODE = _pycplex_platform.CPX_PARAM_PARALLELMODE
CPX_PARAM_TUNINGMEASURE = _pycplex_platform.CPX_PARAM_TUNINGMEASURE
CPX_PARAM_TUNINGREPEAT = _pycplex_platform.CPX_PARAM_TUNINGREPEAT
CPX_PARAM_TUNINGTILIM = _pycplex_platform.CPX_PARAM_TUNINGTILIM
CPX_PARAM_TUNINGDISPLAY = _pycplex_platform.CPX_PARAM_TUNINGDISPLAY
CPX_PARAM_WRITELEVEL = _pycplex_platform.CPX_PARAM_WRITELEVEL
CPX_PARAM_RANDOMSEED = _pycplex_platform.CPX_PARAM_RANDOMSEED
CPX_PARAM_DETTILIM = _pycplex_platform.CPX_PARAM_DETTILIM
CPX_PARAM_FILEENCODING = _pycplex_platform.CPX_PARAM_FILEENCODING
CPX_PARAM_APIENCODING = _pycplex_platform.CPX_PARAM_APIENCODING
CPX_PARAM_OPTIMALITYTARGET = _pycplex_platform.CPX_PARAM_OPTIMALITYTARGET
CPX_PARAM_CLONELOG = _pycplex_platform.CPX_PARAM_CLONELOG
CPX_PARAM_TUNINGDETTILIM = _pycplex_platform.CPX_PARAM_TUNINGDETTILIM
CPX_PARAM_CPUMASK = _pycplex_platform.CPX_PARAM_CPUMASK
CPX_PARAM_SOLUTIONTYPE = _pycplex_platform.CPX_PARAM_SOLUTIONTYPE
CPX_PARAM_WARNLIM = _pycplex_platform.CPX_PARAM_WARNLIM
CPX_PARAM_SIFTSIM = _pycplex_platform.CPX_PARAM_SIFTSIM
CPX_PARAM_DYNAMICROWS = _pycplex_platform.CPX_PARAM_DYNAMICROWS
CPX_PARAM_RECORD = _pycplex_platform.CPX_PARAM_RECORD
CPX_PARAM_PARAMDISPLAY = _pycplex_platform.CPX_PARAM_PARAMDISPLAY
CPX_PARAM_FOLDING = _pycplex_platform.CPX_PARAM_FOLDING
CPX_PARAM_PREREFORM = _pycplex_platform.CPX_PARAM_PREREFORM
CPX_PARAM_WORKERALG = _pycplex_platform.CPX_PARAM_WORKERALG
CPX_PARAM_BENDERSSTRATEGY = _pycplex_platform.CPX_PARAM_BENDERSSTRATEGY
CPX_PARAM_BENDERSFEASCUTTOL = _pycplex_platform.CPX_PARAM_BENDERSFEASCUTTOL
CPX_PARAM_BENDERSOPTCUTTOL = _pycplex_platform.CPX_PARAM_BENDERSOPTCUTTOL
CPX_PARAM_MULTIOBJDISPLAY = _pycplex_platform.CPX_PARAM_MULTIOBJDISPLAY
CPX_PARAM_BRDIR = _pycplex_platform.CPX_PARAM_BRDIR
CPX_PARAM_BTTOL = _pycplex_platform.CPX_PARAM_BTTOL
CPX_PARAM_CLIQUES = _pycplex_platform.CPX_PARAM_CLIQUES
CPX_PARAM_COEREDIND = _pycplex_platform.CPX_PARAM_COEREDIND
CPX_PARAM_COVERS = _pycplex_platform.CPX_PARAM_COVERS
CPX_PARAM_CUTLO = _pycplex_platform.CPX_PARAM_CUTLO
CPX_PARAM_CUTUP = _pycplex_platform.CPX_PARAM_CUTUP
CPX_PARAM_EPAGAP = _pycplex_platform.CPX_PARAM_EPAGAP
CPX_PARAM_EPGAP = _pycplex_platform.CPX_PARAM_EPGAP
CPX_PARAM_EPINT = _pycplex_platform.CPX_PARAM_EPINT
CPX_PARAM_MIPDISPLAY = _pycplex_platform.CPX_PARAM_MIPDISPLAY
CPX_PARAM_MIPINTERVAL = _pycplex_platform.CPX_PARAM_MIPINTERVAL
CPX_PARAM_INTSOLLIM = _pycplex_platform.CPX_PARAM_INTSOLLIM
CPX_PARAM_NODEFILEIND = _pycplex_platform.CPX_PARAM_NODEFILEIND
CPX_PARAM_NODELIM = _pycplex_platform.CPX_PARAM_NODELIM
CPX_PARAM_NODESEL = _pycplex_platform.CPX_PARAM_NODESEL
CPX_PARAM_OBJDIF = _pycplex_platform.CPX_PARAM_OBJDIF
CPX_PARAM_MIPORDIND = _pycplex_platform.CPX_PARAM_MIPORDIND
CPX_PARAM_RELOBJDIF = _pycplex_platform.CPX_PARAM_RELOBJDIF
CPX_PARAM_STARTALG = _pycplex_platform.CPX_PARAM_STARTALG
CPX_PARAM_SUBALG = _pycplex_platform.CPX_PARAM_SUBALG
CPX_PARAM_TRELIM = _pycplex_platform.CPX_PARAM_TRELIM
CPX_PARAM_VARSEL = _pycplex_platform.CPX_PARAM_VARSEL
CPX_PARAM_BNDSTRENIND = _pycplex_platform.CPX_PARAM_BNDSTRENIND
CPX_PARAM_HEURFREQ = _pycplex_platform.CPX_PARAM_HEURFREQ
CPX_PARAM_MIPORDTYPE = _pycplex_platform.CPX_PARAM_MIPORDTYPE
CPX_PARAM_CUTSFACTOR = _pycplex_platform.CPX_PARAM_CUTSFACTOR
CPX_PARAM_RELAXPREIND = _pycplex_platform.CPX_PARAM_RELAXPREIND
CPX_PARAM_PRESLVND = _pycplex_platform.CPX_PARAM_PRESLVND
CPX_PARAM_BBINTERVAL = _pycplex_platform.CPX_PARAM_BBINTERVAL
CPX_PARAM_FLOWCOVERS = _pycplex_platform.CPX_PARAM_FLOWCOVERS
CPX_PARAM_IMPLBD = _pycplex_platform.CPX_PARAM_IMPLBD
CPX_PARAM_PROBE = _pycplex_platform.CPX_PARAM_PROBE
CPX_PARAM_GUBCOVERS = _pycplex_platform.CPX_PARAM_GUBCOVERS
CPX_PARAM_STRONGCANDLIM = _pycplex_platform.CPX_PARAM_STRONGCANDLIM
CPX_PARAM_STRONGITLIM = _pycplex_platform.CPX_PARAM_STRONGITLIM
CPX_PARAM_FRACCAND = _pycplex_platform.CPX_PARAM_FRACCAND
CPX_PARAM_FRACCUTS = _pycplex_platform.CPX_PARAM_FRACCUTS
CPX_PARAM_FRACPASS = _pycplex_platform.CPX_PARAM_FRACPASS
CPX_PARAM_FLOWPATHS = _pycplex_platform.CPX_PARAM_FLOWPATHS
CPX_PARAM_MIRCUTS = _pycplex_platform.CPX_PARAM_MIRCUTS
CPX_PARAM_DISJCUTS = _pycplex_platform.CPX_PARAM_DISJCUTS
CPX_PARAM_AGGCUTLIM = _pycplex_platform.CPX_PARAM_AGGCUTLIM
CPX_PARAM_MIPCBREDLP = _pycplex_platform.CPX_PARAM_MIPCBREDLP
CPX_PARAM_CUTPASS = _pycplex_platform.CPX_PARAM_CUTPASS
CPX_PARAM_MIPEMPHASIS = _pycplex_platform.CPX_PARAM_MIPEMPHASIS
CPX_PARAM_SYMMETRY = _pycplex_platform.CPX_PARAM_SYMMETRY
CPX_PARAM_DIVETYPE = _pycplex_platform.CPX_PARAM_DIVETYPE
CPX_PARAM_RINSHEUR = _pycplex_platform.CPX_PARAM_RINSHEUR
CPX_PARAM_LBHEUR = _pycplex_platform.CPX_PARAM_LBHEUR
CPX_PARAM_REPEATPRESOLVE = _pycplex_platform.CPX_PARAM_REPEATPRESOLVE
CPX_PARAM_PROBETIME = _pycplex_platform.CPX_PARAM_PROBETIME
CPX_PARAM_POLISHTIME = _pycplex_platform.CPX_PARAM_POLISHTIME
CPX_PARAM_REPAIRTRIES = _pycplex_platform.CPX_PARAM_REPAIRTRIES
CPX_PARAM_EPLIN = _pycplex_platform.CPX_PARAM_EPLIN
CPX_PARAM_EPRELAX = _pycplex_platform.CPX_PARAM_EPRELAX
CPX_PARAM_FPHEUR = _pycplex_platform.CPX_PARAM_FPHEUR
CPX_PARAM_EACHCUTLIM = _pycplex_platform.CPX_PARAM_EACHCUTLIM
CPX_PARAM_SOLNPOOLCAPACITY = _pycplex_platform.CPX_PARAM_SOLNPOOLCAPACITY
CPX_PARAM_SOLNPOOLREPLACE = _pycplex_platform.CPX_PARAM_SOLNPOOLREPLACE
CPX_PARAM_SOLNPOOLGAP = _pycplex_platform.CPX_PARAM_SOLNPOOLGAP
CPX_PARAM_SOLNPOOLAGAP = _pycplex_platform.CPX_PARAM_SOLNPOOLAGAP
CPX_PARAM_SOLNPOOLINTENSITY = _pycplex_platform.CPX_PARAM_SOLNPOOLINTENSITY
CPX_PARAM_POPULATELIM = _pycplex_platform.CPX_PARAM_POPULATELIM
CPX_PARAM_MIPSEARCH = _pycplex_platform.CPX_PARAM_MIPSEARCH
CPX_PARAM_MIQCPSTRAT = _pycplex_platform.CPX_PARAM_MIQCPSTRAT
CPX_PARAM_ZEROHALFCUTS = _pycplex_platform.CPX_PARAM_ZEROHALFCUTS
CPX_PARAM_HEUREFFORT = _pycplex_platform.CPX_PARAM_HEUREFFORT
CPX_PARAM_POLISHAFTEREPAGAP = _pycplex_platform.CPX_PARAM_POLISHAFTEREPAGAP
CPX_PARAM_POLISHAFTEREPGAP = _pycplex_platform.CPX_PARAM_POLISHAFTEREPGAP
CPX_PARAM_POLISHAFTERNODE = _pycplex_platform.CPX_PARAM_POLISHAFTERNODE
CPX_PARAM_POLISHAFTERINTSOL = _pycplex_platform.CPX_PARAM_POLISHAFTERINTSOL
CPX_PARAM_POLISHAFTERTIME = _pycplex_platform.CPX_PARAM_POLISHAFTERTIME
CPX_PARAM_MCFCUTS = _pycplex_platform.CPX_PARAM_MCFCUTS
CPX_PARAM_MIPKAPPASTATS = _pycplex_platform.CPX_PARAM_MIPKAPPASTATS
CPX_PARAM_AUXROOTTHREADS = _pycplex_platform.CPX_PARAM_AUXROOTTHREADS
CPX_PARAM_INTSOLFILEPREFIX = _pycplex_platform.CPX_PARAM_INTSOLFILEPREFIX
CPX_PARAM_PROBEDETTIME = _pycplex_platform.CPX_PARAM_PROBEDETTIME
CPX_PARAM_POLISHAFTERDETTIME = _pycplex_platform.CPX_PARAM_POLISHAFTERDETTIME
CPX_PARAM_LANDPCUTS = _pycplex_platform.CPX_PARAM_LANDPCUTS
CPX_PARAM_NODECUTS = _pycplex_platform.CPX_PARAM_NODECUTS
CPX_PARAM_RAMPUPDURATION = _pycplex_platform.CPX_PARAM_RAMPUPDURATION
CPX_PARAM_RAMPUPDETTILIM = _pycplex_platform.CPX_PARAM_RAMPUPDETTILIM
CPX_PARAM_RAMPUPTILIM = _pycplex_platform.CPX_PARAM_RAMPUPTILIM
CPX_PARAM_LOCALIMPLBD = _pycplex_platform.CPX_PARAM_LOCALIMPLBD
CPX_PARAM_BQPCUTS = _pycplex_platform.CPX_PARAM_BQPCUTS
CPX_PARAM_RLTCUTS = _pycplex_platform.CPX_PARAM_RLTCUTS
CPX_PARAM_SUBMIPSTARTALG = _pycplex_platform.CPX_PARAM_SUBMIPSTARTALG
CPX_PARAM_SUBMIPSUBALG = _pycplex_platform.CPX_PARAM_SUBMIPSUBALG
CPX_PARAM_SUBMIPSCAIND = _pycplex_platform.CPX_PARAM_SUBMIPSCAIND
CPX_PARAM_SUBMIPNODELIMIT = _pycplex_platform.CPX_PARAM_SUBMIPNODELIMIT
CPX_PARAM_SOS1REFORM = _pycplex_platform.CPX_PARAM_SOS1REFORM
CPX_PARAM_SOS2REFORM = _pycplex_platform.CPX_PARAM_SOS2REFORM
CPX_PARAM_BAREPCOMP = _pycplex_platform.CPX_PARAM_BAREPCOMP
CPX_PARAM_BARGROWTH = _pycplex_platform.CPX_PARAM_BARGROWTH
CPX_PARAM_BAROBJRNG = _pycplex_platform.CPX_PARAM_BAROBJRNG
CPX_PARAM_BARALG = _pycplex_platform.CPX_PARAM_BARALG
CPX_PARAM_BARCOLNZ = _pycplex_platform.CPX_PARAM_BARCOLNZ
CPX_PARAM_BARDISPLAY = _pycplex_platform.CPX_PARAM_BARDISPLAY
CPX_PARAM_BARITLIM = _pycplex_platform.CPX_PARAM_BARITLIM
CPX_PARAM_BARMAXCOR = _pycplex_platform.CPX_PARAM_BARMAXCOR
CPX_PARAM_BARORDER = _pycplex_platform.CPX_PARAM_BARORDER
CPX_PARAM_BARSTARTALG = _pycplex_platform.CPX_PARAM_BARSTARTALG
CPX_PARAM_BARCROSSALG = _pycplex_platform.CPX_PARAM_BARCROSSALG
CPX_PARAM_BARQCPEPCOMP = _pycplex_platform.CPX_PARAM_BARQCPEPCOMP
CPX_PARAM_QPNZREADLIM = _pycplex_platform.CPX_PARAM_QPNZREADLIM
CPX_PARAM_CALCQCPDUALS = _pycplex_platform.CPX_PARAM_CALCQCPDUALS
CPX_PARAM_QPMAKEPSDIND = _pycplex_platform.CPX_PARAM_QPMAKEPSDIND
CPX_PARAM_QTOLININD = _pycplex_platform.CPX_PARAM_QTOLININD
CPX_PARAM_NETITLIM = _pycplex_platform.CPX_PARAM_NETITLIM
CPX_PARAM_NETEPOPT = _pycplex_platform.CPX_PARAM_NETEPOPT
CPX_PARAM_NETEPRHS = _pycplex_platform.CPX_PARAM_NETEPRHS
CPX_PARAM_NETPPRIIND = _pycplex_platform.CPX_PARAM_NETPPRIIND
CPX_PARAM_NETDISPLAY = _pycplex_platform.CPX_PARAM_NETDISPLAY
CPX_CPXAUTOTYPES_H_H = _pycplex_platform.CPX_CPXAUTOTYPES_H_H
CPX_CPXAUTOSTRUCTS_H_H = _pycplex_platform.CPX_CPXAUTOSTRUCTS_H_H
# Register cpxdeserializer in _pycplex_platform:
_pycplex_platform.cpxdeserializer_swigregister(cpxdeserializer)
# Register cpxserializer in _pycplex_platform:
_pycplex_platform.cpxserializer_swigregister(cpxserializer)
CPX_CPLEXX_H = _pycplex_platform.CPX_CPLEXX_H
CPX_APIMODEL_SMALL = _pycplex_platform.CPX_APIMODEL_SMALL
CPX_APIMODEL_LARGE = _pycplex_platform.CPX_APIMODEL_LARGE
CPX_APIMODEL = _pycplex_platform.CPX_APIMODEL
# Register cb_struct in _pycplex_platform:
_pycplex_platform.cb_struct_swigregister(cb_struct)
CPX_CPLEXE_H = _pycplex_platform.CPX_CPLEXE_H
CPXE_H = _pycplex_platform.CPXE_H
MIPE_H = _pycplex_platform.MIPE_H
CPXAUTOE_H = _pycplex_platform.CPXAUTOE_H
CPX_AUTOES_H = _pycplex_platform.CPX_AUTOES_H
CPX_AUTOEL_H = _pycplex_platform.CPX_AUTOEL_H
CPX_AUTOEX_H = _pycplex_platform.CPX_AUTOEX_H
# Register cpxpyiodevice in _pycplex_platform:
_pycplex_platform.cpxpyiodevice_swigregister(cpxpyiodevice)
# Register intPtr in _pycplex_platform:
_pycplex_platform.intPtr_swigregister(intPtr)
# Register cpxlongPtr in _pycplex_platform:
_pycplex_platform.cpxlongPtr_swigregister(cpxlongPtr)
# Register doublePtr in _pycplex_platform:
_pycplex_platform.doublePtr_swigregister(doublePtr)
# Register CPXLPptrPtr in _pycplex_platform:
_pycplex_platform.CPXLPptrPtr_swigregister(CPXLPptrPtr)
# Register CPXENVptrPtr in _pycplex_platform:
_pycplex_platform.CPXENVptrPtr_swigregister(CPXENVptrPtr)
# Register CPXCHANNELptrPtr in _pycplex_platform:
_pycplex_platform.CPXCHANNELptrPtr_swigregister(CPXCHANNELptrPtr)
# Register CPXPARAMSETptrPtr in _pycplex_platform:
_pycplex_platform.CPXPARAMSETptrPtr_swigregister(CPXPARAMSETptrPtr)
# Register intArray in _pycplex_platform:
_pycplex_platform.intArray_swigregister(intArray)
# Register doubleArray in _pycplex_platform:
_pycplex_platform.doubleArray_swigregister(doubleArray)
# Register longArray in _pycplex_platform:
_pycplex_platform.longArray_swigregister(longArray)
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.0
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError('Python 2.7 or later required')
# Import the low-level C/C++ module
if __package__ or '.' in __name__:
from . import _pycplex_platform
else:
import _pycplex_platform
try:
import builtins as __builtin__
except ImportError:
import __builtin__
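
# The helpers below are SWIG's standard attribute-access shims: they route
# reads and writes on proxy objects through the generated
# __swig_setmethods__/__swig_getmethods__ tables and provide the shared
# __repr__ used by the wrapped classes in this module.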
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if name == "thisown":
return self.this.own(value)
if name == "this":
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if not static:
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)

def _swig_getattr(self, class_type, name):
if name == "thisown":
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))

def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr

def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr

def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper

class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
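
# Illustrative sketch (hypothetical class, using only the helpers above):
#   @_swig_add_metaclass(_SwigNonDynamicMeta)
#   class Example(object):
#       x = 1
# Re-assigning the existing attribute (Example.x = 2) succeeds, while adding
# a new class attribute (Example.y = 3) raises AttributeError, keeping
# wrapped classes closed to accidental new class attributes.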
CPX_FEATURES_H = _pycplex_platform.CPX_FEATURES_H
CPX_FEATURE_REMOTE_OBJECT = _pycplex_platform.CPX_FEATURE_REMOTE_OBJECT
CPX_FEATURE_DISTRIBUTED_MIP = _pycplex_platform.CPX_FEATURE_DISTRIBUTED_MIP
CPX_CPXAUTOINTTYPES_H_H = _pycplex_platform.CPX_CPXAUTOINTTYPES_H_H
CPXBYTE_DEFINED = _pycplex_platform.CPXBYTE_DEFINED
CPXINT_DEFINED = _pycplex_platform.CPXINT_DEFINED
CPXLONG_DEFINED = _pycplex_platform.CPXLONG_DEFINED
CPXSHORT_DEFINED = _pycplex_platform.CPXSHORT_DEFINED
CPXULONG_DEFINED = _pycplex_platform.CPXULONG_DEFINED
CPX_STR_PARAM_MAX = _pycplex_platform.CPX_STR_PARAM_MAX

class cpxiodevice(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
cpxiodev_eof = property(_pycplex_platform.cpxiodevice_cpxiodev_eof_get, _pycplex_platform.cpxiodevice_cpxiodev_eof_set)
cpxiodev_error = property(_pycplex_platform.cpxiodevice_cpxiodev_error_get, _pycplex_platform.cpxiodevice_cpxiodev_error_set)
cpxiodev_rewind = property(_pycplex_platform.cpxiodevice_cpxiodev_rewind_get, _pycplex_platform.cpxiodevice_cpxiodev_rewind_set)
cpxiodev_flush = property(_pycplex_platform.cpxiodevice_cpxiodev_flush_get, _pycplex_platform.cpxiodevice_cpxiodev_flush_set)
cpxiodev_close = property(_pycplex_platform.cpxiodevice_cpxiodev_close_get, _pycplex_platform.cpxiodevice_cpxiodev_close_set)
cpxiodev_putc = property(_pycplex_platform.cpxiodevice_cpxiodev_putc_get, _pycplex_platform.cpxiodevice_cpxiodev_putc_set)
cpxiodev_puts = property(_pycplex_platform.cpxiodevice_cpxiodev_puts_get, _pycplex_platform.cpxiodevice_cpxiodev_puts_set)
cpxiodev_read = property(_pycplex_platform.cpxiodevice_cpxiodev_read_get, _pycplex_platform.cpxiodevice_cpxiodev_read_set)
cpxiodev_write = property(_pycplex_platform.cpxiodevice_cpxiodev_write_get, _pycplex_platform.cpxiodevice_cpxiodev_write_set)
def __init__(self):
_pycplex_platform.cpxiodevice_swiginit(self, _pycplex_platform.new_cpxiodevice())
__swig_destroy__ = _pycplex_platform.delete_cpxiodevice
# Register cpxiodevice in _pycplex_platform:
_pycplex_platform.cpxiodevice_swigregister(cpxiodevice)
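
# Minimal usage sketch (assumes the compiled _pycplex_platform extension is
# importable):
#   dev = cpxiodevice()   # allocates the underlying C struct
#   dev.thisown           # True: Python frees the struct when dev is collected
# The cpxiodev_* properties expose the fields of the C cpxiodevice struct,
# e.g. dev.cpxiodev_eof for the EOF handler slot.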
cvar = _pycplex_platform.cvar
CPX_NULL = cvar.CPX_NULL
ext_name = cvar.ext_name
CPX_VERSION = _pycplex_platform.CPX_VERSION
CPX_VERSION_VERSION = _pycplex_platform.CPX_VERSION_VERSION
CPX_VERSION_RELEASE = _pycplex_platform.CPX_VERSION_RELEASE
CPX_VERSION_MODIFICATION = _pycplex_platform.CPX_VERSION_MODIFICATION
CPX_VERSION_FIX = _pycplex_platform.CPX_VERSION_FIX
CPX_INFBOUND = _pycplex_platform.CPX_INFBOUND
CPX_MINBOUND = _pycplex_platform.CPX_MINBOUND
CPX_PWL_MAXSLOPE = _pycplex_platform.CPX_PWL_MAXSLOPE
CPX_PWL_MINSLOPE = _pycplex_platform.CPX_PWL_MINSLOPE
CPX_PARAMTYPE_NONE = _pycplex_platform.CPX_PARAMTYPE_NONE
CPX_PARAMTYPE_INT = _pycplex_platform.CPX_PARAMTYPE_INT
CPX_PARAMTYPE_DOUBLE = _pycplex_platform.CPX_PARAMTYPE_DOUBLE
CPX_PARAMTYPE_STRING = _pycplex_platform.CPX_PARAMTYPE_STRING
CPX_PARAMTYPE_LONG = _pycplex_platform.CPX_PARAMTYPE_LONG
CPX_NO_SOLN = _pycplex_platform.CPX_NO_SOLN
CPX_AUTO_SOLN = _pycplex_platform.CPX_AUTO_SOLN
CPX_BASIC_SOLN = _pycplex_platform.CPX_BASIC_SOLN
CPX_NONBASIC_SOLN = _pycplex_platform.CPX_NONBASIC_SOLN
CPX_PRIMAL_SOLN = _pycplex_platform.CPX_PRIMAL_SOLN
CPX_PRECOL_LOW = _pycplex_platform.CPX_PRECOL_LOW
CPX_PRECOL_UP = _pycplex_platform.CPX_PRECOL_UP
CPX_PRECOL_FIX = _pycplex_platform.CPX_PRECOL_FIX
CPX_PRECOL_AGG = _pycplex_platform.CPX_PRECOL_AGG
CPX_PRECOL_OTHER = _pycplex_platform.CPX_PRECOL_OTHER
CPX_PREROW_RED = _pycplex_platform.CPX_PREROW_RED
CPX_PREROW_AGG = _pycplex_platform.CPX_PREROW_AGG
CPX_PREROW_OTHER = _pycplex_platform.CPX_PREROW_OTHER
CPX_AUTO = _pycplex_platform.CPX_AUTO
CPX_ON = _pycplex_platform.CPX_ON
CPX_OFF = _pycplex_platform.CPX_OFF
CPX_MAX = _pycplex_platform.CPX_MAX
CPX_MIN = _pycplex_platform.CPX_MIN
CPX_DATACHECK_OFF = _pycplex_platform.CPX_DATACHECK_OFF
CPX_DATACHECK_WARN = _pycplex_platform.CPX_DATACHECK_WARN
CPX_DATACHECK_ASSIST = _pycplex_platform.CPX_DATACHECK_ASSIST
CPX_PPRIIND_PARTIAL = _pycplex_platform.CPX_PPRIIND_PARTIAL
CPX_PPRIIND_AUTO = _pycplex_platform.CPX_PPRIIND_AUTO
CPX_PPRIIND_DEVEX = _pycplex_platform.CPX_PPRIIND_DEVEX
CPX_PPRIIND_STEEP = _pycplex_platform.CPX_PPRIIND_STEEP
CPX_PPRIIND_STEEPQSTART = _pycplex_platform.CPX_PPRIIND_STEEPQSTART
CPX_PPRIIND_FULL = _pycplex_platform.CPX_PPRIIND_FULL
CPX_DPRIIND_AUTO = _pycplex_platform.CPX_DPRIIND_AUTO
CPX_DPRIIND_FULL = _pycplex_platform.CPX_DPRIIND_FULL
CPX_DPRIIND_STEEP = _pycplex_platform.CPX_DPRIIND_STEEP
CPX_DPRIIND_FULLSTEEP = _pycplex_platform.CPX_DPRIIND_FULLSTEEP
CPX_DPRIIND_STEEPQSTART = _pycplex_platform.CPX_DPRIIND_STEEPQSTART
CPX_DPRIIND_DEVEX = _pycplex_platform.CPX_DPRIIND_DEVEX
CPX_PARALLEL_DETERMINISTIC = _pycplex_platform.CPX_PARALLEL_DETERMINISTIC
CPX_PARALLEL_AUTO = _pycplex_platform.CPX_PARALLEL_AUTO
CPX_PARALLEL_OPPORTUNISTIC = _pycplex_platform.CPX_PARALLEL_OPPORTUNISTIC
CPX_WRITELEVEL_AUTO = _pycplex_platform.CPX_WRITELEVEL_AUTO
CPX_WRITELEVEL_ALLVARS = _pycplex_platform.CPX_WRITELEVEL_ALLVARS
CPX_WRITELEVEL_DISCRETEVARS = _pycplex_platform.CPX_WRITELEVEL_DISCRETEVARS
CPX_WRITELEVEL_NONZEROVARS = _pycplex_platform.CPX_WRITELEVEL_NONZEROVARS
CPX_WRITELEVEL_NONZERODISCRETEVARS = _pycplex_platform.CPX_WRITELEVEL_NONZERODISCRETEVARS
CPX_OPTIMALITYTARGET_AUTO = _pycplex_platform.CPX_OPTIMALITYTARGET_AUTO
CPX_OPTIMALITYTARGET_OPTIMALCONVEX = _pycplex_platform.CPX_OPTIMALITYTARGET_OPTIMALCONVEX
CPX_OPTIMALITYTARGET_FIRSTORDER = _pycplex_platform.CPX_OPTIMALITYTARGET_FIRSTORDER
CPX_OPTIMALITYTARGET_OPTIMALGLOBAL = _pycplex_platform.CPX_OPTIMALITYTARGET_OPTIMALGLOBAL
CPX_ALG_NONE = _pycplex_platform.CPX_ALG_NONE
CPX_ALG_AUTOMATIC = _pycplex_platform.CPX_ALG_AUTOMATIC
CPX_ALG_PRIMAL = _pycplex_platform.CPX_ALG_PRIMAL
CPX_ALG_DUAL = _pycplex_platform.CPX_ALG_DUAL
CPX_ALG_NET = _pycplex_platform.CPX_ALG_NET
CPX_ALG_BARRIER = _pycplex_platform.CPX_ALG_BARRIER
CPX_ALG_SIFTING = _pycplex_platform.CPX_ALG_SIFTING
CPX_ALG_CONCURRENT = _pycplex_platform.CPX_ALG_CONCURRENT
CPX_ALG_BAROPT = _pycplex_platform.CPX_ALG_BAROPT
CPX_ALG_PIVOTIN = _pycplex_platform.CPX_ALG_PIVOTIN
CPX_ALG_PIVOTOUT = _pycplex_platform.CPX_ALG_PIVOTOUT
CPX_ALG_PIVOT = _pycplex_platform.CPX_ALG_PIVOT
CPX_ALG_FEASOPT = _pycplex_platform.CPX_ALG_FEASOPT
CPX_ALG_MIP = _pycplex_platform.CPX_ALG_MIP
CPX_ALG_BENDERS = _pycplex_platform.CPX_ALG_BENDERS
CPX_ALG_MULTIOBJ = _pycplex_platform.CPX_ALG_MULTIOBJ
CPX_ALG_ROBUST = _pycplex_platform.CPX_ALG_ROBUST
CPX_AT_LOWER = _pycplex_platform.CPX_AT_LOWER
CPX_BASIC = _pycplex_platform.CPX_BASIC
CPX_AT_UPPER = _pycplex_platform.CPX_AT_UPPER
CPX_FREE_SUPER = _pycplex_platform.CPX_FREE_SUPER
CPX_NO_VARIABLE = _pycplex_platform.CPX_NO_VARIABLE
CPX_CONTINUOUS = _pycplex_platform.CPX_CONTINUOUS
CPX_BINARY = _pycplex_platform.CPX_BINARY
CPX_INTEGER = _pycplex_platform.CPX_INTEGER
CPX_SEMICONT = _pycplex_platform.CPX_SEMICONT
CPX_SEMIINT = _pycplex_platform.CPX_SEMIINT
CPX_PREREDUCE_PRIMALANDDUAL = _pycplex_platform.CPX_PREREDUCE_PRIMALANDDUAL
CPX_PREREDUCE_DUALONLY = _pycplex_platform.CPX_PREREDUCE_DUALONLY
CPX_PREREDUCE_PRIMALONLY = _pycplex_platform.CPX_PREREDUCE_PRIMALONLY
CPX_PREREDUCE_NOPRIMALORDUAL = _pycplex_platform.CPX_PREREDUCE_NOPRIMALORDUAL
CPX_PREREFORM_ALL = _pycplex_platform.CPX_PREREFORM_ALL
CPX_PREREFORM_INTERFERE_CRUSH = _pycplex_platform.CPX_PREREFORM_INTERFERE_CRUSH
CPX_PREREFORM_INTERFERE_UNCRUSH = _pycplex_platform.CPX_PREREFORM_INTERFERE_UNCRUSH
CPX_PREREFORM_NONE = _pycplex_platform.CPX_PREREFORM_NONE
CPX_CONFLICT_EXCLUDED = _pycplex_platform.CPX_CONFLICT_EXCLUDED
CPX_CONFLICT_POSSIBLE_MEMBER = _pycplex_platform.CPX_CONFLICT_POSSIBLE_MEMBER
CPX_CONFLICT_POSSIBLE_LB = _pycplex_platform.CPX_CONFLICT_POSSIBLE_LB
CPX_CONFLICT_POSSIBLE_UB = _pycplex_platform.CPX_CONFLICT_POSSIBLE_UB
CPX_CONFLICT_MEMBER = _pycplex_platform.CPX_CONFLICT_MEMBER
CPX_CONFLICT_LB = _pycplex_platform.CPX_CONFLICT_LB
CPX_CONFLICT_UB = _pycplex_platform.CPX_CONFLICT_UB
CPX_CONFLICTALG_AUTO = _pycplex_platform.CPX_CONFLICTALG_AUTO
CPX_CONFLICTALG_FAST = _pycplex_platform.CPX_CONFLICTALG_FAST
CPX_CONFLICTALG_PROPAGATE = _pycplex_platform.CPX_CONFLICTALG_PROPAGATE
CPX_CONFLICTALG_PRESOLVE = _pycplex_platform.CPX_CONFLICTALG_PRESOLVE
CPX_CONFLICTALG_IIS = _pycplex_platform.CPX_CONFLICTALG_IIS
CPX_CONFLICTALG_LIMITSOLVE = _pycplex_platform.CPX_CONFLICTALG_LIMITSOLVE
CPX_CONFLICTALG_SOLVE = _pycplex_platform.CPX_CONFLICTALG_SOLVE
CPXPROB_LP = _pycplex_platform.CPXPROB_LP
CPXPROB_MILP = _pycplex_platform.CPXPROB_MILP
CPXPROB_FIXEDMILP = _pycplex_platform.CPXPROB_FIXEDMILP
CPXPROB_NODELP = _pycplex_platform.CPXPROB_NODELP
CPXPROB_QP = _pycplex_platform.CPXPROB_QP
CPXPROB_MIQP = _pycplex_platform.CPXPROB_MIQP
CPXPROB_FIXEDMIQP = _pycplex_platform.CPXPROB_FIXEDMIQP
CPXPROB_NODEQP = _pycplex_platform.CPXPROB_NODEQP
CPXPROB_QCP = _pycplex_platform.CPXPROB_QCP
CPXPROB_MIQCP = _pycplex_platform.CPXPROB_MIQCP
CPXPROB_NODEQCP = _pycplex_platform.CPXPROB_NODEQCP
CPX_LPREADER_LEGACY = _pycplex_platform.CPX_LPREADER_LEGACY
CPX_LPREADER_NEW = _pycplex_platform.CPX_LPREADER_NEW
CPX_PARAM_ALL_MIN = _pycplex_platform.CPX_PARAM_ALL_MIN
CPX_PARAM_ALL_MAX = _pycplex_platform.CPX_PARAM_ALL_MAX
CPX_CALLBACK_PRIMAL = _pycplex_platform.CPX_CALLBACK_PRIMAL
CPX_CALLBACK_DUAL = _pycplex_platform.CPX_CALLBACK_DUAL
CPX_CALLBACK_NETWORK = _pycplex_platform.CPX_CALLBACK_NETWORK
CPX_CALLBACK_PRIMAL_CROSSOVER = _pycplex_platform.CPX_CALLBACK_PRIMAL_CROSSOVER
CPX_CALLBACK_DUAL_CROSSOVER = _pycplex_platform.CPX_CALLBACK_DUAL_CROSSOVER
CPX_CALLBACK_BARRIER = _pycplex_platform.CPX_CALLBACK_BARRIER
CPX_CALLBACK_PRESOLVE = _pycplex_platform.CPX_CALLBACK_PRESOLVE
CPX_CALLBACK_QPBARRIER = _pycplex_platform.CPX_CALLBACK_QPBARRIER
CPX_CALLBACK_QPSIMPLEX = _pycplex_platform.CPX_CALLBACK_QPSIMPLEX
CPX_CALLBACK_TUNING = _pycplex_platform.CPX_CALLBACK_TUNING
CPX_CALLBACK_INFO_PRIMAL_OBJ = _pycplex_platform.CPX_CALLBACK_INFO_PRIMAL_OBJ
CPX_CALLBACK_INFO_DUAL_OBJ = _pycplex_platform.CPX_CALLBACK_INFO_DUAL_OBJ
CPX_CALLBACK_INFO_PRIMAL_INFMEAS = _pycplex_platform.CPX_CALLBACK_INFO_PRIMAL_INFMEAS
CPX_CALLBACK_INFO_DUAL_INFMEAS = _pycplex_platform.CPX_CALLBACK_INFO_DUAL_INFMEAS
CPX_CALLBACK_INFO_PRIMAL_FEAS = _pycplex_platform.CPX_CALLBACK_INFO_PRIMAL_FEAS
CPX_CALLBACK_INFO_DUAL_FEAS = _pycplex_platform.CPX_CALLBACK_INFO_DUAL_FEAS
CPX_CALLBACK_INFO_ITCOUNT = _pycplex_platform.CPX_CALLBACK_INFO_ITCOUNT
CPX_CALLBACK_INFO_CROSSOVER_PPUSH = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_PPUSH
CPX_CALLBACK_INFO_CROSSOVER_PEXCH = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_PEXCH
CPX_CALLBACK_INFO_CROSSOVER_DPUSH = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_DPUSH
CPX_CALLBACK_INFO_CROSSOVER_DEXCH = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_DEXCH
CPX_CALLBACK_INFO_CROSSOVER_SBCNT = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_SBCNT
CPX_CALLBACK_INFO_PRESOLVE_ROWSGONE = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_ROWSGONE
CPX_CALLBACK_INFO_PRESOLVE_COLSGONE = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_COLSGONE
CPX_CALLBACK_INFO_PRESOLVE_AGGSUBST = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_AGGSUBST
CPX_CALLBACK_INFO_PRESOLVE_COEFFS = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_COEFFS
CPX_CALLBACK_INFO_USER_PROBLEM = _pycplex_platform.CPX_CALLBACK_INFO_USER_PROBLEM
CPX_CALLBACK_INFO_TUNING_PROGRESS = _pycplex_platform.CPX_CALLBACK_INFO_TUNING_PROGRESS
CPX_CALLBACK_INFO_ENDTIME = _pycplex_platform.CPX_CALLBACK_INFO_ENDTIME
CPX_CALLBACK_INFO_ITCOUNT_LONG = _pycplex_platform.CPX_CALLBACK_INFO_ITCOUNT_LONG
CPX_CALLBACK_INFO_CROSSOVER_PPUSH_LONG = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_PPUSH_LONG
CPX_CALLBACK_INFO_CROSSOVER_PEXCH_LONG = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_PEXCH_LONG
CPX_CALLBACK_INFO_CROSSOVER_DPUSH_LONG = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_DPUSH_LONG
CPX_CALLBACK_INFO_CROSSOVER_DEXCH_LONG = _pycplex_platform.CPX_CALLBACK_INFO_CROSSOVER_DEXCH_LONG
CPX_CALLBACK_INFO_PRESOLVE_AGGSUBST_LONG = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_AGGSUBST_LONG
CPX_CALLBACK_INFO_PRESOLVE_COEFFS_LONG = _pycplex_platform.CPX_CALLBACK_INFO_PRESOLVE_COEFFS_LONG
CPX_CALLBACK_INFO_ENDDETTIME = _pycplex_platform.CPX_CALLBACK_INFO_ENDDETTIME
CPX_CALLBACK_INFO_STARTTIME = _pycplex_platform.CPX_CALLBACK_INFO_STARTTIME
CPX_CALLBACK_INFO_STARTDETTIME = _pycplex_platform.CPX_CALLBACK_INFO_STARTDETTIME
CPX_TUNE_AVERAGE = _pycplex_platform.CPX_TUNE_AVERAGE
CPX_TUNE_MINMAX = _pycplex_platform.CPX_TUNE_MINMAX
CPX_TUNE_ABORT = _pycplex_platform.CPX_TUNE_ABORT
CPX_TUNE_TILIM = _pycplex_platform.CPX_TUNE_TILIM
CPX_TUNE_DETTILIM = _pycplex_platform.CPX_TUNE_DETTILIM
CPX_FEASOPT_MIN_SUM = _pycplex_platform.CPX_FEASOPT_MIN_SUM
CPX_FEASOPT_OPT_SUM = _pycplex_platform.CPX_FEASOPT_OPT_SUM
CPX_FEASOPT_MIN_INF = _pycplex_platform.CPX_FEASOPT_MIN_INF
CPX_FEASOPT_OPT_INF = _pycplex_platform.CPX_FEASOPT_OPT_INF
CPX_FEASOPT_MIN_QUAD = _pycplex_platform.CPX_FEASOPT_MIN_QUAD
CPX_FEASOPT_OPT_QUAD = _pycplex_platform.CPX_FEASOPT_OPT_QUAD
CPX_BENDERSSTRATEGY_OFF = _pycplex_platform.CPX_BENDERSSTRATEGY_OFF
CPX_BENDERSSTRATEGY_AUTO = _pycplex_platform.CPX_BENDERSSTRATEGY_AUTO
CPX_BENDERSSTRATEGY_USER = _pycplex_platform.CPX_BENDERSSTRATEGY_USER
CPX_BENDERSSTRATEGY_WORKERS = _pycplex_platform.CPX_BENDERSSTRATEGY_WORKERS
CPX_BENDERSSTRATEGY_FULL = _pycplex_platform.CPX_BENDERSSTRATEGY_FULL
CPX_ANNOTATIONDATA_LONG = _pycplex_platform.CPX_ANNOTATIONDATA_LONG
CPX_ANNOTATIONDATA_DOUBLE = _pycplex_platform.CPX_ANNOTATIONDATA_DOUBLE
CPX_ANNOTATIONOBJ_OBJ = _pycplex_platform.CPX_ANNOTATIONOBJ_OBJ
CPX_ANNOTATIONOBJ_COL = _pycplex_platform.CPX_ANNOTATIONOBJ_COL
CPX_ANNOTATIONOBJ_ROW = _pycplex_platform.CPX_ANNOTATIONOBJ_ROW
CPX_ANNOTATIONOBJ_SOS = _pycplex_platform.CPX_ANNOTATIONOBJ_SOS
CPX_ANNOTATIONOBJ_IND = _pycplex_platform.CPX_ANNOTATIONOBJ_IND
CPX_ANNOTATIONOBJ_QC = _pycplex_platform.CPX_ANNOTATIONOBJ_QC
CPX_ANNOTATIONOBJ_LAST = _pycplex_platform.CPX_ANNOTATIONOBJ_LAST
CPXIIS_COMPLETE = _pycplex_platform.CPXIIS_COMPLETE
CPXIIS_PARTIAL = _pycplex_platform.CPXIIS_PARTIAL
CPXIIS_AT_LOWER = _pycplex_platform.CPXIIS_AT_LOWER
CPXIIS_FIXED = _pycplex_platform.CPXIIS_FIXED
CPXIIS_AT_UPPER = _pycplex_platform.CPXIIS_AT_UPPER
CPX_BARORDER_AUTO = _pycplex_platform.CPX_BARORDER_AUTO
CPX_BARORDER_AMD = _pycplex_platform.CPX_BARORDER_AMD
CPX_BARORDER_AMF = _pycplex_platform.CPX_BARORDER_AMF
CPX_BARORDER_ND = _pycplex_platform.CPX_BARORDER_ND
CPX_MIPEMPHASIS_BALANCED = _pycplex_platform.CPX_MIPEMPHASIS_BALANCED
CPX_MIPEMPHASIS_FEASIBILITY = _pycplex_platform.CPX_MIPEMPHASIS_FEASIBILITY
CPX_MIPEMPHASIS_OPTIMALITY = _pycplex_platform.CPX_MIPEMPHASIS_OPTIMALITY
CPX_MIPEMPHASIS_BESTBOUND = _pycplex_platform.CPX_MIPEMPHASIS_BESTBOUND
CPX_MIPEMPHASIS_HIDDENFEAS = _pycplex_platform.CPX_MIPEMPHASIS_HIDDENFEAS
CPX_MIPEMPHASIS_HEURISTIC = _pycplex_platform.CPX_MIPEMPHASIS_HEURISTIC
CPX_TYPE_VAR = _pycplex_platform.CPX_TYPE_VAR
CPX_TYPE_SOS1 = _pycplex_platform.CPX_TYPE_SOS1
CPX_TYPE_SOS2 = _pycplex_platform.CPX_TYPE_SOS2
CPX_TYPE_USER = _pycplex_platform.CPX_TYPE_USER
CPX_TYPE_ANY = _pycplex_platform.CPX_TYPE_ANY
CPX_VARSEL_MININFEAS = _pycplex_platform.CPX_VARSEL_MININFEAS
CPX_VARSEL_DEFAULT = _pycplex_platform.CPX_VARSEL_DEFAULT
CPX_VARSEL_MAXINFEAS = _pycplex_platform.CPX_VARSEL_MAXINFEAS
CPX_VARSEL_PSEUDO = _pycplex_platform.CPX_VARSEL_PSEUDO
CPX_VARSEL_STRONG = _pycplex_platform.CPX_VARSEL_STRONG
CPX_VARSEL_PSEUDOREDUCED = _pycplex_platform.CPX_VARSEL_PSEUDOREDUCED
CPX_NODESEL_DFS = _pycplex_platform.CPX_NODESEL_DFS
CPX_NODESEL_BESTBOUND = _pycplex_platform.CPX_NODESEL_BESTBOUND
CPX_NODESEL_BESTEST = _pycplex_platform.CPX_NODESEL_BESTEST
CPX_NODESEL_BESTEST_ALT = _pycplex_platform.CPX_NODESEL_BESTEST_ALT
CPX_MIPORDER_COST = _pycplex_platform.CPX_MIPORDER_COST
CPX_MIPORDER_BOUNDS = _pycplex_platform.CPX_MIPORDER_BOUNDS
CPX_MIPORDER_SCALEDCOST = _pycplex_platform.CPX_MIPORDER_SCALEDCOST
CPX_BRANCH_GLOBAL = _pycplex_platform.CPX_BRANCH_GLOBAL
CPX_BRANCH_DOWN = _pycplex_platform.CPX_BRANCH_DOWN
CPX_BRANCH_UP = _pycplex_platform.CPX_BRANCH_UP
CPX_BRDIR_DOWN = _pycplex_platform.CPX_BRDIR_DOWN
CPX_BRDIR_AUTO = _pycplex_platform.CPX_BRDIR_AUTO
CPX_BRDIR_UP = _pycplex_platform.CPX_BRDIR_UP
CPX_CUT_COVER = _pycplex_platform.CPX_CUT_COVER
CPX_CUT_GUBCOVER = _pycplex_platform.CPX_CUT_GUBCOVER
CPX_CUT_FLOWCOVER = _pycplex_platform.CPX_CUT_FLOWCOVER
CPX_CUT_CLIQUE = _pycplex_platform.CPX_CUT_CLIQUE
CPX_CUT_FRAC = _pycplex_platform.CPX_CUT_FRAC
CPX_CUT_MIR = _pycplex_platform.CPX_CUT_MIR
CPX_CUT_FLOWPATH = _pycplex_platform.CPX_CUT_FLOWPATH
CPX_CUT_DISJ = _pycplex_platform.CPX_CUT_DISJ
CPX_CUT_IMPLBD = _pycplex_platform.CPX_CUT_IMPLBD
CPX_CUT_ZEROHALF = _pycplex_platform.CPX_CUT_ZEROHALF
CPX_CUT_MCF = _pycplex_platform.CPX_CUT_MCF
CPX_CUT_LOCALCOVER = _pycplex_platform.CPX_CUT_LOCALCOVER
CPX_CUT_TIGHTEN = _pycplex_platform.CPX_CUT_TIGHTEN
CPX_CUT_OBJDISJ = _pycplex_platform.CPX_CUT_OBJDISJ
CPX_CUT_LANDP = _pycplex_platform.CPX_CUT_LANDP
CPX_CUT_USER = _pycplex_platform.CPX_CUT_USER
CPX_CUT_TABLE = _pycplex_platform.CPX_CUT_TABLE
CPX_CUT_SOLNPOOL = _pycplex_platform.CPX_CUT_SOLNPOOL
CPX_CUT_LOCALIMPLBD = _pycplex_platform.CPX_CUT_LOCALIMPLBD
CPX_CUT_BQP = _pycplex_platform.CPX_CUT_BQP
CPX_CUT_RLT = _pycplex_platform.CPX_CUT_RLT
CPX_CUT_BENDERS = _pycplex_platform.CPX_CUT_BENDERS
CPX_CUT_NUM_TYPES = _pycplex_platform.CPX_CUT_NUM_TYPES
CPX_MIPSEARCH_AUTO = _pycplex_platform.CPX_MIPSEARCH_AUTO
CPX_MIPSEARCH_TRADITIONAL = _pycplex_platform.CPX_MIPSEARCH_TRADITIONAL
CPX_MIPSEARCH_DYNAMIC = _pycplex_platform.CPX_MIPSEARCH_DYNAMIC
CPX_MIPKAPPA_OFF = _pycplex_platform.CPX_MIPKAPPA_OFF
CPX_MIPKAPPA_AUTO = _pycplex_platform.CPX_MIPKAPPA_AUTO
CPX_MIPKAPPA_SAMPLE = _pycplex_platform.CPX_MIPKAPPA_SAMPLE
CPX_MIPKAPPA_FULL = _pycplex_platform.CPX_MIPKAPPA_FULL
CPX_MIPSTART_AUTO = _pycplex_platform.CPX_MIPSTART_AUTO
CPX_MIPSTART_CHECKFEAS = _pycplex_platform.CPX_MIPSTART_CHECKFEAS
CPX_MIPSTART_SOLVEFIXED = _pycplex_platform.CPX_MIPSTART_SOLVEFIXED
CPX_MIPSTART_SOLVEMIP = _pycplex_platform.CPX_MIPSTART_SOLVEMIP
CPX_MIPSTART_REPAIR = _pycplex_platform.CPX_MIPSTART_REPAIR
CPX_MIPSTART_NOCHECK = _pycplex_platform.CPX_MIPSTART_NOCHECK
CPX_CALLBACK_MIP = _pycplex_platform.CPX_CALLBACK_MIP
CPX_CALLBACK_MIP_BRANCH = _pycplex_platform.CPX_CALLBACK_MIP_BRANCH
CPX_CALLBACK_MIP_NODE = _pycplex_platform.CPX_CALLBACK_MIP_NODE
CPX_CALLBACK_MIP_HEURISTIC = _pycplex_platform.CPX_CALLBACK_MIP_HEURISTIC
CPX_CALLBACK_MIP_SOLVE = _pycplex_platform.CPX_CALLBACK_MIP_SOLVE
CPX_CALLBACK_MIP_CUT_LOOP = _pycplex_platform.CPX_CALLBACK_MIP_CUT_LOOP
CPX_CALLBACK_MIP_PROBE = _pycplex_platform.CPX_CALLBACK_MIP_PROBE
CPX_CALLBACK_MIP_FRACCUT = _pycplex_platform.CPX_CALLBACK_MIP_FRACCUT
CPX_CALLBACK_MIP_DISJCUT = _pycplex_platform.CPX_CALLBACK_MIP_DISJCUT
CPX_CALLBACK_MIP_FLOWMIR = _pycplex_platform.CPX_CALLBACK_MIP_FLOWMIR
CPX_CALLBACK_MIP_INCUMBENT_NODESOLN = _pycplex_platform.CPX_CALLBACK_MIP_INCUMBENT_NODESOLN
CPX_CALLBACK_MIP_DELETENODE = _pycplex_platform.CPX_CALLBACK_MIP_DELETENODE
CPX_CALLBACK_MIP_BRANCH_NOSOLN = _pycplex_platform.CPX_CALLBACK_MIP_BRANCH_NOSOLN
CPX_CALLBACK_MIP_CUT_LAST = _pycplex_platform.CPX_CALLBACK_MIP_CUT_LAST
CPX_CALLBACK_MIP_CUT_FEAS = _pycplex_platform.CPX_CALLBACK_MIP_CUT_FEAS
CPX_CALLBACK_MIP_CUT_UNBD = _pycplex_platform.CPX_CALLBACK_MIP_CUT_UNBD
CPX_CALLBACK_MIP_INCUMBENT_HEURSOLN = _pycplex_platform.CPX_CALLBACK_MIP_INCUMBENT_HEURSOLN
CPX_CALLBACK_MIP_INCUMBENT_USERSOLN = _pycplex_platform.CPX_CALLBACK_MIP_INCUMBENT_USERSOLN
CPX_CALLBACK_MIP_INCUMBENT_MIPSTART = _pycplex_platform.CPX_CALLBACK_MIP_INCUMBENT_MIPSTART
CPX_CALLBACK_INFO_BEST_INTEGER = _pycplex_platform.CPX_CALLBACK_INFO_BEST_INTEGER
CPX_CALLBACK_INFO_BEST_REMAINING = _pycplex_platform.CPX_CALLBACK_INFO_BEST_REMAINING
CPX_CALLBACK_INFO_NODE_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_NODE_COUNT
CPX_CALLBACK_INFO_NODES_LEFT = _pycplex_platform.CPX_CALLBACK_INFO_NODES_LEFT
CPX_CALLBACK_INFO_MIP_ITERATIONS = _pycplex_platform.CPX_CALLBACK_INFO_MIP_ITERATIONS
CPX_CALLBACK_INFO_CUTOFF = _pycplex_platform.CPX_CALLBACK_INFO_CUTOFF
CPX_CALLBACK_INFO_CLIQUE_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_CLIQUE_COUNT
CPX_CALLBACK_INFO_COVER_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_COVER_COUNT
CPX_CALLBACK_INFO_MIP_FEAS = _pycplex_platform.CPX_CALLBACK_INFO_MIP_FEAS
CPX_CALLBACK_INFO_FLOWCOVER_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_FLOWCOVER_COUNT
CPX_CALLBACK_INFO_GUBCOVER_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_GUBCOVER_COUNT
CPX_CALLBACK_INFO_IMPLBD_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_IMPLBD_COUNT
CPX_CALLBACK_INFO_PROBE_PHASE = _pycplex_platform.CPX_CALLBACK_INFO_PROBE_PHASE
CPX_CALLBACK_INFO_PROBE_PROGRESS = _pycplex_platform.CPX_CALLBACK_INFO_PROBE_PROGRESS
CPX_CALLBACK_INFO_FRACCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_FRACCUT_COUNT
CPX_CALLBACK_INFO_FRACCUT_PROGRESS = _pycplex_platform.CPX_CALLBACK_INFO_FRACCUT_PROGRESS
CPX_CALLBACK_INFO_DISJCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_DISJCUT_COUNT
CPX_CALLBACK_INFO_DISJCUT_PROGRESS = _pycplex_platform.CPX_CALLBACK_INFO_DISJCUT_PROGRESS
CPX_CALLBACK_INFO_FLOWPATH_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_FLOWPATH_COUNT
CPX_CALLBACK_INFO_MIRCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_MIRCUT_COUNT
CPX_CALLBACK_INFO_FLOWMIR_PROGRESS = _pycplex_platform.CPX_CALLBACK_INFO_FLOWMIR_PROGRESS
CPX_CALLBACK_INFO_ZEROHALFCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_ZEROHALFCUT_COUNT
CPX_CALLBACK_INFO_MY_THREAD_NUM = _pycplex_platform.CPX_CALLBACK_INFO_MY_THREAD_NUM
CPX_CALLBACK_INFO_USER_THREADS = _pycplex_platform.CPX_CALLBACK_INFO_USER_THREADS
CPX_CALLBACK_INFO_MIP_REL_GAP = _pycplex_platform.CPX_CALLBACK_INFO_MIP_REL_GAP
CPX_CALLBACK_INFO_MCFCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_MCFCUT_COUNT
CPX_CALLBACK_INFO_KAPPA_STABLE = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_STABLE
CPX_CALLBACK_INFO_KAPPA_SUSPICIOUS = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_SUSPICIOUS
CPX_CALLBACK_INFO_KAPPA_UNSTABLE = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_UNSTABLE
CPX_CALLBACK_INFO_KAPPA_ILLPOSED = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_ILLPOSED
CPX_CALLBACK_INFO_KAPPA_MAX = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_MAX
CPX_CALLBACK_INFO_KAPPA_ATTENTION = _pycplex_platform.CPX_CALLBACK_INFO_KAPPA_ATTENTION
CPX_CALLBACK_INFO_LANDPCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_LANDPCUT_COUNT
CPX_CALLBACK_INFO_USERCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_USERCUT_COUNT
CPX_CALLBACK_INFO_TABLECUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_TABLECUT_COUNT
CPX_CALLBACK_INFO_SOLNPOOLCUT_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_SOLNPOOLCUT_COUNT
CPX_CALLBACK_INFO_BENDERS_COUNT = _pycplex_platform.CPX_CALLBACK_INFO_BENDERS_COUNT
CPX_CALLBACK_INFO_NODE_COUNT_LONG = _pycplex_platform.CPX_CALLBACK_INFO_NODE_COUNT_LONG
CPX_CALLBACK_INFO_NODES_LEFT_LONG = _pycplex_platform.CPX_CALLBACK_INFO_NODES_LEFT_LONG
CPX_CALLBACK_INFO_MIP_ITERATIONS_LONG = _pycplex_platform.CPX_CALLBACK_INFO_MIP_ITERATIONS_LONG
CPX_CALLBACK_INFO_LAZY_SOURCE = _pycplex_platform.CPX_CALLBACK_INFO_LAZY_SOURCE
CPX_CALLBACK_INFO_NODE_SIINF = _pycplex_platform.CPX_CALLBACK_INFO_NODE_SIINF
CPX_CALLBACK_INFO_NODE_NIINF = _pycplex_platform.CPX_CALLBACK_INFO_NODE_NIINF
CPX_CALLBACK_INFO_NODE_ESTIMATE = _pycplex_platform.CPX_CALLBACK_INFO_NODE_ESTIMATE
CPX_CALLBACK_INFO_NODE_DEPTH = _pycplex_platform.CPX_CALLBACK_INFO_NODE_DEPTH
CPX_CALLBACK_INFO_NODE_OBJVAL = _pycplex_platform.CPX_CALLBACK_INFO_NODE_OBJVAL
CPX_CALLBACK_INFO_NODE_TYPE = _pycplex_platform.CPX_CALLBACK_INFO_NODE_TYPE
CPX_CALLBACK_INFO_NODE_VAR = _pycplex_platform.CPX_CALLBACK_INFO_NODE_VAR
CPX_CALLBACK_INFO_NODE_SOS = _pycplex_platform.CPX_CALLBACK_INFO_NODE_SOS
CPX_CALLBACK_INFO_NODE_SEQNUM = _pycplex_platform.CPX_CALLBACK_INFO_NODE_SEQNUM
CPX_CALLBACK_INFO_NODE_USERHANDLE = _pycplex_platform.CPX_CALLBACK_INFO_NODE_USERHANDLE
CPX_CALLBACK_INFO_NODE_NODENUM = _pycplex_platform.CPX_CALLBACK_INFO_NODE_NODENUM
CPX_CALLBACK_INFO_NODE_SEQNUM_LONG = _pycplex_platform.CPX_CALLBACK_INFO_NODE_SEQNUM_LONG
CPX_CALLBACK_INFO_NODE_NODENUM_LONG = _pycplex_platform.CPX_CALLBACK_INFO_NODE_NODENUM_LONG
CPX_CALLBACK_INFO_NODE_DEPTH_LONG = _pycplex_platform.CPX_CALLBACK_INFO_NODE_DEPTH_LONG
CPX_CALLBACK_INFO_SOS_TYPE = _pycplex_platform.CPX_CALLBACK_INFO_SOS_TYPE
CPX_CALLBACK_INFO_SOS_SIZE = _pycplex_platform.CPX_CALLBACK_INFO_SOS_SIZE
CPX_CALLBACK_INFO_SOS_IS_FEASIBLE = _pycplex_platform.CPX_CALLBACK_INFO_SOS_IS_FEASIBLE
CPX_CALLBACK_INFO_SOS_MEMBER_INDEX = _pycplex_platform.CPX_CALLBACK_INFO_SOS_MEMBER_INDEX
CPX_CALLBACK_INFO_SOS_MEMBER_REFVAL = _pycplex_platform.CPX_CALLBACK_INFO_SOS_MEMBER_REFVAL
CPX_CALLBACK_INFO_SOS_NUM = _pycplex_platform.CPX_CALLBACK_INFO_SOS_NUM
CPX_CALLBACK_INFO_IC_NUM = _pycplex_platform.CPX_CALLBACK_INFO_IC_NUM
CPX_CALLBACK_INFO_IC_IMPLYING_VAR = _pycplex_platform.CPX_CALLBACK_INFO_IC_IMPLYING_VAR
CPX_CALLBACK_INFO_IC_IMPLIED_VAR = _pycplex_platform.CPX_CALLBACK_INFO_IC_IMPLIED_VAR
CPX_CALLBACK_INFO_IC_SENSE = _pycplex_platform.CPX_CALLBACK_INFO_IC_SENSE
CPX_CALLBACK_INFO_IC_COMPL = _pycplex_platform.CPX_CALLBACK_INFO_IC_COMPL
CPX_CALLBACK_INFO_IC_RHS = _pycplex_platform.CPX_CALLBACK_INFO_IC_RHS
CPX_CALLBACK_INFO_IC_IS_FEASIBLE = _pycplex_platform.CPX_CALLBACK_INFO_IC_IS_FEASIBLE
CPX_INCUMBENT_ID = _pycplex_platform.CPX_INCUMBENT_ID
CPX_RAMPUP_DISABLED = _pycplex_platform.CPX_RAMPUP_DISABLED
CPX_RAMPUP_AUTO = _pycplex_platform.CPX_RAMPUP_AUTO
CPX_RAMPUP_DYNAMIC = _pycplex_platform.CPX_RAMPUP_DYNAMIC
CPX_RAMPUP_INFINITE = _pycplex_platform.CPX_RAMPUP_INFINITE
CPX_CALLBACK_DEFAULT = _pycplex_platform.CPX_CALLBACK_DEFAULT
CPX_CALLBACK_FAIL = _pycplex_platform.CPX_CALLBACK_FAIL
CPX_CALLBACK_SET = _pycplex_platform.CPX_CALLBACK_SET
CPX_CALLBACK_ABORT_CUT_LOOP = _pycplex_platform.CPX_CALLBACK_ABORT_CUT_LOOP
CPX_USECUT_FORCE = _pycplex_platform.CPX_USECUT_FORCE
CPX_USECUT_PURGE = _pycplex_platform.CPX_USECUT_PURGE
CPX_USECUT_FILTER = _pycplex_platform.CPX_USECUT_FILTER
CPX_INTEGER_FEASIBLE = _pycplex_platform.CPX_INTEGER_FEASIBLE
CPX_INTEGER_INFEASIBLE = _pycplex_platform.CPX_INTEGER_INFEASIBLE
CPX_IMPLIED_INTEGER_FEASIBLE = _pycplex_platform.CPX_IMPLIED_INTEGER_FEASIBLE
CPX_CON_LOWER_BOUND = _pycplex_platform.CPX_CON_LOWER_BOUND
CPX_CON_UPPER_BOUND = _pycplex_platform.CPX_CON_UPPER_BOUND
CPX_CON_LINEAR = _pycplex_platform.CPX_CON_LINEAR
CPX_CON_QUADRATIC = _pycplex_platform.CPX_CON_QUADRATIC
CPX_CON_SOS = _pycplex_platform.CPX_CON_SOS
CPX_CON_INDICATOR = _pycplex_platform.CPX_CON_INDICATOR
CPX_CON_PWL = _pycplex_platform.CPX_CON_PWL
CPX_CON_ABS = _pycplex_platform.CPX_CON_ABS
CPX_CON_MINEXPR = _pycplex_platform.CPX_CON_MINEXPR
CPX_CON_MAXEXPR = _pycplex_platform.CPX_CON_MAXEXPR
CPX_CON_LAST_CONTYPE = _pycplex_platform.CPX_CON_LAST_CONTYPE
CPX_INDICATOR_IF = _pycplex_platform.CPX_INDICATOR_IF
CPX_INDICATOR_ONLYIF = _pycplex_platform.CPX_INDICATOR_ONLYIF
CPX_INDICATOR_IFANDONLYIF = _pycplex_platform.CPX_INDICATOR_IFANDONLYIF
CPXNET_NO_DISPLAY_OBJECTIVE = _pycplex_platform.CPXNET_NO_DISPLAY_OBJECTIVE
CPXNET_TRUE_OBJECTIVE = _pycplex_platform.CPXNET_TRUE_OBJECTIVE
CPXNET_PENALIZED_OBJECTIVE = _pycplex_platform.CPXNET_PENALIZED_OBJECTIVE
CPXNET_PRICE_AUTO = _pycplex_platform.CPXNET_PRICE_AUTO
CPXNET_PRICE_PARTIAL = _pycplex_platform.CPXNET_PRICE_PARTIAL
CPXNET_PRICE_MULT_PART = _pycplex_platform.CPXNET_PRICE_MULT_PART
CPXNET_PRICE_SORT_MULT_PART = _pycplex_platform.CPXNET_PRICE_SORT_MULT_PART
CPX_NETFIND_PURE = _pycplex_platform.CPX_NETFIND_PURE
CPX_NETFIND_REFLECT = _pycplex_platform.CPX_NETFIND_REFLECT
CPX_NETFIND_SCALE = _pycplex_platform.CPX_NETFIND_SCALE
CPX_QCPDUALS_NO = _pycplex_platform.CPX_QCPDUALS_NO
CPX_QCPDUALS_IFPOSSIBLE = _pycplex_platform.CPX_QCPDUALS_IFPOSSIBLE
CPX_QCPDUALS_FORCE = _pycplex_platform.CPX_QCPDUALS_FORCE
CPX_CPXAUTOCONSTANTS_H_H = _pycplex_platform.CPX_CPXAUTOCONSTANTS_H_H
CPX_BENDERS_ANNOTATION = _pycplex_platform.CPX_BENDERS_ANNOTATION
CPX_BENDERS_MASTERVALUE = _pycplex_platform.CPX_BENDERS_MASTERVALUE
CPX_BIGINT = _pycplex_platform.CPX_BIGINT
CPX_BIGLONG = _pycplex_platform.CPX_BIGLONG
CPX_CALLBACKCONTEXT_BRANCHING = _pycplex_platform.CPX_CALLBACKCONTEXT_BRANCHING
CPX_CALLBACKCONTEXT_CANDIDATE = _pycplex_platform.CPX_CALLBACKCONTEXT_CANDIDATE
CPX_CALLBACKCONTEXT_GLOBAL_PROGRESS = _pycplex_platform.CPX_CALLBACKCONTEXT_GLOBAL_PROGRESS
CPX_CALLBACKCONTEXT_LOCAL_PROGRESS = _pycplex_platform.CPX_CALLBACKCONTEXT_LOCAL_PROGRESS
CPX_CALLBACKCONTEXT_RELAXATION = _pycplex_platform.CPX_CALLBACKCONTEXT_RELAXATION
CPX_CALLBACKCONTEXT_THREAD_DOWN = _pycplex_platform.CPX_CALLBACKCONTEXT_THREAD_DOWN
CPX_CALLBACKCONTEXT_THREAD_UP = _pycplex_platform.CPX_CALLBACKCONTEXT_THREAD_UP
CPX_DUAL_OBJ = _pycplex_platform.CPX_DUAL_OBJ
CPX_EXACT_KAPPA = _pycplex_platform.CPX_EXACT_KAPPA
CPX_KAPPA = _pycplex_platform.CPX_KAPPA
CPX_KAPPA_ATTENTION = _pycplex_platform.CPX_KAPPA_ATTENTION
CPX_KAPPA_ILLPOSED = _pycplex_platform.CPX_KAPPA_ILLPOSED
CPX_KAPPA_MAX = _pycplex_platform.CPX_KAPPA_MAX
CPX_KAPPA_STABLE = _pycplex_platform.CPX_KAPPA_STABLE
CPX_KAPPA_SUSPICIOUS = _pycplex_platform.CPX_KAPPA_SUSPICIOUS
CPX_KAPPA_UNSTABLE = _pycplex_platform.CPX_KAPPA_UNSTABLE
CPX_LAZYCONSTRAINTCALLBACK_HEUR = _pycplex_platform.CPX_LAZYCONSTRAINTCALLBACK_HEUR
CPX_LAZYCONSTRAINTCALLBACK_MIPSTART = _pycplex_platform.CPX_LAZYCONSTRAINTCALLBACK_MIPSTART
CPX_LAZYCONSTRAINTCALLBACK_NODE = _pycplex_platform.CPX_LAZYCONSTRAINTCALLBACK_NODE
CPX_LAZYCONSTRAINTCALLBACK_USER = _pycplex_platform.CPX_LAZYCONSTRAINTCALLBACK_USER
CPX_MAX_COMP_SLACK = _pycplex_platform.CPX_MAX_COMP_SLACK
CPX_MAX_DUAL_INFEAS = _pycplex_platform.CPX_MAX_DUAL_INFEAS
CPX_MAX_DUAL_RESIDUAL = _pycplex_platform.CPX_MAX_DUAL_RESIDUAL
CPX_MAX_INDSLACK_INFEAS = _pycplex_platform.CPX_MAX_INDSLACK_INFEAS
CPX_MAX_INT_INFEAS = _pycplex_platform.CPX_MAX_INT_INFEAS
CPX_MAX_PI = _pycplex_platform.CPX_MAX_PI
CPX_MAX_PRIMAL_INFEAS = _pycplex_platform.CPX_MAX_PRIMAL_INFEAS
CPX_MAX_PRIMAL_RESIDUAL = _pycplex_platform.CPX_MAX_PRIMAL_RESIDUAL
CPX_MAX_PWLSLACK_INFEAS = _pycplex_platform.CPX_MAX_PWLSLACK_INFEAS
CPX_MAX_QCPRIMAL_RESIDUAL = _pycplex_platform.CPX_MAX_QCPRIMAL_RESIDUAL
CPX_MAX_QCSLACK = _pycplex_platform.CPX_MAX_QCSLACK
CPX_MAX_QCSLACK_INFEAS = _pycplex_platform.CPX_MAX_QCSLACK_INFEAS
CPX_MAX_RED_COST = _pycplex_platform.CPX_MAX_RED_COST
CPX_MAX_SCALED_DUAL_INFEAS = _pycplex_platform.CPX_MAX_SCALED_DUAL_INFEAS
CPX_MAX_SCALED_DUAL_RESIDUAL = _pycplex_platform.CPX_MAX_SCALED_DUAL_RESIDUAL
CPX_MAX_SCALED_PI = _pycplex_platform.CPX_MAX_SCALED_PI
CPX_MAX_SCALED_PRIMAL_INFEAS = _pycplex_platform.CPX_MAX_SCALED_PRIMAL_INFEAS
CPX_MAX_SCALED_PRIMAL_RESIDUAL = _pycplex_platform.CPX_MAX_SCALED_PRIMAL_RESIDUAL
CPX_MAX_SCALED_RED_COST = _pycplex_platform.CPX_MAX_SCALED_RED_COST
CPX_MAX_SCALED_SLACK = _pycplex_platform.CPX_MAX_SCALED_SLACK
CPX_MAX_SCALED_X = _pycplex_platform.CPX_MAX_SCALED_X
CPX_MAX_SLACK = _pycplex_platform.CPX_MAX_SLACK
CPX_MAX_X = _pycplex_platform.CPX_MAX_X
CPX_MULTIOBJ_BARITCNT = _pycplex_platform.CPX_MULTIOBJ_BARITCNT
CPX_MULTIOBJ_BESTOBJVAL = _pycplex_platform.CPX_MULTIOBJ_BESTOBJVAL
CPX_MULTIOBJ_BLEND = _pycplex_platform.CPX_MULTIOBJ_BLEND
CPX_MULTIOBJ_DEGCNT = _pycplex_platform.CPX_MULTIOBJ_DEGCNT
CPX_MULTIOBJ_DETTIME = _pycplex_platform.CPX_MULTIOBJ_DETTIME
CPX_MULTIOBJ_DEXCH = _pycplex_platform.CPX_MULTIOBJ_DEXCH
CPX_MULTIOBJ_DPUSH = _pycplex_platform.CPX_MULTIOBJ_DPUSH
CPX_MULTIOBJ_ERROR = _pycplex_platform.CPX_MULTIOBJ_ERROR
CPX_MULTIOBJ_ITCNT = _pycplex_platform.CPX_MULTIOBJ_ITCNT
CPX_MULTIOBJ_METHOD = _pycplex_platform.CPX_MULTIOBJ_METHOD
CPX_MULTIOBJ_NODECNT = _pycplex_platform.CPX_MULTIOBJ_NODECNT
CPX_MULTIOBJ_NODELEFTCNT = _pycplex_platform.CPX_MULTIOBJ_NODELEFTCNT
CPX_MULTIOBJ_OBJVAL = _pycplex_platform.CPX_MULTIOBJ_OBJVAL
CPX_MULTIOBJ_PEXCH = _pycplex_platform.CPX_MULTIOBJ_PEXCH
CPX_MULTIOBJ_PHASE1CNT = _pycplex_platform.CPX_MULTIOBJ_PHASE1CNT
CPX_MULTIOBJ_PPUSH = _pycplex_platform.CPX_MULTIOBJ_PPUSH
CPX_MULTIOBJ_PRIORITY = _pycplex_platform.CPX_MULTIOBJ_PRIORITY
CPX_MULTIOBJ_SIFTITCNT = _pycplex_platform.CPX_MULTIOBJ_SIFTITCNT
CPX_MULTIOBJ_SIFTPHASE1CNT = _pycplex_platform.CPX_MULTIOBJ_SIFTPHASE1CNT
CPX_MULTIOBJ_STATUS = _pycplex_platform.CPX_MULTIOBJ_STATUS
CPX_MULTIOBJ_TIME = _pycplex_platform.CPX_MULTIOBJ_TIME
CPX_NO_PRIORITY_CHANGE = _pycplex_platform.CPX_NO_PRIORITY_CHANGE
CPX_OBJ_GAP = _pycplex_platform.CPX_OBJ_GAP
CPX_PRIMAL_OBJ = _pycplex_platform.CPX_PRIMAL_OBJ
CPX_RELAXATION_FLAG_NOSOLVE = _pycplex_platform.CPX_RELAXATION_FLAG_NOSOLVE
CPX_SOLNPOOL_DIV = _pycplex_platform.CPX_SOLNPOOL_DIV
CPX_SOLNPOOL_FIFO = _pycplex_platform.CPX_SOLNPOOL_FIFO
CPX_SOLNPOOL_FILTER_DIVERSITY = _pycplex_platform.CPX_SOLNPOOL_FILTER_DIVERSITY
CPX_SOLNPOOL_FILTER_RANGE = _pycplex_platform.CPX_SOLNPOOL_FILTER_RANGE
CPX_SOLNPOOL_OBJ = _pycplex_platform.CPX_SOLNPOOL_OBJ
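# LP/QP and conflict-refiner solution status codes (CPX_STAT_*):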
CPX_STAT_ABORT_DETTIME_LIM = _pycplex_platform.CPX_STAT_ABORT_DETTIME_LIM
CPX_STAT_ABORT_DUAL_OBJ_LIM = _pycplex_platform.CPX_STAT_ABORT_DUAL_OBJ_LIM
CPX_STAT_ABORT_IT_LIM = _pycplex_platform.CPX_STAT_ABORT_IT_LIM
CPX_STAT_ABORT_OBJ_LIM = _pycplex_platform.CPX_STAT_ABORT_OBJ_LIM
CPX_STAT_ABORT_PRIM_OBJ_LIM = _pycplex_platform.CPX_STAT_ABORT_PRIM_OBJ_LIM
CPX_STAT_ABORT_TIME_LIM = _pycplex_platform.CPX_STAT_ABORT_TIME_LIM
CPX_STAT_ABORT_USER = _pycplex_platform.CPX_STAT_ABORT_USER
CPX_STAT_BENDERS_NUM_BEST = _pycplex_platform.CPX_STAT_BENDERS_NUM_BEST
CPX_STAT_CONFLICT_ABORT_CONTRADICTION = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_CONTRADICTION
CPX_STAT_CONFLICT_ABORT_DETTIME_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_DETTIME_LIM
CPX_STAT_CONFLICT_ABORT_IT_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_IT_LIM
CPX_STAT_CONFLICT_ABORT_MEM_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_MEM_LIM
CPX_STAT_CONFLICT_ABORT_NODE_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_NODE_LIM
CPX_STAT_CONFLICT_ABORT_OBJ_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_OBJ_LIM
CPX_STAT_CONFLICT_ABORT_TIME_LIM = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_TIME_LIM
CPX_STAT_CONFLICT_ABORT_USER = _pycplex_platform.CPX_STAT_CONFLICT_ABORT_USER
CPX_STAT_CONFLICT_FEASIBLE = _pycplex_platform.CPX_STAT_CONFLICT_FEASIBLE
CPX_STAT_CONFLICT_MINIMAL = _pycplex_platform.CPX_STAT_CONFLICT_MINIMAL
CPX_STAT_FEASIBLE = _pycplex_platform.CPX_STAT_FEASIBLE
CPX_STAT_FEASIBLE_RELAXED_INF = _pycplex_platform.CPX_STAT_FEASIBLE_RELAXED_INF
CPX_STAT_FEASIBLE_RELAXED_QUAD = _pycplex_platform.CPX_STAT_FEASIBLE_RELAXED_QUAD
CPX_STAT_FEASIBLE_RELAXED_SUM = _pycplex_platform.CPX_STAT_FEASIBLE_RELAXED_SUM
CPX_STAT_FIRSTORDER = _pycplex_platform.CPX_STAT_FIRSTORDER
CPX_STAT_INFEASIBLE = _pycplex_platform.CPX_STAT_INFEASIBLE
CPX_STAT_INForUNBD = _pycplex_platform.CPX_STAT_INForUNBD
CPX_STAT_MULTIOBJ_INFEASIBLE = _pycplex_platform.CPX_STAT_MULTIOBJ_INFEASIBLE
CPX_STAT_MULTIOBJ_INForUNBD = _pycplex_platform.CPX_STAT_MULTIOBJ_INForUNBD
CPX_STAT_MULTIOBJ_NON_OPTIMAL = _pycplex_platform.CPX_STAT_MULTIOBJ_NON_OPTIMAL
CPX_STAT_MULTIOBJ_OPTIMAL = _pycplex_platform.CPX_STAT_MULTIOBJ_OPTIMAL
CPX_STAT_MULTIOBJ_STOPPED = _pycplex_platform.CPX_STAT_MULTIOBJ_STOPPED
CPX_STAT_MULTIOBJ_UNBOUNDED = _pycplex_platform.CPX_STAT_MULTIOBJ_UNBOUNDED
CPX_STAT_NUM_BEST = _pycplex_platform.CPX_STAT_NUM_BEST
CPX_STAT_OPTIMAL = _pycplex_platform.CPX_STAT_OPTIMAL
CPX_STAT_OPTIMAL_FACE_UNBOUNDED = _pycplex_platform.CPX_STAT_OPTIMAL_FACE_UNBOUNDED
CPX_STAT_OPTIMAL_INFEAS = _pycplex_platform.CPX_STAT_OPTIMAL_INFEAS
CPX_STAT_OPTIMAL_RELAXED_INF = _pycplex_platform.CPX_STAT_OPTIMAL_RELAXED_INF
CPX_STAT_OPTIMAL_RELAXED_QUAD = _pycplex_platform.CPX_STAT_OPTIMAL_RELAXED_QUAD
CPX_STAT_OPTIMAL_RELAXED_SUM = _pycplex_platform.CPX_STAT_OPTIMAL_RELAXED_SUM
CPX_STAT_UNBOUNDED = _pycplex_platform.CPX_STAT_UNBOUNDED
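# Solution quality identifiers, summed-value variants (CPX_SUM_*):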
CPX_SUM_COMP_SLACK = _pycplex_platform.CPX_SUM_COMP_SLACK
CPX_SUM_DUAL_INFEAS = _pycplex_platform.CPX_SUM_DUAL_INFEAS
CPX_SUM_DUAL_RESIDUAL = _pycplex_platform.CPX_SUM_DUAL_RESIDUAL
CPX_SUM_INDSLACK_INFEAS = _pycplex_platform.CPX_SUM_INDSLACK_INFEAS
CPX_SUM_INT_INFEAS = _pycplex_platform.CPX_SUM_INT_INFEAS
CPX_SUM_PI = _pycplex_platform.CPX_SUM_PI
CPX_SUM_PRIMAL_INFEAS = _pycplex_platform.CPX_SUM_PRIMAL_INFEAS
CPX_SUM_PRIMAL_RESIDUAL = _pycplex_platform.CPX_SUM_PRIMAL_RESIDUAL
CPX_SUM_PWLSLACK_INFEAS = _pycplex_platform.CPX_SUM_PWLSLACK_INFEAS
CPX_SUM_QCPRIMAL_RESIDUAL = _pycplex_platform.CPX_SUM_QCPRIMAL_RESIDUAL
CPX_SUM_QCSLACK = _pycplex_platform.CPX_SUM_QCSLACK
CPX_SUM_QCSLACK_INFEAS = _pycplex_platform.CPX_SUM_QCSLACK_INFEAS
CPX_SUM_RED_COST = _pycplex_platform.CPX_SUM_RED_COST
CPX_SUM_SCALED_DUAL_INFEAS = _pycplex_platform.CPX_SUM_SCALED_DUAL_INFEAS
CPX_SUM_SCALED_DUAL_RESIDUAL = _pycplex_platform.CPX_SUM_SCALED_DUAL_RESIDUAL
CPX_SUM_SCALED_PI = _pycplex_platform.CPX_SUM_SCALED_PI
CPX_SUM_SCALED_PRIMAL_INFEAS = _pycplex_platform.CPX_SUM_SCALED_PRIMAL_INFEAS
CPX_SUM_SCALED_PRIMAL_RESIDUAL = _pycplex_platform.CPX_SUM_SCALED_PRIMAL_RESIDUAL
CPX_SUM_SCALED_RED_COST = _pycplex_platform.CPX_SUM_SCALED_RED_COST
CPX_SUM_SCALED_SLACK = _pycplex_platform.CPX_SUM_SCALED_SLACK
CPX_SUM_SCALED_X = _pycplex_platform.CPX_SUM_SCALED_X
CPX_SUM_SLACK = _pycplex_platform.CPX_SUM_SLACK
CPX_SUM_X = _pycplex_platform.CPX_SUM_X
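# Callable Library error codes (CPXERR_*):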
CPXERR_ABORT_STRONGBRANCH = _pycplex_platform.CPXERR_ABORT_STRONGBRANCH
CPXERR_ADJ_SIGN_QUAD = _pycplex_platform.CPXERR_ADJ_SIGN_QUAD
CPXERR_ADJ_SIGN_SENSE = _pycplex_platform.CPXERR_ADJ_SIGN_SENSE
CPXERR_ADJ_SIGNS = _pycplex_platform.CPXERR_ADJ_SIGNS
CPXERR_ARC_INDEX_RANGE = _pycplex_platform.CPXERR_ARC_INDEX_RANGE
CPXERR_ARRAY_BAD_SOS_TYPE = _pycplex_platform.CPXERR_ARRAY_BAD_SOS_TYPE
CPXERR_ARRAY_NOT_ASCENDING = _pycplex_platform.CPXERR_ARRAY_NOT_ASCENDING
CPXERR_ARRAY_TOO_LONG = _pycplex_platform.CPXERR_ARRAY_TOO_LONG
CPXERR_BAD_ARGUMENT = _pycplex_platform.CPXERR_BAD_ARGUMENT
CPXERR_BAD_BOUND_SENSE = _pycplex_platform.CPXERR_BAD_BOUND_SENSE
CPXERR_BAD_BOUND_TYPE = _pycplex_platform.CPXERR_BAD_BOUND_TYPE
CPXERR_BAD_CHAR = _pycplex_platform.CPXERR_BAD_CHAR
CPXERR_BAD_CTYPE = _pycplex_platform.CPXERR_BAD_CTYPE
CPXERR_BAD_DECOMPOSITION = _pycplex_platform.CPXERR_BAD_DECOMPOSITION
CPXERR_BAD_DIRECTION = _pycplex_platform.CPXERR_BAD_DIRECTION
CPXERR_BAD_EXPO_RANGE = _pycplex_platform.CPXERR_BAD_EXPO_RANGE
CPXERR_BAD_EXPONENT = _pycplex_platform.CPXERR_BAD_EXPONENT
CPXERR_BAD_FILETYPE = _pycplex_platform.CPXERR_BAD_FILETYPE
CPXERR_BAD_ID = _pycplex_platform.CPXERR_BAD_ID
CPXERR_BAD_INDCONSTR = _pycplex_platform.CPXERR_BAD_INDCONSTR
CPXERR_BAD_INDICATOR = _pycplex_platform.CPXERR_BAD_INDICATOR
CPXERR_BAD_INDTYPE = _pycplex_platform.CPXERR_BAD_INDTYPE
CPXERR_BAD_LAZY_UCUT = _pycplex_platform.CPXERR_BAD_LAZY_UCUT
CPXERR_BAD_LUB = _pycplex_platform.CPXERR_BAD_LUB
CPXERR_BAD_METHOD = _pycplex_platform.CPXERR_BAD_METHOD
CPXERR_BAD_MULTIOBJ_ATTR = _pycplex_platform.CPXERR_BAD_MULTIOBJ_ATTR
CPXERR_BAD_NAME = _pycplex_platform.CPXERR_BAD_NAME
CPXERR_BAD_NUMBER = _pycplex_platform.CPXERR_BAD_NUMBER
CPXERR_BAD_OBJ_SENSE = _pycplex_platform.CPXERR_BAD_OBJ_SENSE
CPXERR_BAD_PARAM_NAME = _pycplex_platform.CPXERR_BAD_PARAM_NAME
CPXERR_BAD_PARAM_NUM = _pycplex_platform.CPXERR_BAD_PARAM_NUM
CPXERR_BAD_PIVOT = _pycplex_platform.CPXERR_BAD_PIVOT
CPXERR_BAD_PRIORITY = _pycplex_platform.CPXERR_BAD_PRIORITY
CPXERR_BAD_PROB_TYPE = _pycplex_platform.CPXERR_BAD_PROB_TYPE
CPXERR_BAD_ROW_ID = _pycplex_platform.CPXERR_BAD_ROW_ID
CPXERR_BAD_SECTION_BOUNDS = _pycplex_platform.CPXERR_BAD_SECTION_BOUNDS
CPXERR_BAD_SECTION_ENDATA = _pycplex_platform.CPXERR_BAD_SECTION_ENDATA
CPXERR_BAD_SECTION_QMATRIX = _pycplex_platform.CPXERR_BAD_SECTION_QMATRIX
CPXERR_BAD_SENSE = _pycplex_platform.CPXERR_BAD_SENSE
CPXERR_BAD_SOS_TYPE = _pycplex_platform.CPXERR_BAD_SOS_TYPE
CPXERR_BAD_STATUS = _pycplex_platform.CPXERR_BAD_STATUS
CPXERR_BAS_FILE_SHORT = _pycplex_platform.CPXERR_BAS_FILE_SHORT
CPXERR_BAS_FILE_SIZE = _pycplex_platform.CPXERR_BAS_FILE_SIZE
CPXERR_BENDERS_MASTER_SOLVE = _pycplex_platform.CPXERR_BENDERS_MASTER_SOLVE
CPXERR_CALLBACK = _pycplex_platform.CPXERR_CALLBACK
CPXERR_CALLBACK_INCONSISTENT = _pycplex_platform.CPXERR_CALLBACK_INCONSISTENT
CPXERR_CAND_NOT_POINT = _pycplex_platform.CPXERR_CAND_NOT_POINT
CPXERR_CAND_NOT_RAY = _pycplex_platform.CPXERR_CAND_NOT_RAY
CPXERR_CNTRL_IN_NAME = _pycplex_platform.CPXERR_CNTRL_IN_NAME
CPXERR_COL_INDEX_RANGE = _pycplex_platform.CPXERR_COL_INDEX_RANGE
CPXERR_COL_REPEAT_PRINT = _pycplex_platform.CPXERR_COL_REPEAT_PRINT
CPXERR_COL_REPEATS = _pycplex_platform.CPXERR_COL_REPEATS
CPXERR_COL_ROW_REPEATS = _pycplex_platform.CPXERR_COL_ROW_REPEATS
CPXERR_COL_UNKNOWN = _pycplex_platform.CPXERR_COL_UNKNOWN
CPXERR_CONFLICT_UNSTABLE = _pycplex_platform.CPXERR_CONFLICT_UNSTABLE
CPXERR_COUNT_OVERLAP = _pycplex_platform.CPXERR_COUNT_OVERLAP
CPXERR_COUNT_RANGE = _pycplex_platform.CPXERR_COUNT_RANGE
CPXERR_CPUBINDING_FAILURE = _pycplex_platform.CPXERR_CPUBINDING_FAILURE
CPXERR_DBL_MAX = _pycplex_platform.CPXERR_DBL_MAX
CPXERR_DECOMPRESSION = _pycplex_platform.CPXERR_DECOMPRESSION
CPXERR_DETTILIM_STRONGBRANCH = _pycplex_platform.CPXERR_DETTILIM_STRONGBRANCH
CPXERR_DUP_ENTRY = _pycplex_platform.CPXERR_DUP_ENTRY
CPXERR_DYNFUNC = _pycplex_platform.CPXERR_DYNFUNC
CPXERR_DYNLOAD = _pycplex_platform.CPXERR_DYNLOAD
CPXERR_ENCODING_CONVERSION = _pycplex_platform.CPXERR_ENCODING_CONVERSION
CPXERR_EXTRA_BV_BOUND = _pycplex_platform.CPXERR_EXTRA_BV_BOUND
CPXERR_EXTRA_FR_BOUND = _pycplex_platform.CPXERR_EXTRA_FR_BOUND
CPXERR_EXTRA_FX_BOUND = _pycplex_platform.CPXERR_EXTRA_FX_BOUND
CPXERR_EXTRA_INTEND = _pycplex_platform.CPXERR_EXTRA_INTEND
CPXERR_EXTRA_INTORG = _pycplex_platform.CPXERR_EXTRA_INTORG
CPXERR_EXTRA_SOSEND = _pycplex_platform.CPXERR_EXTRA_SOSEND
CPXERR_EXTRA_SOSORG = _pycplex_platform.CPXERR_EXTRA_SOSORG
CPXERR_FAIL_OPEN_READ = _pycplex_platform.CPXERR_FAIL_OPEN_READ
CPXERR_FAIL_OPEN_WRITE = _pycplex_platform.CPXERR_FAIL_OPEN_WRITE
CPXERR_FILE_ENTRIES = _pycplex_platform.CPXERR_FILE_ENTRIES
CPXERR_FILE_FORMAT = _pycplex_platform.CPXERR_FILE_FORMAT
CPXERR_FILE_IO = _pycplex_platform.CPXERR_FILE_IO
CPXERR_FILTER_VARIABLE_TYPE = _pycplex_platform.CPXERR_FILTER_VARIABLE_TYPE
CPXERR_ILL_DEFINED_PWL = _pycplex_platform.CPXERR_ILL_DEFINED_PWL
CPXERR_IN_INFOCALLBACK = _pycplex_platform.CPXERR_IN_INFOCALLBACK
CPXERR_INDEX_NOT_BASIC = _pycplex_platform.CPXERR_INDEX_NOT_BASIC
CPXERR_INDEX_RANGE = _pycplex_platform.CPXERR_INDEX_RANGE
CPXERR_INDEX_RANGE_HIGH = _pycplex_platform.CPXERR_INDEX_RANGE_HIGH
CPXERR_INDEX_RANGE_LOW = _pycplex_platform.CPXERR_INDEX_RANGE_LOW
CPXERR_INT_TOO_BIG = _pycplex_platform.CPXERR_INT_TOO_BIG
CPXERR_INT_TOO_BIG_INPUT = _pycplex_platform.CPXERR_INT_TOO_BIG_INPUT
CPXERR_INVALID_NUMBER = _pycplex_platform.CPXERR_INVALID_NUMBER
CPXERR_LIMITS_TOO_BIG = _pycplex_platform.CPXERR_LIMITS_TOO_BIG
CPXERR_LINE_TOO_LONG = _pycplex_platform.CPXERR_LINE_TOO_LONG
CPXERR_LO_BOUND_REPEATS = _pycplex_platform.CPXERR_LO_BOUND_REPEATS
CPXERR_LOCK_CREATE = _pycplex_platform.CPXERR_LOCK_CREATE
CPXERR_LP_NOT_IN_ENVIRONMENT = _pycplex_platform.CPXERR_LP_NOT_IN_ENVIRONMENT
CPXERR_LP_PARSE = _pycplex_platform.CPXERR_LP_PARSE
CPXERR_MASTER_SOLVE = _pycplex_platform.CPXERR_MASTER_SOLVE
CPXERR_MIPSEARCH_WITH_CALLBACKS = _pycplex_platform.CPXERR_MIPSEARCH_WITH_CALLBACKS
CPXERR_MISS_SOS_TYPE = _pycplex_platform.CPXERR_MISS_SOS_TYPE
CPXERR_MSG_NO_CHANNEL = _pycplex_platform.CPXERR_MSG_NO_CHANNEL
CPXERR_MSG_NO_FILEPTR = _pycplex_platform.CPXERR_MSG_NO_FILEPTR
CPXERR_MSG_NO_FUNCTION = _pycplex_platform.CPXERR_MSG_NO_FUNCTION
CPXERR_MULTIOBJ_SUBPROB_SOLVE = _pycplex_platform.CPXERR_MULTIOBJ_SUBPROB_SOLVE
CPXERR_MULTIPLE_PROBS_IN_REMOTE_ENVIRONMENT = _pycplex_platform.CPXERR_MULTIPLE_PROBS_IN_REMOTE_ENVIRONMENT
CPXERR_NAME_CREATION = _pycplex_platform.CPXERR_NAME_CREATION
CPXERR_NAME_NOT_FOUND = _pycplex_platform.CPXERR_NAME_NOT_FOUND
CPXERR_NAME_TOO_LONG = _pycplex_platform.CPXERR_NAME_TOO_LONG
CPXERR_NAN = _pycplex_platform.CPXERR_NAN
CPXERR_NEED_OPT_SOLN = _pycplex_platform.CPXERR_NEED_OPT_SOLN
CPXERR_NEGATIVE_SURPLUS = _pycplex_platform.CPXERR_NEGATIVE_SURPLUS
CPXERR_NET_DATA = _pycplex_platform.CPXERR_NET_DATA
CPXERR_NET_FILE_SHORT = _pycplex_platform.CPXERR_NET_FILE_SHORT
CPXERR_NO_BARRIER_SOLN = _pycplex_platform.CPXERR_NO_BARRIER_SOLN
CPXERR_NO_BASIC_SOLN = _pycplex_platform.CPXERR_NO_BASIC_SOLN
CPXERR_NO_BASIS = _pycplex_platform.CPXERR_NO_BASIS
CPXERR_NO_BOUND_SENSE = _pycplex_platform.CPXERR_NO_BOUND_SENSE
CPXERR_NO_BOUND_TYPE = _pycplex_platform.CPXERR_NO_BOUND_TYPE
CPXERR_NO_COLUMNS_SECTION = _pycplex_platform.CPXERR_NO_COLUMNS_SECTION
CPXERR_NO_CONFLICT = _pycplex_platform.CPXERR_NO_CONFLICT
CPXERR_NO_DECOMPOSITION = _pycplex_platform.CPXERR_NO_DECOMPOSITION
CPXERR_NO_DUAL_SOLN = _pycplex_platform.CPXERR_NO_DUAL_SOLN
CPXERR_NO_ENDATA = _pycplex_platform.CPXERR_NO_ENDATA
CPXERR_NO_ENVIRONMENT = _pycplex_platform.CPXERR_NO_ENVIRONMENT
CPXERR_NO_FILENAME = _pycplex_platform.CPXERR_NO_FILENAME
CPXERR_NO_ID = _pycplex_platform.CPXERR_NO_ID
CPXERR_NO_ID_FIRST = _pycplex_platform.CPXERR_NO_ID_FIRST
CPXERR_NO_INT_X = _pycplex_platform.CPXERR_NO_INT_X
CPXERR_NO_KAPPASTATS = _pycplex_platform.CPXERR_NO_KAPPASTATS
CPXERR_NO_LU_FACTOR = _pycplex_platform.CPXERR_NO_LU_FACTOR
CPXERR_NO_MEMORY = _pycplex_platform.CPXERR_NO_MEMORY
CPXERR_NO_MIPSTART = _pycplex_platform.CPXERR_NO_MIPSTART
CPXERR_NO_NAME_SECTION = _pycplex_platform.CPXERR_NO_NAME_SECTION
CPXERR_NO_NAMES = _pycplex_platform.CPXERR_NO_NAMES
CPXERR_NO_NORMS = _pycplex_platform.CPXERR_NO_NORMS
CPXERR_NO_NUMBER = _pycplex_platform.CPXERR_NO_NUMBER
CPXERR_NO_NUMBER_BOUND = _pycplex_platform.CPXERR_NO_NUMBER_BOUND
CPXERR_NO_NUMBER_FIRST = _pycplex_platform.CPXERR_NO_NUMBER_FIRST
CPXERR_NO_OBJ_NAME = _pycplex_platform.CPXERR_NO_OBJ_NAME
CPXERR_NO_OBJ_SENSE = _pycplex_platform.CPXERR_NO_OBJ_SENSE
CPXERR_NO_OBJECTIVE = _pycplex_platform.CPXERR_NO_OBJECTIVE
CPXERR_NO_OP_OR_SENSE = _pycplex_platform.CPXERR_NO_OP_OR_SENSE
CPXERR_NO_OPERATOR = _pycplex_platform.CPXERR_NO_OPERATOR
CPXERR_NO_ORDER = _pycplex_platform.CPXERR_NO_ORDER
CPXERR_NO_PROBLEM = _pycplex_platform.CPXERR_NO_PROBLEM
CPXERR_NO_QP_OPERATOR = _pycplex_platform.CPXERR_NO_QP_OPERATOR
CPXERR_NO_QUAD_EXP = _pycplex_platform.CPXERR_NO_QUAD_EXP
CPXERR_NO_RHS_COEFF = _pycplex_platform.CPXERR_NO_RHS_COEFF
CPXERR_NO_RHS_IN_OBJ = _pycplex_platform.CPXERR_NO_RHS_IN_OBJ
CPXERR_NO_ROW_NAME = _pycplex_platform.CPXERR_NO_ROW_NAME
CPXERR_NO_ROW_SENSE = _pycplex_platform.CPXERR_NO_ROW_SENSE
CPXERR_NO_ROWS_SECTION = _pycplex_platform.CPXERR_NO_ROWS_SECTION
CPXERR_NO_SENSIT = _pycplex_platform.CPXERR_NO_SENSIT
CPXERR_NO_SOLN = _pycplex_platform.CPXERR_NO_SOLN
CPXERR_NO_SOLNPOOL = _pycplex_platform.CPXERR_NO_SOLNPOOL
CPXERR_NO_SOS = _pycplex_platform.CPXERR_NO_SOS
CPXERR_NO_TREE = _pycplex_platform.CPXERR_NO_TREE
CPXERR_NO_VECTOR_SOLN = _pycplex_platform.CPXERR_NO_VECTOR_SOLN
CPXERR_NODE_INDEX_RANGE = _pycplex_platform.CPXERR_NODE_INDEX_RANGE
CPXERR_NODE_ON_DISK = _pycplex_platform.CPXERR_NODE_ON_DISK
CPXERR_NOT_DUAL_UNBOUNDED = _pycplex_platform.CPXERR_NOT_DUAL_UNBOUNDED
CPXERR_NOT_FIXED = _pycplex_platform.CPXERR_NOT_FIXED
CPXERR_NOT_FOR_BENDERS = _pycplex_platform.CPXERR_NOT_FOR_BENDERS
CPXERR_NOT_FOR_DISTMIP = _pycplex_platform.CPXERR_NOT_FOR_DISTMIP
CPXERR_NOT_FOR_MIP = _pycplex_platform.CPXERR_NOT_FOR_MIP
CPXERR_NOT_FOR_MULTIOBJ = _pycplex_platform.CPXERR_NOT_FOR_MULTIOBJ
CPXERR_NOT_FOR_QCP = _pycplex_platform.CPXERR_NOT_FOR_QCP
CPXERR_NOT_FOR_QP = _pycplex_platform.CPXERR_NOT_FOR_QP
CPXERR_NOT_MILPCLASS = _pycplex_platform.CPXERR_NOT_MILPCLASS
CPXERR_NOT_MIN_COST_FLOW = _pycplex_platform.CPXERR_NOT_MIN_COST_FLOW
CPXERR_NOT_MIP = _pycplex_platform.CPXERR_NOT_MIP
CPXERR_NOT_MIQPCLASS = _pycplex_platform.CPXERR_NOT_MIQPCLASS
CPXERR_NOT_ONE_PROBLEM = _pycplex_platform.CPXERR_NOT_ONE_PROBLEM
CPXERR_NOT_QP = _pycplex_platform.CPXERR_NOT_QP
CPXERR_NOT_SAV_FILE = _pycplex_platform.CPXERR_NOT_SAV_FILE
CPXERR_NOT_UNBOUNDED = _pycplex_platform.CPXERR_NOT_UNBOUNDED
CPXERR_NULL_POINTER = _pycplex_platform.CPXERR_NULL_POINTER
CPXERR_ORDER_BAD_DIRECTION = _pycplex_platform.CPXERR_ORDER_BAD_DIRECTION
CPXERR_OVERFLOW = _pycplex_platform.CPXERR_OVERFLOW
CPXERR_PARAM_INCOMPATIBLE = _pycplex_platform.CPXERR_PARAM_INCOMPATIBLE
CPXERR_PARAM_TOO_BIG = _pycplex_platform.CPXERR_PARAM_TOO_BIG
CPXERR_PARAM_TOO_SMALL = _pycplex_platform.CPXERR_PARAM_TOO_SMALL
CPXERR_PRESLV_ABORT = _pycplex_platform.CPXERR_PRESLV_ABORT
CPXERR_PRESLV_BAD_PARAM = _pycplex_platform.CPXERR_PRESLV_BAD_PARAM
CPXERR_PRESLV_BASIS_MEM = _pycplex_platform.CPXERR_PRESLV_BASIS_MEM
CPXERR_PRESLV_COPYORDER = _pycplex_platform.CPXERR_PRESLV_COPYORDER
CPXERR_PRESLV_COPYSOS = _pycplex_platform.CPXERR_PRESLV_COPYSOS
CPXERR_PRESLV_CRUSHFORM = _pycplex_platform.CPXERR_PRESLV_CRUSHFORM
CPXERR_PRESLV_DETTIME_LIM = _pycplex_platform.CPXERR_PRESLV_DETTIME_LIM
CPXERR_PRESLV_DUAL = _pycplex_platform.CPXERR_PRESLV_DUAL
CPXERR_PRESLV_FAIL_BASIS = _pycplex_platform.CPXERR_PRESLV_FAIL_BASIS
CPXERR_PRESLV_INF = _pycplex_platform.CPXERR_PRESLV_INF
CPXERR_PRESLV_INForUNBD = _pycplex_platform.CPXERR_PRESLV_INForUNBD
CPXERR_PRESLV_NO_BASIS = _pycplex_platform.CPXERR_PRESLV_NO_BASIS
CPXERR_PRESLV_NO_PROB = _pycplex_platform.CPXERR_PRESLV_NO_PROB
CPXERR_PRESLV_SOLN_MIP = _pycplex_platform.CPXERR_PRESLV_SOLN_MIP
CPXERR_PRESLV_SOLN_QP = _pycplex_platform.CPXERR_PRESLV_SOLN_QP
CPXERR_PRESLV_START_LP = _pycplex_platform.CPXERR_PRESLV_START_LP
CPXERR_PRESLV_TIME_LIM = _pycplex_platform.CPXERR_PRESLV_TIME_LIM
CPXERR_PRESLV_UNBD = _pycplex_platform.CPXERR_PRESLV_UNBD
CPXERR_PRESLV_UNCRUSHFORM = _pycplex_platform.CPXERR_PRESLV_UNCRUSHFORM
CPXERR_PRIIND = _pycplex_platform.CPXERR_PRIIND
CPXERR_PRM_DATA = _pycplex_platform.CPXERR_PRM_DATA
CPXERR_PROTOCOL = _pycplex_platform.CPXERR_PROTOCOL
CPXERR_Q_DIVISOR = _pycplex_platform.CPXERR_Q_DIVISOR
CPXERR_Q_DUP_ENTRY = _pycplex_platform.CPXERR_Q_DUP_ENTRY
CPXERR_Q_NOT_INDEF = _pycplex_platform.CPXERR_Q_NOT_INDEF
CPXERR_Q_NOT_POS_DEF = _pycplex_platform.CPXERR_Q_NOT_POS_DEF
CPXERR_Q_NOT_SYMMETRIC = _pycplex_platform.CPXERR_Q_NOT_SYMMETRIC
CPXERR_QCP_SENSE = _pycplex_platform.CPXERR_QCP_SENSE
CPXERR_QCP_SENSE_FILE = _pycplex_platform.CPXERR_QCP_SENSE_FILE
CPXERR_QUAD_EXP_NOT_2 = _pycplex_platform.CPXERR_QUAD_EXP_NOT_2
CPXERR_QUAD_IN_ROW = _pycplex_platform.CPXERR_QUAD_IN_ROW
CPXERR_RANGE_SECTION_ORDER = _pycplex_platform.CPXERR_RANGE_SECTION_ORDER
CPXERR_RESTRICTED_VERSION = _pycplex_platform.CPXERR_RESTRICTED_VERSION
CPXERR_RHS_IN_OBJ = _pycplex_platform.CPXERR_RHS_IN_OBJ
CPXERR_RIM_REPEATS = _pycplex_platform.CPXERR_RIM_REPEATS
CPXERR_RIM_ROW_REPEATS = _pycplex_platform.CPXERR_RIM_ROW_REPEATS
CPXERR_RIMNZ_REPEATS = _pycplex_platform.CPXERR_RIMNZ_REPEATS
CPXERR_ROW_INDEX_RANGE = _pycplex_platform.CPXERR_ROW_INDEX_RANGE
CPXERR_ROW_REPEAT_PRINT = _pycplex_platform.CPXERR_ROW_REPEAT_PRINT
CPXERR_ROW_REPEATS = _pycplex_platform.CPXERR_ROW_REPEATS
CPXERR_ROW_UNKNOWN = _pycplex_platform.CPXERR_ROW_UNKNOWN
CPXERR_SAV_FILE_DATA = _pycplex_platform.CPXERR_SAV_FILE_DATA
CPXERR_SAV_FILE_VALUE = _pycplex_platform.CPXERR_SAV_FILE_VALUE
CPXERR_SAV_FILE_WRITE = _pycplex_platform.CPXERR_SAV_FILE_WRITE
CPXERR_SBASE_ILLEGAL = _pycplex_platform.CPXERR_SBASE_ILLEGAL
CPXERR_SBASE_INCOMPAT = _pycplex_platform.CPXERR_SBASE_INCOMPAT
CPXERR_SINGULAR = _pycplex_platform.CPXERR_SINGULAR
CPXERR_STR_PARAM_TOO_LONG = _pycplex_platform.CPXERR_STR_PARAM_TOO_LONG
CPXERR_SUBPROB_SOLVE = _pycplex_platform.CPXERR_SUBPROB_SOLVE
CPXERR_SYNCPRIM_CREATE = _pycplex_platform.CPXERR_SYNCPRIM_CREATE
CPXERR_SYSCALL = _pycplex_platform.CPXERR_SYSCALL
CPXERR_THREAD_FAILED = _pycplex_platform.CPXERR_THREAD_FAILED
CPXERR_TILIM_CONDITION_NO = _pycplex_platform.CPXERR_TILIM_CONDITION_NO
CPXERR_TILIM_STRONGBRANCH = _pycplex_platform.CPXERR_TILIM_STRONGBRANCH
CPXERR_TOO_MANY_COEFFS = _pycplex_platform.CPXERR_TOO_MANY_COEFFS
CPXERR_TOO_MANY_COLS = _pycplex_platform.CPXERR_TOO_MANY_COLS
CPXERR_TOO_MANY_RIMNZ = _pycplex_platform.CPXERR_TOO_MANY_RIMNZ
CPXERR_TOO_MANY_RIMS = _pycplex_platform.CPXERR_TOO_MANY_RIMS
CPXERR_TOO_MANY_ROWS = _pycplex_platform.CPXERR_TOO_MANY_ROWS
CPXERR_TOO_MANY_THREADS = _pycplex_platform.CPXERR_TOO_MANY_THREADS
CPXERR_TREE_MEMORY_LIMIT = _pycplex_platform.CPXERR_TREE_MEMORY_LIMIT
CPXERR_TUNE_MIXED = _pycplex_platform.CPXERR_TUNE_MIXED
CPXERR_UNIQUE_WEIGHTS = _pycplex_platform.CPXERR_UNIQUE_WEIGHTS
CPXERR_UNSUPPORTED_CONSTRAINT_TYPE = _pycplex_platform.CPXERR_UNSUPPORTED_CONSTRAINT_TYPE
CPXERR_UNSUPPORTED_OPERATION = _pycplex_platform.CPXERR_UNSUPPORTED_OPERATION
CPXERR_UP_BOUND_REPEATS = _pycplex_platform.CPXERR_UP_BOUND_REPEATS
CPXERR_WORK_FILE_OPEN = _pycplex_platform.CPXERR_WORK_FILE_OPEN
CPXERR_WORK_FILE_READ = _pycplex_platform.CPXERR_WORK_FILE_READ
CPXERR_WORK_FILE_WRITE = _pycplex_platform.CPXERR_WORK_FILE_WRITE
CPXERR_XMLPARSE = _pycplex_platform.CPXERR_XMLPARSE
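# Message buffer size, followed by modeling-assistance warning identifiers (CPXMI_*):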
CPXMESSAGEBUFSIZE = _pycplex_platform.CPXMESSAGEBUFSIZE
CPXMI_BIGM_COEF = _pycplex_platform.CPXMI_BIGM_COEF
CPXMI_BIGM_TO_IND = _pycplex_platform.CPXMI_BIGM_TO_IND
CPXMI_BIGM_VARBOUND = _pycplex_platform.CPXMI_BIGM_VARBOUND
CPXMI_CANCEL_TOL = _pycplex_platform.CPXMI_CANCEL_TOL
CPXMI_EPGAP_LARGE = _pycplex_platform.CPXMI_EPGAP_LARGE
CPXMI_EPGAP_OBJOFFSET = _pycplex_platform.CPXMI_EPGAP_OBJOFFSET
CPXMI_FEAS_TOL = _pycplex_platform.CPXMI_FEAS_TOL
CPXMI_FRACTION_SCALING = _pycplex_platform.CPXMI_FRACTION_SCALING
CPXMI_IND_NZ_LARGE_NUM = _pycplex_platform.CPXMI_IND_NZ_LARGE_NUM
CPXMI_IND_NZ_SMALL_NUM = _pycplex_platform.CPXMI_IND_NZ_SMALL_NUM
CPXMI_IND_RHS_LARGE_NUM = _pycplex_platform.CPXMI_IND_RHS_LARGE_NUM
CPXMI_IND_RHS_SMALL_NUM = _pycplex_platform.CPXMI_IND_RHS_SMALL_NUM
CPXMI_KAPPA_ILLPOSED = _pycplex_platform.CPXMI_KAPPA_ILLPOSED
CPXMI_KAPPA_SUSPICIOUS = _pycplex_platform.CPXMI_KAPPA_SUSPICIOUS
CPXMI_KAPPA_UNSTABLE = _pycplex_platform.CPXMI_KAPPA_UNSTABLE
CPXMI_LB_LARGE_NUM = _pycplex_platform.CPXMI_LB_LARGE_NUM
CPXMI_LB_SMALL_NUM = _pycplex_platform.CPXMI_LB_SMALL_NUM
CPXMI_LC_NZ_LARGE_NUM = _pycplex_platform.CPXMI_LC_NZ_LARGE_NUM
CPXMI_LC_NZ_SMALL_NUM = _pycplex_platform.CPXMI_LC_NZ_SMALL_NUM
CPXMI_LC_RHS_LARGE_NUM = _pycplex_platform.CPXMI_LC_RHS_LARGE_NUM
CPXMI_LC_RHS_SMALL_NUM = _pycplex_platform.CPXMI_LC_RHS_SMALL_NUM
CPXMI_MULTIOBJ_COEFFS = _pycplex_platform.CPXMI_MULTIOBJ_COEFFS
CPXMI_MULTIOBJ_LARGE_NUM = _pycplex_platform.CPXMI_MULTIOBJ_LARGE_NUM
CPXMI_MULTIOBJ_MIX = _pycplex_platform.CPXMI_MULTIOBJ_MIX
CPXMI_MULTIOBJ_OPT_TOL = _pycplex_platform.CPXMI_MULTIOBJ_OPT_TOL
CPXMI_MULTIOBJ_SMALL_NUM = _pycplex_platform.CPXMI_MULTIOBJ_SMALL_NUM
CPXMI_NZ_LARGE_NUM = _pycplex_platform.CPXMI_NZ_LARGE_NUM
CPXMI_NZ_SMALL_NUM = _pycplex_platform.CPXMI_NZ_SMALL_NUM
CPXMI_OBJ_LARGE_NUM = _pycplex_platform.CPXMI_OBJ_LARGE_NUM
CPXMI_OBJ_SMALL_NUM = _pycplex_platform.CPXMI_OBJ_SMALL_NUM
CPXMI_OPT_TOL = _pycplex_platform.CPXMI_OPT_TOL
CPXMI_PWL_SLOPE_LARGE_NUM = _pycplex_platform.CPXMI_PWL_SLOPE_LARGE_NUM
CPXMI_PWL_SLOPE_SMALL_NUM = _pycplex_platform.CPXMI_PWL_SLOPE_SMALL_NUM
CPXMI_QC_LINNZ_LARGE_NUM = _pycplex_platform.CPXMI_QC_LINNZ_LARGE_NUM
CPXMI_QC_LINNZ_SMALL_NUM = _pycplex_platform.CPXMI_QC_LINNZ_SMALL_NUM
CPXMI_QC_QNZ_LARGE_NUM = _pycplex_platform.CPXMI_QC_QNZ_LARGE_NUM
CPXMI_QC_QNZ_SMALL_NUM = _pycplex_platform.CPXMI_QC_QNZ_SMALL_NUM
CPXMI_QC_RHS_LARGE_NUM = _pycplex_platform.CPXMI_QC_RHS_LARGE_NUM
CPXMI_QC_RHS_SMALL_NUM = _pycplex_platform.CPXMI_QC_RHS_SMALL_NUM
CPXMI_QOBJ_LARGE_NUM = _pycplex_platform.CPXMI_QOBJ_LARGE_NUM
CPXMI_QOBJ_SMALL_NUM = _pycplex_platform.CPXMI_QOBJ_SMALL_NUM
CPXMI_QOPT_TOL = _pycplex_platform.CPXMI_QOPT_TOL
CPXMI_RHS_LARGE_NUM = _pycplex_platform.CPXMI_RHS_LARGE_NUM
CPXMI_RHS_SMALL_NUM = _pycplex_platform.CPXMI_RHS_SMALL_NUM
CPXMI_SAMECOEFF_COL = _pycplex_platform.CPXMI_SAMECOEFF_COL
CPXMI_SAMECOEFF_IND = _pycplex_platform.CPXMI_SAMECOEFF_IND
CPXMI_SAMECOEFF_LAZY = _pycplex_platform.CPXMI_SAMECOEFF_LAZY
CPXMI_SAMECOEFF_MULTIOBJ = _pycplex_platform.CPXMI_SAMECOEFF_MULTIOBJ
CPXMI_SAMECOEFF_OBJ = _pycplex_platform.CPXMI_SAMECOEFF_OBJ
CPXMI_SAMECOEFF_QLIN = _pycplex_platform.CPXMI_SAMECOEFF_QLIN
CPXMI_SAMECOEFF_QUAD = _pycplex_platform.CPXMI_SAMECOEFF_QUAD
CPXMI_SAMECOEFF_RHS = _pycplex_platform.CPXMI_SAMECOEFF_RHS
CPXMI_SAMECOEFF_ROW = _pycplex_platform.CPXMI_SAMECOEFF_ROW
CPXMI_SAMECOEFF_UCUT = _pycplex_platform.CPXMI_SAMECOEFF_UCUT
CPXMI_SINGLE_PRECISION = _pycplex_platform.CPXMI_SINGLE_PRECISION
CPXMI_SYMMETRY_BREAKING_INEQ = _pycplex_platform.CPXMI_SYMMETRY_BREAKING_INEQ
CPXMI_UB_LARGE_NUM = _pycplex_platform.CPXMI_UB_LARGE_NUM
CPXMI_UB_SMALL_NUM = _pycplex_platform.CPXMI_UB_SMALL_NUM
CPXMI_UC_NZ_LARGE_NUM = _pycplex_platform.CPXMI_UC_NZ_LARGE_NUM
CPXMI_UC_NZ_SMALL_NUM = _pycplex_platform.CPXMI_UC_NZ_SMALL_NUM
CPXMI_UC_RHS_LARGE_NUM = _pycplex_platform.CPXMI_UC_RHS_LARGE_NUM
CPXMI_UC_RHS_SMALL_NUM = _pycplex_platform.CPXMI_UC_RHS_SMALL_NUM
CPXMI_WIDE_COEFF_RANGE = _pycplex_platform.CPXMI_WIDE_COEFF_RANGE
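# MIP solution status codes (CPXMIP_*):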
CPXMIP_ABORT_FEAS = _pycplex_platform.CPXMIP_ABORT_FEAS
CPXMIP_ABORT_INFEAS = _pycplex_platform.CPXMIP_ABORT_INFEAS
CPXMIP_ABORT_RELAXATION_UNBOUNDED = _pycplex_platform.CPXMIP_ABORT_RELAXATION_UNBOUNDED
CPXMIP_ABORT_RELAXED = _pycplex_platform.CPXMIP_ABORT_RELAXED
CPXMIP_DETTIME_LIM_FEAS = _pycplex_platform.CPXMIP_DETTIME_LIM_FEAS
CPXMIP_DETTIME_LIM_INFEAS = _pycplex_platform.CPXMIP_DETTIME_LIM_INFEAS
CPXMIP_FAIL_FEAS = _pycplex_platform.CPXMIP_FAIL_FEAS
CPXMIP_FAIL_FEAS_NO_TREE = _pycplex_platform.CPXMIP_FAIL_FEAS_NO_TREE
CPXMIP_FAIL_INFEAS = _pycplex_platform.CPXMIP_FAIL_INFEAS
CPXMIP_FAIL_INFEAS_NO_TREE = _pycplex_platform.CPXMIP_FAIL_INFEAS_NO_TREE
CPXMIP_FEASIBLE = _pycplex_platform.CPXMIP_FEASIBLE
CPXMIP_FEASIBLE_RELAXED_INF = _pycplex_platform.CPXMIP_FEASIBLE_RELAXED_INF
CPXMIP_FEASIBLE_RELAXED_QUAD = _pycplex_platform.CPXMIP_FEASIBLE_RELAXED_QUAD
CPXMIP_FEASIBLE_RELAXED_SUM = _pycplex_platform.CPXMIP_FEASIBLE_RELAXED_SUM
CPXMIP_INFEASIBLE = _pycplex_platform.CPXMIP_INFEASIBLE
CPXMIP_INForUNBD = _pycplex_platform.CPXMIP_INForUNBD
CPXMIP_MEM_LIM_FEAS = _pycplex_platform.CPXMIP_MEM_LIM_FEAS
CPXMIP_MEM_LIM_INFEAS = _pycplex_platform.CPXMIP_MEM_LIM_INFEAS
CPXMIP_NODE_LIM_FEAS = _pycplex_platform.CPXMIP_NODE_LIM_FEAS
CPXMIP_NODE_LIM_INFEAS = _pycplex_platform.CPXMIP_NODE_LIM_INFEAS
CPXMIP_OPTIMAL = _pycplex_platform.CPXMIP_OPTIMAL
CPXMIP_OPTIMAL_INFEAS = _pycplex_platform.CPXMIP_OPTIMAL_INFEAS
CPXMIP_OPTIMAL_POPULATED = _pycplex_platform.CPXMIP_OPTIMAL_POPULATED
CPXMIP_OPTIMAL_POPULATED_TOL = _pycplex_platform.CPXMIP_OPTIMAL_POPULATED_TOL
CPXMIP_OPTIMAL_RELAXED_INF = _pycplex_platform.CPXMIP_OPTIMAL_RELAXED_INF
CPXMIP_OPTIMAL_RELAXED_QUAD = _pycplex_platform.CPXMIP_OPTIMAL_RELAXED_QUAD
CPXMIP_OPTIMAL_RELAXED_SUM = _pycplex_platform.CPXMIP_OPTIMAL_RELAXED_SUM
CPXMIP_OPTIMAL_TOL = _pycplex_platform.CPXMIP_OPTIMAL_TOL
CPXMIP_POPULATESOL_LIM = _pycplex_platform.CPXMIP_POPULATESOL_LIM
CPXMIP_SOL_LIM = _pycplex_platform.CPXMIP_SOL_LIM
CPXMIP_TIME_LIM_FEAS = _pycplex_platform.CPXMIP_TIME_LIM_FEAS
CPXMIP_TIME_LIM_INFEAS = _pycplex_platform.CPXMIP_TIME_LIM_INFEAS
CPXMIP_UNBOUNDED = _pycplex_platform.CPXMIP_UNBOUNDED
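# Auto-enum header-guard marker, then generic callback information keys (CPXCALLBACKINFO_*):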
CPX_CPXAUTOENUMS_H_H = _pycplex_platform.CPX_CPXAUTOENUMS_H_H
CPXCALLBACKINFO_THREADID = _pycplex_platform.CPXCALLBACKINFO_THREADID
CPXCALLBACKINFO_NODECOUNT = _pycplex_platform.CPXCALLBACKINFO_NODECOUNT
CPXCALLBACKINFO_ITCOUNT = _pycplex_platform.CPXCALLBACKINFO_ITCOUNT
CPXCALLBACKINFO_BEST_SOL = _pycplex_platform.CPXCALLBACKINFO_BEST_SOL
CPXCALLBACKINFO_BEST_BND = _pycplex_platform.CPXCALLBACKINFO_BEST_BND
CPXCALLBACKINFO_THREADS = _pycplex_platform.CPXCALLBACKINFO_THREADS
CPXCALLBACKINFO_FEASIBLE = _pycplex_platform.CPXCALLBACKINFO_FEASIBLE
CPXCALLBACKINFO_TIME = _pycplex_platform.CPXCALLBACKINFO_TIME
CPXCALLBACKINFO_DETTIME = _pycplex_platform.CPXCALLBACKINFO_DETTIME
CPXCALLBACKINFO_NODEUID = _pycplex_platform.CPXCALLBACKINFO_NODEUID
CPXCALLBACKINFO_NODEDEPTH = _pycplex_platform.CPXCALLBACKINFO_NODEDEPTH
CPXCALLBACKINFO_CANDIDATE_SOURCE = _pycplex_platform.CPXCALLBACKINFO_CANDIDATE_SOURCE
CPXCALLBACKINFO_RESTARTS = _pycplex_platform.CPXCALLBACKINFO_RESTARTS
CPXCALLBACKINFO_AFTERCUTLOOP = _pycplex_platform.CPXCALLBACKINFO_AFTERCUTLOOP
CPXCALLBACKINFO_NODESLEFT = _pycplex_platform.CPXCALLBACKINFO_NODESLEFT
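# Strategies for solutions posted from a generic callback (CPXCALLBACKSOLUTION_*):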
CPXCALLBACKSOLUTION_NOCHECK = _pycplex_platform.CPXCALLBACKSOLUTION_NOCHECK
CPXCALLBACKSOLUTION_CHECKFEAS = _pycplex_platform.CPXCALLBACKSOLUTION_CHECKFEAS
CPXCALLBACKSOLUTION_PROPAGATE = _pycplex_platform.CPXCALLBACKSOLUTION_PROPAGATE
CPXCALLBACKSOLUTION_SOLVE = _pycplex_platform.CPXCALLBACKSOLUTION_SOLVE
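# Type tags for callback information values (CPXINFO_*):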
CPXINFO_BYTE = _pycplex_platform.CPXINFO_BYTE
CPXINFO_SHORT = _pycplex_platform.CPXINFO_SHORT
CPXINFO_INT = _pycplex_platform.CPXINFO_INT
CPXINFO_LONG = _pycplex_platform.CPXINFO_LONG
CPXINFO_DOUBLE = _pycplex_platform.CPXINFO_DOUBLE
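# Public parameter identifiers (CPX_PARAM_*): general, simplex, presolve and tuning controls first: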
CPXPUBLICPARAMS_H = _pycplex_platform.CPXPUBLICPARAMS_H
CPX_PARAM_ADVIND = _pycplex_platform.CPX_PARAM_ADVIND
CPX_PARAM_AGGFILL = _pycplex_platform.CPX_PARAM_AGGFILL
CPX_PARAM_AGGIND = _pycplex_platform.CPX_PARAM_AGGIND
CPX_PARAM_CLOCKTYPE = _pycplex_platform.CPX_PARAM_CLOCKTYPE
CPX_PARAM_CRAIND = _pycplex_platform.CPX_PARAM_CRAIND
CPX_PARAM_DEPIND = _pycplex_platform.CPX_PARAM_DEPIND
CPX_PARAM_DPRIIND = _pycplex_platform.CPX_PARAM_DPRIIND
CPX_PARAM_PRICELIM = _pycplex_platform.CPX_PARAM_PRICELIM
CPX_PARAM_EPMRK = _pycplex_platform.CPX_PARAM_EPMRK
CPX_PARAM_EPOPT = _pycplex_platform.CPX_PARAM_EPOPT
CPX_PARAM_EPPER = _pycplex_platform.CPX_PARAM_EPPER
CPX_PARAM_EPRHS = _pycplex_platform.CPX_PARAM_EPRHS
CPX_PARAM_SIMDISPLAY = _pycplex_platform.CPX_PARAM_SIMDISPLAY
CPX_PARAM_ITLIM = _pycplex_platform.CPX_PARAM_ITLIM
CPX_PARAM_ROWREADLIM = _pycplex_platform.CPX_PARAM_ROWREADLIM
CPX_PARAM_NETFIND = _pycplex_platform.CPX_PARAM_NETFIND
CPX_PARAM_COLREADLIM = _pycplex_platform.CPX_PARAM_COLREADLIM
CPX_PARAM_NZREADLIM = _pycplex_platform.CPX_PARAM_NZREADLIM
CPX_PARAM_OBJLLIM = _pycplex_platform.CPX_PARAM_OBJLLIM
CPX_PARAM_OBJULIM = _pycplex_platform.CPX_PARAM_OBJULIM
CPX_PARAM_PERIND = _pycplex_platform.CPX_PARAM_PERIND
CPX_PARAM_PERLIM = _pycplex_platform.CPX_PARAM_PERLIM
CPX_PARAM_PPRIIND = _pycplex_platform.CPX_PARAM_PPRIIND
CPX_PARAM_PREIND = _pycplex_platform.CPX_PARAM_PREIND
CPX_PARAM_REINV = _pycplex_platform.CPX_PARAM_REINV
CPX_PARAM_SCAIND = _pycplex_platform.CPX_PARAM_SCAIND
CPX_PARAM_SCRIND = _pycplex_platform.CPX_PARAM_SCRIND
CPX_PARAM_SINGLIM = _pycplex_platform.CPX_PARAM_SINGLIM
CPX_PARAM_TILIM = _pycplex_platform.CPX_PARAM_TILIM
CPX_PARAM_PREDUAL = _pycplex_platform.CPX_PARAM_PREDUAL
CPX_PARAM_PREPASS = _pycplex_platform.CPX_PARAM_PREPASS
CPX_PARAM_DATACHECK = _pycplex_platform.CPX_PARAM_DATACHECK
CPX_PARAM_REDUCE = _pycplex_platform.CPX_PARAM_REDUCE
CPX_PARAM_PRELINEAR = _pycplex_platform.CPX_PARAM_PRELINEAR
CPX_PARAM_LPMETHOD = _pycplex_platform.CPX_PARAM_LPMETHOD
CPX_PARAM_QPMETHOD = _pycplex_platform.CPX_PARAM_QPMETHOD
CPX_PARAM_WORKDIR = _pycplex_platform.CPX_PARAM_WORKDIR
CPX_PARAM_WORKMEM = _pycplex_platform.CPX_PARAM_WORKMEM
CPX_PARAM_THREADS = _pycplex_platform.CPX_PARAM_THREADS
CPX_PARAM_CONFLICTALG = _pycplex_platform.CPX_PARAM_CONFLICTALG
CPX_PARAM_CONFLICTDISPLAY = _pycplex_platform.CPX_PARAM_CONFLICTDISPLAY
CPX_PARAM_SIFTDISPLAY = _pycplex_platform.CPX_PARAM_SIFTDISPLAY
CPX_PARAM_SIFTALG = _pycplex_platform.CPX_PARAM_SIFTALG
CPX_PARAM_SIFTITLIM = _pycplex_platform.CPX_PARAM_SIFTITLIM
CPX_PARAM_MPSLONGNUM = _pycplex_platform.CPX_PARAM_MPSLONGNUM
CPX_PARAM_MEMORYEMPHASIS = _pycplex_platform.CPX_PARAM_MEMORYEMPHASIS
CPX_PARAM_NUMERICALEMPHASIS = _pycplex_platform.CPX_PARAM_NUMERICALEMPHASIS
CPX_PARAM_FEASOPTMODE = _pycplex_platform.CPX_PARAM_FEASOPTMODE
CPX_PARAM_PARALLELMODE = _pycplex_platform.CPX_PARAM_PARALLELMODE
CPX_PARAM_TUNINGMEASURE = _pycplex_platform.CPX_PARAM_TUNINGMEASURE
CPX_PARAM_TUNINGREPEAT = _pycplex_platform.CPX_PARAM_TUNINGREPEAT
CPX_PARAM_TUNINGTILIM = _pycplex_platform.CPX_PARAM_TUNINGTILIM
CPX_PARAM_TUNINGDISPLAY = _pycplex_platform.CPX_PARAM_TUNINGDISPLAY
CPX_PARAM_WRITELEVEL = _pycplex_platform.CPX_PARAM_WRITELEVEL
CPX_PARAM_RANDOMSEED = _pycplex_platform.CPX_PARAM_RANDOMSEED
CPX_PARAM_DETTILIM = _pycplex_platform.CPX_PARAM_DETTILIM
CPX_PARAM_FILEENCODING = _pycplex_platform.CPX_PARAM_FILEENCODING
CPX_PARAM_APIENCODING = _pycplex_platform.CPX_PARAM_APIENCODING
CPX_PARAM_OPTIMALITYTARGET = _pycplex_platform.CPX_PARAM_OPTIMALITYTARGET
CPX_PARAM_CLONELOG = _pycplex_platform.CPX_PARAM_CLONELOG
CPX_PARAM_TUNINGDETTILIM = _pycplex_platform.CPX_PARAM_TUNINGDETTILIM
CPX_PARAM_CPUMASK = _pycplex_platform.CPX_PARAM_CPUMASK
CPX_PARAM_SOLUTIONTYPE = _pycplex_platform.CPX_PARAM_SOLUTIONTYPE
CPX_PARAM_WARNLIM = _pycplex_platform.CPX_PARAM_WARNLIM
CPX_PARAM_SIFTSIM = _pycplex_platform.CPX_PARAM_SIFTSIM
CPX_PARAM_DYNAMICROWS = _pycplex_platform.CPX_PARAM_DYNAMICROWS
CPX_PARAM_RECORD = _pycplex_platform.CPX_PARAM_RECORD
CPX_PARAM_PARAMDISPLAY = _pycplex_platform.CPX_PARAM_PARAMDISPLAY
CPX_PARAM_FOLDING = _pycplex_platform.CPX_PARAM_FOLDING
CPX_PARAM_PREREFORM = _pycplex_platform.CPX_PARAM_PREREFORM
CPX_PARAM_WORKERALG = _pycplex_platform.CPX_PARAM_WORKERALG
CPX_PARAM_BENDERSSTRATEGY = _pycplex_platform.CPX_PARAM_BENDERSSTRATEGY
CPX_PARAM_BENDERSFEASCUTTOL = _pycplex_platform.CPX_PARAM_BENDERSFEASCUTTOL
CPX_PARAM_BENDERSOPTCUTTOL = _pycplex_platform.CPX_PARAM_BENDERSOPTCUTTOL
CPX_PARAM_MULTIOBJDISPLAY = _pycplex_platform.CPX_PARAM_MULTIOBJDISPLAY
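# MIP strategy, tolerance, cut, heuristic and distributed-MIP parameters: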
CPX_PARAM_BRDIR = _pycplex_platform.CPX_PARAM_BRDIR
CPX_PARAM_BTTOL = _pycplex_platform.CPX_PARAM_BTTOL
CPX_PARAM_CLIQUES = _pycplex_platform.CPX_PARAM_CLIQUES
CPX_PARAM_COEREDIND = _pycplex_platform.CPX_PARAM_COEREDIND
CPX_PARAM_COVERS = _pycplex_platform.CPX_PARAM_COVERS
CPX_PARAM_CUTLO = _pycplex_platform.CPX_PARAM_CUTLO
CPX_PARAM_CUTUP = _pycplex_platform.CPX_PARAM_CUTUP
CPX_PARAM_EPAGAP = _pycplex_platform.CPX_PARAM_EPAGAP
CPX_PARAM_EPGAP = _pycplex_platform.CPX_PARAM_EPGAP
CPX_PARAM_EPINT = _pycplex_platform.CPX_PARAM_EPINT
CPX_PARAM_MIPDISPLAY = _pycplex_platform.CPX_PARAM_MIPDISPLAY
CPX_PARAM_MIPINTERVAL = _pycplex_platform.CPX_PARAM_MIPINTERVAL
CPX_PARAM_INTSOLLIM = _pycplex_platform.CPX_PARAM_INTSOLLIM
CPX_PARAM_NODEFILEIND = _pycplex_platform.CPX_PARAM_NODEFILEIND
CPX_PARAM_NODELIM = _pycplex_platform.CPX_PARAM_NODELIM
CPX_PARAM_NODESEL = _pycplex_platform.CPX_PARAM_NODESEL
CPX_PARAM_OBJDIF = _pycplex_platform.CPX_PARAM_OBJDIF
CPX_PARAM_MIPORDIND = _pycplex_platform.CPX_PARAM_MIPORDIND
CPX_PARAM_RELOBJDIF = _pycplex_platform.CPX_PARAM_RELOBJDIF
CPX_PARAM_STARTALG = _pycplex_platform.CPX_PARAM_STARTALG
CPX_PARAM_SUBALG = _pycplex_platform.CPX_PARAM_SUBALG
CPX_PARAM_TRELIM = _pycplex_platform.CPX_PARAM_TRELIM
CPX_PARAM_VARSEL = _pycplex_platform.CPX_PARAM_VARSEL
CPX_PARAM_BNDSTRENIND = _pycplex_platform.CPX_PARAM_BNDSTRENIND
CPX_PARAM_HEURFREQ = _pycplex_platform.CPX_PARAM_HEURFREQ
CPX_PARAM_MIPORDTYPE = _pycplex_platform.CPX_PARAM_MIPORDTYPE
CPX_PARAM_CUTSFACTOR = _pycplex_platform.CPX_PARAM_CUTSFACTOR
CPX_PARAM_RELAXPREIND = _pycplex_platform.CPX_PARAM_RELAXPREIND
CPX_PARAM_PRESLVND = _pycplex_platform.CPX_PARAM_PRESLVND
CPX_PARAM_BBINTERVAL = _pycplex_platform.CPX_PARAM_BBINTERVAL
CPX_PARAM_FLOWCOVERS = _pycplex_platform.CPX_PARAM_FLOWCOVERS
CPX_PARAM_IMPLBD = _pycplex_platform.CPX_PARAM_IMPLBD
CPX_PARAM_PROBE = _pycplex_platform.CPX_PARAM_PROBE
CPX_PARAM_GUBCOVERS = _pycplex_platform.CPX_PARAM_GUBCOVERS
CPX_PARAM_STRONGCANDLIM = _pycplex_platform.CPX_PARAM_STRONGCANDLIM
CPX_PARAM_STRONGITLIM = _pycplex_platform.CPX_PARAM_STRONGITLIM
CPX_PARAM_FRACCAND = _pycplex_platform.CPX_PARAM_FRACCAND
CPX_PARAM_FRACCUTS = _pycplex_platform.CPX_PARAM_FRACCUTS
CPX_PARAM_FRACPASS = _pycplex_platform.CPX_PARAM_FRACPASS
CPX_PARAM_FLOWPATHS = _pycplex_platform.CPX_PARAM_FLOWPATHS
CPX_PARAM_MIRCUTS = _pycplex_platform.CPX_PARAM_MIRCUTS
CPX_PARAM_DISJCUTS = _pycplex_platform.CPX_PARAM_DISJCUTS
CPX_PARAM_AGGCUTLIM = _pycplex_platform.CPX_PARAM_AGGCUTLIM
CPX_PARAM_MIPCBREDLP = _pycplex_platform.CPX_PARAM_MIPCBREDLP
CPX_PARAM_CUTPASS = _pycplex_platform.CPX_PARAM_CUTPASS
CPX_PARAM_MIPEMPHASIS = _pycplex_platform.CPX_PARAM_MIPEMPHASIS
CPX_PARAM_SYMMETRY = _pycplex_platform.CPX_PARAM_SYMMETRY
CPX_PARAM_DIVETYPE = _pycplex_platform.CPX_PARAM_DIVETYPE
CPX_PARAM_RINSHEUR = _pycplex_platform.CPX_PARAM_RINSHEUR
CPX_PARAM_LBHEUR = _pycplex_platform.CPX_PARAM_LBHEUR
CPX_PARAM_REPEATPRESOLVE = _pycplex_platform.CPX_PARAM_REPEATPRESOLVE
CPX_PARAM_PROBETIME = _pycplex_platform.CPX_PARAM_PROBETIME
CPX_PARAM_POLISHTIME = _pycplex_platform.CPX_PARAM_POLISHTIME
CPX_PARAM_REPAIRTRIES = _pycplex_platform.CPX_PARAM_REPAIRTRIES
CPX_PARAM_EPLIN = _pycplex_platform.CPX_PARAM_EPLIN
CPX_PARAM_EPRELAX = _pycplex_platform.CPX_PARAM_EPRELAX
CPX_PARAM_FPHEUR = _pycplex_platform.CPX_PARAM_FPHEUR
CPX_PARAM_EACHCUTLIM = _pycplex_platform.CPX_PARAM_EACHCUTLIM
CPX_PARAM_SOLNPOOLCAPACITY = _pycplex_platform.CPX_PARAM_SOLNPOOLCAPACITY
CPX_PARAM_SOLNPOOLREPLACE = _pycplex_platform.CPX_PARAM_SOLNPOOLREPLACE
CPX_PARAM_SOLNPOOLGAP = _pycplex_platform.CPX_PARAM_SOLNPOOLGAP
CPX_PARAM_SOLNPOOLAGAP = _pycplex_platform.CPX_PARAM_SOLNPOOLAGAP
CPX_PARAM_SOLNPOOLINTENSITY = _pycplex_platform.CPX_PARAM_SOLNPOOLINTENSITY
CPX_PARAM_POPULATELIM = _pycplex_platform.CPX_PARAM_POPULATELIM
CPX_PARAM_MIPSEARCH = _pycplex_platform.CPX_PARAM_MIPSEARCH
CPX_PARAM_MIQCPSTRAT = _pycplex_platform.CPX_PARAM_MIQCPSTRAT
CPX_PARAM_ZEROHALFCUTS = _pycplex_platform.CPX_PARAM_ZEROHALFCUTS
CPX_PARAM_HEUREFFORT = _pycplex_platform.CPX_PARAM_HEUREFFORT
CPX_PARAM_POLISHAFTEREPAGAP = _pycplex_platform.CPX_PARAM_POLISHAFTEREPAGAP
CPX_PARAM_POLISHAFTEREPGAP = _pycplex_platform.CPX_PARAM_POLISHAFTEREPGAP
CPX_PARAM_POLISHAFTERNODE = _pycplex_platform.CPX_PARAM_POLISHAFTERNODE
CPX_PARAM_POLISHAFTERINTSOL = _pycplex_platform.CPX_PARAM_POLISHAFTERINTSOL
CPX_PARAM_POLISHAFTERTIME = _pycplex_platform.CPX_PARAM_POLISHAFTERTIME
CPX_PARAM_MCFCUTS = _pycplex_platform.CPX_PARAM_MCFCUTS
CPX_PARAM_MIPKAPPASTATS = _pycplex_platform.CPX_PARAM_MIPKAPPASTATS
CPX_PARAM_AUXROOTTHREADS = _pycplex_platform.CPX_PARAM_AUXROOTTHREADS
CPX_PARAM_INTSOLFILEPREFIX = _pycplex_platform.CPX_PARAM_INTSOLFILEPREFIX
CPX_PARAM_PROBEDETTIME = _pycplex_platform.CPX_PARAM_PROBEDETTIME
CPX_PARAM_POLISHAFTERDETTIME = _pycplex_platform.CPX_PARAM_POLISHAFTERDETTIME
CPX_PARAM_LANDPCUTS = _pycplex_platform.CPX_PARAM_LANDPCUTS
CPX_PARAM_NODECUTS = _pycplex_platform.CPX_PARAM_NODECUTS
CPX_PARAM_RAMPUPDURATION = _pycplex_platform.CPX_PARAM_RAMPUPDURATION
CPX_PARAM_RAMPUPDETTILIM = _pycplex_platform.CPX_PARAM_RAMPUPDETTILIM
CPX_PARAM_RAMPUPTILIM = _pycplex_platform.CPX_PARAM_RAMPUPTILIM
CPX_PARAM_LOCALIMPLBD = _pycplex_platform.CPX_PARAM_LOCALIMPLBD
CPX_PARAM_BQPCUTS = _pycplex_platform.CPX_PARAM_BQPCUTS
CPX_PARAM_RLTCUTS = _pycplex_platform.CPX_PARAM_RLTCUTS
CPX_PARAM_SUBMIPSTARTALG = _pycplex_platform.CPX_PARAM_SUBMIPSTARTALG
CPX_PARAM_SUBMIPSUBALG = _pycplex_platform.CPX_PARAM_SUBMIPSUBALG
CPX_PARAM_SUBMIPSCAIND = _pycplex_platform.CPX_PARAM_SUBMIPSCAIND
CPX_PARAM_SUBMIPNODELIMIT = _pycplex_platform.CPX_PARAM_SUBMIPNODELIMIT
CPX_PARAM_SOS1REFORM = _pycplex_platform.CPX_PARAM_SOS1REFORM
CPX_PARAM_SOS2REFORM = _pycplex_platform.CPX_PARAM_SOS2REFORM
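# Barrier and QP/QCP parameters: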
CPX_PARAM_BAREPCOMP = _pycplex_platform.CPX_PARAM_BAREPCOMP
CPX_PARAM_BARGROWTH = _pycplex_platform.CPX_PARAM_BARGROWTH
CPX_PARAM_BAROBJRNG = _pycplex_platform.CPX_PARAM_BAROBJRNG
CPX_PARAM_BARALG = _pycplex_platform.CPX_PARAM_BARALG
CPX_PARAM_BARCOLNZ = _pycplex_platform.CPX_PARAM_BARCOLNZ
CPX_PARAM_BARDISPLAY = _pycplex_platform.CPX_PARAM_BARDISPLAY
CPX_PARAM_BARITLIM = _pycplex_platform.CPX_PARAM_BARITLIM
CPX_PARAM_BARMAXCOR = _pycplex_platform.CPX_PARAM_BARMAXCOR
CPX_PARAM_BARORDER = _pycplex_platform.CPX_PARAM_BARORDER
CPX_PARAM_BARSTARTALG = _pycplex_platform.CPX_PARAM_BARSTARTALG
CPX_PARAM_BARCROSSALG = _pycplex_platform.CPX_PARAM_BARCROSSALG
CPX_PARAM_BARQCPEPCOMP = _pycplex_platform.CPX_PARAM_BARQCPEPCOMP
CPX_PARAM_QPNZREADLIM = _pycplex_platform.CPX_PARAM_QPNZREADLIM
CPX_PARAM_CALCQCPDUALS = _pycplex_platform.CPX_PARAM_CALCQCPDUALS
CPX_PARAM_QPMAKEPSDIND = _pycplex_platform.CPX_PARAM_QPMAKEPSDIND
CPX_PARAM_QTOLININD = _pycplex_platform.CPX_PARAM_QTOLININD
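# Network simplex parameters: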
CPX_PARAM_NETITLIM = _pycplex_platform.CPX_PARAM_NETITLIM
CPX_PARAM_NETEPOPT = _pycplex_platform.CPX_PARAM_NETEPOPT
CPX_PARAM_NETEPRHS = _pycplex_platform.CPX_PARAM_NETEPRHS
CPX_PARAM_NETPPRIIND = _pycplex_platform.CPX_PARAM_NETPPRIIND
CPX_PARAM_NETDISPLAY = _pycplex_platform.CPX_PARAM_NETDISPLAY
CPX_CPXAUTOTYPES_H_H = _pycplex_platform.CPX_CPXAUTOTYPES_H_H
CPX_CPXAUTOSTRUCTS_H_H = _pycplex_platform.CPX_CPXAUTOSTRUCTS_H_H
class cpxdeserializer(object):
    """SWIG proxy for the native cpxdeserializer struct; exposes its reader fields (getbyte ... getdoubles)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
getbyte = property(_pycplex_platform.cpxdeserializer_getbyte_get, _pycplex_platform.cpxdeserializer_getbyte_set)
getshort = property(_pycplex_platform.cpxdeserializer_getshort_get, _pycplex_platform.cpxdeserializer_getshort_set)
getint = property(_pycplex_platform.cpxdeserializer_getint_get, _pycplex_platform.cpxdeserializer_getint_set)
getlong = property(_pycplex_platform.cpxdeserializer_getlong_get, _pycplex_platform.cpxdeserializer_getlong_set)
getfloat = property(_pycplex_platform.cpxdeserializer_getfloat_get, _pycplex_platform.cpxdeserializer_getfloat_set)
getdouble = property(_pycplex_platform.cpxdeserializer_getdouble_get, _pycplex_platform.cpxdeserializer_getdouble_set)
getbytes = property(_pycplex_platform.cpxdeserializer_getbytes_get, _pycplex_platform.cpxdeserializer_getbytes_set)
getshorts = property(_pycplex_platform.cpxdeserializer_getshorts_get, _pycplex_platform.cpxdeserializer_getshorts_set)
getints = property(_pycplex_platform.cpxdeserializer_getints_get, _pycplex_platform.cpxdeserializer_getints_set)
getlongs = property(_pycplex_platform.cpxdeserializer_getlongs_get, _pycplex_platform.cpxdeserializer_getlongs_set)
getfloats = property(_pycplex_platform.cpxdeserializer_getfloats_get, _pycplex_platform.cpxdeserializer_getfloats_set)
getdoubles = property(_pycplex_platform.cpxdeserializer_getdoubles_get, _pycplex_platform.cpxdeserializer_getdoubles_set)
def __init__(self):
_pycplex_platform.cpxdeserializer_swiginit(self, _pycplex_platform.new_cpxdeserializer())
__swig_destroy__ = _pycplex_platform.delete_cpxdeserializer
# Register cpxdeserializer in _pycplex_platform:
_pycplex_platform.cpxdeserializer_swigregister(cpxdeserializer)
class cpxserializer(object):
    """SWIG proxy for the native cpxserializer struct; exposes its writer fields (addbyte ... adddoubles)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
addbyte = property(_pycplex_platform.cpxserializer_addbyte_get, _pycplex_platform.cpxserializer_addbyte_set)
addshort = property(_pycplex_platform.cpxserializer_addshort_get, _pycplex_platform.cpxserializer_addshort_set)
addint = property(_pycplex_platform.cpxserializer_addint_get, _pycplex_platform.cpxserializer_addint_set)
addlong = property(_pycplex_platform.cpxserializer_addlong_get, _pycplex_platform.cpxserializer_addlong_set)
addfloat = property(_pycplex_platform.cpxserializer_addfloat_get, _pycplex_platform.cpxserializer_addfloat_set)
adddouble = property(_pycplex_platform.cpxserializer_adddouble_get, _pycplex_platform.cpxserializer_adddouble_set)
addbytes = property(_pycplex_platform.cpxserializer_addbytes_get, _pycplex_platform.cpxserializer_addbytes_set)
addshorts = property(_pycplex_platform.cpxserializer_addshorts_get, _pycplex_platform.cpxserializer_addshorts_set)
addints = property(_pycplex_platform.cpxserializer_addints_get, _pycplex_platform.cpxserializer_addints_set)
addlongs = property(_pycplex_platform.cpxserializer_addlongs_get, _pycplex_platform.cpxserializer_addlongs_set)
addfloats = property(_pycplex_platform.cpxserializer_addfloats_get, _pycplex_platform.cpxserializer_addfloats_set)
adddoubles = property(_pycplex_platform.cpxserializer_adddoubles_get, _pycplex_platform.cpxserializer_adddoubles_set)
def __init__(self):
_pycplex_platform.cpxserializer_swiginit(self, _pycplex_platform.new_cpxserializer())
__swig_destroy__ = _pycplex_platform.delete_cpxserializer
# Register cpxserializer in _pycplex_platform:
_pycplex_platform.cpxserializer_swigregister(cpxserializer)
CPX_CPLEXX_H = _pycplex_platform.CPX_CPLEXX_H
CPX_APIMODEL_SMALL = _pycplex_platform.CPX_APIMODEL_SMALL
CPX_APIMODEL_LARGE = _pycplex_platform.CPX_APIMODEL_LARGE
CPX_APIMODEL = _pycplex_platform.CPX_APIMODEL
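# C trampolines that forward native CPLEX callbacks to the registered Python handlers: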
def lpcallbackfuncwrap(arg1: 'CPXCENVptr', arg2: 'void *', arg3: 'int', arg4: 'void *') -> "int":
return _pycplex_platform.lpcallbackfuncwrap(arg1, arg2, arg3, arg4)
def netcallbackfuncwrap(arg1: 'CPXCENVptr', arg2: 'void *', arg3: 'int', arg4: 'void *') -> "int":
return _pycplex_platform.netcallbackfuncwrap(arg1, arg2, arg3, arg4)
def tuningcallbackfuncwrap(arg1: 'CPXCENVptr', arg2: 'void *', arg3: 'int', arg4: 'void *') -> "int":
return _pycplex_platform.tuningcallbackfuncwrap(arg1, arg2, arg3, arg4)
def infocallbackfuncwrap(arg1: 'CPXCENVptr', arg2: 'void *', arg3: 'int', arg4: 'void *') -> "int":
return _pycplex_platform.infocallbackfuncwrap(arg1, arg2, arg3, arg4)
def mipcallbackfuncwrap(arg1: 'CPXCENVptr', arg2: 'void *', arg3: 'int', arg4: 'void *') -> "int":
return _pycplex_platform.mipcallbackfuncwrap(arg1, arg2, arg3, arg4)
def branchcallbackfuncwrap(xenv: 'CPXCENVptr', cbdata: 'void *', wherefrom: 'int', cbhandle: 'void *', brtype: 'int', brset: 'CPXDIM', nodecnt: 'int', bdcnt: 'CPXDIM', nodebeg: 'CPXDIM const *', xindex: 'CPXDIM const *', lu: 'char const *', bd: 'double const *', nodeest: 'double const *', useraction_p: 'int *') -> "int":
return _pycplex_platform.branchcallbackfuncwrap(xenv, cbdata, wherefrom, cbhandle, brtype, brset, nodecnt, bdcnt, nodebeg, xindex, lu, bd, nodeest, useraction_p)
def lazyconcallbackfuncwrap(xenv: 'CPXCENVptr', cbdata: 'void *', wherefrom: 'int', cbhandle: 'void *', useraction_p: 'int *') -> "int":
return _pycplex_platform.lazyconcallbackfuncwrap(xenv, cbdata, wherefrom, cbhandle, useraction_p)
def usercutcallbackfuncwrap(xenv: 'CPXCENVptr', cbdata: 'void *', wherefrom: 'int', cbhandle: 'void *', useraction_p: 'int *') -> "int":
return _pycplex_platform.usercutcallbackfuncwrap(xenv, cbdata, wherefrom, cbhandle, useraction_p)
def nodecallbackfuncwrap(xenv: 'CPXCENVptr', cbdata: 'void *', wherefrom: 'int', cbhandle: 'void *', nodeindex: 'CPXCNT *', useraction: 'int *') -> "int":
return _pycplex_platform.nodecallbackfuncwrap(xenv, cbdata, wherefrom, cbhandle, nodeindex, useraction)
def heuristiccallbackfuncwrap(xenv: 'CPXCENVptr', cbdata: 'void *', wherefrom: 'int', cbhandle: 'void *', objval_p: 'double *', x: 'double *', checkfeas_p: 'int *', useraction_p: 'int *') -> "int":
return _pycplex_platform.heuristiccallbackfuncwrap(xenv, cbdata, wherefrom, cbhandle, objval_p, x, checkfeas_p, useraction_p)
def incumbentcallbackfuncwrap(xenv: 'CPXCENVptr', cbdata: 'void *', wherefrom: 'int', cbhandle: 'void *', objval: 'double', x: 'double *', isfeas_p: 'int *', useraction_p: 'int *') -> "int":
return _pycplex_platform.incumbentcallbackfuncwrap(xenv, cbdata, wherefrom, cbhandle, objval, x, isfeas_p, useraction_p)
def solvecallbackfuncwrap(xenv: 'CPXCENVptr', cbdata: 'void *', wherefrom: 'int', cbhandle: 'void *', useraction: 'int *') -> "int":
return _pycplex_platform.solvecallbackfuncwrap(xenv, cbdata, wherefrom, cbhandle, useraction)
def deletenodecallbackfuncwrap(xenv: 'CPXCENVptr', wherefrom: 'int', cbhandle: 'void *', seqnum: 'CPXCNT', handle: 'void *') -> "void":
return _pycplex_platform.deletenodecallbackfuncwrap(xenv, wherefrom, cbhandle, seqnum, handle)
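# Environment hooks, generic/modeling-assistance callback plumbing, message routing and SIGINT handling: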
def setpydel(arg1: 'CPXENVptr') -> "int":
return _pycplex_platform.setpydel(arg1)
def cpxpygenericcallbackfuncwrap(arg1: 'CPXCALLBACKCONTEXTptr', arg2: 'CPXLONG', arg3: 'void *') -> "int":
return _pycplex_platform.cpxpygenericcallbackfuncwrap(arg1, arg2, arg3)
def cpxpymodelasstcallbackfuncwrap(arg1: 'int', arg2: 'char const *', arg3: 'void *') -> "int":
return _pycplex_platform.cpxpymodelasstcallbackfuncwrap(arg1, arg2, arg3)
def messagewrap(arg1: 'void *', arg2: 'char const *') -> "void":
return _pycplex_platform.messagewrap(arg1, arg2)
def setpyterminate(arg1: 'CPXENVptr') -> "void":
return _pycplex_platform.setpyterminate(arg1)
def sigint_register() -> "void":
return _pycplex_platform.sigint_register()
def sigint_unregister() -> "void":
return _pycplex_platform.sigint_unregister()
def py_sigint_handler(signo: 'int') -> "void":
return _pycplex_platform.py_sigint_handler(signo)
def new_native_int() -> "PyObject *":
return _pycplex_platform.new_native_int()
def delete_native_int(o: 'PyObject *') -> "PyObject *":
return _pycplex_platform.delete_native_int(o)
def set_native_int(o: 'PyObject *', v: 'PyObject *') -> "PyObject *":
return _pycplex_platform.set_native_int(o, v)
def get_native_int(o: 'PyObject *') -> "PyObject *":
return _pycplex_platform.get_native_int(o)
def setterminate(pyenv: 'PyObject *', o: 'PyObject *') -> "PyObject *":
return _pycplex_platform.setterminate(pyenv, o)
def set_status_checker(checker: 'PyObject *') -> "void":
return _pycplex_platform.set_status_checker(checker)
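# Fast C-level helpers callable from within legacy callbacks (cb_*):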
def fast_getcallbackinfo(cbstruct: 'PyObject *', pywhichinfo: 'PyObject *', CplexSolverError: 'PyObject *') -> "PyObject *":
return _pycplex_platform.fast_getcallbackinfo(cbstruct, pywhichinfo, CplexSolverError)
def cb_geterrorstring(cbstruct: 'PyObject *', py_status: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_geterrorstring(cbstruct, py_status)
def cb_getcolindex(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *', utf8bytes: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getcolindex(cbstruct, env_lp_ptr, utf8bytes)
def cb_getrowindex(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *', utf8bytes: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getrowindex(cbstruct, env_lp_ptr, utf8bytes)
def cb_getqconstrindex(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *', utf8bytes: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getqconstrindex(cbstruct, env_lp_ptr, utf8bytes)
def cb_getsosindex(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *', utf8bytes: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getsosindex(cbstruct, env_lp_ptr, utf8bytes)
def cb_getnumcols(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getnumcols(cbstruct, env_lp_ptr)
def cb_getnumrows(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getnumrows(cbstruct, env_lp_ptr)
def cb_getnumqconstrs(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getnumqconstrs(cbstruct, env_lp_ptr)
def cb_getnumsos(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getnumsos(cbstruct, env_lp_ptr)
def cb_gettime(cbstruct: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_gettime(cbstruct)
def cb_getdettime(cbstruct: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getdettime(cbstruct)
def cb_getstat(cbstruct: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getstat(cbstruct)
def cb_solninfo(cbstruct: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_solninfo(cbstruct)
def cb_primopt(cbstruct: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_primopt(cbstruct)
def cb_dualopt(cbstruct: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_dualopt(cbstruct)
def cb_hybbaropt(cbstruct: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_hybbaropt(cbstruct)
def cb_hybnetopt(cbstruct: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_hybnetopt(cbstruct)
def cb_copystart(cbstruct: 'PyObject *', py_prim: 'PyObject *', py_dual: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_copystart(cbstruct, py_prim, py_dual)
def cb_chgbds(cbstruct: 'PyObject *', py_ind: 'PyObject *', py_lu: 'PyObject *', py_bd: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_chgbds(cbstruct, py_ind, py_lu, py_bd)
def cb_slackfromx(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *', py_x: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_slackfromx(cbstruct, env_lp_ptr, py_x)
def cb_qconstrslackfromx(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *', py_x: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_qconstrslackfromx(cbstruct, env_lp_ptr, py_x)
def cb_crushx(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *', py_x: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_crushx(cbstruct, env_lp_ptr, py_x)
def cb_crushpi(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *', py_pi: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_crushpi(cbstruct, env_lp_ptr, py_pi)
def cb_getobj(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *', py_begin: 'PyObject *', py_end: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getobj(cbstruct, env_lp_ptr, py_begin, py_end)
def cb_getprestat_c(cbstruct: 'PyObject *', env_lp_ptr: 'PyObject *') -> "PyObject *":
return _pycplex_platform.cb_getprestat_c(cbstruct, env_lp_ptr)
class cb_struct(object):
    """SWIG proxy bundling the (env, cbdata, wherefrom) triple handed to legacy callbacks."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
env = property(_pycplex_platform.cb_struct_env_get, _pycplex_platform.cb_struct_env_set)
cbdata = property(_pycplex_platform.cb_struct_cbdata_get, _pycplex_platform.cb_struct_cbdata_set)
wherefrom = property(_pycplex_platform.cb_struct_wherefrom_get, _pycplex_platform.cb_struct_wherefrom_set)
def __init__(self):
_pycplex_platform.cb_struct_swiginit(self, _pycplex_platform.new_cb_struct())
__swig_destroy__ = _pycplex_platform.delete_cb_struct
# Register cb_struct in _pycplex_platform:
_pycplex_platform.cb_struct_swigregister(cb_struct)
def get_wherefrom(Pydata: 'PyObject *') -> "int":
return _pycplex_platform.get_wherefrom(Pydata)
def delpydel(env: 'CPXENVptr') -> "int":
return _pycplex_platform.delpydel(env)
CPX_CPLEXE_H = _pycplex_platform.CPX_CPLEXE_H
CPXE_H = _pycplex_platform.CPXE_H
MIPE_H = _pycplex_platform.MIPE_H
CPXAUTOE_H = _pycplex_platform.CPXAUTOE_H
CPX_AUTOES_H = _pycplex_platform.CPX_AUTOES_H
CPX_AUTOEL_H = _pycplex_platform.CPX_AUTOEL_H
CPX_AUTOEX_H = _pycplex_platform.CPX_AUTOEX_H
class cpxpyiodevice(object):
    """SWIG proxy pairing a native I/O device (dev) with the Python stream it forwards to."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
dev = property(_pycplex_platform.cpxpyiodevice_dev_get, _pycplex_platform.cpxpyiodevice_dev_set)
stream = property(_pycplex_platform.cpxpyiodevice_stream_get, _pycplex_platform.cpxpyiodevice_stream_set)
def __init__(self):
_pycplex_platform.cpxpyiodevice_swiginit(self, _pycplex_platform.new_cpxpyiodevice())
__swig_destroy__ = _pycplex_platform.delete_cpxpyiodevice
# Register cpxpyiodevice in _pycplex_platform:
_pycplex_platform.cpxpyiodevice_swigregister(cpxpyiodevice)
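# Lock management and PyObject-to-C scalar conversion helpers: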
def init_callback_lock() -> "PyObject *":
return _pycplex_platform.init_callback_lock()
def finit_callback_lock(py_lock: 'PyObject *') -> "void":
return _pycplex_platform.finit_callback_lock(py_lock)
def CPXPyObject_AsInt(obj: 'PyObject *', val: 'int *') -> "int":
return _pycplex_platform.CPXPyObject_AsInt(obj, val)
def CPXPyObject_AsCPXDIM(obj: 'PyObject *', val: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXPyObject_AsCPXDIM(obj, val)
def CPXPyObject_AsChar(obj: 'PyObject *', val: 'char *') -> "int":
return _pycplex_platform.CPXPyObject_AsChar(obj, val)
def CPXPyObject_AsCPXSIZE(obj: 'PyObject *', val: 'CPXSIZE *') -> "int":
return _pycplex_platform.CPXPyObject_AsCPXSIZE(obj, val)
def CPXPyObject_AsCPXLONG(obj: 'PyObject *', val: 'CPXLONG *') -> "int":
return _pycplex_platform.CPXPyObject_AsCPXLONG(obj, val)
def CPXPyObject_AsCPXCNT(obj: 'PyObject *', val: 'CPXCNT *') -> "int":
return _pycplex_platform.CPXPyObject_AsCPXCNT(obj, val)
def CPXPyObject_AsDouble(obj: 'PyObject *', val: 'double *') -> "int":
return _pycplex_platform.CPXPyObject_AsDouble(obj, val)
def CPXPyIODevInit(dev: 'cpxpyiodevice', stream: 'PyObject *') -> "int":
return _pycplex_platform.CPXPyIODevInit(dev, stream)
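# Wrappers around Callable Library optimization, conflict and file-writing routines (CPXX*):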
def CPXXdualopt(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXdualopt(env, lp)
def CPXXembwrite(env: 'CPXCENVptr', lp: 'CPXLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXembwrite(env, lp, filename_str)
def CPXXfeasoptext(env: 'CPXCENVptr', lp: 'CPXLPptr', grpcnt: 'CPXDIM', concnt: 'CPXNNZ', py_grppref: 'double const *', py_grpbeg: 'CPXNNZ const *', py_grpind: 'CPXDIM const *', grptype: 'char const *') -> "int":
return _pycplex_platform.CPXXfeasoptext(env, lp, grpcnt, concnt, py_grppref, py_grpbeg, py_grpind, grptype)
def CPXXhybnetopt(env: 'CPXCENVptr', lp: 'CPXLPptr', method: 'int') -> "int":
return _pycplex_platform.CPXXhybnetopt(env, lp, method)
def CPXXlpopt(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXlpopt(env, lp)
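# A minimal usage sketch (comments only): CPXXlpopt solves the LP held by lp.
# It assumes env/lp handles obtained from the usual CPXXopenCPLEX/CPXXcreateprob
# calls, which are wrapped elsewhere in this module and not shown in this section:
#     status = CPXXlpopt(env, lp)   # a nonzero status is a CPXERR_* code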
def CPXXpivotin(env: 'CPXCENVptr', lp: 'CPXLPptr', rlist: 'CPXDIM const *', rlen: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXpivotin(env, lp, rlist, rlen)
def CPXXpivotout(env: 'CPXCENVptr', lp: 'CPXLPptr', clist: 'CPXDIM const *', clen: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXpivotout(env, lp, clist, clen)
def CPXXpreslvwrite(env: 'CPXCENVptr', lp: 'CPXLPptr', filename_str: 'char const *', objoff_p: 'double *') -> "int":
return _pycplex_platform.CPXXpreslvwrite(env, lp, filename_str, objoff_p)
def CPXXpresolve(env: 'CPXCENVptr', lp: 'CPXLPptr', method: 'int') -> "int":
return _pycplex_platform.CPXXpresolve(env, lp, method)
def CPXXprimopt(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXprimopt(env, lp)
def CPXXrefineconflictext(env: 'CPXCENVptr', lp: 'CPXLPptr', grpcnt: 'CPXNNZ', concnt: 'CPXNNZ', py_grppref: 'double const *', py_grpbeg: 'CPXNNZ const *', py_grpind: 'CPXDIM const *', grptype: 'char const *') -> "int":
return _pycplex_platform.CPXXrefineconflictext(env, lp, grpcnt, concnt, py_grppref, py_grpbeg, py_grpind, grptype)
def CPXXsiftopt(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXsiftopt(env, lp)
def CPXXstrongbranch(env: 'CPXCENVptr', lp: 'CPXLPptr', goodlist: 'CPXDIM const *', goodlen: 'CPXDIM', downpen: 'double *', uppen: 'double *', itlim: 'CPXCNT') -> "int":
return _pycplex_platform.CPXXstrongbranch(env, lp, goodlist, goodlen, downpen, uppen, itlim)
def CPXXbaropt(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXbaropt(env, lp)
def CPXXhybbaropt(env: 'CPXCENVptr', lp: 'CPXLPptr', method: 'int') -> "int":
return _pycplex_platform.CPXXhybbaropt(env, lp, method)
def CPXXqpindefcertificate(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double *') -> "int":
return _pycplex_platform.CPXXqpindefcertificate(env, lp, x)
def CPXXqpopt(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXqpopt(env, lp)
def CPXXmipopt(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXmipopt(env, lp)
def CPXXpopulate(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXpopulate(env, lp)
def CPXXrefinemipstartconflictext(env: 'CPXCENVptr', lp: 'CPXLPptr', mipstartindex: 'int', grpcnt: 'CPXNNZ', concnt: 'CPXNNZ', py_grppref: 'double const *', py_grpbeg: 'CPXNNZ const *', py_grpind: 'CPXDIM const *', grptype_mipstart: 'char const *') -> "int":
return _pycplex_platform.CPXXrefinemipstartconflictext(env, lp, mipstartindex, grpcnt, concnt, py_grppref, py_grpbeg, py_grpind, grptype_mipstart)
def CPXXtuneparam(env: 'CPXENVptr', lp: 'CPXLPptr', intcnt: 'int', intnum: 'int const *', intval: 'int const *', dblcnt: 'int', dblnum: 'int const *', dblval: 'double const *', strcnt: 'int', strnum: 'int const *', strval: 'char const *const *', tunestat_p: 'int *') -> "int":
return _pycplex_platform.CPXXtuneparam(env, lp, intcnt, intnum, intval, dblcnt, dblnum, dblval, strcnt, strnum, strval, tunestat_p)
def CPXXtuneparamprobset(env: 'CPXENVptr', filecnt: 'int', filename: 'char const *const *', filetype: 'char const *const *', intcnt: 'int', intind: 'int const *', intval: 'int const *', dblcnt: 'int', dblind: 'int const *', dblval: 'double const *', strcnt: 'int', strind: 'int const *', strval: 'char const *const *', tunestat_p: 'int *') -> "int":
return _pycplex_platform.CPXXtuneparamprobset(env, filecnt, filename, filetype, intcnt, intind, intval, dblcnt, dblind, dblval, strcnt, strind, strval, tunestat_p)
def CPXErunseeds(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'int') -> "int":
return _pycplex_platform.CPXErunseeds(env, lp, cnt)
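
# File output: writers for problems, annotations, bases, solutions,
# perturbed problems, parameter sets, priority orders, MIP starts and
# solution-pool filters.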
def CPXXwriteannotations(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename: 'char const *') -> "int":
return _pycplex_platform.CPXXwriteannotations(env, lp, filename)
def CPXXwritebendersannotation(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename: 'char const *') -> "int":
return _pycplex_platform.CPXXwritebendersannotation(env, lp, filename)
def CPXXclpwrite(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXclpwrite(env, lp, filename_str)
def CPXXwriteprob(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename_str: 'char const *', filetype_str: 'char const *' = None) -> "int":
return _pycplex_platform.CPXXwriteprob(env, lp, filename_str, filetype_str)
def CPXXmbasewrite(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXmbasewrite(env, lp, filename_str)
def CPXXsolwrite(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXsolwrite(env, lp, filename_str)
def CPXXsolwritesolnpool(env: 'CPXCENVptr', lp: 'CPXCLPptr', soln: 'int', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXsolwritesolnpool(env, lp, soln, filename_str)
def CPXXsolwritesolnpoolall(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXsolwritesolnpoolall(env, lp, filename_str)
def CPXXdperwrite(env: 'CPXCENVptr', lp: 'CPXLPptr', filename_str: 'char const *', epsilon: 'double') -> "int":
return _pycplex_platform.CPXXdperwrite(env, lp, filename_str, epsilon)
def CPXXpperwrite(env: 'CPXCENVptr', lp: 'CPXLPptr', filename_str: 'char const *', epsilon: 'double') -> "int":
return _pycplex_platform.CPXXpperwrite(env, lp, filename_str, epsilon)
def CPXXdualwrite(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename_str: 'char const *', objshift_p: 'double *') -> "int":
return _pycplex_platform.CPXXdualwrite(env, lp, filename_str, objshift_p)
def CPXXwriteparam(env: 'CPXCENVptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXwriteparam(env, filename_str)
def CPXXordwrite(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXordwrite(env, lp, filename_str)
def CPXXwritemipstarts(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename_str: 'char const *', begin: 'int', end: 'int') -> "int":
return _pycplex_platform.CPXXwritemipstarts(env, lp, filename_str, begin, end)
def CPXXfltwrite(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXfltwrite(env, lp, filename_str)
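
# File input: the readcopy* counterparts of the writers above.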
def CPXXreadcopyannotations(env: 'CPXCENVptr', lp: 'CPXLPptr', filename: 'char const *') -> "int":
return _pycplex_platform.CPXXreadcopyannotations(env, lp, filename)
def CPXXreadcopyprob(env: 'CPXCENVptr', lp: 'CPXLPptr', filename_str: 'char const *', filetype_str: 'char const *' = None) -> "int":
return _pycplex_platform.CPXXreadcopyprob(env, lp, filename_str, filetype_str)
def CPXXreadcopybase(env: 'CPXCENVptr', lp: 'CPXLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXreadcopybase(env, lp, filename_str)
def CPXXreadcopystartinfo(env: 'CPXCENVptr', lp: 'CPXLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXreadcopystartinfo(env, lp, filename_str)
def CPXXreadcopyparam(env: 'CPXENVptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXreadcopyparam(env, filename_str)
def CPXXreadcopyorder(env: 'CPXCENVptr', lp: 'CPXLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXreadcopyorder(env, lp, filename_str)
def CPXXreadcopysolnpoolfilters(env: 'CPXCENVptr', lp: 'CPXLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXreadcopysolnpoolfilters(env, lp, filename_str)
def CPXXreadcopymipstarts(env: 'CPXCENVptr', lp: 'CPXLPptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXreadcopymipstarts(env, lp, filename_str)
def CPXXmultiobjopt(env: 'CPXCENVptr', lp: 'CPXLPptr', paramsets: 'CPXCPARAMSETptr const *') -> "int":
return _pycplex_platform.CPXXmultiobjopt(env, lp, paramsets)
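
# Helpers used by the Python layer: packing an (env, lp) pointer pair and
# converting Python lists (and list-of-lists matrices) into C arrays, each
# with a matching free_* routine to release the allocated memory.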
def pack_env_lp_ptr(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "PyObject *":
return _pycplex_platform.pack_env_lp_ptr(env, lp)
def Pylolmat_to_CHBmat(lolmat: 'PyObject *', env_lp_ptr: 'PyObject *', py_row_col: 'PyObject *') -> "PyObject *":
return _pycplex_platform.Pylolmat_to_CHBmat(lolmat, env_lp_ptr, py_row_col)
def free_CHBmat(HBMat: 'PyObject *') -> "void":
return _pycplex_platform.free_CHBmat(HBMat)
def int_list_to_C_array(list: 'PyObject *') -> "PyObject *":
return _pycplex_platform.int_list_to_C_array(list)
def long_list_to_C_array(list: 'PyObject *') -> "PyObject *":
return _pycplex_platform.long_list_to_C_array(list)
def double_list_to_C_array(list: 'PyObject *') -> "PyObject *":
return _pycplex_platform.double_list_to_C_array(list)
def free_int_C_array(Pyptr: 'PyObject *') -> "void":
return _pycplex_platform.free_int_C_array(Pyptr)
def free_long_C_array(Pyptr: 'PyObject *') -> "void":
return _pycplex_platform.free_long_C_array(Pyptr)
def free_double_C_array(Pyptr: 'PyObject *') -> "void":
return _pycplex_platform.free_double_C_array(Pyptr)
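
# Problem lifecycle and bulk data loading: create/clone/free a problem
# object and copy a complete LP (CPXXcopylpwnames) or a starting basis.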
def CPXXcreateprob(env: 'CPXCENVptr', status_p: 'int *', probname_str: 'char const *') -> "CPXLPptr":
return _pycplex_platform.CPXXcreateprob(env, status_p, probname_str)
def CPXXcloneprob(env: 'CPXCENVptr', lp: 'CPXCLPptr', status_p: 'int *') -> "CPXLPptr":
return _pycplex_platform.CPXXcloneprob(env, lp, status_p)
def CPXXcopylpwnames(env: 'CPXCENVptr', lp: 'CPXLPptr', numcols: 'CPXDIM', numrows: 'CPXDIM', objsense: 'int', py_obj: 'double const *', py_rhs: 'double const *', sense: 'char const *', py_matbeg: 'CPXNNZ const *', py_matcnt: 'CPXDIM const *', py_matind: 'CPXDIM const *', py_matval: 'double const *', py_lb: 'double const *', py_ub: 'double const *', py_rngval: 'double const *', colname: 'char const *const *', rowname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXcopylpwnames(env, lp, numcols, numrows, objsense, py_obj, py_rhs, sense, py_matbeg, py_matcnt, py_matind, py_matval, py_lb, py_ub, py_rngval, colname, rowname)
def CPXXcopyobjname(env: 'CPXCENVptr', lp: 'CPXLPptr', objname_str: 'char const *') -> "int":
return _pycplex_platform.CPXXcopyobjname(env, lp, objname_str)
def CPXXcopybase(env: 'CPXCENVptr', lp: 'CPXLPptr', cstat: 'int const *', rstat: 'int const *') -> "int":
return _pycplex_platform.CPXXcopybase(env, lp, cstat, rstat)
def CPXXcleanup(env: 'CPXCENVptr', lp: 'CPXLPptr', eps: 'double') -> "int":
return _pycplex_platform.CPXXcleanup(env, lp, eps)
def CPXXcopystart(env: 'CPXCENVptr', lp: 'CPXLPptr', cstat: 'int const *', rstat: 'int const *', cprim: 'double const *', rprim: 'double const *', cdual: 'double const *', rdual: 'double const *') -> "int":
return _pycplex_platform.CPXXcopystart(env, lp, cstat, rstat, cprim, rprim, cdual, rdual)
def CPXXfreeprob(env: 'CPXCENVptr', lp_p: 'CPXLPptr *') -> "int":
return _pycplex_platform.CPXXfreeprob(env, lp_p)
def CPXXpivot(env: 'CPXCENVptr', lp: 'CPXLPptr', jenter: 'CPXDIM', jleave: 'CPXDIM', leavestat: 'int') -> "int":
return _pycplex_platform.CPXXpivot(env, lp, jenter, jleave, leavestat)
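
# Solution access and quality: status codes, objective and variable values,
# duals, reduced costs, iteration counts, quality measures and sensitivity
# ranges, plus conflict information for infeasible models.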
def CPXXsolninfo(env: 'CPXCENVptr', lp: 'CPXCLPptr', solnmethod_p: 'int *', solntype_p: 'int *', pfeasind_p: 'int *', dfeasind_p: 'int *') -> "int":
return _pycplex_platform.CPXXsolninfo(env, lp, solnmethod_p, solntype_p, pfeasind_p, dfeasind_p)
def CPXXgetstat(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetstat(env, lp)
def CPXXgetstatstring(env: 'CPXCENVptr', statind: 'int', stat_buffer_str: 'char *') -> "char *":
return _pycplex_platform.CPXXgetstatstring(env, statind, stat_buffer_str)
def CPXXgetmethod(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetmethod(env, lp)
def CPXXgetobjval(env: 'CPXCENVptr', lp: 'CPXCLPptr', objval_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetobjval(env, lp, objval_p)
def CPXXgetx(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetx(env, lp, x, begin, end)
def CPXXgetax(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetax(env, lp, x, begin, end)
def CPXXgetpi(env: 'CPXCENVptr', lp: 'CPXCLPptr', pi: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetpi(env, lp, pi, begin, end)
def CPXXgetslack(env: 'CPXCENVptr', lp: 'CPXCLPptr', slack: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetslack(env, lp, slack, begin, end)
def CPXXgetrowinfeas(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double const *', infeasout: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetrowinfeas(env, lp, x, infeasout, begin, end)
def CPXXgetcolinfeas(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double const *', infeasout: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetcolinfeas(env, lp, x, infeasout, begin, end)
def CPXXgetdj(env: 'CPXCENVptr', lp: 'CPXCLPptr', dj: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetdj(env, lp, dj, begin, end)
def CPXXgetgrad(env: 'CPXCENVptr', lp: 'CPXCLPptr', j: 'CPXDIM', head: 'CPXDIM *', y: 'double *') -> "int":
return _pycplex_platform.CPXXgetgrad(env, lp, j, head, y)
def CPXXgetijdiv(env: 'CPXCENVptr', lp: 'CPXCLPptr', idiv_p: 'CPXDIM *', jdiv_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetijdiv(env, lp, idiv_p, jdiv_p)
def CPXXgetbase(env: 'CPXCENVptr', lp: 'CPXCLPptr', cstat: 'int *', rstat: 'int *') -> "int":
return _pycplex_platform.CPXXgetbase(env, lp, cstat, rstat)
def CPXXgetitcnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetitcnt(env, lp)
def CPXXgetphase1cnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetphase1cnt(env, lp)
def CPXXgetsiftitcnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetsiftitcnt(env, lp)
def CPXXgetsiftphase1cnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetsiftphase1cnt(env, lp)
def CPXXgetbaritcnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetbaritcnt(env, lp)
def CPXXgetcrossppushcnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetcrossppushcnt(env, lp)
def CPXXgetcrosspexchcnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetcrosspexchcnt(env, lp)
def CPXXgetcrossdpushcnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetcrossdpushcnt(env, lp)
def CPXXgetcrossdexchcnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetcrossdexchcnt(env, lp)
def CPXXgetpsbcnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetpsbcnt(env, lp)
def CPXXgetdsbcnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetdsbcnt(env, lp)
def CPXXgetdblquality(env: 'CPXCENVptr', lp: 'CPXCLPptr', quality_p: 'double *', what: 'int') -> "int":
return _pycplex_platform.CPXXgetdblquality(env, lp, quality_p, what)
def CPXXgetsolnpooldblquality(env: 'CPXCENVptr', lp: 'CPXCLPptr', soln: 'int', quality_p: 'double *', what: 'int') -> "int":
return _pycplex_platform.CPXXgetsolnpooldblquality(env, lp, soln, quality_p, what)
def CPXXgetintquality(env: 'CPXCENVptr', lp: 'CPXCLPptr', quality_p: 'int *', what: 'int') -> "int":
return _pycplex_platform.CPXXgetintquality(env, lp, quality_p, what)
def CPXXgetsolnpoolintquality(env: 'CPXCENVptr', lp: 'CPXCLPptr', soln: 'int', quality_p: 'int *', what: 'int') -> "int":
return _pycplex_platform.CPXXgetsolnpoolintquality(env, lp, soln, quality_p, what)
def CPXXrhssa(env: 'CPXCENVptr', lp: 'CPXCLPptr', begin: 'CPXDIM', end: 'CPXDIM', lower: 'double *', upper: 'double *') -> "int":
return _pycplex_platform.CPXXrhssa(env, lp, begin, end, lower, upper)
def CPXXboundsa(env: 'CPXCENVptr', lp: 'CPXCLPptr', begin: 'CPXDIM', end: 'CPXDIM', lblower: 'double *', lbupper: 'double *', ublower: 'double *', ubupper: 'double *') -> "int":
return _pycplex_platform.CPXXboundsa(env, lp, begin, end, lblower, lbupper, ublower, ubupper)
def CPXXobjsa(env: 'CPXCENVptr', lp: 'CPXCLPptr', begin: 'CPXDIM', end: 'CPXDIM', lower: 'double *', upper: 'double *') -> "int":
return _pycplex_platform.CPXXobjsa(env, lp, begin, end, lower, upper)
def CPXXgetconflictext(env: 'CPXCENVptr', lp: 'CPXCLPptr', grpstat: 'int *', beg: 'CPXNNZ', end: 'CPXNNZ') -> "int":
return _pycplex_platform.CPXXgetconflictext(env, lp, grpstat, beg, end)
def CPXXgetconflictnumgroups(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXNNZ":
return _pycplex_platform.CPXXgetconflictnumgroups(env, lp)
def CPXXgetconflictgroups(env: 'CPXCENVptr', lp: 'CPXCLPptr', concnt_p: 'CPXNNZ *') -> "CPXDIM":
return _pycplex_platform.CPXXgetconflictgroups(env, lp, concnt_p)
def CPXXgetconflictnumpasses(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXCNT":
return _pycplex_platform.CPXXgetconflictnumpasses(env, lp)
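
# Model modification: adding/deleting rows and columns and changing
# coefficients, bounds, objective, right-hand sides, senses and names.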
def CPXXnewrows(env: 'CPXCENVptr', lp: 'CPXLPptr', rcnt: 'CPXDIM', py_rhs: 'double const *', sense: 'char const *', py_rngval: 'double const *', rowname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXnewrows(env, lp, rcnt, py_rhs, sense, py_rngval, rowname)
def CPXXaddrows(env: 'CPXCENVptr', lp: 'CPXLPptr', ccnt: 'CPXDIM', rcnt: 'CPXDIM', nzcnt: 'CPXNNZ', py_rhs: 'double const *', sense: 'char const *', py_matbeg: 'CPXNNZ const *', colname: 'char const *const *', rowname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXaddrows(env, lp, ccnt, rcnt, nzcnt, py_rhs, sense, py_matbeg, colname, rowname)
def CPXXnewcols(env: 'CPXCENVptr', lp: 'CPXLPptr', ccnt: 'CPXDIM', py_obj: 'double const *', py_lb: 'double const *', py_ub: 'double const *', xctype: 'char const *', colname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXnewcols(env, lp, ccnt, py_obj, py_lb, py_ub, xctype, colname)
def CPXXaddcols(env: 'CPXCENVptr', lp: 'CPXLPptr', ccnt: 'CPXDIM', nzcnt: 'CPXNNZ', py_obj: 'double const *', py_matbeg: 'CPXNNZ const *', py_lb: 'double const *', py_ub: 'double const *', colname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXaddcols(env, lp, ccnt, nzcnt, py_obj, py_matbeg, py_lb, py_ub, colname)
def CPXXdelrows(env: 'CPXCENVptr', lp: 'CPXLPptr', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXdelrows(env, lp, begin, end)
def CPXXdelcols(env: 'CPXCENVptr', lp: 'CPXLPptr', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXdelcols(env, lp, begin, end)
def CPXXchgrowname(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', py_indices: 'CPXDIM const *', newname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXchgrowname(env, lp, cnt, py_indices, newname)
def CPXXchgcolname(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', py_indices: 'CPXDIM const *', newname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXchgcolname(env, lp, cnt, py_indices, newname)
def CPXXdelnames(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXdelnames(env, lp)
def CPXXchgprobname(env: 'CPXCENVptr', lp: 'CPXLPptr', probname_str: 'char const *') -> "int":
return _pycplex_platform.CPXXchgprobname(env, lp, probname_str)
def CPXXchgcoeflist(env: 'CPXCENVptr', lp: 'CPXLPptr', numcoefs: 'CPXNNZ', py_rowlist: 'CPXDIM const *', py_collist: 'CPXDIM const *', py_vallist: 'double const *') -> "int":
return _pycplex_platform.CPXXchgcoeflist(env, lp, numcoefs, py_rowlist, py_collist, py_vallist)
def CPXXchgbds(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', py_indices: 'CPXDIM const *', lu: 'char const *', py_bd: 'double const *') -> "int":
return _pycplex_platform.CPXXchgbds(env, lp, cnt, py_indices, lu, py_bd)
def CPXXchgobj(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', py_indices: 'CPXDIM const *', py_values: 'double const *') -> "int":
return _pycplex_platform.CPXXchgobj(env, lp, cnt, py_indices, py_values)
def CPXXchgrhs(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', py_indices: 'CPXDIM const *', py_values: 'double const *') -> "int":
return _pycplex_platform.CPXXchgrhs(env, lp, cnt, py_indices, py_values)
def CPXXchgrngval(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', py_indices: 'CPXDIM const *', py_values: 'double const *') -> "int":
return _pycplex_platform.CPXXchgrngval(env, lp, cnt, py_indices, py_values)
def CPXXchgsense(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', py_indices: 'CPXDIM const *', sense: 'char const *') -> "int":
return _pycplex_platform.CPXXchgsense(env, lp, cnt, py_indices, sense)
def CPXXchgobjsen(env: 'CPXCENVptr', lp: 'CPXLPptr', maxormin: 'int') -> "int":
return _pycplex_platform.CPXXchgobjsen(env, lp, maxormin)
def CPXXchgprobtype(env: 'CPXCENVptr', lp: 'CPXLPptr', type: 'int') -> "int":
return _pycplex_platform.CPXXchgprobtype(env, lp, type)
def CPXXchgprobtypesolnpool(env: 'CPXCENVptr', lp: 'CPXLPptr', type: 'int', soln: 'int') -> "int":
return _pycplex_platform.CPXXchgprobtypesolnpool(env, lp, type, soln)
def CPXXcompletelp(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXcompletelp(env, lp)
def CPXXpreaddrows(env: 'CPXCENVptr', lp: 'CPXLPptr', rcnt: 'CPXDIM', nzcnt: 'CPXNNZ', py_rhs: 'double const *', sense: 'char const *', py_rmatbeg: 'CPXNNZ const *', py_rmatind: 'CPXDIM const *', py_rmatval: 'double const *', rowname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXpreaddrows(env, lp, rcnt, nzcnt, py_rhs, sense, py_rmatbeg, py_rmatind, py_rmatval, rowname)
def CPXXprechgobj(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', indices: 'CPXDIM const *', values: 'double const *') -> "int":
return _pycplex_platform.CPXXprechgobj(env, lp, cnt, indices, values)
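
# Model queries: dimensions, matrix data, bounds, names and index lookup.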
def CPXXgetnumcols(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumcols(env, lp)
def CPXXgetnumrows(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumrows(env, lp)
def CPXXgetnumnz(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXNNZ":
return _pycplex_platform.CPXXgetnumnz(env, lp)
def CPXXgetobjsen(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetobjsen(env, lp)
def CPXXgetobj(env: 'CPXCENVptr', lp: 'CPXCLPptr', obj: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetobj(env, lp, obj, begin, end)
def CPXXgetrhs(env: 'CPXCENVptr', lp: 'CPXCLPptr', rhs: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetrhs(env, lp, rhs, begin, end)
def CPXXgetsense(env: 'CPXCENVptr', lp: 'CPXCLPptr', sense: 'char *') -> "CPXDIM":
return _pycplex_platform.CPXXgetsense(env, lp, sense)
def CPXXgetcols(env: 'CPXCENVptr', lp: 'CPXCLPptr', nzcnt_p: 'CPXNNZ *') -> "CPXDIM":
return _pycplex_platform.CPXXgetcols(env, lp, nzcnt_p)
def CPXXgetrows(env: 'CPXCENVptr', lp: 'CPXCLPptr', nzcnt_p: 'CPXNNZ *') -> "CPXDIM":
return _pycplex_platform.CPXXgetrows(env, lp, nzcnt_p)
def CPXXgetlb(env: 'CPXCENVptr', lp: 'CPXCLPptr', lb: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetlb(env, lp, lb, begin, end)
def CPXXgetub(env: 'CPXCENVptr', lp: 'CPXCLPptr', ub: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetub(env, lp, ub, begin, end)
def CPXXgetrngval(env: 'CPXCENVptr', lp: 'CPXCLPptr', rngval: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetrngval(env, lp, rngval, begin, end)
def CPXXgetprobname(env: 'CPXCENVptr', lp: 'CPXCLPptr', buf_str: 'char *') -> "CPXSIZE *":
return _pycplex_platform.CPXXgetprobname(env, lp, buf_str)
def CPXXgetobjname(env: 'CPXCENVptr', lp: 'CPXCLPptr', buf_str: 'char *') -> "CPXSIZE *":
return _pycplex_platform.CPXXgetobjname(env, lp, buf_str)
def CPXXgetcolname(env: 'CPXCENVptr', lp: 'CPXCLPptr', name: 'char **') -> "CPXDIM":
return _pycplex_platform.CPXXgetcolname(env, lp, name)
def CPXXgetrowname(env: 'CPXCENVptr', lp: 'CPXCLPptr', name: 'char **') -> "CPXDIM":
return _pycplex_platform.CPXXgetrowname(env, lp, name)
def CPXXgetcoef(env: 'CPXCENVptr', lp: 'CPXCLPptr', i: 'CPXDIM', j: 'CPXDIM', coef_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetcoef(env, lp, i, j, coef_p)
def CPXXgetrowindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', lname_str: 'char const *', index_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetrowindex(env, lp, lname_str, index_p)
def CPXXgetcolindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', lname_str: 'char const *', index_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetcolindex(env, lp, lname_str, index_p)
def CPXXgetprobtype(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetprobtype(env, lp)
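
# Parameter management, mirroring CPXXsetintparam & co. in the C callable
# library.  Illustrative pseudocode only -- the int*/double* output
# arguments are marshalled by SWIG typemaps that are not visible in this
# file, and `whichparam` stands for one of the CPX_PARAM_* constants,
# which are not defined here:
#
#     CPXXsetintparam(env, whichparam, 1)        # set an integer parameter
#     CPXXgetintparam(env, whichparam, value_p)  # read it back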
def CPXXsetdefaults(env: 'CPXENVptr') -> "int":
return _pycplex_platform.CPXXsetdefaults(env)
def CPXXsetintparam(env: 'CPXENVptr', whichparam: 'int', newvalue: 'int') -> "int":
return _pycplex_platform.CPXXsetintparam(env, whichparam, newvalue)
def CPXXsetlongparam(env: 'CPXENVptr', whichparam: 'int', newvalue: 'CPXLONG') -> "int":
return _pycplex_platform.CPXXsetlongparam(env, whichparam, newvalue)
def CPXXsetdblparam(env: 'CPXENVptr', whichparam: 'int', newvalue: 'double') -> "int":
return _pycplex_platform.CPXXsetdblparam(env, whichparam, newvalue)
def CPXXsetstrparam(env: 'CPXENVptr', whichparam: 'int', newvalue_str: 'char const *') -> "int":
return _pycplex_platform.CPXXsetstrparam(env, whichparam, newvalue_str)
def CPXXgetintparam(env: 'CPXCENVptr', whichparam: 'int', value_p: 'int *') -> "int":
return _pycplex_platform.CPXXgetintparam(env, whichparam, value_p)
def CPXXgetlongparam(env: 'CPXCENVptr', whichparam: 'int', value_p: 'CPXLONG *') -> "int":
return _pycplex_platform.CPXXgetlongparam(env, whichparam, value_p)
def CPXXgetdblparam(env: 'CPXCENVptr', whichparam: 'int', value_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetdblparam(env, whichparam, value_p)
def CPXXgetstrparam(env: 'CPXCENVptr', whichparam: 'int', param_buffer_str: 'char *') -> "char *":
return _pycplex_platform.CPXXgetstrparam(env, whichparam, param_buffer_str)
def CPXXinfointparam(env: 'CPXCENVptr', whichparam: 'int', defvalue_p: 'int *', minvalue_p: 'int *', maxvalue_p: 'int *') -> "int":
return _pycplex_platform.CPXXinfointparam(env, whichparam, defvalue_p, minvalue_p, maxvalue_p)
def CPXXinfolongparam(env: 'CPXCENVptr', whichparam: 'int', defvalue_p: 'CPXLONG *', minvalue_p: 'CPXLONG *', maxvalue_p: 'CPXLONG *') -> "int":
return _pycplex_platform.CPXXinfolongparam(env, whichparam, defvalue_p, minvalue_p, maxvalue_p)
def CPXXinfodblparam(env: 'CPXCENVptr', whichparam: 'int', defvalue_p: 'double *', minvalue_p: 'double *', maxvalue_p: 'double *') -> "int":
return _pycplex_platform.CPXXinfodblparam(env, whichparam, defvalue_p, minvalue_p, maxvalue_p)
def CPXXinfostrparam(env: 'CPXCENVptr', whichparam: 'int', param_buffer_str: 'char *') -> "char *":
return _pycplex_platform.CPXXinfostrparam(env, whichparam, param_buffer_str)
def CPXXgetparamtype(env: 'CPXCENVptr', whichparam: 'int', paramtype: 'int *') -> "int":
return _pycplex_platform.CPXXgetparamtype(env, whichparam, paramtype)
def CPXXEfixparam(env: 'CPXCENVptr', paramnum: 'int') -> "int":
return _pycplex_platform.CPXXEfixparam(env, paramnum)
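
# Environment lifecycle, version information and message channels.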
def CPXXversion(env: 'CPXCENVptr') -> "CPXCCHARptr":
return _pycplex_platform.CPXXversion(env)
def CPXXversionnumber(env: 'CPXCENVptr', version_p: 'int *') -> "int":
return _pycplex_platform.CPXXversionnumber(env, version_p)
def CPXXopenCPLEX(status_p: 'int *') -> "CPXENVptr":
return _pycplex_platform.CPXXopenCPLEX(status_p)
def CPXXcloseCPLEX(env_p: 'CPXENVptr *') -> "int":
return _pycplex_platform.CPXXcloseCPLEX(env_p)
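
# Rough sketch of the call sequence these wrappers mirror (pseudocode only:
# the status_p / lp_p / env_p output arguments are handled by SWIG typemaps
# whose Python-side convention is not shown in this file):
#
#     env = CPXXopenCPLEX(status_p)               # acquire an environment
#     lp = CPXXcreateprob(env, status_p, "prob")  # create a problem object
#     ...build the model with CPXXnewcols / CPXXaddrows...
#     CPXXlpopt(env, lp)                          # solve as an LP
#     CPXXgetobjval(env, lp, objval_p)            # query the solution
#     CPXXfreeprob(env, lp_p)                     # release the problem
#     CPXXcloseCPLEX(env_p)                       # release the environment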
def CPXXgetchannels(env: 'CPXCENVptr', cpxresults_p: 'CPXCHANNELptr *', cpxwarning_p: 'CPXCHANNELptr *', cpxerror_p: 'CPXCHANNELptr *', cpxlog_p: 'CPXCHANNELptr *') -> "int":
return _pycplex_platform.CPXXgetchannels(env, cpxresults_p, cpxwarning_p, cpxerror_p, cpxlog_p)
def CPXXaddfuncdest(env: 'CPXCENVptr', channel: 'CPXCHANNELptr', handle: 'void *') -> "int":
return _pycplex_platform.CPXXaddfuncdest(env, channel, handle)
def CPXXdelfuncdest(env: 'CPXCENVptr', channel: 'CPXCHANNELptr', handle: 'void *') -> "int":
return _pycplex_platform.CPXXdelfuncdest(env, channel, handle)
def CPXXgeterrorstring(env: 'CPXCENVptr', errcode: 'int', error_buffer_str: 'char *') -> "char *":
return _pycplex_platform.CPXXgeterrorstring(env, errcode, error_buffer_str)
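
# Legacy callback installers for LP, network and tuning progress, plus a
# termination-signal pointer (CPXXsetterminate).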
def CPXXsetlpcallbackfunc(env: 'CPXENVptr', lpcallback: 'int (*)(CPXCENVptr,void *,int,void *)') -> "int":
return _pycplex_platform.CPXXsetlpcallbackfunc(env, lpcallback)
def CPXXsetnetcallbackfunc(env: 'CPXENVptr', netcallback: 'int (*)(CPXCENVptr,void *,int,void *)') -> "int":
return _pycplex_platform.CPXXsetnetcallbackfunc(env, netcallback)
def CPXXsettuningcallbackfunc(env: 'CPXENVptr', tuningcallback: 'int (*)(CPXCENVptr,void *,int,void *)') -> "int":
return _pycplex_platform.CPXXsettuningcallbackfunc(env, tuningcallback)
def CPXXsetterminate(env: 'CPXENVptr', terminate_p: 'int volatile *') -> "int":
return _pycplex_platform.CPXXsetterminate(env, terminate_p)
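
# Advanced simplex/basis routines: basis header, basis-inverse products
# (binv*), FTRAN/BTRAN solves, unbounded rays and Farkas certificates.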
def CPXXgetbhead(env: 'CPXCENVptr', lp: 'CPXCLPptr', head: 'CPXDIM *', x: 'double *') -> "int":
return _pycplex_platform.CPXXgetbhead(env, lp, head, x)
def CPXXbinvcol(env: 'CPXCENVptr', lp: 'CPXCLPptr', j: 'CPXDIM', x: 'double *') -> "int":
return _pycplex_platform.CPXXbinvcol(env, lp, j, x)
def CPXXbinvrow(env: 'CPXCENVptr', lp: 'CPXCLPptr', i: 'CPXDIM', y: 'double *') -> "int":
return _pycplex_platform.CPXXbinvrow(env, lp, i, y)
def CPXXbinvacol(env: 'CPXCENVptr', lp: 'CPXCLPptr', j: 'CPXDIM', x: 'double *') -> "int":
return _pycplex_platform.CPXXbinvacol(env, lp, j, x)
def CPXXbinvarow(env: 'CPXCENVptr', lp: 'CPXCLPptr', i: 'CPXDIM', z: 'double *') -> "int":
return _pycplex_platform.CPXXbinvarow(env, lp, i, z)
def CPXXftran(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double *') -> "int":
return _pycplex_platform.CPXXftran(env, lp, x)
def CPXXbtran(env: 'CPXCENVptr', lp: 'CPXCLPptr', y: 'double *') -> "int":
return _pycplex_platform.CPXXbtran(env, lp, y)
def CPXXgetijrow(env: 'CPXCENVptr', lp: 'CPXCLPptr', i: 'CPXDIM', j: 'CPXDIM', row_p: 'int *') -> "int":
return _pycplex_platform.CPXXgetijrow(env, lp, i, j, row_p)
def CPXXgetray(env: 'CPXCENVptr', lp: 'CPXCLPptr', z: 'double *') -> "int":
return _pycplex_platform.CPXXgetray(env, lp, z)
def CPXXmdleave(env: 'CPXCENVptr', lp: 'CPXLPptr', goodlist: 'CPXDIM const *', goodlen: 'CPXDIM', downratio: 'double *', upratio: 'double *') -> "int":
return _pycplex_platform.CPXXmdleave(env, lp, goodlist, goodlen, downratio, upratio)
def CPXXdualfarkas(env: 'CPXCENVptr', lp: 'CPXCLPptr', y: 'double *', proof_p: 'double *') -> "int":
return _pycplex_platform.CPXXdualfarkas(env, lp, y, proof_p)
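
# Objective offset, pricing norms, bound strengthening and the presolve
# interface: crush/uncrush map points and constraints between the original
# and the presolved model.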
def CPXXchgobjoffset(env: 'CPXCENVptr', lp: 'CPXLPptr', offset: 'double') -> "int":
return _pycplex_platform.CPXXchgobjoffset(env, lp, offset)
def CPXXgetobjoffset(env: 'CPXCENVptr', lp: 'CPXCLPptr', objoffset_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetobjoffset(env, lp, objoffset_p)
def CPXXgetbasednorms(env: 'CPXCENVptr', lp: 'CPXCLPptr', cstat: 'int *', rstat: 'int *', dnorm: 'double *') -> "int":
return _pycplex_platform.CPXXgetbasednorms(env, lp, cstat, rstat, dnorm)
def CPXXgetdnorms(env: 'CPXCENVptr', lp: 'CPXCLPptr', norm: 'double *', head: 'CPXDIM *', len_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetdnorms(env, lp, norm, head, len_p)
def CPXXgetpnorms(env: 'CPXCENVptr', lp: 'CPXCLPptr', cnorm: 'double *', rnorm: 'double *', len_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetpnorms(env, lp, cnorm, rnorm, len_p)
def CPXXtightenbds(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', indices: 'CPXDIM const *', lu: 'char const *', bd: 'double const *') -> "int":
return _pycplex_platform.CPXXtightenbds(env, lp, cnt, indices, lu, bd)
def CPXXbasicpresolve(env: 'CPXCENVptr', lp: 'CPXLPptr', redlb: 'double *', redub: 'double *', rstat: 'int *') -> "int":
return _pycplex_platform.CPXXbasicpresolve(env, lp, redlb, redub, rstat)
def CPXXslackfromx(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double const *', slack: 'double *') -> "int":
return _pycplex_platform.CPXXslackfromx(env, lp, x, slack)
def CPXXdjfrompi(env: 'CPXCENVptr', lp: 'CPXCLPptr', pi: 'double const *', dj: 'double *') -> "int":
return _pycplex_platform.CPXXdjfrompi(env, lp, pi, dj)
def CPXXqpdjfrompi(env: 'CPXCENVptr', lp: 'CPXCLPptr', pi: 'double const *', x: 'double const *', dj: 'double *') -> "int":
return _pycplex_platform.CPXXqpdjfrompi(env, lp, pi, x, dj)
def CPXXfreepresolve(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXfreepresolve(env, lp)
def CPXXgetredlp(env: 'CPXCENVptr', lp: 'CPXCLPptr', redlp_p: 'CPXCLPptr *') -> "int":
return _pycplex_platform.CPXXgetredlp(env, lp, redlp_p)
def CPXXcrushx(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double const *', prex: 'double *') -> "int":
return _pycplex_platform.CPXXcrushx(env, lp, x, prex)
def CPXXuncrushx(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double *', prex: 'double const *') -> "int":
return _pycplex_platform.CPXXuncrushx(env, lp, x, prex)
def CPXXcrushpi(env: 'CPXCENVptr', lp: 'CPXCLPptr', pi: 'double const *', prepi: 'double *') -> "int":
return _pycplex_platform.CPXXcrushpi(env, lp, pi, prepi)
def CPXXuncrushpi(env: 'CPXCENVptr', lp: 'CPXCLPptr', pi: 'double *', prepi: 'double const *') -> "int":
return _pycplex_platform.CPXXuncrushpi(env, lp, pi, prepi)
def CPXXcrushform(env: 'CPXCENVptr', lp: 'CPXCLPptr', len: 'CPXDIM', ind: 'CPXDIM const *', val: 'double const *', plen_p: 'CPXDIM *', poffset_p: 'double *', pind: 'CPXDIM *', pval: 'double *') -> "int":
return _pycplex_platform.CPXXcrushform(env, lp, len, ind, val, plen_p, poffset_p, pind, pval)
def CPXXuncrushform(env: 'CPXCENVptr', lp: 'CPXCLPptr', plen: 'CPXDIM', pind: 'CPXDIM const *', pval: 'double const *', len_p: 'CPXDIM *', offset_p: 'double *', ind: 'CPXDIM *', val: 'double *') -> "int":
return _pycplex_platform.CPXXuncrushform(env, lp, plen, pind, pval, len_p, offset_p, ind, val)
def CPXXgetprestat(env: 'CPXCENVptr', lp: 'CPXCLPptr', prestat_p: 'CPXDIM *', pcstat: 'CPXDIM *', prstat: 'CPXDIM *', ocstat: 'CPXDIM *', orstat: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetprestat(env, lp, prestat_p, pcstat, prstat, ocstat, orstat)
def CPXXcopyprotected(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', indices: 'CPXDIM const *') -> "int":
return _pycplex_platform.CPXXcopyprotected(env, lp, cnt, indices)
def CPXXgetprotected(env: 'CPXCENVptr', lp: 'CPXCLPptr', cnt_p: 'CPXDIM *', indices: 'CPXDIM *', pspace: 'CPXDIM', surplus_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetprotected(env, lp, cnt_p, indices, pspace, surplus_p)
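
# Timing (wall-clock and deterministic), branching priority orders,
# MIP starts, and distributed MIP via virtual machine configurations.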
def CPXXgettime(env: 'CPXCENVptr', timestamp: 'double *') -> "int":
return _pycplex_platform.CPXXgettime(env, timestamp)
def CPXXgetdettime(env: 'CPXCENVptr', timestamp: 'double *') -> "int":
return _pycplex_platform.CPXXgetdettime(env, timestamp)
def CPXXcopyorder(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', indices: 'CPXDIM const *', priority: 'CPXDIM const *', direction: 'int const *') -> "int":
return _pycplex_platform.CPXXcopyorder(env, lp, cnt, indices, priority, direction)
def CPXXchgmipstarts(env: 'CPXCENVptr', lp: 'CPXLPptr', mcnt: 'int', py_mipstartindices: 'int const *', nzcnt: 'CPXNNZ', py_beg: 'CPXNNZ const *', py_varindices: 'CPXDIM const *', py_values: 'double const *', py_effortlevel: 'int const *') -> "int":
return _pycplex_platform.CPXXchgmipstarts(env, lp, mcnt, py_mipstartindices, nzcnt, py_beg, py_varindices, py_values, py_effortlevel)
def CPXXaddmipstarts(env: 'CPXCENVptr', lp: 'CPXLPptr', mcnt: 'int', nzcnt: 'CPXNNZ', py_beg: 'CPXNNZ const *', py_varindices: 'CPXDIM const *', py_values: 'double const *', py_effortlevel: 'int const *', mipstartname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXaddmipstarts(env, lp, mcnt, nzcnt, py_beg, py_varindices, py_values, py_effortlevel, mipstartname)
def CPXXdelmipstarts(env: 'CPXCENVptr', lp: 'CPXLPptr', begin: 'int', end: 'int') -> "int":
return _pycplex_platform.CPXXdelmipstarts(env, lp, begin, end)
def CPXXdistmipopt(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXdistmipopt(env, lp)
def CPXXcopyvmconfig(env: 'CPXENVptr', xmlstring: 'char const *') -> "int":
return _pycplex_platform.CPXXcopyvmconfig(env, xmlstring)
def CPXXreadcopyvmconfig(env: 'CPXENVptr', file: 'char const *') -> "int":
return _pycplex_platform.CPXXreadcopyvmconfig(env, file)
def CPXXdelvmconfig(env: 'CPXENVptr') -> "int":
return _pycplex_platform.CPXXdelvmconfig(env)
def CPXEhasvmconfig(env: 'CPXCENVptr', hasvmconfig_p: 'int *') -> "int":
return _pycplex_platform.CPXEhasvmconfig(env, hasvmconfig_p)
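
# MIP progress and solution information: iteration/node counts, best bound,
# relative gap, cutoff, cut counts and MIP start retrieval.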
def CPXXgetmipitcnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetmipitcnt(env, lp)
def CPXXgetbestobjval(env: 'CPXCENVptr', lp: 'CPXCLPptr', objval_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetbestobjval(env, lp, objval_p)
def CPXXgetmiprelgap(env: 'CPXCENVptr', lp: 'CPXCLPptr', gap_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetmiprelgap(env, lp, gap_p)
def CPXXgetcutoff(env: 'CPXCENVptr', lp: 'CPXCLPptr', cutoff_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetcutoff(env, lp, cutoff_p)
def CPXXgetnodecnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetnodecnt(env, lp)
def CPXXgetnodeleftcnt(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetnodeleftcnt(env, lp)
def CPXXgetnodeint(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXLONG":
return _pycplex_platform.CPXXgetnodeint(env, lp)
def CPXXgetnumcuts(env: 'CPXCENVptr', lp: 'CPXCLPptr', cuttype: 'int', num_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetnumcuts(env, lp, cuttype, num_p)
def CPXXgetnummipstarts(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnummipstarts(env, lp)
def CPXXgetmipstarts(env: 'CPXCENVptr', lp: 'CPXCLPptr', nzcnt_p: 'CPXNNZ *', beg: 'CPXNNZ *', varindices: 'CPXDIM *', values: 'double *', effortlevel: 'int *', startspace: 'CPXNNZ', surplus_p: 'CPXNNZ *', begin: 'int', end: 'int') -> "int":
return _pycplex_platform.CPXXgetmipstarts(env, lp, nzcnt_p, beg, varindices, values, effortlevel, startspace, surplus_p, begin, end)
def CPXXgetmipstartname(env: 'CPXCENVptr', lp: 'CPXCLPptr', name: 'char **') -> "int":
return _pycplex_platform.CPXXgetmipstartname(env, lp, name)
def CPXXgetmipstartindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', lname_str: 'char const *', index_p: 'int *') -> "int":
return _pycplex_platform.CPXXgetmipstartindex(env, lp, lname_str, index_p)
def CPXXgetsubstat(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetsubstat(env, lp)
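
# Variable types and special ordered sets (SOS).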
def CPXXchgctype(env: 'CPXCENVptr', lp: 'CPXLPptr', cnt: 'CPXDIM', py_indices: 'CPXDIM const *', xctype: 'char const *') -> "int":
return _pycplex_platform.CPXXchgctype(env, lp, cnt, py_indices, xctype)
def CPXXaddsos(env: 'CPXCENVptr', lp: 'CPXLPptr', numsos: 'CPXDIM', numsosnz: 'CPXNNZ', sostype: 'char const *', py_sosbeg: 'CPXNNZ const *', py_sosind: 'CPXDIM const *', py_soswt: 'double const *', sosname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXaddsos(env, lp, numsos, numsosnz, sostype, py_sosbeg, py_sosind, py_soswt, sosname)
def CPXXdelsos(env: 'CPXCENVptr', lp: 'CPXLPptr', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXdelsos(env, lp, begin, end)
def CPXXgetctype(env: 'CPXCENVptr', lp: 'CPXCLPptr', xctype: 'char *') -> "CPXDIM":
return _pycplex_platform.CPXXgetctype(env, lp, xctype)
def CPXXgetnumsos(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumsos(env, lp)
def CPXXgetsos(env: 'CPXCENVptr', lp: 'CPXCLPptr', numsosnz_p: 'CPXNNZ *') -> "CPXDIM":
return _pycplex_platform.CPXXgetsos(env, lp, numsosnz_p)
def CPXXgetsosname(env: 'CPXCENVptr', lp: 'CPXCLPptr', name: 'char **') -> "CPXDIM":
return _pycplex_platform.CPXXgetsosname(env, lp, name)
def CPXXgetsosindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', lname_str: 'char const *', index_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetsosindex(env, lp, lname_str, index_p)
def CPXXgetsosinfeas(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double const *', infeasout: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetsosinfeas(env, lp, x, infeasout, begin, end)
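
# Indicator constraints: constraints enforced only when a binary indicator
# variable takes a given value.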
def CPXXaddindconstraints(env: 'CPXCENVptr', lp: 'CPXLPptr', indcnt: 'CPXDIM', py_type: 'int const *', py_indvar: 'CPXDIM const *', py_complemented: 'int const *', nzcnt: 'CPXNNZ', py_rhs: 'double const *', sense: 'char const *', linbeg: 'CPXNNZ const *', indname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXaddindconstraints(env, lp, indcnt, py_type, py_indvar, py_complemented, nzcnt, py_rhs, sense, linbeg, indname)
def CPXXgetindconstraints(env: 'CPXCENVptr', lp: 'CPXCLPptr', type: 'int *') -> "CPXDIM":
return _pycplex_platform.CPXXgetindconstraints(env, lp, type)
def CPXXgetnumindconstrs(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumindconstrs(env, lp)
def CPXXgetindconstrindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', lname_str: 'char const *', index_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetindconstrindex(env, lp, lname_str, index_p)
def CPXXgetindconstrname(env: 'CPXCENVptr', lp: 'CPXCLPptr', buf_str: 'char *', which: 'CPXDIM') -> "CPXSIZE *":
return _pycplex_platform.CPXXgetindconstrname(env, lp, buf_str, which)
def CPXXgetindconstrslack(env: 'CPXCENVptr', lp: 'CPXCLPptr', indslack: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetindconstrslack(env, lp, indslack, begin, end)
def CPXXindconstrslackfromx(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double const *', indslack: 'double *') -> "int":
return _pycplex_platform.CPXXindconstrslackfromx(env, lp, x, indslack)
def CPXXgetindconstrinfeas(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double const *', infeasout: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetindconstrinfeas(env, lp, x, infeasout, begin, end)
def CPXXdelindconstrs(env: 'CPXCENVptr', lp: 'CPXLPptr', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXdelindconstrs(env, lp, begin, end)
def CPXXgetnumint(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumint(env, lp)
def CPXXgetnumbin(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumbin(env, lp)
def CPXXgetnumsemicont(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumsemicont(env, lp)
def CPXXgetnumsemiint(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumsemiint(env, lp)
def CPXXgetorder(env: 'CPXCENVptr', lp: 'CPXCLPptr', cnt_p: 'CPXDIM *', indices: 'CPXDIM *', priority: 'CPXDIM *', direction: 'int *', ordspace: 'CPXDIM', surplus_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetorder(env, lp, cnt_p, indices, priority, direction, ordspace, surplus_p)
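
# Solution pool: diversity and range filters plus access to the pooled
# solutions collected during MIP enumeration.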
def CPXXgetsolnpoolnumfilters(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetsolnpoolnumfilters(env, lp)
def CPXXaddsolnpooldivfilter(env: 'CPXCENVptr', lp: 'CPXLPptr', lower_bound: 'double', upper_bound: 'double', nzcnt: 'CPXDIM', ind: 'CPXDIM const *', weight: 'double const *', refval: 'double const *', lname_str: 'char const *') -> "int":
return _pycplex_platform.CPXXaddsolnpooldivfilter(env, lp, lower_bound, upper_bound, nzcnt, ind, weight, refval, lname_str)
def CPXXaddsolnpoolrngfilter(env: 'CPXCENVptr', lp: 'CPXLPptr', lb: 'double', ub: 'double', nzcnt: 'CPXDIM', ind: 'int const *', val: 'double const *', lname_str: 'char const *') -> "int":
return _pycplex_platform.CPXXaddsolnpoolrngfilter(env, lp, lb, ub, nzcnt, ind, val, lname_str)
def CPXXgetsolnpoolfiltertype(env: 'CPXCENVptr', lp: 'CPXCLPptr', ftype_p: 'int *', which: 'int') -> "int":
return _pycplex_platform.CPXXgetsolnpoolfiltertype(env, lp, ftype_p, which)
def CPXXgetsolnpooldivfilter(env: 'CPXCENVptr', lp: 'CPXCLPptr', lower_cutoff_p: 'double *', upper_cutoff_p: 'double *', nzcnt_p: 'CPXDIM *', ind: 'CPXDIM *', val: 'double *', refval: 'double *', space: 'CPXDIM', surplus_p: 'CPXDIM *', which: 'int') -> "int":
    return _pycplex_platform.CPXXgetsolnpooldivfilter(env, lp, lower_cutoff_p, upper_cutoff_p, nzcnt_p, ind, val, refval, space, surplus_p, which)
def CPXXgetsolnpoolrngfilter(env: 'CPXCENVptr', lp: 'CPXCLPptr', lb_p: 'double *', ub_p: 'double *', nzcnt_p: 'CPXDIM *', ind: 'CPXDIM *', val: 'double *', space: 'CPXDIM', surplus_p: 'CPXDIM *', which: 'int') -> "int":
return _pycplex_platform.CPXXgetsolnpoolrngfilter(env, lp, lb_p, ub_p, nzcnt_p, ind, val, space, surplus_p, which)
def CPXXgetsolnpoolfiltername(env: 'CPXCENVptr', lp: 'CPXCLPptr', buf_str: 'char *', which: 'int') -> "CPXSIZE *":
return _pycplex_platform.CPXXgetsolnpoolfiltername(env, lp, buf_str, which)
def CPXXgetsolnpoolfilterindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', lname_str: 'char const *', index_p: 'int *') -> "int":
return _pycplex_platform.CPXXgetsolnpoolfilterindex(env, lp, lname_str, index_p)
def CPXXdelsolnpoolfilters(env: 'CPXCENVptr', lp: 'CPXLPptr', begin: 'int', end: 'int') -> "int":
return _pycplex_platform.CPXXdelsolnpoolfilters(env, lp, begin, end)
def CPXXgetsolnpoolnumsolns(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetsolnpoolnumsolns(env, lp)
def CPXXgetsolnpoolnumreplaced(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetsolnpoolnumreplaced(env, lp)
def CPXXgetsolnpoolmeanobjval(env: 'CPXCENVptr', lp: 'CPXCLPptr', meanobjval_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetsolnpoolmeanobjval(env, lp, meanobjval_p)
def CPXXgetsolnpoolobjval(env: 'CPXCENVptr', lp: 'CPXCLPptr', soln: 'int', objval_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetsolnpoolobjval(env, lp, soln, objval_p)
def CPXXgetsolnpoolx(env: 'CPXCENVptr', lp: 'CPXCLPptr', soln: 'int', x: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetsolnpoolx(env, lp, soln, x, begin, end)
def CPXXgetsolnpoolslack(env: 'CPXCENVptr', lp: 'CPXCLPptr', soln: 'int', slack: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetsolnpoolslack(env, lp, soln, slack, begin, end)
def CPXXgetsolnpoolqconstrslack(env: 'CPXCENVptr', lp: 'CPXCLPptr', soln: 'int', qcslack: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetsolnpoolqconstrslack(env, lp, soln, qcslack, begin, end)
def CPXXgetsolnpoolsolnname(env: 'CPXCENVptr', lp: 'CPXCLPptr', buf_str: 'char *', which: 'int') -> "CPXSIZE *":
return _pycplex_platform.CPXXgetsolnpoolsolnname(env, lp, buf_str, which)
def CPXXgetsolnpoolsolnindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', lname_str: 'char const *', index_p: 'int *') -> "int":
return _pycplex_platform.CPXXgetsolnpoolsolnindex(env, lp, lname_str, index_p)
def CPXXdelsolnpoolsolns(env: 'CPXCENVptr', lp: 'CPXLPptr', begin: 'int', end: 'int') -> "int":
return _pycplex_platform.CPXXdelsolnpoolsolns(env, lp, begin, end)
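
# Legacy MIP control callbacks: info, branch, lazy-constraint, user-cut,
# node-selection, heuristic, incumbent and solve callbacks.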
def CPXXsetinfocallbackfunc(env: 'CPXENVptr', infocallback: 'int (*)(CPXCENVptr,void *,int,void *)') -> "int":
return _pycplex_platform.CPXXsetinfocallbackfunc(env, infocallback)
def CPXXsetmipcallbackfunc(env: 'CPXENVptr', mipcallback: 'int (*)(CPXCENVptr,void *,int,void *)') -> "int":
return _pycplex_platform.CPXXsetmipcallbackfunc(env, mipcallback)
def CPXXsetbranchcallbackfunc(env: 'CPXENVptr', branchcallback: 'int (*)(CPXCENVptr,void *,int,void *,int,CPXDIM,int,CPXDIM,CPXDIM const *,CPXDIM const *,char const *,double const *,double const *,int *)') -> "int":
return _pycplex_platform.CPXXsetbranchcallbackfunc(env, branchcallback)
def CPXXsetbranchnosolncallbackfunc(env: 'CPXENVptr', branchnosolncallback: 'int (*)(CPXCENVptr,void *,int,void *,int,CPXDIM,int,CPXDIM,CPXDIM const *,CPXDIM const *,char const *,double const *,double const *,int *)') -> "int":
return _pycplex_platform.CPXXsetbranchnosolncallbackfunc(env, branchnosolncallback)
def CPXXsetlazyconstraintcallbackfunc(env: 'CPXENVptr', lazyconcallback: 'int (*)(CPXCENVptr,void *,int,void *,int *)') -> "int":
return _pycplex_platform.CPXXsetlazyconstraintcallbackfunc(env, lazyconcallback)
def CPXXsetusercutcallbackfunc(env: 'CPXENVptr', usercutcallback: 'int (*)(CPXCENVptr,void *,int,void *,int *)') -> "int":
return _pycplex_platform.CPXXsetusercutcallbackfunc(env, usercutcallback)
def CPXXsetnodecallbackfunc(env: 'CPXENVptr', nodecallback: 'int (*)(CPXCENVptr,void *,int,void *,CPXCNT *,int *)') -> "int":
return _pycplex_platform.CPXXsetnodecallbackfunc(env, nodecallback)
def CPXXsetheuristiccallbackfunc(env: 'CPXENVptr', heuristiccallback: 'int (*)(CPXCENVptr,void *,int,void *,double *,double *,int *,int *)') -> "int":
return _pycplex_platform.CPXXsetheuristiccallbackfunc(env, heuristiccallback)
def CPXXsetincumbentcallbackfunc(env: 'CPXENVptr', incumbentcallback: 'int (*)(CPXCENVptr,void *,int,void *,double,double *,int *,int *)') -> "int":
return _pycplex_platform.CPXXsetincumbentcallbackfunc(env, incumbentcallback)
def CPXXsetsolvecallbackfunc(env: 'CPXENVptr', solvecallback: 'int (*)(CPXCENVptr,void *,int,void *,int *)') -> "int":
return _pycplex_platform.CPXXsetsolvecallbackfunc(env, solvecallback)
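
# Query and action functions available inside the legacy callbacks above:
# node data, incumbents, bounds, pseudocosts, plus cut injection and
# custom branching.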
def CPXXgetcallbacknodeinfo(env: 'CPXCENVptr', nodeindex: 'CPXCNT') -> "void *":
return _pycplex_platform.CPXXgetcallbacknodeinfo(env, nodeindex)
def CPXXcallbacksetuserhandle(env: 'CPXCENVptr', userhandle: 'void *') -> "void **":
return _pycplex_platform.CPXXcallbacksetuserhandle(env, userhandle)
def CPXXcallbacksetnodeuserhandle(env: 'CPXCENVptr', nodeindex: 'CPXCNT') -> "void **":
return _pycplex_platform.CPXXcallbacksetnodeuserhandle(env, nodeindex)
def CPXXgetcallbackseqinfo(env: 'CPXCENVptr', nodeindex: 'CPXCNT') -> "void *":
return _pycplex_platform.CPXXgetcallbackseqinfo(env, nodeindex)
def CPXXgetcallbacksosinfo(env: 'CPXCENVptr', sosindex: 'CPXDIM', member: 'CPXDIM', whichinfo: 'int', result_p: 'void *') -> "int":
return _pycplex_platform.CPXXgetcallbacksosinfo(env, sosindex, member, whichinfo, result_p)
def CPXXgetcallbackindicatorinfo(env: 'CPXCENVptr', iindex: 'CPXDIM', whichinfo: 'int', result_p: 'void *') -> "int":
return _pycplex_platform.CPXXgetcallbackindicatorinfo(env, iindex, whichinfo, result_p)
def CPXXcutcallbackadd(env: 'CPXCENVptr', nzcnt: 'CPXDIM', rhs: 'double', sense: 'int', cutind: 'int const *', cutval: 'double const *', purgeable: 'int') -> "int":
return _pycplex_platform.CPXXcutcallbackadd(env, nzcnt, rhs, sense, cutind, cutval, purgeable)
def CPXXcutcallbackaddlocal(env: 'CPXCENVptr', nzcnt: 'CPXDIM', rhs: 'double', sense: 'int', cutind: 'CPXDIM const *', cutval: 'double const *') -> "int":
return _pycplex_platform.CPXXcutcallbackaddlocal(env, nzcnt, rhs, sense, cutind, cutval)
def CPXXbranchcallbackbranchgeneral(env: 'CPXCENVptr', varcnt: 'CPXDIM', varind: 'CPXDIM const *', varlu: 'char const *', varbd: 'double const *', rcnt: 'CPXDIM', nzcnt: 'CPXNNZ', rhs: 'double const *', sense: 'char const *', rmatbeg: 'CPXNNZ const *', rmatind: 'CPXDIM const *', rmatval: 'double const *', nodeest: 'double', userhandle: 'void *', seqnum_p: 'CPXCNT *') -> "int":
return _pycplex_platform.CPXXbranchcallbackbranchgeneral(env, varcnt, varind, varlu, varbd, rcnt, nzcnt, rhs, sense, rmatbeg, rmatind, rmatval, nodeest, userhandle, seqnum_p)
def CPXXbranchcallbackbranchasCPLEX(env: 'CPXCENVptr', num: 'int', userhandle: 'void *', seqnum_p: 'CPXCNT *') -> "int":
return _pycplex_platform.CPXXbranchcallbackbranchasCPLEX(env, num, userhandle, seqnum_p)
def CPXXgetcallbacknodex(env: 'CPXCENVptr', x: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetcallbacknodex(env, x, begin, end)
def CPXXgetcallbacknodeobjval(env: 'CPXCENVptr', objval_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetcallbacknodeobjval(env, objval_p)
def CPXXgetcallbackorder(env: 'CPXCENVptr', priority: 'CPXDIM *', direction: 'int *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetcallbackorder(env, priority, direction, begin, end)
def CPXXgetcallbackpseudocosts(env: 'CPXCENVptr', uppc: 'double *', downpc: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetcallbackpseudocosts(env, uppc, downpc, begin, end)
def CPXXgetcallbackincumbent(env: 'CPXCENVptr', x: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetcallbackincumbent(env, x, begin, end)
def CPXXgetcallbacknodeintfeas(env: 'CPXCENVptr', feas: 'int *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetcallbacknodeintfeas(env, feas, begin, end)
def CPXXgetcallbackgloballb(env: 'CPXCENVptr', lb: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetcallbackgloballb(env, lb, begin, end)
def CPXXgetcallbackglobalub(env: 'CPXCENVptr', ub: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetcallbackglobalub(env, ub, begin, end)
def CPXXgetcallbacknodelb(env: 'CPXCENVptr', lb: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetcallbacknodelb(env, lb, begin, end)
def CPXXgetcallbacknodeub(env: 'CPXCENVptr', ub: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetcallbacknodeub(env, ub, begin, end)
def CPXXgetcallbacknodestat(env: 'CPXCENVptr', nodestat_p: 'int *') -> "int":
return _pycplex_platform.CPXXgetcallbacknodestat(env, nodestat_p)
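
# User cuts and lazy constraints attached to the model ahead of the solve.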
def CPXXaddusercuts(env: 'CPXCENVptr', lp: 'CPXLPptr', rcnt: 'CPXDIM', nzcnt: 'CPXNNZ', py_rhs: 'double const *', sense: 'char const *', py_rmatbeg: 'CPXNNZ const *', py_rmatind: 'CPXDIM const *', py_rmatval: 'double const *', rowname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXaddusercuts(env, lp, rcnt, nzcnt, py_rhs, sense, py_rmatbeg, py_rmatind, py_rmatval, rowname)
def CPXXaddlazyconstraints(env: 'CPXCENVptr', lp: 'CPXLPptr', rcnt: 'CPXDIM', nzcnt: 'CPXNNZ', py_rhs: 'double const *', sense: 'char const *', py_rmatbeg: 'CPXNNZ const *', py_rmatind: 'CPXDIM const *', py_rmatval: 'double const *', rowname: 'char const *const *') -> "int":
return _pycplex_platform.CPXXaddlazyconstraints(env, lp, rcnt, nzcnt, py_rhs, sense, py_rmatbeg, py_rmatind, py_rmatval, rowname)
def CPXXfreeusercuts(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXfreeusercuts(env, lp)
def CPXXfreelazyconstraints(env: 'CPXCENVptr', lp: 'CPXLPptr') -> "int":
return _pycplex_platform.CPXXfreelazyconstraints(env, lp)
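
# Quadratic objective: loading, changing and querying the Q matrix.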
def CPXXcopyquad(env: 'CPXCENVptr', lp: 'CPXLPptr', py_qmatbeg: 'CPXNNZ const *', py_qmatcnt: 'CPXDIM const *', py_qmatind: 'CPXDIM const *', py_qmatval: 'double const *') -> "int":
return _pycplex_platform.CPXXcopyquad(env, lp, py_qmatbeg, py_qmatcnt, py_qmatind, py_qmatval)
def CPXXcopyqpsep(env: 'CPXCENVptr', lp: 'CPXLPptr', py_qsepvec: 'double const *') -> "int":
return _pycplex_platform.CPXXcopyqpsep(env, lp, py_qsepvec)
def CPXXchgqpcoef(env: 'CPXCENVptr', lp: 'CPXLPptr', i: 'CPXDIM', j: 'CPXDIM', newvalue: 'double') -> "int":
return _pycplex_platform.CPXXchgqpcoef(env, lp, i, j, newvalue)
def CPXXgetnumqpnz(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXNNZ":
return _pycplex_platform.CPXXgetnumqpnz(env, lp)
def CPXXgetnumquad(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumquad(env, lp)
def CPXXgetqpcoef(env: 'CPXCENVptr', lp: 'CPXCLPptr', rownum: 'CPXDIM', colnum: 'CPXDIM', coef_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetqpcoef(env, lp, rownum, colnum, coef_p)
def CPXXgetquad(env: 'CPXCENVptr', lp: 'CPXCLPptr', nzcnt_p: 'CPXNNZ *', qmatbeg: 'CPXNNZ *', qmatind: 'CPXDIM *', qmatval: 'double *', qmatspace: 'CPXNNZ', surplus_p: 'CPXNNZ *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetquad(env, lp, nzcnt_p, qmatbeg, qmatind, qmatval, qmatspace, surplus_p, begin, end)
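
# Quadratic constraints: add, delete and query, including slacks and
# infeasibility measures.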
def CPXXaddqconstr(env: 'CPXCENVptr', lp: 'CPXLPptr', linnzcnt: 'CPXDIM', quadnzcnt: 'CPXNNZ', rhs: 'double', sense: 'int', py_linind: 'CPXDIM const *', py_linval: 'double const *', py_quadrow: 'CPXDIM const *', py_quadcol: 'CPXDIM const *', py_quadval: 'double const *', lname_str: 'char const *') -> "int":
return _pycplex_platform.CPXXaddqconstr(env, lp, linnzcnt, quadnzcnt, rhs, sense, py_linind, py_linval, py_quadrow, py_quadcol, py_quadval, lname_str)
def CPXXdelqconstrs(env: 'CPXCENVptr', lp: 'CPXLPptr', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXdelqconstrs(env, lp, begin, end)
def CPXXgetnumqconstrs(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumqconstrs(env, lp)
def CPXXgetqconstrindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', lname_str: 'char const *', index_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetqconstrindex(env, lp, lname_str, index_p)
def CPXXgetqconstr(env: 'CPXCENVptr', lp: 'CPXCLPptr', linnzcnt_p: 'CPXDIM *') -> "CPXDIM":
return _pycplex_platform.CPXXgetqconstr(env, lp, linnzcnt_p)
def CPXXgetqconstrname(env: 'CPXCENVptr', lp: 'CPXCLPptr', buf_str: 'char *', which: 'CPXDIM') -> "CPXSIZE *":
return _pycplex_platform.CPXXgetqconstrname(env, lp, buf_str, which)
def CPXXgetqconstrslack(env: 'CPXCENVptr', lp: 'CPXCLPptr', qcslack: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetqconstrslack(env, lp, qcslack, begin, end)
def CPXXqconstrslackfromx(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double const *', qcslack: 'double *') -> "int":
return _pycplex_platform.CPXXqconstrslackfromx(env, lp, x, qcslack)
def CPXXgetqconstrinfeas(env: 'CPXCENVptr', lp: 'CPXCLPptr', x: 'double const *', infeasout: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetqconstrinfeas(env, lp, x, infeasout, begin, end)
def CPXXgetxqxax(env: 'CPXCENVptr', lp: 'CPXCLPptr', xqxax: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetxqxax(env, lp, xqxax, begin, end)
def CPXXgetqconstrdslack(env: 'CPXCENVptr', lp: 'CPXCLPptr', qind: 'CPXDIM') -> "CPXDIM *":
return _pycplex_platform.CPXXgetqconstrdslack(env, lp, qind)
def CPXXnewlongannotation(env: 'CPXCENVptr', lp: 'CPXLPptr', annotationname_str: 'char const *', defval: 'CPXLONG') -> "int":
return _pycplex_platform.CPXXnewlongannotation(env, lp, annotationname_str, defval)
def CPXXnewdblannotation(env: 'CPXCENVptr', lp: 'CPXLPptr', annotationname_str: 'char const *', defval: 'double') -> "int":
return _pycplex_platform.CPXXnewdblannotation(env, lp, annotationname_str, defval)
def CPXXdellongannotations(env: 'CPXCENVptr', lp: 'CPXLPptr', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXdellongannotations(env, lp, begin, end)
def CPXXdeldblannotations(env: 'CPXCENVptr', lp: 'CPXLPptr', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXdeldblannotations(env, lp, begin, end)
def CPXXgetlongannotationindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', annotationname_str: 'char const *', index_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetlongannotationindex(env, lp, annotationname_str, index_p)
def CPXXgetdblannotationindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', annotationname_str: 'char const *', index_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetdblannotationindex(env, lp, annotationname_str, index_p)
def CPXXgetlongannotationname(env: 'CPXCENVptr', lp: 'CPXCLPptr', idx: 'CPXDIM', buf_str: 'char *') -> "CPXSIZE *":
return _pycplex_platform.CPXXgetlongannotationname(env, lp, idx, buf_str)
def CPXXgetdblannotationname(env: 'CPXCENVptr', lp: 'CPXCLPptr', idx: 'CPXDIM', buf_str: 'char *') -> "CPXSIZE *":
return _pycplex_platform.CPXXgetdblannotationname(env, lp, idx, buf_str)
def CPXXgetnumlongannotations(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumlongannotations(env, lp)
def CPXXgetnumdblannotations(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXXgetnumdblannotations(env, lp)
def CPXXgetlongannotationdefval(env: 'CPXCENVptr', lp: 'CPXCLPptr', idx: 'CPXDIM', defval_p: 'CPXLONG *') -> "int":
return _pycplex_platform.CPXXgetlongannotationdefval(env, lp, idx, defval_p)
def CPXXgetdblannotationdefval(env: 'CPXCENVptr', lp: 'CPXCLPptr', idx: 'CPXDIM', defval_p: 'double *') -> "int":
return _pycplex_platform.CPXXgetdblannotationdefval(env, lp, idx, defval_p)
def CPXXsetdblannotations(env: 'CPXCENVptr', lp: 'CPXLPptr', idx: 'CPXDIM', objtype: 'int', cnt: 'CPXDIM', indices: 'CPXDIM const *', values: 'double const *') -> "int":
return _pycplex_platform.CPXXsetdblannotations(env, lp, idx, objtype, cnt, indices, values)
def CPXXsetlongannotations(env: 'CPXCENVptr', lp: 'CPXLPptr', idx: 'CPXDIM', objtype: 'int', cnt: 'CPXDIM', indices: 'CPXDIM const *', values: 'CPXLONG const *') -> "int":
return _pycplex_platform.CPXXsetlongannotations(env, lp, idx, objtype, cnt, indices, values)
def CPXXgetlongannotations(env: 'CPXCENVptr', lp: 'CPXCLPptr', idx: 'CPXDIM', objtype: 'int', annotation: 'CPXLONG *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetlongannotations(env, lp, idx, objtype, annotation, begin, end)
def CPXXgetdblannotations(env: 'CPXCENVptr', lp: 'CPXCLPptr', idx: 'CPXDIM', objtype: 'int', annotation: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXgetdblannotations(env, lp, idx, objtype, annotation, begin, end)
def CPXXaddpwl(env: 'CPXCENVptr', lp: 'CPXLPptr', vary: 'CPXDIM', varx: 'CPXDIM', preslope: 'double', postslope: 'double', nbreaks: 'CPXDIM', py_breakx: 'double const *', py_breaky: 'double const *', pwlname: 'char const *') -> "int":
return _pycplex_platform.CPXXaddpwl(env, lp, vary, varx, preslope, postslope, nbreaks, py_breakx, py_breaky, pwlname)
def CPXXdelpwl(env: 'CPXCENVptr', lp: 'CPXLPptr', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXdelpwl(env, lp, begin, end)
def CPXXgetnumpwl(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXDIM":
return _pycplex_platform.CPXXgetnumpwl(env, lp)
def CPXXgetpwl(env: 'CPXCENVptr', lp: 'CPXCLPptr', pwlindex: 'CPXDIM') -> "CPXDIM *":
return _pycplex_platform.CPXXgetpwl(env, lp, pwlindex)
def CPXXgetpwlindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', lname_str: 'char const *', index_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXgetpwlindex(env, lp, lname_str, index_p)
def CPXXgetpwlname(env: 'CPXCENVptr', lp: 'CPXCLPptr', buf_str: 'char *', which: 'CPXDIM') -> "CPXSIZE *":
return _pycplex_platform.CPXXgetpwlname(env, lp, buf_str, which)
def CPXXgetnumlazyconstraints(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXDIM":
return _pycplex_platform.CPXXgetnumlazyconstraints(env, lp)
def CPXXgetnumusercuts(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXDIM":
return _pycplex_platform.CPXXgetnumusercuts(env, lp)
def CPXEgetprobstats(ienv: 'CPXCENVptr', lp: 'CPXLPptr', objs_p: 'int *', rows_p: 'int *', cols_p: 'int *', objcnt_p: 'int *', rhscnt_p: 'int *', nzcnt_p: 'int *', ecnt_p: 'int *', gcnt_p: 'int *', lcnt_p: 'int *', rngcnt_p: 'int *', ncnt_p: 'int *', fcnt_p: 'int *', xcnt_p: 'int *', bcnt_p: 'int *', ocnt_p: 'int *', bicnt_p: 'int *', icnt_p: 'int *', scnt_p: 'int *', sicnt_p: 'int *', qpcnt_p: 'int *', qpnzcnt_p: 'int *', nqconstr_p: 'int *', qrhscnt_p: 'int *', qlcnt_p: 'int *', qgcnt_p: 'int *', quadnzcnt_p: 'int *', linnzcnt_p: 'int *', nindconstr_p: 'int *', indrhscnt_p: 'int *', indnzcnt_p: 'int *', indcompcnt_p: 'int *', indlcnt_p: 'int *', indecnt_p: 'int *', indgcnt_p: 'int *', maxcoef_p: 'double *', mincoef_p: 'double *', minrhs_p: 'double *', maxrhs_p: 'double *', minrng_p: 'double *', maxrng_p: 'double *', minobj_p: 'double *', maxobj_p: 'double *', minlb_p: 'double *', maxub_p: 'double *', minqcoef_p: 'double *', maxqcoef_p: 'double *', minqcq_p: 'double *', maxqcq_p: 'double *', minqcl_p: 'double *', maxqcl_p: 'double *', minqcr_p: 'double *', maxqcr_p: 'double *', minind_p: 'double *', maxind_p: 'double *', minindrhs_p: 'double *', maxindrhs_p: 'double *', minlazy_p: 'double *', maxlazy_p: 'double *', minlazyrhs_p: 'double *', maxlazyrhs_p: 'double *', minucut_p: 'double *', maxucut_p: 'double *', minucutrhs_p: 'double *', maxucutrhs_p: 'double *', nsos_p: 'int *', nsos1_p: 'int *', sos1nmem_p: 'int *', sos1type_p: 'int *', nsos2_p: 'int *', sos2nmem_p: 'int *', sos2type_p: 'int *', lazyrhscnt: 'int *', lazygcnt: 'int *', lazylcnt: 'int *', lazyecnt: 'int *', lazycnt: 'int *', lazynzcnt: 'int *', ucutrhscnt: 'int *', ucutgcnt: 'int *', ucutlcnt: 'int *', ucutecnt: 'int *', ucutcnt: 'int *', ucutnzcnt: 'int *', npwl_p: 'int *', npwlbreaks_p: 'int *') -> "int":
return _pycplex_platform.CPXEgetprobstats(ienv, lp, objs_p, rows_p, cols_p, objcnt_p, rhscnt_p, nzcnt_p, ecnt_p, gcnt_p, lcnt_p, rngcnt_p, ncnt_p, fcnt_p, xcnt_p, bcnt_p, ocnt_p, bicnt_p, icnt_p, scnt_p, sicnt_p, qpcnt_p, qpnzcnt_p, nqconstr_p, qrhscnt_p, qlcnt_p, qgcnt_p, quadnzcnt_p, linnzcnt_p, nindconstr_p, indrhscnt_p, indnzcnt_p, indcompcnt_p, indlcnt_p, indecnt_p, indgcnt_p, maxcoef_p, mincoef_p, minrhs_p, maxrhs_p, minrng_p, maxrng_p, minobj_p, maxobj_p, minlb_p, maxub_p, minqcoef_p, maxqcoef_p, minqcq_p, maxqcq_p, minqcl_p, maxqcl_p, minqcr_p, maxqcr_p, minind_p, maxind_p, minindrhs_p, maxindrhs_p, minlazy_p, maxlazy_p, minlazyrhs_p, maxlazyrhs_p, minucut_p, maxucut_p, minucutrhs_p, maxucutrhs_p, nsos_p, nsos1_p, sos1nmem_p, sos1type_p, nsos2_p, sos2nmem_p, sos2type_p, lazyrhscnt, lazygcnt, lazylcnt, lazyecnt, lazycnt, lazynzcnt, ucutrhscnt, ucutgcnt, ucutlcnt, ucutecnt, ucutcnt, ucutnzcnt, npwl_p, npwlbreaks_p)
def CPXEgethist(ienv: 'CPXCENVptr', lp: 'CPXLPptr', key: 'int', hist: 'int *') -> "int":
return _pycplex_platform.CPXEgethist(ienv, lp, key, hist)
def CPXEgetqualitymetrics(env: 'CPXCENVptr', lp: 'CPXCLPptr', soln: 'int', data: 'double *', idata: 'int *') -> "int":
return _pycplex_platform.CPXEgetqualitymetrics(env, lp, soln, data, idata)
def CPXEshowquality(env: 'CPXCENVptr', lp: 'CPXCLPptr', soln: 'int') -> "int":
return _pycplex_platform.CPXEshowquality(env, lp, soln)
def CPXXgetnumcores(env: 'CPXCENVptr', numcores_p: 'int *') -> "int":
return _pycplex_platform.CPXXgetnumcores(env, numcores_p)
def CPXXcallbacksetfunc(env: 'CPXENVptr', lp: 'CPXLPptr', contextmask: 'CPXLONG', callback: 'int (*)(CPXCALLBACKCONTEXTptr,CPXLONG,void *)') -> "int":
return _pycplex_platform.CPXXcallbacksetfunc(env, lp, contextmask, callback)
def CPXXcallbackgetinfoint(context: 'CPXCALLBACKCONTEXTptr', what: 'CPXCALLBACKINFO') -> "int":
return _pycplex_platform.CPXXcallbackgetinfoint(context, what)
def CPXXcallbackgetinfolong(context: 'CPXCALLBACKCONTEXTptr', what: 'CPXCALLBACKINFO') -> "int":
return _pycplex_platform.CPXXcallbackgetinfolong(context, what)
def CPXXcallbackgetinfodbl(context: 'CPXCALLBACKCONTEXTptr', what: 'CPXCALLBACKINFO') -> "int":
return _pycplex_platform.CPXXcallbackgetinfodbl(context, what)
def CPXXcallbackabort(context: 'CPXCALLBACKCONTEXTptr') -> "void":
return _pycplex_platform.CPXXcallbackabort(context)
def CPXXcallbackcandidateispoint(context: 'CPXCALLBACKCONTEXTptr', bounded_p: 'int *') -> "int":
return _pycplex_platform.CPXXcallbackcandidateispoint(context, bounded_p)
def CPXXcallbackcandidateisray(context: 'CPXCALLBACKCONTEXTptr', ray_p: 'int *') -> "int":
return _pycplex_platform.CPXXcallbackcandidateisray(context, ray_p)
def CPXXcallbackgetcandidatepoint(context: 'CPXCALLBACKCONTEXTptr', x: 'double *', begin: 'CPXDIM', end: 'CPXDIM', obj_p: 'double *') -> "int":
return _pycplex_platform.CPXXcallbackgetcandidatepoint(context, x, begin, end, obj_p)
def CPXXcallbackgetcandidateray(context: 'CPXCALLBACKCONTEXTptr', x: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXcallbackgetcandidateray(context, x, begin, end)
def CPXXcallbackgetrelaxationpoint(context: 'CPXCALLBACKCONTEXTptr', x: 'double *', begin: 'CPXDIM', end: 'CPXDIM', obj_p: 'double *') -> "int":
return _pycplex_platform.CPXXcallbackgetrelaxationpoint(context, x, begin, end, obj_p)
def CPXXcallbackgetrelaxationstatus(context: 'CPXCALLBACKCONTEXTptr', nodelpstat_p: 'int *', flags: 'CPXLONG') -> "int":
return _pycplex_platform.CPXXcallbackgetrelaxationstatus(context, nodelpstat_p, flags)
def CPXXcallbackmakebranch(context: 'CPXCALLBACKCONTEXTptr', varcnt: 'CPXDIM', py_varind: 'CPXDIM const *', py_varlu: 'char const *', py_varbd: 'double const *', rcnt: 'CPXDIM', nzcnt: 'CPXNNZ', py_rhs: 'double const *', py_sense: 'char const *', py_rmatbeg: 'CPXNNZ const *', py_rmatind: 'CPXDIM const *', py_rmatval: 'double const *', nodeest: 'double', seqnum_p: 'CPXCNT *') -> "int":
return _pycplex_platform.CPXXcallbackmakebranch(context, varcnt, py_varind, py_varlu, py_varbd, rcnt, nzcnt, py_rhs, py_sense, py_rmatbeg, py_rmatind, py_rmatval, nodeest, seqnum_p)
def CPXXcallbackprunenode(context: 'CPXCALLBACKCONTEXTptr') -> "int":
return _pycplex_platform.CPXXcallbackprunenode(context)
def CPXXcallbackexitcutloop(context: 'CPXCALLBACKCONTEXTptr') -> "int":
return _pycplex_platform.CPXXcallbackexitcutloop(context)
def CPXXcallbackgetincumbent(context: 'CPXCALLBACKCONTEXTptr', x: 'double *', begin: 'CPXDIM', end: 'CPXDIM', obj_p: 'double *') -> "int":
return _pycplex_platform.CPXXcallbackgetincumbent(context, x, begin, end, obj_p)
def CPXXcallbackgetlocallb(context: 'CPXCALLBACKCONTEXTptr', lb: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXcallbackgetlocallb(context, lb, begin, end)
def CPXXcallbackgetlocalub(context: 'CPXCALLBACKCONTEXTptr', ub: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXcallbackgetlocalub(context, ub, begin, end)
def CPXXcallbackgetgloballb(context: 'CPXCALLBACKCONTEXTptr', lb: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXcallbackgetgloballb(context, lb, begin, end)
def CPXXcallbackgetglobalub(context: 'CPXCALLBACKCONTEXTptr', ub: 'double *', begin: 'CPXDIM', end: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXcallbackgetglobalub(context, ub, begin, end)
def CPXXcallbackpostheursoln(context: 'CPXCALLBACKCONTEXTptr', cnt: 'CPXDIM', ind: 'CPXDIM const *', val: 'double const *', obj: 'double', type: 'CPXCALLBACKSOLUTIONSTRATEGY') -> "int":
return _pycplex_platform.CPXXcallbackpostheursoln(context, cnt, ind, val, obj, type)
def CPXXcallbackaddusercuts(context: 'CPXCALLBACKCONTEXTptr', rcnt: 'CPXDIM', nzcnt: 'CPXNNZ', py_rhs: 'double const *', sense: 'char const *', py_cutbeg: 'CPXNNZ const *', py_cutmanagement: 'int const *', py_local: 'int const *') -> "int":
return _pycplex_platform.CPXXcallbackaddusercuts(context, rcnt, nzcnt, py_rhs, sense, py_cutbeg, py_cutmanagement, py_local)
def CPXXcallbackrejectcandidate(context: 'CPXCALLBACKCONTEXTptr', rcnt: 'CPXDIM', nzcnt: 'CPXNNZ', py_rhs: 'double const *', sense: 'char const *', py_cutbeg: 'CPXNNZ const *') -> "int":
return _pycplex_platform.CPXXcallbackrejectcandidate(context, rcnt, nzcnt, py_rhs, sense, py_cutbeg)
def CPXXcallbackrejectcandidatelocal(context: 'CPXCALLBACKCONTEXTptr', rcnt: 'CPXDIM', nzcnt: 'CPXNNZ', py_rhs: 'double const *', sense: 'char const *', py_cutbeg: 'CPXNNZ const *') -> "int":
return _pycplex_platform.CPXXcallbackrejectcandidatelocal(context, rcnt, nzcnt, py_rhs, sense, py_cutbeg)
def CPXXmodelasstcallbacksetfunc(env: 'CPXENVptr', lp: 'CPXLPptr', callback: 'int (*)(int,char const *,void *)') -> "int":
return _pycplex_platform.CPXXmodelasstcallbacksetfunc(env, lp, callback)
def CPXXgetnumobjs(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXDIM":
return _pycplex_platform.CPXXgetnumobjs(env, lp)
def CPXXmultiobjchgattribs(env: 'CPXCENVptr', lp: 'CPXLPptr', objind: 'CPXDIM', offset: 'double', weight: 'double', priority: 'CPXDIM', abstol: 'double', reltol: 'double', name: 'char const *') -> "int":
return _pycplex_platform.CPXXmultiobjchgattribs(env, lp, objind, offset, weight, priority, abstol, reltol, name)
def CPXXmultiobjgetindex(env: 'CPXCENVptr', lp: 'CPXCLPptr', lname_str: 'char const *', index_p: 'CPXDIM *') -> "int":
return _pycplex_platform.CPXXmultiobjgetindex(env, lp, lname_str, index_p)
def CPXXmultiobjgetname(env: 'CPXCENVptr', lp: 'CPXCLPptr', objind: 'CPXDIM', buf_str: 'char *') -> "CPXSIZE *":
return _pycplex_platform.CPXXmultiobjgetname(env, lp, objind, buf_str)
def CPXXmultiobjgetobj(env: 'CPXCENVptr', lp: 'CPXCLPptr', n: 'CPXDIM', coeffs: 'double *', begin: 'CPXDIM', end: 'CPXDIM', offset_p: 'double *', weight_p: 'double *', priority_p: 'CPXDIM *', abstol_p: 'double *', reltol_p: 'double *') -> "int":
return _pycplex_platform.CPXXmultiobjgetobj(env, lp, n, coeffs, begin, end, offset_p, weight_p, priority_p, abstol_p, reltol_p)
def CPXXmultiobjgetobjval(env: 'CPXCENVptr', lp: 'CPXCLPptr', n: 'CPXDIM', objval_p: 'double *') -> "int":
return _pycplex_platform.CPXXmultiobjgetobjval(env, lp, n, objval_p)
def CPXXmultiobjgetobjvalbypriority(env: 'CPXCENVptr', lp: 'CPXCLPptr', priority: 'CPXDIM', objval_p: 'double *') -> "int":
return _pycplex_platform.CPXXmultiobjgetobjvalbypriority(env, lp, priority, objval_p)
def CPXXmultiobjsetobj(env: 'CPXCENVptr', lp: 'CPXLPptr', n: 'CPXDIM', objnz: 'CPXDIM', py_objind: 'CPXDIM const *', py_objval: 'double const *', offset: 'double', weight: 'double', priority: 'CPXDIM', abstol: 'double', reltol: 'double', objname: 'char const *') -> "int":
return _pycplex_platform.CPXXmultiobjsetobj(env, lp, n, objnz, py_objind, py_objval, offset, weight, priority, abstol, reltol, objname)
def CPXXsetnumobjs(env: 'CPXCENVptr', lp: 'CPXCLPptr', n: 'CPXDIM') -> "int":
return _pycplex_platform.CPXXsetnumobjs(env, lp, n)
def CPXXmultiobjgetdblinfo(env: 'CPXCENVptr', lp: 'CPXCLPptr', subprob: 'CPXDIM', info_p: 'double *', what: 'int') -> "int":
return _pycplex_platform.CPXXmultiobjgetdblinfo(env, lp, subprob, info_p, what)
def CPXXmultiobjgetintinfo(env: 'CPXCENVptr', lp: 'CPXCLPptr', subprob: 'CPXDIM', info_p: 'int *', what: 'int') -> "int":
return _pycplex_platform.CPXXmultiobjgetintinfo(env, lp, subprob, info_p, what)
def CPXXmultiobjgetlonginfo(env: 'CPXCENVptr', lp: 'CPXCLPptr', subprob: 'CPXDIM', info_p: 'CPXLONG *', what: 'int') -> "int":
return _pycplex_platform.CPXXmultiobjgetlonginfo(env, lp, subprob, info_p, what)
def CPXXmultiobjgetnumsolves(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "CPXDIM":
return _pycplex_platform.CPXXmultiobjgetnumsolves(env, lp)
def CPXEgetnumprios(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXEgetnumprios(env, lp)
def CPXEismultiobj(env: 'CPXCENVptr', lp: 'CPXCLPptr') -> "int":
return _pycplex_platform.CPXEismultiobj(env, lp)
def CPXXparamsetadddbl(env: 'CPXCENVptr', ps: 'CPXPARAMSETptr', whichparam: 'int', newvalue: 'double') -> "int":
return _pycplex_platform.CPXXparamsetadddbl(env, ps, whichparam, newvalue)
def CPXXparamsetaddint(env: 'CPXCENVptr', ps: 'CPXPARAMSETptr', whichparam: 'int', newvalue: 'CPXINT') -> "int":
return _pycplex_platform.CPXXparamsetaddint(env, ps, whichparam, newvalue)
def CPXXparamsetaddlong(env: 'CPXCENVptr', ps: 'CPXPARAMSETptr', whichparam: 'int', newvalue: 'CPXLONG') -> "int":
return _pycplex_platform.CPXXparamsetaddlong(env, ps, whichparam, newvalue)
def CPXXparamsetaddstr(env: 'CPXCENVptr', ps: 'CPXPARAMSETptr', whichparam: 'int', svalue: 'char const *') -> "int":
return _pycplex_platform.CPXXparamsetaddstr(env, ps, whichparam, svalue)
def CPXXparamsetapply(env: 'CPXENVptr', ps: 'CPXCPARAMSETptr') -> "int":
return _pycplex_platform.CPXXparamsetapply(env, ps)
def CPXXparamsetcopy(targetenv: 'CPXCENVptr', targetps: 'CPXPARAMSETptr', sourceps: 'CPXCPARAMSETptr') -> "int":
return _pycplex_platform.CPXXparamsetcopy(targetenv, targetps, sourceps)
def CPXXparamsetcreate(env: 'CPXCENVptr', status_p: 'int *') -> "CPXPARAMSETptr":
return _pycplex_platform.CPXXparamsetcreate(env, status_p)
def CPXXparamsetdel(env: 'CPXCENVptr', ps: 'CPXPARAMSETptr', whichparam: 'int') -> "int":
return _pycplex_platform.CPXXparamsetdel(env, ps, whichparam)
def CPXXparamsetfree(env: 'CPXCENVptr', ps_p: 'CPXPARAMSETptr *') -> "int":
return _pycplex_platform.CPXXparamsetfree(env, ps_p)
def CPXXparamsetgetdbl(env: 'CPXCENVptr', ps: 'CPXCPARAMSETptr', whichparam: 'int', dval_p: 'double *') -> "int":
return _pycplex_platform.CPXXparamsetgetdbl(env, ps, whichparam, dval_p)
def CPXXparamsetgetids(env: 'CPXCENVptr', ps: 'CPXCPARAMSETptr', cnt_p: 'int *') -> "int *":
return _pycplex_platform.CPXXparamsetgetids(env, ps, cnt_p)
def CPXXparamsetgetint(env: 'CPXCENVptr', ps: 'CPXCPARAMSETptr', whichparam: 'int', ival_p: 'CPXINT *') -> "int":
return _pycplex_platform.CPXXparamsetgetint(env, ps, whichparam, ival_p)
def CPXXparamsetgetlong(env: 'CPXCENVptr', ps: 'CPXCPARAMSETptr', whichparam: 'int', ival_p: 'CPXLONG *') -> "int":
return _pycplex_platform.CPXXparamsetgetlong(env, ps, whichparam, ival_p)
def CPXXparamsetgetstr(env: 'CPXCENVptr', ps: 'CPXCPARAMSETptr', whichparam: 'int', param_buffer_str: 'char *') -> "char *":
return _pycplex_platform.CPXXparamsetgetstr(env, ps, whichparam, param_buffer_str)
def CPXXparamsetreadcopy(env: 'CPXENVptr', ps: 'CPXPARAMSETptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXparamsetreadcopy(env, ps, filename_str)
def CPXXparamsetwrite(env: 'CPXCENVptr', ps: 'CPXCPARAMSETptr', filename_str: 'char const *') -> "int":
return _pycplex_platform.CPXXparamsetwrite(env, ps, filename_str)
def CPXEwriteprobdev(env: 'CPXCENVptr', lp: 'CPXCLPptr', filename_str: 'char const *') -> "char const *":
return _pycplex_platform.CPXEwriteprobdev(env, lp, filename_str)
class intPtr(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
_pycplex_platform.intPtr_swiginit(self, _pycplex_platform.new_intPtr())
__swig_destroy__ = _pycplex_platform.delete_intPtr
def assign(self, value: 'int') -> "void":
return _pycplex_platform.intPtr_assign(self, value)
def value(self) -> "int":
return _pycplex_platform.intPtr_value(self)
def cast(self) -> "int *":
return _pycplex_platform.intPtr_cast(self)
@staticmethod
def frompointer(t: 'int *') -> "intPtr *":
return _pycplex_platform.intPtr_frompointer(t)
# Register intPtr in _pycplex_platform:
_pycplex_platform.intPtr_swigregister(intPtr)
def intPtr_frompointer(t: 'int *') -> "intPtr *":
return _pycplex_platform.intPtr_frompointer(t)
class cpxlongPtr(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
_pycplex_platform.cpxlongPtr_swiginit(self, _pycplex_platform.new_cpxlongPtr())
__swig_destroy__ = _pycplex_platform.delete_cpxlongPtr
def assign(self, value: 'long long') -> "void":
return _pycplex_platform.cpxlongPtr_assign(self, value)
def value(self) -> "long long":
return _pycplex_platform.cpxlongPtr_value(self)
def cast(self) -> "long long *":
return _pycplex_platform.cpxlongPtr_cast(self)
@staticmethod
def frompointer(t: 'long long *') -> "cpxlongPtr *":
return _pycplex_platform.cpxlongPtr_frompointer(t)
# Register cpxlongPtr in _pycplex_platform:
_pycplex_platform.cpxlongPtr_swigregister(cpxlongPtr)
def cpxlongPtr_frompointer(t: 'long long *') -> "cpxlongPtr *":
return _pycplex_platform.cpxlongPtr_frompointer(t)
class doublePtr(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
_pycplex_platform.doublePtr_swiginit(self, _pycplex_platform.new_doublePtr())
__swig_destroy__ = _pycplex_platform.delete_doublePtr
def assign(self, value: 'double') -> "void":
return _pycplex_platform.doublePtr_assign(self, value)
def value(self) -> "double":
return _pycplex_platform.doublePtr_value(self)
def cast(self) -> "double *":
return _pycplex_platform.doublePtr_cast(self)
@staticmethod
def frompointer(t: 'double *') -> "doublePtr *":
return _pycplex_platform.doublePtr_frompointer(t)
# Register doublePtr in _pycplex_platform:
_pycplex_platform.doublePtr_swigregister(doublePtr)
def doublePtr_frompointer(t: 'double *') -> "doublePtr *":
return _pycplex_platform.doublePtr_frompointer(t)
class CPXLPptrPtr(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
_pycplex_platform.CPXLPptrPtr_swiginit(self, _pycplex_platform.new_CPXLPptrPtr())
__swig_destroy__ = _pycplex_platform.delete_CPXLPptrPtr
def assign(self, value: 'CPXLPptr') -> "void":
return _pycplex_platform.CPXLPptrPtr_assign(self, value)
def value(self) -> "CPXLPptr":
return _pycplex_platform.CPXLPptrPtr_value(self)
def cast(self) -> "CPXLPptr *":
return _pycplex_platform.CPXLPptrPtr_cast(self)
@staticmethod
def frompointer(t: 'CPXLPptr *') -> "CPXLPptrPtr *":
return _pycplex_platform.CPXLPptrPtr_frompointer(t)
# Register CPXLPptrPtr in _pycplex_platform:
_pycplex_platform.CPXLPptrPtr_swigregister(CPXLPptrPtr)
def CPXLPptrPtr_frompointer(t: 'CPXLPptr *') -> "CPXLPptrPtr *":
return _pycplex_platform.CPXLPptrPtr_frompointer(t)
class CPXENVptrPtr(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
_pycplex_platform.CPXENVptrPtr_swiginit(self, _pycplex_platform.new_CPXENVptrPtr())
__swig_destroy__ = _pycplex_platform.delete_CPXENVptrPtr
def assign(self, value: 'CPXENVptr') -> "void":
return _pycplex_platform.CPXENVptrPtr_assign(self, value)
def value(self) -> "CPXENVptr":
return _pycplex_platform.CPXENVptrPtr_value(self)
def cast(self) -> "CPXENVptr *":
return _pycplex_platform.CPXENVptrPtr_cast(self)
@staticmethod
def frompointer(t: 'CPXENVptr *') -> "CPXENVptrPtr *":
return _pycplex_platform.CPXENVptrPtr_frompointer(t)
# Register CPXENVptrPtr in _pycplex_platform:
_pycplex_platform.CPXENVptrPtr_swigregister(CPXENVptrPtr)
def CPXENVptrPtr_frompointer(t: 'CPXENVptr *') -> "CPXENVptrPtr *":
return _pycplex_platform.CPXENVptrPtr_frompointer(t)
class CPXCHANNELptrPtr(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
_pycplex_platform.CPXCHANNELptrPtr_swiginit(self, _pycplex_platform.new_CPXCHANNELptrPtr())
__swig_destroy__ = _pycplex_platform.delete_CPXCHANNELptrPtr
def assign(self, value: 'CPXCHANNELptr') -> "void":
return _pycplex_platform.CPXCHANNELptrPtr_assign(self, value)
def value(self) -> "CPXCHANNELptr":
return _pycplex_platform.CPXCHANNELptrPtr_value(self)
def cast(self) -> "CPXCHANNELptr *":
return _pycplex_platform.CPXCHANNELptrPtr_cast(self)
@staticmethod
def frompointer(t: 'CPXCHANNELptr *') -> "CPXCHANNELptrPtr *":
return _pycplex_platform.CPXCHANNELptrPtr_frompointer(t)
# Register CPXCHANNELptrPtr in _pycplex_platform:
_pycplex_platform.CPXCHANNELptrPtr_swigregister(CPXCHANNELptrPtr)
def CPXCHANNELptrPtr_frompointer(t: 'CPXCHANNELptr *') -> "CPXCHANNELptrPtr *":
return _pycplex_platform.CPXCHANNELptrPtr_frompointer(t)
class CPXPARAMSETptrPtr(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self):
_pycplex_platform.CPXPARAMSETptrPtr_swiginit(self, _pycplex_platform.new_CPXPARAMSETptrPtr())
__swig_destroy__ = _pycplex_platform.delete_CPXPARAMSETptrPtr
def assign(self, value: 'CPXPARAMSETptr') -> "void":
return _pycplex_platform.CPXPARAMSETptrPtr_assign(self, value)
def value(self) -> "CPXPARAMSETptr":
return _pycplex_platform.CPXPARAMSETptrPtr_value(self)
def cast(self) -> "CPXPARAMSETptr *":
return _pycplex_platform.CPXPARAMSETptrPtr_cast(self)
@staticmethod
def frompointer(t: 'CPXPARAMSETptr *') -> "CPXPARAMSETptrPtr *":
return _pycplex_platform.CPXPARAMSETptrPtr_frompointer(t)
# Register CPXPARAMSETptrPtr in _pycplex_platform:
_pycplex_platform.CPXPARAMSETptrPtr_swigregister(CPXPARAMSETptrPtr)
def CPXPARAMSETptrPtr_frompointer(t: 'CPXPARAMSETptr *') -> "CPXPARAMSETptrPtr *":
return _pycplex_platform.CPXPARAMSETptrPtr_frompointer(t)
class intArray(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, nelements: 'size_t'):
_pycplex_platform.intArray_swiginit(self, _pycplex_platform.new_intArray(nelements))
__swig_destroy__ = _pycplex_platform.delete_intArray
def __getitem__(self, index: 'size_t') -> "int":
return _pycplex_platform.intArray___getitem__(self, index)
def __setitem__(self, index: 'size_t', value: 'int') -> "void":
return _pycplex_platform.intArray___setitem__(self, index, value)
def cast(self) -> "int *":
return _pycplex_platform.intArray_cast(self)
@staticmethod
def frompointer(t: 'int *') -> "intArray *":
return _pycplex_platform.intArray_frompointer(t)
# Register intArray in _pycplex_platform:
_pycplex_platform.intArray_swigregister(intArray)
def intArray_frompointer(t: 'int *') -> "intArray *":
return _pycplex_platform.intArray_frompointer(t)
class doubleArray(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, nelements: 'size_t'):
_pycplex_platform.doubleArray_swiginit(self, _pycplex_platform.new_doubleArray(nelements))
__swig_destroy__ = _pycplex_platform.delete_doubleArray
def __getitem__(self, index: 'size_t') -> "double":
return _pycplex_platform.doubleArray___getitem__(self, index)
def __setitem__(self, index: 'size_t', value: 'double') -> "void":
return _pycplex_platform.doubleArray___setitem__(self, index, value)
def cast(self) -> "double *":
return _pycplex_platform.doubleArray_cast(self)
@staticmethod
def frompointer(t: 'double *') -> "doubleArray *":
return _pycplex_platform.doubleArray_frompointer(t)
# Register doubleArray in _pycplex_platform:
_pycplex_platform.doubleArray_swigregister(doubleArray)
def doubleArray_frompointer(t: 'double *') -> "doubleArray *":
return _pycplex_platform.doubleArray_frompointer(t)
class longArray(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, nelements: 'size_t'):
_pycplex_platform.longArray_swiginit(self, _pycplex_platform.new_longArray(nelements))
__swig_destroy__ = _pycplex_platform.delete_longArray
def __getitem__(self, index: 'size_t') -> "long long":
return _pycplex_platform.longArray___getitem__(self, index)
def __setitem__(self, index: 'size_t', value: 'long long') -> "void":
return _pycplex_platform.longArray___setitem__(self, index, value)
def cast(self) -> "long long *":
return _pycplex_platform.longArray_cast(self)
@staticmethod
def frompointer(t: 'long long *') -> "longArray *":
return _pycplex_platform.longArray_frompointer(t)
# Register longArray in _pycplex_platform:
_pycplex_platform.longArray_swigregister(longArray)
def longArray_frompointer(t: 'long long *') -> "longArray *":
return _pycplex_platform.longArray_frompointer(t)
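# --- Editor's sketch (hedged): one plausible way the SWIG proxies above
# (intPtr, doublePtr, intArray, doubleArray, ...) are used to pass C in/out
# pointers and buffers to the CPXX* wrappers. `env` is assumed to be an
# already-opened CPLEX environment handle; whether each wrapper takes a raw
# pointer or a typemapped Python object is an assumption, not confirmed here.
def _sketch_pointer_helpers(env):
    ncores = intPtr()                             # allocates an int* output slot
    status = CPXXgetnumcores(env, ncores.cast())  # wrapper defined earlier in this module
    if status == 0:
        print("CPLEX reports", ncores.value(), "cores")
    xs = doubleArray(3)                           # a C double[3] buffer
    for i in range(3):
        xs[i] = float(i)                          # __setitem__ writes into the C array
    return xs.cast()                              # raw double* for wrapper calls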
| 87,161
| 9,077
| 11,590
|
22e20f03dc46b1e9d6eade314e381279b10a06e0
| 1,703
|
py
|
Python
|
GenerIter/processor/Mix.py
|
GridPresence/GenerIter
|
f0b74cd6c1d1bb9a23fccb28fa8b972f9eeccaf8
|
[
"MIT"
] | 7
|
2021-01-07T19:03:10.000Z
|
2021-03-05T08:05:17.000Z
|
GenerIter/processor/Mix.py
|
GridPresence/GenerIter
|
f0b74cd6c1d1bb9a23fccb28fa8b972f9eeccaf8
|
[
"MIT"
] | 2
|
2021-01-26T12:45:58.000Z
|
2021-02-15T11:06:14.000Z
|
GenerIter/processor/Mix.py
|
GridPresence/GenerIter
|
f0b74cd6c1d1bb9a23fccb28fa8b972f9eeccaf8
|
[
"MIT"
] | 1
|
2021-01-24T05:21:28.000Z
|
2021-01-24T05:21:28.000Z
|
"""
Generator class for some Process-based mixing algorithms.
Copyright 2020 Thomas Jackson Park & Jeremy Pavier
"""
import os
import random
import inspect
from pydub import AudioSegment
from GenerIter.process import Process
from GenerIter.util import debug, nextPowerOf2
import GenerIter.excepts as robox
| 30.410714
| 100
| 0.598356
|
"""
Generator class for some Process-based mixing algorithms.
Copyright 2020 Thomas Jackson Park & Jeremy Pavier
"""
import os
import random
import inspect
from pydub import AudioSegment
from GenerIter.process import Process
from GenerIter.util import debug, nextPowerOf2
import GenerIter.excepts as robox
class Mix(Process):
def __init__(self):
super().__init__()
debug('Mix()')
def multitrack(self):
# How many times do you want this to run?
iterations = int(self._config["tracks"])
if "trackfade" in self._config:
trackfade = self._config["trackfade"]
else:
trackfade = 0
voices = self._config["voices"]
nvoices = len(voices)
if nvoices < 2:
raise robox.GIParameterErr("Insufficient voices specified for the multitrack algorithm")
mute = 6.0 * nextPowerOf2(nvoices)
for ctr in range(iterations):
audios = []
# Randomly select our samples
for voice in voices:
level = voices[voice]
sample = self._inventory.selectRandom(voice)
audio = AudioSegment.from_wav(sample)
#audio = audio.normalize()
audio = audio + level
audios.append(audio)
summation = audios[0]
for nctr in range(nvoices-1):
summation = summation.overlay(audios[nctr+1], loop=True)
if trackfade > 0:
summation = self.declick(summation, trackfade)
fname = inspect.currentframe().f_code.co_name
self.write(algorithm=fname, counter=ctr, source=summation)
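# Editor's sketch (hedged): a minimal configuration for multitrack(), inferred
# from the keys the method reads above; the voice names and dB levels are
# illustrative assumptions, not part of the original project documentation.
EXAMPLE_MULTITRACK_CONFIG = {
    "tracks": 4,          # number of output mixes to render
    "trackfade": 500,     # fade applied via declick(), in ms; 0 disables it
    "voices": {           # inventory category -> gain offset in dB per sample
        "bass": -3.0,
        "drums": 0.0,
        "pads": -6.0,
    },
}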
| 1,321
| -2
| 76
|
7f18aacd0cfd85d08e6a79111b1cccd0674a4fce
| 19,492
|
py
|
Python
|
misc/deep_learning_notes/Ch4_Recurrent_Networks/002_vanila_RNN_with_edf/edf.py
|
tmjnow/MoocX
|
52c8450ff7ecc8450a8adc2457233d5777a3d5bb
|
[
"MIT"
] | 7
|
2017-06-13T05:24:15.000Z
|
2022-01-09T01:10:28.000Z
|
misc/deep_learning_notes/Ch4_Recurrent_Networks/002_vanila_RNN_with_edf/edf.py
|
kinshuk4/Coursera
|
52c8450ff7ecc8450a8adc2457233d5777a3d5bb
|
[
"MIT"
] | 11
|
2017-05-08T23:30:50.000Z
|
2017-06-24T21:57:42.000Z
|
misc/deep_learning_notes/Ch4_Recurrent_Networks/002_vanila_RNN_with_edf/edf.py
|
kinshuk4/MoocX
|
52c8450ff7ecc8450a8adc2457233d5777a3d5bb
|
[
"MIT"
] | 4
|
2017-10-05T12:56:53.000Z
|
2020-06-14T17:01:32.000Z
|
import numpy as np
DT = np.float32
eps = 1e-12
# Globals
components = []
params = []
# Global forward/backward
# Optimization functions
# Values
# Parameters
# Xavier initializer
# Utility function for shape inference with broadcasting
#### Actual components
class Add: # Add with broadcasting
"""
Class name: Add
Class usage: add two matrices x, y with broadcasting supported by numpy "+" operation.
Class function:
forward: calculate x + y with possible broadcasting
        backward: calculate the derivatives w.r.t. x and y; for a broadcast operand, grad is summed over the broadcast axes before being applied.
"""
class Mul: # Multiply with broadcasting
"""
Class Name: Mul
    Class Usage: elementwise multiplication of two matrices
Class Functions:
forward: compute the result x*y
backward: compute the derivative w.r.t x and y
"""
class VDot: # Matrix multiply (fully-connected layer)
"""
Class Name: VDot
Class Usage: matrix multiplication where x, y are matrices
    y is expected to be a parameter, and by convention parameters come last. Typical usage: x is a batch of feature vectors with shape (batch_size, f_dim) and y is a parameter with shape (f_dim, f_dim2).
    Class Functions:
        forward: compute the matrix multiplication result
backward: compute the derivative w.r.t x and y, where derivative of x and y are both matrices
"""
class Log: # Elementwise Log
"""
Class Name: Log
Class Usage: compute the elementwise log(x) given x.
Class Functions:
forward: compute log(x)
backward: compute the derivative w.r.t input vector x
"""
class Sigmoid:
"""
Class Name: Sigmoid
Class Usage: compute the elementwise sigmoid activation. Input is vector or matrix. In case of vector, [x_{0}, x_{1}, ..., x_{n}], output is vector [y_{0}, y_{1}, ..., y_{n}] where y_{i} = 1/(1 + exp(-x_{i}))
Class Functions:
forward: compute activation y_{i} for all i.
backward: compute the derivative w.r.t input vector/matrix x
"""
class Tanh:
"""
Class Name: Tanh
Class Usage: compute the elementwise Tanh activation. Input is vector or matrix. In case of vector, [x_{0}, x_{1}, ..., x_{n}], output is vector [y_{0}, y_{1}, ..., y_{n}] where y_{i} = (exp(x_{i}) - exp(-x_{i}))/(exp(x_{i}) + exp(-x_{i}))
Class Functions:
forward: compute activation y_{i} for all i.
backward: compute the derivative w.r.t input vector/matrix x
"""
class RELU:
"""
Class Name: RELU
Class Usage: compute the elementwise RELU activation. Input is vector or matrix. In case of vector, [x_{0}, x_{1}, ..., x_{n}], output is vector [y_{0}, y_{1}, ..., y_{n}] where y_{i} = max(0, x_{i})
Class Functions:
forward: compute activation y_{i} for all i.
backward: compute the derivative w.r.t input vector/matrix x
"""
class LeakyRELU:
"""
Class Name: LeakyRELU
Class Usage: compute the elementwise LeakyRELU activation. Input is vector or matrix. In case of vector, [x_{0}, x_{1}, ..., x_{n}], output is vector [y_{0}, y_{1}, ..., y_{n}] where y_{i} = 0.01*x_{i} for x_{i} < 0 and y_{i} = x_{i} for x_{i} > 0
Class Functions:
forward: compute activation y_{i} for all i.
backward: compute the derivative w.r.t input vector/matrix x
"""
class Softplus:
"""
Class Name: Softplus
Class Usage: compute the elementwise Softplus activation.
Class Functions:
forward: compute activation y_{i} for all i.
backward: compute the derivative w.r.t input vector/matrix x
"""
class SoftMax:
"""
Class Name: SoftMax
    Class Usage: compute the softmax activation for each element in the matrix, normalized over all elements in each batch (row). Specifically, the input is a matrix [x_{00}, x_{01}, ..., x_{0n}, ..., x_{b0}, x_{b1}, ..., x_{bn}] and the output is a matrix [p_{00}, p_{01}, ..., p_{0n}, ..., p_{b0}, ..., p_{bn}] where p_{bi} = exp(x_{bi})/(exp(x_{b0}) + ... + exp(x_{bn}))
Class Functions:
forward: compute probability p_{bi} for all b, i.
backward: compute the derivative w.r.t input matrix x
"""
class LogLoss:
"""
Class Name: LogLoss
    Class Usage: compute the elementwise -log(x) given matrix x; this is the loss function used in most cases.
Class Functions:
forward: compute -log(x)
backward: compute the derivative w.r.t input matrix x
"""
class Mean:
"""
Class Name: Mean
Class Usage: compute the mean given a vector x.
Class Functions:
forward: compute (x_{0} + ... + x_{n})/n
backward: compute the derivative w.r.t input vector x
"""
class Sum:
"""
Class Name: Sum
Class Usage: compute the sum of a matrix.
"""
class MeanwithMask:
"""
Class Name: MeanwithMask
Class Usage: compute the mean given a vector x with mask.
Class Functions:
        forward: compute x = x*mask, then sum the nonzero entries of x and divide by their count
backward: compute the derivative w.r.t input vector matrix
"""
class Aref: # out = x[idx]
"""
Class Name: Aref
    Class Usage: get specific entries of a matrix. x is the matrix with shape (batch_size, N) and idx
    is a vector containing the entry index for each row; x is differentiable.
Class Functions:
forward: compute x[b, idx(b)]
backward: compute the derivative w.r.t input matrix x
"""
class Accuracy:
"""
Class Name: Accuracy
    Class Usage: check whether the predicted label is correct. x holds the per-class probabilities and idx is the ground-truth label.
Class Functions:
forward: find the label that has maximum probability and compare it with the ground truth label.
backward: None
"""
class Reshape:
"""
Class name: Reshape
    Class usage: Reshape the tensor x to a specific shape.
    Class function:
        forward: reshape the tensor x to the given shape
        backward: calculate the derivative w.r.t. x, which simply reshapes the incoming gradient back to x's original shape
"""
# clip the gradient if the norm of gradient is larger than some threshold, this is crucial for RNN.
##################################################### Recurrent Components ##############################################
class Embed:
"""
Class name: Embed
Class usage: Embed layer.
Class function:
        forward: given the embedding matrix w2v and a word idx, return its corresponding embedding vector.
        backward: calculate the derivative w.r.t. the embedding matrix
"""
class ConCat:
"""
Class name: ConCat
Class usage: ConCat layer.
Class function:
        forward: concatenate two matrices along the last axis.
        backward: calculate the derivatives w.r.t. matrices x and y.
"""
class ArgMax:
"""
Class name: ArgMax
Class usage: ArgMax layer.
Class function:
forward: given x, calculate the index which has the maximum value
backward: None
"""
| 30.314152
| 360
| 0.579776
|
import numpy as np
DT = np.float32
eps = 1e-12
# Globals
components = []
params = []
# Global forward/backward
def Forward():
for c in components:
c.forward()
def Backward(loss):
for c in components:
if c.grad is not None: c.grad = DT(0)
loss.grad = np.ones_like(loss.value)
for c in components[::-1]:
c.backward()
# Optimization functions
def SGD(lr):
for p in params:
lrp = p.opts['lr'] * lr if 'lr' in p.opts.keys() else lr
p.value = p.value - lrp * p.grad
p.grad = DT(0)
# Values
class Value:
def __init__(self, value=None):
self.value = DT(value).copy()
self.grad = None
def set(self, value):
self.value = DT(value).copy()
# Parameters
class Param:
def __init__(self, value, opts={}):
self.value = DT(value).copy()
        self.opts = opts  # per-parameter options, e.g. {'lr': 0.5}, read by SGD
params.append(self)
self.grad = DT(0)
# Xavier initializer
def xavier(shape):
sq = np.sqrt(3.0 / np.prod(shape[:-1]))
return np.random.uniform(-sq, sq, shape)
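# Editor's note (hedged): uniform(-a, a) has variance a**2 / 3, so with
# a = sqrt(3 / fan_in) the initial weights get variance 1 / fan_in, the usual
# Xavier-style scaling; here fan_in is the product of all but the last dimension.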
# Utility function for shape inference with broadcasting
def bcast(x, y):
xs = np.array(x.shape)
ys = np.array(y.shape)
pad = len(xs) - len(ys)
if pad > 0:
ys = np.pad(ys, [[pad, 0]], 'constant')
elif pad < 0:
xs = np.pad(xs, [[-pad, 0]], 'constant')
os = np.maximum(xs, ys)
xred = tuple([idx for idx in np.where(xs < os)][0])
yred = tuple([idx for idx in np.where(ys < os)][0])
return xred, yred
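# Editor's worked example (hedged): for x of shape (5, 3) and y of shape (3,),
# y's shape is padded at the front, the broadcast output has shape (5, 3), and
# the gradient w.r.t. y must be summed over axis 0:
#     bcast(np.zeros((5, 3)), np.zeros(3)) == ((), (0,))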
#### Actual components
class Add: # Add with broadcasting
"""
Class name: Add
Class usage: add two matrices x, y with broadcasting supported by numpy "+" operation.
Class function:
forward: calculate x + y with possible broadcasting
        backward: calculate the derivatives w.r.t. x and y; for a broadcast operand, grad is summed over the broadcast axes before being applied.
"""
def __init__(self, x, y):
components.append(self)
self.x = x
self.y = y
self.grad = None if x.grad is None and y.grad is None else DT(0)
def forward(self):
self.value = self.x.value + self.y.value
def backward(self):
xred, yred = bcast(self.x.value, self.y.value)
if self.x.grad is not None:
self.x.grad = self.x.grad + np.reshape(
np.sum(self.grad, axis=xred, keepdims=True),
self.x.value.shape)
if self.y.grad is not None:
self.y.grad = self.y.grad + np.reshape(
np.sum(self.grad, axis=yred, keepdims=True),
self.y.value.shape)
class Mul: # Multiply with broadcasting
"""
Class Name: Mul
    Class Usage: elementwise multiplication of two matrices
Class Functions:
forward: compute the result x*y
backward: compute the derivative w.r.t x and y
"""
def __init__(self, x, y):
components.append(self)
self.x = x
self.y = y
self.grad = None if x.grad is None and y.grad is None else DT(0)
def forward(self):
self.value = self.x.value * self.y.value
def backward(self):
xred, yred = bcast(self.x.value, self.y.value)
if self.x.grad is not None:
self.x.grad = self.x.grad + np.reshape(
np.sum(self.grad * self.y.value, axis=xred, keepdims=True),
self.x.value.shape)
if self.y.grad is not None:
self.y.grad = self.y.grad + np.reshape(
np.sum(self.grad * self.x.value, axis=yred, keepdims=True),
self.y.value.shape)
class VDot: # Matrix multiply (fully-connected layer)
"""
Class Name: VDot
Class Usage: matrix multiplication where x, y are matrices
    y is expected to be a parameter, and by convention parameters come last. Typical usage: x is a batch of feature vectors with shape (batch_size, f_dim) and y is a parameter with shape (f_dim, f_dim2).
    Class Functions:
        forward: compute the matrix multiplication result
backward: compute the derivative w.r.t x and y, where derivative of x and y are both matrices
"""
def __init__(self, x, y):
components.append(self)
self.x = x
self.y = y
self.grad = None if x.grad is None and y.grad is None else DT(0)
def forward(self):
self.value = np.matmul(self.x.value, self.y.value)
def backward(self):
if self.x.grad is not None:
if len(self.y.value.shape) == 1:
nabla = np.matmul(self.y.value.reshape(list(self.y.value.shape) + [1]), self.grad.T).T
else:
nabla = np.matmul(self.y.value, self.grad.T).T
self.x.grad = self.x.grad + nabla
if self.y.grad is not None:
if len(self.x.value.shape) == 1:
nabla = np.matmul(self.x.value.T.reshape(list(self.x.value.shape) + [1]),
self.grad.reshape([1] + list(self.grad.shape)))
else:
nabla = np.matmul(self.x.value.T, self.grad)
self.y.grad = self.y.grad + nabla
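# Editor's note (hedged): the reshapes above handle the unbatched case — when
# x or y is 1-D, the gradient is lifted to 2-D so that matmul yields the outer
# product; for batched 2-D inputs the plain matmul(y, grad.T).T and
# matmul(x.T, grad) forms are used directly.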
class Log: # Elementwise Log
"""
Class Name: Log
Class Usage: compute the elementwise log(x) given x.
Class Functions:
forward: compute log(x)
backward: compute the derivative w.r.t input vector x
"""
def __init__(self, x):
components.append(self)
self.x = x
self.grad = None if x.grad is None else DT(0)
def forward(self):
self.value = np.log(self.x.value)
def backward(self):
if self.x.grad is not None:
self.x.grad = self.x.grad + self.grad / self.x.value
class Sigmoid:
"""
Class Name: Sigmoid
Class Usage: compute the elementwise sigmoid activation. Input is vector or matrix. In case of vector, [x_{0}, x_{1}, ..., x_{n}], output is vector [y_{0}, y_{1}, ..., y_{n}] where y_{i} = 1/(1 + exp(-x_{i}))
Class Functions:
forward: compute activation y_{i} for all i.
backward: compute the derivative w.r.t input vector/matrix x
"""
def __init__(self, x):
components.append(self)
self.x = x
self.grad = None if x.grad is None else DT(0)
def forward(self):
self.value = 1. / (1. + np.exp(-self.x.value))
def backward(self):
if self.x.grad is not None:
self.x.grad = self.x.grad + self.grad * self.value * (1. - self.value)
class Tanh:
"""
Class Name: Tanh
Class Usage: compute the elementwise Tanh activation. Input is vector or matrix. In case of vector, [x_{0}, x_{1}, ..., x_{n}], output is vector [y_{0}, y_{1}, ..., y_{n}] where y_{i} = (exp(x_{i}) - exp(-x_{i}))/(exp(x_{i}) + exp(-x_{i}))
Class Functions:
forward: compute activation y_{i} for all i.
backward: compute the derivative w.r.t input vector/matrix x
"""
def __init__(self, x):
components.append(self)
self.x = x
self.grad = None if x.grad is None else DT(0)
def forward(self):
x_exp = np.exp(self.x.value)
x_neg_exp = np.exp(-self.x.value)
self.value = (x_exp - x_neg_exp) / (x_exp + x_neg_exp)
def backward(self):
if self.x.grad is not None:
self.x.grad = self.x.grad + self.grad * (1 - self.value * self.value)
class RELU:
"""
Class Name: RELU
Class Usage: compute the elementwise RELU activation. Input is vector or matrix. In case of vector, [x_{0}, x_{1}, ..., x_{n}], output is vector [y_{0}, y_{1}, ..., y_{n}] where y_{i} = max(0, x_{i})
Class Functions:
forward: compute activation y_{i} for all i.
backward: compute the derivative w.r.t input vector/matrix x
"""
def __init__(self, x):
components.append(self)
self.x = x
self.grad = None if x.grad is None else DT(0)
def forward(self):
self.value = np.maximum(self.x.value, 0)
def backward(self):
if self.x.grad is not None:
self.x.grad = self.x.grad + self.grad * (self.value > 0)
class LeakyRELU:
"""
Class Name: LeakyRELU
Class Usage: compute the elementwise LeakyRELU activation. Input is vector or matrix. In case of vector, [x_{0}, x_{1}, ..., x_{n}], output is vector [y_{0}, y_{1}, ..., y_{n}] where y_{i} = 0.01*x_{i} for x_{i} < 0 and y_{i} = x_{i} for x_{i} > 0
Class Functions:
forward: compute activation y_{i} for all i.
backward: compute the derivative w.r.t input vector/matrix x
"""
def __init__(self, x):
components.append(self)
self.x = x
self.grad = None if x.grad is None else DT(0)
def forward(self):
self.value = np.maximum(self.x.value, 0.01 * self.x.value)
def backward(self):
if self.x.grad is not None:
self.x.grad = self.x.grad + self.grad * np.maximum(0.01, self.value > 0)
class Softplus:
"""
Class Name: Softplus
Class Usage: compute the elementwise Softplus activation.
Class Functions:
forward: compute activation y_{i} for all i.
backward: compute the derivative w.r.t input vector/matrix x
"""
def __init__(self, x):
components.append(self)
self.x = x
self.grad = None if x.grad is None else DT(0)
def forward(self):
self.value = np.log(1. + np.exp(self.x.value))
def backward(self):
if self.x.grad is not None:
self.x.grad = self.x.grad + self.grad * 1. / (1. + np.exp(-self.x.value))
class SoftMax:
"""
Class Name: SoftMax
    Class Usage: compute the softmax activation for each element in the matrix, normalized over all elements in each batch (row). Specifically, the input is a matrix [x_{00}, x_{01}, ..., x_{0n}, ..., x_{b0}, x_{b1}, ..., x_{bn}] and the output is a matrix [p_{00}, p_{01}, ..., p_{0n}, ..., p_{b0}, ..., p_{bn}] where p_{bi} = exp(x_{bi})/(exp(x_{b0}) + ... + exp(x_{bn}))
Class Functions:
forward: compute probability p_{bi} for all b, i.
backward: compute the derivative w.r.t input matrix x
"""
def __init__(self, x):
components.append(self)
self.x = x
self.grad = None if x.grad is None else DT(0)
def forward(self):
lmax = np.max(self.x.value, axis=-1, keepdims=True)
ex = np.exp(self.x.value - lmax)
self.value = ex / np.sum(ex, axis=-1, keepdims=True)
def backward(self):
if self.x.grad is None:
return
gvdot = np.matmul(self.grad[..., np.newaxis, :], self.value[..., np.newaxis]).squeeze(-1)
self.x.grad = self.x.grad + self.value * (self.grad - gvdot)
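# Editor's note (hedged): the update above is the closed-form softmax
# Jacobian-vector product: with p = softmax(x) and upstream gradient g,
#     dL/dx_i = p_i * (g_i - sum_j g_j * p_j),
# where gvdot computes the per-row inner product sum_j g_j * p_j.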
class LogLoss:
"""
Class Name: LogLoss
    Class Usage: compute the elementwise -log(x) given matrix x; this is the loss function used in most cases.
Class Functions:
forward: compute -log(x)
backward: compute the derivative w.r.t input matrix x
"""
def __init__(self, x):
components.append(self)
self.x = x
self.grad = None if x.grad is None else DT(0)
def forward(self):
self.value = -np.log(np.maximum(eps, self.x.value))
def backward(self):
if self.x.grad is not None:
self.x.grad = self.x.grad + (-1) * self.grad / np.maximum(eps, self.x.value)
class Mean:
"""
Class Name: Mean
Class Usage: compute the mean given a vector x.
Class Functions:
forward: compute (x_{0} + ... + x_{n})/n
backward: compute the derivative w.r.t input vector x
"""
def __init__(self, x):
components.append(self)
self.x = x
self.grad = None if x.grad is None else DT(0)
def forward(self):
self.value = np.mean(self.x.value)
def backward(self):
if self.x.grad is not None:
self.x.grad = self.x.grad + self.grad * np.ones_like(self.x.value) / self.x.value.shape[0]
class Sum:
"""
Class Name: Sum
Class Usage: compute the sum of a matrix.
"""
def __init__(self, x):
components.append(self)
self.x = x
self.grad = None if x.grad is None else DT(0)
def forward(self):
self.value = np.sum(self.x.value)
def backward(self):
if self.x.grad is not None:
self.x.grad = self.x.grad + self.grad * np.ones_like(self.x.value)
class MeanwithMask:
"""
Class Name: MeanwithMask
Class Usage: compute the mean given a vector x with mask.
Class Functions:
        forward: compute x = x*mask, then sum the nonzero entries of x and divide by their count
backward: compute the derivative w.r.t input vector matrix
"""
def __init__(self, x, mask):
components.append(self)
self.x = x
self.mask = mask
self.grad = None if x.grad is None else DT(0)
def forward(self):
self.value = np.sum(self.x.value * self.mask.value) / np.sum(self.mask.value)
def backward(self):
if self.x.grad is not None:
self.x.grad = self.x.grad + self.grad * np.ones_like(self.x.value) * self.mask.value / np.sum(
self.mask.value)
class Aref: # out = x[idx]
"""
Class Name: Aref
    Class Usage: get specific entries of a matrix. x is the matrix with shape (batch_size, N) and idx
    is a vector containing the entry index for each row; x is differentiable.
Class Functions:
forward: compute x[b, idx(b)]
backward: compute the derivative w.r.t input matrix x
"""
def __init__(self, x, idx):
components.append(self)
self.x = x
self.idx = idx
self.grad = None if x.grad is None else DT(0)
def forward(self):
xflat = self.x.value.reshape(-1)
iflat = self.idx.value.reshape(-1)
outer_dim = len(iflat)
        inner_dim = len(xflat) // outer_dim  # integer row stride of the flattened matrix
self.pick = np.int32(np.array(range(outer_dim)) * inner_dim + iflat)
self.value = xflat[self.pick].reshape(self.idx.value.shape)
def backward(self):
if self.x.grad is not None:
grad = np.zeros_like(self.x.value)
gflat = grad.reshape(-1)
gflat[self.pick] = self.grad.reshape(-1)
self.x.grad = self.x.grad + grad
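# Editor's worked example (hedged): with x.value of shape (2, 3) and
# idx.value == [2, 0], forward() flattens x and picks offsets
# [0*3 + 2, 1*3 + 0] == [2, 3], i.e. it returns [x[0, 2], x[1, 0]].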
class Accuracy:
"""
Class Name: Accuracy
    Class Usage: check whether the predicted label is correct. x holds the per-class probabilities and idx is the ground-truth label.
Class Functions:
forward: find the label that has maximum probability and compare it with the ground truth label.
backward: None
"""
def __init__(self, x, idx):
components.append(self)
self.x = x
self.idx = idx
self.grad = None
def forward(self):
self.value = np.mean(np.argmax(self.x.value, axis=-1) == self.idx.value)
def backward(self):
pass
class Reshape:
"""
Class name: Reshape
    Class usage: Reshape the tensor x to a specific shape.
    Class function:
        forward: reshape the tensor x to the given shape
        backward: calculate the derivative w.r.t. x, which simply reshapes the incoming gradient back to x's original shape
"""
def __init__(self, x, shape):
components.append(self)
self.x = x
self.shape = shape
self.grad = None if x.grad is None else DT(0)
def forward(self):
self.value = np.reshape(self.x.value, self.shape)
def backward(self):
if self.x.grad is not None:
self.x.grad = self.x.grad + np.reshape(self.grad, self.x.value.shape)
def Momentum(lr, mom):
for p in params:
if not hasattr(p, 'grad_hist'):
p.grad_hist = DT(0)
p.grad_hist = mom * p.grad_hist + p.grad
p.grad = p.grad_hist
SGD(lr)
def AdaGrad(lr, ep=1e-8):
for p in params:
if not hasattr(p, 'grad_G'):
p.grad_G = DT(0)
p.grad_G = p.grad_G + p.grad * p.grad
p.grad = p.grad / np.sqrt(p.grad_G + DT(ep))
SGD(lr)
def RMSProp(lr, g=0.9, ep=1e-8):
for p in params:
if not hasattr(p, 'grad_hist'):
p.grad_hist = DT(0)
p.grad_hist = g * p.grad_hist + (1 - g) * p.grad * p.grad
p.grad = p.grad / np.sqrt(p.grad_hist + DT(ep))
SGD(lr)
def Adam(alpha=0.001, b1=0.9, b2=0.999, ep=1e-8):
b1 = DT(b1)
b2 = DT(b2)
ep = DT(ep)
_a_b1t = DT(1.0) * b1
_a_b2t = DT(1.0) * b2
for p in params:
if not hasattr(p, 'grad_hist'):
p.grad_hist = DT(0)
p.grad_h2 = DT(0)
p.grad_hist = b1 * p.grad_hist + (1. - b1) * p.grad
p.grad_h2 = b2 * p.grad_h2 + (1. - b2) * p.grad * p.grad
mhat = p.grad_hist / (1. - _a_b1t)
vhat = p.grad_h2 / (1. - _a_b2t)
p.grad = mhat / (np.sqrt(vhat) + ep)
SGD(alpha)
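# Editor's note (hedged): the bias-correction terms above use fixed b1 and b2
# (the names _a_b1t, _a_b2t suggest b1**t, b2**t accumulated per step was
# intended), so this matches textbook Adam exactly only on the first step and
# approximates it afterwards.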
# clip the gradient if the norm of gradient is larger than some threshold, this is crucial for RNN.
def GradClip(grad_clip):
for p in params:
l2 = np.sqrt(np.sum(p.grad * p.grad))
if l2 >= grad_clip:
p.grad *= grad_clip / l2
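# Editor's sketch (hedged): the intended per-step call order for RNN training
# appears to be forward, backward, clip, then an optimizer step, e.g.:
#     Forward()
#     Backward(loss)    # fills p.grad for every Param
#     GradClip(5.0)     # rescales any p.grad whose own L2 norm exceeds 5.0
#     Adam()            # any of SGD/Momentum/AdaGrad/RMSProp/Adam; resets p.grad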
##################################################### Recurrent Components ##############################################
class Embed:
"""
Class name: Embed
Class usage: Embed layer.
Class function:
        forward: given the embedding matrix w2v and a word idx, return its corresponding embedding vector.
        backward: calculate the derivative w.r.t. the embedding matrix
"""
def __init__(self, idx, w2v):
components.append(self)
self.idx = idx
self.w2v = w2v
self.grad = None if w2v.grad is None else DT(0)
def forward(self):
self.value = self.w2v.value[np.int32(self.idx.value), :]
    def backward(self):
        if self.w2v.grad is not None:
            # accumulate into w2v.grad instead of re-zeroing it, so several Embed
            # nodes sharing one embedding matrix (e.g. an unrolled RNN) sum their
            # contributions; materialize the array on first use (Param grads start
            # as a scalar 0)
            if not isinstance(self.w2v.grad, np.ndarray):
                self.w2v.grad = np.zeros(self.w2v.value.shape)
            self.w2v.grad[np.int32(self.idx.value), :] = self.w2v.grad[np.int32(self.idx.value), :] + self.grad
class ConCat:
"""
Class name: ConCat
Class usage: ConCat layer.
Class function:
        forward: concatenate two matrices along the last axis.
        backward: calculate the derivatives w.r.t. matrices x and y.
"""
def __init__(self, x, y):
components.append(self)
self.x = x
self.y = y
self.grad = None if x.grad is None and y.grad is None else DT(0)
def forward(self):
self.value = np.concatenate((self.x.value, self.y.value), axis=-1)
def backward(self):
dim_x = self.x.value.shape[-1]
dim_y = self.y.value.shape[-1]
if self.x.grad is not None:
if len(self.x.value.shape) == 2:
self.x.grad = self.x.grad + self.grad[:, 0:dim_x]
else:
self.x.grad = self.x.grad + self.grad[0:dim_x]
if self.y.grad is not None:
if len(self.y.value.shape) == 2:
self.y.grad = self.y.grad + self.grad[:, dim_x:dim_x + dim_y]
else:
self.y.grad = self.y.grad + self.grad[dim_x:dim_x + dim_y]
class ArgMax:
"""
Class name: ArgMax
Class usage: ArgMax layer.
Class function:
forward: given x, calculate the index which has the maximum value
backward: None
"""
def __init__(self, x):
components.append(self)
self.x = x
def forward(self):
self.value = np.argmax(self.x.value)
def backward(self):
pass
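# Editor's sketch (hedged): a tiny softmax classifier wired from the
# components above; shapes, names, and the learning rate are illustrative.
def _train_step_sketch(features, labels, W):
    """features: Value (batch, f_dim); labels: Value of int class ids;
    W: Param (f_dim, n_classes). Returns the scalar loss value."""
    global components
    components = []                   # rebuild the graph for this step
    logits = VDot(features, W)        # (batch, n_classes)
    probs = SoftMax(logits)           # per-row class probabilities
    picked = Aref(probs, labels)      # probability assigned to the true class
    loss = Mean(LogLoss(picked))      # average negative log-likelihood
    Forward()
    Backward(loss)
    SGD(0.1)
    return loss.value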
| 10,336
| -18
| 1,968
|
29d18b780f4ac7bfea32226a11abc78d881f754d
| 881
|
py
|
Python
|
jupyterprobe/memory_utils.py
|
vermashresth/jupyter-probe
|
cec35399dbe0d19d4264df02df305504bea0f695
|
[
"MIT"
] | 8
|
2020-12-15T14:19:29.000Z
|
2021-09-23T03:39:21.000Z
|
build/lib/jupyterprobe/memory_utils.py
|
vermashresth/jupyter-probe
|
cec35399dbe0d19d4264df02df305504bea0f695
|
[
"MIT"
] | null | null | null |
build/lib/jupyterprobe/memory_utils.py
|
vermashresth/jupyter-probe
|
cec35399dbe0d19d4264df02df305504bea0f695
|
[
"MIT"
] | null | null | null |
import os
import psutil
| 30.37931
| 80
| 0.634506
|
import os
import psutil
def memory_usage_psutil(pid=None):
if pid is None:
pid = os.getpid()
process = psutil.Process(pid)
return process.memory_percent()
def get_memory_info_gpu_cpu():
res = {}
cpu = psutil.virtual_memory()
cpu_total = round(cpu[0]/2**30, 1)
cpu_used = round(cpu[3]/2**30, 1)
cpu_percent = cpu[2]
res['CPU'] = {'total':cpu_total, 'used':cpu_used, 'percent':cpu_percent}
try:
import py3nvml.py3nvml as pynvml
pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)
gpu = pynvml.nvmlDeviceGetMemoryInfo(handle)
gpu_total = round(gpu.total/2**30,1)
gpu_used = round(gpu.used/2**30,1)
gpu_percent = round(gpu_used/gpu_total, 3)*100
res['GPU'] = {'total':gpu_total, 'used':gpu_used, 'percent':gpu_percent}
    except Exception:  # py3nvml missing or no GPU available; report CPU stats only
        pass
return res
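# Editor's usage sketch (hedged):
#     info = get_memory_info_gpu_cpu()
#     print("CPU: %s/%s GiB (%s%%)" % (info['CPU']['used'],
#                                      info['CPU']['total'],
#                                      info['CPU']['percent']))
#     if 'GPU' in info:  # present only when py3nvml and a GPU are available
#         print("GPU: %s/%s GiB" % (info['GPU']['used'], info['GPU']['total']))
#     print("this process uses", round(memory_usage_psutil(), 2), "% of RAM")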
| 811
| 0
| 46
|
2471ad588b4c0f46ff2f08f2c04ab2774fa69018
| 1,661
|
py
|
Python
|
pymoira/utils.py
|
vasilvv/pymoira
|
27facba223c220bf4c55d9b7e799a2ff951eef13
|
[
"MIT"
] | 2
|
2015-11-13T23:03:12.000Z
|
2015-12-19T02:43:20.000Z
|
pymoira/utils.py
|
vasilvv/pymoira
|
27facba223c220bf4c55d9b7e799a2ff951eef13
|
[
"MIT"
] | null | null | null |
pymoira/utils.py
|
vasilvv/pymoira
|
27facba223c220bf4c55d9b7e799a2ff951eef13
|
[
"MIT"
] | 1
|
2019-10-23T06:26:13.000Z
|
2019-10-23T06:26:13.000Z
|
#
## PyMoira client library
##
## This file contains the utility functions common for multiple modules.
#
import datetime
from .errors import UserError
def convertToMoiraValue(val):
"""Converts data from Python to Moira protocol representation."""
if type(val) == bool:
return '1' if val else '0'
else:
return str(val)
def responseToDict(description, response):
"""Transforms the query response to a dictionary using a description
of format ( (field name, type) ), where types are bool, int, string and
date time."""
if len(description) != len(response):
raise UserError("Error returned the response with invalid number of entries")
result = {}
for value, field in ( (response[i], description[i]) for i in range( 0, len(response) ) ):
name, datatype = field
if datatype == bool:
result[name] = convertMoiraBool(value)
elif datatype == int:
result[name] = convertMoiraInt(value)
elif datatype == datetime.datetime:
result[name] = convertMoiraDateTime(value)
elif datatype == str:
result[name] = value
else:
raise UserError("Unsupported Moira data type specified: %s" % datatype)
return result
| 29.140351
| 93
| 0.635762
|
#
## PyMoira client library
##
## This file contains the utility functions common for multiple modules.
#
import datetime
from .errors import UserError
def convertMoiraBool(val):
if val == '1':
return True
if val == '0':
return False
raise UserError("Invalid boolean value received from Moira server")
def convertMoiraInt(val):
try:
return int(val)
except ValueError:
return None
def convertMoiraDateTime(val):
return datetime.datetime.strptime(val, '%d-%b-%Y %H:%M:%S')
def convertToMoiraValue(val):
"""Converts data from Python to Moira protocol representation."""
if type(val) == bool:
return '1' if val else '0'
else:
return str(val)
def responseToDict(description, response):
"""Transforms the query response to a dictionary using a description
of format ( (field name, type) ), where types are bool, int, string and
date time."""
if len(description) != len(response):
raise UserError("Error returned the response with invalid number of entries")
result = {}
for value, field in ( (response[i], description[i]) for i in range( 0, len(response) ) ):
name, datatype = field
if datatype == bool:
result[name] = convertMoiraBool(value)
elif datatype == int:
result[name] = convertMoiraInt(value)
elif datatype == datetime.datetime:
result[name] = convertMoiraDateTime(value)
elif datatype == str:
result[name] = value
else:
raise UserError("Unsupported Moira data type specified: %s" % datatype)
return result
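# Hypothetical worked example (editorial addition; the field names are
# invented): responseToDict pairs each response entry with its (name, type)
# descriptor and runs the matching converter defined above.
if __name__ == '__main__':
    example_description = (('name', str), ('active', bool),
                           ('modtime', datetime.datetime))
    example_response = ['krb5', '1', '25-Dec-2020 12:34:56']
    print(responseToDict(example_description, example_response))
    # -> {'name': 'krb5', 'active': True,
    #     'modtime': datetime.datetime(2020, 12, 25, 12, 34, 56)}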
| 309
| 0
| 69
|
fa6009dfdc1734e54f9947589f388129abb13195
| 1,072
|
py
|
Python
|
lambda/test.py
|
p1ass/emojic.ch
|
5dace9d4ba6f6a46cfbd29746c74964ad8ba1822
|
[
"MIT"
] | 14
|
2020-04-03T04:14:49.000Z
|
2021-08-12T04:24:43.000Z
|
lambda/test.py
|
p1ass/emojic.ch
|
5dace9d4ba6f6a46cfbd29746c74964ad8ba1822
|
[
"MIT"
] | 78
|
2020-04-03T04:02:31.000Z
|
2021-06-04T01:18:24.000Z
|
lambda/test.py
|
naoki-kishi/emojic.ch
|
5dace9d4ba6f6a46cfbd29746c74964ad8ba1822
|
[
"MIT"
] | 1
|
2020-05-09T12:24:08.000Z
|
2020-05-09T12:24:08.000Z
|
import unittest
import base64
from detect_face import lambda_handler
# A 200 is returned when a face is detected
# A 204 is returned when no face is detected
# Uploading a non-image file with a faked image extension is rejected as well
if __name__ == "__main__":
unittest.main()
| 26.8
| 62
| 0.650187
|
import unittest
import base64
from detect_face import lambda_handler
class TestLambdaHandler(unittest.TestCase):
def setUp(self):
self.IMAGE_DIRECTORY = "./test_images/"
def getResponse(self, filename):
with open(self.IMAGE_DIRECTORY + filename, "rb") as f:
img_binary = f.read()
event = {"body": base64.b64encode(img_binary)}
response = lambda_handler(event, {})
return response
    # A 200 is returned when a face is detected
def test_face(self):
filename = "two_faces.jpg"
response = self.getResponse(filename)
self.assertEqual(200, response["statusCode"])
    # A 204 is returned when no face is detected
def test_no_face(self):
filename = "cat.jpg"
response = self.getResponse(filename)
self.assertEqual(204, response["statusCode"])
    # Uploading a non-image file with a faked image extension is rejected as well
def test_not_image(self):
filename = "not_image.txt.jpg"
response = self.getResponse(filename)
self.assertEqual(400, response["statusCode"])
if __name__ == "__main__":
unittest.main()
| 696
| 22
| 155
|
28112880e291a8479345abe72ce6130d81553134
| 3,232
|
py
|
Python
|
mkt/api/base.py
|
oremj/zamboni
|
a751dc6d22f7af947da327b0a091cbab0a999f49
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/api/base.py
|
oremj/zamboni
|
a751dc6d22f7af947da327b0a091cbab0a999f49
|
[
"BSD-3-Clause"
] | null | null | null |
mkt/api/base.py
|
oremj/zamboni
|
a751dc6d22f7af947da327b0a091cbab0a999f49
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from django.core.exceptions import ObjectDoesNotExist
from tastypie import http
from tastypie.bundle import Bundle
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.resources import ModelResource
import commonware.log
import oauth2
from translations.fields import PurifiedField, TranslatedField
log = commonware.log.getLogger('z.api')
| 34.752688
| 78
| 0.626238
|
import json
from django.core.exceptions import ObjectDoesNotExist
from tastypie import http
from tastypie.bundle import Bundle
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.resources import ModelResource
import commonware.log
import oauth2
from translations.fields import PurifiedField, TranslatedField
log = commonware.log.getLogger('z.api')
class MarketplaceResource(ModelResource):
def get_resource_uri(self, bundle_or_obj):
# Fix until my pull request gets pulled into tastypie.
# https://github.com/toastdriven/django-tastypie/pull/490
kwargs = {
'resource_name': self._meta.resource_name,
}
if isinstance(bundle_or_obj, Bundle):
kwargs['pk'] = bundle_or_obj.obj.pk
else:
kwargs['pk'] = bundle_or_obj.pk
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
return self._build_reverse_url("api_dispatch_detail", kwargs=kwargs)
@classmethod
def should_skip_field(cls, field):
# We don't want to skip translated fields.
if isinstance(field, (PurifiedField, TranslatedField)):
return False
        return bool(getattr(field, 'rel'))
def form_errors(self, forms):
errors = {}
if not isinstance(forms, list):
forms = [forms]
for f in forms:
if isinstance(f.errors, list): # Cope with formsets.
for e in f.errors:
errors.update(e)
continue
errors.update(dict(f.errors.items()))
response = http.HttpBadRequest(json.dumps({'error_message': errors}),
content_type='application/json')
return ImmediateHttpResponse(response=response)
def get_object_or_404(self, cls, **filters):
"""
A wrapper around our more familiar get_object_or_404, for when we need
to get access to an object that isn't covered by get_obj.
"""
if not filters:
raise ImmediateHttpResponse(response=http.HttpNotFound())
try:
return cls.objects.get(**filters)
except (cls.DoesNotExist, cls.MultipleObjectsReturned):
raise ImmediateHttpResponse(response=http.HttpNotFound())
def get_by_resource_or_404(self, request, **kwargs):
"""
A wrapper around the obj_get to just get the object.
"""
try:
obj = self.obj_get(request, **kwargs)
except ObjectDoesNotExist:
raise ImmediateHttpResponse(response=http.HttpNotFound())
return obj
def dispatch(self, request_type, request, **kwargs):
try:
auth = (oauth2.Request._split_header(
request.META.get('HTTP_AUTHORIZATION', '')))
name = auth.get('oauth_consumer_key', 'none')
except (AttributeError, IndexError):
# Problems parsing the header.
name = 'error'
log.debug('%s:%s:%s' % (
request.META['REQUEST_METHOD'], request.path, name))
return (super(MarketplaceResource, self)
.dispatch(request_type, request, **kwargs))
| 1,858
| 981
| 23
|
0ff92fee0b03da94feeb1de00edd583d81887727
| 953
|
py
|
Python
|
instagramApp/migrations/0002_auto_20211016_1005.py
|
Tito-74/Instagram-App
|
b44f21fb0c7aa5f8e978194814e603b8f9c66592
|
[
"MIT"
] | null | null | null |
instagramApp/migrations/0002_auto_20211016_1005.py
|
Tito-74/Instagram-App
|
b44f21fb0c7aa5f8e978194814e603b8f9c66592
|
[
"MIT"
] | null | null | null |
instagramApp/migrations/0002_auto_20211016_1005.py
|
Tito-74/Instagram-App
|
b44f21fb0c7aa5f8e978194814e603b8f9c66592
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-10-16 07:05
from django.db import migrations, models
import django.db.models.deletion
| 32.862069
| 117
| 0.60021
|
# Generated by Django 3.2.8 on 2021-10-16 07:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('instagramApp', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='comments',
),
migrations.CreateModel(
name='Comments',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(blank=True)),
('time_posted', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instagramApp.post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='instagramApp.profile')),
],
),
]
| 0
| 806
| 23
|
bdbc3c77437b54a570ec5093b2f5ef0704bf1f22
| 929
|
py
|
Python
|
tools/cardiac_py/DMD/JParser.py
|
paulkefer/cardioid
|
59c07b714d8b066b4f84eb50487c36f6eadf634c
|
[
"MIT-0",
"MIT"
] | 33
|
2018-12-12T20:05:06.000Z
|
2021-09-26T13:30:16.000Z
|
tools/cardiac_py/DMD/JParser.py
|
paulkefer/cardioid
|
59c07b714d8b066b4f84eb50487c36f6eadf634c
|
[
"MIT-0",
"MIT"
] | 5
|
2019-04-25T11:34:43.000Z
|
2021-11-14T04:35:37.000Z
|
tools/cardiac_py/DMD/JParser.py
|
paulkefer/cardioid
|
59c07b714d8b066b4f84eb50487c36f6eadf634c
|
[
"MIT-0",
"MIT"
] | 15
|
2018-12-21T22:44:59.000Z
|
2021-08-29T10:30:25.000Z
|
'''
Created on 27/02/2013
@author: butler
'''
import json
class JParser(object):
    '''
    Parser for the JSON config file.
    '''
def __init__(self, file_name):
        '''
        Store the config file path and parse it immediately.
        '''
self.file_name = file_name
"""
self.delta_t_ms
self.sample_rate
self.t_min_s
self.t_max_s
"""
self.parse()
| 23.225
| 60
| 0.518837
|
'''
Created on 27/02/2013
@author: butler
'''
import json
class JParser(object):
    '''
    Parser for the JSON config file.
    '''
def __init__(self, file_name):
        '''
        Store the config file path and parse it immediately.
        '''
self.file_name = file_name
"""
self.delta_t_ms
self.sample_rate
self.t_min_s
self.t_max_s
"""
self.parse()
def parse(self):
try:
self.fd = open(self.file_name, 'r')
jdict = json.load(self.fd)
jdict = jdict["DMD"]
self.delta_t_ms = float(jdict['delta_t_ms'])
self.sample_rate = int(jdict['sample_rate'])
self.t_min_s = jdict['t_min_s']
self.t_max_s = jdict['t_max_s']
            self.solver = jdict['solver']
self.calc_modes = jdict['calc_modes']
self.mode_list = jdict['modes']
finally:
self.fd.close()
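# Illustrative usage (editorial addition) against a synthetic config file;
# the key values below are assumptions that merely satisfy parse().
if __name__ == '__main__':
    demo = {"DMD": {"delta_t_ms": 0.5, "sample_rate": 10,
                    "t_min_s": 0.0, "t_max_s": 1.0,
                    "solver": "exact", "calc_modes": True,
                    "modes": [1, 2, 3]}}
    with open('dmd_config.json', 'w') as fd:
        json.dump(demo, fd)
    p = JParser('dmd_config.json')
    print(p.delta_t_ms, p.mode_list)  # 0.5 [1, 2, 3]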
| 510
| 0
| 27
|
53582b3a4539df932cca9ee602fac6455a7e9b85
| 1,443
|
py
|
Python
|
trade.py
|
chris-greening/coinbase-portfolio
|
c20226079d1236e71368860e6f24b01df789a3ed
|
[
"MIT"
] | null | null | null |
trade.py
|
chris-greening/coinbase-portfolio
|
c20226079d1236e71368860e6f24b01df789a3ed
|
[
"MIT"
] | null | null | null |
trade.py
|
chris-greening/coinbase-portfolio
|
c20226079d1236e71368860e6f24b01df789a3ed
|
[
"MIT"
] | null | null | null |
import datetime
import pytz
from decimal import Decimal
| 40.083333
| 100
| 0.607069
|
import datetime
import pytz
from decimal import Decimal
class Trade:
def __init__(self, coinbase_resp):
"""Parse response data from Coinbase API"""
self.coinbase_resp = coinbase_resp
self.parse_response()
def parse_response(self):
self.amount = Decimal(self.coinbase_resp["amount"].amount)
self.currency = self.coinbase_resp["amount"]["currency"]
self.created_at = datetime.datetime.fromisoformat(self.coinbase_resp["created_at"].replace(
'Z', '+00:00')).astimezone(pytz.timezone("America/New_York"))
self.fee = Decimal(self.coinbase_resp["fees"][0]["amount"].amount)
self.id = self.coinbase_resp["id"]
self.resource = self.coinbase_resp["resource"]
self.subtotal = Decimal(self.coinbase_resp["subtotal"]["amount"])
self.total = Decimal(self.coinbase_resp["total"]["amount"])
self.price = Decimal(self.coinbase_resp["unit_price"]["amount"])
def to_dict(self):
return {
"currency":self.currency,
"resource":self.resource,
"created_at":self.created_at,
"amount":self.amount,
"total":self.total,
"subtotal":self.subtotal,
"fee":self.fee,
"price":self.price
}
def __repr__(self):
return f"< {self.resource.upper()}: {self.amount} (${self.total}) {self.currency} >"
| 1,115
| 246
| 24
|
dfaaf65c142c6efc4c3a5d05426059f979f9b07b
| 418
|
py
|
Python
|
practice.py
|
narayanants/python-mega-course
|
2ba2980ab21dfbed5f86f00695559f7831b5c566
|
[
"MIT"
] | null | null | null |
practice.py
|
narayanants/python-mega-course
|
2ba2980ab21dfbed5f86f00695559f7831b5c566
|
[
"MIT"
] | null | null | null |
practice.py
|
narayanants/python-mega-course
|
2ba2980ab21dfbed5f86f00695559f7831b5c566
|
[
"MIT"
] | null | null | null |
x = 1
y = 'Hello'
z = 10.123
d = x + z
print(d)
print(type(x))
print(type(y))
print(y.upper())
print(y.lower())
print(type(z))
a = [1,2,3,4,5,6,6,7,8]
print(len(a))
print(a.count(6))
b = list(range(10,100,10))
print(b)
stud_marks = {"Madhan":90, "Raj":25,"Mani":80}
print(stud_marks.keys())
print(stud_marks.values())
ab = list(range(10,100,10))
ab.append(100)
ab.append(110)
ab.remove(110)
print(ab[0:2])
| 11.611111
| 46
| 0.617225
|
x = 1
y = 'Hello'
z = 10.123
d = x + z
print(d)
print(type(x))
print(type(y))
print(y.upper())
print(y.lower())
print(type(z))
a = [1,2,3,4,5,6,6,7,8]
print(len(a))
print(a.count(6))
b = list(range(10,100,10))
print(b)
stud_marks = {"Madhan":90, "Raj":25,"Mani":80}
print(stud_marks.keys())
print(stud_marks.values())
ab = list(range(10,100,10))
ab.append(100)
ab.append(110)
ab.remove(110)
print(ab[0:2])
| 0
| 0
| 0
|
7fb662a179c9be0efb6d3755e45803047f17796c
| 1,347
|
py
|
Python
|
telegram/main.py
|
hakierspejs/mierzyciel
|
bb680854fd351820c45415b8e85702ea60851b0d
|
[
"WTFPL"
] | null | null | null |
telegram/main.py
|
hakierspejs/mierzyciel
|
bb680854fd351820c45415b8e85702ea60851b0d
|
[
"WTFPL"
] | null | null | null |
telegram/main.py
|
hakierspejs/mierzyciel
|
bb680854fd351820c45415b8e85702ea60851b0d
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/env python
import logging
import socket
import urllib.request
import time
LOGGER = logging.getLogger('mierzyciel.telegram')
if __name__ == '__main__':
fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level='INFO', format=fmt)
main()
| 30.613636
| 78
| 0.604306
|
#!/usr/bin/env python
import logging
import socket
import urllib.request
import time
LOGGER = logging.getLogger('mierzyciel.telegram')
def get_telegram_stats(chat_name):
s = urllib.request.urlopen('https://t.me/' + chat_name).read().decode()
prefix = '<div class="tgme_page_extra">'
line = [x for x in s.split('\n') if x.startswith(prefix)][0][len(prefix):]
words = line.split()
return int(words[0]), int(words[2])
def upload_to_graphite(h, metric, value):
s = socket.socket()
try:
s.connect(h)
now = int(time.time())
buf = f'{metric} {value} {now}\n'.encode()
LOGGER.info('Sending %r to %r', buf, h)
s.send(buf)
s.close()
except (ConnectionRefusedError, socket.timeout) as e:
LOGGER.exception(e)
time.sleep(3.0)
def main():
while True:
for chat in ['hakierspejs', 'hslodzbot']:
stats = get_telegram_stats(chat)
h = ('graphite.hs-ldz.pl', 2003)
prefix = 'hakierspejs.telegram.' + chat + '.'
upload_to_graphite(h, prefix + 'num_joined', stats[0])
upload_to_graphite(h, prefix + 'num_active', stats[1])
time.sleep(60)
if __name__ == '__main__':
fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level='INFO', format=fmt)
main()
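# Editorial sketch (not part of the original script): get_telegram_stats
# assumes the t.me preview page contains a line like the sample below; this
# helper replays the same parsing on a canned string (counts invented).
def _demo_parse():
    line = '<div class="tgme_page_extra">215 members, 12 online</div>'
    prefix = '<div class="tgme_page_extra">'
    words = line[len(prefix):].split()
    return int(words[0]), int(words[2])  # -> (215, 12)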
| 987
| 0
| 69
|
2cfaac913c747cb5c044f427f461fb959f263be3
| 4,312
|
py
|
Python
|
tests/test_utils.py
|
dbluhm/aries-staticagent-python
|
1f27f85cf72cc6a2e10a6495c85f2b21462362cd
|
[
"Apache-2.0"
] | 18
|
2019-06-20T18:19:36.000Z
|
2022-01-17T04:30:18.000Z
|
tests/test_utils.py
|
dbluhm/aries-staticagent-python
|
1f27f85cf72cc6a2e10a6495c85f2b21462362cd
|
[
"Apache-2.0"
] | 16
|
2019-06-20T18:24:49.000Z
|
2021-05-19T17:38:40.000Z
|
tests/test_utils.py
|
dbluhm/aries-staticagent-python
|
1f27f85cf72cc6a2e10a6495c85f2b21462362cd
|
[
"Apache-2.0"
] | 20
|
2019-06-20T16:54:13.000Z
|
2021-12-03T00:08:37.000Z
|
""" Test utilities. """
import pytest
from aries_staticagent import utils, Message
from aries_staticagent.mtc import (
AUTHCRYPT_AFFIRMED,
AUTHCRYPT_DENIED,
ANONCRYPT_AFFIRMED,
ANONCRYPT_DENIED,
)
@pytest.fixture
def test_preprocess():
"""Test preprocessing decorator."""
@utils.preprocess(preprocessor)
handled = test_handler({})
assert handled["preprocessed"]
@pytest.mark.asyncio
async def test_preprocess_async_handler():
"""Test preprocessing decorator."""
@utils.preprocess(preprocessor)
handled = await test_handler({})
assert handled["preprocessed"]
@pytest.mark.asyncio
async def test_preprocess_async_handler_and_preprocessor():
"""Test preprocessing decorator."""
@utils.preprocess_async(preprocessor)
handled = await test_handler({})
assert handled["preprocessed"]
def test_validate(message):
"""Test validation of message"""
@utils.validate(validator)
validate_test(message)
def test_validate_modify_msg():
"""Test validation can modify the message."""
@utils.validate(validator)
test_handler({})
def test_validate_with_other_decorators():
"""Test validation of message"""
def fake_route():
"""Register route decorator."""
return _fake_route_decorator
@utils.validate(validator)
@fake_route()
@fake_route()
@utils.validate(validator)
handled = validate_test({"@id": "12345"})
assert handled["validated"]
handled = validate_test2({"@id": "12345"})
assert handled["validated"]
def test_mtc_decorator(message):
"""Test the MTC decorator."""
@utils.mtc(AUTHCRYPT_AFFIRMED, AUTHCRYPT_DENIED)
message.mtc[AUTHCRYPT_AFFIRMED] = True
message.mtc[AUTHCRYPT_DENIED] = False
mtc_test(message)
def test_mtc_decorator_not_met(message):
"""Test the MTC decorator."""
@utils.mtc(AUTHCRYPT_AFFIRMED)
message.mtc[AUTHCRYPT_AFFIRMED] = True
message.mtc[AUTHCRYPT_DENIED] = False
with pytest.raises(utils.InsufficientMessageTrust):
mtc_test(message)
def test_authcrypted_decorator(message):
"""Test the authcrypted decorator."""
@utils.authcrypted
message.mtc[AUTHCRYPT_AFFIRMED] = True
message.mtc[AUTHCRYPT_DENIED] = False
mtc_test(message)
def test_authcrypted_decorator_not_met(message):
"""Test the authcrypted decorator."""
@utils.authcrypted
message.mtc[AUTHCRYPT_AFFIRMED] = True
with pytest.raises(utils.InsufficientMessageTrust):
mtc_test(message)
def test_anoncrypted_decorator(message):
"""Test the anoncrypted decorator."""
@utils.anoncrypted
message.mtc[ANONCRYPT_AFFIRMED] = True
message.mtc[ANONCRYPT_DENIED] = False
mtc_test(message)
def test_anoncrypted_decorator_not_met(message):
"""Test the anoncrypted decorator."""
@utils.anoncrypted
message.mtc[ANONCRYPT_AFFIRMED] = True
with pytest.raises(utils.InsufficientMessageTrust):
mtc_test(message)
| 21.668342
| 81
| 0.666744
|
""" Test utilities. """
import pytest
from aries_staticagent import utils, Message
from aries_staticagent.mtc import (
AUTHCRYPT_AFFIRMED,
AUTHCRYPT_DENIED,
ANONCRYPT_AFFIRMED,
ANONCRYPT_DENIED,
)
@pytest.fixture
def message():
yield Message.parse_obj(
{"@type": "doc_uri/protocol/0.1/test", "@id": "12345", "content": "test"}
)
def test_preprocess():
"""Test preprocessing decorator."""
def preprocessor(msg):
msg["preprocessed"] = True
return msg
@utils.preprocess(preprocessor)
def test_handler(msg):
return msg
handled = test_handler({})
assert handled["preprocessed"]
@pytest.mark.asyncio
async def test_preprocess_async_handler():
"""Test preprocessing decorator."""
def preprocessor(msg):
msg["preprocessed"] = True
return msg
@utils.preprocess(preprocessor)
async def test_handler(msg):
return msg
handled = await test_handler({})
assert handled["preprocessed"]
@pytest.mark.asyncio
async def test_preprocess_async_handler_and_preprocessor():
"""Test preprocessing decorator."""
async def preprocessor(msg):
msg["preprocessed"] = True
return msg
@utils.preprocess_async(preprocessor)
async def test_handler(msg):
return msg
handled = await test_handler({})
assert handled["preprocessed"]
def test_validate(message):
"""Test validation of message"""
def validator(msg):
assert msg.id == "12345"
return msg
@utils.validate(validator)
def validate_test(msg):
assert msg
validate_test(message)
def test_validate_modify_msg():
"""Test validation can modify the message."""
def validator(msg):
msg["modified"] = True
return msg
@utils.validate(validator)
def test_handler(msg):
assert msg["modified"]
test_handler({})
def test_validate_with_other_decorators():
"""Test validation of message"""
def validator(msg):
assert msg["@id"] == "12345"
msg["validated"] = True
return msg
def fake_route():
"""Register route decorator."""
def _fake_route_decorator(func):
return func
return _fake_route_decorator
@utils.validate(validator)
@fake_route()
def validate_test(msg):
return msg
@fake_route()
@utils.validate(validator)
def validate_test2(msg):
return msg
handled = validate_test({"@id": "12345"})
assert handled["validated"]
handled = validate_test2({"@id": "12345"})
assert handled["validated"]
def test_mtc_decorator(message):
"""Test the MTC decorator."""
@utils.mtc(AUTHCRYPT_AFFIRMED, AUTHCRYPT_DENIED)
def mtc_test(msg):
assert msg
message.mtc[AUTHCRYPT_AFFIRMED] = True
message.mtc[AUTHCRYPT_DENIED] = False
mtc_test(message)
def test_mtc_decorator_not_met(message):
"""Test the MTC decorator."""
@utils.mtc(AUTHCRYPT_AFFIRMED)
def mtc_test(msg):
assert msg
message.mtc[AUTHCRYPT_AFFIRMED] = True
message.mtc[AUTHCRYPT_DENIED] = False
with pytest.raises(utils.InsufficientMessageTrust):
mtc_test(message)
def test_authcrypted_decorator(message):
"""Test the authcrypted decorator."""
@utils.authcrypted
def mtc_test(msg):
assert msg
message.mtc[AUTHCRYPT_AFFIRMED] = True
message.mtc[AUTHCRYPT_DENIED] = False
mtc_test(message)
def test_authcrypted_decorator_not_met(message):
"""Test the authcrypted decorator."""
@utils.authcrypted
def mtc_test(msg):
assert msg
message.mtc[AUTHCRYPT_AFFIRMED] = True
with pytest.raises(utils.InsufficientMessageTrust):
mtc_test(message)
def test_anoncrypted_decorator(message):
"""Test the anoncrypted decorator."""
@utils.anoncrypted
def mtc_test(msg):
assert msg
message.mtc[ANONCRYPT_AFFIRMED] = True
message.mtc[ANONCRYPT_DENIED] = False
mtc_test(message)
def test_anoncrypted_decorator_not_met(message):
"""Test the anoncrypted decorator."""
@utils.anoncrypted
def mtc_test(msg):
assert msg
message.mtc[ANONCRYPT_AFFIRMED] = True
with pytest.raises(utils.InsufficientMessageTrust):
mtc_test(message)
| 764
| 0
| 553
|
12015e477b8e55a091fff7f2d84582fa6e3275ff
| 1,486
|
py
|
Python
|
HLTriggerOffline/HeavyFlavor/python/heavyFlavorValidation_cfi.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
HLTriggerOffline/HeavyFlavor/python/heavyFlavorValidation_cfi.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
HLTriggerOffline/HeavyFlavor/python/heavyFlavorValidation_cfi.py
|
pasmuss/cmssw
|
566f40c323beef46134485a45ea53349f59ae534
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
heavyFlavorValidation = cms.EDAnalyzer("HeavyFlavorValidation",
DQMFolder = cms.untracked.string("HLT/HeavyFlavor"),
TriggerProcessName = cms.untracked.string("HLT"),
TriggerPathName = cms.untracked.string("HLT_Mu5"),
TriggerSummaryRAW = cms.untracked.string("hltTriggerSummaryRAW"),
TriggerSummaryAOD = cms.untracked.string("hltTriggerSummaryAOD"),
TriggerResults = cms.untracked.string("TriggerResults"),
RecoMuons = cms.InputTag("muons"),
GenParticles = cms.InputTag("genParticles"),
# list IDs of muon mothers, -1:don't check, 0:particle gun, 23:Z, 443:J/psi, 553:Upsilon, 531:Bs, 333:Phi
MotherIDs = cms.untracked.vint32(23,443,553,531,333,0),
GenGlobDeltaRMatchingCut = cms.untracked.double(0.1),
GlobL1DeltaRMatchingCut = cms.untracked.double(0.3),
GlobL2DeltaRMatchingCut = cms.untracked.double(0.3),
GlobL3DeltaRMatchingCut = cms.untracked.double(0.1),
DeltaEtaBins = cms.untracked.vdouble(100, -.5, .5),
DeltaPhiBins = cms.untracked.vdouble(100, -.5, .5),
MuonPtBins = cms.untracked.vdouble(1., 3., 5., 9., 15., 32., 64., 128., 256., 512., 1024., 2048.),
MuonEtaBins = cms.untracked.vdouble(16, -2.4, 2.4),
MuonPhiBins = cms.untracked.vdouble(12, -3.15, 3.15),
DimuonPtBins = cms.untracked.vdouble(0., 2., 4., 6., 8., 10., 15., 25., 50., 100.),
DimuonEtaBins = cms.untracked.vdouble(16, -2.4, 2.4),
DimuonDRBins = cms.untracked.vdouble(10, 0., 1.)
)
| 55.037037
| 105
| 0.697174
|
import FWCore.ParameterSet.Config as cms
heavyFlavorValidation = cms.EDAnalyzer("HeavyFlavorValidation",
DQMFolder = cms.untracked.string("HLT/HeavyFlavor"),
TriggerProcessName = cms.untracked.string("HLT"),
TriggerPathName = cms.untracked.string("HLT_Mu5"),
TriggerSummaryRAW = cms.untracked.string("hltTriggerSummaryRAW"),
TriggerSummaryAOD = cms.untracked.string("hltTriggerSummaryAOD"),
TriggerResults = cms.untracked.string("TriggerResults"),
RecoMuons = cms.InputTag("muons"),
GenParticles = cms.InputTag("genParticles"),
# list IDs of muon mothers, -1:don't check, 0:particle gun, 23:Z, 443:J/psi, 553:Upsilon, 531:Bs, 333:Phi
MotherIDs = cms.untracked.vint32(23,443,553,531,333,0),
GenGlobDeltaRMatchingCut = cms.untracked.double(0.1),
GlobL1DeltaRMatchingCut = cms.untracked.double(0.3),
GlobL2DeltaRMatchingCut = cms.untracked.double(0.3),
GlobL3DeltaRMatchingCut = cms.untracked.double(0.1),
DeltaEtaBins = cms.untracked.vdouble(100, -.5, .5),
DeltaPhiBins = cms.untracked.vdouble(100, -.5, .5),
MuonPtBins = cms.untracked.vdouble(1., 3., 5., 9., 15., 32., 64., 128., 256., 512., 1024., 2048.),
MuonEtaBins = cms.untracked.vdouble(16, -2.4, 2.4),
MuonPhiBins = cms.untracked.vdouble(12, -3.15, 3.15),
DimuonPtBins = cms.untracked.vdouble(0., 2., 4., 6., 8., 10., 15., 25., 50., 100.),
DimuonEtaBins = cms.untracked.vdouble(16, -2.4, 2.4),
DimuonDRBins = cms.untracked.vdouble(10, 0., 1.)
)
| 0
| 0
| 0
|
9809ab5317cd085c73700d8f77b21ca36ba30fc5
| 1,292
|
py
|
Python
|
utils/config.py
|
ogrenenmakine/VCL-PL-Semi-Supervised-Learning-from-Noisy-Web-Data-with-Variational-Contrastive-Learning
|
baef25837ce7e073d03f69a095d1992aa18dd2d5
|
[
"MIT"
] | null | null | null |
utils/config.py
|
ogrenenmakine/VCL-PL-Semi-Supervised-Learning-from-Noisy-Web-Data-with-Variational-Contrastive-Learning
|
baef25837ce7e073d03f69a095d1992aa18dd2d5
|
[
"MIT"
] | null | null | null |
utils/config.py
|
ogrenenmakine/VCL-PL-Semi-Supervised-Learning-from-Noisy-Web-Data-with-Variational-Contrastive-Learning
|
baef25837ce7e073d03f69a095d1992aa18dd2d5
|
[
"MIT"
] | null | null | null |
"""
Authors: Wouter Van Gansbeke, Simon Vandenhende
Licensed under the CC BY-NC 4.0 license (https://creativecommons.org/licenses/by-nc/4.0/)
"""
import os
import yaml
from easydict import EasyDict
from utils.utils import mkdir_if_missing
| 36.914286
| 92
| 0.706656
|
"""
Authors: Wouter Van Gansbeke, Simon Vandenhende
Licensed under the CC BY-NC 4.0 license (https://creativecommons.org/licenses/by-nc/4.0/)
"""
import os
import yaml
from easydict import EasyDict
from utils.utils import mkdir_if_missing
def create_config(config_file_env, config_file_exp, batch_size, epochs):
# Config for environment path
with open(config_file_env, 'r') as stream:
root_dir = yaml.safe_load(stream)['root_dir']
with open(config_file_exp, 'r') as stream:
config = yaml.safe_load(stream)
cfg = EasyDict()
# Copy
for k, v in config.items():
cfg[k] = v
# Set paths for pretext task (These directories are needed in every stage)
base_dir = os.path.join(root_dir, cfg['train_db_name'])
pretext_dir = os.path.join(base_dir, 'SimCLR-B' + str(batch_size))
mkdir_if_missing(base_dir)
mkdir_if_missing(pretext_dir)
cfg['pretext_dir'] = pretext_dir
cfg['pretext_checkpoint'] = os.path.join(pretext_dir, 'checkpoint.pth.tar')
cfg['pretext_model'] = os.path.join(pretext_dir, 'model.pth.tar')
cfg['topk_neighbors_train_path'] = os.path.join(pretext_dir, 'topk-train-neighbors.npy')
cfg['topk_neighbors_val_path'] = os.path.join(pretext_dir, 'topk-val-neighbors.npy')
return cfg
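# Illustrative usage (editorial addition). The YAML contents are assumptions
# about the minimal schema create_config reads: the env file needs root_dir,
# the experiment file needs at least train_db_name.
if __name__ == '__main__':
    with open('env.yml', 'w') as f:
        yaml.safe_dump({'root_dir': '/tmp/experiments'}, f)
    with open('exp.yml', 'w') as f:
        yaml.safe_dump({'train_db_name': 'cifar-10'}, f)
    cfg = create_config('env.yml', 'exp.yml', batch_size=256, epochs=100)
    print(cfg.pretext_dir)  # /tmp/experiments/cifar-10/SimCLR-B256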
| 1,029
| 0
| 24
|
24dea8c23635079d76503504a11ec99b74c28151
| 1,839
|
py
|
Python
|
fuzzytools/matplotlib/lims.py
|
oscarpimentel/fuzzy-tools
|
edbde6a1e56c1c564cca609e4d0cc9cda906b992
|
[
"MIT"
] | null | null | null |
fuzzytools/matplotlib/lims.py
|
oscarpimentel/fuzzy-tools
|
edbde6a1e56c1c564cca609e4d0cc9cda906b992
|
[
"MIT"
] | null | null | null |
fuzzytools/matplotlib/lims.py
|
oscarpimentel/fuzzy-tools
|
edbde6a1e56c1c564cca609e4d0cc9cda906b992
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import division
from . import _C
import matplotlib.pyplot as plt
import numpy as np
from copy import copy, deepcopy
EXTENDED_PERCENT = 0.1
###################################################################################################################################################
###################################################################################################################################################
| 35.365385
| 163
| 0.650353
|
from __future__ import print_function
from __future__ import division
from . import _C
import matplotlib.pyplot as plt
import numpy as np
from copy import copy, deepcopy
EXTENDED_PERCENT = 0.1
###################################################################################################################################################
def get_xlim(_x_values, axis_extended_percent):
x_values = np.array(_x_values)
assert len(x_values.shape)==1
assert axis_extended_percent<=1
x_min, x_max = x_values.min(), x_values.max()
dx = x_max-x_min
x_margin = axis_extended_percent*dx
xlim = (x_min-x_margin, x_max+x_margin)
return xlim
###################################################################################################################################################
class AxisLims(object):
def __init__(self, axis_clip_values,
axis_extended_percent=EXTENDED_PERCENT,
):
self.axis_clip_values = axis_clip_values
self.axis_extended_percent = {k:axis_extended_percent for k in axis_clip_values.keys()} if not isinstance(axis_extended_percent, dict) else axis_extended_percent
self.reset()
def reset(self):
self.axis_d = {k:[] for k in self.axis_clip_values.keys()}
def append(self, axis_name, axis_values):
self.axis_d[axis_name] += [x for x in axis_values]
def get_axis_lim(self, axis_name):
axis_extended_percent = self.axis_extended_percent[axis_name]
axis_clip_values = self.axis_clip_values[axis_name]
axis_lim = get_xlim(self.axis_d[axis_name], axis_extended_percent)
axis_lim = np.clip(axis_lim, axis_clip_values[0], axis_clip_values[1]) if not (axis_clip_values[0] is None and axis_clip_values[1] is None) else axis_lim
return axis_lim
def set_ax_axis_lims(self, ax):
for k in self.axis_d.keys():
getattr(ax, f'set_{k}lim')(self.get_axis_lim(k))
return ax
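if __name__ == '__main__':
    # Worked example of get_xlim's margin arithmetic (editorial addition):
    # values spanning [0, 10] with a 10% extension give dx = 10 and a
    # margin of 1.0, hence limits of (-1.0, 11.0).
    print(get_xlim([0.0, 2.5, 10.0], 0.1))  # (-1.0, 11.0)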
| 1,180
| 2
| 165
|
34463d3a7056ec3ca5a9a4b2f7a2cc3412f5557e
| 1,127
|
py
|
Python
|
emg_analyzer/argparse_utils.py
|
freeh4cker/emg_analyser
|
63ab91aaa7c9b36959e1d1038c89108fbce821f5
|
[
"BSD-3-Clause"
] | 1
|
2020-07-29T19:07:55.000Z
|
2020-07-29T19:07:55.000Z
|
emg_analyzer/argparse_utils.py
|
freeh4cker/emg_analyser
|
63ab91aaa7c9b36959e1d1038c89108fbce821f5
|
[
"BSD-3-Clause"
] | 7
|
2017-12-16T23:25:18.000Z
|
2018-01-24T22:04:09.000Z
|
emg_analyzer/argparse_utils.py
|
freeh4cker/emg_analyser
|
63ab91aaa7c9b36959e1d1038c89108fbce821f5
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
# Copyright (c) 2017-2018 Bertrand Néron. All rights reserved. #
# Use of this source code is governed by a BSD-style license that can be #
# found in the LICENSE file. #
##########################################################################
import argparse
class VersionAction(argparse._VersionAction):
"""Class to allow argparse to handel more complex version output"""
def __call__(self, parser, namespace, values, option_string=None):
"""Override the :meth:`argparse._VersionAction.__call__` to use
        a RawTextHelpFormatter for the version action, regardless of the formatter_class
specified for the :class:`argparse.ArgumentParser` object.
"""
version = self.version
if version is None:
version = parser.version
formatter = argparse.RawTextHelpFormatter(parser.prog)
formatter.add_text(version)
parser._print_message(formatter.format_help(), argparse._sys.stdout)
parser.exit()
| 40.25
| 86
| 0.568767
|
##########################################################################
# Copyright (c) 2017-2018 Bertrand Néron. All rights reserved. #
# Use of this source code is governed by a BSD-style license that can be #
# found in the LICENSE file. #
##########################################################################
import argparse
class VersionAction(argparse._VersionAction):
"""Class to allow argparse to handel more complex version output"""
def __call__(self, parser, namespace, values, option_string=None):
"""Override the :meth:`argparse._VersionAction.__call__` to use
        a RawTextHelpFormatter for the version action, regardless of the formatter_class
specified for the :class:`argparse.ArgumentParser` object.
"""
version = self.version
if version is None:
version = parser.version
formatter = argparse.RawTextHelpFormatter(parser.prog)
formatter.add_text(version)
parser._print_message(formatter.format_help(), argparse._sys.stdout)
parser.exit()
| 0
| 0
| 0
|
0e91c58634a794d0f8effa722b00f5f2c6a299c5
| 852
|
py
|
Python
|
main.py
|
pratyushravishankar/audio-recognition
|
403a527906601b23716cc4b0136d4bf3ddcb3bc8
|
[
"MIT"
] | null | null | null |
main.py
|
pratyushravishankar/audio-recognition
|
403a527906601b23716cc4b0136d4bf3ddcb3bc8
|
[
"MIT"
] | null | null | null |
main.py
|
pratyushravishankar/audio-recognition
|
403a527906601b23716cc4b0136d4bf3ddcb3bc8
|
[
"MIT"
] | null | null | null |
import utils
from sklearn.model_selection import train_test_split
import lsh_random_projection as LSH
import pickle
build_model()
with open('lsh.pkl', 'rb') as input:
lsh = pickle.load(input)
# lsh.get()
| 24.342857
| 73
| 0.712441
|
import utils
from sklearn.model_selection import train_test_split
import lsh_random_projection as LSH
import pickle
def build_model():
features = utils.load("data/fma_metadata/features.csv")
tracks = utils.load('data/fma_metadata/tracks.csv')
non_nulls_tracks = tracks[tracks['track']['genre_top'].notnull()]
print(non_nulls_tracks.head())
non_null_features = features.loc[non_nulls_tracks.index]
X_train, X_test = train_test_split(non_null_features, test_size=1)
lsh = LSH.LSH(1, 15, 140)
lsh.add(X_train['mfcc'])
save_object(lsh, 'lsh.pkl')
def save_object(obj, filename):
with open(filename, 'wb') as output: # Overwrites any existing file.
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
build_model()
with open('lsh.pkl', 'rb') as input:
lsh = pickle.load(input)
# lsh.get()
| 589
| 0
| 46
|
13404955476ece8a5721e805c365b907490b523b
| 3,264
|
py
|
Python
|
pyprobml-master/examples/keras/vgg-keras.py
|
storopoli/Machine-Learning-Probalistic
|
f8617e7b81f4d6c71e72edc40ba11ac746794a95
|
[
"MIT"
] | 1
|
2019-03-04T05:43:10.000Z
|
2019-03-04T05:43:10.000Z
|
Old/examples/keras/vgg-keras.py
|
tywang89/pyprobml
|
82cfdcb8daea653cda8f77e8737e585418476ca7
|
[
"MIT"
] | null | null | null |
Old/examples/keras/vgg-keras.py
|
tywang89/pyprobml
|
82cfdcb8daea653cda8f77e8737e585418476ca7
|
[
"MIT"
] | null | null | null |
# Example of applying VGG16 classifier
# Based on Keras book sec 5.4.3.
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input, decode_predictions
from keras.preprocessing import image
#import matplotlib.pyplot as plt
import numpy as np
#import urllib
#from keras.applications import ResNet50
#from keras.applications.resnet50 import preprocess_input, decode_predictions
#model = ResNet50(weights='imagenet')
model = VGG16(weights='imagenet')
model.summary() # see vgg16-summary.txt for details
# Load image from file
img_path = 'figures/cat_dog.jpg' # From https://github.com/ramprs/grad-cam/blob/master/images/cat_dog.jpg
#img_path = 'figures/Elephant_mating_ritual_3.jpg' # https://en.wikipedia.org/wiki/African_elephant#/media/File:Elephant_mating_ritual_3.jpg
#img_path = 'figures/Serengeti_Elefantenherde2.jpg' #https://en.wikipedia.org/wiki/African_elephant#/media/File:Serengeti_Elefantenherde2.jpg
#img_path = 'figures/dog-cat-openimages.jpg'
#img_path = 'figures/dog-ball-openimages.jpg' # https://www.flickr.com/photos/akarmy/5423588107
#img_path = 'figures/dog-car-backpack-openimages.jpg' # https://www.flickr.com/photos/mountrainiernps/14485323038
# Retrieve image from web
#url = "https://en.wikipedia.org/wiki/African_elephant#/media/File:Elephant_mating_ritual_3.jpg"
#urllib.request.urlretrieve(url, "/tmp/img.png")
#img = plt.imread('/tmp/img.png')
# `img` is a PIL image of size 224x224
img = image.load_img(img_path, target_size=(224, 224))
# `x` is a float32 Numpy array of shape (224, 224, 3)
x = image.img_to_array(img)
# We add a dimension to transform our array into a "batch"
# of size (1, 224, 224, 3)
x = np.expand_dims(x, axis=0)
# Finally we preprocess the batch
# (this does channel-wise color normalization)
x = preprocess_input(x)
preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=10)[0])
'''
Predicted: [
('n02108422', 'bull_mastiff', 0.40943894),
('n02108089', 'boxer', 0.3950904),
('n02109047', 'Great_Dane', 0.039510112),
 ('n02109525', 'Saint_Bernard', 0.031701218),
('n02129604', 'tiger', 0.019169593),
('n02093754', 'Border_terrier', 0.018684039),
('n02110958', 'pug', 0.014893572),
('n02123159', 'tiger_cat', 0.014403002),
('n02105162', 'malinois', 0.010533252),
('n03803284', 'muzzle', 0.005662783)]
'''
# For img_path = 'data/Elephant_mating_ritual_3.jpg'
#Predicted: [('n02504458', 'African_elephant', 0.93163019),
#('n01871265', 'tusker', 0.053829707), ('n02504013', 'Indian_elephant', 0.014539864)]
# For img_path = 'data/Serengeti_Elefantenherde2.jpg'
#Predicted: [('n01871265', 'tusker', 0.61881274),
#('n02504458', 'African_elephant', 0.25420085), ('n02504013', 'Indian_elephant', 0.11940476)]
# Animal > Chordate > Vertebrate > Mammal > Tusker
#http://image-net.org/synset?wnid=n01871265
#Any mammal with prominent tusks (especially an elephant or wild boar)
# Animal > Chordate > Vertebrate > Mammal > Placental Mammal ...
# > Proboscidian > Elephant > African Elephant
# http://image-net.org/synset?wnid=n02504458
# An elephant native to Africa having enormous flapping ears and ivory tusks
| 39.804878
| 141
| 0.747855
|
# Example of applying VGG16 classifier
# Based on Keras book sec 5.4.3.
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input, decode_predictions
from keras.preprocessing import image
#import matplotlib.pyplot as plt
import numpy as np
#import urllib
#from keras.applications import ResNet50
#from keras.applications.resnet50 import preprocess_input, decode_predictions
#model = ResNet50(weights='imagenet')
model = VGG16(weights='imagenet')
model.summary() # see vgg16-summary.txt for details
# Load image from file
img_path = 'figures/cat_dog.jpg' # From https://github.com/ramprs/grad-cam/blob/master/images/cat_dog.jpg
#img_path = 'figures/Elephant_mating_ritual_3.jpg' # https://en.wikipedia.org/wiki/African_elephant#/media/File:Elephant_mating_ritual_3.jpg
#img_path = 'figures/Serengeti_Elefantenherde2.jpg' #https://en.wikipedia.org/wiki/African_elephant#/media/File:Serengeti_Elefantenherde2.jpg
#img_path = 'figures/dog-cat-openimages.jpg'
#img_path = 'figures/dog-ball-openimages.jpg' # https://www.flickr.com/photos/akarmy/5423588107
#img_path = 'figures/dog-car-backpack-openimages.jpg' # https://www.flickr.com/photos/mountrainiernps/14485323038
# Retrieve image from web
#url = "https://en.wikipedia.org/wiki/African_elephant#/media/File:Elephant_mating_ritual_3.jpg"
#urllib.request.urlretrieve(url, "/tmp/img.png")
#img = plt.imread('/tmp/img.png')
# `img` is a PIL image of size 224x224
img = image.load_img(img_path, target_size=(224, 224))
# `x` is a float32 Numpy array of shape (224, 224, 3)
x = image.img_to_array(img)
# We add a dimension to transform our array into a "batch"
# of size (1, 224, 224, 3)
x = np.expand_dims(x, axis=0)
# Finally we preprocess the batch
# (this does channel-wise color normalization)
x = preprocess_input(x)
preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=10)[0])
'''
Predicted: [
('n02108422', 'bull_mastiff', 0.40943894),
('n02108089', 'boxer', 0.3950904),
('n02109047', 'Great_Dane', 0.039510112),
 ('n02109525', 'Saint_Bernard', 0.031701218),
('n02129604', 'tiger', 0.019169593),
('n02093754', 'Border_terrier', 0.018684039),
('n02110958', 'pug', 0.014893572),
('n02123159', 'tiger_cat', 0.014403002),
('n02105162', 'malinois', 0.010533252),
('n03803284', 'muzzle', 0.005662783)]
'''
# For img_path = 'data/Elephant_mating_ritual_3.jpg'
#Predicted: [('n02504458', 'African_elephant', 0.93163019),
#('n01871265', 'tusker', 0.053829707), ('n02504013', 'Indian_elephant', 0.014539864)]
# For img_path = 'data/Serengeti_Elefantenherde2.jpg'
#Predicted: [('n01871265', 'tusker', 0.61881274),
#('n02504458', 'African_elephant', 0.25420085), ('n02504013', 'Indian_elephant', 0.11940476)]
# Animal > Chordate > Vertebrate > Mammal > Tusker
#http://image-net.org/synset?wnid=n01871265
#Any mammal with prominent tusks (especially an elephant or wild boar)
# Animal > Chordate > Vertebrate > Mammal > Placental Mammal ...
# > Proboscidian > Elephant > African Elephant
# http://image-net.org/synset?wnid=n02504458
# An elephant native to Africa having enormous flapping ears and ivory tusks
| 0
| 0
| 0
|
e1e6b7f0ec0d3411bf4fc0e68ffd0e08d835ed25
| 2,056
|
py
|
Python
|
spotify_api/__init__.py
|
nerd8622/Python-Spotify-API
|
4f750bb0fff39da141a48c5c08396bd340cfe2fb
|
[
"MIT"
] | null | null | null |
spotify_api/__init__.py
|
nerd8622/Python-Spotify-API
|
4f750bb0fff39da141a48c5c08396bd340cfe2fb
|
[
"MIT"
] | null | null | null |
spotify_api/__init__.py
|
nerd8622/Python-Spotify-API
|
4f750bb0fff39da141a48c5c08396bd340cfe2fb
|
[
"MIT"
] | null | null | null |
from .web_requests import safeGet
| 33.704918
| 129
| 0.601167
|
from .web_requests import safeGet
class API:
def __init__(self, token):
self.url = lambda s: f"https://api.spotify.com/v1/{s}"
self.authHeader = {'Authorization': f'Bearer {token}'}
def get_album_tracks(self, a_id):
response = safeGet(self.url(f'albums/{a_id}/tracks'), {}, self.authHeader)
return response
def get_user(self, uid):
return User(self, uid)
def currentUser(self):
response = safeGet(self.url('me'), {}, self.authHeader)
return response
class Track:
def __init__(self, api, data, features):
self.api = api
self.tid = data['track']['id']
self.url = f"https://api.spotify.com/v1/tracks/{self.tid}"
self.name = data['track']['name']
self.explicit = data['track']['explicit']
self.raw = data
self.features = features
self.tempo = features['tempo']
class Playlist:
def __init__(self, api, data):
self.api = api
self.pid = data['id']
self.url = lambda s: f"https://api.spotify.com/v1/playlists/{self.pid}/{s}"
self.featuresUrl = "https://api.spotify.com/v1/audio-features"
self.name = data['name']
self.raw = data
def get_tracks(self):
response = safeGet(self.url('tracks'), {'market': 'US'}, self.api.authHeader)
features = safeGet(self.featuresUrl, {'ids': ','.join(i['track']['id'] for i in response["items"])}, self.api.authHeader)
return [Track(self.api, item, feature) for item, feature in zip(response['items'], features['audio_features'])]
class User:
def __init__(self, api, uid):
self.api = api
self.url = lambda s: f"https://api.spotify.com/v1/users/{uid}/{s}"
self.uid = uid
self.raw = safeGet(self.url(''), {}, self.api.authHeader)
self.name = self.raw['display_name']
def get_playlists(self):
response = safeGet(self.url('playlists'), {}, self.api.authHeader)
return [Playlist(self.api, item) for item in response['items']]
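# Illustrative flow (editorial addition; the token and user id are
# placeholders and a valid Spotify OAuth token is required):
if __name__ == '__main__':
    api = API('YOUR_OAUTH_TOKEN')
    user = api.get_user('spotify')
    for playlist in user.get_playlists():
        for track in playlist.get_tracks():
            print(playlist.name, track.name, track.tempo)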
| 1,709
| -36
| 339
|