hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
50e74d35854c86bd0a208c931e436bcfc814f763 | 3,849 | py | Python | tests/pulses/constant_pulse_template_tests.py | eendebakpt/qupulse | 5b5b48de10084d413e10cfd8f6e9f7536c69dd70 | [
"MIT"
] | 30 | 2018-09-13T02:59:55.000Z | 2022-03-21T04:25:22.000Z | tests/pulses/constant_pulse_template_tests.py | eendebakpt/qupulse | 5b5b48de10084d413e10cfd8f6e9f7536c69dd70 | [
"MIT"
] | 220 | 2018-09-06T14:43:15.000Z | 2022-03-25T12:26:25.000Z | tests/pulses/constant_pulse_template_tests.py | eendebakpt/qupulse | 5b5b48de10084d413e10cfd8f6e9f7536c69dd70 | [
"MIT"
] | 14 | 2019-01-08T14:42:36.000Z | 2021-05-21T08:53:06.000Z | import unittest
import qupulse.pulses.plotting
import qupulse._program.waveforms
import qupulse.utils.sympy
from qupulse.pulses import TablePT, FunctionPT, AtomicMultiChannelPT, MappingPT
from qupulse.pulses.plotting import plot
from qupulse.pulses.sequence_pulse_template import SequencePulseTemplate
from qupulse._program._loop import make_compatible
from qupulse.pulses.constant_pulse_template import ConstantPulseTemplate
| 43.738636 | 113 | 0.677059 | import unittest
import qupulse.pulses.plotting
import qupulse._program.waveforms
import qupulse.utils.sympy
from qupulse.pulses import TablePT, FunctionPT, AtomicMultiChannelPT, MappingPT
from qupulse.pulses.plotting import plot
from qupulse.pulses.sequence_pulse_template import SequencePulseTemplate
from qupulse._program._loop import make_compatible
from qupulse.pulses.constant_pulse_template import ConstantPulseTemplate
class TestConstantPulseTemplate(unittest.TestCase):
def test_ConstantPulseTemplate(self):
pt = ConstantPulseTemplate(100, {'P1': .5, 'P2': .25})
self.assertEqual(pt.integral, {'P1': 50, 'P2': 25})
data = pt.get_serialization_data()
self.assertEqual(data['name'], pt._name)
self.assertIn('ConstantPulseTemplate', str(pt))
self.assertIn('ConstantPulseTemplate', repr(pt))
def test_zero_duration(self):
p1 = ConstantPulseTemplate(10, {'P1': 1.})
p2 = ConstantPulseTemplate(0, {'P1': 1.})
p3 = ConstantPulseTemplate(2, {'P1': 1.})
_ = qupulse.pulses.plotting.render(p1.create_program())
pulse = SequencePulseTemplate(p1, p2, p3)
prog = pulse.create_program()
_ = qupulse.pulses.plotting.render(prog)
self.assertEqual(pulse.duration, 12)
def test_regression_duration_conversion(self):
old_value = qupulse._program.waveforms.PULSE_TO_WAVEFORM_ERROR
try:
qupulse._program.waveforms.PULSE_TO_WAVEFORM_ERROR = 1e-6
for duration_in_samples in [64, 936320, 24615392]:
p = ConstantPulseTemplate(duration_in_samples / 2.4, {'a': 0})
number_of_samples = p.create_program().duration * 2.4
make_compatible(p.create_program(), 8, 8, 2.4)
self.assertEqual(number_of_samples.denominator, 1)
p2 = ConstantPulseTemplate((duration_in_samples + 1) / 2.4, {'a': 0})
self.assertNotEqual(p.create_program().duration, p2.create_program().duration)
finally:
qupulse._program.waveforms.PULSE_TO_WAVEFORM_ERROR = old_value
def test_regression_duration_conversion_functionpt(self):
old_value = qupulse._program.waveforms.PULSE_TO_WAVEFORM_ERROR
try:
qupulse._program.waveforms.PULSE_TO_WAVEFORM_ERROR = 1e-6
for duration_in_samples in [64, 2000, 936320]:
p = FunctionPT('1', duration_expression=duration_in_samples / 2.4, channel='a')
number_of_samples = p.create_program().duration * 2.4
self.assertEqual(number_of_samples.denominator, 1)
finally:
qupulse._program.waveforms.PULSE_TO_WAVEFORM_ERROR = old_value
def test_regression_template_combination(self):
old_value = qupulse.utils.sympy.SYMPY_DURATION_ERROR_MARGIN
try:
qupulse.utils.sympy.SYMPY_DURATION_ERROR_MARGIN = 1e-9
duration_in_seconds = 2e-6
full_template = ConstantPulseTemplate(duration=duration_in_seconds * 1e9, amplitude_dict={'C1': 1.1})
duration_in_seconds_derived = 1e-9 * full_template.duration
marker_pulse = TablePT({'marker': [(0, 0), (duration_in_seconds_derived * 1e9, 0)]})
full_template = AtomicMultiChannelPT(full_template, marker_pulse)
finally:
qupulse.utils.sympy.SYMPY_DURATION_ERROR_MARGIN = old_value
def test_regression_sequencept_with_mappingpt(self):
t1 = TablePT({'C1': [(0, 0), (100, 0)], 'C2': [(0, 1), (100, 1)]})
t2 = ConstantPulseTemplate(200, {'C1': 2, 'C2': 3})
qupulse_template = SequencePulseTemplate(t1, t2)
channel_mapping = {'C1': None, 'C2': 'C2'}
p = MappingPT(qupulse_template, channel_mapping=channel_mapping)
plot(p)
self.assertEqual(p.defined_channels, {'C2'})
| 3,205 | 30 | 185 |
91c892f35b793e6da0b88a8f56c528786f5a98d0 | 769 | py | Python | alphabet_rangoli.py | adenosinew/algorithms | 9a11cc18d725fd9d785dea99342d920cf5ea37fb | [
"MIT"
] | null | null | null | alphabet_rangoli.py | adenosinew/algorithms | 9a11cc18d725fd9d785dea99342d920cf5ea37fb | [
"MIT"
] | null | null | null | alphabet_rangoli.py | adenosinew/algorithms | 9a11cc18d725fd9d785dea99342d920cf5ea37fb | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
print_rangoli(5)
| 27.464286 | 58 | 0.46684 | def print_rangoli(size):
alphabet=[chr(i) for i in range(ord('a'),ord('z')+1)]
for i in range(size):
if i>0:
sub=alphabet[size-1-i:size]
sub_reverse=sub[:]
sub.reverse()
sub_list=sub+sub_reverse[1:]
s='-'.join(sub_list)
else:
s=alphabet[size-1]
print(s.center(4*size-3,'-'))
for i in range(size-2,-1,-1):
if i>0:
sub=alphabet[size-1-i:size]
sub_reverse=sub[:]
sub.reverse()
sub_list=sub+sub_reverse[1:]
s='-'.join(sub_list)
else:
s=alphabet[size-1]
print(s.center(4*size-3,'-'))
if __name__ == '__main__':
print_rangoli(5)
| 682 | 0 | 31 |
f9f1ee1fdb632311a630fb33b4c550588572ef0a | 130 | py | Python | __main__.py | lucasschoenhold/nes-py | 7de04f48e928cf96ba0976ee61def5958aaa759d | [
"MIT"
] | 128 | 2018-07-22T03:31:42.000Z | 2022-03-28T13:17:04.000Z | __main__.py | lucasschoenhold/nes-py | 7de04f48e928cf96ba0976ee61def5958aaa759d | [
"MIT"
] | 35 | 2018-07-20T16:37:23.000Z | 2022-02-04T00:37:23.000Z | __main__.py | lucasschoenhold/nes-py | 7de04f48e928cf96ba0976ee61def5958aaa759d | [
"MIT"
] | 31 | 2019-02-19T10:56:22.000Z | 2022-01-15T19:32:52.000Z | """The main script for development testing."""
from nes_py.app.cli import main
# execute the main entry point of the CLI
main()
| 18.571429 | 46 | 0.738462 | """The main script for development testing."""
from nes_py.app.cli import main
# execute the main entry point of the CLI
main()
| 0 | 0 | 0 |
d2aea9cbc2d4808d7eb4ede9b03fe6aaaf1b6bb8 | 727 | py | Python | submissions/closest-binary-search-tree-value/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | submissions/closest-binary-search-tree-value/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | 1 | 2022-03-04T20:24:32.000Z | 2022-03-04T20:31:58.000Z | submissions/closest-binary-search-tree-value/solution.py | Wattyyy/LeetCode | 13a9be056d0a0c38c2f8c8222b11dc02cb25a935 | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/closest-binary-search-tree-value
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
| 27.961538 | 64 | 0.61348 | # https://leetcode.com/problems/closest-binary-search-tree-value
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def __init__(self):
self.distance = []
def traverse(self, node, target):
self.distance.append([abs(node.val - target), node.val])
if node.left:
self.traverse(node.left, target)
if node.right:
self.traverse(node.right, target)
def closestValue(self, root, target):
self.traverse(root, target)
self.distance = sorted(self.distance)
return self.distance[0][1]
| 371 | -6 | 103 |
ad77781c49fac60e715b11f04f37da4e4ac44c7a | 4,977 | py | Python | eahub/localgroups/views.py | austin226/eahub.org | 5cadaac8d60d2f65671924cd05aa9a6b0cba3a84 | [
"MIT"
] | null | null | null | eahub/localgroups/views.py | austin226/eahub.org | 5cadaac8d60d2f65671924cd05aa9a6b0cba3a84 | [
"MIT"
] | null | null | null | eahub/localgroups/views.py | austin226/eahub.org | 5cadaac8d60d2f65671924cd05aa9a6b0cba3a84 | [
"MIT"
] | null | null | null | from django import urls
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import mixins as auth_mixins
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.views.decorators.http import require_POST
from django.views.generic import detail as detail_views
from django.views.generic import edit as edit_views
from rules.contrib import views as rules_views
from ..base.views import ReportAbuseView
from ..profiles.models import Profile
from .forms import LocalGroupForm
from .models import LocalGroup
@login_required
@require_POST
@login_required
@require_POST
| 32.960265 | 87 | 0.68033 | from django import urls
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import mixins as auth_mixins
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.views.decorators.http import require_POST
from django.views.generic import detail as detail_views
from django.views.generic import edit as edit_views
from rules.contrib import views as rules_views
from ..base.views import ReportAbuseView
from ..profiles.models import Profile
from .forms import LocalGroupForm
from .models import LocalGroup
class LocalGroupCreateView(auth_mixins.LoginRequiredMixin, edit_views.CreateView):
model = LocalGroup
form_class = LocalGroupForm
template_name = "eahub/edit_group.html"
def get_initial(self):
initial = super().get_initial()
user = self.request.user
if hasattr(user, "profile"):
initial["organisers"] = [user]
return initial
def get_form_kwargs(self):
return {**super().get_form_kwargs(), "user": self.request.user}
def form_valid(self, form):
form.instance.geocode()
return super().form_valid(form)
class LocalGroupDetailView(detail_views.DetailView):
model = LocalGroup
template_name = "eahub/group.html"
context_object_name = "group"
class LocalGroupUpdateView(rules_views.PermissionRequiredMixin, edit_views.UpdateView):
model = LocalGroup
form_class = LocalGroupForm
template_name = "eahub/edit_group.html"
permission_required = "localgroups.change_local_group"
def get_form_kwargs(self):
return {**super().get_form_kwargs(), "user": self.request.user}
def form_valid(self, form):
if "city_or_town" in form.changed_data or "country" in form.changed_data:
form.instance.geocode()
return super().form_valid(form)
class LocalGroupDeleteView(rules_views.PermissionRequiredMixin, edit_views.DeleteView):
model = LocalGroup
template_name = "eahub/delete_group.html"
success_url = urls.reverse_lazy("groups")
permission_required = "localgroups.delete_local_group"
class ReportGroupAbuseView(ReportAbuseView):
def profile(self):
return LocalGroup.objects.get(slug=self.kwargs["slug"])
def get_type(self):
return "group"
@login_required
@require_POST
def claim_group(request, slug):
group = get_object_or_404(LocalGroup, slug=slug)
subject = "EA Group claimed: {0}".format(group.name)
try:
user_eahub_url = "https://{0}/profile/{1}".format(
get_current_site(request).domain, request.user.profile.slug
)
user_name = request.user.profile.name
except Profile.DoesNotExist:
user_eahub_url = "about:blank"
user_name = request.user.email
message = render_to_string(
"emails/claim_group.txt",
{
"user_eahub_url": user_eahub_url,
"user_name": user_name,
"group_name": group.name,
"group_url": "https://{0}/group/{1}".format(
get_current_site(request).domain, group.slug
),
"user_email": request.user.email,
},
)
send_mail(
subject,
message,
settings.DEFAULT_FROM_EMAIL,
recipient_list=settings.LEAN_MANAGERS,
)
messages.success(
request,
"Thank you, we have received your request to claim this group. "
"Our admin team will send you an email once they have checked your request.",
)
return redirect("/group/{}".format(group.slug))
@login_required
@require_POST
def report_group_inactive(request, slug):
group = get_object_or_404(LocalGroup, slug=slug)
subject = "EA Group reported as inactive: {0}".format(group.name)
try:
user_eahub_url = "https://{0}/profile/{1}".format(
get_current_site(request).domain, request.user.profile.slug
)
except Profile.DoesNotExist:
user_eahub_url = "about:blank"
message = render_to_string(
"emails/report_group_inactive.txt",
{
"user_eahub_url": user_eahub_url,
"user_name": request.user.profile.name,
"group_name": group.name,
"group_url": "https://{0}/group/{1}".format(
get_current_site(request).domain, group.slug
),
"user_email": request.user.email,
},
)
send_mail(
subject,
message,
settings.DEFAULT_FROM_EMAIL,
recipient_list=settings.LEAN_MANAGERS,
)
messages.success(
request,
"Thank you, we have received your report. "
"Our admin team will send you an email once they have looked into it.",
)
return redirect("/group/{}".format(group.slug))
| 3,020 | 909 | 212 |
16388d09352d3a15eccade0912b62f7b3e2e59f7 | 2,760 | py | Python | tests/test_mutator.py | malcolmraine/Gatekeeper | 33461bdd90ed83f834e13a7542d333982ba37d24 | [
"Apache-2.0"
] | null | null | null | tests/test_mutator.py | malcolmraine/Gatekeeper | 33461bdd90ed83f834e13a7542d333982ba37d24 | [
"Apache-2.0"
] | null | null | null | tests/test_mutator.py | malcolmraine/Gatekeeper | 33461bdd90ed83f834e13a7542d333982ba37d24 | [
"Apache-2.0"
] | null | null | null | import unittest
from Gatekeeper import Mutator
| 20.75188 | 72 | 0.536594 | import unittest
from Gatekeeper import Mutator
class Person(Mutator):
def __init__(self):
self.address = ""
@staticmethod
def get_address_attribute(value):
# Using mutator without decorator
return value.upper()
class TestMutator(unittest.TestCase):
def test_cast(self) -> None:
"""
Test for @Mutator.cast decorator.
:return: None
"""
@Mutator.cast(int)
def func(x):
return x
self.assertEqual(55, func(55.989872))
def test_upper(self) -> None:
"""
Test for @Mutator.upper decorator.
:return: None
"""
@Mutator.upper
def func(x):
return x
self.assertEqual("DOE", func("doe"))
def test_lower(self) -> None:
"""
Test @Mutator.lower decorator.
:return: None
"""
@Mutator.lower
def func(x):
return x
self.assertEqual("john", func("JOHN"))
def test_datetime(self) -> None:
"""
Test for @Mutator.datetime decorator
:return: None
"""
@Mutator.datetime("%m/%d/%Y")
def func(x):
return x
self.assertEqual("03/19/1983", func("1983-03-19T00:00:00Z"))
def test_round(self) -> None:
"""
Test for @Enforcer.round decorator.
:return: None
"""
@Mutator.round(2)
def func(x):
return x
self.assertEqual(8.56, func(8.5587167847781))
def test_saturate_low(self) -> None:
"""
Test for @Enforcer.saturate decorator when the function argument
falls below the given bounds.
:return: None
"""
@Mutator.saturate(0, 10)
def func(x):
return x
self.assertEqual(0, func(-3))
def test_saturate_high(self) -> None:
"""
Test for @Enforcer.saturate decorator when the function argument
falls above the given bounds.
:return: None
"""
@Mutator.saturate(0, 10)
def func(x):
return x
self.assertEqual(10, func(15))
def test_saturate_mid(self) -> None:
"""
Test for @Enforcer.saturate decorator when the function argument
falls between the given bounds.
:return: None
"""
@Mutator.saturate(0, 10)
def func(x):
return x
self.assertEqual(8, func(8))
def test_non_decorated(self) -> None:
"""
Test for pass-through mutator method without a decorator.
:return: None
"""
person = Person()
person.address = "123 abc lane"
self.assertEqual("123 ABC LANE", person.address)
| 203 | 2,462 | 46 |
7fc5cc6e34641bc3813078331fcb1abd347f9b74 | 424 | py | Python | fairseq/save_matrix_to_img.py | ToWeRT1An/group_fairseq | ca323ad5d3e7eca457f2cb8976cb732fedc9757e | [
"BSD-3-Clause"
] | null | null | null | fairseq/save_matrix_to_img.py | ToWeRT1An/group_fairseq | ca323ad5d3e7eca457f2cb8976cb732fedc9757e | [
"BSD-3-Clause"
] | null | null | null | fairseq/save_matrix_to_img.py | ToWeRT1An/group_fairseq | ca323ad5d3e7eca457f2cb8976cb732fedc9757e | [
"BSD-3-Clause"
] | null | null | null | import torch
from torchvision.utils import save_image
| 30.285714 | 74 | 0.514151 | import torch
from torchvision.utils import save_image
def save_attn(m, file_path):
if len(m.shape) == 3:
for i in range(m.shape[0]):
save_image(m[i],file_path+'/'+str(i)+'_'+str(m.shape[0])+'_'+\
str(m.shape[1])+'_'+str(m.shape[-1])+'.jpg')
elif len(m.shape) == 2:
save_image(m,file_path+'/'+str(m.shape[0])+'_'+\
str(m.shape[1])+'.jpg')
| 340 | 0 | 28 |
e0249919f501aff8684a4ba12372ca276b0666f2 | 1,468 | py | Python | tests/pyre.pkg/patterns/unique_registry.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | tests/pyre.pkg/patterns/unique_registry.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | tests/pyre.pkg/patterns/unique_registry.py | PyreFramework/pyre | 345c7449a3416eea1c1affa74fb32faff30a6aaa | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2022 all rights reserved
def test():
"""
Verify that classes can intercept registry creation
"""
# access
from pyre.patterns.Unique import Unique
# make a class to serve as a registry
class Registry(dict):
"""A custom instance registry"""
# declare a class with an instance registry
# make an instance
b = Base(name="b")
# and another by the same name
alias = Base(name="b")
# verify that they are identical
assert b is alias
# get the registry
registry = Base.pyre_unique
# verify it's an instance of my custom class
assert isinstance(registry, Registry)
# all done
return
# main
if __name__ == "__main__":
# run the test
test()
# end of file
| 22.584615 | 92 | 0.589237 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2022 all rights reserved
def test():
"""
Verify that classes can intercept registry creation
"""
# access
from pyre.patterns.Unique import Unique
# make a class to serve as a registry
class Registry(dict):
"""A custom instance registry"""
# declare a class with an instance registry
class Base(metaclass=Unique):
# metamethods
def __init__(self, name, **kwds):
# chain up
super().__init__(**kwds)
# save the name
self.name = name
# all done
return
# implementation details
# this must be a class method so it can override the implementation in the metaclass
@classmethod
def pyre_createRegistry(cls):
"""
Customize the registry
"""
# instantiate our custom registry and return it
return Registry()
# make an instance
b = Base(name="b")
# and another by the same name
alias = Base(name="b")
# verify that they are identical
assert b is alias
# get the registry
registry = Base.pyre_unique
# verify it's an instance of my custom class
assert isinstance(registry, Registry)
# all done
return
# main
if __name__ == "__main__":
# run the test
test()
# end of file
| 171 | 404 | 26 |
2c907594ed5935c8a81ec7e0f9700b054d91b263 | 14,287 | py | Python | balena-cam/app/server.py | akhileshboodhun/balena-cam | 347b5a411613033cbfeaa907d68d8f696a05863f | [
"Apache-2.0"
] | null | null | null | balena-cam/app/server.py | akhileshboodhun/balena-cam | 347b5a411613033cbfeaa907d68d8f696a05863f | [
"Apache-2.0"
] | null | null | null | balena-cam/app/server.py | akhileshboodhun/balena-cam | 347b5a411613033cbfeaa907d68d8f696a05863f | [
"Apache-2.0"
] | null | null | null | import asyncio
import json
import os
import cv2
import platform
import sys
from time import sleep
import time
from aiohttp import web
from av import VideoFrame
from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack, RTCIceServer, RTCConfiguration
from aiohttp_basicauth import BasicAuthMiddleware
import numpy as np
from constant.camera import resolution_presets
# import picamera
kernel_dil = np.ones((10, 10), np.uint8)
# reference:
# https://stackoverflow.com/questions/60989671/white-blue-balance-error-in-high-resolution-with-opencv-and-picamera-v2
# resoultion_defulat = (640, 480)
# resolution_good = (1280, 704)
# resolution_high = (1920, 1088)
# resolution_nice = (1640, 928)
# resolution_picked = resolution_presets["picamera"]["default"]
resolution_picked = resolution_presets["picamera"][1920]
# resolution_picked = (1280, 720)
motion_detected_on = False
use_picamera = True
if __name__ == '__main__':
checkDeviceReadiness()
print("Camera detected!!!")
ROOT = os.path.dirname(__file__)
pcs = set()
if use_picamera == True:
camera_device = PiCameraDevice(resolution_picked)
else:
camera_device = CameraDevice(resolution_picked)
flip = False
try:
if os.environ['rotation'] == '1':
flip = True
except BaseException:
pass
auth = []
if 'username' in os.environ and 'password' in os.environ:
print('\n#############################################################')
print('Authorization is enabled.')
print('Your balenaCam is password protected.')
print('#############################################################\n')
auth.append(
BasicAuthMiddleware(
username=os.environ['username'],
password=os.environ['password']))
else:
print('\n#############################################################')
print('Authorization is disabled.')
print('Anyone can access your balenaCam, using the device\'s URL!')
print(
'Set the username and password environment variables \nto enable authorization.')
print(
'For more info visit: \nhttps://github.com/balena-io-playground/balena-cam')
print('#############################################################\n')
# Factory to create peerConnections depending on the iceServers
# set by user
pc_factory = PeerConnectionFactory()
app = web.Application(middlewares=auth)
app.on_shutdown.append(on_shutdown)
app.router.add_get('/', index)
app.router.add_get('/favicon.png', favicon)
app.router.add_get('/balena-logo.svg', balena_logo)
app.router.add_get('/balena-cam.svg', balena)
app.router.add_get('/client.js', javascript)
app.router.add_get('/style.css', stylesheet)
app.router.add_post('/offer', offer)
app.router.add_get('/mjpeg', mjpeg_handler)
app.router.add_get('/ice-config', config)
web.run_app(app, port=80)
| 33.459016 | 118 | 0.595786 | import asyncio
import json
import os
import cv2
import platform
import sys
from time import sleep
import time
from aiohttp import web
from av import VideoFrame
from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack, RTCIceServer, RTCConfiguration
from aiohttp_basicauth import BasicAuthMiddleware
import numpy as np
from constant.camera import resolution_presets
# import picamera
kernel_dil = np.ones((10, 10), np.uint8)
# reference:
# https://stackoverflow.com/questions/60989671/white-blue-balance-error-in-high-resolution-with-opencv-and-picamera-v2
# resoultion_defulat = (640, 480)
# resolution_good = (1280, 704)
# resolution_high = (1920, 1088)
# resolution_nice = (1640, 928)
# resolution_picked = resolution_presets["picamera"]["default"]
resolution_picked = resolution_presets["picamera"][1920]
# resolution_picked = (1280, 720)
motion_detected_on = False
use_picamera = True
class FPS():
def __init__(self):
self.counter = 0
self.calculate_frequence = 5 # 5s
self.start_time = time.time()
def per_frame(self):
self.counter = self.counter + 1
if (time.time() - self.start_time) > self.calculate_frequence:
print("FPS: ", self.counter /
(time.time() - self.start_time))
self.counter = 0
self.start_time = time.time()
class CameraDevice():
def __init__(self, resolution_picked):
print('Cv2 is used as camera source lib')
self.cap = cv2.VideoCapture(0)
self.fgbg = cv2.createBackgroundSubtractorMOG2()
ret, frame = self.cap.read()
if not ret:
print('Failed to open default camera. Exiting...')
sys.exit()
# self.cap.set(3, 640)# ?why capture in such small space
# self.cap.set(4, 480)
# ?why capture in such small space
self.cap.set(3, resolution_picked[0])
self.cap.set(4, resolution_picked[1])
self.f_counter = 0
self.fps = FPS()
print("Cam set to resolution: begin===")
print(
self.cap.get(cv2.CAP_PROP_FRAME_WIDTH),
self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
)
print("===end")
def rotate(self, frame):
if flip:
(h, w) = frame.shape[:2]
center = (w / 2, h / 2)
M = cv2.getRotationMatrix2D(center, 180, 1.0)
frame = cv2.warpAffine(frame, M, (w, h))
print(frame.shape)
return frame
def get_background_mask_3channel(self, frame):
fgmask = self.get_background_mask(frame)
# fgmask = self.fgbg.apply(frame)
img2 = np.zeros_like(frame)
img2[:, :, 0] = fgmask
img2[:, :, 1] = fgmask
img2[:, :, 2] = fgmask
# print("img2 shape", img2.shape, self.f_counter)
return img2
def get_background_mask(self, frame):
fgmask = self.fgbg.apply(frame)
print("fgmask shape", fgmask.shape, self.f_counter)
# dilation
dilation = cv2.dilate(fgmask, kernel_dil, iterations=1)
return dilation
return fgmask
def print_img_info(self, image, title):
# print(title)
# print(image)
# print(image.shape)
# print(image.dtype)
image_max_value_each_channel = image.reshape(
(image.shape[0] * image.shape[1], 3)).max(axis=0)
image_min_value_each_channel = image.reshape(
(image.shape[0] * image.shape[1], 3)).min(axis=0)
print("{}, shape: {}, dtype: {}, max (in each channels) value: {}, min value: {}".format(
title, image.shape, image.dtype, image_max_value_each_channel, image_min_value_each_channel))
async def get_latest_frame(self):
if motion_detected_on == False:
# start_time = time.time()
# self.f_counter = self.f_counter + 1
# print(self.f_counter)
self.fps.per_frame()
ret, frame = self.cap.read()
await asyncio.sleep(0)
return frame
self.f_counter = self.f_counter + 1
ret, frame = self.cap.read()
# print("frame shape", frame.shape)
# bg_mask = self.get_background_mask_3channel(frame)
bg_mask = self.get_background_mask_3channel(frame)
ret, thresh1 = cv2.threshold(
bg_mask, 128, 255, cv2.THRESH_BINARY)
alpha = thresh1 / 255
# alpha = np.ones(frame.shape, np.uint8)
# Force the left side all non-pass
# alpha[:, 0:frame.shape[1] // 2] = (0, 0, 0)
# Force the left side all pass
alpha[:, frame.shape[1] // 2:] = (1, 1, 1)
alpha_unit8 = alpha.astype(np.uint8)
self.print_img_info(bg_mask, "bg_mask")
self.print_img_info(thresh1, "thresh1")
self.print_img_info(alpha, "alpha")
self.print_img_info(alpha_unit8, "alpha_unit8")
fg = cv2.multiply(alpha_unit8, frame)
# fg = cv2.bitwise_and(frame, frame, mask=bg_mask)
# fgmask = frame
await asyncio.sleep(0)
if self.f_counter % 50 >= 25:
return self.rotate(frame)
return self.rotate(fg)
async def get_jpeg_frame(self):
encode_param = (int(cv2.IMWRITE_JPEG_QUALITY), 50)
frame = await self.get_latest_frame()
frame, encimg = cv2.imencode('.jpg', frame, encode_param)
return encimg.tostring()
class PiCameraDevice():
def __init__(self, resolution_picked):
print('Picamera is used as camera source')
# resolution = (640, 480) # fps should be like 15
# resolution = (1640, 1232) # has problem
# resolution = (1280, 720)
# resolution = (1920, 1088) # work for only a period
# resolution = (3280, 2464)
framerate = 10
from utils.pivideostream import PiVideoStream
self.stream = PiVideoStream(resolution=resolution_picked,
framerate=framerate)
self.stream.start()
self.fps = FPS()
print("Cam set to resolution: begin===")
print(self.stream.camera.resolution
)
print("===end")
async def get_latest_frame(self):
self.fps.per_frame()
frame = self.stream.read()
return frame
async def get_jpeg_frame(self):
encode_param = (int(cv2.IMWRITE_JPEG_QUALITY), 90)
frame = await self.get_latest_frame()
kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
frame = cv2.filter2D(frame, -1, kernel) #sharpening
frame = cv2.resize(frame, (640,480), interpolation = cv2.INTER_AREA) # downsizing
frame, encimg = cv2.imencode('.jpg', frame, encode_param)
return encimg.tostring()
class PeerConnectionFactory():
def __init__(self):
self.config = {'sdpSemantics': 'unified-plan'}
self.STUN_SERVER = None
self.TURN_SERVER = None
self.TURN_USERNAME = None
self.TURN_PASSWORD = None
if all(k in os.environ for k in ('STUN_SERVER',
'TURN_SERVER', 'TURN_USERNAME', 'TURN_PASSWORD')):
print(
'WebRTC connections will use your custom ICE Servers (STUN / TURN).')
self.STUN_SERVER = os.environ['STUN_SERVER']
self.TURN_SERVER = os.environ['TURN_SERVER']
self.TURN_USERNAME = os.environ['TURN_USERNAME']
self.TURN_PASSWORD = os.environ['TURN_PASSWORD']
iceServers = [
{
'urls': self.STUN_SERVER
},
{
'urls': self.TURN_SERVER,
'credential': self.TURN_PASSWORD,
'username': self.TURN_USERNAME
}
]
self.config['iceServers'] = iceServers
def create_peer_connection(self):
if self.TURN_SERVER is not None:
iceServers = []
iceServers.append(RTCIceServer(self.STUN_SERVER))
iceServers.append(
RTCIceServer(
self.TURN_SERVER,
username=self.TURN_USERNAME,
credential=self.TURN_PASSWORD))
return RTCPeerConnection(RTCConfiguration(iceServers))
return RTCPeerConnection()
def get_ice_config(self):
return json.dumps(self.config)
class RTCVideoStream(VideoStreamTrack):
def __init__(self, camera_device):
super().__init__()
self.camera_device = camera_device
self.data_bgr = None
async def recv(self):
self.data_bgr = await self.camera_device.get_latest_frame()
frame = VideoFrame.from_ndarray(self.data_bgr, format='bgr24')
pts, time_base = await self.next_timestamp()
frame.pts = pts
frame.time_base = time_base
return frame
async def index(request):
content = open(
os.path.join(
ROOT,
'client/index.html'),
'r').read()
return web.Response(content_type='text/html', text=content)
async def stylesheet(request):
content = open(os.path.join(ROOT, 'client/style.css'), 'r').read()
return web.Response(content_type='text/css', text=content)
async def javascript(request):
content = open(os.path.join(ROOT, 'client/client.js'), 'r').read()
return web.Response(
content_type='application/javascript', text=content)
async def balena(request):
content = open(
os.path.join(
ROOT,
'client/balena-cam.svg'),
'r').read()
return web.Response(
content_type='image/svg+xml', text=content)
async def balena_logo(request):
content = open(
os.path.join(
ROOT,
'client/balena-logo.svg'),
'r').read()
return web.Response(
content_type='image/svg+xml', text=content)
async def favicon(request):
return web.FileResponse(os.path.join(ROOT, 'client/favicon.png'))
async def offer(request):
    """Answer an SDP offer POSTed by the browser, establishing a WebRTC session.

    Creates a peer connection via the module-level pc_factory, attaches the
    camera video track, completes the offer/answer exchange, and returns the
    local SDP answer as JSON.
    """
    params = await request.json()
    offer = RTCSessionDescription(
        sdp=params['sdp'],
        type=params['type'])
    pc = pc_factory.create_peer_connection()
    # Track the connection so on_shutdown() can close it later.
    pcs.add(pc)
    # Add local media
    local_video = RTCVideoStream(camera_device)
    pc.addTrack(local_video)
    @ pc.on('iceconnectionstatechange')
    async def on_iceconnectionstatechange():
        # Drop and forget the peer connection once ICE negotiation fails.
        if pc.iceConnectionState == 'failed':
            await pc.close()
            pcs.discard(pc)
    await pc.setRemoteDescription(offer)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)
    # Reply with the local SDP answer so the browser can finish signaling.
    return web.Response(
        content_type='application/json',
        text=json.dumps({
            'sdp': pc.localDescription.sdp,
            'type': pc.localDescription.type
        }))
async def mjpeg_handler(request):
    """Stream camera frames as a multipart/x-mixed-replace MJPEG response.

    The loop runs until the client disconnects (aiohttp raises on a broken
    connection, ending the coroutine).
    """
    boundary = "frame"
    response = web.StreamResponse(status=200, reason='OK', headers={
        'Content-Type': 'multipart/x-mixed-replace; '
                        'boundary=%s' % boundary,
    })
    await response.prepare(request)
    while True:
        data = await camera_device.get_jpeg_frame()
        # Throttle the stream: a 0.05 s sleep caps it at ~20 frames/second
        # (the original comment claimed 5 FPS, which would need a 0.2 s sleep).
        await asyncio.sleep(0.05)
        # Emit each multipart section as a single write instead of six tiny
        # ones; the bytes on the wire are identical.
        part_header = ('--{}\r\n'
                       'Content-Type: image/jpeg\r\n'
                       'Content-Length: {}\r\n'
                       '\r\n').format(boundary, len(data))
        await response.write(part_header.encode('utf-8') + data + b"\r\n")
    return response
async def config(request):
    """Expose the ICE server configuration to the browser client as JSON."""
    ice_config = pc_factory.get_ice_config()
    return web.Response(content_type='application/json', text=ice_config)
async def on_shutdown(app):
    """Close every live peer connection when the web application shuts down."""
    await asyncio.gather(*(connection.close() for connection in pcs))
def checkDeviceReadiness():
    """Verify /dev/video0 exists; on Linux, try loading the camera driver.

    If the device node is missing on Linux, the bcm2835-v4l2 driver is
    probed and the process exits (the container supervisor restarts it).
    """
    device_missing = (not os.path.exists('/dev/video0')
                      and platform.system() == 'Linux')
    if not device_missing:
        print('Video device is ready')
        return
    print('Video device is not ready')
    print('Trying to load bcm2835-v4l2 driver...')
    os.system('bash -c "modprobe bcm2835-v4l2"')
    sleep(1)
    sys.exit()
if __name__ == '__main__':
    checkDeviceReadiness()
    print("Camera detected!!!")
    ROOT = os.path.dirname(__file__)
    pcs = set()  # live peer connections; closed again in on_shutdown()
    # Idiom fix: truth-test the flag instead of comparing `== True`.
    if use_picamera:
        camera_device = PiCameraDevice(resolution_picked)
    else:
        camera_device = CameraDevice(resolution_picked)
    # Optional vertical flip, opt-in via the `rotation` environment variable.
    # Replaces a try/except BaseException that silently swallowed everything;
    # the only possible failure was a missing key, which .get() handles.
    flip = os.environ.get('rotation') == '1'
    auth = []
    if 'username' in os.environ and 'password' in os.environ:
        print('\n#############################################################')
        print('Authorization is enabled.')
        print('Your balenaCam is password protected.')
        print('#############################################################\n')
        auth.append(
            BasicAuthMiddleware(
                username=os.environ['username'],
                password=os.environ['password']))
    else:
        print('\n#############################################################')
        print('Authorization is disabled.')
        print('Anyone can access your balenaCam, using the device\'s URL!')
        print(
            'Set the username and password environment variables \nto enable authorization.')
        print(
            'For more info visit: \nhttps://github.com/balena-io-playground/balena-cam')
        print('#############################################################\n')
    # Factory to create peerConnections depending on the iceServers
    # set by user
    pc_factory = PeerConnectionFactory()
    app = web.Application(middlewares=auth)
    app.on_shutdown.append(on_shutdown)
    app.router.add_get('/', index)
    app.router.add_get('/favicon.png', favicon)
    app.router.add_get('/balena-logo.svg', balena_logo)
    app.router.add_get('/balena-cam.svg', balena)
    app.router.add_get('/client.js', javascript)
    app.router.add_get('/style.css', stylesheet)
    app.router.add_post('/offer', offer)
    app.router.add_get('/mjpeg', mjpeg_handler)
    app.router.add_get('/ice-config', config)
    web.run_app(app, port=80)
| 10,431 | 20 | 822 |
83c22695b516919730bf4947f32543050a6068c4 | 128 | py | Python | channelsmultiplexer/__init__.py | hishnash/channelsmultiplexer | b35f741f356c15e96e51ace2b85ea65a630fcc85 | [
"MIT"
] | 25 | 2018-11-08T19:42:08.000Z | 2022-01-28T00:49:42.000Z | channelsmultiplexer/__init__.py | hishnash/channelsmultiplexer | b35f741f356c15e96e51ace2b85ea65a630fcc85 | [
"MIT"
] | 4 | 2018-11-11T23:40:48.000Z | 2022-02-25T09:37:46.000Z | channelsmultiplexer/__init__.py | hishnash/channelsmultiplexer | b35f741f356c15e96e51ace2b85ea65a630fcc85 | [
"MIT"
] | 6 | 2019-01-13T22:44:40.000Z | 2022-02-25T14:23:36.000Z | from channelsmultiplexer.demultiplexer import AsyncJsonWebsocketDemultiplexer
__all__ = [
AsyncJsonWebsocketDemultiplexer
] | 25.6 | 77 | 0.867188 | from channelsmultiplexer.demultiplexer import AsyncJsonWebsocketDemultiplexer
# Bug fix: __all__ must list export *names as strings*; putting the object
# itself in the list breaks `from module import *`.
__all__ = [
    "AsyncJsonWebsocketDemultiplexer"
]
2fdf73d92e81234842ceb633829cea348a288ece | 709 | py | Python | acmweb/settings.py | hnit-acm/hnit-acm-web | 438cce2db574a953caea75503c1132644299ecde | [
"MIT"
] | 2 | 2020-10-26T14:07:45.000Z | 2020-11-22T13:47:21.000Z | acmweb/settings.py | hnit-acm/hnit-acm-web | 438cce2db574a953caea75503c1132644299ecde | [
"MIT"
] | null | null | null | acmweb/settings.py | hnit-acm/hnit-acm-web | 438cce2db574a953caea75503c1132644299ecde | [
"MIT"
] | 3 | 2021-07-19T07:44:51.000Z | 2021-08-03T15:53:20.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/22 16:29
# @Author : Coodyz
# @Site : https://github.com/coolbreeze2
# @File : settings.py
# @Software: PyCharm
import os
import sys
# Project root: two levels up from this settings module.
basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# SQLite URI compatible: Windows URIs use three slashes, POSIX four.
WIN = sys.platform.startswith('win')
prefix = 'sqlite:///' if WIN else 'sqlite:////'


class BaseConfig:
    """Base application configuration (Flask/SQLAlchemy style settings).

    Restored here: `config = BaseConfig` below referenced a class that was
    missing from this copy of the module.
    """
    # SECURITY NOTE: hard-coded fallback secret; set SECRET_KEY in the
    # environment for any real deployment.
    SECRET_KEY = os.getenv('SECRET_KEY', 'dawd2y3872jkrg')
    ROOT_PWD = "dhw8ye892yjkhfgh"
    DB_FILE = "acmWeb.db"
    # DATABASE_URL wins when set; otherwise fall back to a local SQLite file.
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', prefix + os.path.join(basedir, DB_FILE))
    SQLALCHEMY_TRACK_MODIFICATIONS = False


config = BaseConfig
| 22.870968 | 96 | 0.67842 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/22 16:29
# @Author : Coodyz
# @Site : https://github.com/coolbreeze2
# @File : settings.py
# @Software: PyCharm
import os
import sys
# Project root: two levels up from this settings module.
basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# SQLite URI compatible: Windows URIs use three slashes, POSIX four.
WIN = sys.platform.startswith('win')
prefix = 'sqlite:///' if WIN else 'sqlite:////'
class BaseConfig:
    """Base application configuration (Flask/SQLAlchemy style settings)."""
    SECRET_KEY = os.getenv('SECRET_KEY', 'dawd2y3872jkrg')
    ROOT_PWD = "dhw8ye892yjkhfgh"
    DB_FILE = "acmWeb.db"
    # DATABASE_URL wins when set; otherwise fall back to a local SQLite file.
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', prefix + os.path.join(basedir, DB_FILE))
    SQLALCHEMY_TRACK_MODIFICATIONS = False
config = BaseConfig
| 0 | 256 | 23 |
b94816e6a1751a26c8b236e7af9f60f21c9686d7 | 1,577 | py | Python | setup.py | khanlab/pydeface | 13eede1595fbf776f1c3af48d66ad057066acfbc | [
"MIT"
] | 1 | 2021-06-11T02:17:48.000Z | 2021-06-11T02:17:48.000Z | setup.py | khanlab/pydeface | 13eede1595fbf776f1c3af48d66ad057066acfbc | [
"MIT"
] | null | null | null | setup.py | khanlab/pydeface | 13eede1595fbf776f1c3af48d66ad057066acfbc | [
"MIT"
] | 1 | 2020-07-14T14:19:42.000Z | 2020-07-14T14:19:42.000Z | #!/usr/bin/env python
#
# Copyright (C) 2013-2015 Russell Poldrack <poldrack@stanford.edu>
#
# Some portions were borrowed from:
# https://github.com/mwaskom/lyman/blob/master/setup.py
# and:
# https://chriswarrick.com/blog/2014/09/15/python-apps-the-right-way-entry_points-and-scripts/
import os
from setuptools import setup
# Distribution metadata handed to setup() below.
DISTNAME = "pydeface"
DESCRIPTION = "pydeface: a script to remove facial structure from MRI images."
MAINTAINER = 'Russ Poldrack'
MAINTAINER_EMAIL = 'poldrack@stanford.edu'
LICENSE = 'MIT'
URL = 'http://poldracklab.org'
DOWNLOAD_URL = 'https://github.com/poldracklab/pydeface/'
VERSION = '2.0'
# Drop any stale distutils MANIFEST so it is regenerated for this build.
if os.path.exists('MANIFEST'):
    os.remove('MANIFEST')
# Bundle the face-mask template and mean-registration volume with the package.
datafiles = {'pydeface': ['data/facemask.nii.gz',
                          'data/mean_reg2mean.nii.gz']}
setup(name=DISTNAME,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      description=DESCRIPTION,
      license=LICENSE,
      version=VERSION,
      url=URL,
      download_url=DOWNLOAD_URL,
      packages=['pydeface'],
      package_data=datafiles,
      classifiers=['Intended Audience :: Science/Research',
                   'Programming Language :: Python :: 2.7',
                   'License :: OSI Approved :: BSD License',
                   'Operating System :: POSIX',
                   'Operating System :: Unix',
                   'Operating System :: MacOS'],
      install_requires=['numpy', 'nibabel', 'nipype'],
      entry_points={
          'console_scripts': [
              'pydeface = pydeface.__main__:main'
          ]},
      )
| 31.54 | 94 | 0.630311 | #!/usr/bin/env python
#
# Copyright (C) 2013-2015 Russell Poldrack <poldrack@stanford.edu>
#
# Some portions were borrowed from:
# https://github.com/mwaskom/lyman/blob/master/setup.py
# and:
# https://chriswarrick.com/blog/2014/09/15/python-apps-the-right-way-entry_points-and-scripts/
import os
from setuptools import setup
# Distribution metadata handed to setup() below.
DISTNAME = "pydeface"
DESCRIPTION = "pydeface: a script to remove facial structure from MRI images."
MAINTAINER = 'Russ Poldrack'
MAINTAINER_EMAIL = 'poldrack@stanford.edu'
LICENSE = 'MIT'
URL = 'http://poldracklab.org'
DOWNLOAD_URL = 'https://github.com/poldracklab/pydeface/'
VERSION = '2.0'
# Drop any stale distutils MANIFEST so it is regenerated for this build.
if os.path.exists('MANIFEST'):
    os.remove('MANIFEST')
# Bundle the face-mask template and mean-registration volume with the package.
datafiles = {'pydeface': ['data/facemask.nii.gz',
                          'data/mean_reg2mean.nii.gz']}
setup(name=DISTNAME,
      maintainer=MAINTAINER,
      maintainer_email=MAINTAINER_EMAIL,
      description=DESCRIPTION,
      license=LICENSE,
      version=VERSION,
      url=URL,
      download_url=DOWNLOAD_URL,
      packages=['pydeface'],
      package_data=datafiles,
      classifiers=['Intended Audience :: Science/Research',
                   'Programming Language :: Python :: 2.7',
                   'License :: OSI Approved :: BSD License',
                   'Operating System :: POSIX',
                   'Operating System :: Unix',
                   'Operating System :: MacOS'],
      install_requires=['numpy', 'nibabel', 'nipype'],
      entry_points={
          'console_scripts': [
              'pydeface = pydeface.__main__:main'
          ]},
      )
| 0 | 0 | 0 |
6aea42a28eed84faf29e4fb36bb6389c2f884432 | 12,096 | py | Python | test1.py | wgcban/spin_roadmapper | 2c1c8f22073d989753dc6f95d1f547198a76414b | [
"Apache-2.0"
] | 24 | 2021-09-15T00:20:52.000Z | 2022-03-27T05:01:23.000Z | test1.py | wgcban/spin_roadmapper | 2c1c8f22073d989753dc6f95d1f547198a76414b | [
"Apache-2.0"
] | 2 | 2021-12-27T13:45:02.000Z | 2022-03-25T13:33:20.000Z | test1.py | wgcban/spin_roadmapper | 2c1c8f22073d989753dc6f95d1f547198a76414b | [
"Apache-2.0"
] | 3 | 2021-12-27T03:11:56.000Z | 2022-03-10T10:24:42.000Z | from __future__ import print_function
import argparse
import json
import os
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from model.models import MODELS
from road_dataset import DeepGlobeDataset, SpacenetDataset
from torch.autograd import Variable
from torch.optim.lr_scheduler import MultiStepLR
from utils.loss import CrossEntropyLoss2d, mIoULoss
from utils import util
from utils import viz_util
from utils.util import relaxed_f1
# Map of CLI dataset names to their Dataset implementations.
__dataset__ = {"spacenet": SpacenetDataset, "deepglobe": DeepGlobeDataset}
# ---- Command-line interface ------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument(
    "--config", required=True, type=str, help="config file path"
)
parser.add_argument(
    "--model_name",
    required=True,
    choices=sorted(MODELS.keys()),
    help="Name of Model = {}".format(MODELS.keys()),
)
parser.add_argument("--exp", required=True, type=str, help="Experiment Name/Directory")
parser.add_argument(
    "--resume", default=None, type=str, help="path to latest checkpoint (default: None)"
)
parser.add_argument(
    "--dataset",
    required=True,
    choices=sorted(__dataset__.keys()),
    help="select dataset name from {}. (default: Spacenet)".format(__dataset__.keys()),
)
parser.add_argument(
    "--model_kwargs",
    default={},
    type=json.loads,
    help="parameters for the model",
)
parser.add_argument(
    "--multi_scale_pred",
    default=True,
    type=util.str2bool,
    help="perform multi-scale prediction (default: True)",
)
args = parser.parse_args()
# ---- Configuration: a checkpoint's stored config wins over --config --------
config = None
if args.resume is not None:
    if args.config is not None:
        print("Warning: --config overridden by --resume")
    config = torch.load(args.resume)["config"]
elif args.config is not None:
    config = json.load(open(args.config))
assert config is not None
util.setSeed(config)
experiment_dir = os.path.join(config["trainer"]["save_dir"], args.exp)
util.ensure_dir(experiment_dir)
###Logging Files
train_file = "{}/{}_train_loss.txt".format(experiment_dir, args.dataset)
test_file = "{}/{}_test_loss.txt".format(experiment_dir, args.dataset)
train_loss_file = open(train_file, "w")
val_loss_file = open(test_file, "w")
### Angle Metrics
# NOTE(review): the angle-metric logs are opened read-only and unbuffered
# ("rb", 0) while the loss logs above are opened "w" — confirm these handles
# are never written to, otherwise this mode is a bug.
train_file_angle = "{}/{}_train_angle_loss.txt".format(experiment_dir, args.dataset)
test_file_angle = "{}/{}_test_angle_loss.txt".format(experiment_dir, args.dataset)
train_loss_angle_file = open(train_file_angle, "rb", 0)
val_loss_angle_file = open(test_file_angle, "rb", 0)
################################################################################
# ---- Model: build the two-task network and move it to the GPU(s) -----------
num_gpus = torch.cuda.device_count()
model = MODELS[args.model_name](
    config["task1_classes"], config["task2_classes"], **args.model_kwargs
)
if num_gpus > 1:
    print("Training with multiple GPUs ({})".format(num_gpus))
    model = nn.DataParallel(model).cuda()
else:
    print("Single Cuda Node is avaiable")
    model.cuda()
################################################################################
### Load Dataset from root folder and intialize DataLoader
train_loader = data.DataLoader(
    __dataset__[args.dataset](
        config["train_dataset"],
        seed=config["seed"],
        is_train=True,
        multi_scale_pred=args.multi_scale_pred,
    ),
    batch_size=config["train_batch_size"],
    num_workers=8,
    shuffle=True,
    pin_memory=False,
)
val_loader = data.DataLoader(
    __dataset__[args.dataset](
        config["val_dataset"],
        seed=config["seed"],
        is_train=False,
        multi_scale_pred=args.multi_scale_pred,
    ),
    batch_size=config["val_batch_size"],
    num_workers=8,
    shuffle=False,
    pin_memory=False,
)
print("Training with dataset => {}".format(train_loader.dataset.__class__.__name__))
################################################################################
# ---- Optimization state, optionally restored from a checkpoint -------------
best_accuracy = 0
best_miou = 0
start_epoch = 1
total_epochs = config["trainer"]["total_epochs"]
optimizer = optim.SGD(
    model.parameters(), lr=config["optimizer"]["lr"], momentum=0.9, weight_decay=0.0005
)
if args.resume is not None:
    print("Loading from existing FCN and copying weights to continue....")
    checkpoint = torch.load(args.resume)
    start_epoch = checkpoint["epoch"] + 1
    best_miou = checkpoint["miou"]
    # stat_parallel_dict = util.getParllelNetworkStateDict(checkpoint['state_dict'])
    # model.load_state_dict(stat_parallel_dict)
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
else:
    util.weights_init(model, manual_seed=config["seed"])
viz_util.summary(model, print_arch=False)
# NOTE(review): eval() executes the config string; acceptable only because the
# config file is trusted local input.
scheduler = MultiStepLR(
    optimizer,
    milestones=eval(config["optimizer"]["lr_drop_epoch"]),
    gamma=config["optimizer"]["lr_step"],
)
# ---- Class weighting and the two task losses -------------------------------
weights = torch.ones(config["task1_classes"]).cuda()
if config["task1_weight"] < 1:
    print("Roads are weighted.")
    weights[0] = 1 - config["task1_weight"]
    weights[1] = config["task1_weight"]
weights_angles = torch.ones(config["task2_classes"]).cuda()
if config["task2_weight"] < 1:
    print("Road angles are weighted.")
    weights_angles[-1] = config["task2_weight"]
angle_loss = CrossEntropyLoss2d(
    weight=weights_angles, size_average=True, ignore_index=255, reduce=True
).cuda()
road_loss = mIoULoss(
    weight=weights, size_average=True, n_classes=config["task1_classes"]
).cuda()
# Evaluation driver: run a single pass of test() and report wall-clock time.
for epoch in range(0, 1):
    start_time = datetime.now()
    # scheduler.step(epoch)
    # print("\nTraining Epoch: %d" % epoch)
    # train(epoch)
    # if epoch % config["trainer"]["test_freq"] == 0:
    # for i in range(110,112):
    print("\nTesting Epoch: %d" % epoch)
    val_loss = test(epoch)
    end_time = datetime.now()
    # Bug fix: the epoch was passed to format() but never rendered because
    # the template only referenced {1}.
    print("Time Elapsed for epoch {0} => {1}".format(epoch, end_time - start_time))
| 34.073239 | 151 | 0.632192 | from __future__ import print_function
import argparse
import json
import os
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from model.models import MODELS
from road_dataset import DeepGlobeDataset, SpacenetDataset
from torch.autograd import Variable
from torch.optim.lr_scheduler import MultiStepLR
from utils.loss import CrossEntropyLoss2d, mIoULoss
from utils import util
from utils import viz_util
from utils.util import relaxed_f1
# Map of CLI dataset names to their Dataset implementations.
__dataset__ = {"spacenet": SpacenetDataset, "deepglobe": DeepGlobeDataset}
# ---- Command-line interface ------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument(
    "--config", required=True, type=str, help="config file path"
)
parser.add_argument(
    "--model_name",
    required=True,
    choices=sorted(MODELS.keys()),
    help="Name of Model = {}".format(MODELS.keys()),
)
parser.add_argument("--exp", required=True, type=str, help="Experiment Name/Directory")
parser.add_argument(
    "--resume", default=None, type=str, help="path to latest checkpoint (default: None)"
)
parser.add_argument(
    "--dataset",
    required=True,
    choices=sorted(__dataset__.keys()),
    help="select dataset name from {}. (default: Spacenet)".format(__dataset__.keys()),
)
parser.add_argument(
    "--model_kwargs",
    default={},
    type=json.loads,
    help="parameters for the model",
)
parser.add_argument(
    "--multi_scale_pred",
    default=True,
    type=util.str2bool,
    help="perform multi-scale prediction (default: True)",
)
args = parser.parse_args()
# ---- Configuration: a checkpoint's stored config wins over --config --------
config = None
if args.resume is not None:
    if args.config is not None:
        print("Warning: --config overridden by --resume")
    config = torch.load(args.resume)["config"]
elif args.config is not None:
    config = json.load(open(args.config))
assert config is not None
util.setSeed(config)
experiment_dir = os.path.join(config["trainer"]["save_dir"], args.exp)
util.ensure_dir(experiment_dir)
###Logging Files
train_file = "{}/{}_train_loss.txt".format(experiment_dir, args.dataset)
test_file = "{}/{}_test_loss.txt".format(experiment_dir, args.dataset)
train_loss_file = open(train_file, "w")
val_loss_file = open(test_file, "w")
### Angle Metrics
# NOTE(review): the angle-metric logs are opened read-only and unbuffered
# ("rb", 0) while the loss logs above are opened "w" — confirm these handles
# are never written to, otherwise this mode is a bug.
train_file_angle = "{}/{}_train_angle_loss.txt".format(experiment_dir, args.dataset)
test_file_angle = "{}/{}_test_angle_loss.txt".format(experiment_dir, args.dataset)
train_loss_angle_file = open(train_file_angle, "rb", 0)
val_loss_angle_file = open(test_file_angle, "rb", 0)
################################################################################
# ---- Model: build the two-task network and move it to the GPU(s) -----------
num_gpus = torch.cuda.device_count()
model = MODELS[args.model_name](
    config["task1_classes"], config["task2_classes"], **args.model_kwargs
)
if num_gpus > 1:
    print("Training with multiple GPUs ({})".format(num_gpus))
    model = nn.DataParallel(model).cuda()
else:
    print("Single Cuda Node is avaiable")
    model.cuda()
################################################################################
### Load Dataset from root folder and intialize DataLoader
train_loader = data.DataLoader(
    __dataset__[args.dataset](
        config["train_dataset"],
        seed=config["seed"],
        is_train=True,
        multi_scale_pred=args.multi_scale_pred,
    ),
    batch_size=config["train_batch_size"],
    num_workers=8,
    shuffle=True,
    pin_memory=False,
)
val_loader = data.DataLoader(
    __dataset__[args.dataset](
        config["val_dataset"],
        seed=config["seed"],
        is_train=False,
        multi_scale_pred=args.multi_scale_pred,
    ),
    batch_size=config["val_batch_size"],
    num_workers=8,
    shuffle=False,
    pin_memory=False,
)
print("Training with dataset => {}".format(train_loader.dataset.__class__.__name__))
################################################################################
# ---- Optimization state, optionally restored from a checkpoint -------------
best_accuracy = 0
best_miou = 0
start_epoch = 1
total_epochs = config["trainer"]["total_epochs"]
optimizer = optim.SGD(
    model.parameters(), lr=config["optimizer"]["lr"], momentum=0.9, weight_decay=0.0005
)
if args.resume is not None:
    print("Loading from existing FCN and copying weights to continue....")
    checkpoint = torch.load(args.resume)
    start_epoch = checkpoint["epoch"] + 1
    best_miou = checkpoint["miou"]
    # stat_parallel_dict = util.getParllelNetworkStateDict(checkpoint['state_dict'])
    # model.load_state_dict(stat_parallel_dict)
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
else:
    util.weights_init(model, manual_seed=config["seed"])
viz_util.summary(model, print_arch=False)
# NOTE(review): eval() executes the config string; acceptable only because the
# config file is trusted local input.
scheduler = MultiStepLR(
    optimizer,
    milestones=eval(config["optimizer"]["lr_drop_epoch"]),
    gamma=config["optimizer"]["lr_step"],
)
# ---- Class weighting and the two task losses -------------------------------
weights = torch.ones(config["task1_classes"]).cuda()
if config["task1_weight"] < 1:
    print("Roads are weighted.")
    weights[0] = 1 - config["task1_weight"]
    weights[1] = config["task1_weight"]
weights_angles = torch.ones(config["task2_classes"]).cuda()
if config["task2_weight"] < 1:
    print("Road angles are weighted.")
    weights_angles[-1] = config["task2_weight"]
angle_loss = CrossEntropyLoss2d(
    weight=weights_angles, size_average=True, ignore_index=255, reduce=True
).cuda()
road_loss = mIoULoss(
    weight=weights, size_average=True, n_classes=config["task1_classes"]
).cuda()
def test(epoch):
    """Evaluate the current model over the validation set.

    Loads a pretrained checkpoint, accumulates segmentation and angle
    confusion histograms, computes relaxed precision/recall/F1/IoU, saves
    sample predictions periodically, and checkpoints the model when mIoU
    improves.

    Args:
        epoch (int): epoch index, used only for logging and output filenames.

    Returns:
        float: mean task-1 (road) loss over the validation loader.
    """
    global best_accuracy
    global best_miou
    # TODO(review): machine-specific checkpoint path; move it to the config
    # or a CLI argument.
    chec = "/media/lidan/ssd/Chaminda/road_connectivity/deepglobe_exp/dg_stak_mtl/model_best.pth.tar"
    checkpoint = torch.load(chec)
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
    model.eval()
    test_loss_iou = 0
    test_loss_vec = 0
    # Confusion histograms for segmentation (task 1) and angles (task 2).
    hist = np.zeros((config["task1_classes"], config["task1_classes"]))
    hist_angles = np.zeros((config["task2_classes"], config["task2_classes"]))
    crop_size = config["val_dataset"][args.dataset]["crop_size"]
    # Running totals for relaxed precision/recall.
    rprecision_tp = 0.0
    rrecall_tp = 0.0
    pred_positive = 0.0
    gt_positive = 0.0
    for i, (inputsBGR, labels, vecmap_angles) in enumerate(val_loader, 0):
        # NOTE: volatile=True is a deprecated pre-0.4 PyTorch idiom; kept
        # as-is for compatibility with the rest of this codebase.
        inputsBGR = Variable(
            inputsBGR.float().cuda(), volatile=True, requires_grad=False
        )
        outputs, pred_vecmaps = model(inputsBGR)
        if args.multi_scale_pred:
            # Sum the road loss over every hourglass stack and the two
            # extra output scales; same scheme for the angle loss.
            loss1 = road_loss(outputs[0], util.to_variable(labels[0], True, False), True)
            num_stacks = model.module.num_stacks if num_gpus > 1 else model.num_stacks
            for idx in range(num_stacks - 1):
                loss1 += road_loss(outputs[idx + 1], util.to_variable(labels[0], True, False), True)
            for idx, output in enumerate(outputs[-2:]):
                loss1 += road_loss(output, util.to_variable(labels[idx + 1], True, False), True)
            loss2 = angle_loss(pred_vecmaps[0], util.to_variable(vecmap_angles[0], True, False))
            for idx in range(num_stacks - 1):
                loss2 += angle_loss(
                    pred_vecmaps[idx + 1], util.to_variable(vecmap_angles[0], True, False)
                )
            for idx, pred_vecmap in enumerate(pred_vecmaps[-2:]):
                loss2 += angle_loss(
                    pred_vecmap, util.to_variable(vecmap_angles[idx + 1], True, False)
                )
            # Keep only the finest-scale predictions for metric computation.
            outputs = outputs[-1]
            pred_vecmaps = pred_vecmaps[-1]
        else:
            loss1 = road_loss(outputs, util.to_variable(labels[0], True, False), True)
            loss2 = angle_loss(pred_vecmaps, util.to_variable(labels[0], True, False))
        test_loss_iou += loss1.item()
        test_loss_vec += loss2.item()
        _, predicted = torch.max(outputs.data, 1)
        correctLabel = labels[-1].view(-1, crop_size, crop_size).long()
        hist += util.fast_hist(
            predicted.view(predicted.size(0), -1).cpu().numpy(),
            correctLabel.view(correctLabel.size(0), -1).cpu().numpy(),
            config["task1_classes"],
        )
        _, predicted_angle = torch.max(pred_vecmaps.data, 1)
        correct_angles = vecmap_angles[-1].view(-1, crop_size, crop_size).long()
        hist_angles += util.fast_hist(
            predicted_angle.view(predicted_angle.size(0), -1).cpu().numpy(),
            correct_angles.view(correct_angles.size(0), -1).cpu().numpy(),
            config["task2_classes"],
        )
        p_accu, miou, road_iou, fwacc = util.performMetrics(
            train_loss_file,
            val_loss_file,
            epoch,
            hist,
            test_loss_iou / (i + 1),
            test_loss_vec / (i + 1),
            is_train=False,
        )
        # Periodically dump qualitative prediction images.
        if i % 10 == 0 or i == len(val_loader) - 1:
            images_path = "{}/images/".format(experiment_dir)
            util.ensure_dir(images_path)
            util.savePredictedProb(
                inputsBGR.data.cpu(),
                labels[-1].cpu(),
                predicted.cpu(),
                F.softmax(outputs, dim=1).data.cpu()[:, 1, :, :],
                predicted_angle.cpu(),
                os.path.join(images_path, "validate_pair_{}_{}.png".format(epoch, i)),
                norm_type=config["val_dataset"]["normalize_type"],
            )
        rprecision_tp_ins, rrecall_tp_ins, pred_positive_ins, gt_positive_ins = relaxed_f1(predicted.cpu().numpy(), labels[-1].cpu().numpy(), buffer=4)
        rprecision_tp = rprecision_tp + rprecision_tp_ins
        rrecall_tp = rrecall_tp + rrecall_tp_ins
        pred_positive = pred_positive + pred_positive_ins
        gt_positive = gt_positive + gt_positive_ins
        # Bug fix: relaxed precision is TP over *predicted* positives. The
        # original divided by gt_positive (the recall denominator), leaving
        # pred_positive accumulated but never used.
        precision = rprecision_tp/(pred_positive + 1e-12)
        recall = rrecall_tp/(gt_positive + 1e-12)
        f1measure = 2*precision*recall/(precision + recall + 1e-12)
        iou = precision*recall/(precision+recall-(precision*recall) + 1e-12)
        print("[Testing {}/{}] precision={}, recall={}, F1={}, IoU-r={}".format(i, len(val_loader), precision, recall, f1measure, iou))
        del inputsBGR, labels, predicted, outputs, pred_vecmaps, predicted_angle
    accuracy, miou, road_iou, fwacc = util.performMetrics(
        train_loss_file,
        val_loss_file,
        epoch,
        hist,
        test_loss_iou / len(val_loader),
        test_loss_vec / len(val_loader),
        is_train=False,
        write=True,
    )
    # NOTE(review): precision/recall/f1/iou here are the values from the last
    # loop iteration (cumulative over the whole set by construction); this
    # raises NameError if val_loader is empty.
    print("[FINAL] precision={}, recall={}, F1={}, IoU-r={}, road-iou={}".format(precision, recall, f1measure, iou, road_iou))
    if miou > best_miou:
        best_accuracy = accuracy
        best_miou = miou
        util.save_checkpoint(epoch, test_loss_iou / len(val_loader), model, optimizer, best_accuracy, best_miou, config, experiment_dir)
    return test_loss_iou / len(val_loader)
# Evaluation driver: run a single pass of test() and report wall-clock time.
for epoch in range(0, 1):
    start_time = datetime.now()
    # scheduler.step(epoch)
    # print("\nTraining Epoch: %d" % epoch)
    # train(epoch)
    # if epoch % config["trainer"]["test_freq"] == 0:
    # for i in range(110,112):
    print("\nTesting Epoch: %d" % epoch)
    val_loss = test(epoch)
    end_time = datetime.now()
    # Bug fix: the epoch was passed to format() but never rendered because
    # the template only referenced {1}.
    print("Time Elapsed for epoch {0} => {1}".format(epoch, end_time - start_time))
| 6,245 | 0 | 23 |
8a800b78cc0a7c0b79c6703c5fad8c240958d500 | 5,054 | py | Python | plot_example.py | GeoCode-polymtl/Deep_1D_velocity | 8f42fc4f5c984d0e11b4c93ae7eee99ba3843b4c | [
"MIT"
] | 7 | 2020-08-17T19:47:21.000Z | 2022-03-29T08:02:51.000Z | plot_example.py | GeoCode-polymtl/Deep_1D_velocity | 8f42fc4f5c984d0e11b4c93ae7eee99ba3843b4c | [
"MIT"
] | 6 | 2020-01-28T22:17:17.000Z | 2022-02-09T23:31:59.000Z | plot_example.py | GeoCode-polymtl/Deep_1D_velocity | 8f42fc4f5c984d0e11b4c93ae7eee99ba3843b4c | [
"MIT"
] | 4 | 2019-11-27T06:05:31.000Z | 2021-10-08T00:38:38.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plot one example with generated data
"""
from vrmslearn.ModelParameters import ModelParameters
from vrmslearn.SeismicGenerator import SeismicGenerator, mute_direct, random_time_scaling, random_noise, random_static
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from shutil import rmtree
import h5py as h5
def plot_one_example(modeled_data, vrms, vp, tlabels, pars):
    """
    Display one example in a single window: the P interval-velocity model,
    the modeled shot gather with predicted reflection picks overlaid, and
    the RMS velocity profile, side by side.

    @params:
    modeled_data (ndarray): shot gather, time samples x receivers
                            — assumed layout; TODO confirm.
    vrms (ndarray): 1D RMS velocity profile; appears normalized to [0, 1]
                    since it is rescaled with pars.vp_min / pars.vp_max.
    vp (ndarray): 2D grid of P interval velocities (m/s).
    tlabels (sequence): per-time-sample flags; value 1 marks a reflection.
    pars (ModelParameters): modeling parameters (dh, dt, resampling, ...).

    @returns: None. Opens a matplotlib window (plt.show() blocks).
    """
    # Plot results
    fig, ax = plt.subplots(1, 3, figsize=[16, 8])
    im1 = ax[0].imshow(vp, cmap=plt.get_cmap('hot'), aspect='auto', vmin=0.9 * pars.vp_min, vmax=1.1 * pars.vp_max)
    ax[0].set_xlabel("X Cell Index," + " dh = " + str(pars.dh) + " m",
                     fontsize=12, fontweight='normal')
    ax[0].set_ylabel("Z Cell Index," + " dh = " + str(pars.dh) + " m",
                     fontsize=12, fontweight='normal')
    ax[0].set_title("P Interval Velocity", fontsize=16, fontweight='bold')
    p = ax[0].get_position().get_points().flatten()
    axis_cbar = fig.add_axes([p[0], 0.03, p[2] - p[0], 0.02])
    plt.colorbar(im1, cax=axis_cbar, orientation='horizontal')
    # Clip the gather's amplitude range for display contrast.
    clip = 0.1
    vmax = np.max(modeled_data) * clip
    vmin = -vmax
    ax[1].imshow(modeled_data,
                 interpolation='bilinear',
                 cmap=plt.get_cmap('Greys'),
                 vmin=vmin, vmax=vmax,
                 aspect='auto')
    # Time indices flagged as reflections in tlabels.
    refpred = [ii for ii, t in enumerate(tlabels) if t == 1]
    # With zero minimum offset the markers are drawn near the gather's
    # center trace; otherwise on the first trace.
    if pars.minoffset == 0:
        toff = np.zeros(len(refpred)) + int(modeled_data.shape[1]/2)-2
    else:
        toff = np.zeros(len(refpred))
    ax[1].plot(toff, refpred, 'r*')
    ax[1].set_xlabel("Receiver Index", fontsize=12, fontweight='normal')
    ax[1].set_ylabel("Time Index," + " dt = " + str(pars.dt * 1000 * pars.resampling) + " ms",
                     fontsize=12, fontweight='normal')
    ax[1].set_title("Shot Gather", fontsize=16, fontweight='bold')
    # De-normalize vrms back to m/s for plotting.
    ax[2].plot(vrms * (pars.vp_max-pars.vp_min) + pars.vp_min, np.arange(0, len(vrms)))
    ax[2].invert_yaxis()
    ax[2].set_ylim(top=0, bottom=len(vrms))
    ax[2].set_xlim(0.9 * pars.vp_min, 1.1 * pars.vp_max)
    ax[2].set_xlabel("RMS Velocity (m/s)", fontsize=12, fontweight='normal')
    ax[2].set_ylabel("Time Index," + " dt = " + str(pars.dt * 1000 * pars.resampling) + " ms",
                     fontsize=12, fontweight='normal')
    ax[2].set_title("P RMS Velocity", fontsize=16, fontweight='bold')
    plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Add arguments to parse
parser.add_argument("-l", "--nlayers",
type=int,
default=12,
help="number of layers : int > 0, default = 0")
parser.add_argument("-d", "--device",
type=int,
default=4,
help="device type : int = 2 or 4, default = 2")
parser.add_argument("-f", "--filename",
type=str,
default="",
help="name of the file containing the example")
# Parse the input
args = parser.parse_args()
pars = ModelParameters()
pars.dh = 6.25
pars.peak_freq = 26
pars.NX = 692*2
pars.NZ = 752*2
pars.dt = 0.0004
pars.NT = int(8.0 / pars.dt)
pars.resampling = 10
pars.dg = 8
pars.gmin = int(470 / pars.dh)
pars.gmax = int((470 + 72 * pars.dg * pars.dh) / pars.dh)
pars.minoffset = 470
pars.vp_min = 1300.0 # maximum value of vp (in m/s)
pars.vp_max = 4000.0 # minimum value of vp (in m/s)
pars.marine = True
pars.velwater = 1500
pars.d_velwater = 60
pars.water_depth = 3500
pars.dwater_depth = 1000
pars.fs = False
pars.source_depth = (pars.Npad + 4) * pars.dh
pars.receiver_depth = (pars.Npad + 4) * pars.dh
pars.identify_direct = False
pars.random_time_scaling = True
gen = SeismicGenerator(pars)
if args.filename is "":
workdir = "./seiscl_workdir"
if not os.path.isdir(workdir):
os.mkdir(workdir)
data, vrms, vp, valid, tlabels = gen.compute_example(workdir=workdir)
if os.path.isdir(workdir):
rmtree(workdir)
else:
file = h5.File(args.filename, "r")
data = file['data'][:]
vrms = file['vrms'][:]
vp = file['vp'][:]
valid = file['valid'][:]
tlabels = file['tlabels'][:]
file.close()
vp = np.stack([vp] * vp.shape[0], axis=1)
data = mute_direct(data, vp[0, 0], pars)
data = random_time_scaling(data, pars.dt * pars.resampling, emin=-2, emax=2)
data = random_noise(data, 0.02)
random_static(data, 2)
plot_one_example(data, vrms, vp, tlabels, pars)
| 33.919463 | 118 | 0.582509 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plot one example with generated data
"""
from vrmslearn.ModelParameters import ModelParameters
from vrmslearn.SeismicGenerator import SeismicGenerator, mute_direct, random_time_scaling, random_noise, random_static
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from shutil import rmtree
import h5py as h5
def plot_one_example(modeled_data, vrms, vp, tlabels, pars):
    """
    Display one example in a single window: the P interval-velocity model,
    the modeled shot gather with predicted reflection picks overlaid, and
    the RMS velocity profile, side by side.

    @params:
    modeled_data (ndarray): shot gather, time samples x receivers
                            — assumed layout; TODO confirm.
    vrms (ndarray): 1D RMS velocity profile; appears normalized to [0, 1]
                    since it is rescaled with pars.vp_min / pars.vp_max.
    vp (ndarray): 2D grid of P interval velocities (m/s).
    tlabels (sequence): per-time-sample flags; value 1 marks a reflection.
    pars (ModelParameters): modeling parameters (dh, dt, resampling, ...).

    @returns: None. Opens a matplotlib window (plt.show() blocks).
    """
    # Plot results
    fig, ax = plt.subplots(1, 3, figsize=[16, 8])
    im1 = ax[0].imshow(vp, cmap=plt.get_cmap('hot'), aspect='auto', vmin=0.9 * pars.vp_min, vmax=1.1 * pars.vp_max)
    ax[0].set_xlabel("X Cell Index," + " dh = " + str(pars.dh) + " m",
                     fontsize=12, fontweight='normal')
    ax[0].set_ylabel("Z Cell Index," + " dh = " + str(pars.dh) + " m",
                     fontsize=12, fontweight='normal')
    ax[0].set_title("P Interval Velocity", fontsize=16, fontweight='bold')
    p = ax[0].get_position().get_points().flatten()
    axis_cbar = fig.add_axes([p[0], 0.03, p[2] - p[0], 0.02])
    plt.colorbar(im1, cax=axis_cbar, orientation='horizontal')
    # Clip the gather's amplitude range for display contrast.
    clip = 0.1
    vmax = np.max(modeled_data) * clip
    vmin = -vmax
    ax[1].imshow(modeled_data,
                 interpolation='bilinear',
                 cmap=plt.get_cmap('Greys'),
                 vmin=vmin, vmax=vmax,
                 aspect='auto')
    # Time indices flagged as reflections in tlabels.
    refpred = [ii for ii, t in enumerate(tlabels) if t == 1]
    # With zero minimum offset the markers are drawn near the gather's
    # center trace; otherwise on the first trace.
    if pars.minoffset == 0:
        toff = np.zeros(len(refpred)) + int(modeled_data.shape[1]/2)-2
    else:
        toff = np.zeros(len(refpred))
    ax[1].plot(toff, refpred, 'r*')
    ax[1].set_xlabel("Receiver Index", fontsize=12, fontweight='normal')
    ax[1].set_ylabel("Time Index," + " dt = " + str(pars.dt * 1000 * pars.resampling) + " ms",
                     fontsize=12, fontweight='normal')
    ax[1].set_title("Shot Gather", fontsize=16, fontweight='bold')
    # De-normalize vrms back to m/s for plotting.
    ax[2].plot(vrms * (pars.vp_max-pars.vp_min) + pars.vp_min, np.arange(0, len(vrms)))
    ax[2].invert_yaxis()
    ax[2].set_ylim(top=0, bottom=len(vrms))
    ax[2].set_xlim(0.9 * pars.vp_min, 1.1 * pars.vp_max)
    ax[2].set_xlabel("RMS Velocity (m/s)", fontsize=12, fontweight='normal')
    ax[2].set_ylabel("Time Index," + " dt = " + str(pars.dt * 1000 * pars.resampling) + " ms",
                     fontsize=12, fontweight='normal')
    ax[2].set_title("P RMS Velocity", fontsize=16, fontweight='bold')
    plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Add arguments to parse (help texts fixed to match the real defaults).
    parser.add_argument("-l", "--nlayers",
                        type=int,
                        default=12,
                        help="number of layers : int > 0, default = 12")
    parser.add_argument("-d", "--device",
                        type=int,
                        default=4,
                        help="device type : int = 2 or 4, default = 4")
    parser.add_argument("-f", "--filename",
                        type=str,
                        default="",
                        help="name of the file containing the example")
    # Parse the input
    args = parser.parse_args()
    # --- Model / acquisition parameters for the seismic example ---
    pars = ModelParameters()
    pars.dh = 6.25
    pars.peak_freq = 26
    pars.NX = 692*2
    pars.NZ = 752*2
    pars.dt = 0.0004
    pars.NT = int(8.0 / pars.dt)
    pars.resampling = 10
    pars.dg = 8
    pars.gmin = int(470 / pars.dh)
    pars.gmax = int((470 + 72 * pars.dg * pars.dh) / pars.dh)
    pars.minoffset = 470
    pars.vp_min = 1300.0  # minimum value of vp (in m/s) -- comments were swapped
    pars.vp_max = 4000.0  # maximum value of vp (in m/s)
    pars.marine = True
    pars.velwater = 1500
    pars.d_velwater = 60
    pars.water_depth = 3500
    pars.dwater_depth = 1000
    pars.fs = False
    pars.source_depth = (pars.Npad + 4) * pars.dh
    pars.receiver_depth = (pars.Npad + 4) * pars.dh
    pars.identify_direct = False
    pars.random_time_scaling = True
    gen = SeismicGenerator(pars)
    # BUG FIX: was `args.filename is ""` -- identity comparison with a string
    # literal is unreliable (CPython interning detail) and raises a
    # SyntaxWarning on Python >= 3.8. Use equality instead.
    if args.filename == "":
        # No input file given: model one example from scratch.
        workdir = "./seiscl_workdir"
        if not os.path.isdir(workdir):
            os.mkdir(workdir)
        data, vrms, vp, valid, tlabels = gen.compute_example(workdir=workdir)
        if os.path.isdir(workdir):
            rmtree(workdir)
    else:
        # Load a precomputed example from the given HDF5 file.
        file = h5.File(args.filename, "r")
        data = file['data'][:]
        vrms = file['vrms'][:]
        vp = file['vp'][:]
        valid = file['valid'][:]
        tlabels = file['tlabels'][:]
        file.close()
    # Post-process (mute direct arrival, add random augmentations) and plot.
    vp = np.stack([vp] * vp.shape[0], axis=1)
    data = mute_direct(data, vp[0, 0], pars)
    data = random_time_scaling(data, pars.dt * pars.resampling, emin=-2, emax=2)
    data = random_noise(data, 0.02)
    random_static(data, 2)
    plot_one_example(data, vrms, vp, tlabels, pars)
| 0 | 0 | 0 |
7ab9af606a4ec26dc2e1ce9d28c2d6a04ba6f60b | 4,136 | py | Python | scripts/extract-omicsdi-datasets.py | EMBL-EBI-TSI/RDSDS-Indexer | 9207d99ee34410fbeb4c518b5a6e49a655ca7a83 | [
"Apache-2.0"
] | null | null | null | scripts/extract-omicsdi-datasets.py | EMBL-EBI-TSI/RDSDS-Indexer | 9207d99ee34410fbeb4c518b5a6e49a655ca7a83 | [
"Apache-2.0"
] | null | null | null | scripts/extract-omicsdi-datasets.py | EMBL-EBI-TSI/RDSDS-Indexer | 9207d99ee34410fbeb4c518b5a6e49a655ca7a83 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
import sys
import argparse
import datetime
import textwrap
import requests
import json
import csv
import urllib
from pprint import pprint
from settings import OMICSDI, OMICSDI_HEADERS, PATHS
DATA = []
def request_url(URL):
"""Request URL and return JSON payload"""
r = requests.get(URL)
if r.status_code == requests.codes.ok:
return json.loads(r.text)
else:
r.raise_for_status()
def get_arrayexpress_datasets(dataset, search_data):
"""Custom extractor for Arrayexpress datasets"""
for d in search_data['datasets']:
did = d["id"]
sub_dir = did.split('-')[1]
pubDate = datetime.datetime.strptime(d["publicationDate"], '%Y%m%d')
row = {
'dataset': dataset,
'id': did,
'pub_date': datetime.datetime.strftime(pubDate, '%Y-%m-%d'),
'dataset_url': OMICSDI[dataset]['dataset_url'].format(did),
'omicsdi_url': OMICSDI[dataset]['omicsdi_url'].format(did),
'omicsdi_api_url': OMICSDI[dataset]['omicsdi_api_url'].format(did),
'local_path': PATHS[dataset]['file'][0].format("/".join([sub_dir, did]))
}
DATA.append(row)
def get_pride_datasets(dataset, search_data):
"""Custom extractor for PRIDE datasets"""
# TODO: Extract external links from individual dataset json file_versions
for d in search_data['datasets']:
did = d["id"]
pubDate = datetime.datetime.strptime(d["publicationDate"], '%Y%m%d')
local_path_postfix = "/".join([str(pubDate.year), str(pubDate.month), did])
row = {
'dataset': dataset,
'id': did,
'pub_date': datetime.datetime.strftime(pubDate, '%Y-%m-%d'),
'dataset_url': OMICSDI[dataset]['dataset_url'].format(did),
'omicsdi_url': OMICSDI[dataset]['omicsdi_url'].format(did),
'omicsdi_api_url': OMICSDI[dataset]['omicsdi_api_url'].format(did),
'local_path': PATHS[dataset]['file'][0].format(local_path_postfix)
}
DATA.append(row)
def get_generic_datasets(dataset, search_data):
"""Generic extractor for datasets"""
for d in search_data['datasets']:
did = d["id"]
pubDate = datetime.datetime.strptime(d["publicationDate"], '%Y%m%d')
row = {
'dataset': dataset,
'id': did,
'pub_date': datetime.datetime.strftime(pubDate, '%Y-%m-%d'),
'dataset_url': OMICSDI[dataset]['dataset_url'].format(did),
'omicsdi_url': OMICSDI[dataset]['omicsdi_url'].format(did),
'omicsdi_api_url': OMICSDI[dataset]['omicsdi_api_url'].format(did),
'local_path': PATHS[dataset]['file'][0].format(did)
}
DATA.append(row)
def get_datasets(dataset=None, start=0, size=100):
    """Page through the OMICSDI search API for *dataset*.

    Fetches one page of `size` results starting at `start`, dispatches it to
    the repository-specific extractor (which appends rows to the module
    global DATA), and recurses until all `count` results have been fetched.
    """
    URL = OMICSDI['base_url']
    query = urllib.parse.quote_plus(OMICSDI[dataset]['query'])
    URL = URL + "&".join(["query=%s" % query, "start=%s" % start, "size=%s" % size])
    search_data = request_url(URL)
    count = search_data['count']
    # BUG FIX: the page size was hard-coded as 100 here and in the paging
    # logic below, so calling with size != 100 mis-reported progress and
    # skipped/duplicated pages. Use the `size` parameter consistently.
    fraction_done = (start + size) / count
    print("Requested: {} ({:.2%} completed)".format(URL, fraction_done))
    if dataset == "pride":
        get_pride_datasets(dataset, search_data)
    elif dataset == "arrayexpress":
        get_arrayexpress_datasets(dataset, search_data)
    else:
        get_generic_datasets(dataset, search_data)
    if count > start + size:
        get_datasets(dataset, start + size, size)
def export_csv(dataset, output):
"""Export dataset extract as CSV"""
filename = '%s/%s.csv' % (output, dataset)
with open(filename, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=OMICSDI_HEADERS)
writer.writeheader()
writer.writerows(DATA)
if __name__ == "__main__":
main() | 33.901639 | 82 | 0.677708 | #!/usr/bin/env python3
import os
import sys
import argparse
import datetime
import textwrap
import requests
import json
import csv
import urllib
from pprint import pprint
from settings import OMICSDI, OMICSDI_HEADERS, PATHS
DATA = []
def request_url(URL):
    """GET *URL* and return the response body parsed as JSON.

    Raises requests.HTTPError (via raise_for_status) on 4xx/5xx responses.
    """
    response = requests.get(URL)
    if response.status_code != requests.codes.ok:
        response.raise_for_status()
    else:
        return json.loads(response.text)
def get_arrayexpress_datasets(dataset, search_data):
    """Custom extractor for Arrayexpress datasets.

    Appends one row per entry in search_data['datasets'] to the module
    global DATA. ArrayExpress-specific detail: the local path is laid out
    as <middle-token-of-accession>/<accession> (e.g. E-MTAB-123 -> MTAB/...).
    """
    for d in search_data['datasets']:
        did = d["id"]
        # Second dash-separated token of the accession selects the sub-dir.
        sub_dir = did.split('-')[1]
        pubDate = datetime.datetime.strptime(d["publicationDate"], '%Y%m%d')
        row = {
            'dataset': dataset,
            'id': did,
            'pub_date': datetime.datetime.strftime(pubDate, '%Y-%m-%d'),
            'dataset_url': OMICSDI[dataset]['dataset_url'].format(did),
            'omicsdi_url': OMICSDI[dataset]['omicsdi_url'].format(did),
            'omicsdi_api_url': OMICSDI[dataset]['omicsdi_api_url'].format(did),
            'local_path': PATHS[dataset]['file'][0].format("/".join([sub_dir, did]))
        }
        DATA.append(row)
def get_pride_datasets(dataset, search_data):
    """Custom extractor for PRIDE datasets.

    Appends one row per entry in search_data['datasets'] to the module
    global DATA. PRIDE-specific detail: the local path is laid out as
    <publication-year>/<publication-month>/<accession>.
    """
    # TODO: Extract external links from individual dataset json file_versions
    for d in search_data['datasets']:
        did = d["id"]
        pubDate = datetime.datetime.strptime(d["publicationDate"], '%Y%m%d')
        local_path_postfix = "/".join([str(pubDate.year), str(pubDate.month), did])
        row = {
            'dataset': dataset,
            'id': did,
            'pub_date': datetime.datetime.strftime(pubDate, '%Y-%m-%d'),
            'dataset_url': OMICSDI[dataset]['dataset_url'].format(did),
            'omicsdi_url': OMICSDI[dataset]['omicsdi_url'].format(did),
            'omicsdi_api_url': OMICSDI[dataset]['omicsdi_api_url'].format(did),
            'local_path': PATHS[dataset]['file'][0].format(local_path_postfix)
        }
        DATA.append(row)
def get_generic_datasets(dataset, search_data):
    """Fallback extractor for repositories without custom path rules.

    Appends one row per search result to the module global DATA; the local
    path template is filled with the bare accession.
    """
    for entry in search_data['datasets']:
        accession = entry["id"]
        published = datetime.datetime.strptime(entry["publicationDate"], '%Y%m%d')
        meta = OMICSDI[dataset]
        record = {}
        record['dataset'] = dataset
        record['id'] = accession
        record['pub_date'] = datetime.datetime.strftime(published, '%Y-%m-%d')
        record['dataset_url'] = meta['dataset_url'].format(accession)
        record['omicsdi_url'] = meta['omicsdi_url'].format(accession)
        record['omicsdi_api_url'] = meta['omicsdi_api_url'].format(accession)
        record['local_path'] = PATHS[dataset]['file'][0].format(accession)
        DATA.append(record)
def get_datasets(dataset=None, start=0, size=100):
    """Page through the OMICSDI search API for *dataset*.

    Fetches one page of `size` results starting at `start`, dispatches it to
    the repository-specific extractor (which appends rows to the module
    global DATA), and recurses until all `count` results have been fetched.
    """
    URL = OMICSDI['base_url']
    query = urllib.parse.quote_plus(OMICSDI[dataset]['query'])
    URL = URL + "&".join(["query=%s" % query, "start=%s" % start, "size=%s" % size])
    search_data = request_url(URL)
    count = search_data['count']
    # BUG FIX: the page size was hard-coded as 100 here and in the paging
    # logic below, so calling with size != 100 mis-reported progress and
    # skipped/duplicated pages. Use the `size` parameter consistently.
    fraction_done = (start + size) / count
    print("Requested: {} ({:.2%} completed)".format(URL, fraction_done))
    if dataset == "pride":
        get_pride_datasets(dataset, search_data)
    elif dataset == "arrayexpress":
        get_arrayexpress_datasets(dataset, search_data)
    else:
        get_generic_datasets(dataset, search_data)
    if count > start + size:
        get_datasets(dataset, start + size, size)
def export_csv(dataset, output):
    """Write every accumulated DATA row to <output>/<dataset>.csv,
    using OMICSDI_HEADERS as the column order."""
    target = '%s/%s.csv' % (output, dataset)
    with open(target, 'w') as handle:
        table = csv.DictWriter(handle, fieldnames=OMICSDI_HEADERS)
        table.writeheader()
        table.writerows(DATA)
def main():
    """CLI entry point: scrape one repository's datasets and export a CSV."""
    description = """\
    Scrape OmicsDI for dataset paths
    """
    parser = argparse.ArgumentParser(
        description=textwrap.dedent(description)
    )
    # NOTE(review): the -o/--output help text duplicates the positional's
    # ("Dataset Repository") -- looks like a copy-paste slip; it is really
    # the output directory (default "data").
    parser.add_argument('dataset', help='Dataset Repository')
    parser.add_argument('-o', '--output', default='data', help='Dataset Repository')
    args = parser.parse_args()
    # Only repositories declared in settings.OMICSDI are scrapeable.
    if OMICSDI.get(args.dataset, None) is not None:
        get_datasets(args.dataset)
        export_csv(args.dataset, args.output)
    else:
        print("Error: Dataset metadata does not exist in settings.py")
        sys.exit(1)
if __name__ == "__main__":
main() | 528 | 0 | 23 |
91f312cb8f689ea616bdbc2a492a8af9c7a64e82 | 291 | py | Python | gum/apps.py | marcosgabarda/django-gum | 796a496e95391aab9e462bde6d8f775534d5f6c6 | [
"MIT"
] | 16 | 2015-05-04T18:47:33.000Z | 2021-02-03T17:10:40.000Z | gum/apps.py | marcosgabarda/django-gum | 796a496e95391aab9e462bde6d8f775534d5f6c6 | [
"MIT"
] | 4 | 2015-09-08T14:48:31.000Z | 2016-09-09T09:49:41.000Z | gum/apps.py | marcosgabarda/django-gum | 796a496e95391aab9e462bde6d8f775534d5f6c6 | [
"MIT"
] | 2 | 2015-05-04T18:39:23.000Z | 2016-04-18T14:35:47.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
| 22.384615 | 59 | 0.71134 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class GumConfig(AppConfig):
    """Django application configuration for the `gum` app."""
    name = 'gum'
    verbose_name = 'Gum'
    def ready(self):
        # Imported lazily: model-level code is only safe to import once
        # Django's app registry is fully populated (i.e. inside ready()).
        from gum.models import handle_indexer_registrations
        handle_indexer_registrations()
| 94 | 75 | 23 |
c3bd282f5426ddf24c1a77eb3931901a51a6ba57 | 305 | py | Python | ribosome.py | mathandcodes/algenomics | f036e3f15e1bbae081494f274494f8af36125126 | [
"MIT"
] | null | null | null | ribosome.py | mathandcodes/algenomics | f036e3f15e1bbae081494f274494f8af36125126 | [
"MIT"
] | null | null | null | ribosome.py | mathandcodes/algenomics | f036e3f15e1bbae081494f274494f8af36125126 | [
"MIT"
] | 1 | 2021-05-10T05:26:09.000Z | 2021-05-10T05:26:09.000Z | # Aminoacidos
AMINOACIDS = {
"AAA": "lisina",
"AAC": "asparagina",
"AGC": "serina"
}
# Codons
CODONS = list(AMINOACIDS.keys()) | 17.941176 | 54 | 0.616393 | # Aminoacidos
# Maps RNA codons (nucleotide triplets) to amino-acid names (values are
# the Spanish names used throughout this project).
AMINOACIDS = {
    "AAA": "lisina",
    "AAC": "asparagina",
    "AGC": "serina"
}
# Codons currently known to the translation table above.
CODONS = list(AMINOACIDS.keys())
class Ribosome:
    """Translates RNA sequences into chains of amino acids."""

    @staticmethod
    def translation(sequence: str):
        """RNA to protein.

        Reads *sequence* codon by codon (3 nucleotides at a time) and maps
        each codon through the module-level AMINOACIDS table. An incomplete
        trailing codon is ignored, as are codons not (yet) present in the
        table. Implements the former TODO ("complete amino acids and do
        the translation"); previously this was a stub returning None.
        """
        usable = len(sequence) - len(sequence) % 3
        codons = (sequence[i:i + 3] for i in range(0, usable, 3))
        return [AMINOACIDS[codon] for codon in codons if codon in AMINOACIDS]
7c800a7eafc9d2d838f4b7e89d2170c96f7ce8b0 | 4,253 | py | Python | object-detection/yolov2/valid.py | shikisawamura/nnabla-examples | baf4e4cc620dedbf4368683325c0fb868676850d | [
"Apache-2.0"
] | null | null | null | object-detection/yolov2/valid.py | shikisawamura/nnabla-examples | baf4e4cc620dedbf4368683325c0fb868676850d | [
"Apache-2.0"
] | null | null | null | object-detection/yolov2/valid.py | shikisawamura/nnabla-examples | baf4e4cc620dedbf4368683325c0fb868676850d | [
"Apache-2.0"
] | 1 | 2020-04-25T06:11:28.000Z | 2020-04-25T06:11:28.000Z | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was forked from https://github.com/marvis/pytorch-yolo2 ,
# licensed under the MIT License (see LICENSE.external for more details).
import dataset
import utils
import numpy as np
import os
import itertools
from multiprocessing.pool import ThreadPool
import nnabla
import nnabla_ext.cuda
import yolov2
from arg_utils import Yolov2OptionValid
args = Yolov2OptionValid().parse_args()
if __name__ == '__main__':
weightfile = args.weight
outdir = args.output
outfile = 'comp4_det_test_'
valid(weightfile, outfile, outdir)
| 35.441667 | 127 | 0.624971 | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was forked from https://github.com/marvis/pytorch-yolo2 ,
# licensed under the MIT License (see LICENSE.external for more details).
import dataset
import utils
import numpy as np
import os
import itertools
from multiprocessing.pool import ThreadPool
import nnabla
import nnabla_ext.cuda
import yolov2
from arg_utils import Yolov2OptionValid
args = Yolov2OptionValid().parse_args()
def valid(weightfile, outfile, outdir):
    """Run YOLO v2 detection over the validation image list.

    Writes one detection file per class into `outdir`, named
    '<outfile><classname>.txt', with one line per detection:
    '<image-id> <prob> <x1> <y1> <x2> <y2>'.

    Relies on the module-level `args` (parsed at import time) for the
    validation list path, class names, network geometry and thresholds.
    """
    pool = ThreadPool(1)  # NOTE(review): created but never used in this function
    valid_images = args.valid
    name_list = args.names
    prefix = outdir
    names = utils.load_class_names(name_list)
    utils.set_default_context_by_args(args)
    # The validation list holds one image path per line.
    with open(valid_images) as fp:
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]
    # Build the YOLO v2 network
    def create_losses(batchsize, imheight, imwidth, test=True):
        import gc
        gc.collect()
        nnabla_ext.cuda.clear_memory_cache()
        anchors = args.num_anchors
        classes = args.num_classes
        yolo_x = nnabla.Variable((batchsize, 3, imheight, imwidth))
        yolo_features = yolov2.yolov2(yolo_x, anchors, classes, test=test)
        return yolo_x, yolo_features
    yolo_x_nnabla, yolo_features_nnabla = create_losses(
        args.valid_batchsize, args.height, args.width, test=True)
    nnabla.load_parameters(weightfile)
    valid_dataset = dataset.data_iterator_yolo(valid_images, args,
                                               train=False,
                                               shape=(args.width, args.height), shuffle=False, batch_size=args.valid_batchsize)
    assert(args.valid_batchsize > 1)
    # One output file handle per class.
    fps = [0]*args.num_classes
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    for i in range(args.num_classes):
        buf = '%s/%s%s.txt' % (prefix, outfile, names[i])
        fps[i] = open(buf, 'w')
    lineId = 0
    total_samples = len(valid_files)
    # Ceiling division: last batch may be partially padded by the iterator.
    total_batches = (total_samples+args.valid_batchsize -
                     1)//args.valid_batchsize
    for each_batch in range(0, total_batches):
        ret = valid_dataset.next()
        data, target = ret
        yolo_x_nnabla.d = data
        yolo_features_nnabla.forward(clear_buffer=True)
        batch_boxes = utils.get_region_boxes(
            yolo_features_nnabla.d, args.conf_thresh, args.num_classes, args.anchors, args.num_anchors, 0, 1)
        for i in range(yolo_features_nnabla.d.shape[0]):
            # Stop once every real sample has been consumed (padding batch).
            if lineId >= total_samples:
                print("Reached End of total_samples")
                break
            fileId = os.path.basename(valid_files[lineId]).split('.')[0]
            width, height = utils.get_image_size(valid_files[lineId])
            print(valid_files[lineId])
            lineId += 1
            boxes = batch_boxes[i]
            boxes = utils.nms(boxes, args.nms_thresh)
            for box in boxes:
                # box[0..3] are centre-x, centre-y, width, height scaled by
                # the image size here to get absolute corner coordinates.
                x1 = (box[0] - box[2]/2.0) * width
                y1 = (box[1] - box[3]/2.0) * height
                x2 = (box[0] + box[2]/2.0) * width
                y2 = (box[1] + box[3]/2.0) * height
                det_conf = box[4]
                # After index 4 the box carries (class-conf, class-id) pairs.
                for j in range((len(box)-5)//2):
                    cls_conf = box[5+2*j]
                    cls_id = box[6+2*j]
                    prob = det_conf * cls_conf
                    fps[cls_id].write('%s %f %f %f %f %f\n' %
                                      (fileId, prob, x1, y1, x2, y2))
    for i in range(args.num_classes):
        fps[i].close()
if __name__ == '__main__':
    # Run validation with the weight file / output dir taken from the CLI
    # options parsed at import time (see `args` above).
    weightfile = args.weight
    outdir = args.output
    outfile = 'comp4_det_test_'  # VOC-style detection-result file prefix
    valid(weightfile, outfile, outdir)
| 3,071 | 0 | 23 |
37daf27992d4c6c9ee9293de58cfda6f5c64b537 | 490 | py | Python | conftest.py | gtback/cached-property | b4e96315a6b552e85461a96e6ab8afafb0f4331b | [
"BSD-3-Clause"
] | null | null | null | conftest.py | gtback/cached-property | b4e96315a6b552e85461a96e6ab8afafb0f4331b | [
"BSD-3-Clause"
] | null | null | null | conftest.py | gtback/cached-property | b4e96315a6b552e85461a96e6ab8afafb0f4331b | [
"BSD-3-Clause"
] | null | null | null |
import sys

# Feature flags used to skip test modules that need newer syntax.
# FIX: compare full version tuples instead of `version_info[0] == 3 and
# version_info[1] >= N` -- the old test went False on any future major
# version (e.g. Python 4) even though the features still exist.

# Whether "import asyncio" works
has_asyncio = sys.version_info >= (3, 4)

# Whether the async and await keywords work
has_async_await = sys.version_info >= (3, 5)

print("conftest.py", has_asyncio, has_async_await)

collect_ignore = []

if not has_asyncio:
    collect_ignore.append("tests/test_coroutine_cached_property.py")

if not has_async_await:
    collect_ignore.append("tests/test_async_cached_property.py")
| 23.333333 | 73 | 0.755102 |
import sys

# Feature flags used to skip test modules that need newer syntax.
# FIX: compare full version tuples instead of `version_info[0] == 3 and
# version_info[1] >= N` -- the old test went False on any future major
# version (e.g. Python 4) even though the features still exist.

# Whether "import asyncio" works
has_asyncio = sys.version_info >= (3, 4)

# Whether the async and await keywords work
has_async_await = sys.version_info >= (3, 5)

print("conftest.py", has_asyncio, has_async_await)

collect_ignore = []

if not has_asyncio:
    collect_ignore.append("tests/test_coroutine_cached_property.py")

if not has_async_await:
    collect_ignore.append("tests/test_async_cached_property.py")
| 0 | 0 | 0 |
10bc4750a98ccf1bf4e423f0210a14adfd69da9c | 1,256 | py | Python | sellshop/blog/models.py | TalehIlqar/E-commerce | fca96703412c5f63f2fe00a952154d8ab2e1b3bd | [
"MIT"
] | null | null | null | sellshop/blog/models.py | TalehIlqar/E-commerce | fca96703412c5f63f2fe00a952154d8ab2e1b3bd | [
"MIT"
] | null | null | null | sellshop/blog/models.py | TalehIlqar/E-commerce | fca96703412c5f63f2fe00a952154d8ab2e1b3bd | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from sellshop.utils.base_models import BaseModel
from django.utils.translation import ugettext_lazy as _
| 40.516129 | 125 | 0.719745 | from django.db import models
from django.utils import timezone
from sellshop.utils.base_models import BaseModel
from django.utils.translation import ugettext_lazy as _
class Blog(BaseModel):
    """A blog post authored by a user and attached to a product version."""
    title = models.CharField("Title", max_length=30, help_text="Max 30 char.")
    description = models.TextField(verbose_name="Description")
    creator = models.ForeignKey("user.User", on_delete=models.CASCADE)
    # NOTE(review): default="" on a ForeignKey looks wrong -- an FK default
    # should be a primary key value or None; confirm against the migrations.
    product = models.ForeignKey(
        "product.ProductVersion", on_delete=models.CASCADE, default="")
    image = models.ImageField(verbose_name="Image", upload_to="blogs/")
    def __str__(self) -> str:
        return f"{self.title}"
class Comment(BaseModel):
    """A user comment on a blog post; self-referencing `reply` allows
    threaded replies, with `is_main` marking top-level comments."""
    user = models.ForeignKey(
        "user.User", verbose_name="User", on_delete=models.CASCADE)
    description = models.TextField(verbose_name="Description")
    blog = models.ForeignKey(
        Blog, on_delete=models.CASCADE, null=True, blank=True, related_name="blogs_comment")
    # NOTE(review): default="" on a ForeignKey looks wrong (should be a pk
    # or None) -- same pattern as Blog.product; confirm against migrations.
    reply = models.ForeignKey(
        'self', on_delete=models.CASCADE, null=True, blank=True, default="", related_name="replies", verbose_name="Reply to")
    is_main = models.BooleanField(verbose_name="Is Main?", default=False)
    def __str__(self) -> str:
        return f"{self.description}"
| 76 | 964 | 46 |
d3eac75ca0b40cfa1cf4b64cb31bb4f033c73613 | 5,936 | py | Python | grasso/fat32.py | joakimfors/grasso | a31ed8d83739ed5e90e648c3e572d425149e6ee8 | [
"MIT"
] | 5 | 2016-10-27T23:56:53.000Z | 2021-06-11T21:05:27.000Z | grasso/fat32.py | joakimfors/grasso | a31ed8d83739ed5e90e648c3e572d425149e6ee8 | [
"MIT"
] | 1 | 2018-01-13T16:09:17.000Z | 2018-01-13T16:09:17.000Z | grasso/fat32.py | joakimfors/grasso | a31ed8d83739ed5e90e648c3e572d425149e6ee8 | [
"MIT"
] | 2 | 2016-04-11T17:14:14.000Z | 2020-04-18T12:00:50.000Z | # -*- encoding: utf-8 -*-
#
# Grasso - a FAT filesystem parser
#
# Copyright 2011 Emanuele Aina <em@nerd.ocracy.org>
#
# Released under the term of a MIT-style license, see LICENSE
# for details.
from struct import unpack
| 38.797386 | 78 | 0.48467 | # -*- encoding: utf-8 -*-
#
# Grasso - a FAT filesystem parser
#
# Copyright 2011 Emanuele Aina <em@nerd.ocracy.org>
#
# Released under the term of a MIT-style license, see LICENSE
# for details.
from struct import unpack
class ExtendedBIOSParameterBlock32(object):
    """FAT32 Extended BIOS Parameter Block, parsed from the current
    position of the filesystem source stream.

    Reads `length` (476) bytes and unpacks them little-endian per
    `unpacker`; field names follow the on-disk EBPB layout.
    """
    length = 476
    unpacker = "<IHHIHH12sBBB4s11s8s420sH"
    def __init__(self, filesystem):
        # Remember where in the stream this structure begins.
        self.filesystem = filesystem
        self.offset = self.filesystem.source.tell()
        data = unpack(self.unpacker, self.filesystem.source.read(self.length))
        self.sector_per_fat = data[0]
        self.mirroring_flags = data[1]
        self.version = data[2]
        self.root_directory_cluster_number = data[3]
        self.file_system_information_sector_number = data[4]
        self.backup_boot_sector_number = data[5]
        self.reserved = list(data[6])
        self.physical_drive_number = data[7]
        self.reserved_flags = data[8]
        self.extended_boot_signature = data[9]
        self.volume_id = list(data[10])
        self.volume_label = data[11]
        self.file_system_type = data[12]
        self.boot_code = data[13]
        self.signature = data[14]
    def __repr__(self):
        # boot_code is deliberately elided ("[...]") from the repr.
        return "ExtendedBIOSParameterBlock32(\n" \
            " offset=%d,\n" \
            " length=%d,\n" \
            " sector_per_fat=%d,\n" \
            " mirroring_flags=%d,\n" \
            " version=%d,\n" \
            " root_directory_cluster_number=%d,\n" \
            " file_system_information_sector_number=%d,\n" \
            " backup_boot_sector_number=%d,\n" \
            " reserved=%s,\n" \
            " physical_drive_number=%d,\n" \
            " reserved_flags=%d,\n" \
            " extended_boot_signature=%d,\n" \
            " volume_id=%s,\n" \
            " volume_label='%s',\n" \
            " file_system_type='%s',\n" \
            " boot_code=[...],\n" \
            " signature=%d,\n" \
            ")" % (
                self.offset,
                self.length,
                self.sector_per_fat,
                self.mirroring_flags,
                self.version,
                self.root_directory_cluster_number,
                self.file_system_information_sector_number,
                self.backup_boot_sector_number,
                self.reserved,
                self.physical_drive_number,
                self.reserved_flags,
                self.extended_boot_signature,
                self.volume_id,
                self.volume_label,
                self.file_system_type,
                self.signature
            )
class FileSystemInformationSector32(object):
    """FAT32 FSInfo sector (one full 512-byte sector), parsed from the
    current position of the filesystem source stream.

    The unpack format "<4s480s4sII12s4s" covers exactly 512 bytes:
    lead signature, 480 reserved bytes, second signature, the free-cluster
    count, the most recently allocated cluster, 12 reserved bytes and the
    trailing signature.
    """
    length = 512
    unpacker = "<4s480s4sII12s4s"
    def __init__(self, filesystem):
        # Remember where in the stream this sector begins.
        self.filesystem = filesystem
        self.offset = self.filesystem.source.tell()
        data = unpack(self.unpacker, self.filesystem.source.read(self.length))
        self.signature_1 = list(data[0])
        self.reserved_1 = list(data[1])
        self.signature_2 = list(data[2])
        self.free_cluster_count = data[3]
        self.most_recent_allocated_cluster_number = data[4]
        self.reserved_2 = list(data[5])
        self.signature_3 = list(data[6])
    def __repr__(self):
        # reserved_1 (480 bytes) is deliberately elided ("[...]") here.
        return "FileSystemInformationSector32(\n" \
            " offset=%d,\n" \
            " length=%d,\n" \
            " signature_1=%s,\n" \
            " reserved_1=[...],\n" \
            " signature_2=%s,\n" \
            " free_cluster_count=%d,\n" \
            " most_recent_allocated_cluster_number=%d,\n" \
            " reserved_2=%s,\n" \
            " signature_3=%s,\n" \
            ")" % (
                self.offset,
                self.length,
                self.signature_1,
                self.signature_2,
                self.free_cluster_count,
                self.most_recent_allocated_cluster_number,
                self.reserved_2,
                self.signature_3
            )
class FAT32(object):
    """In-memory copy of a FAT32 file allocation table.

    Reads `length` bytes from the filesystem source: an 8-byte header
    (media descriptor, three 0xFF bytes, the end-of-cluster marker) followed
    by one little-endian 32-bit entry per cluster, starting at cluster 2.
    Only the low 28 bits of each entry are significant in FAT32.
    """
    def __init__(self, filesystem, length):
        self.length = length
        self.filesystem = filesystem
        self.offset = self.filesystem.source.tell()
        source = self.filesystem.source
        self.media_descriptor = unpack('<B', source.read(1))[0]
        self.ones = unpack('<BBB', source.read(3))
        self.end_of_cluster = unpack('<I', source.read(4))[0]
        self.next_clusters = {}
        self.bad_clusters = {}
        # BUG FIX: `self.length / 4` is float division on Python 3, which
        # made the range() below raise TypeError; use floor division.
        entries = self.length // 4
        for i in range(2, entries):
            # Mask to the 28 significant bits of a FAT32 entry.
            v = unpack('<I', source.read(4))[0] & 0x0FFFFFFF
            if not v:
                continue  # 0x0000000 = free cluster
            if 0x00000002 <= v and v <= 0x0FFFFFEF:
                self.next_clusters[i] = v
            if 0x0FFFFFF8 <= v and v <= 0x0FFFFFFF:
                self.next_clusters[i] = None  # end-of-chain marker
            if v == 0x0FFFFFF7:
                self.bad_clusters[i] = v
    def get_chain(self, cluster):
        """Yield the cluster numbers of the chain starting at `cluster`,
        following next_clusters until an end-of-chain (None)."""
        c = cluster
        while c:
            yield c
            c = self.next_clusters[c]
    def __repr__(self):
        return "FAT32(\n" \
            " offset=%d,\n" \
            " length=%d,\n" \
            " media_descriptor=%d,\n" \
            " end_of_cluster=0x%X,\n" \
            " next_clusters=[...],\n" \
            " bad_clusters=[...],\n" \
            ")" % (
                self.offset,
                self.length,
                self.media_descriptor,
                self.end_of_cluster,
            )
| 5,303 | 261 | 149 |
63d1b146364c0bceb396641198af58e67666ee0e | 2,247 | py | Python | src/client.py | elros28/mtg-data | 0248d41629b53d15aa29a2f3aee0c5b0c22b28e3 | [
"MIT"
] | 2 | 2021-07-23T10:09:53.000Z | 2021-11-07T16:55:28.000Z | src/client.py | elros28/mtg-data | 0248d41629b53d15aa29a2f3aee0c5b0c22b28e3 | [
"MIT"
] | 1 | 2021-07-21T12:44:18.000Z | 2021-07-26T12:36:26.000Z | src/client.py | elros28/mtg-data | 0248d41629b53d15aa29a2f3aee0c5b0c22b28e3 | [
"MIT"
] | 1 | 2021-11-03T21:27:54.000Z | 2021-11-03T21:27:54.000Z | from sqlalchemy import create_engine
import logging
logger = logging.getLogger('client.py')
class Client:
"""
Connection to the database.
The current implementation only refers to the PostgreSQL
database, however, this could be easily enhanced to any
database at all, including cloud.
"""
def __init__(self, params):
"""
Connect to the database.
Use the information contained in the params.py file
to connect to the postgreSQL database.
"""
try:
self.engine = create_engine(f'postgresql+psycopg2://{params.user}:{params.password}@{params.host}/{params.database}')
self.conn = self.engine.connect()
except Exception as e:
logger.warning('Could not connect to the database on client.py file.')
logger.warning(f'Verify your credentials for {params.user}.')
logger.warning(e)
| 45.857143 | 317 | 0.646195 | from sqlalchemy import create_engine
import logging
logger = logging.getLogger('client.py')
class Client:
    """
    Connection to the database.
    The current implementation only refers to the PostgreSQL
    database, however, this could be easily enhanced to any
    database at all, including cloud.

    SECURITY: every insert below interpolates caller-supplied values
    straight into SQL via f-strings; only single quotes are escaped, and
    only for some fields. Card names etc. come from external data, so this
    is an SQL-injection risk -- should be migrated to bound parameters
    (sqlalchemy.text with :params) rather than string formatting.
    """
    def insert_card(self, uuid, name, set_code, img, card_type, cost, cmc, price_paper, price_online, rarity, color_identity):
        """Insert one card row (idempotent via ON CONFLICT DO NOTHING)."""
        # Minimal quote-escaping for the two free-text fields only.
        name = name.replace("'", "''")
        card_type = card_type.replace("'", "''")
        # Missing prices are stored as 0.
        if price_online is None:
            price_online = 0
        if price_paper is None:
            price_paper = 0
        return self.conn.execute(f"insert into card (uuid, name, set, img, type, cost, cmc, price_paper, price_online, rarity, color_identity) values ('{uuid}','{name}','{set_code}','{img}','{card_type}','{cost}','{int(cmc)}','{price_paper}','{(price_online)}','{rarity}','{color_identity}') on conflict do nothing;")
    def insert_deck(self, site, date, tournament, pos):
        """Insert one deck row; the id column is DEFAULT (serial)."""
        return self.conn.execute(f"insert into deck (id, date, site, tournament, position) values (DEFAULT,'{date}','{site}','{tournament}','{pos}') on conflict do nothing;")
    def insert_deck_card(self, deck_id, card_id, section, amount):
        """Link a card to a deck with its section (main/sideboard) and count."""
        section = section.replace("'", "''")
        return self.conn.execute(f"insert into deck_card (deck_id, card_id, section, amount) values ('{deck_id}','{card_id}','{section}','{amount}') on conflict do nothing;")
    def select_max_deck_id(self):
        """Return the highest deck id currently in the deck table."""
        max_id = self.conn.execute("select max(id) from deck")
        # NOTE(review): .items()[0][1] relies on legacy SQLAlchemy row
        # behaviour; confirm it still works on the pinned version.
        return int(max_id.fetchone().items()[0][1])
    def __init__(self, params):
        """
        Connect to the database.
        Use the information contained in the params.py file
        to connect to the postgreSQL database.
        """
        try:
            self.engine = create_engine(f'postgresql+psycopg2://{params.user}:{params.password}@{params.host}/{params.database}')
            self.conn = self.engine.connect()
        except Exception as e:
            # Connection failures are logged but not re-raised; later method
            # calls will fail with AttributeError on self.conn.
            logger.warning('Could not connect to the database on client.py file.')
            logger.warning(f'Verify your credentials for {params.user}.')
            logger.warning(e)
| 1,218 | 0 | 107 |
6950c7906927de261c207816946c85bb7a602e88 | 455 | py | Python | PythonExercicios/ex016.py | gabjohann/python_3 | 380cb622669ed82d6b22fdd09d41f02f1ad50a73 | [
"MIT"
] | null | null | null | PythonExercicios/ex016.py | gabjohann/python_3 | 380cb622669ed82d6b22fdd09d41f02f1ad50a73 | [
"MIT"
] | null | null | null | PythonExercicios/ex016.py | gabjohann/python_3 | 380cb622669ed82d6b22fdd09d41f02f1ad50a73 | [
"MIT"
] | null | null | null | # Crie um programa que leia um número real qualquer pelo teclado e mostre na tela a sua porção inteira
# Ex.:
# Digite um número: 6.127
# O número 6.127 tem a parte inteira 6
from math import trunc
num = float(input('Digite um número: '))
print('O número {} tem a parte inteira {}'.format(num, trunc(num)))
'''
-> outra forma de resolução
num = float(input('Digite um número: '))
print('O número {} tem a parte inteira {}'.format(num, int(num)))
'''
| 25.277778 | 102 | 0.685714 | # Crie um programa que leia um número real qualquer pelo teclado e mostre na tela a sua porção inteira
# Exercise: read any real number from the keyboard and print its integer
# part. Sample session (program text is in Portuguese):
# Digite um número: 6.127
# O número 6.127 tem a parte inteira 6
from math import trunc
num = float(input('Digite um número: '))
# trunc() drops the fractional part (truncates toward zero).
print('O número {} tem a parte inteira {}'.format(num, trunc(num)))
'''
-> outra forma de resolução
num = float(input('Digite um número: '))
print('O número {} tem a parte inteira {}'.format(num, int(num)))
'''
| 0 | 0 | 0 |
f4bd9358c75784a5fe295f67c4684885fc88629f | 11,589 | py | Python | alipay/aop/api/domain/LeadsOrderInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/LeadsOrderInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/LeadsOrderInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| 32.553371 | 93 | 0.591854 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class LeadsOrderInfo(object):
    """Value object describing a leads order in Alipay OpenAPI payloads.

    Each field is stored in a ``_``-prefixed backing attribute and exposed
    through a plain read/write property (generated below), preserving the
    exact public interface of the original hand-written accessors while
    removing ~250 lines of duplicated boilerplate.
    """

    # Serializable fields, kept in the original declaration order (which is
    # also the insertion order of to_alipay_dict()).
    _FIELDS = (
        'biz_type', 'camp_name', 'channel', 'city_name', 'contact_mobile',
        'contact_name', 'create_time', 'ext_info', 'gift_name', 'is_answer',
        'is_x_phone', 'item_name', 'memo', 'merchant_phone',
        'reservation_record_id', 'scene_source', 'shop_city', 'shop_id',
        'shop_name', 'status', 'x_phone_effect_end', 'x_phone_effect_start',
    )

    def __init__(self):
        # Every backing attribute starts as None, exactly as before.
        for name in self._FIELDS:
            setattr(self, '_' + name, None)

    def to_alipay_dict(self):
        """Serialize to a dict; falsy fields are skipped (SDK convention)."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value:
                # Nested SDK models serialize themselves recursively.
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a LeadsOrderInfo from a response dict; None for empty input."""
        if not d:
            return None
        o = LeadsOrderInfo()
        for name in LeadsOrderInfo._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o


def _leads_order_info_property(name):
    """Create a trivial get/set property backed by the '_<name>' attribute."""
    attr = '_' + name
    return property(lambda self: getattr(self, attr),
                    lambda self, value: setattr(self, attr, value))


# Generate the 22 identical accessor pairs instead of spelling them out.
for _field in LeadsOrderInfo._FIELDS:
    setattr(LeadsOrderInfo, _field, _leads_order_info_property(_field))
del _field
| 9,347 | 2,104 | 23 |
8a7bb03a2e040749c97a36fbd0d5d752d59bf1d7 | 590 | py | Python | mytravelblog/accounts/admin.py | yetoshimo/my-travel-blog | de67dd135e66f2dda121850d54fd56fd644b9bff | [
"MIT"
] | null | null | null | mytravelblog/accounts/admin.py | yetoshimo/my-travel-blog | de67dd135e66f2dda121850d54fd56fd644b9bff | [
"MIT"
] | null | null | null | mytravelblog/accounts/admin.py | yetoshimo/my-travel-blog | de67dd135e66f2dda121850d54fd56fd644b9bff | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from mytravelblog.accounts.models import *
UserModel = get_user_model()
@admin.register(Profile)
admin.site.unregister(UserModel)
@admin.register(UserModel)
| 21.851852 | 52 | 0.740678 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from mytravelblog.accounts.models import *
UserModel = get_user_model()
class ProfileInlineAdmin(admin.StackedInline):
    # Allows the one related Profile to be edited inline on the user page.
    model = Profile
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    # Standalone admin listing for Profile records.
    list_display = ('user', 'current_country',)
    list_filter = ('user', 'current_country',)
    ordering = ('user',)
# Swap Django's stock user admin for the customised one registered below.
admin.site.unregister(UserModel)
@admin.register(UserModel)
class UserAdmin(UserAdmin):
    # NOTE(review): this class shadows the imported django UserAdmin it
    # extends; consider renaming (e.g. CustomUserAdmin) to avoid confusion.
    list_display = ('username', 'email', 'is_staff')
    inlines = (ProfileInlineAdmin,)
| 0 | 276 | 67 |
6767dda16d02bef81ed8d77ad0a5d3cc20e5e995 | 418 | py | Python | erpnext_ocr/erpnext_ocr/doctype/ocr_settings/ocr_settings.py | Compres/erpnext_ocr | 7c9398ab4f5b81a416f8d05edd12b341c080557c | [
"MIT"
] | 1 | 2020-12-05T01:41:42.000Z | 2020-12-05T01:41:42.000Z | erpnext_ocr/erpnext_ocr/doctype/ocr_settings/ocr_settings.py | mohsinalimat/erpnext_ocr | 3f78ceb60896aee80219bae3d003203f7ec7f0ae | [
"MIT"
] | null | null | null | erpnext_ocr/erpnext_ocr/doctype/ocr_settings/ocr_settings.py | mohsinalimat/erpnext_ocr | 3f78ceb60896aee80219bae3d003203f7ec7f0ae | [
"MIT"
] | 1 | 2021-04-25T02:43:33.000Z | 2021-04-25T02:43:33.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Monogramm and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.model.document import Document
| 27.866667 | 94 | 0.679426 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Monogramm and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.model.document import Document
class OCRSettings(Document):
    """Settings doctype controlling OCR behaviour."""

    def validate(self):
        """Refuse to save when the configured PDF resolution is not positive."""
        resolution_is_positive = self.pdf_resolution > 0
        if not resolution_is_positive:
            frappe.throw(
                _("PDF Resolution must be a positive integer eg 300 (high) or 200 (normal)."))
| 159 | 7 | 49 |
8dbb867533ac370e9c97ffb88733e99eb5d99fde | 6,639 | py | Python | code/Tisajokt/lambda_v2.py | usedToBeTomas/PrisonersDilemmaTournament | b5ce72e4e0b943dd2fa8cca35a191bd3b4c4d5aa | [
"MIT"
] | 23 | 2021-05-20T07:34:33.000Z | 2021-06-20T13:09:04.000Z | code/Tisajokt/lambda_v2.py | usedToBeTomas/PrisonersDilemmaTournament | b5ce72e4e0b943dd2fa8cca35a191bd3b4c4d5aa | [
"MIT"
] | 19 | 2021-05-21T04:10:55.000Z | 2021-06-13T15:17:52.000Z | code/Tisajokt/lambda_v2.py | usedToBeTomas/PrisonersDilemmaTournament | b5ce72e4e0b943dd2fa8cca35a191bd3b4c4d5aa | [
"MIT"
] | 43 | 2021-05-21T02:24:35.000Z | 2021-06-24T21:08:11.000Z | import numpy as np
import math
# lambda.py, by Tisajokt (james.and.tisa@gmail.com)
# Version 2 (Version 1 was the competition version)
# An OmegaTFT variant (see https://arxiv.org/ftp/cs/papers/0609/0609017.pdf), with improvements:
# - joss detection
# - extortionate ZD detection
# - extra random handling
# - extra alternation handling
# - exploitation of overly forgiving strategies
# - a snippet of logic inspired by natura.py (made by Josh#6441) for breaking out of mutual defection
D = 0
C = 1
MAX_JOSS_RATE = 6 # maximum frequency with which a joss can defect unprovoked, below which it's optimal to ignore them
DEADLOCK_THRESHOLD = 1 # omegaTFT anti-deadlock
LEAD_DEFECT_RANDOMNESS = 2 # defecting turn 1
COOPERATION_RANDOMNESS = -1 # cooperating consistently
SWITCH_RANDOMNESS = 1 # switching moves
DIFF_RANDOMNESS = 1 # not playing what I played
RANDOMNESS_THRESHOLD = 5 # threshold to switch to playing ALLD
NATURA_WINDOW = 7 # if there are fewer than 2 cooperations between both of us in this window of time, cooperate
NATURA_GRUDGE_LVL = 10 # don't cooperate via the above method if the opponent has this many or more defections in a row
EXPLOITABLE_FORGIVENESS = 0.5 # consider opponent exploitable if they forgive at this rate or higher
EXPLOITABLE_DEFECT_STREAK = 3 # maximum defection streak before it's considered unwise to exploit the opponent
EXPLOITATION_DELAY = 7 # turns to wait before attempting exploitation
EXPLOITATIVE_COOP_RATE = 0.25 # minimum accepted temptation cooperation rate from an exploitative opponent
EARLY_FORGIVENESS = 5 # for the first X many turns, always cooperate after opponent's cooperation
# the strategy is *technically* a lambda function now
strategy = lambda hist, mem : lambda_agent(hist, mem)
| 35.693548 | 201 | 0.700105 | import numpy as np
import math
# lambda.py, by Tisajokt (james.and.tisa@gmail.com)
# Version 2 (Version 1 was the competition version)
# An OmegaTFT variant (see https://arxiv.org/ftp/cs/papers/0609/0609017.pdf), with improvements:
# - joss detection
# - extortionate ZD detection
# - extra random handling
# - extra alternation handling
# - exploitation of overly forgiving strategies
# - a snippet of logic inspired by natura.py (made by Josh#6441) for breaking out of mutual defection
D = 0
C = 1
MAX_JOSS_RATE = 6 # maximum frequency with which a joss can defect unprovoked, below which it's optimal to ignore them
DEADLOCK_THRESHOLD = 1 # omegaTFT anti-deadlock
LEAD_DEFECT_RANDOMNESS = 2 # defecting turn 1
COOPERATION_RANDOMNESS = -1 # cooperating consistently
SWITCH_RANDOMNESS = 1 # switching moves
DIFF_RANDOMNESS = 1 # not playing what I played
RANDOMNESS_THRESHOLD = 5 # threshold to switch to playing ALLD
NATURA_WINDOW = 7 # if there are fewer than 2 cooperations between both of us in this window of time, cooperate
NATURA_GRUDGE_LVL = 10 # don't cooperate via the above method if the opponent has this many or more defections in a row
EXPLOITABLE_FORGIVENESS = 0.5 # consider opponent exploitable if they forgive at this rate or higher
EXPLOITABLE_DEFECT_STREAK = 3 # maximum defection streak before it's considered unwise to exploit the opponent
EXPLOITATION_DELAY = 7 # turns to wait before attempting exploitation
EXPLOITATIVE_COOP_RATE = 0.25 # minimum accepted temptation cooperation rate from an exploitative opponent
EARLY_FORGIVENESS = 5 # for the first X many turns, always cooperate after opponent's cooperation
def lambda_agent(hist, mem) -> int:
    """Play one turn of iterated prisoner's dilemma (OmegaTFT variant).

    Args:
        hist: 2 x turns array; row 0 holds our past moves, row 1 the
              opponent's (D == 0, C == 1).
        mem:  state from the previous call (ignored on the first turn):
              [deadlock, randomness, stimulus, response,
               (streak_D, streak_C, streak_alt), exploitable, good_faith].

    Returns:
        (move, mem) -- note: returns a tuple despite the ``-> int``
        annotation, which is kept for interface compatibility.
    """
    turns = hist.shape[1]
    ### turn 1: cooperate & initialize memory ###
    if turns == 0:
        return C, [0, 0, np.array([[0, 0], [0, 0]]), np.array([[0, 0], [0, 0]]), (0, 0, 0), True, False]
    ### turn 2: play tft ###
    if turns == 1:
        if hist[1,-1] == D:
            mem[1] += LEAD_DEFECT_RANDOMNESS
        return hist[1,-1], mem
    deadlock = mem[0]
    randomness = mem[1]
    stimulus = mem[2]
    response = mem[3]
    streak_D, streak_C, streak_alt = mem[4]
    exploitable = mem[5]
    good_faith = mem[6]
    ##### TRACKING STUFF #####
    # decrease randomness if they commit to cooperation
    if hist[1,-2] == C and hist[1,-1] == C:
        randomness += COOPERATION_RANDOMNESS
    # track their response to previous moves
    stimulus[hist[0,-2], hist[1,-2]] += 1
    response[hist[0,-2], hist[1,-2]] += hist[1,-1]
    # random detection
    stimulus_C = sum(stimulus[C,:])
    stimulus_D = sum(stimulus[D,:])
    response_C = sum(response[C,:])
    response_D = sum(response[D,:])
    total_C = sum(stimulus[:,C]) + hist[1,-1]
    total_D = turns - total_C
    # track their streak
    # done AFTER joss handling, which only applies when streak of C is broken
    streak_C_broken = 0
    if hist[1,-1] == D:
        streak_D += 1
        streak_C_broken = streak_C
        streak_C = 0
    else:
        streak_C += 1
        streak_D = 0
    # track their alternation streak
    if hist[1,-1] != hist[1,-2]:
        streak_alt += 1
    else:
        streak_alt = 0
    # calculate how responsive (tft-like) they are
    # positive value means they're more likely to cooperate after I cooperate
    tftness = 1.0 # 1.0 = tft, 0.0 = random, -1.0 = anti-tft
    if stimulus_C >= 4 and stimulus_D >= 4:
        tftness = response_C / stimulus_C - response_D / stimulus_D
    # rate at which they defect after my cooperation
    jossrate = 0.0
    if stimulus_C > 0:
        # BUGFIX: was `1 - stimulus_C / stimulus_C`, which is identically 0 and
        # made every opponent pass the joss-rate test below; the defection rate
        # after our cooperation is 1 - (their cooperations after our C)/(our C count).
        jossrate = 1 - response_C / stimulus_C
    ##### DECIDING MY MOVE, TURN 3+ #####
    ### tft (default) ###
    move = hist[1,-1]
    # standard omegaTFT stuff (cooperation randomness moved to above)
    ### cooperate (break out of deadlock) ###
    ### defect (punish unreasonable opponent) ###
    if deadlock >= DEADLOCK_THRESHOLD:
        move = C
        if deadlock == DEADLOCK_THRESHOLD:# and streak_D < 2:
            deadlock += 1
        else:
            deadlock = 0
    else:
        if hist[1,-1] != hist[1,-2]:
            randomness += SWITCH_RANDOMNESS
        if hist[1,-1] != hist[0,-1]:
            randomness += DIFF_RANDOMNESS
        if randomness >= RANDOMNESS_THRESHOLD:
            move = D
        elif hist[1,-1] != hist[1,-2]:
            deadlock += 1
        else:
            deadlock = 0
    ### defect (against random/anti-tft opponents) ###
    if tftness <= 0:
        move = D
    # opponent never cooperates after my defection, defects at an acceptable rate? they're a joss, ignore
    ### cooperate (ignore joss) ###
    if stimulus_D >= 2 and response_D == 0 and (streak_C_broken >= 3 or hist[1,-1]) and jossrate <= 1/MAX_JOSS_RATE:
        move = C
    # opponent forgives more often than not when I defect? exploit them
    ### defect (exploit overly-forgiving opponents) ###
    if stimulus_D >= 1 and (hist[0,-1], hist[1,-1]) == (C, C):
        if exploitable and (response_D / stimulus_D >= EXPLOITABLE_FORGIVENESS or stimulus_D == 1) and (streak_C >= EXPLOITATION_DELAY or np.array_equal(hist[0,-4:], [D,C,D,C]) and np.sum(hist[1,-5:]) >= 4):
            move = D
            randomness -= DIFF_RANDOMNESS
    # they're not particularly nice, don't try to exploit them
    if streak_D > EXPLOITABLE_DEFECT_STREAK:
        exploitable = False
    ### defect (match their first defection streak, ex. small improvement vs. detectives who defect twice in a row) ###
    if total_D > 0 and streak_D == total_D:
        move = D
    # detect grudges to avoid pointless forgiveness, but not for exploitative strategies
    grudged = streak_D >= NATURA_GRUDGE_LVL and not(response[D,D] == 0 and response[C,D] > 0 and response[C,D] / stimulus[C,D] >= EXPLOITATIVE_COOP_RATE)
    window = NATURA_WINDOW
    if tftness < 0.1:
        window *= 2
    # attempt good-faith cooperation to break out of mutual defection (natura forgiveness)
    # maintain good-faith as long as opponent cooperates
    good_faith = good_faith and (hist[0,-1], hist[1,-1]) == (C, C)
    if tftness > 0 and turns >= window and total_C > 0 and not grudged and sum(hist[0,-window:]) + sum(hist[1,-window:]) < 2:
        good_faith = True
    ### cooperate (natura forgiveness to avoid mutual defection) ###
    if good_faith:
        move = C
        randomness = min(randomness, RANDOMNESS_THRESHOLD - 1)
    # forgive any miscommunications in the first few turns
    ### cooperate (early tft after cooperation) ###
    if turns < EARLY_FORGIVENESS and hist[1,-1] == C:
        move = C
    # ...and on the 7th turn, increment randomness further if they led with defection (likely random)
    if turns == 7 and hist[1,0] == D and randomness + LEAD_DEFECT_RANDOMNESS >= RANDOMNESS_THRESHOLD:
        move = D
        randomness += LEAD_DEFECT_RANDOMNESS
    return move, [deadlock, randomness, stimulus, response, (streak_D, streak_C, streak_alt), exploitable, good_faith]
# the strategy is *technically* a lambda function now
strategy = lambda hist, mem : lambda_agent(hist, mem)
| 4,839 | 0 | 23 |
ef840494d74199bd6f048450a7b12bfa806e0c42 | 399 | py | Python | project/backend/migrations/0003_alter_item_img.py | ryan-lam/hackathonx2021 | 1bdab8c474c5bfa37a17703761357092521128aa | [
"MIT"
] | 1 | 2021-07-27T14:01:35.000Z | 2021-07-27T14:01:35.000Z | project/backend/migrations/0003_alter_item_img.py | ryan-lam/hackathonx2021 | 1bdab8c474c5bfa37a17703761357092521128aa | [
"MIT"
] | null | null | null | project/backend/migrations/0003_alter_item_img.py | ryan-lam/hackathonx2021 | 1bdab8c474c5bfa37a17703761357092521128aa | [
"MIT"
] | 2 | 2021-07-09T19:54:53.000Z | 2021-07-10T16:46:00.000Z | # Generated by Django 3.2.4 on 2021-06-26 00:17
from django.db import migrations, models
| 21 | 68 | 0.596491 | # Generated by Django 3.2.4 on 2021-06-26 00:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes Item.img to be nullable.
    # Must run after the previous backend migration.
    dependencies = [
        ('backend', '0002_auto_20210625_1925'),
    ]
    operations = [
        # Item.img becomes nullable at the DB level; uploads are stored
        # under MEDIA_ROOT/images/.
        migrations.AlterField(
            model_name='item',
            name='img',
            field=models.ImageField(null=True, upload_to='images/'),
        ),
    ]
| 0 | 285 | 23 |
617a110ea4fa69f6ecb3820b5787a2c4054eb5fc | 1,320 | py | Python | imbd/imbd_data_reshape.py | worldwidekatie/GPT3_Synthetic | 722ceb0b931873ea1b835dd291028d25b775adeb | [
"MIT"
] | 1 | 2022-02-28T19:49:20.000Z | 2022-02-28T19:49:20.000Z | imbd/imbd_data_reshape.py | worldwidekatie/GPT3_Synthetic | 722ceb0b931873ea1b835dd291028d25b775adeb | [
"MIT"
] | null | null | null | imbd/imbd_data_reshape.py | worldwidekatie/GPT3_Synthetic | 722ceb0b931873ea1b835dd291028d25b775adeb | [
"MIT"
] | 1 | 2022-03-06T06:40:42.000Z | 2022-03-06T06:40:42.000Z | import pandas as pd
from sklearn.model_selection import train_test_split
# Load the raw IMDB reviews and normalise the column names used downstream.
df = pd.read_csv('imbd/data/imbd_orig.csv')
df = df.rename(columns={'review': 'text', 'sentiment':'label'})
"""Truncating"""
# Cap every review at 900 characters.
var1 = list(df["text"])
var = []
for i in var1:
    if len(i) > 900:
        var.append(i[:900])
    else:
        var.append(i)
# Record post-truncation lengths for a quick sanity check in the console.
length = []
for i in var:
    length.append(len(i))
df["length"] = length
print(df["length"].describe())
length = sorted(length, reverse=True)
print(length[:50])
# Keep only the truncated text plus its label and persist the result.
df['text'] = var
df = df[['text', 'label']]
df.to_csv('imbd/data/trunc_imbd.csv', index=False)
print(df.describe())
"""Taking a smaller samples of the IMBD dataset"""
"""Test/Train split"""
# Fixed random_state keeps the 67/33 split reproducible across runs.
df_train, df_test= train_test_split(df, test_size=0.33, random_state=55)
df_train.to_csv('imbd/data/imbd_train.csv', index=False)
df_test.to_csv('imbd/data/imbd_test.csv', index=False)
"""Getting 5% Seeds"""
# NOTE(review): assumes 'label' is already numeric 0/1 -- confirm upstream.
pos = df_train[df_train['label'] == 1]
neg = df_train[df_train['label'] == 0]
print(neg.shape)
print(pos.shape)
# Draw reproducible 5% per-class seed samples (without replacement).
pos_seed_05 = pos.sample(frac=.05, replace=False, random_state=79)
print(pos_seed_05.shape)
pos_seed_05.to_csv("imbd/data/pos_seed_05.csv", index=False)
neg_seed_05 = neg.sample(frac=.05, replace=False, random_state=79)
print(neg_seed_05.shape)
neg_seed_05.to_csv("imbd/data/neg_seed_05.csv", index=False) | 24.444444 | 72 | 0.700758 | import pandas as pd
from sklearn.model_selection import train_test_split
df = pd.read_csv('imbd/data/imbd_orig.csv')
df = df.rename(columns={'review': 'text', 'sentiment':'label'})
"""Truncating"""
var1 = list(df["text"])
var = []
for i in var1:
if len(i) > 900:
var.append(i[:900])
else:
var.append(i)
length = []
for i in var:
length.append(len(i))
df["length"] = length
print(df["length"].describe())
length = sorted(length, reverse=True)
print(length[:50])
df['text'] = var
df = df[['text', 'label']]
df.to_csv('imbd/data/trunc_imbd.csv', index=False)
print(df.describe())
"""Taking a smaller samples of the IMBD dataset"""
"""Test/Train split"""
df_train, df_test= train_test_split(df, test_size=0.33, random_state=55)
df_train.to_csv('imbd/data/imbd_train.csv', index=False)
df_test.to_csv('imbd/data/imbd_test.csv', index=False)
"""Getting 5% Seeds"""
pos = df_train[df_train['label'] == 1]
neg = df_train[df_train['label'] == 0]
print(neg.shape)
print(pos.shape)
pos_seed_05 = pos.sample(frac=.05, replace=False, random_state=79)
print(pos_seed_05.shape)
pos_seed_05.to_csv("imbd/data/pos_seed_05.csv", index=False)
neg_seed_05 = neg.sample(frac=.05, replace=False, random_state=79)
print(neg_seed_05.shape)
neg_seed_05.to_csv("imbd/data/neg_seed_05.csv", index=False) | 0 | 0 | 0 |
25f014d9ff80f812cc52824ed7a98b158cb3beee | 102 | py | Python | django_extensions/tests/__init__.py | bhaugen/localecon | ee3134f701e6a786767cf7eeb165ee03f077e9da | [
"MIT"
] | 10 | 2015-02-14T14:22:31.000Z | 2022-02-22T17:40:34.000Z | django_extensions/tests/__init__.py | trbs/django-extensions | 291fa141193fb444927809dc71e3dc74eca9382b | [
"MIT"
] | 3 | 2017-02-01T16:44:04.000Z | 2018-04-02T13:48:03.000Z | django_extensions/tests/__init__.py | bhaugen/localecon | ee3134f701e6a786767cf7eeb165ee03f077e9da | [
"MIT"
] | 1 | 2021-07-01T16:08:34.000Z | 2021-07-01T16:08:34.000Z | from django_extensions.tests.utils import UTILS_TESTS
# Doctest-discovery mapping (`__test__` protocol): exposes the imported
# UTILS_TESTS doctest suite to the test runner.
__test__ = {
    'UTILS_TESTS': UTILS_TESTS,
}
| 17 | 53 | 0.764706 | from django_extensions.tests.utils import UTILS_TESTS
__test__ = {
'UTILS_TESTS': UTILS_TESTS,
}
| 0 | 0 | 0 |
ba07ae9ef71b0f882cd8fdbe4636858a19fd4974 | 459 | py | Python | 2016/day3.py | ksallberg/adventofcode | ba5b31eca58ff9afbcada7559366b5ee6c39f0f6 | [
"BSD-2-Clause"
] | 1 | 2015-12-06T16:40:10.000Z | 2015-12-06T16:40:10.000Z | 2016/day3.py | ksallberg/adventofcode | ba5b31eca58ff9afbcada7559366b5ee6c39f0f6 | [
"BSD-2-Clause"
] | null | null | null | 2016/day3.py | ksallberg/adventofcode | ba5b31eca58ff9afbcada7559366b5ee6c39f0f6 | [
"BSD-2-Clause"
] | null | null | null | instructions = [line.rstrip("\n") for line in open("input", "r")]
valids = []
for instr in instructions:
x = instr.split(" ")
y = [int(z) for z in x if z != '']
if valid(y) == True and y not in valids:
valids.append(y)
print "Valid triangles: ", len(valids)
| 19.956522 | 65 | 0.529412 | instructions = [line.rstrip("\n") for line in open("input", "r")]
valids = []
def valid(inp):
    """Return True when the three side lengths in *inp* form a valid triangle."""
    x, y, z = inp
    shortest, middle, longest = sorted((x, y, z))
    # Triangle inequality: the two shorter sides together must exceed the
    # longest; this single check covers all three pairwise conditions.
    return shortest + middle > longest
# Parse each input line into its three side lengths (repeated spaces yield
# '' entries, hence the z != '' filter) and keep each valid triangle once.
for instr in instructions:
    x = instr.split(" ")
    y = [int(z) for z in x if z != '']
    # NOTE(review): `== True` is redundant, and `y not in valids` makes this
    # O(n^2) over the input; a set of tuples would be linear.
    if valid(y) == True and y not in valids:
        valids.append(y)
print "Valid triangles: ", len(valids)
| 154 | 0 | 23 |
a5e3db457261257b47018c24c828a178bf77f16d | 4,959 | py | Python | module/debug.py | makalo/Siamese-RPN-tensorflow | efd7d61f84b0cd5c84cada7d881f766480aea322 | [
"MIT"
] | 140 | 2018-11-21T14:00:34.000Z | 2022-02-19T03:04:05.000Z | module/debug.py | WangJerry95/Siamese-RPN-tensorflow | efd7d61f84b0cd5c84cada7d881f766480aea322 | [
"MIT"
] | 21 | 2018-11-21T14:08:19.000Z | 2021-07-11T03:37:18.000Z | module/debug.py | makalo/Siamese-RPN-tensorflow | efd7d61f84b0cd5c84cada7d881f766480aea322 | [
"MIT"
] | 35 | 2018-12-06T14:34:56.000Z | 2022-03-29T12:19:03.000Z | import numpy as np
from config import cfg
import cv2
from numba import jit
@jit | 37.285714 | 121 | 0.554749 | import numpy as np
from config import cfg
import cv2
from numba import jit
@jit
def debug(img,gt,pre_cls,pre_reg,pre_score,pre_box,label,target_box,step,anchor_op):
    """Render one debug frame: decode and draw predicted boxes plus GT.

    Decodes the 10 highest-scoring anchor offsets into corner boxes, draws
    them on `img` in random colours (best box in black, its anchor in blue,
    ground truth in red -- OpenCV BGR order), then writes the frame to
    cfg.debug_dir/<step>.jpg.
    NOTE(review): mutates `gt` in place (center form -> corner form) and
    assumes `img` is a float image scaled to [0, 1] -- confirm at call sites.
    """
    img=(img*255).astype(np.uint8)
    #print('=============================================================')
    # pre_cls=pre_cls.reshape((-1,2))
    # pre_reg=pre_reg.reshape((-1,4))
    # print('===========box===========')
    # print(pre_box[np.where(label==1)])
    # print(target_box[np.where(label==1)])
    # print('===========box===========')
    # print('===========box===========')
    # print(pre_score[np.where(label==1)])
    # print('===========box===========')
    # print('===========cls===========')
    # 17x17 cosine (Hanning) window replicated over 5 anchors.
    # NOTE(review): `w` is only referenced by commented-out code below and is
    # unused in the active path.
    w = np.outer(np.hanning(17), np.hanning(17))
    w=np.stack([w,w,w,w,w],-1)
    w=w.reshape((-1))
    #w=np.tile(w.flatten(), 5)
    # index_cls=np.argmax(pre_cls[:,1])
    # boxes_reg=pre_reg[index_cls]
    # print('pre_cls_index={},pre_cls_max_value={},pre_reg={}'.format(index_cls,np.max(pre_cls[:,1]),boxes_reg))
    # Indices of the 10 best foreground scores, best first.
    index_score=np.argsort(pre_score[:,1])[::-1][0:10]
    boxes_box=pre_box[index_score]
    #print('pre_score_index={},pre_score_max_value={},pre_box={}'.format(index_score,np.max(pre_score[:,1]*w),boxes_box))
    # should_score=pre_score[:,1]*label
    # index_should=np.argmax(should_score)
    # boxes_should=pre_box[index_should]
    # print('should_index={},should_max_value={},should_box={}'.format(index_should,np.max(should_score),boxes_should))
    index=index_score
    boxes=boxes_box
    #print(pre_score[:,1])
    #print('===========cls===========')
    # #============should_box===========
    # box_should=boxes_should
    # anchors_should=self.anchor_op.anchors
    # anchor_should=anchors_should[index_should]#[x1,y1,x2,y2]
    # anchor_should[2]=anchor_should[2]-anchor_should[0]
    # anchor_should[3]=anchor_should[3]-anchor_should[1]
    # anchor_should[0]=anchor_should[0]+(anchor_should[2])/2
    # anchor_should[1]=anchor_should[1]+(anchor_should[3])/2#[x,y,w,h]
    # b_should=np.zeros_like(box_should)
    # b_should[0]=box_should[0]*anchor_should[2]+anchor_should[0]
    # b_should[1]=box_should[1]*anchor_should[3]+anchor_should[1]
    # b_should[2]=np.exp(box_should[2])*anchor_should[2]
    # b_should[3]=np.exp(box_should[3])*anchor_should[3]#[x,y,w,h]
    # b_should[0]=b_should[0]-b_should[2]/2
    # b_should[1]=b_should[1]-b_should[3]/2
    # b_should[2]=b_should[0]+b_should[2]
    # b_should[3]=b_should[1]+b_should[3]#[x1,y1,x2,y2]
    # if b_should[2]<1000 and b_should[3]<1000:
    #     cv2.rectangle(img,(int(b_should[0]),int(b_should[1])),(int(b_should[2]),int(b_should[3])),(0,255,0),1)
    # #============should_box===========
    #============pre_box===========
    box=boxes
    # Anchors in center form; decode the regression offsets against them.
    anchors=anchor_op.regu()
    anchors=anchor_op.corner_to_center(anchors)
    # NOTE(review): `diff_anchors` is computed but never used afterwards.
    diff_anchors=anchor_op.diff_anchor_gt(gt,anchors)
    anchor=anchors[index]#[x1,y1,x2,y2]
    # anchor[2]=anchor[2]-anchor[0]
    # anchor[3]=anchor[3]-anchor[1]
    # anchor[0]=anchor[0]+(anchor[2])/2
    # anchor[1]=anchor[1]+(anchor[3])/2#[x,y,w,h]
    # Standard RPN decoding: offsets scaled by anchor size, log-space w/h.
    b=np.zeros_like(box)
    b[:,0]=box[:,0]*anchor[:,2]+anchor[:,0]
    b[:,1]=box[:,1]*anchor[:,3]+anchor[:,1]
    b[:,2]=np.exp(box[:,2])*anchor[:,2]
    b[:,3]=np.exp(box[:,3])*anchor[:,3]#[x,y,w,h]
    # b[0]=b[0]-b[2]/2
    # b[1]=b[1]-b[3]/2
    # b[2]=b[0]+b[2]
    # b[3]=b[1]+b[3]#[x1,y1,x2,y2]
    b=anchor_op.center_to_corner(b)
    anchor=anchor_op.center_to_corner(anchor)
    #if b[2]<1000 and b[3]<1000:
    # Each of the top-10 boxes in a random bright colour; best box repainted
    # in black, its anchor in blue.
    for bbox in b:
        color = np.random.random((3, )) * 0.6 + 0.4
        color = color * 255
        color = color.astype(np.int32).tolist()
        cv2.rectangle(img,(int(bbox[0]),int(bbox[1])),(int(bbox[2]),int(bbox[3])),color,1)
    cv2.rectangle(img,(int(b[0][0]),int(b[0][1])),(int(b[0][2]),int(b[0][3])),(0,0,0),2)
    cv2.rectangle(img,(int(anchor[0][0]),int(anchor[0][1])),(int(anchor[0][2]),int(anchor[0][3])),(255,0,0),1)
    #============pre_box===========
    #============gt_box===========
    # gt_b=np.zeros_like(box)
    # gt_b[0]=(gt[0]-anchor[0])/(anchor[2]+0.01)
    # gt_b[1]=(gt[1]-anchor[1])/(anchor[3]+0.01)
    # gt_b[2]=np.log(gt[2]/(anchor[2]+0.01))
    # gt_b[3]=np.log(gt[3]/(anchor[3]+0.01))
    # print('++++++offset+++++++')
    # print('pre={}'.format(box))
    # print('comput_tg={}'.format(gt_b))
    # print('target={}'.format(target_box[index]))
    # print('anchor={}'.format(anchor))
    # print('++++++offset+++++++')
    # In-place [cx,cy,w,h] -> [x1,y1,x2,y2]; correct because gt[0]/gt[1] are
    # already updated before being reused, but this mutates the caller's gt.
    gt[0]=gt[0]-gt[2]/2
    gt[1]=gt[1]-gt[3]/2
    gt[2]=gt[0]+gt[2]
    gt[3]=gt[1]+gt[3]
    cv2.rectangle(img,(int(gt[0]),int(gt[1])),(int(gt[2]),int(gt[3])),(0,0,255),2)
    cv2.imwrite(cfg.debug_dir+'/'+str(step)+'.jpg',img)
    #============gt_box===========
    # print('===========reg===========')
    # print(b.astype(np.int32))
    # print(np.array(gt).astype(np.int32))
    # print('===========reg===========')
# print('====================================================') | 4,858 | 0 | 22 |
5058e80dfc8daa5b6b883944bb57f767df48aa64 | 514 | py | Python | C++/testing/serverSock.py | EJEmmett/RocksatX19 | 27139f72915ae7dd376daeb47a8721c09dd1f717 | [
"BSL-1.0"
] | null | null | null | C++/testing/serverSock.py | EJEmmett/RocksatX19 | 27139f72915ae7dd376daeb47a8721c09dd1f717 | [
"BSL-1.0"
] | null | null | null | C++/testing/serverSock.py | EJEmmett/RocksatX19 | 27139f72915ae7dd376daeb47a8721c09dd1f717 | [
"BSL-1.0"
] | null | null | null | import socket
# Simple single-connection TCP echo server: accept one client, then echo
# back every chunk it sends until the peer closes the connection.

# Address the echo server listens on.
HOST = '10.1.121.102'
PORT = 65432

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    # Allow immediate restart of the server without waiting out TIME_WAIT
    # ("address already in use" on quick restarts).
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((HOST, PORT))
    s.listen()
    conn, addr = s.accept()
    with conn:
        print('connected by', addr)
        while True:
            # Bounded receive: the original passed 100000024, which makes
            # CPython allocate a ~100 MB buffer on every recv() call.
            data = conn.recv(65536)
            print(data)
            if not data:
                # Empty bytes means the peer performed an orderly shutdown.
                break
            conn.sendall(data)
| 27.052632 | 60 | 0.453307 | import socket
# from threading import *

HOST = '10.1.121.102'  # interface the listener binds to
PORT = 65432           # TCP port of the echo service

# Minimal TCP echo server: take one connection and mirror every chunk of
# data back to the sender until the peer disconnects.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as listener:
    listener.bind((HOST, PORT))
    listener.listen()
    conn, peer = listener.accept()
    with conn:
        print('connected by', peer)
        while True:
            chunk = conn.recv(100000024)
            print(chunk)
            if not chunk:
                # Zero-length read: the client closed its end.
                break
            conn.sendall(chunk)
| 0 | 0 | 0 |
8caf10bab7d923396862b0b100643a51fad3da89 | 74,384 | py | Python | pysnmp-with-texts/CISCO-POP-MGMT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/CISCO-POP-MGMT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/CISCO-POP-MGMT-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CISCO-POP-MGMT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-POP-MGMT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:09:39 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection")
ciscoExperiment, = mibBuilder.importSymbols("CISCO-SMI", "ciscoExperiment")
dsx1LineIndex, dsx1LineStatus = mibBuilder.importSymbols("DS1-MIB", "dsx1LineIndex", "dsx1LineStatus")
InterfaceIndexOrZero, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
NotificationType, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, TimeTicks, Gauge32, Counter64, ObjectIdentity, MibIdentifier, Integer32, Bits, Counter32, Unsigned32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "TimeTicks", "Gauge32", "Counter64", "ObjectIdentity", "MibIdentifier", "Integer32", "Bits", "Counter32", "Unsigned32", "ModuleIdentity")
DisplayString, TimeStamp, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TimeStamp", "TextualConvention", "TruthValue")
ciscoPopMgmtMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 10, 19))
ciscoPopMgmtMIB.setRevisions(('2005-12-21 00:00', '2002-12-26 00:00', '2000-11-29 00:00', '2000-03-03 00:00', '1998-02-02 00:00', '1997-10-21 00:00', '1997-05-01 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoPopMgmtMIB.setRevisionsDescriptions(('Imported dsx1LineIndex from DS1-MIB instead of RFC1406-MIB Changed SYNTAX of cpmDS1SlotIndex cpmDS1PortIndex cpmChannelIndex cpmDS1UsageSlotIndex cpmDS1UsagePortIndex cpmActiveCallSummaryIndex cpmCallHistorySummaryIndex cpmActiveModemSlot cpmActiveModemPort cpmActiveEntrySlot cpmActiveEntryPort cpmActiveEntryChannel from Integer32 to Integer32 (0..2147483647) Imported Unsigned32 from SNMPv2-SMI instead of CISCO-TC.', 'Added voice(6) to object cpmCallType. Added Notification Group cpmNotificationGroupRev1', 'Added cpmCallVolume with objects: cpmCallVolSuccISDNDigital cpmCallVolAnalogCallClearedNormally', 'Extended cpmDs1DS0UsageTable with objects: cpmDS1TotalAnalogCalls cpmDS1TotalDigitalCalls cpmDS1TotalV110Calls cpmDS1TotalV120Calls cpmDS1TotalCalls cpmDS1TotalTimeInUse cpmDS1CurrentIdle cpmDS1CurrentOutOfService cpmDS1CurrentBusyout cpmDS1InOctets cpmDS1OutOctets cpmDS1InPackets cpmDS1OutPackets Added system level summary objects: cpmISDNCfgActiveDChannels cpmISDNCfgBChannelsTimeInUse cpmISDNCfgBChannelsTimeInUseAnalog cpmISDNCfgBChannelCalls cpmISDNCfgBChannelAnalogCalls cpmTotalISDNSyncPPPCalls Added DS0StatusTable with objects: cpmDS0OperStatus cpmDS0BusyoutAdminStatus cpmDS0BusyoutAllow cpmDS0BusyoutStatus cpmDS0BusyoutSource cpmDS0BusyoutTime cpmDS0ConfigFunction cpmDS0InterfaceIndex Added busyout notification and notification enable object: cpmDS0BusyoutNotification cpmDS0BusyoutNotifyEnable cpmDS1LoopbackNotifyConfig cpmDS1LoopbackNotifyEnable cpmDS1LoopbackNotification ', 'Added objects: cpmISDNCfgBChanInUseForVoice cpmCASCfgBChanInUseForVoice Added enumeration-type: voice', 'Added objects: cpmDS1DS0UsageTable cpmActiveDS0sHighWaterMark cpmSW56CfgBChannelsInUse', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoPopMgmtMIB.setLastUpdated('200512210000Z')
if mibBuilder.loadTexts: ciscoPopMgmtMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoPopMgmtMIB.setContactInfo(' Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-apm@cisco.com')
if mibBuilder.loadTexts: ciscoPopMgmtMIB.setDescription('Cisco Point Of Presence Management MIB to provide DSX1 and DSX0 facilities management and call summaries.')
ciscoPopMgmtMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1))
cpmDS0Usage = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1))
cpmCallFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2))
cpmActiveCallSummary = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3))
cpmCallHistorySummary = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4))
cpmDS0Status = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5))
cpmDS1LoopbackNotifyConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 6))
cpmCallVolume = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 7))
cpmDS0UsageTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1), )
if mibBuilder.loadTexts: cpmDS0UsageTable.setStatus('current')
if mibBuilder.loadTexts: cpmDS0UsageTable.setDescription('The DS0 usage table is for hardware and software objects not used as interfaces, and not covered in rfc1213. These objects include analog calls coming over ISDN, Channelized T1, and Channelized E1. This table is created for every DS1 line in the device.')
cpmDS0UsageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1), ).setIndexNames((0, "CISCO-POP-MGMT-MIB", "cpmDS1SlotIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmDS1PortIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmChannelIndex"))
if mibBuilder.loadTexts: cpmDS0UsageEntry.setStatus('current')
if mibBuilder.loadTexts: cpmDS0UsageEntry.setDescription('An entry in the DS0 Usage table.')
cpmDS1SlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmDS1SlotIndex.setStatus('current')
if mibBuilder.loadTexts: cpmDS1SlotIndex.setDescription('The slot index indicates the slot number on the device where the DS1 card resides.')
cpmDS1PortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmDS1PortIndex.setStatus('current')
if mibBuilder.loadTexts: cpmDS1PortIndex.setDescription('The port index indicates the port number of a specific DS1 on the DS1 card in the slot')
cpmChannelIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmChannelIndex.setStatus('current')
if mibBuilder.loadTexts: cpmChannelIndex.setDescription('The channel index that distinguishes the DS0 timeslot of the DS1 port. The range of the channel index is based on the number of T1/E1 channels: 1-24(T1) and 1-31(E1).')
cpmConfiguredType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("isdn", 2), ("ct1", 3), ("ce1", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmConfiguredType.setStatus('current')
if mibBuilder.loadTexts: cpmConfiguredType.setDescription('The configured technology for the channel: ISDN(2), Channelized T1 (3) or Channelized E1 (4).')
cpmDS0CallType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("idle", 1), ("unknown", 2), ("analog", 3), ("digital", 4), ("v110", 5), ("v120", 6), ("voice", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0CallType.setStatus('current')
if mibBuilder.loadTexts: cpmDS0CallType.setDescription('The type of the current call carried by this DS0. idle - This DS0 is currently idle. unknown - The data type of the call currently occupying this DS0 is not one of the types listed here. analog - The data type of the call currently occupying this DS0 is analog, i.e. a modem call. digital - The data type of the call currently occupying this DS0 is digital. v110 - The call currently occupying this DS0 is a V110 call. v120 - The call currently occupying this DS0 is a V120 call. voice - The call currently occupying this DS0 is a voice call.')
cpmL2Encapsulation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("idle", 1), ("unknown", 2), ("ppp", 3), ("slip", 4), ("arap", 5), ("hdlc", 6), ("exec", 7), ("voice", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmL2Encapsulation.setStatus('current')
if mibBuilder.loadTexts: cpmL2Encapsulation.setDescription('The data link encapsulation of the call currently occuppying this DS0: idle - This DS0 is currently idle. unknown - The encapsulation of the currently active call on this DS0 is not one of the options following. PPP - slip - arap - hdlc - exec - voice - voice encapsulation; IANA type voiceEncap(103)')
cpmCallCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCallCount.setStatus('current')
if mibBuilder.loadTexts: cpmCallCount.setDescription('The number of calls that have occupied this DS0.')
cpmTimeInUse = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 8), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmTimeInUse.setStatus('current')
if mibBuilder.loadTexts: cpmTimeInUse.setDescription('The amount of time that this DS0 has been in use. This is computed by summing up the call durations of all past calls that have occupied this DS0.')
cpmInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmInOctets.setStatus('current')
if mibBuilder.loadTexts: cpmInOctets.setDescription("The total number of octets received on this DS0 for data calls (cpmDS0CallType has the value 'analog(3)' or 'digital(4)' or 'v110(5)' or 'v120(6)'). All the received 'raw' octets are counted, including any protocol headers which may or may not be present, depending on the service type of data call.")
cpmOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmOutOctets.setStatus('current')
if mibBuilder.loadTexts: cpmOutOctets.setDescription("The total number of octets transmitted on this DS0 for data calls (cpmDS0CallType has the value 'analog(3)' or 'digital(4)' or 'v110(5)' or 'v120(6)'). All the transmitted 'raw' octets are counted, including any protocol headers which may or may not be present, depending on the service type of data call.")
cpmInPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmInPackets.setStatus('current')
if mibBuilder.loadTexts: cpmInPackets.setDescription("The total number of packets received on this DS0 for data calls (cpmDS0CallTyp has the value 'analog(3)' or 'digital(4)' or 'v110(5)' or 'v120(6)').")
cpmOutPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmOutPackets.setStatus('current')
if mibBuilder.loadTexts: cpmOutPackets.setDescription("The total number of packets transmitted on this DS0 for data calls (cpmDS0CallTyp has the value 'analog(3)' or 'digital(4)' or 'v110(5)' or 'v120(6)').")
cpmAssociatedInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 13), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmAssociatedInterface.setStatus('current')
if mibBuilder.loadTexts: cpmAssociatedInterface.setDescription('This is the value of ifIndex when the specific channel has an active call with a corresponding interface in the ifTable. For example, a digital ISDN call has a value pointing to the B-Channel entry in the ifTable. A modem call over ISDN or CT1/CE1 has a value pointing to the async interface of the modem assigned to this call. If the channel is idle, this value is 0.')
cpmISDNCfgBChanInUseForAnalog = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChanInUseForAnalog.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChanInUseForAnalog.setDescription('The number of configured ISDN B-Channels that are currently occupied by analog calls.')
cpmISDNCfgBChannelsInUse = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChannelsInUse.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChannelsInUse.setDescription('The number of configured ISDN B-Channels that are currently occupied by both Digital and Analog calls.')
cpmActiveDS0s = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveDS0s.setStatus('current')
if mibBuilder.loadTexts: cpmActiveDS0s.setDescription('The number of DS0s that are currently in use.')
cpmPPPCalls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmPPPCalls.setStatus('current')
if mibBuilder.loadTexts: cpmPPPCalls.setDescription('The current number of active PPP calls received by the managed device')
cpmV120Calls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmV120Calls.setStatus('current')
if mibBuilder.loadTexts: cpmV120Calls.setDescription('The current number of active V.120 calls received by the managed device')
cpmV110Calls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmV110Calls.setStatus('current')
if mibBuilder.loadTexts: cpmV110Calls.setDescription('The current number of active V.110 calls received by the managed device')
cpmActiveDS0sHighWaterMark = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveDS0sHighWaterMark.setStatus('current')
if mibBuilder.loadTexts: cpmActiveDS0sHighWaterMark.setDescription('The high water mark for number of DS0s that are active simultaneously')
cpmDS1DS0UsageTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9), )
if mibBuilder.loadTexts: cpmDS1DS0UsageTable.setStatus('current')
if mibBuilder.loadTexts: cpmDS1DS0UsageTable.setDescription('The DS1-DS0-usage table is for hardware and software objects not used as interfaces, and not covered in rfc1213. These objects provide information on timeslots usage for a particular DS1.')
cpmDS1DS0UsageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1), ).setIndexNames((0, "CISCO-POP-MGMT-MIB", "cpmDS1UsageSlotIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmDS1UsagePortIndex"))
if mibBuilder.loadTexts: cpmDS1DS0UsageEntry.setStatus('current')
if mibBuilder.loadTexts: cpmDS1DS0UsageEntry.setDescription('An entry in the DS0 Usage table.')
cpmDS1UsageSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmDS1UsageSlotIndex.setStatus('current')
if mibBuilder.loadTexts: cpmDS1UsageSlotIndex.setDescription('The slot index indicates the slot number on the device where the DS1 card resides. The slot index of the first slot is starting from 0 and increment by 1 for the next slot in the device.')
cpmDS1UsagePortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmDS1UsagePortIndex.setStatus('current')
if mibBuilder.loadTexts: cpmDS1UsagePortIndex.setDescription('The port index indicates the port number of a specific DS1 on the DS1 card in the slot. The port index of the first port is starting from 0 and increment by 1 for the next port on the DS1 card.')
cpmDS1ActiveDS0s = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1ActiveDS0s.setStatus('current')
if mibBuilder.loadTexts: cpmDS1ActiveDS0s.setDescription('The number of DS0s that are currently in use for a particular DS1.')
cpmDS1ActiveDS0sHighWaterMark = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1ActiveDS0sHighWaterMark.setStatus('current')
if mibBuilder.loadTexts: cpmDS1ActiveDS0sHighWaterMark.setDescription('The highest number of simultaneously actived DS0s on a specified DS1.')
cpmDS1TotalAnalogCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 5), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalAnalogCalls.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalAnalogCalls.setDescription("The accumulated number of analog data calls (cpmDS0CallType has the value 'analog(3)') on all ds0s within this DS1 since system startup. The object includes active and terminated calls.")
cpmDS1TotalDigitalCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 6), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalDigitalCalls.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalDigitalCalls.setDescription("The accumulated number of digital data calls (cpmDS0CallType has the value 'digital(4)') on all ds0s within this DS1 since system startup. The object includes active and terminated calls.")
cpmDS1TotalV110Calls = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 7), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalV110Calls.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalV110Calls.setDescription("The accumulated number of v.110 data calls (cpmDS0CallType has the value 'v110(5)') on all ds0s within this DS1 since system startup. The object includes active and terminated calls.")
cpmDS1TotalV120Calls = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 8), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalV120Calls.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalV120Calls.setDescription("The accumulated number of v.120 data calls (cpmDS0CallType has the value 'v120(6)') on all ds0s within this DS1 since system startup. The object includes active and terminated calls.")
cpmDS1TotalCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 9), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalCalls.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalCalls.setDescription('The total number of calls on all the DS0s within this DS1 since last system re-initialization. The object includes active and terminated calls. This only includes DS0s configured as bearer channels, not those configured for signaling, such as PRI D-channel.')
cpmDS1TotalTimeInUse = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 10), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalTimeInUse.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalTimeInUse.setDescription('The total call duration on all the DS0s within this DS1 since last system re-initialization. This only includes DS0s configured as bearer channels, not those configured for signaling, such as PRI D-channel.')
cpmDS1CurrentIdle = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1CurrentIdle.setStatus('current')
if mibBuilder.loadTexts: cpmDS1CurrentIdle.setDescription('The total number of DS0s currently in idle state within this DS1. This only includes DS0s configured as bearer and CAS channels, not those configured for signaling, such as PRI D-channel. See cpmDS0OperStatus in the cpmDS0StatusTable for individual DS0s.')
cpmDS1CurrentOutOfService = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1CurrentOutOfService.setStatus('current')
if mibBuilder.loadTexts: cpmDS1CurrentOutOfService.setDescription('The total number of DS0s in this DS1 currently placed out of service. This only includes DS0s configured as bearer and CAS channels, not those configured for signaling, such as PRI D-channel. See cpmDS0OperStatus in the cpmDS0StatusTable for individual DS0s.')
cpmDS1CurrentBusyout = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1CurrentBusyout.setStatus('current')
if mibBuilder.loadTexts: cpmDS1CurrentBusyout.setDescription('The total number of DS0s in this DS1 which currently have been requested to be busied out, but are not yet out of service. When an active call on a busied out DS0 terminates, the DS0 will be out of service, this object will be decremented and cpmDS1TotalOutOfService will be incremented. This only includes DS0s configured as bearer channels, not those configured for signaling, such as PRI D-channel. See cpmDS0AdminStatus in the cpmDS0StatusTable for individual DS0s.')
cpmDS1InOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 14), Counter32()).setUnits('octets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1InOctets.setStatus('current')
if mibBuilder.loadTexts: cpmDS1InOctets.setDescription('The total number of octets received over all the DS0 bearer channels within this DS1 since last system re-initialization. This value is a sum of all the cpmInOctets objects in the cpmDS0UsageTable for this DS1.')
cpmDS1OutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 15), Counter32()).setUnits('octets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1OutOctets.setStatus('current')
if mibBuilder.loadTexts: cpmDS1OutOctets.setDescription('The total number of octets transmitted over all the DS0 bearer channels within this DS1 since last system re-initialization. This value is a sum of all the cpmOutOctets objects in the cpmDS0UsageTable for this DS1.')
cpmDS1InPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 16), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1InPackets.setStatus('current')
if mibBuilder.loadTexts: cpmDS1InPackets.setDescription('The total number of data packets received over all the DS0 bearer channels within this DS1 since last system re-initialization. This value is a sum of all the cpmInPackets objects in the cpmDS0UsageTable for this DS1. It consists of PPP or PCM voice data packets.')
cpmDS1OutPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 17), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1OutPackets.setStatus('current')
if mibBuilder.loadTexts: cpmDS1OutPackets.setDescription('The total number of data packets transmitted over all the DS0 bearer channels within this DS1 since last system re-initialization. This value is a sum of all the cpmOutPackets objects in the cpmDS0UsageTable for this DS1. It consists of PPP or PCM voice data packets.')
cpmSW56CfgBChannelsInUse = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmSW56CfgBChannelsInUse.setStatus('current')
if mibBuilder.loadTexts: cpmSW56CfgBChannelsInUse.setDescription('The number of configured SW56 B-Channels that are currently occupied by Digital calls.')
cpmISDNCfgBChanInUseForVoice = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChanInUseForVoice.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChanInUseForVoice.setDescription('The number of configured ISDN B-Channels that are currently occupied by Voice calls.')
cpmCASCfgBChanInUseForVoice = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCASCfgBChanInUseForVoice.setStatus('current')
if mibBuilder.loadTexts: cpmCASCfgBChanInUseForVoice.setDescription('The number of configured CAS Channels that are currently occupied by Voice calls.')
cpmISDNCfgActiveDChannels = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgActiveDChannels.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgActiveDChannels.setDescription('The number of ISDN signaling channels which are active.')
cpmISDNCfgBChannelsTimeInUse = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 14), Counter32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChannelsTimeInUse.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChannelsTimeInUse.setDescription('The total call duration on all the ISDN B-channels since last system re-initialization.')
cpmISDNCfgBChannelsTimeInUseAnlg = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 15), Counter32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChannelsTimeInUseAnlg.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChannelsTimeInUseAnlg.setDescription('The total analog call duration on all the ISDN B-channels since last system re-initialization. Analog call type is identified in cpmDS0CallType.')
cpmISDNCfgBChannelCalls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 16), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChannelCalls.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChannelCalls.setDescription('The total number of calls on all the ISDN B-channels since last system re-initialization.')
cpmISDNCfgBChannelAnalogCalls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 17), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChannelAnalogCalls.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChannelAnalogCalls.setDescription('The total number of analog calls on all the ISDN B-channels since last system re-initialization. Analog call type is identified in cpmDS0CallType.')
cpmTotalISDNSyncPPPCalls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmTotalISDNSyncPPPCalls.setStatus('current')
if mibBuilder.loadTexts: cpmTotalISDNSyncPPPCalls.setDescription('The total number of ISDN-sync PPP calls received by the managed device since last system re-initialization.')
cpmDS0BusyoutNotifyEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpmDS0BusyoutNotifyEnable.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutNotifyEnable.setDescription("This variable controls generation of cpmDS0BusyoutNotification. When this variable is 'true(1)', generation of cpmDS0BusyoutNotification is enabled. When this variable is 'false(2)', generation of cpmDS0BusyoutNotification is disabled. The default value is 'false(2)'. ")
cpmDS0StatusTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2), )
if mibBuilder.loadTexts: cpmDS0StatusTable.setStatus('current')
if mibBuilder.loadTexts: cpmDS0StatusTable.setDescription('The DS0 Status Table provides additional DS0 level information and configuration. This table is an extension of the cpmDS0UsageTable. ')
cpmDS0StatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1), )
cpmDS0UsageEntry.registerAugmentions(("CISCO-POP-MGMT-MIB", "cpmDS0StatusEntry"))
cpmDS0StatusEntry.setIndexNames(*cpmDS0UsageEntry.getIndexNames())
if mibBuilder.loadTexts: cpmDS0StatusEntry.setStatus('current')
if mibBuilder.loadTexts: cpmDS0StatusEntry.setDescription('Status for an individual DS0. This entry AUGMENTS the cpmDS0UsageEntry. An entry exists for each configured DS0 in the system. ')
cpmDS0OperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("down", 2), ("idle", 3), ("setup", 4), ("connected", 5), ("test", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0OperStatus.setStatus('current')
if mibBuilder.loadTexts: cpmDS0OperStatus.setDescription('The operational state of the DSO. This object provides more detailed information than the IF-MIB ifOperStatus. unknown - The DSO is in an unknown state down - The DS0 is out of service idle - This DS0 is currently idle setup - A new connection is in the process of being established connected - A active connection exists test - The DSO is undergoing internal testing ')
# --- PySMI-generated MIB object instantiations (CISCO-POP-MGMT-MIB) ---
# NOTE(review): this section is machine-generated from the MIB definition;
# do not hand-edit OID tuples or description strings — regenerate instead.
# Names like MibTableColumn/MibScalar/mibBuilder are bound earlier in the file
# by the pysmi import preamble (outside this chunk).

# DS0 busyout / configuration columns of cpmDS0BusyoutTable (…1.5.2.1.x).
cpmDS0BusyoutAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noBusyout", 1), ("busyout", 2), ("busyoutImmediate", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpmDS0BusyoutAdminStatus.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutAdminStatus.setDescription('The administratively requested busyout state of the DSO. This object represents the desired busyout state of the DS0, which may or may not be reflected in the operation state, cpmDS0OperStatus. This object may be set via SNMP or the system console. noBusyout - No administrative busyout has been requested busyout - The DS0 will be shutdown when it next transitions to idle(2) cpmDS0OperStatus busyoutImmediate - Immediately transition the DS0 to down(2), terminating any current activity The effect of setting this object to busyout or busyoutImmediate is constrained by the value of cpmDS0BusyoutAllowed. If cpmDS0BusyoutAllowed has a value of false, attempting to set this object to busyout or busyoutImmediate will fail. This object provides more busyout information not available from the IF-MIB ifAdminStatus. This object reflects busyout requests configured on the local system as defined for the object cpmDS0BusyoutSource. ')
cpmDS0BusyoutAllow = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0BusyoutAllow.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutAllow.setDescription('The object indicates if busyout requests for this DSO will be allowed. true - Setting cpmDS0BusyoutAdminStatus to busyout(2) or busyoutImmediate(3) will be accepted false - Busyout requests for this DS0 are not allowed The default value is true(1). If cpmDS0BusyoutAllowed has a value of false(2), attempting to set cpmDS0BusyoutAdminStatus to busyout or busyoutImmediate will fail. This object only constrains setting the value of cpmDS0BusyoutAdminStatus when cpmDS0BusyoutAdminStatus is set via SNMP. ')
cpmDS0BusyoutStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noBusyout", 1), ("busyoutPending", 2), ("busiedOut", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0BusyoutStatus.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutStatus.setDescription('The busyout status of the DSO. noBusyout - The DS0 is not busied out nor has a busyout pending. busyoutPending - The DS0 has a busyout request pending. It will shutdown when it next transitions to idle(2) cpmDS0OperStatus. busiedOut - The DS0 is out of service due to a busyout request. This object is needed to reflect busyouts initiated other than administrativly, as described in cpmDS0BusyoutSource. ')
cpmDS0BusyoutSource = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("local", 2), ("internal", 3), ("remote", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0BusyoutSource.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutSource.setDescription('The source of the busyout request either pending or active for the DSO. none - The DS0 is not busied out nor has a busyout request pending local - The DS0 is busied out or has a pending request due to an adminstrative command issued locally. internal - The DS0 is busied out or has a pending request due to internal system processing. remote - The DS0 is busied out or has a pending request due to a request from the peer switch. ')
cpmDS0BusyoutTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 6), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0BusyoutTime.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutTime.setDescription('Value of sysUpTime when the most recent busyout request was issued for this DS0. The value is zero indicates no busyout request has been issued. ')
cpmDS0ConfigFunction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("unknown", 1), ("t1CcsSignallingChan", 2), ("t1CcsBearerChan", 3), ("e1CcsSignallingChan", 4), ("e1CcsBearerChan", 5), ("t1CasChan", 6), ("e1CasChan", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0ConfigFunction.setStatus('current')
if mibBuilder.loadTexts: cpmDS0ConfigFunction.setDescription('The object indicates the function this DSO is configured to provide. This object provides more detail than cpmConfiguredType. unknown - Unknown configuration t1SignallingChan - DS0 is configured as a T1 common channel signalling (CCS) such as ISDN PRI D-channel t1CcsBearerChan - DS0 is configured as a T1 common channel signalling (CCS) bearer channel e1SignallingChan - DS0 is configured as a E1 common channel signalling (CCS) such as ISDN PRI D-channel e1CcsBearerChan - DS0 is configured as a E1 common channel signalling (CCS) bearer channel t1CasChan - DS0 is configured as a T1 channel associated signalling channel e1CasChan - DS0 is configured as a E1 channel associated signalling channel ')
cpmDS0InterfaceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 8), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0InterfaceIndex.setStatus('current')
if mibBuilder.loadTexts: cpmDS0InterfaceIndex.setDescription('This is the value of ifIndex in the ifTable for this DS0.')
# Call-failure counters (scalars under …1.2.x).
cpmISDNCallsRejected = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCallsRejected.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCallsRejected.setDescription('The number of rejected ISDN calls in this managed device.')
cpmModemCallsRejected = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmModemCallsRejected.setStatus('current')
if mibBuilder.loadTexts: cpmModemCallsRejected.setDescription('The number of rejected modem calls in this managed device.')
cpmISDNCallsClearedAbnormally = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCallsClearedAbnormally.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCallsClearedAbnormally.setDescription('The number of connected ISDN calls that have been abnormally cleared, that is, they were cleared by some event other than the following: a - The transmission of a normal disconnect message by the local end. b - The reception of a normal disconnect message from the remote end.')
cpmModemCallsClearedAbnormally = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmModemCallsClearedAbnormally.setStatus('current')
if mibBuilder.loadTexts: cpmModemCallsClearedAbnormally.setDescription('The number of connected modem calls that have been abnormally cleared, that is, they were not cleared with the proper modem protocol handshakes.')
cpmISDNNoResource = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNNoResource.setStatus('current')
if mibBuilder.loadTexts: cpmISDNNoResource.setDescription('The number of ISDN calls that have been rejected because there is no B-Channel available to handle the call.')
cpmModemNoResource = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmModemNoResource.setStatus('current')
if mibBuilder.loadTexts: cpmModemNoResource.setDescription('The number of modem calls that have been rejected because there is no modem available to handle the call.')
# Call-volume counters (scalars under …1.7.x).
cpmCallVolSuccISDNDigital = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 7, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCallVolSuccISDNDigital.setStatus('current')
if mibBuilder.loadTexts: cpmCallVolSuccISDNDigital.setDescription("The number of incoming and outgoing successful ISDN digital data calls (cpmDS0CallType has the value 'digital(4)'), since system startup. A successful call is a terminated call that has a disconnect cause of 'normal call clearing(16)'.")
cpmCallVolAnalogCallClearedNormally = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 7, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCallVolAnalogCallClearedNormally.setStatus('current')
if mibBuilder.loadTexts: cpmCallVolAnalogCallClearedNormally.setDescription("The number of incoming and outgoing successful analog data calls which use modem resource (cpmDS0CallType has the value 'analog(3)'), since system startup. A successful call is a terminated call who is cleared by the proper modem protocol handshake.")
# Active call summary table (…1.3.1), indexed by start time + small integer.
cpmActiveCallSummaryTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1), )
if mibBuilder.loadTexts: cpmActiveCallSummaryTable.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallSummaryTable.setDescription('The active call summary table is needed to track currently active calls. This table is needed because modem calls are not in CALL-HISTORY-MIB, and the DIAL-CONTROL_MIB is not yet available.')
cpmActiveCallSummaryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-POP-MGMT-MIB", "cpmActiveCallStartTimeIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryIndex"))
if mibBuilder.loadTexts: cpmActiveCallSummaryEntry.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallSummaryEntry.setDescription('An entry in the Call Summary table.')
cpmActiveCallStartTimeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 1), TimeStamp())
if mibBuilder.loadTexts: cpmActiveCallStartTimeIndex.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallStartTimeIndex.setDescription('The start time of the current call.')
cpmActiveCallSummaryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmActiveCallSummaryIndex.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallSummaryIndex.setDescription('Arbitrary small integer to distinguish calls that occured at the same time tick.')
cpmActiveUserID = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveUserID.setStatus('current')
if mibBuilder.loadTexts: cpmActiveUserID.setDescription('The User login ID or zero length string if unavailable.')
cpmActiveUserIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveUserIpAddr.setStatus('current')
if mibBuilder.loadTexts: cpmActiveUserIpAddr.setDescription('The IP address of the call or 0.0.0.0 if unavailable. This object comes directly from IOS.')
cpmActiveCallType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("analog", 2), ("digital", 3), ("v110", 4), ("v120", 5), ("voice", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveCallType.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallType.setDescription('The call type: unknown - None of the following analog - Modem call digital - Digital call v110 - V110 Call v120 - V120 Call voice - Voice Call')
cpmActiveModemSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveModemSlot.setStatus('current')
if mibBuilder.loadTexts: cpmActiveModemSlot.setDescription('Identification of modem resource allocated to call. This is the value of cmSlotIndex from CISCO-MODEM-MGMT-MIB. This value along with cpmActiveModemPort uniquely identifies a modem. Value is -1 if call does not utilize a modem.')
cpmActiveModemPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveModemPort.setStatus('current')
if mibBuilder.loadTexts: cpmActiveModemPort.setDescription('Identification of modem resource allocated to call. This is the value of cmPortIndex from CISCO-MODEM-MGMT-MIB. This value along with cpmActiveModemSlot uniquely identifies a modem. Value is -1 if call does not utilize a modem.')
cpmActiveCallDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 8), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveCallDuration.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallDuration.setDescription('The duration of the current call.')
cpmActiveEntrySlot = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveEntrySlot.setStatus('current')
if mibBuilder.loadTexts: cpmActiveEntrySlot.setDescription('The logical slot in which the DS1 line that the user connected on resides.')
cpmActiveEntryPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveEntryPort.setStatus('current')
if mibBuilder.loadTexts: cpmActiveEntryPort.setDescription('The logical port for the DS1 line that the user connected on.')
cpmActiveEntryChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveEntryChannel.setStatus('current')
if mibBuilder.loadTexts: cpmActiveEntryChannel.setDescription('The channel within the DS1 that is allocated to the call.')
cpmActiveRemotePhoneNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveRemotePhoneNumber.setStatus('current')
if mibBuilder.loadTexts: cpmActiveRemotePhoneNumber.setDescription('The remote telephone number. For a call into the device, this is the originating number. For a call out of the device, this is the dialed number. If not available the string length is zero.')
cpmActiveLocalPhoneNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveLocalPhoneNumber.setStatus('current')
if mibBuilder.loadTexts: cpmActiveLocalPhoneNumber.setDescription('The local telephone number. For a call into the device, this is the dialed number. For a call out of the device, this is the originating number. If not available the string length is zero.')
cpmActiveTTYNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveTTYNumber.setStatus('current')
if mibBuilder.loadTexts: cpmActiveTTYNumber.setDescription("The TTY number associated with this call. This information comes from the IOS 'show line' command.")
# Call history summary: retention controls plus the history table (…1.4.x).
cpmCallHistorySummaryTableMaxLength = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 500)).clone(100)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpmCallHistorySummaryTableMaxLength.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryTableMaxLength.setDescription('The upper limit on the number of entries that the cpmCallHistoryTable may contain. A value of 0 will prevent any history from being retained. When this table is full, the oldest entry will be deleted and the new one will be created.')
cpmCallHistorySummaryRetainTimer = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 500)).clone(15)).setUnits('minutes').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpmCallHistorySummaryRetainTimer.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryRetainTimer.setDescription('The minimum amount of time that an cpmCallHistoryEntry will be maintained before being deleted. A value of 0 will prevent any history from being retained, but will not prevent callCompletion traps being genarated.')
cpmCallHistorySummaryTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3), )
if mibBuilder.loadTexts: cpmCallHistorySummaryTable.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryTable.setDescription('The call history summary table is needed to track ended active calls. This table is needed because modem calls are not in CALL-HISTORY-MIB; the CISCO-ISDN-MIB objects do not contain information about the active calls; and the DIAL-CONTROL_MIB is not yet available.')
cpmCallHistorySummaryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1), ).setIndexNames((0, "CISCO-POP-MGMT-MIB", "cpmCallDisconnectTimeIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmCallStartTimeIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryIndex"))
if mibBuilder.loadTexts: cpmCallHistorySummaryEntry.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryEntry.setDescription('An entry in the Call Summary table.')
cpmCallDisconnectTimeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 1), TimeStamp())
if mibBuilder.loadTexts: cpmCallDisconnectTimeIndex.setStatus('current')
if mibBuilder.loadTexts: cpmCallDisconnectTimeIndex.setDescription('The disconnect time of the call.')
cpmCallStartTimeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 2), TimeStamp())
if mibBuilder.loadTexts: cpmCallStartTimeIndex.setStatus('current')
if mibBuilder.loadTexts: cpmCallStartTimeIndex.setDescription('The start time of the current call.')
cpmCallHistorySummaryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmCallHistorySummaryIndex.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryIndex.setDescription('Arbitrary small integer to distinguish calls that occured at the same time tick.')
cpmUserID = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmUserID.setStatus('current')
if mibBuilder.loadTexts: cpmUserID.setDescription('The User login ID or zero length string if unavailable. This object comes directly from IOS.')
cpmUserIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmUserIpAddr.setStatus('current')
if mibBuilder.loadTexts: cpmUserIpAddr.setDescription('The IP address of the call or 0.0.0.0 if unavailable. This object comes directly from IOS.')
cpmCallType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("analog", 2), ("digital", 3), ("v110", 4), ("v120", 5), ("voice", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCallType.setStatus('current')
if mibBuilder.loadTexts: cpmCallType.setDescription('The call type: unknown - None of the following analog - Modem call digital - Digital call v110 - V110 Call v120 - V120 Call voice - Voice Call')
cpmModemSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmModemSlot.setStatus('current')
if mibBuilder.loadTexts: cpmModemSlot.setDescription('Identification of modem resource allocated to call. This is the value of cmSlotIndex from CISCO-MODEM-MGMT-MIB. This value along with cpmModemPort uniquely identifies a modem. Value is -1 if call does not utilize a modem.')
cpmModemPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmModemPort.setStatus('current')
if mibBuilder.loadTexts: cpmModemPort.setDescription('Identification of modem resource allocated to call. This is the value of cmPortIndex from CISCO-MODEM-MGMT-MIB. This value along with cpmModemSlot uniquely identifies a modem. Value is -1 if call does not utilize a modem.')
cpmCallDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 9), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCallDuration.setStatus('current')
if mibBuilder.loadTexts: cpmCallDuration.setDescription('The duration of the current call.')
cpmEntrySlot = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmEntrySlot.setStatus('current')
if mibBuilder.loadTexts: cpmEntrySlot.setDescription('The physical system slot in which the DS1 line that the user connected on resides.')
cpmEntryPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmEntryPort.setStatus('current')
if mibBuilder.loadTexts: cpmEntryPort.setDescription('The logical port for the DS1 line that the user connected on.')
cpmEntryChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmEntryChannel.setStatus('current')
if mibBuilder.loadTexts: cpmEntryChannel.setDescription('The channel within the DS1 that is allocated to the call.')
cpmRemotePhoneNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmRemotePhoneNumber.setStatus('current')
if mibBuilder.loadTexts: cpmRemotePhoneNumber.setDescription('The remote telephone number. For a call into the device, this is the originating number. For a call out of the device, this is the dialed number. If not available the string length is zero.')
cpmLocalPhoneNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmLocalPhoneNumber.setStatus('current')
if mibBuilder.loadTexts: cpmLocalPhoneNumber.setDescription('The local telephone number. For a call into the device, this is the dialed number. For a call out of the device, this is the originating number. If not available the string length is zero.')
cpmTTYNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmTTYNumber.setStatus('current')
if mibBuilder.loadTexts: cpmTTYNumber.setDescription("The TTY number associated with this call. This information comes from the IOS 'show line' command.")
# DS1 loopback notification enable switch (…1.6.1).
cpmDS1LoopbackNotifyEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 6, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpmDS1LoopbackNotifyEnable.setStatus('current')
if mibBuilder.loadTexts: cpmDS1LoopbackNotifyEnable.setDescription("This variable controls the generation of cpmDS1LoopbackNotification When this variable is 'true(1)', generation of these notification is enabled. When this variable is 'false(2)', generation is disabled The default value is 'false(2)'. ")
# Notification definitions (…2.0.x).
cPopMgmtMIBNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 2))
cpmNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 2, 0))
cpmDS0BusyoutNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 10, 19, 2, 0, 1)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutStatus"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutTime"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutSource"), ("CISCO-POP-MGMT-MIB", "cpmDS0InterfaceIndex"))
if mibBuilder.loadTexts: cpmDS0BusyoutNotification.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutNotification.setDescription('This notification is sent when there is a state change in cpmDS0BusyoutStatus object.')
cpmDS1LoopbackNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 10, 19, 2, 0, 2)).setObjects(("DS1-MIB", "dsx1LineStatus"), ("DS1-MIB", "dsx1LineIndex"))
if mibBuilder.loadTexts: cpmDS1LoopbackNotification.setStatus('current')
if mibBuilder.loadTexts: cpmDS1LoopbackNotification.setDescription('This notification is sent when there is a state change in dsx1LineStatus object of RFC1406-MIB.')
# Conformance: compliance statements (…3.1.x).  The version guard is standard
# pysmi output — setStatus is only chained on newer pysnmp builds.
cpmMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 3))
cpmMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1))
cpmMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2))
cpmMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 1)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmMIBCompliance = cpmMIBCompliance.setStatus('obsolete')
if mibBuilder.loadTexts: cpmMIBCompliance.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
cpmMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 2)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroupRev1"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmMIBComplianceRev1 = cpmMIBComplianceRev1.setStatus('obsolete')
if mibBuilder.loadTexts: cpmMIBComplianceRev1.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
cpmComplianceRev2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 3)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroupRev2"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmComplianceRev2 = cpmComplianceRev2.setStatus('obsolete')
if mibBuilder.loadTexts: cpmComplianceRev2.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
cpmMIBComplianceRev3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 4)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroupRev2"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS0StatusGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS1UsageGroup"), ("CISCO-POP-MGMT-MIB", "cpmSystemGroup"), ("CISCO-POP-MGMT-MIB", "cpmNotificationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmMIBComplianceRev3 = cpmMIBComplianceRev3.setStatus('deprecated')
if mibBuilder.loadTexts: cpmMIBComplianceRev3.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
cpmMIBComplianceRev4 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 5)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroupRev2"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallVolumeGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS0StatusGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS1UsageGroup"), ("CISCO-POP-MGMT-MIB", "cpmSystemGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmMIBComplianceRev4 = cpmMIBComplianceRev4.setStatus('deprecated')
if mibBuilder.loadTexts: cpmMIBComplianceRev4.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
cpmMIBComplianceRev5 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 6)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroupRev2"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallVolumeGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS0StatusGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS1UsageGroup"), ("CISCO-POP-MGMT-MIB", "cpmSystemGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS1LoopbackNotifyConfigGroup"), ("CISCO-POP-MGMT-MIB", "cpmNotificationGroupRev1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmMIBComplianceRev5 = cpmMIBComplianceRev5.setStatus('current')
if mibBuilder.loadTexts: cpmMIBComplianceRev5.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
# Conformance: object groups (…3.2.x).
cpmDS0UsageGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 1)).setObjects(("CISCO-POP-MGMT-MIB", "cpmConfiguredType"), ("CISCO-POP-MGMT-MIB", "cpmDS0CallType"), ("CISCO-POP-MGMT-MIB", "cpmL2Encapsulation"), ("CISCO-POP-MGMT-MIB", "cpmCallCount"), ("CISCO-POP-MGMT-MIB", "cpmTimeInUse"), ("CISCO-POP-MGMT-MIB", "cpmInOctets"), ("CISCO-POP-MGMT-MIB", "cpmOutOctets"), ("CISCO-POP-MGMT-MIB", "cpmInPackets"), ("CISCO-POP-MGMT-MIB", "cpmOutPackets"), ("CISCO-POP-MGMT-MIB", "cpmAssociatedInterface"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChanInUseForAnalog"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelsInUse"), ("CISCO-POP-MGMT-MIB", "cpmActiveDS0s"), ("CISCO-POP-MGMT-MIB", "cpmPPPCalls"), ("CISCO-POP-MGMT-MIB", "cpmV120Calls"), ("CISCO-POP-MGMT-MIB", "cpmV110Calls"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmDS0UsageGroup = cpmDS0UsageGroup.setStatus('obsolete')
if mibBuilder.loadTexts: cpmDS0UsageGroup.setDescription('A collection of objects providing the analog and digital statistics for a DS1.')
cpmCallFailureGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 2)).setObjects(("CISCO-POP-MGMT-MIB", "cpmISDNCallsRejected"), ("CISCO-POP-MGMT-MIB", "cpmModemCallsRejected"), ("CISCO-POP-MGMT-MIB", "cpmISDNCallsClearedAbnormally"), ("CISCO-POP-MGMT-MIB", "cpmModemCallsClearedAbnormally"), ("CISCO-POP-MGMT-MIB", "cpmISDNNoResource"), ("CISCO-POP-MGMT-MIB", "cpmModemNoResource"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmCallFailureGroup = cpmCallFailureGroup.setStatus('current')
if mibBuilder.loadTexts: cpmCallFailureGroup.setDescription('A collection of objects providing aggregate totals of call failures')
cpmActiveCallSummaryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 3)).setObjects(("CISCO-POP-MGMT-MIB", "cpmActiveUserID"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallType"), ("CISCO-POP-MGMT-MIB", "cpmActiveUserIpAddr"), ("CISCO-POP-MGMT-MIB", "cpmActiveModemSlot"), ("CISCO-POP-MGMT-MIB", "cpmActiveModemPort"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallDuration"), ("CISCO-POP-MGMT-MIB", "cpmActiveEntrySlot"), ("CISCO-POP-MGMT-MIB", "cpmActiveEntryPort"), ("CISCO-POP-MGMT-MIB", "cpmActiveEntryChannel"), ("CISCO-POP-MGMT-MIB", "cpmActiveRemotePhoneNumber"), ("CISCO-POP-MGMT-MIB", "cpmActiveLocalPhoneNumber"), ("CISCO-POP-MGMT-MIB", "cpmActiveTTYNumber"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmActiveCallSummaryGroup = cpmActiveCallSummaryGroup.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallSummaryGroup.setDescription('A collection of objects providing the summary of the currently active calls.')
cpmCallHistorySummaryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 4)).setObjects(("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryTableMaxLength"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryRetainTimer"), ("CISCO-POP-MGMT-MIB", "cpmUserID"), ("CISCO-POP-MGMT-MIB", "cpmUserIpAddr"), ("CISCO-POP-MGMT-MIB", "cpmCallType"), ("CISCO-POP-MGMT-MIB", "cpmModemSlot"), ("CISCO-POP-MGMT-MIB", "cpmModemPort"), ("CISCO-POP-MGMT-MIB", "cpmCallDuration"), ("CISCO-POP-MGMT-MIB", "cpmEntrySlot"), ("CISCO-POP-MGMT-MIB", "cpmEntryPort"), ("CISCO-POP-MGMT-MIB", "cpmEntryChannel"), ("CISCO-POP-MGMT-MIB", "cpmRemotePhoneNumber"), ("CISCO-POP-MGMT-MIB", "cpmLocalPhoneNumber"), ("CISCO-POP-MGMT-MIB", "cpmTTYNumber"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmCallHistorySummaryGroup = cpmCallHistorySummaryGroup.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryGroup.setDescription('A collection of objects providing the summary of calls that were recently terminated.')
cpmDS0UsageGroupRev1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 5)).setObjects(("CISCO-POP-MGMT-MIB", "cpmConfiguredType"), ("CISCO-POP-MGMT-MIB", "cpmDS0CallType"), ("CISCO-POP-MGMT-MIB", "cpmL2Encapsulation"), ("CISCO-POP-MGMT-MIB", "cpmCallCount"), ("CISCO-POP-MGMT-MIB", "cpmTimeInUse"), ("CISCO-POP-MGMT-MIB", "cpmInOctets"), ("CISCO-POP-MGMT-MIB", "cpmOutOctets"), ("CISCO-POP-MGMT-MIB", "cpmInPackets"), ("CISCO-POP-MGMT-MIB", "cpmOutPackets"), ("CISCO-POP-MGMT-MIB", "cpmAssociatedInterface"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChanInUseForAnalog"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelsInUse"), ("CISCO-POP-MGMT-MIB", "cpmActiveDS0s"), ("CISCO-POP-MGMT-MIB", "cpmPPPCalls"), ("CISCO-POP-MGMT-MIB", "cpmV120Calls"), ("CISCO-POP-MGMT-MIB", "cpmV110Calls"), ("CISCO-POP-MGMT-MIB", "cpmActiveDS0sHighWaterMark"), ("CISCO-POP-MGMT-MIB", "cpmDS1ActiveDS0s"), ("CISCO-POP-MGMT-MIB", "cpmDS1ActiveDS0sHighWaterMark"), ("CISCO-POP-MGMT-MIB", "cpmSW56CfgBChannelsInUse"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmDS0UsageGroupRev1 = cpmDS0UsageGroupRev1.setStatus('obsolete')
if mibBuilder.loadTexts: cpmDS0UsageGroupRev1.setDescription('A collection of objects providing the analog and digital statistics for a DS1.')
cpmDS0UsageGroupRev2 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 6)).setObjects(("CISCO-POP-MGMT-MIB", "cpmConfiguredType"), ("CISCO-POP-MGMT-MIB", "cpmDS0CallType"), ("CISCO-POP-MGMT-MIB", "cpmL2Encapsulation"), ("CISCO-POP-MGMT-MIB", "cpmCallCount"), ("CISCO-POP-MGMT-MIB", "cpmTimeInUse"), ("CISCO-POP-MGMT-MIB", "cpmInOctets"), ("CISCO-POP-MGMT-MIB", "cpmOutOctets"), ("CISCO-POP-MGMT-MIB", "cpmInPackets"), ("CISCO-POP-MGMT-MIB", "cpmOutPackets"), ("CISCO-POP-MGMT-MIB", "cpmAssociatedInterface"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChanInUseForAnalog"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelsInUse"), ("CISCO-POP-MGMT-MIB", "cpmActiveDS0s"), ("CISCO-POP-MGMT-MIB", "cpmPPPCalls"), ("CISCO-POP-MGMT-MIB", "cpmV120Calls"), ("CISCO-POP-MGMT-MIB", "cpmV110Calls"), ("CISCO-POP-MGMT-MIB", "cpmActiveDS0sHighWaterMark"), ("CISCO-POP-MGMT-MIB", "cpmDS1ActiveDS0s"), ("CISCO-POP-MGMT-MIB", "cpmDS1ActiveDS0sHighWaterMark"), ("CISCO-POP-MGMT-MIB", "cpmSW56CfgBChannelsInUse"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChanInUseForVoice"), ("CISCO-POP-MGMT-MIB", "cpmCASCfgBChanInUseForVoice"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmDS0UsageGroupRev2 = cpmDS0UsageGroupRev2.setStatus('current')
if mibBuilder.loadTexts: cpmDS0UsageGroupRev2.setDescription('A collection of objects providing the analog and digital statistics for a DS1.')
cpmDS1UsageGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 7)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS1TotalAnalogCalls"), ("CISCO-POP-MGMT-MIB", "cpmDS1TotalDigitalCalls"), ("CISCO-POP-MGMT-MIB", "cpmDS1TotalV110Calls"), ("CISCO-POP-MGMT-MIB", "cpmDS1TotalV120Calls"), ("CISCO-POP-MGMT-MIB", "cpmDS1TotalCalls"), ("CISCO-POP-MGMT-MIB", "cpmDS1TotalTimeInUse"), ("CISCO-POP-MGMT-MIB", "cpmDS1CurrentIdle"), ("CISCO-POP-MGMT-MIB", "cpmDS1CurrentOutOfService"), ("CISCO-POP-MGMT-MIB", "cpmDS1CurrentBusyout"), ("CISCO-POP-MGMT-MIB", "cpmDS1InOctets"), ("CISCO-POP-MGMT-MIB", "cpmDS1OutOctets"), ("CISCO-POP-MGMT-MIB", "cpmDS1InPackets"), ("CISCO-POP-MGMT-MIB", "cpmDS1OutPackets"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmDS1UsageGroup = cpmDS1UsageGroup.setStatus('current')
if mibBuilder.loadTexts: cpmDS1UsageGroup.setDescription('A collection of objects providing statistics aggregation across DS0s within a DS1.')
cpmSystemGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 8)).setObjects(("CISCO-POP-MGMT-MIB", "cpmISDNCfgActiveDChannels"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelsTimeInUse"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelsTimeInUseAnlg"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelCalls"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelAnalogCalls"), ("CISCO-POP-MGMT-MIB", "cpmTotalISDNSyncPPPCalls"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cpmSystemGroup = cpmSystemGroup.setStatus('current')
if mibBuilder.loadTexts: cpmSystemGroup.setDescription('A collection of objects providing statistics aggregation for the entire system.')
cpmDS0StatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 9)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutNotifyEnable"), ("CISCO-POP-MGMT-MIB", "cpmDS0OperStatus"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutAdminStatus"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutAllow"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutStatus"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutSource"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutTime"), ("CISCO-POP-MGMT-MIB", "cpmDS0ConfigFunction"), ("CISCO-POP-MGMT-MIB", "cpmDS0InterfaceIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmDS0StatusGroup = cpmDS0StatusGroup.setStatus('current')
if mibBuilder.loadTexts: cpmDS0StatusGroup.setDescription('A collection of objects providing the status for a DS0.')
cpmNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 10)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmNotificationGroup = cpmNotificationGroup.setStatus('deprecated')
if mibBuilder.loadTexts: cpmNotificationGroup.setDescription('The collection of notifications ')
cpmDS1LoopbackNotifyConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 11)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS1LoopbackNotifyEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmDS1LoopbackNotifyConfigGroup = cpmDS1LoopbackNotifyConfigGroup.setStatus('current')
if mibBuilder.loadTexts: cpmDS1LoopbackNotifyConfigGroup.setDescription('A collection of objects providing the notification configuration ')
cpmCallVolumeGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 12)).setObjects(("CISCO-POP-MGMT-MIB", "cpmCallVolSuccISDNDigital"), ("CISCO-POP-MGMT-MIB", "cpmCallVolAnalogCallClearedNormally"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmCallVolumeGroup = cpmCallVolumeGroup.setStatus('current')
if mibBuilder.loadTexts: cpmCallVolumeGroup.setDescription('A collection of objects providing aggregate totals of call successfully completed')
cpmNotificationGroupRev1 = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 13)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutNotification"), ("CISCO-POP-MGMT-MIB", "cpmDS1LoopbackNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmNotificationGroupRev1 = cpmNotificationGroupRev1.setStatus('current')
if mibBuilder.loadTexts: cpmNotificationGroupRev1.setDescription('The collection of notifications ')
# Register every symbol defined by this module with the MIB builder so other
# compiled MIBs can import them; PYSNMP_MODULE_ID marks the module identity.
# (Single call continued over several physical lines by the pysmi generator.)
mibBuilder.exportSymbols("CISCO-POP-MGMT-MIB", cpmOutPackets=cpmOutPackets, cpmNotifications=cpmNotifications, cpmCallHistorySummaryGroup=cpmCallHistorySummaryGroup, cpmISDNNoResource=cpmISDNNoResource, cpmDS0InterfaceIndex=cpmDS0InterfaceIndex, cpmTTYNumber=cpmTTYNumber, cpmDS0UsageEntry=cpmDS0UsageEntry, cpmChannelIndex=cpmChannelIndex, cpmDS1ActiveDS0s=cpmDS1ActiveDS0s, cpmSystemGroup=cpmSystemGroup, cpmTotalISDNSyncPPPCalls=cpmTotalISDNSyncPPPCalls, cpmDS0BusyoutAdminStatus=cpmDS0BusyoutAdminStatus, cpmDS0BusyoutSource=cpmDS0BusyoutSource, cpmPPPCalls=cpmPPPCalls, cpmV120Calls=cpmV120Calls, cpmDS0StatusEntry=cpmDS0StatusEntry, cpmActiveRemotePhoneNumber=cpmActiveRemotePhoneNumber, cpmCallHistorySummary=cpmCallHistorySummary, cpmActiveCallDuration=cpmActiveCallDuration, cpmCallHistorySummaryIndex=cpmCallHistorySummaryIndex, cpmDS0UsageGroupRev2=cpmDS0UsageGroupRev2, cpmDS0CallType=cpmDS0CallType, cpmCallType=cpmCallType, cpmActiveDS0sHighWaterMark=cpmActiveDS0sHighWaterMark, cpmEntryChannel=cpmEntryChannel, cpmModemPort=cpmModemPort, cpmComplianceRev2=cpmComplianceRev2, cpmNotificationGroupRev1=cpmNotificationGroupRev1, cpmDS1CurrentOutOfService=cpmDS1CurrentOutOfService, cPopMgmtMIBNotificationPrefix=cPopMgmtMIBNotificationPrefix, cpmISDNCfgActiveDChannels=cpmISDNCfgActiveDChannels, cpmCallStartTimeIndex=cpmCallStartTimeIndex, cpmDS1OutPackets=cpmDS1OutPackets, cpmDS1LoopbackNotifyConfigGroup=cpmDS1LoopbackNotifyConfigGroup, cpmDS0Status=cpmDS0Status, cpmUserIpAddr=cpmUserIpAddr, cpmActiveCallStartTimeIndex=cpmActiveCallStartTimeIndex, cpmDS1SlotIndex=cpmDS1SlotIndex, ciscoPopMgmtMIB=ciscoPopMgmtMIB, cpmCallDisconnectTimeIndex=cpmCallDisconnectTimeIndex, cpmCallHistorySummaryTableMaxLength=cpmCallHistorySummaryTableMaxLength, cpmTimeInUse=cpmTimeInUse, cpmActiveEntrySlot=cpmActiveEntrySlot, cpmCallVolumeGroup=cpmCallVolumeGroup, cpmCallVolSuccISDNDigital=cpmCallVolSuccISDNDigital, cpmAssociatedInterface=cpmAssociatedInterface, cpmMIBGroups=cpmMIBGroups, 
cpmSW56CfgBChannelsInUse=cpmSW56CfgBChannelsInUse, cpmDS0UsageTable=cpmDS0UsageTable, cpmMIBConformance=cpmMIBConformance, cpmCallCount=cpmCallCount, cpmDS0OperStatus=cpmDS0OperStatus, cpmDS0ConfigFunction=cpmDS0ConfigFunction, cpmCallHistorySummaryTable=cpmCallHistorySummaryTable, cpmCASCfgBChanInUseForVoice=cpmCASCfgBChanInUseForVoice, cpmDS0BusyoutStatus=cpmDS0BusyoutStatus, cpmISDNCfgBChannelAnalogCalls=cpmISDNCfgBChannelAnalogCalls, cpmDS1TotalDigitalCalls=cpmDS1TotalDigitalCalls, cpmActiveCallSummaryTable=cpmActiveCallSummaryTable, cpmDS0StatusTable=cpmDS0StatusTable, cpmActiveCallType=cpmActiveCallType, cpmDS1LoopbackNotifyEnable=cpmDS1LoopbackNotifyEnable, cpmDS1TotalCalls=cpmDS1TotalCalls, cpmL2Encapsulation=cpmL2Encapsulation, cpmDS0UsageGroupRev1=cpmDS0UsageGroupRev1, cpmActiveCallSummary=cpmActiveCallSummary, cpmDS1UsageSlotIndex=cpmDS1UsageSlotIndex, cpmRemotePhoneNumber=cpmRemotePhoneNumber, cpmCallFailureGroup=cpmCallFailureGroup, cpmDS0StatusGroup=cpmDS0StatusGroup, cpmActiveModemPort=cpmActiveModemPort, cpmDS1TotalAnalogCalls=cpmDS1TotalAnalogCalls, cpmMIBComplianceRev3=cpmMIBComplianceRev3, cpmMIBComplianceRev5=cpmMIBComplianceRev5, cpmActiveDS0s=cpmActiveDS0s, cpmEntrySlot=cpmEntrySlot, cpmCallHistorySummaryRetainTimer=cpmCallHistorySummaryRetainTimer, cpmActiveCallSummaryGroup=cpmActiveCallSummaryGroup, cpmInOctets=cpmInOctets, cpmDS1UsagePortIndex=cpmDS1UsagePortIndex, PYSNMP_MODULE_ID=ciscoPopMgmtMIB, cpmDS1InPackets=cpmDS1InPackets, cpmDS1CurrentBusyout=cpmDS1CurrentBusyout, cpmActiveLocalPhoneNumber=cpmActiveLocalPhoneNumber, cpmDS0BusyoutAllow=cpmDS0BusyoutAllow, cpmActiveUserID=cpmActiveUserID, cpmDS1LoopbackNotifyConfig=cpmDS1LoopbackNotifyConfig, cpmISDNCfgBChannelsInUse=cpmISDNCfgBChannelsInUse, cpmDS1PortIndex=cpmDS1PortIndex, cpmCallFailure=cpmCallFailure, cpmDS1ActiveDS0sHighWaterMark=cpmDS1ActiveDS0sHighWaterMark, cpmDS1CurrentIdle=cpmDS1CurrentIdle, cpmCallVolume=cpmCallVolume, cpmDS1UsageGroup=cpmDS1UsageGroup, 
cpmModemCallsRejected=cpmModemCallsRejected, cpmModemSlot=cpmModemSlot, cpmOutOctets=cpmOutOctets, cpmDS1DS0UsageTable=cpmDS1DS0UsageTable, cpmInPackets=cpmInPackets, cpmISDNCfgBChanInUseForAnalog=cpmISDNCfgBChanInUseForAnalog, cpmDS1LoopbackNotification=cpmDS1LoopbackNotification, cpmEntryPort=cpmEntryPort, cpmDS0BusyoutNotification=cpmDS0BusyoutNotification, cpmDS1TotalV120Calls=cpmDS1TotalV120Calls, cpmMIBComplianceRev4=cpmMIBComplianceRev4, cpmActiveEntryChannel=cpmActiveEntryChannel, cpmMIBCompliances=cpmMIBCompliances, cpmCallDuration=cpmCallDuration, cpmLocalPhoneNumber=cpmLocalPhoneNumber, cpmDS1OutOctets=cpmDS1OutOctets, cpmISDNCfgBChannelsTimeInUseAnlg=cpmISDNCfgBChannelsTimeInUseAnlg, cpmActiveEntryPort=cpmActiveEntryPort, cpmMIBComplianceRev1=cpmMIBComplianceRev1, cpmISDNCallsRejected=cpmISDNCallsRejected, cpmISDNCfgBChannelsTimeInUse=cpmISDNCfgBChannelsTimeInUse, cpmDS1TotalTimeInUse=cpmDS1TotalTimeInUse, cpmConfiguredType=cpmConfiguredType, cpmCallVolAnalogCallClearedNormally=cpmCallVolAnalogCallClearedNormally, cpmISDNCfgBChanInUseForVoice=cpmISDNCfgBChanInUseForVoice, cpmDS0BusyoutNotifyEnable=cpmDS0BusyoutNotifyEnable, cpmISDNCallsClearedAbnormally=cpmISDNCallsClearedAbnormally, cpmActiveModemSlot=cpmActiveModemSlot, cpmDS0UsageGroup=cpmDS0UsageGroup, cpmModemNoResource=cpmModemNoResource, cpmActiveCallSummaryEntry=cpmActiveCallSummaryEntry, cpmActiveTTYNumber=cpmActiveTTYNumber, cpmUserID=cpmUserID, cpmDS1InOctets=cpmDS1InOctets, cpmDS1DS0UsageEntry=cpmDS1DS0UsageEntry, cpmModemCallsClearedAbnormally=cpmModemCallsClearedAbnormally, cpmNotificationGroup=cpmNotificationGroup, ciscoPopMgmtMIBObjects=ciscoPopMgmtMIBObjects, cpmMIBCompliance=cpmMIBCompliance, cpmDS0BusyoutTime=cpmDS0BusyoutTime, cpmActiveUserIpAddr=cpmActiveUserIpAddr, cpmCallHistorySummaryEntry=cpmCallHistorySummaryEntry, cpmISDNCfgBChannelCalls=cpmISDNCfgBChannelCalls, cpmDS1TotalV110Calls=cpmDS1TotalV110Calls, cpmActiveCallSummaryIndex=cpmActiveCallSummaryIndex, 
cpmV110Calls=cpmV110Calls, cpmDS0Usage=cpmDS0Usage)
#
# PySNMP MIB module CISCO-POP-MGMT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-POP-MGMT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:09:39 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# ---------------------------------------------------------------------------
# Pull in base types and macros from prerequisite compiled MIB modules.
# mibBuilder is injected into this module's namespace by the pysnmp loader
# before the generated code is executed.
# ---------------------------------------------------------------------------
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection")
# Cisco enterprise OID root under which this MIB is registered.
ciscoExperiment, = mibBuilder.importSymbols("CISCO-SMI", "ciscoExperiment")
dsx1LineIndex, dsx1LineStatus = mibBuilder.importSymbols("DS1-MIB", "dsx1LineIndex", "dsx1LineStatus")
InterfaceIndexOrZero, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
NotificationType, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, TimeTicks, Gauge32, Counter64, ObjectIdentity, MibIdentifier, Integer32, Bits, Counter32, Unsigned32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "TimeTicks", "Gauge32", "Counter64", "ObjectIdentity", "MibIdentifier", "Integer32", "Bits", "Counter32", "Unsigned32", "ModuleIdentity")
DisplayString, TimeStamp, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TimeStamp", "TextualConvention", "TruthValue")
# ---------------------------------------------------------------------------
# MODULE-IDENTITY for CISCO-POP-MGMT-MIB, rooted at 1.3.6.1.4.1.9.10.19.
# loadTexts guards keep descriptive strings out of memory unless the builder
# was configured to load them; the version guard mirrors the one used for
# conformance groups (pysnmp <= 4.4.0 compatibility).
# ---------------------------------------------------------------------------
ciscoPopMgmtMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 10, 19))
ciscoPopMgmtMIB.setRevisions(('2005-12-21 00:00', '2002-12-26 00:00', '2000-11-29 00:00', '2000-03-03 00:00', '1998-02-02 00:00', '1997-10-21 00:00', '1997-05-01 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: ciscoPopMgmtMIB.setRevisionsDescriptions(('Imported dsx1LineIndex from DS1-MIB instead of RFC1406-MIB Changed SYNTAX of cpmDS1SlotIndex cpmDS1PortIndex cpmChannelIndex cpmDS1UsageSlotIndex cpmDS1UsagePortIndex cpmActiveCallSummaryIndex cpmCallHistorySummaryIndex cpmActiveModemSlot cpmActiveModemPort cpmActiveEntrySlot cpmActiveEntryPort cpmActiveEntryChannel from Integer32 to Integer32 (0..2147483647) Imported Unsigned32 from SNMPv2-SMI instead of CISCO-TC.', 'Added voice(6) to object cpmCallType. Added Notification Group cpmNotificationGroupRev1', 'Added cpmCallVolume with objects: cpmCallVolSuccISDNDigital cpmCallVolAnalogCallClearedNormally', 'Extended cpmDs1DS0UsageTable with objects: cpmDS1TotalAnalogCalls cpmDS1TotalDigitalCalls cpmDS1TotalV110Calls cpmDS1TotalV120Calls cpmDS1TotalCalls cpmDS1TotalTimeInUse cpmDS1CurrentIdle cpmDS1CurrentOutOfService cpmDS1CurrentBusyout cpmDS1InOctets cpmDS1OutOctets cpmDS1InPackets cpmDS1OutPackets Added system level summary objects: cpmISDNCfgActiveDChannels cpmISDNCfgBChannelsTimeInUse cpmISDNCfgBChannelsTimeInUseAnalog cpmISDNCfgBChannelCalls cpmISDNCfgBChannelAnalogCalls cpmTotalISDNSyncPPPCalls Added DS0StatusTable with objects: cpmDS0OperStatus cpmDS0BusyoutAdminStatus cpmDS0BusyoutAllow cpmDS0BusyoutStatus cpmDS0BusyoutSource cpmDS0BusyoutTime cpmDS0ConfigFunction cpmDS0InterfaceIndex Added busyout notification and notification enable object: cpmDS0BusyoutNotification cpmDS0BusyoutNotifyEnable cpmDS1LoopbackNotifyConfig cpmDS1LoopbackNotifyEnable cpmDS1LoopbackNotification ', 'Added objects: cpmISDNCfgBChanInUseForVoice cpmCASCfgBChanInUseForVoice Added enumeration-type: voice', 'Added objects: cpmDS1DS0UsageTable cpmActiveDS0sHighWaterMark cpmSW56CfgBChannelsInUse', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoPopMgmtMIB.setLastUpdated('200512210000Z')
if mibBuilder.loadTexts: ciscoPopMgmtMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoPopMgmtMIB.setContactInfo(' Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-apm@cisco.com')
if mibBuilder.loadTexts: ciscoPopMgmtMIB.setDescription('Cisco Point Of Presence Management MIB to provide DSX1 and DSX0 facilities management and call summaries.')
# ---------------------------------------------------------------------------
# OID subtree anchors under ciscoPopMgmtMIB (…9.10.19.1.x); the object
# definitions below hang off these identifiers.
# ---------------------------------------------------------------------------
ciscoPopMgmtMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1))
cpmDS0Usage = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1))
cpmCallFailure = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2))
cpmActiveCallSummary = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3))
cpmCallHistorySummary = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4))
cpmDS0Status = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5))
cpmDS1LoopbackNotifyConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 6))
cpmCallVolume = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 7))
# ---------------------------------------------------------------------------
# cpmDS0UsageTable: per-DS0 (timeslot) usage statistics, indexed by
# (slot, port, channel).  The index columns carry no max-access (not
# readable on their own, per SMIv2 convention for index objects).
# ---------------------------------------------------------------------------
cpmDS0UsageTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1), )
if mibBuilder.loadTexts: cpmDS0UsageTable.setStatus('current')
if mibBuilder.loadTexts: cpmDS0UsageTable.setDescription('The DS0 usage table is for hardware and software objects not used as interfaces, and not covered in rfc1213. These objects include analog calls coming over ISDN, Channelized T1, and Channelized E1. This table is created for every DS1 line in the device.')
# Conceptual row: one entry per DS0 timeslot of each DS1.
cpmDS0UsageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1), ).setIndexNames((0, "CISCO-POP-MGMT-MIB", "cpmDS1SlotIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmDS1PortIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmChannelIndex"))
if mibBuilder.loadTexts: cpmDS0UsageEntry.setStatus('current')
if mibBuilder.loadTexts: cpmDS0UsageEntry.setDescription('An entry in the DS0 Usage table.')
# Index columns (Integer32 restricted to 0..2147483647).
cpmDS1SlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmDS1SlotIndex.setStatus('current')
if mibBuilder.loadTexts: cpmDS1SlotIndex.setDescription('The slot index indicates the slot number on the device where the DS1 card resides.')
cpmDS1PortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmDS1PortIndex.setStatus('current')
if mibBuilder.loadTexts: cpmDS1PortIndex.setDescription('The port index indicates the port number of a specific DS1 on the DS1 card in the slot')
cpmChannelIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmChannelIndex.setStatus('current')
if mibBuilder.loadTexts: cpmChannelIndex.setDescription('The channel index that distinguishes the DS0 timeslot of the DS1 port. The range of the channel index is based on the number of T1/E1 channels: 1-24(T1) and 1-31(E1).')
# Read-only data columns: configured technology, current call type and
# layer-2 encapsulation (enumerations), plus cumulative counters.
cpmConfiguredType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("isdn", 2), ("ct1", 3), ("ce1", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmConfiguredType.setStatus('current')
if mibBuilder.loadTexts: cpmConfiguredType.setDescription('The configured technology for the channel: ISDN(2), Channelized T1 (3) or Channelized E1 (4).')
cpmDS0CallType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("idle", 1), ("unknown", 2), ("analog", 3), ("digital", 4), ("v110", 5), ("v120", 6), ("voice", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0CallType.setStatus('current')
if mibBuilder.loadTexts: cpmDS0CallType.setDescription('The type of the current call carried by this DS0. idle - This DS0 is currently idle. unknown - The data type of the call currently occupying this DS0 is not one of the types listed here. analog - The data type of the call currently occupying this DS0 is analog, i.e. a modem call. digital - The data type of the call currently occupying this DS0 is digital. v110 - The call currently occupying this DS0 is a V110 call. v120 - The call currently occupying this DS0 is a V120 call. voice - The call currently occupying this DS0 is a voice call.')
cpmL2Encapsulation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("idle", 1), ("unknown", 2), ("ppp", 3), ("slip", 4), ("arap", 5), ("hdlc", 6), ("exec", 7), ("voice", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmL2Encapsulation.setStatus('current')
if mibBuilder.loadTexts: cpmL2Encapsulation.setDescription('The data link encapsulation of the call currently occuppying this DS0: idle - This DS0 is currently idle. unknown - The encapsulation of the currently active call on this DS0 is not one of the options following. PPP - slip - arap - hdlc - exec - voice - voice encapsulation; IANA type voiceEncap(103)')
cpmCallCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCallCount.setStatus('current')
if mibBuilder.loadTexts: cpmCallCount.setDescription('The number of calls that have occupied this DS0.')
cpmTimeInUse = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 8), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmTimeInUse.setStatus('current')
if mibBuilder.loadTexts: cpmTimeInUse.setDescription('The amount of time that this DS0 has been in use. This is computed by summing up the call durations of all past calls that have occupied this DS0.')
cpmInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmInOctets.setStatus('current')
if mibBuilder.loadTexts: cpmInOctets.setDescription("The total number of octets received on this DS0 for data calls (cpmDS0CallType has the value 'analog(3)' or 'digital(4)' or 'v110(5)' or 'v120(6)'). All the received 'raw' octets are counted, including any protocol headers which may or may not be present, depending on the service type of data call.")
cpmOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmOutOctets.setStatus('current')
if mibBuilder.loadTexts: cpmOutOctets.setDescription("The total number of octets transmitted on this DS0 for data calls (cpmDS0CallType has the value 'analog(3)' or 'digital(4)' or 'v110(5)' or 'v120(6)'). All the transmitted 'raw' octets are counted, including any protocol headers which may or may not be present, depending on the service type of data call.")
cpmInPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmInPackets.setStatus('current')
if mibBuilder.loadTexts: cpmInPackets.setDescription("The total number of packets received on this DS0 for data calls (cpmDS0CallTyp has the value 'analog(3)' or 'digital(4)' or 'v110(5)' or 'v120(6)').")
cpmOutPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmOutPackets.setStatus('current')
if mibBuilder.loadTexts: cpmOutPackets.setDescription("The total number of packets transmitted on this DS0 for data calls (cpmDS0CallTyp has the value 'analog(3)' or 'digital(4)' or 'v110(5)' or 'v120(6)').")
# Cross-reference into the standard ifTable (0 when the channel is idle).
cpmAssociatedInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 1, 1, 13), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmAssociatedInterface.setStatus('current')
if mibBuilder.loadTexts: cpmAssociatedInterface.setDescription('This is the value of ifIndex when the specific channel has an active call with a corresponding interface in the ifTable. For example, a digital ISDN call has a value pointing to the B-Channel entry in the ifTable. A modem call over ISDN or CT1/CE1 has a value pointing to the async interface of the modem assigned to this call. If the channel is idle, this value is 0.')
# ---------------------------------------------------------------------------
# System-wide DS0 usage scalars (read-only Gauge32/Counter32 values under
# the cpmDS0Usage subtree).
# ---------------------------------------------------------------------------
cpmISDNCfgBChanInUseForAnalog = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChanInUseForAnalog.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChanInUseForAnalog.setDescription('The number of configured ISDN B-Channels that are currently occupied by analog calls.')
cpmISDNCfgBChannelsInUse = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChannelsInUse.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChannelsInUse.setDescription('The number of configured ISDN B-Channels that are currently occupied by both Digital and Analog calls.')
cpmActiveDS0s = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveDS0s.setStatus('current')
if mibBuilder.loadTexts: cpmActiveDS0s.setDescription('The number of DS0s that are currently in use.')
# Current active-call gauges broken out by protocol.
cpmPPPCalls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmPPPCalls.setStatus('current')
if mibBuilder.loadTexts: cpmPPPCalls.setDescription('The current number of active PPP calls received by the managed device')
cpmV120Calls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmV120Calls.setStatus('current')
if mibBuilder.loadTexts: cpmV120Calls.setDescription('The current number of active V.120 calls received by the managed device')
cpmV110Calls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmV110Calls.setStatus('current')
if mibBuilder.loadTexts: cpmV110Calls.setDescription('The current number of active V.110 calls received by the managed device')
cpmActiveDS0sHighWaterMark = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveDS0sHighWaterMark.setStatus('current')
if mibBuilder.loadTexts: cpmActiveDS0sHighWaterMark.setDescription('The high water mark for number of DS0s that are active simultaneously')
# ---------------------------------------------------------------------------
# cpmDS1DS0UsageTable: per-DS1 aggregate timeslot usage, indexed by
# (slot, port).  Counters carry engineering units via setUnits().
# ---------------------------------------------------------------------------
cpmDS1DS0UsageTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9), )
if mibBuilder.loadTexts: cpmDS1DS0UsageTable.setStatus('current')
if mibBuilder.loadTexts: cpmDS1DS0UsageTable.setDescription('The DS1-DS0-usage table is for hardware and software objects not used as interfaces, and not covered in rfc1213. These objects provide information on timeslots usage for a particular DS1.')
cpmDS1DS0UsageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1), ).setIndexNames((0, "CISCO-POP-MGMT-MIB", "cpmDS1UsageSlotIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmDS1UsagePortIndex"))
if mibBuilder.loadTexts: cpmDS1DS0UsageEntry.setStatus('current')
if mibBuilder.loadTexts: cpmDS1DS0UsageEntry.setDescription('An entry in the DS0 Usage table.')
# Index columns.
cpmDS1UsageSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmDS1UsageSlotIndex.setStatus('current')
if mibBuilder.loadTexts: cpmDS1UsageSlotIndex.setDescription('The slot index indicates the slot number on the device where the DS1 card resides. The slot index of the first slot is starting from 0 and increment by 1 for the next slot in the device.')
cpmDS1UsagePortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmDS1UsagePortIndex.setStatus('current')
if mibBuilder.loadTexts: cpmDS1UsagePortIndex.setDescription('The port index indicates the port number of a specific DS1 on the DS1 card in the slot. The port index of the first port is starting from 0 and increment by 1 for the next port on the DS1 card.')
# Current utilisation gauges.
cpmDS1ActiveDS0s = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1ActiveDS0s.setStatus('current')
if mibBuilder.loadTexts: cpmDS1ActiveDS0s.setDescription('The number of DS0s that are currently in use for a particular DS1.')
cpmDS1ActiveDS0sHighWaterMark = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1ActiveDS0sHighWaterMark.setStatus('current')
if mibBuilder.loadTexts: cpmDS1ActiveDS0sHighWaterMark.setDescription('The highest number of simultaneously actived DS0s on a specified DS1.')
# Cumulative per-call-type counters since system startup.
cpmDS1TotalAnalogCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 5), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalAnalogCalls.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalAnalogCalls.setDescription("The accumulated number of analog data calls (cpmDS0CallType has the value 'analog(3)') on all ds0s within this DS1 since system startup. The object includes active and terminated calls.")
cpmDS1TotalDigitalCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 6), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalDigitalCalls.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalDigitalCalls.setDescription("The accumulated number of digital data calls (cpmDS0CallType has the value 'digital(4)') on all ds0s within this DS1 since system startup. The object includes active and terminated calls.")
cpmDS1TotalV110Calls = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 7), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalV110Calls.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalV110Calls.setDescription("The accumulated number of v.110 data calls (cpmDS0CallType has the value 'v110(5)') on all ds0s within this DS1 since system startup. The object includes active and terminated calls.")
cpmDS1TotalV120Calls = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 8), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalV120Calls.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalV120Calls.setDescription("The accumulated number of v.120 data calls (cpmDS0CallType has the value 'v120(6)') on all ds0s within this DS1 since system startup. The object includes active and terminated calls.")
cpmDS1TotalCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 9), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalCalls.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalCalls.setDescription('The total number of calls on all the DS0s within this DS1 since last system re-initialization. The object includes active and terminated calls. This only includes DS0s configured as bearer channels, not those configured for signaling, such as PRI D-channel.')
cpmDS1TotalTimeInUse = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 10), Unsigned32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1TotalTimeInUse.setStatus('current')
if mibBuilder.loadTexts: cpmDS1TotalTimeInUse.setDescription('The total call duration on all the DS0s within this DS1 since last system re-initialization. This only includes DS0s configured as bearer channels, not those configured for signaling, such as PRI D-channel.')
# Current channel-state gauges (idle / out-of-service / busyout pending).
cpmDS1CurrentIdle = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1CurrentIdle.setStatus('current')
if mibBuilder.loadTexts: cpmDS1CurrentIdle.setDescription('The total number of DS0s currently in idle state within this DS1. This only includes DS0s configured as bearer and CAS channels, not those configured for signaling, such as PRI D-channel. See cpmDS0OperStatus in the cpmDS0StatusTable for individual DS0s.')
cpmDS1CurrentOutOfService = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1CurrentOutOfService.setStatus('current')
if mibBuilder.loadTexts: cpmDS1CurrentOutOfService.setDescription('The total number of DS0s in this DS1 currently placed out of service. This only includes DS0s configured as bearer and CAS channels, not those configured for signaling, such as PRI D-channel. See cpmDS0OperStatus in the cpmDS0StatusTable for individual DS0s.')
cpmDS1CurrentBusyout = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1CurrentBusyout.setStatus('current')
if mibBuilder.loadTexts: cpmDS1CurrentBusyout.setDescription('The total number of DS0s in this DS1 which currently have been requested to be busied out, but are not yet out of service. When an active call on a busied out DS0 terminates, the DS0 will be out of service, this object will be decremented and cpmDS1TotalOutOfService will be incremented. This only includes DS0s configured as bearer channels, not those configured for signaling, such as PRI D-channel. See cpmDS0AdminStatus in the cpmDS0StatusTable for individual DS0s.')
# Aggregated traffic counters (sums of the per-DS0 columns above).
cpmDS1InOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 14), Counter32()).setUnits('octets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1InOctets.setStatus('current')
if mibBuilder.loadTexts: cpmDS1InOctets.setDescription('The total number of octets received over all the DS0 bearer channels within this DS1 since last system re-initialization. This value is a sum of all the cpmInOctets objects in the cpmDS0UsageTable for this DS1.')
cpmDS1OutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 15), Counter32()).setUnits('octets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1OutOctets.setStatus('current')
if mibBuilder.loadTexts: cpmDS1OutOctets.setDescription('The total number of octets transmitted over all the DS0 bearer channels within this DS1 since last system re-initialization. This value is a sum of all the cpmOutOctets objects in the cpmDS0UsageTable for this DS1.')
cpmDS1InPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 16), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1InPackets.setStatus('current')
if mibBuilder.loadTexts: cpmDS1InPackets.setDescription('The total number of data packets received over all the DS0 bearer channels within this DS1 since last system re-initialization. This value is a sum of all the cpmInPackets objects in the cpmDS0UsageTable for this DS1. It consists of PPP or PCM voice data packets.')
cpmDS1OutPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 9, 1, 17), Counter32()).setUnits('packets').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS1OutPackets.setStatus('current')
if mibBuilder.loadTexts: cpmDS1OutPackets.setDescription('The total number of data packets transmitted over all the DS0 bearer channels within this DS1 since last system re-initialization. This value is a sum of all the cpmOutPackets objects in the cpmDS0UsageTable for this DS1. It consists of PPP or PCM voice data packets.')
# --- System-wide channel usage scalars (OID subtree 1.3.6.1.4.1.9.10.19.1.1) ---
# Gauges for currently-occupied SW56/ISDN/CAS channels and counters for
# cumulative B-channel call time and call counts since re-initialization.
cpmSW56CfgBChannelsInUse = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmSW56CfgBChannelsInUse.setStatus('current')
if mibBuilder.loadTexts: cpmSW56CfgBChannelsInUse.setDescription('The number of configured SW56 B-Channels that are currently occupied by Digital calls.')
cpmISDNCfgBChanInUseForVoice = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChanInUseForVoice.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChanInUseForVoice.setDescription('The number of configured ISDN B-Channels that are currently occupied by Voice calls.')
cpmCASCfgBChanInUseForVoice = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCASCfgBChanInUseForVoice.setStatus('current')
if mibBuilder.loadTexts: cpmCASCfgBChanInUseForVoice.setDescription('The number of configured CAS Channels that are currently occupied by Voice calls.')
cpmISDNCfgActiveDChannels = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 13), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgActiveDChannels.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgActiveDChannels.setDescription('The number of ISDN signaling channels which are active.')
cpmISDNCfgBChannelsTimeInUse = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 14), Counter32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChannelsTimeInUse.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChannelsTimeInUse.setDescription('The total call duration on all the ISDN B-channels since last system re-initialization.')
cpmISDNCfgBChannelsTimeInUseAnlg = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 15), Counter32()).setUnits('seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChannelsTimeInUseAnlg.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChannelsTimeInUseAnlg.setDescription('The total analog call duration on all the ISDN B-channels since last system re-initialization. Analog call type is identified in cpmDS0CallType.')
cpmISDNCfgBChannelCalls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 16), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChannelCalls.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChannelCalls.setDescription('The total number of calls on all the ISDN B-channels since last system re-initialization.')
cpmISDNCfgBChannelAnalogCalls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 17), Counter32()).setUnits('calls').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCfgBChannelAnalogCalls.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCfgBChannelAnalogCalls.setDescription('The total number of analog calls on all the ISDN B-channels since last system re-initialization. Analog call type is identified in cpmDS0CallType.')
cpmTotalISDNSyncPPPCalls = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmTotalISDNSyncPPPCalls.setStatus('current')
if mibBuilder.loadTexts: cpmTotalISDNSyncPPPCalls.setDescription('The total number of ISDN-sync PPP calls received by the managed device since last system re-initialization.')
# --- DS0 busyout control and cpmDS0StatusTable (subtree ...19.1.5) ---
# cpmDS0BusyoutNotifyEnable gates cpmDS0BusyoutNotification generation.
# cpmDS0StatusTable AUGMENTS cpmDS0UsageEntry (see registerAugmentions /
# getIndexNames below): one row per configured DS0, adding oper/busyout state.
cpmDS0BusyoutNotifyEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpmDS0BusyoutNotifyEnable.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutNotifyEnable.setDescription("This variable controls generation of cpmDS0BusyoutNotification. When this variable is 'true(1)', generation of cpmDS0BusyoutNotification is enabled. When this variable is 'false(2)', generation of cpmDS0BusyoutNotification is disabled. The default value is 'false(2)'. ")
cpmDS0StatusTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2), )
if mibBuilder.loadTexts: cpmDS0StatusTable.setStatus('current')
if mibBuilder.loadTexts: cpmDS0StatusTable.setDescription('The DS0 Status Table provides additional DS0 level information and configuration. This table is an extension of the cpmDS0UsageTable. ')
cpmDS0StatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1), )
# AUGMENTS relationship: the status row shares the usage row's indexes.
cpmDS0UsageEntry.registerAugmentions(("CISCO-POP-MGMT-MIB", "cpmDS0StatusEntry"))
cpmDS0StatusEntry.setIndexNames(*cpmDS0UsageEntry.getIndexNames())
if mibBuilder.loadTexts: cpmDS0StatusEntry.setStatus('current')
if mibBuilder.loadTexts: cpmDS0StatusEntry.setDescription('Status for an individual DS0. This entry AUGMENTS the cpmDS0UsageEntry. An entry exists for each configured DS0 in the system. ')
cpmDS0OperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("down", 2), ("idle", 3), ("setup", 4), ("connected", 5), ("test", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0OperStatus.setStatus('current')
if mibBuilder.loadTexts: cpmDS0OperStatus.setDescription('The operational state of the DSO. This object provides more detailed information than the IF-MIB ifOperStatus. unknown - The DSO is in an unknown state down - The DS0 is out of service idle - This DS0 is currently idle setup - A new connection is in the process of being established connected - A active connection exists test - The DSO is undergoing internal testing ')
cpmDS0BusyoutAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noBusyout", 1), ("busyout", 2), ("busyoutImmediate", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpmDS0BusyoutAdminStatus.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutAdminStatus.setDescription('The administratively requested busyout state of the DSO. This object represents the desired busyout state of the DS0, which may or may not be reflected in the operation state, cpmDS0OperStatus. This object may be set via SNMP or the system console. noBusyout - No administrative busyout has been requested busyout - The DS0 will be shutdown when it next transitions to idle(2) cpmDS0OperStatus busyoutImmediate - Immediately transition the DS0 to down(2), terminating any current activity The effect of setting this object to busyout or busyoutImmediate is constrained by the value of cpmDS0BusyoutAllowed. If cpmDS0BusyoutAllowed has a value of false, attempting to set this object to busyout or busyoutImmediate will fail. This object provides more busyout information not available from the IF-MIB ifAdminStatus. This object reflects busyout requests configured on the local system as defined for the object cpmDS0BusyoutSource. ')
cpmDS0BusyoutAllow = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0BusyoutAllow.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutAllow.setDescription('The object indicates if busyout requests for this DSO will be allowed. true - Setting cpmDS0BusyoutAdminStatus to busyout(2) or busyoutImmediate(3) will be accepted false - Busyout requests for this DS0 are not allowed The default value is true(1). If cpmDS0BusyoutAllowed has a value of false(2), attempting to set cpmDS0BusyoutAdminStatus to busyout or busyoutImmediate will fail. This object only constrains setting the value of cpmDS0BusyoutAdminStatus when cpmDS0BusyoutAdminStatus is set via SNMP. ')
cpmDS0BusyoutStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noBusyout", 1), ("busyoutPending", 2), ("busiedOut", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0BusyoutStatus.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutStatus.setDescription('The busyout status of the DSO. noBusyout - The DS0 is not busied out nor has a busyout pending. busyoutPending - The DS0 has a busyout request pending. It will shutdown when it next transitions to idle(2) cpmDS0OperStatus. busiedOut - The DS0 is out of service due to a busyout request. This object is needed to reflect busyouts initiated other than administrativly, as described in cpmDS0BusyoutSource. ')
cpmDS0BusyoutSource = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("local", 2), ("internal", 3), ("remote", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0BusyoutSource.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutSource.setDescription('The source of the busyout request either pending or active for the DSO. none - The DS0 is not busied out nor has a busyout request pending local - The DS0 is busied out or has a pending request due to an adminstrative command issued locally. internal - The DS0 is busied out or has a pending request due to internal system processing. remote - The DS0 is busied out or has a pending request due to a request from the peer switch. ')
cpmDS0BusyoutTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 6), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0BusyoutTime.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutTime.setDescription('Value of sysUpTime when the most recent busyout request was issued for this DS0. The value is zero indicates no busyout request has been issued. ')
cpmDS0ConfigFunction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("unknown", 1), ("t1CcsSignallingChan", 2), ("t1CcsBearerChan", 3), ("e1CcsSignallingChan", 4), ("e1CcsBearerChan", 5), ("t1CasChan", 6), ("e1CasChan", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0ConfigFunction.setStatus('current')
if mibBuilder.loadTexts: cpmDS0ConfigFunction.setDescription('The object indicates the function this DSO is configured to provide. This object provides more detail than cpmConfiguredType. unknown - Unknown configuration t1SignallingChan - DS0 is configured as a T1 common channel signalling (CCS) such as ISDN PRI D-channel t1CcsBearerChan - DS0 is configured as a T1 common channel signalling (CCS) bearer channel e1SignallingChan - DS0 is configured as a E1 common channel signalling (CCS) such as ISDN PRI D-channel e1CcsBearerChan - DS0 is configured as a E1 common channel signalling (CCS) bearer channel t1CasChan - DS0 is configured as a T1 channel associated signalling channel e1CasChan - DS0 is configured as a E1 channel associated signalling channel ')
cpmDS0InterfaceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 5, 2, 1, 8), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmDS0InterfaceIndex.setStatus('current')
if mibBuilder.loadTexts: cpmDS0InterfaceIndex.setDescription('This is the value of ifIndex in the ifTable for this DS0.')
# --- Call-failure counters (subtree ...19.1.2) ---
# Rejected / abnormally-cleared / no-resource counters, split by ISDN vs modem.
cpmISDNCallsRejected = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCallsRejected.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCallsRejected.setDescription('The number of rejected ISDN calls in this managed device.')
cpmModemCallsRejected = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmModemCallsRejected.setStatus('current')
if mibBuilder.loadTexts: cpmModemCallsRejected.setDescription('The number of rejected modem calls in this managed device.')
cpmISDNCallsClearedAbnormally = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNCallsClearedAbnormally.setStatus('current')
if mibBuilder.loadTexts: cpmISDNCallsClearedAbnormally.setDescription('The number of connected ISDN calls that have been abnormally cleared, that is, they were cleared by some event other than the following: a - The transmission of a normal disconnect message by the local end. b - The reception of a normal disconnect message from the remote end.')
cpmModemCallsClearedAbnormally = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmModemCallsClearedAbnormally.setStatus('current')
if mibBuilder.loadTexts: cpmModemCallsClearedAbnormally.setDescription('The number of connected modem calls that have been abnormally cleared, that is, they were not cleared with the proper modem protocol handshakes.')
cpmISDNNoResource = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmISDNNoResource.setStatus('current')
if mibBuilder.loadTexts: cpmISDNNoResource.setDescription('The number of ISDN calls that have been rejected because there is no B-Channel available to handle the call.')
cpmModemNoResource = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 2, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmModemNoResource.setStatus('current')
if mibBuilder.loadTexts: cpmModemNoResource.setDescription('The number of modem calls that have been rejected because there is no modem available to handle the call.')
# --- Call-volume counters (subtree ...19.1.7): successful digital/analog calls. ---
cpmCallVolSuccISDNDigital = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 7, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCallVolSuccISDNDigital.setStatus('current')
if mibBuilder.loadTexts: cpmCallVolSuccISDNDigital.setDescription("The number of incoming and outgoing successful ISDN digital data calls (cpmDS0CallType has the value 'digital(4)'), since system startup. A successful call is a terminated call that has a disconnect cause of 'normal call clearing(16)'.")
cpmCallVolAnalogCallClearedNormally = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 7, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCallVolAnalogCallClearedNormally.setStatus('current')
if mibBuilder.loadTexts: cpmCallVolAnalogCallClearedNormally.setDescription("The number of incoming and outgoing successful analog data calls which use modem resource (cpmDS0CallType has the value 'analog(3)'), since system startup. A successful call is a terminated call who is cleared by the proper modem protocol handshake.")
# --- cpmActiveCallSummaryTable (subtree ...19.1.3.1) ---
# One row per currently-active call, indexed by (start time, disambiguating
# integer). Columns cover user identity, call type, modem slot/port, DS1
# slot/port/channel, phone numbers and TTY.
cpmActiveCallSummaryTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1), )
if mibBuilder.loadTexts: cpmActiveCallSummaryTable.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallSummaryTable.setDescription('The active call summary table is needed to track currently active calls. This table is needed because modem calls are not in CALL-HISTORY-MIB, and the DIAL-CONTROL_MIB is not yet available.')
cpmActiveCallSummaryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-POP-MGMT-MIB", "cpmActiveCallStartTimeIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryIndex"))
if mibBuilder.loadTexts: cpmActiveCallSummaryEntry.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallSummaryEntry.setDescription('An entry in the Call Summary table.')
cpmActiveCallStartTimeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 1), TimeStamp())
if mibBuilder.loadTexts: cpmActiveCallStartTimeIndex.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallStartTimeIndex.setDescription('The start time of the current call.')
cpmActiveCallSummaryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmActiveCallSummaryIndex.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallSummaryIndex.setDescription('Arbitrary small integer to distinguish calls that occured at the same time tick.')
cpmActiveUserID = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveUserID.setStatus('current')
if mibBuilder.loadTexts: cpmActiveUserID.setDescription('The User login ID or zero length string if unavailable.')
cpmActiveUserIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveUserIpAddr.setStatus('current')
if mibBuilder.loadTexts: cpmActiveUserIpAddr.setDescription('The IP address of the call or 0.0.0.0 if unavailable. This object comes directly from IOS.')
cpmActiveCallType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("analog", 2), ("digital", 3), ("v110", 4), ("v120", 5), ("voice", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveCallType.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallType.setDescription('The call type: unknown - None of the following analog - Modem call digital - Digital call v110 - V110 Call v120 - V120 Call voice - Voice Call')
cpmActiveModemSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveModemSlot.setStatus('current')
if mibBuilder.loadTexts: cpmActiveModemSlot.setDescription('Identification of modem resource allocated to call. This is the value of cmSlotIndex from CISCO-MODEM-MGMT-MIB. This value along with cpmActiveModemPort uniquely identifies a modem. Value is -1 if call does not utilize a modem.')
cpmActiveModemPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveModemPort.setStatus('current')
if mibBuilder.loadTexts: cpmActiveModemPort.setDescription('Identification of modem resource allocated to call. This is the value of cmPortIndex from CISCO-MODEM-MGMT-MIB. This value along with cpmActiveModemSlot uniquely identifies a modem. Value is -1 if call does not utilize a modem.')
cpmActiveCallDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 8), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveCallDuration.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallDuration.setDescription('The duration of the current call.')
cpmActiveEntrySlot = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveEntrySlot.setStatus('current')
if mibBuilder.loadTexts: cpmActiveEntrySlot.setDescription('The logical slot in which the DS1 line that the user connected on resides.')
cpmActiveEntryPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveEntryPort.setStatus('current')
if mibBuilder.loadTexts: cpmActiveEntryPort.setDescription('The logical port for the DS1 line that the user connected on.')
cpmActiveEntryChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveEntryChannel.setStatus('current')
if mibBuilder.loadTexts: cpmActiveEntryChannel.setDescription('The channel within the DS1 that is allocated to the call.')
cpmActiveRemotePhoneNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveRemotePhoneNumber.setStatus('current')
if mibBuilder.loadTexts: cpmActiveRemotePhoneNumber.setDescription('The remote telephone number. For a call into the device, this is the originating number. For a call out of the device, this is the dialed number. If not available the string length is zero.')
cpmActiveLocalPhoneNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveLocalPhoneNumber.setStatus('current')
if mibBuilder.loadTexts: cpmActiveLocalPhoneNumber.setDescription('The local telephone number. For a call into the device, this is the dialed number. For a call out of the device, this is the originating number. If not available the string length is zero.')
cpmActiveTTYNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 3, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmActiveTTYNumber.setStatus('current')
if mibBuilder.loadTexts: cpmActiveTTYNumber.setDescription("The TTY number associated with this call. This information comes from the IOS 'show line' command.")
# --- Call-history tunables and cpmCallHistorySummaryTable (subtree ...19.1.4) ---
# Read-write scalars bound the history table size and retention time; the table
# mirrors the active-call columns for terminated calls, indexed by
# (disconnect time, start time, disambiguating integer).
cpmCallHistorySummaryTableMaxLength = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 500)).clone(100)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpmCallHistorySummaryTableMaxLength.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryTableMaxLength.setDescription('The upper limit on the number of entries that the cpmCallHistoryTable may contain. A value of 0 will prevent any history from being retained. When this table is full, the oldest entry will be deleted and the new one will be created.')
cpmCallHistorySummaryRetainTimer = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 500)).clone(15)).setUnits('minutes').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpmCallHistorySummaryRetainTimer.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryRetainTimer.setDescription('The minimum amount of time that an cpmCallHistoryEntry will be maintained before being deleted. A value of 0 will prevent any history from being retained, but will not prevent callCompletion traps being genarated.')
cpmCallHistorySummaryTable = MibTable((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3), )
if mibBuilder.loadTexts: cpmCallHistorySummaryTable.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryTable.setDescription('The call history summary table is needed to track ended active calls. This table is needed because modem calls are not in CALL-HISTORY-MIB; the CISCO-ISDN-MIB objects do not contain information about the active calls; and the DIAL-CONTROL_MIB is not yet available.')
cpmCallHistorySummaryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1), ).setIndexNames((0, "CISCO-POP-MGMT-MIB", "cpmCallDisconnectTimeIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmCallStartTimeIndex"), (0, "CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryIndex"))
if mibBuilder.loadTexts: cpmCallHistorySummaryEntry.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryEntry.setDescription('An entry in the Call Summary table.')
cpmCallDisconnectTimeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 1), TimeStamp())
if mibBuilder.loadTexts: cpmCallDisconnectTimeIndex.setStatus('current')
if mibBuilder.loadTexts: cpmCallDisconnectTimeIndex.setDescription('The disconnect time of the call.')
cpmCallStartTimeIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 2), TimeStamp())
if mibBuilder.loadTexts: cpmCallStartTimeIndex.setStatus('current')
if mibBuilder.loadTexts: cpmCallStartTimeIndex.setDescription('The start time of the current call.')
cpmCallHistorySummaryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: cpmCallHistorySummaryIndex.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryIndex.setDescription('Arbitrary small integer to distinguish calls that occured at the same time tick.')
cpmUserID = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmUserID.setStatus('current')
if mibBuilder.loadTexts: cpmUserID.setDescription('The User login ID or zero length string if unavailable. This object comes directly from IOS.')
cpmUserIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmUserIpAddr.setStatus('current')
if mibBuilder.loadTexts: cpmUserIpAddr.setDescription('The IP address of the call or 0.0.0.0 if unavailable. This object comes directly from IOS.')
cpmCallType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("analog", 2), ("digital", 3), ("v110", 4), ("v120", 5), ("voice", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCallType.setStatus('current')
if mibBuilder.loadTexts: cpmCallType.setDescription('The call type: unknown - None of the following analog - Modem call digital - Digital call v110 - V110 Call v120 - V120 Call voice - Voice Call')
cpmModemSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmModemSlot.setStatus('current')
if mibBuilder.loadTexts: cpmModemSlot.setDescription('Identification of modem resource allocated to call. This is the value of cmSlotIndex from CISCO-MODEM-MGMT-MIB. This value along with cpmModemPort uniquely identifies a modem. Value is -1 if call does not utilize a modem.')
cpmModemPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmModemPort.setStatus('current')
if mibBuilder.loadTexts: cpmModemPort.setDescription('Identification of modem resource allocated to call. This is the value of cmPortIndex from CISCO-MODEM-MGMT-MIB. This value along with cpmModemSlot uniquely identifies a modem. Value is -1 if call does not utilize a modem.')
cpmCallDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 9), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmCallDuration.setStatus('current')
if mibBuilder.loadTexts: cpmCallDuration.setDescription('The duration of the current call.')
cpmEntrySlot = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmEntrySlot.setStatus('current')
if mibBuilder.loadTexts: cpmEntrySlot.setDescription('The physical system slot in which the DS1 line that the user connected on resides.')
cpmEntryPort = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmEntryPort.setStatus('current')
if mibBuilder.loadTexts: cpmEntryPort.setDescription('The logical port for the DS1 line that the user connected on.')
cpmEntryChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmEntryChannel.setStatus('current')
if mibBuilder.loadTexts: cpmEntryChannel.setDescription('The channel within the DS1 that is allocated to the call.')
cpmRemotePhoneNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmRemotePhoneNumber.setStatus('current')
if mibBuilder.loadTexts: cpmRemotePhoneNumber.setDescription('The remote telephone number. For a call into the device, this is the originating number. For a call out of the device, this is the dialed number. If not available the string length is zero.')
cpmLocalPhoneNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmLocalPhoneNumber.setStatus('current')
if mibBuilder.loadTexts: cpmLocalPhoneNumber.setDescription('The local telephone number. For a call into the device, this is the dialed number. For a call out of the device, this is the originating number. If not available the string length is zero.')
cpmTTYNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 4, 3, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpmTTYNumber.setStatus('current')
if mibBuilder.loadTexts: cpmTTYNumber.setDescription("The TTY number associated with this call. This information comes from the IOS 'show line' command.")
# --- Notification control and notification definitions (subtree ...19.2) ---
# cpmDS1LoopbackNotifyEnable gates cpmDS1LoopbackNotification; the two
# NotificationType objects bind their varbind lists (DS0 busyout state,
# DS1 line status from DS1-MIB).
cpmDS1LoopbackNotifyEnable = MibScalar((1, 3, 6, 1, 4, 1, 9, 10, 19, 1, 6, 1), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpmDS1LoopbackNotifyEnable.setStatus('current')
if mibBuilder.loadTexts: cpmDS1LoopbackNotifyEnable.setDescription("This variable controls the generation of cpmDS1LoopbackNotification When this variable is 'true(1)', generation of these notification is enabled. When this variable is 'false(2)', generation is disabled The default value is 'false(2)'. ")
cPopMgmtMIBNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 2))
cpmNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 2, 0))
cpmDS0BusyoutNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 10, 19, 2, 0, 1)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutStatus"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutTime"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutSource"), ("CISCO-POP-MGMT-MIB", "cpmDS0InterfaceIndex"))
if mibBuilder.loadTexts: cpmDS0BusyoutNotification.setStatus('current')
if mibBuilder.loadTexts: cpmDS0BusyoutNotification.setDescription('This notification is sent when there is a state change in cpmDS0BusyoutStatus object.')
cpmDS1LoopbackNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 10, 19, 2, 0, 2)).setObjects(("DS1-MIB", "dsx1LineStatus"), ("DS1-MIB", "dsx1LineIndex"))
if mibBuilder.loadTexts: cpmDS1LoopbackNotification.setStatus('current')
if mibBuilder.loadTexts: cpmDS1LoopbackNotification.setDescription('This notification is sent when there is a state change in dsx1LineStatus object of RFC1406-MIB.')
# OID anchors for the conformance subtree of this MIB
# (...9.10.19.3 under the enterprise arc visible in the tuples below).
cpmMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 3))
# Container node for the ModuleCompliance statements defined below (...19.3.1).
cpmMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1))
# Container node for the Object/Notification groups defined below (...19.3.2).
cpmMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2))
cpmMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 1)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmMIBCompliance = cpmMIBCompliance.setStatus('obsolete')
if mibBuilder.loadTexts: cpmMIBCompliance.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
cpmMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 2)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroupRev1"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmMIBComplianceRev1 = cpmMIBComplianceRev1.setStatus('obsolete')
if mibBuilder.loadTexts: cpmMIBComplianceRev1.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
cpmComplianceRev2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 3)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroupRev2"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmComplianceRev2 = cpmComplianceRev2.setStatus('obsolete')
if mibBuilder.loadTexts: cpmComplianceRev2.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
cpmMIBComplianceRev3 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 4)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroupRev2"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS0StatusGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS1UsageGroup"), ("CISCO-POP-MGMT-MIB", "cpmSystemGroup"), ("CISCO-POP-MGMT-MIB", "cpmNotificationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmMIBComplianceRev3 = cpmMIBComplianceRev3.setStatus('deprecated')
if mibBuilder.loadTexts: cpmMIBComplianceRev3.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
cpmMIBComplianceRev4 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 5)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroupRev2"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallVolumeGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS0StatusGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS1UsageGroup"), ("CISCO-POP-MGMT-MIB", "cpmSystemGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmMIBComplianceRev4 = cpmMIBComplianceRev4.setStatus('deprecated')
if mibBuilder.loadTexts: cpmMIBComplianceRev4.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
cpmMIBComplianceRev5 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 1, 6)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0UsageGroupRev2"), ("CISCO-POP-MGMT-MIB", "cpmCallFailureGroup"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallSummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryGroup"), ("CISCO-POP-MGMT-MIB", "cpmCallVolumeGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS0StatusGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS1UsageGroup"), ("CISCO-POP-MGMT-MIB", "cpmSystemGroup"), ("CISCO-POP-MGMT-MIB", "cpmDS1LoopbackNotifyConfigGroup"), ("CISCO-POP-MGMT-MIB", "cpmNotificationGroupRev1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmMIBComplianceRev5 = cpmMIBComplianceRev5.setStatus('current')
if mibBuilder.loadTexts: cpmMIBComplianceRev5.setDescription('The compliance statement used in a PoPM Stack, which implement the Cisco PoP Management MIB')
cpmDS0UsageGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 1)).setObjects(("CISCO-POP-MGMT-MIB", "cpmConfiguredType"), ("CISCO-POP-MGMT-MIB", "cpmDS0CallType"), ("CISCO-POP-MGMT-MIB", "cpmL2Encapsulation"), ("CISCO-POP-MGMT-MIB", "cpmCallCount"), ("CISCO-POP-MGMT-MIB", "cpmTimeInUse"), ("CISCO-POP-MGMT-MIB", "cpmInOctets"), ("CISCO-POP-MGMT-MIB", "cpmOutOctets"), ("CISCO-POP-MGMT-MIB", "cpmInPackets"), ("CISCO-POP-MGMT-MIB", "cpmOutPackets"), ("CISCO-POP-MGMT-MIB", "cpmAssociatedInterface"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChanInUseForAnalog"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelsInUse"), ("CISCO-POP-MGMT-MIB", "cpmActiveDS0s"), ("CISCO-POP-MGMT-MIB", "cpmPPPCalls"), ("CISCO-POP-MGMT-MIB", "cpmV120Calls"), ("CISCO-POP-MGMT-MIB", "cpmV110Calls"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmDS0UsageGroup = cpmDS0UsageGroup.setStatus('obsolete')
if mibBuilder.loadTexts: cpmDS0UsageGroup.setDescription('A collection of objects providing the analog and digital statistics for a DS1.')
cpmCallFailureGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 2)).setObjects(("CISCO-POP-MGMT-MIB", "cpmISDNCallsRejected"), ("CISCO-POP-MGMT-MIB", "cpmModemCallsRejected"), ("CISCO-POP-MGMT-MIB", "cpmISDNCallsClearedAbnormally"), ("CISCO-POP-MGMT-MIB", "cpmModemCallsClearedAbnormally"), ("CISCO-POP-MGMT-MIB", "cpmISDNNoResource"), ("CISCO-POP-MGMT-MIB", "cpmModemNoResource"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmCallFailureGroup = cpmCallFailureGroup.setStatus('current')
if mibBuilder.loadTexts: cpmCallFailureGroup.setDescription('A collection of objects providing aggregate totals of call failures')
cpmActiveCallSummaryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 3)).setObjects(("CISCO-POP-MGMT-MIB", "cpmActiveUserID"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallType"), ("CISCO-POP-MGMT-MIB", "cpmActiveUserIpAddr"), ("CISCO-POP-MGMT-MIB", "cpmActiveModemSlot"), ("CISCO-POP-MGMT-MIB", "cpmActiveModemPort"), ("CISCO-POP-MGMT-MIB", "cpmActiveCallDuration"), ("CISCO-POP-MGMT-MIB", "cpmActiveEntrySlot"), ("CISCO-POP-MGMT-MIB", "cpmActiveEntryPort"), ("CISCO-POP-MGMT-MIB", "cpmActiveEntryChannel"), ("CISCO-POP-MGMT-MIB", "cpmActiveRemotePhoneNumber"), ("CISCO-POP-MGMT-MIB", "cpmActiveLocalPhoneNumber"), ("CISCO-POP-MGMT-MIB", "cpmActiveTTYNumber"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmActiveCallSummaryGroup = cpmActiveCallSummaryGroup.setStatus('current')
if mibBuilder.loadTexts: cpmActiveCallSummaryGroup.setDescription('A collection of objects providing the summary of the currently active calls.')
cpmCallHistorySummaryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 4)).setObjects(("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryTableMaxLength"), ("CISCO-POP-MGMT-MIB", "cpmCallHistorySummaryRetainTimer"), ("CISCO-POP-MGMT-MIB", "cpmUserID"), ("CISCO-POP-MGMT-MIB", "cpmUserIpAddr"), ("CISCO-POP-MGMT-MIB", "cpmCallType"), ("CISCO-POP-MGMT-MIB", "cpmModemSlot"), ("CISCO-POP-MGMT-MIB", "cpmModemPort"), ("CISCO-POP-MGMT-MIB", "cpmCallDuration"), ("CISCO-POP-MGMT-MIB", "cpmEntrySlot"), ("CISCO-POP-MGMT-MIB", "cpmEntryPort"), ("CISCO-POP-MGMT-MIB", "cpmEntryChannel"), ("CISCO-POP-MGMT-MIB", "cpmRemotePhoneNumber"), ("CISCO-POP-MGMT-MIB", "cpmLocalPhoneNumber"), ("CISCO-POP-MGMT-MIB", "cpmTTYNumber"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmCallHistorySummaryGroup = cpmCallHistorySummaryGroup.setStatus('current')
if mibBuilder.loadTexts: cpmCallHistorySummaryGroup.setDescription('A collection of objects providing the summary of calls that were recently terminated.')
cpmDS0UsageGroupRev1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 5)).setObjects(("CISCO-POP-MGMT-MIB", "cpmConfiguredType"), ("CISCO-POP-MGMT-MIB", "cpmDS0CallType"), ("CISCO-POP-MGMT-MIB", "cpmL2Encapsulation"), ("CISCO-POP-MGMT-MIB", "cpmCallCount"), ("CISCO-POP-MGMT-MIB", "cpmTimeInUse"), ("CISCO-POP-MGMT-MIB", "cpmInOctets"), ("CISCO-POP-MGMT-MIB", "cpmOutOctets"), ("CISCO-POP-MGMT-MIB", "cpmInPackets"), ("CISCO-POP-MGMT-MIB", "cpmOutPackets"), ("CISCO-POP-MGMT-MIB", "cpmAssociatedInterface"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChanInUseForAnalog"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelsInUse"), ("CISCO-POP-MGMT-MIB", "cpmActiveDS0s"), ("CISCO-POP-MGMT-MIB", "cpmPPPCalls"), ("CISCO-POP-MGMT-MIB", "cpmV120Calls"), ("CISCO-POP-MGMT-MIB", "cpmV110Calls"), ("CISCO-POP-MGMT-MIB", "cpmActiveDS0sHighWaterMark"), ("CISCO-POP-MGMT-MIB", "cpmDS1ActiveDS0s"), ("CISCO-POP-MGMT-MIB", "cpmDS1ActiveDS0sHighWaterMark"), ("CISCO-POP-MGMT-MIB", "cpmSW56CfgBChannelsInUse"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmDS0UsageGroupRev1 = cpmDS0UsageGroupRev1.setStatus('obsolete')
if mibBuilder.loadTexts: cpmDS0UsageGroupRev1.setDescription('A collection of objects providing the analog and digital statistics for a DS1.')
cpmDS0UsageGroupRev2 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 6)).setObjects(("CISCO-POP-MGMT-MIB", "cpmConfiguredType"), ("CISCO-POP-MGMT-MIB", "cpmDS0CallType"), ("CISCO-POP-MGMT-MIB", "cpmL2Encapsulation"), ("CISCO-POP-MGMT-MIB", "cpmCallCount"), ("CISCO-POP-MGMT-MIB", "cpmTimeInUse"), ("CISCO-POP-MGMT-MIB", "cpmInOctets"), ("CISCO-POP-MGMT-MIB", "cpmOutOctets"), ("CISCO-POP-MGMT-MIB", "cpmInPackets"), ("CISCO-POP-MGMT-MIB", "cpmOutPackets"), ("CISCO-POP-MGMT-MIB", "cpmAssociatedInterface"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChanInUseForAnalog"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelsInUse"), ("CISCO-POP-MGMT-MIB", "cpmActiveDS0s"), ("CISCO-POP-MGMT-MIB", "cpmPPPCalls"), ("CISCO-POP-MGMT-MIB", "cpmV120Calls"), ("CISCO-POP-MGMT-MIB", "cpmV110Calls"), ("CISCO-POP-MGMT-MIB", "cpmActiveDS0sHighWaterMark"), ("CISCO-POP-MGMT-MIB", "cpmDS1ActiveDS0s"), ("CISCO-POP-MGMT-MIB", "cpmDS1ActiveDS0sHighWaterMark"), ("CISCO-POP-MGMT-MIB", "cpmSW56CfgBChannelsInUse"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChanInUseForVoice"), ("CISCO-POP-MGMT-MIB", "cpmCASCfgBChanInUseForVoice"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmDS0UsageGroupRev2 = cpmDS0UsageGroupRev2.setStatus('current')
if mibBuilder.loadTexts: cpmDS0UsageGroupRev2.setDescription('A collection of objects providing the analog and digital statistics for a DS1.')
cpmDS1UsageGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 7)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS1TotalAnalogCalls"), ("CISCO-POP-MGMT-MIB", "cpmDS1TotalDigitalCalls"), ("CISCO-POP-MGMT-MIB", "cpmDS1TotalV110Calls"), ("CISCO-POP-MGMT-MIB", "cpmDS1TotalV120Calls"), ("CISCO-POP-MGMT-MIB", "cpmDS1TotalCalls"), ("CISCO-POP-MGMT-MIB", "cpmDS1TotalTimeInUse"), ("CISCO-POP-MGMT-MIB", "cpmDS1CurrentIdle"), ("CISCO-POP-MGMT-MIB", "cpmDS1CurrentOutOfService"), ("CISCO-POP-MGMT-MIB", "cpmDS1CurrentBusyout"), ("CISCO-POP-MGMT-MIB", "cpmDS1InOctets"), ("CISCO-POP-MGMT-MIB", "cpmDS1OutOctets"), ("CISCO-POP-MGMT-MIB", "cpmDS1InPackets"), ("CISCO-POP-MGMT-MIB", "cpmDS1OutPackets"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmDS1UsageGroup = cpmDS1UsageGroup.setStatus('current')
if mibBuilder.loadTexts: cpmDS1UsageGroup.setDescription('A collection of objects providing statistics aggregation across DS0s within a DS1.')
cpmSystemGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 8)).setObjects(("CISCO-POP-MGMT-MIB", "cpmISDNCfgActiveDChannels"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelsTimeInUse"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelsTimeInUseAnlg"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelCalls"), ("CISCO-POP-MGMT-MIB", "cpmISDNCfgBChannelAnalogCalls"), ("CISCO-POP-MGMT-MIB", "cpmTotalISDNSyncPPPCalls"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmSystemGroup = cpmSystemGroup.setStatus('current')
if mibBuilder.loadTexts: cpmSystemGroup.setDescription('A collection of objects providing statistics aggregation for the entire system.')
cpmDS0StatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 9)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutNotifyEnable"), ("CISCO-POP-MGMT-MIB", "cpmDS0OperStatus"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutAdminStatus"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutAllow"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutStatus"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutSource"), ("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutTime"), ("CISCO-POP-MGMT-MIB", "cpmDS0ConfigFunction"), ("CISCO-POP-MGMT-MIB", "cpmDS0InterfaceIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmDS0StatusGroup = cpmDS0StatusGroup.setStatus('current')
if mibBuilder.loadTexts: cpmDS0StatusGroup.setDescription('A collection of objects providing the status for a DS0.')
cpmNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 10)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmNotificationGroup = cpmNotificationGroup.setStatus('deprecated')
if mibBuilder.loadTexts: cpmNotificationGroup.setDescription('The collection of notifications ')
cpmDS1LoopbackNotifyConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 11)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS1LoopbackNotifyEnable"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmDS1LoopbackNotifyConfigGroup = cpmDS1LoopbackNotifyConfigGroup.setStatus('current')
if mibBuilder.loadTexts: cpmDS1LoopbackNotifyConfigGroup.setDescription('A collection of objects providing the notification configuration ')
cpmCallVolumeGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 12)).setObjects(("CISCO-POP-MGMT-MIB", "cpmCallVolSuccISDNDigital"), ("CISCO-POP-MGMT-MIB", "cpmCallVolAnalogCallClearedNormally"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmCallVolumeGroup = cpmCallVolumeGroup.setStatus('current')
if mibBuilder.loadTexts: cpmCallVolumeGroup.setDescription('A collection of objects providing aggregate totals of call successfully completed')
cpmNotificationGroupRev1 = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 10, 19, 3, 2, 13)).setObjects(("CISCO-POP-MGMT-MIB", "cpmDS0BusyoutNotification"), ("CISCO-POP-MGMT-MIB", "cpmDS1LoopbackNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpmNotificationGroupRev1 = cpmNotificationGroupRev1.setStatus('current')
if mibBuilder.loadTexts: cpmNotificationGroupRev1.setDescription('The collection of notifications ')
mibBuilder.exportSymbols("CISCO-POP-MGMT-MIB", cpmOutPackets=cpmOutPackets, cpmNotifications=cpmNotifications, cpmCallHistorySummaryGroup=cpmCallHistorySummaryGroup, cpmISDNNoResource=cpmISDNNoResource, cpmDS0InterfaceIndex=cpmDS0InterfaceIndex, cpmTTYNumber=cpmTTYNumber, cpmDS0UsageEntry=cpmDS0UsageEntry, cpmChannelIndex=cpmChannelIndex, cpmDS1ActiveDS0s=cpmDS1ActiveDS0s, cpmSystemGroup=cpmSystemGroup, cpmTotalISDNSyncPPPCalls=cpmTotalISDNSyncPPPCalls, cpmDS0BusyoutAdminStatus=cpmDS0BusyoutAdminStatus, cpmDS0BusyoutSource=cpmDS0BusyoutSource, cpmPPPCalls=cpmPPPCalls, cpmV120Calls=cpmV120Calls, cpmDS0StatusEntry=cpmDS0StatusEntry, cpmActiveRemotePhoneNumber=cpmActiveRemotePhoneNumber, cpmCallHistorySummary=cpmCallHistorySummary, cpmActiveCallDuration=cpmActiveCallDuration, cpmCallHistorySummaryIndex=cpmCallHistorySummaryIndex, cpmDS0UsageGroupRev2=cpmDS0UsageGroupRev2, cpmDS0CallType=cpmDS0CallType, cpmCallType=cpmCallType, cpmActiveDS0sHighWaterMark=cpmActiveDS0sHighWaterMark, cpmEntryChannel=cpmEntryChannel, cpmModemPort=cpmModemPort, cpmComplianceRev2=cpmComplianceRev2, cpmNotificationGroupRev1=cpmNotificationGroupRev1, cpmDS1CurrentOutOfService=cpmDS1CurrentOutOfService, cPopMgmtMIBNotificationPrefix=cPopMgmtMIBNotificationPrefix, cpmISDNCfgActiveDChannels=cpmISDNCfgActiveDChannels, cpmCallStartTimeIndex=cpmCallStartTimeIndex, cpmDS1OutPackets=cpmDS1OutPackets, cpmDS1LoopbackNotifyConfigGroup=cpmDS1LoopbackNotifyConfigGroup, cpmDS0Status=cpmDS0Status, cpmUserIpAddr=cpmUserIpAddr, cpmActiveCallStartTimeIndex=cpmActiveCallStartTimeIndex, cpmDS1SlotIndex=cpmDS1SlotIndex, ciscoPopMgmtMIB=ciscoPopMgmtMIB, cpmCallDisconnectTimeIndex=cpmCallDisconnectTimeIndex, cpmCallHistorySummaryTableMaxLength=cpmCallHistorySummaryTableMaxLength, cpmTimeInUse=cpmTimeInUse, cpmActiveEntrySlot=cpmActiveEntrySlot, cpmCallVolumeGroup=cpmCallVolumeGroup, cpmCallVolSuccISDNDigital=cpmCallVolSuccISDNDigital, cpmAssociatedInterface=cpmAssociatedInterface, cpmMIBGroups=cpmMIBGroups, 
cpmSW56CfgBChannelsInUse=cpmSW56CfgBChannelsInUse, cpmDS0UsageTable=cpmDS0UsageTable, cpmMIBConformance=cpmMIBConformance, cpmCallCount=cpmCallCount, cpmDS0OperStatus=cpmDS0OperStatus, cpmDS0ConfigFunction=cpmDS0ConfigFunction, cpmCallHistorySummaryTable=cpmCallHistorySummaryTable, cpmCASCfgBChanInUseForVoice=cpmCASCfgBChanInUseForVoice, cpmDS0BusyoutStatus=cpmDS0BusyoutStatus, cpmISDNCfgBChannelAnalogCalls=cpmISDNCfgBChannelAnalogCalls, cpmDS1TotalDigitalCalls=cpmDS1TotalDigitalCalls, cpmActiveCallSummaryTable=cpmActiveCallSummaryTable, cpmDS0StatusTable=cpmDS0StatusTable, cpmActiveCallType=cpmActiveCallType, cpmDS1LoopbackNotifyEnable=cpmDS1LoopbackNotifyEnable, cpmDS1TotalCalls=cpmDS1TotalCalls, cpmL2Encapsulation=cpmL2Encapsulation, cpmDS0UsageGroupRev1=cpmDS0UsageGroupRev1, cpmActiveCallSummary=cpmActiveCallSummary, cpmDS1UsageSlotIndex=cpmDS1UsageSlotIndex, cpmRemotePhoneNumber=cpmRemotePhoneNumber, cpmCallFailureGroup=cpmCallFailureGroup, cpmDS0StatusGroup=cpmDS0StatusGroup, cpmActiveModemPort=cpmActiveModemPort, cpmDS1TotalAnalogCalls=cpmDS1TotalAnalogCalls, cpmMIBComplianceRev3=cpmMIBComplianceRev3, cpmMIBComplianceRev5=cpmMIBComplianceRev5, cpmActiveDS0s=cpmActiveDS0s, cpmEntrySlot=cpmEntrySlot, cpmCallHistorySummaryRetainTimer=cpmCallHistorySummaryRetainTimer, cpmActiveCallSummaryGroup=cpmActiveCallSummaryGroup, cpmInOctets=cpmInOctets, cpmDS1UsagePortIndex=cpmDS1UsagePortIndex, PYSNMP_MODULE_ID=ciscoPopMgmtMIB, cpmDS1InPackets=cpmDS1InPackets, cpmDS1CurrentBusyout=cpmDS1CurrentBusyout, cpmActiveLocalPhoneNumber=cpmActiveLocalPhoneNumber, cpmDS0BusyoutAllow=cpmDS0BusyoutAllow, cpmActiveUserID=cpmActiveUserID, cpmDS1LoopbackNotifyConfig=cpmDS1LoopbackNotifyConfig, cpmISDNCfgBChannelsInUse=cpmISDNCfgBChannelsInUse, cpmDS1PortIndex=cpmDS1PortIndex, cpmCallFailure=cpmCallFailure, cpmDS1ActiveDS0sHighWaterMark=cpmDS1ActiveDS0sHighWaterMark, cpmDS1CurrentIdle=cpmDS1CurrentIdle, cpmCallVolume=cpmCallVolume, cpmDS1UsageGroup=cpmDS1UsageGroup, 
cpmModemCallsRejected=cpmModemCallsRejected, cpmModemSlot=cpmModemSlot, cpmOutOctets=cpmOutOctets, cpmDS1DS0UsageTable=cpmDS1DS0UsageTable, cpmInPackets=cpmInPackets, cpmISDNCfgBChanInUseForAnalog=cpmISDNCfgBChanInUseForAnalog, cpmDS1LoopbackNotification=cpmDS1LoopbackNotification, cpmEntryPort=cpmEntryPort, cpmDS0BusyoutNotification=cpmDS0BusyoutNotification, cpmDS1TotalV120Calls=cpmDS1TotalV120Calls, cpmMIBComplianceRev4=cpmMIBComplianceRev4, cpmActiveEntryChannel=cpmActiveEntryChannel, cpmMIBCompliances=cpmMIBCompliances, cpmCallDuration=cpmCallDuration, cpmLocalPhoneNumber=cpmLocalPhoneNumber, cpmDS1OutOctets=cpmDS1OutOctets, cpmISDNCfgBChannelsTimeInUseAnlg=cpmISDNCfgBChannelsTimeInUseAnlg, cpmActiveEntryPort=cpmActiveEntryPort, cpmMIBComplianceRev1=cpmMIBComplianceRev1, cpmISDNCallsRejected=cpmISDNCallsRejected, cpmISDNCfgBChannelsTimeInUse=cpmISDNCfgBChannelsTimeInUse, cpmDS1TotalTimeInUse=cpmDS1TotalTimeInUse, cpmConfiguredType=cpmConfiguredType, cpmCallVolAnalogCallClearedNormally=cpmCallVolAnalogCallClearedNormally, cpmISDNCfgBChanInUseForVoice=cpmISDNCfgBChanInUseForVoice, cpmDS0BusyoutNotifyEnable=cpmDS0BusyoutNotifyEnable, cpmISDNCallsClearedAbnormally=cpmISDNCallsClearedAbnormally, cpmActiveModemSlot=cpmActiveModemSlot, cpmDS0UsageGroup=cpmDS0UsageGroup, cpmModemNoResource=cpmModemNoResource, cpmActiveCallSummaryEntry=cpmActiveCallSummaryEntry, cpmActiveTTYNumber=cpmActiveTTYNumber, cpmUserID=cpmUserID, cpmDS1InOctets=cpmDS1InOctets, cpmDS1DS0UsageEntry=cpmDS1DS0UsageEntry, cpmModemCallsClearedAbnormally=cpmModemCallsClearedAbnormally, cpmNotificationGroup=cpmNotificationGroup, ciscoPopMgmtMIBObjects=ciscoPopMgmtMIBObjects, cpmMIBCompliance=cpmMIBCompliance, cpmDS0BusyoutTime=cpmDS0BusyoutTime, cpmActiveUserIpAddr=cpmActiveUserIpAddr, cpmCallHistorySummaryEntry=cpmCallHistorySummaryEntry, cpmISDNCfgBChannelCalls=cpmISDNCfgBChannelCalls, cpmDS1TotalV110Calls=cpmDS1TotalV110Calls, cpmActiveCallSummaryIndex=cpmActiveCallSummaryIndex, 
cpmV110Calls=cpmV110Calls, cpmDS0Usage=cpmDS0Usage)
| 0 | 0 | 0 |
5d2c53c2ab685d4fb3a4853e27026c1c7141306b | 16,537 | py | Python | tokenization.py | wangwang110/PIE | 474769e3c4266deefcb7dd5daf802a1306bc7c99 | [
"MIT"
] | 165 | 2019-10-08T09:54:46.000Z | 2022-03-17T06:50:32.000Z | tokenization.py | wangwang110/PIE | 474769e3c4266deefcb7dd5daf802a1306bc7c99 | [
"MIT"
] | 28 | 2019-11-02T07:06:26.000Z | 2022-03-24T09:20:58.000Z | tokenization.py | wangwang110/PIE | 474769e3c4266deefcb7dd5daf802a1306bc7c99 | [
"MIT"
] | 38 | 2019-12-05T06:01:54.000Z | 2022-03-21T09:35:23.000Z | # code adapted from https://github.com/google-research/bert
# modification of tokenization.py for GEC
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow as tf
from autocorrect import spell
from spellcheck_utils import can_spellcheck
import re
# Contraction fragments (as emitted by common tokenizers) mapped to expanded
# word forms; tokens appearing as keys here are kept whole by BasicTokenizer.
# NOTE(review): "wo"/"Wo" come from "won't", which conventionally expands to
# "will"/"Will" -- the "would" mapping here looks suspect; confirm intended.
special_tokens = {"n't":"not", "'m":"am", "ca":"can", "Ca":"Can", "wo":"would", "Wo":"Would",
                "'ll":"will", "'ve":"have"}
'''
def contains_round(text):
if ")" in text or "(" in text:
print("contains_right_round firing on {}".format(text))
return True
else:
return False
'''
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    # Guard-clause form: handle the Python 3 case, then Python 2, then fail.
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`."""
    # Both interpreters want `str`: on Python 3 that means decoding bytes,
    # on Python 2 it means encoding unicode.
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text
        if isinstance(text, unicode):
            return text.encode("utf-8")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary.

    Each line becomes one key (stripped); values are consecutive indices
    starting at 0, with file order preserved by the OrderedDict.
    """
    vocab = collections.OrderedDict()
    with tf.gfile.GFile(vocab_file, "r") as reader:
        index = 0
        while True:
            line = convert_to_unicode(reader.readline())
            if not line:
                break
            vocab[line.strip()] = index
            index += 1
    return vocab
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab.

    Raises KeyError when an item is missing from `vocab`.
    """
    return [vocab[item] for item in items]
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    # Empty / all-whitespace input yields an empty token list.
    return stripped.split() if stripped else []
class FullTokenizer(object):
    """Runs end-to-end tokenization.

    NOTE(review): the method bodies are absent from this excerpt; only a
    commented-out `convert_tokens_to_ids` remains below. Presumably this
    class composes BasicTokenizer and WordpieceTokenizer as in the upstream
    BERT tokenization module -- confirm against the full file.
    """
    # Kept from the original source (disabled there as well):
    #def convert_tokens_to_ids(self, tokens):
    #    return convert_by_vocab(self.vocab, tokens)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.).

    GEC-specific deviation from the stock BERT tokenizer: single-character
    tokens, tokens matched by the module-level `do_not_split` (numbers,
    URLs, acronyms, smilies, ...) and contraction fragments listed in
    `special_tokens` are kept whole instead of being split on punctuation.
    """
    def __init__(self, do_lower_case=True, vocab=None):
        """Constructs a BasicTokenizer.
        Args:
            do_lower_case: Whether to lower case the input.
            vocab: Optional vocabulary; stored but not used by this class
                in this excerpt.
        """
        self.do_lower_case = do_lower_case
        self.vocab = vocab
    def tokenize(self, text, mode="test"):
        """Tokenizes a piece of text.

        Args:
            text: str or utf-8 bytes.
            mode: "train" disables the do-not-split protections inside
                `do_not_split`; any other value (default "test") keeps them.
        Returns:
            A list of tokens.
        """
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            # Keep the token intact when it is a single character, looks like
            # something that must not be split (see do_not_split), or is a
            # contraction fragment such as "n't".
            # NOTE(review): with do_lower_case=True the capitalized keys in
            # special_tokens ("Ca", "Wo") can never match here.
            if len(token)==1 or do_not_split(token,mode) or (token in special_tokens):
                split_tokens.append(token)
            else:
                split_tokens.extend(self._run_split_on_punc(token))
        # Spell checking is disabled. NOTE(review): _run_spell_check is not
        # defined on this class in this excerpt, so flipping this flag would
        # raise AttributeError unless the method exists elsewhere -- confirm.
        use_spell_check=False
        if use_spell_check:
            split_tokens = self._run_spell_check(split_tokens)
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks;
        # dropping category "Mn" (nonspacing marks) removes the accents.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text.

        Each punctuation character becomes its own token; runs of
        non-punctuation characters are kept together.
        """
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                # Punctuation is emitted as a standalone token and forces the
                # next non-punctuation character to begin a new token.
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                    start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]
    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
            (cp >= 0x3400 and cp <= 0x4DBF) or #
            (cp >= 0x20000 and cp <= 0x2A6DF) or #
            (cp >= 0x2A700 and cp <= 0x2B73F) or #
            (cp >= 0x2B740 and cp <= 0x2B81F) or #
            (cp >= 0x2B820 and cp <= 0x2CEAF) or
            (cp >= 0xF900 and cp <= 0xFAFF) or #
            (cp >= 0x2F800 and cp <= 0x2FA1F)): #
            return True
        return False
    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the replacement character, and control characters.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            # Normalize every whitespace variant to a plain space.
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenziation."""

    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        Greedy longest-match-first tokenization against `self.vocab`; e.g.
        input "unaffable" -> ["un", "##aff", "##able"].

        Args:
            text: A single token or whitespace separated tokens, already
                passed through `BasicTokenizer`.
        Returns:
            A list of wordpiece tokens.
        """
        text = convert_to_unicode(text)
        pieces = []
        for word in whitespace_tokenize(text):
            # Overlong words map straight to the unknown token.
            if len(word) > self.max_input_chars_per_word:
                pieces.append(self.unk_token)
                continue
            word_pieces = self._greedy_split(word)
            if word_pieces is None:
                # Some position had no matching piece: the whole word is UNK.
                pieces.append(self.unk_token)
            else:
                pieces.extend(word_pieces)
        return pieces

    def _greedy_split(self, word):
        # Longest-match-first scan: at each position take the longest
        # substring present in the vocab, prefixing non-initial pieces with
        # the "##" continuation marker. Returns None if no piece matches.
        subwords = []
        start = 0
        while start < len(word):
            match = None
            for end in range(len(word), start, -1):
                candidate = word[start:end]
                if start > 0:
                    candidate = "##" + candidate
                if candidate in self.vocab:
                    match = candidate
                    break
            if match is None:
                return None
            subwords.append(match)
            start = end
        return subwords
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 29.269027 | 160 | 0.638145 | # code adapted from https://github.com/google-research/bert
# modification of tokenization.py for GEC
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import six
import tensorflow as tf
from autocorrect import spell
from spellcheck_utils import can_spellcheck
import re
special_tokens = {"n't":"not", "'m":"am", "ca":"can", "Ca":"Can", "wo":"would", "Wo":"Would",
"'ll":"will", "'ve":"have"}
def containsNumber(text):
    """Return True when the token contains an ASCII digit."""
    # Same pattern/semantics as the original (re.match anchors at the start).
    return re.match(r".*[0-9].*", text) is not None
def containsMultiCapital(text):
    """Return True when the token holds at least two ASCII capital letters."""
    return re.match(r".*[A-Z].*[A-Z].*", text) is not None
def checkAlternateDots(text):
    """Return True when every odd-indexed character of `text` is a dot.

    Matches abbreviation-like tokens such as "a.b." or "e.g." (dot at every
    odd position); tokens that start with "." are rejected.
    """
    # Fix: the original indexed text[0] unconditionally and raised
    # IndexError on an empty string.
    if not text or text[0] == ".":
        return False
    odd_chars = text[1::2]
    # Single-character tokens have no odd positions and do not match
    # (set("") == {"."} is False), same as the original behavior.
    return set(odd_chars) == {"."}
def end_with_dotcom(text):
    """Return True when the token ends with ".com"."""
    # endswith implies the length check the original did explicitly.
    return text.endswith(".com")
def starts_with_www(text):
    """Return True when the token starts with "www."."""
    # Equivalent to the original anchored regex r"^www\..*".
    return text.startswith("www.")
def contains_slash(text):
    """Return True when the token contains a "/" character."""
    return "/" in text
def contains_percent(text):
    """Return True when the token contains a "%" character."""
    return "%" in text
def contains_ampersand(text):
    """Return True when the token contains an "&" character."""
    return "&" in text
def contains_at_rate(text):
    """Return True when the token contains an "@" character (e-mail-like)."""
    return "@" in text
def contains_square_brackets(text):
    """Return True when the token contains "[" or "]"."""
    return any(bracket in text for bracket in "[]")
def last_dot_first_capital(text):
    """Return True for tokens of length > 1 ending in "." whose first
    character is not a lowercase letter (abbreviation-like, e.g. "Mr.")."""
    # Note: text[0] == text[0].upper() also holds for digits and symbols.
    return len(text) > 1 and text.endswith(".") and text[0] == text[0].upper()
def check_smilies(text):
    """Return True when the token is one of a small set of emoticons."""
    return text in {":)", ":(", ";)", ":/", ":|"}
def do_not_split(text, mode="test"):
    """Decide whether a token must be kept intact (not punctuation-split).

    In "train" mode splitting is always allowed. Otherwise tokens that look
    like numbers, acronyms, URLs, e-mail addresses, abbreviations or smilies
    are protected from splitting.
    """
    if mode == "train":
        return False
    # Same predicates, same short-circuit order as the original chain.
    checks = (
        containsNumber,
        containsMultiCapital,
        checkAlternateDots,
        end_with_dotcom,
        starts_with_www,
        contains_at_rate,
        contains_slash,
        contains_percent,
        contains_ampersand,
        contains_square_brackets,
        last_dot_first_capital,
        check_smilies,
    )
    return any(check(text) for check in checks)
'''
def contains_round(text):
if ")" in text or "(" in text:
print("contains_right_round firing on {}".format(text))
return True
else:
return False
'''
def spell_check(text):
    """Return an autocorrected version of *text*, or None.

    None is returned when `can_spellcheck` rejects the token; otherwise the
    suggestion from `autocorrect.spell` is returned unchanged (it may equal
    the input if no correction was found).
    """
    if not can_spellcheck(text):
        return None
    result = spell(text)
    return result
'''
if (text[0].isupper() == result[0].isupper()): #avoid case change due to spelling correction
return result
else:
return None
'''
def check_alternate_in_vocab(word, vocab):
    """Return a case-flipped variant of *word* that exists in *vocab*.

    For an all-lowercase word the first letter is capitalized; for any
    other word the fully lowercased form is tried.  Returns the variant
    when present in *vocab*, else None.  Asserts that *word* itself is
    not already in *vocab*.
    """
    assert word not in vocab
    if word == word.lower():
        candidate = word[0].upper() + word[1:]
    else:
        candidate = word.lower()
    return candidate if candidate in vocab else None
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input.

    On Python 3 the result is `str`; on Python 2 it is `unicode`.  Bytes that
    cannot be decoded as UTF-8 are dropped ("ignore" error handler).
    Raises ValueError for unsupported input types.
    """
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        elif isinstance(text, unicode):
            return text
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
    """Returns text encoded in a way suitable for print or `tf.logging`.

    Unlike convert_to_unicode, the target type is the platform-native `str`
    on both major versions: `str` (unicode) on Python 3, UTF-8 `str` (bytes)
    on Python 2.  Raises ValueError for unsupported input types.
    """
    # These functions want `str` for both Python2 and Python3, but in one case
    # it's a Unicode string and in the other it's a byte string.
    if six.PY3:
        if isinstance(text, str):
            return text
        elif isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    elif six.PY2:
        if isinstance(text, str):
            return text
        elif isinstance(text, unicode):
            return text.encode("utf-8")
        else:
            raise ValueError("Unsupported string type: %s" % (type(text)))
    else:
        raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary.

    Each line of *vocab_file* holds one token; tokens are mapped to their
    0-based line index, preserving file order (OrderedDict).
    """
    vocab = collections.OrderedDict()
    index = 0
    with tf.gfile.GFile(vocab_file, "r") as reader:
        while True:
            token = convert_to_unicode(reader.readline())
            if not token:
                # readline() returns the empty string only at EOF.
                break
            token = token.strip()
            vocab[token] = index
            index += 1
    return vocab
def convert_by_vocab(vocab, items):
    """Converts a sequence of [tokens|ids] using the vocab.

    Raises KeyError when an item is missing from *vocab*.
    """
    return [vocab[item] for item in items]
def convert_tokens_to_ids(vocab, tokens):
    """Module-level helper: map each token to its id via *vocab*."""
    return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
    """Module-level helper: map each id back to its token via *inv_vocab*."""
    return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    cleaned = text.strip()
    if not cleaned:
        return []
    return cleaned.split()
class FullTokenizer(object):
    """Runs end-to-end tokenization: basic tokenization followed by WordPiece.

    This variant deviates from stock BERT tokenization:
      * tokens flagged by `do_not_split` (numbers, URLs, acronyms, ...) and
        contraction fragments in `special_tokens` are kept whole;
      * before accepting a multi-piece WordPiece split, single-token case
        variants (capitalize/lower/upper) present in the vocab are preferred;
      * splits into more than 3 word pieces are rejected and the original
        token is kept instead.
    """
    def __init__(self, vocab_file, do_lower_case=True):
        # vocab maps token -> id; inv_vocab is the reverse mapping.
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, vocab=self.vocab)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
    def tokenize(self, text, mode="test"):
        """Tokenize *text*; mode="train" disables the do_not_split guard."""
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text,mode):
            if (len(token) > 1 and do_not_split(token,mode)) or (token in special_tokens):
                # Protected token: keep as-is, skip WordPiece entirely.
                split_tokens.append(token)
            else:
                wordpiece_tokens = self.wordpiece_tokenizer.tokenize(token)
                if len(wordpiece_tokens) > 1:
                    # Try case variants that exist as a single vocab token
                    # before accepting a multi-piece split.
                    if token.capitalize() in self.vocab:
                        split_tokens.append(token.capitalize())
                    elif token.lower() in self.vocab:
                        split_tokens.append(token.lower())
                    elif token.upper() in self.vocab:
                        split_tokens.append(token.upper())
                    elif len(wordpiece_tokens) <=3:
                        split_tokens.extend(wordpiece_tokens)
                    else:
                        # Too fragmented: keep the original token; it will map
                        # to [UNK] at id-conversion time if out of vocab.
                        split_tokens.append(token)
                else:
                    split_tokens.append(token)
        return split_tokens
    def convert_tokens_to_ids(self,items):
        """Map tokens to ids, resolving contraction fragments and case
        variants before falling back to the [UNK] id."""
        output = []
        for item in items:
            if item in special_tokens:
                output.append(self.vocab[special_tokens[item]])
            elif item in self.vocab:
                output.append(self.vocab[item])
            else:
                if item.capitalize() in self.vocab:
                    output.append(self.vocab[item.capitalize()])
                elif item.lower() in self.vocab:
                    output.append(self.vocab[item.lower()])
                elif item.upper() in self.vocab:
                    output.append(self.vocab[item.upper()])
                else:
                    output.append(self.vocab["[UNK]"])
        return output
    # Original (stock BERT) implementation, superseded by the method above:
    #def convert_tokens_to_ids(self, tokens):
    #    return convert_by_vocab(self.vocab, tokens)
    def convert_ids_to_tokens(self, ids):
        return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
    def __init__(self, do_lower_case=True, vocab=None):
        """Constructs a BasicTokenizer.

        Args:
            do_lower_case: Whether to lower case the input.
            vocab: Optional token -> id mapping; used only by the (currently
                disabled) spell-check pass to decide if a token is known.
        """
        self.do_lower_case = do_lower_case
        self.vocab = vocab
    def tokenize(self, text, mode="test"):
        """Tokenizes a piece of text."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            # Single characters, tokens protected by do_not_split (numbers,
            # URLs, smilies, ...) and contraction fragments are kept whole;
            # everything else is split on punctuation.
            if len(token)==1 or do_not_split(token,mode) or (token in special_tokens):
                split_tokens.append(token)
            else:
                split_tokens.extend(self._run_split_on_punc(token))
        # Spell checking is disabled by this hard-coded flag.
        use_spell_check=False
        if use_spell_check:
            split_tokens = self._run_spell_check(split_tokens)
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens
    def _run_spell_check(self, tokens):
        """Replace unknown tokens with their autocorrect suggestion when the
        suggestion is either in the vocab or the token is lowercase."""
        corrected_tokens = []
        for word in tokens:
            output_word = None
            if (word in self.vocab) or (word.lower() in self.vocab) or (word.capitalize() in self.vocab) or (word.upper() in self.vocab) or do_not_split(word,"test"):
                # Known (in some casing) or protected: keep unchanged.
                output_word = word
            else:
                spell_checked_word = spell_check(word)
                if spell_checked_word:
                    if (spell_checked_word in self.vocab):
                        output_word=spell_checked_word
                    else:
                        if word[0].isupper():
                            # Should be unreachable: spell_check is expected to
                            # return None for cased words -- TODO confirm against
                            # can_spellcheck's contract.
                            print("Error this should not be encountered")
                            exit(1)
                        else:
                            output_word=spell_checked_word
                else:
                    # Could not be spell checked: keep the original word.
                    output_word=word
            assert output_word!=None
            corrected_tokens.append(output_word)
        return corrected_tokens
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        # NFD decomposition separates base characters from combining marks
        # (category "Mn"), which are then dropped.
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        # Each punctuation character becomes its own token; runs of
        # non-punctuation characters are grouped together.
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                    start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]
    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  # CJK Unified Ideographs
                (cp >= 0x3400 and cp <= 0x4DBF) or  # Extension A
                (cp >= 0x20000 and cp <= 0x2A6DF) or  # Extension B
                (cp >= 0x2A700 and cp <= 0x2B73F) or  # Extension C
                (cp >= 0x2B740 and cp <= 0x2B81F) or  # Extension D
                (cp >= 0x2B820 and cp <= 0x2CEAF) or  # Extension E
                (cp >= 0xF900 and cp <= 0xFAFF) or  # Compatibility Ideographs
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  # Compatibility Supplement
            return True
        return False
    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the Unicode replacement character, and control chars.
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs WordPiece tokenziation."""
    def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
        # vocab: token -> id mapping; continuation pieces carry a "##" prefix.
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.

        This uses a greedy longest-match-first algorithm to perform
        tokenization using the given vocabulary.

        For example:
            input = "unaffable"
            output = ["un", "##aff", "##able"]

        Args:
            text: A single token or whitespace separated tokens. This should
                have already been passed through `BasicTokenizer`.

        Returns:
            A list of wordpiece tokens; a word that cannot be fully covered
            by vocab pieces (or is over-long) becomes `unk_token`.
        """
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                # Greedily find the longest vocab piece starting at `start`.
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        # Non-initial pieces are looked up with the "##" prefix.
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No piece matches at this position: the whole word is bad.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| 6,235 | 0 | 540 |
03960228214839a0b38c88fbc49a473defdb2b27 | 1,452 | py | Python | Leetcode/Basic/Tree/108_Convert_Sorted_Array_to_Binary_Search_Tree.py | ZR-Huang/AlgorithmPractices | 226cecde136531341ce23cdf88529345be1912fc | [
"BSD-3-Clause"
] | 1 | 2019-11-26T11:52:25.000Z | 2019-11-26T11:52:25.000Z | Leetcode/Basic/Tree/108_Convert_Sorted_Array_to_Binary_Search_Tree.py | ZR-Huang/AlgorithmPractices | 226cecde136531341ce23cdf88529345be1912fc | [
"BSD-3-Clause"
] | null | null | null | Leetcode/Basic/Tree/108_Convert_Sorted_Array_to_Binary_Search_Tree.py | ZR-Huang/AlgorithmPractices | 226cecde136531341ce23cdf88529345be1912fc | [
"BSD-3-Clause"
] | null | null | null | '''
Given an array where elements are sorted in ascending order, convert it to a height balanced BST.
For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
Example:
Given the sorted array: [-10,-3,0,5,9],
One possible answer is: [0,-3,9,-10,null,5], which represents the following height balanced BST:
0
/ \
-3 9
/ /
-10 5
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
| 27.396226 | 157 | 0.511708 | '''
Given an array where elements are sorted in ascending order, convert it to a height balanced BST.
For this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1.
Example:
Given the sorted array: [-10,-3,0,5,9],
One possible answer is: [0,-3,9,-10,null,5], which represents the following height balanced BST:
0
/ \
-3 9
/ /
-10 5
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def sortedArrayToBST(self, nums):
        """Convert a sorted (ascending) array into a height-balanced BST.

        Returns the root TreeNode, or None for an empty input.

        Bug fix: the original inner helper only had base cases for 2- and
        3-element ranges, so e.g. a 4-element input reached helper(l, l) and
        then helper(l, l-1), which indexed nums[-1] and recursed without
        bound (RecursionError).  A single lo > hi base case handles every
        range size correctly.
        """
        def build(lo, hi):
            # Build the subtree for nums[lo..hi] inclusive.
            if lo > hi:
                return None
            mid = (lo + hi) >> 1  # middle element becomes the subtree root
            node = TreeNode(nums[mid])
            node.left = build(lo, mid - 1)
            node.right = build(mid + 1, hi)
            return node
        return build(0, len(nums) - 1)
| 785 | -6 | 49 |
ebd0f8a35e0cc734c7ac99d852860f318c08a16b | 352 | py | Python | datahub/email_ingestion/__init__.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 6 | 2019-12-02T16:11:24.000Z | 2022-03-18T10:02:02.000Z | datahub/email_ingestion/__init__.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 1,696 | 2019-10-31T14:08:37.000Z | 2022-03-29T12:35:57.000Z | datahub/email_ingestion/__init__.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 9 | 2019-11-22T12:42:03.000Z | 2021-09-03T14:25:05.000Z | """
Generic functionality for processing emails from a mail server.
This is used by datahub.interaction.email_processors for creating DIT interactions from
calendar invitations.
"""
from datahub.email_ingestion.mailbox import MailboxHandler
default_app_config = 'datahub.email_ingestion.apps.EmailIngestionConfig'
mailbox_handler = MailboxHandler()
| 29.333333 | 87 | 0.838068 | """
Generic functionality for processing emails from a mail server.
This is used by datahub.interaction.email_processors for creating DIT interactions from
calendar invitations.
"""
from datahub.email_ingestion.mailbox import MailboxHandler
default_app_config = 'datahub.email_ingestion.apps.EmailIngestionConfig'
mailbox_handler = MailboxHandler()
| 0 | 0 | 0 |
55b400c564fda3cf5530eb06082e2a4195ce4e15 | 589 | py | Python | dqn/demo.py | nerdoid/dqn | a51993ea1a3b062ac93ac87fd1817fddfec4c41d | [
"MIT"
] | null | null | null | dqn/demo.py | nerdoid/dqn | a51993ea1a3b062ac93ac87fd1817fddfec4c41d | [
"MIT"
] | null | null | null | dqn/demo.py | nerdoid/dqn | a51993ea1a3b062ac93ac87fd1817fddfec4c41d | [
"MIT"
] | null | null | null | import config
import envs
import agents
import stats
if __name__ == '__main__':
config = config.get_config()
demo(config)
| 17.848485 | 44 | 0.594228 | import config
import envs
import agents
import stats
def demo(config):
    """Run a single evaluation ("demo") session of a DQN agent.

    Builds a monitored Atari environment and a DeepQLearner in demo mode,
    then evaluates it for the configured number of episodes.

    Args:
        config: dict-like experiment configuration; must provide
            'num_eval_episodes' and 'max_steps_per_eval_episode'.

    NOTE(review): the four None arguments presumably stand for training-only
    dependencies (replay memory, summary writer, ...) unused when
    is_demo=True -- confirm against agents.DeepQLearner's signature.
    """
    eval_env = envs.Atari(
        config,
        monitor=True,
        monitor_freq=1,
        monitor_name='demo'
    )
    num_actions = eval_env.num_actions
    eval_agent = agents.DeepQLearner(
        config,
        num_actions,
        None,
        None,
        eval_env,
        None,
        is_demo=True
    )
    eval_agent.evaluate(
        config['num_eval_episodes'],
        config['max_steps_per_eval_episode']
    )
if __name__ == '__main__':
config = config.get_config()
demo(config)
| 433 | 0 | 23 |
18b647d3131e13d7afe9bbe3e07bdc581fe5f4f5 | 1,587 | py | Python | docs/conf.py | Tony1928/hylang | 8aeaace7cd719ab1d00b48808cbd53c67c944cb3 | [
"MIT"
] | 4 | 2017-08-09T01:31:56.000Z | 2022-01-17T01:11:23.000Z | docs/conf.py | woodrush/hy | d9a5acbcc93114031c70fd7ea497e4e59c868e25 | [
"MIT"
] | null | null | null | docs/conf.py | woodrush/hy | d9a5acbcc93114031c70fd7ea497e4e59c868e25 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
import re, os, sys, time, cgi
sys.path.append(os.path.abspath(".."))
from get_version import __version__ as hy_version
# Read the Docs might dirty its checkout, so strip the dirty flag.
hy_version = re.sub('[+.]dirty\Z', '', hy_version)
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = u'hy'
copyright = u'%s the authors' % time.strftime('%Y')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(hy_version.split(".")[:-1])
# The full version, including alpha/beta/rc tags.
release = hy_version
hy_descriptive_version = cgi.escape(hy_version)
if "+" in hy_version:
hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>"
exclude_patterns = ['_build', 'coreteam.rst']
pygments_style = 'sphinx'
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_use_smartypants = False
html_show_sphinx = False
html_context = dict(
hy_descriptive_version = hy_descriptive_version)
| 31.117647 | 80 | 0.742281 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
import re, os, sys, time, cgi
sys.path.append(os.path.abspath(".."))
from get_version import __version__ as hy_version
# Read the Docs might dirty its checkout, so strip the dirty flag.
hy_version = re.sub('[+.]dirty\Z', '', hy_version)
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = u'hy'
copyright = u'%s the authors' % time.strftime('%Y')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(hy_version.split(".")[:-1])
# The full version, including alpha/beta/rc tags.
release = hy_version
hy_descriptive_version = cgi.escape(hy_version)
if "+" in hy_version:
hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>"
exclude_patterns = ['_build', 'coreteam.rst']
pygments_style = 'sphinx'
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_use_smartypants = False
html_show_sphinx = False
html_context = dict(
hy_descriptive_version = hy_descriptive_version)
| 0 | 0 | 0 |
91801d1cb2750485a4c2ed817a32807903cfa41e | 1,520 | py | Python | automatic_image_enhancer.py | ThePyProgrammer/ImageEnhancer | 11a3adbe4cc4577a8fe48904fdbbf5674d123cdb | [
"MIT"
] | 3 | 2021-08-02T14:47:51.000Z | 2021-08-06T02:26:28.000Z | automatic_image_enhancer.py | ThePyProgrammer/ImageEnhancer | 11a3adbe4cc4577a8fe48904fdbbf5674d123cdb | [
"MIT"
] | null | null | null | automatic_image_enhancer.py | ThePyProgrammer/ImageEnhancer | 11a3adbe4cc4577a8fe48904fdbbf5674d123cdb | [
"MIT"
] | 1 | 2022-01-01T20:53:40.000Z | 2022-01-01T20:53:40.000Z | from enancement_model import ImageEnhancer
import cv2
import numpy
import os
import sys
import tensorflow as tf
a = ImageLoader(image="path/to/your/image")
a.process_images(crop_only_images=True)
| 34.545455 | 129 | 0.589474 | from enancement_model import ImageEnhancer
import cv2
import numpy
import os
import sys
import tensorflow as tf
class ImageLoader:
    """Prepares the 'images_enhanced' working directory and feeds images
    (optionally cropped to detected faces) into ImageEnhancer.

    NOTE(review): paths use Windows-style backslashes ('images_enhanced\\...'),
    so this code is Windows-specific as written.
    """
    def __init__(self, image):
        # image: path to the source image file.
        self.image = image
        try:
            os.makedirs('images_enhanced')
        except Exception as e:
            # Directory already exists (or makedirs failed for another
            # reason): wipe its contents and recreate it empty.
            print(e, "Truncating files", sep=" \n ")
            for _, _, files in os.walk('images_enhanced'):
                for m in range (len(files)):
                    os.remove(f'images_enhanced\\{files[m]}')
            os.removedirs('images_enhanced')
            os.makedirs('images_enhanced')
    def process_images(self, crop_only_images=False):
        """Enhance the image; when crop_only_images is True, first detect
        faces with a Haar cascade and enhance each cropped face instead."""
        if crop_only_images:
            img = cv2.imread(self.image)
            # Each detected face is (x, y, w, h); the slice below crops
            # rows y:y+h and columns x:x+w.
            faces = cv2.CascadeClassifier('haarcascade_frontalface_default.xml').detectMultiScale(img, 1.3, 5)
            for i, face in enumerate(faces):
                cv2.imwrite(filename=f'images_enhanced\\image_{i}.jpg', img=img[face[1]:sum(face[1::2]), face[0]:sum(face[::2])])
            for _, _, files in os.walk('images_enhanced'):
                for i, file in enumerate(files):
                    ImageEnhancer(f"images_enhanced\\{file}", i)
        else:
            # Enhance the full image, then iteratively re-enhance the
            # intermediate Super_Resolution outputs 5 more times.
            ImageEnhancer(image=self.image, number=0)
            for i in range(5):
                ImageEnhancer(f"images_enhanced\\Super_Resolution{i}.jpg", number=i+1)
                print(f'Image {i} saved')
a = ImageLoader(image="path/to/your/image")
a.process_images(crop_only_images=True)
| 1,239 | -3 | 84 |
82dcb76adb4c589e7f07d325cae78e8c10fd8e4d | 2,738 | py | Python | tests/test_benchmark.py | ahmed-f-alrefaie/forecaster | 25b73a533f6195f3e5c703730e63cb3e242c649a | [
"MIT"
] | null | null | null | tests/test_benchmark.py | ahmed-f-alrefaie/forecaster | 25b73a533f6195f3e5c703730e63cb3e242c649a | [
"MIT"
] | null | null | null | tests/test_benchmark.py | ahmed-f-alrefaie/forecaster | 25b73a533f6195f3e5c703730e63cb3e242c649a | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from forecaster.mr_forecast import load_file
NSAMPLES = 100
@pytest.mark.linearbench
@pytest.mark.linearbench
@pytest.mark.probrbench
@pytest.mark.probrbench | 31.471264 | 89 | 0.703068 | import numpy as np
import pytest
from forecaster.mr_forecast import load_file
NSAMPLES = 100
@pytest.mark.linearbench
def test_piece_linear_original(benchmark):
from forecaster.func import generate_mass, pick_random_hyper, \
piece_linear, piece_linear_II
from forecaster.mr_forecast import load_file
all_hyper = load_file()
nsamples = NSAMPLES
mass = generate_mass(1.0, 0.1, nsamples)
sample_size = len(mass)
logm = np.log10(mass)
prob = np.random.random(sample_size)
logr = np.ones_like(logm)
hyper = pick_random_hyper(all_hyper, sample_size=sample_size)
def myfunc():
return [piece_linear(hyper[i], logm[i], prob[i]) for i in range(sample_size)]
benchmark(myfunc)
@pytest.mark.linearbench
def test_piece_linear_new(benchmark):
from forecaster.func import generate_mass, pick_random_hyper, \
piece_linear, piece_linear_II
from forecaster.mr_forecast import load_file
all_hyper = load_file()
nsamples = NSAMPLES
mass = generate_mass(1.0, 0.1, nsamples)
sample_size = len(mass)
logm = np.log10(mass)
prob = np.random.random(sample_size)
logr = np.ones_like(logm)
hyper = pick_random_hyper(all_hyper, sample_size=sample_size)
benchmark(piece_linear_II, hyper, logm, prob)
@pytest.mark.probrbench
def test_probR_original(benchmark):
from scipy.stats import norm, truncnorm
from forecaster.func import generate_mass, pick_random_hyper, \
piece_linear, piece_linear_II, ProbRGivenM
all_hyper = load_file()
mean = 0.01
std = 0.001
sample_size = NSAMPLES
radius = truncnorm.rvs( (0.-mean)/std, np.inf, loc=mean, scale=std, size=sample_size)
logr = np.log10(radius)
logm = np.ones_like(logr)
grid_size = 100
logm_grid = np.linspace(-3.522, 5.477, int(grid_size))
hyper = pick_random_hyper(all_hyper, sample_size=sample_size)
def func():
return np.array([ ProbRGivenM(logr[i], logm_grid, hyper[i,:])
for i in range(sample_size)])
benchmark(func)
@pytest.mark.probrbench
def test_probR_new(benchmark):
from scipy.stats import norm, truncnorm
from forecaster.func import generate_mass, pick_random_hyper, \
piece_linear, piece_linear_II, ProbRGivenM_II
all_hyper = load_file()
mean = 0.01
std = 0.001
sample_size = NSAMPLES
radius = truncnorm.rvs( (0.-mean)/std, np.inf, loc=mean, scale=std, size=sample_size)
logr = np.log10(radius)
logm = np.ones_like(logr)
grid_size = 100
logm_grid = np.linspace(-3.522, 5.477, int(grid_size))
hyper = pick_random_hyper(all_hyper, sample_size=sample_size)
benchmark(ProbRGivenM_II, logr, logm_grid, hyper) | 2,455 | 0 | 88 |
69b6a5536a6a6a11f59d29995be0d8f89462b424 | 522 | py | Python | python/caty/template/builder.py | hidaruma/caty | f71d2ab0a001ea4f7a96a6e02211187ebbf54773 | [
"MIT"
] | null | null | null | python/caty/template/builder.py | hidaruma/caty | f71d2ab0a001ea4f7a96a6e02211187ebbf54773 | [
"MIT"
] | null | null | null | python/caty/template/builder.py | hidaruma/caty | f71d2ab0a001ea4f7a96a6e02211187ebbf54773 | [
"MIT"
] | null | null | null | # coding:utf-8
u"""テンプレートエンジンの構築を行うモジュール。
テンプレートエンジンを単体で動作させる場合に使うコンビニエンスな関数を提供する。
"""
from caty.template.core.template import Template
from caty.template.core.loader import BytecodeLoader, TextBytecodePersister
def build_template(compiler, resource_io):
u"""Template オブジェクトの構築を行う。
compiler と resource_io はそれぞれ ICompiler と AbstarctResourceIO と
同一のインターフェースを持っている必要がある。
"""
persister = TextBytecodePersister()
bloader = BytecodeLoader(compiler, resource_io, persister)
return Template(bloader)
| 30.705882 | 75 | 0.791188 | # coding:utf-8
u"""テンプレートエンジンの構築を行うモジュール。
テンプレートエンジンを単体で動作させる場合に使うコンビニエンスな関数を提供する。
"""
from caty.template.core.template import Template
from caty.template.core.loader import BytecodeLoader, TextBytecodePersister
def build_template(compiler, resource_io):
u"""Template オブジェクトの構築を行う。
compiler と resource_io はそれぞれ ICompiler と AbstarctResourceIO と
同一のインターフェースを持っている必要がある。
"""
persister = TextBytecodePersister()
bloader = BytecodeLoader(compiler, resource_io, persister)
return Template(bloader)
| 0 | 0 | 0 |
0d4f945642e830f48d74f1338be5c931521cb604 | 69 | py | Python | tests/testdata/amazon.py | landscapeio/dodgy | b3fbaf05b106f7c3da8160e38c704f695613ff4d | [
"MIT"
] | 89 | 2015-01-28T20:47:32.000Z | 2022-03-23T01:54:44.000Z | tests/testdata/amazon.py | landscapeio/dodgy | b3fbaf05b106f7c3da8160e38c704f695613ff4d | [
"MIT"
] | 23 | 2015-01-31T10:23:12.000Z | 2021-09-22T09:20:26.000Z | tests/testdata/amazon.py | landscapeio/dodgy | b3fbaf05b106f7c3da8160e38c704f695613ff4d | [
"MIT"
] | 22 | 2015-01-05T10:12:42.000Z | 2022-01-13T10:33:48.000Z |
AWS_SECRET_ACCESS_KEY = r'A8+6AN5TSUZ3vysJg68Rt\A9E7duMlfKODwb3ZD8'
| 23 | 67 | 0.869565 |
AWS_SECRET_ACCESS_KEY = r'A8+6AN5TSUZ3vysJg68Rt\A9E7duMlfKODwb3ZD8'
| 0 | 0 | 0 |
a63d8c758a59e8d30d8f61b9b702cdc230baa4fe | 1,834 | py | Python | tf.gradients_eg/tf.gradients_eg.py | thainv0212/re-ddpg | 00ed4206e31bda16c6712cc4d680423a4b318629 | [
"MIT"
] | null | null | null | tf.gradients_eg/tf.gradients_eg.py | thainv0212/re-ddpg | 00ed4206e31bda16c6712cc4d680423a4b318629 | [
"MIT"
] | null | null | null | tf.gradients_eg/tf.gradients_eg.py | thainv0212/re-ddpg | 00ed4206e31bda16c6712cc4d680423a4b318629 | [
"MIT"
] | null | null | null | '''
Understanding optimization with tf.gradients using linear regression
Author: Steven Spielberg Pon Kumar
'''
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Set model weights
W = tf.Variable(0.1, name="weight")
b = tf.Variable(0.1, name="bias")
# Construct a linear model
pred = tf.add(tf.multiply(X, W), b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
params= [W,b]
gradient = tf.gradients(cost,params)
opt = tf.train.GradientDescentOptimizer(learning_rate)
update=opt.apply_gradients(zip(gradient,params))
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
sess.run(update, feed_dict={X: train_X, Y: train_Y}) #gradient descent
#Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
"W=", sess.run(W), "b=", sess.run(b))
print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
| 26.970588 | 86 | 0.648855 | '''
Understanding optimization with tf.gradients using linear regression
Author: Steven Spielberg Pon Kumar
'''
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
# Training Data
train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,
7.042,10.791,5.313,7.997,5.654,9.27,3.1])
train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,
2.827,3.465,1.65,2.904,2.42,2.94,1.3])
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Set model weights
W = tf.Variable(0.1, name="weight")
b = tf.Variable(0.1, name="bias")
# Construct a linear model
pred = tf.add(tf.multiply(X, W), b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
params= [W,b]
gradient = tf.gradients(cost,params)
opt = tf.train.GradientDescentOptimizer(learning_rate)
update=opt.apply_gradients(zip(gradient,params))
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
sess.run(update, feed_dict={X: train_X, Y: train_Y}) #gradient descent
#Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
"W=", sess.run(W), "b=", sess.run(b))
print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
| 0 | 0 | 0 |
64458d6c26d3a77fed375b45eb8d7b157c476d26 | 1,025 | py | Python | main.py | trollerfreak331/pornhub-pluenderer | ff4260cfbe78fa36d8cd9ab8edd571fbb292e708 | [
"MIT"
] | 12 | 2018-04-12T06:54:20.000Z | 2021-10-03T10:41:38.000Z | main.py | trollerfreak331/pornhub-pluenderer | ff4260cfbe78fa36d8cd9ab8edd571fbb292e708 | [
"MIT"
] | 3 | 2017-12-01T11:47:01.000Z | 2017-12-01T15:02:53.000Z | main.py | trollerfreak331/pornhub-pluenderer | ff4260cfbe78fa36d8cd9ab8edd571fbb292e708 | [
"MIT"
] | 5 | 2018-04-14T04:02:11.000Z | 2019-12-22T07:52:50.000Z | import sys
import signal
from clint.textui import colored, puts
from downloader import Downloader
from extractor import Extractor
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
if __name__ == "__main__":
main()
| 28.472222 | 82 | 0.675122 | import sys
import signal
from clint.textui import colored, puts
from downloader import Downloader
from extractor import Extractor
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
def main():
downloader = Downloader()
extractor = Extractor()
url = "https://pornhub.com"
puts(colored.green("getting video keys."))
main_page = downloader.get(url)
view_keys = extractor.get_viewkeys(main_page)
puts(colored.green("starting to download videos."))
for key in view_keys:
puts(colored.green("getting video information."))
absolute_url = "https://pornhub.com/view_video.php?viewkey=" + key
page = downloader.get(absolute_url)
info = extractor.get_video_info(page)
if info is None:
continue
hd_quality = info['mediaDefinitions'][0]
puts(colored.green("downloading video %s." % info['video_title']))
downloader.save_file(hd_quality["videoUrl"], info['video_title'] + ".mp4")
if __name__ == "__main__":
main()
| 775 | 0 | 23 |
d6406794ca1022d640d3a5f8b057db139be8dd9f | 773 | py | Python | utils/stylesheet/__init__.py | Badspler/stylesheet | fa270e590646d80e07af73fcc1ebdf08cce8b11a | [
"CC0-1.0"
] | 11 | 2017-10-23T16:15:19.000Z | 2022-03-28T06:44:59.000Z | utils/stylesheet/__init__.py | seanwallawalla-forks/stylesheet | 04406aa96c727d292ff6131ea36e571f97ed98e2 | [
"CC0-1.0"
] | 34 | 2017-10-13T16:31:19.000Z | 2021-11-30T01:25:46.000Z | utils/stylesheet/__init__.py | seanwallawalla-forks/stylesheet | 04406aa96c727d292ff6131ea36e571f97ed98e2 | [
"CC0-1.0"
] | 26 | 2017-10-13T16:14:46.000Z | 2022-03-28T06:45:23.000Z | """Package providing Stylesheet classes."""
import os
from stylesheet.stylesheet_image import (LocalStylesheetImage,
RemoteStylesheetImage,
StoredStylesheetImage,
StylesheetImage)
from stylesheet.stylesheet_image_list import StylesheetImageList
from stylesheet.stylesheet_assets import StylesheetAssets
from stylesheet.stylesheet_data import StylesheetData
from stylesheet.stylesheet_image_mapper import StylesheetImageMapper
from stylesheet.stylesheet_assets_validator import StylesheetAssetsValidator
from stylesheet.stylesheet_assets_builder import StylesheetAssetsBuilder
from stylesheet.stylesheet_assets_updater import StylesheetAssetsUpdater
| 51.533333 | 76 | 0.760673 | """Package providing Stylesheet classes."""
import os
from stylesheet.stylesheet_image import (LocalStylesheetImage,
RemoteStylesheetImage,
StoredStylesheetImage,
StylesheetImage)
from stylesheet.stylesheet_image_list import StylesheetImageList
from stylesheet.stylesheet_assets import StylesheetAssets
from stylesheet.stylesheet_data import StylesheetData
from stylesheet.stylesheet_image_mapper import StylesheetImageMapper
from stylesheet.stylesheet_assets_validator import StylesheetAssetsValidator
from stylesheet.stylesheet_assets_builder import StylesheetAssetsBuilder
from stylesheet.stylesheet_assets_updater import StylesheetAssetsUpdater
| 0 | 0 | 0 |
876632081b1a46b50fc68f8479a7c14b917d15ce | 14,354 | py | Python | deploy/stacks/cttso_ica_to_pieriandx.py | umccr/cttso-ica-to-pieriandx | 19add557f96232592d69fa642fd31bbdced52dd2 | [
"MIT"
] | null | null | null | deploy/stacks/cttso_ica_to_pieriandx.py | umccr/cttso-ica-to-pieriandx | 19add557f96232592d69fa642fd31bbdced52dd2 | [
"MIT"
] | null | null | null | deploy/stacks/cttso_ica_to_pieriandx.py | umccr/cttso-ica-to-pieriandx | 19add557f96232592d69fa642fd31bbdced52dd2 | [
"MIT"
] | null | null | null | from aws_cdk import (
Stack,
aws_batch_alpha as batch,
aws_ecr as ecr,
aws_ec2 as ec2,
aws_ecs as ecs,
aws_iam as iam,
aws_ssm as ssm,
aws_lambda,
aws_s3_assets as assets,
Fn,
Duration
)
from constructs import Construct
from pathlib import Path
from typing import Dict
| 38.482574 | 160 | 0.547931 | from aws_cdk import (
Stack,
aws_batch_alpha as batch,
aws_ecr as ecr,
aws_ec2 as ec2,
aws_ecs as ecs,
aws_iam as iam,
aws_ssm as ssm,
aws_lambda,
aws_s3_assets as assets,
Fn,
Duration
)
from constructs import Construct
from pathlib import Path
from typing import Dict
class CttsoIcaToPieriandxStack(Stack):
def __init__(self, scope: Construct, construct_id: str, props: Dict, **kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
# The code that defines your stack goes here
env = kwargs.get("env")
# Set a prefix - rather than writing cttso-ica-to-pieriandx many times
cdk_attribute_prefix = "ctTSOICAToPierianDx"
# Get ssm values
# AMI
compute_env_ami = ssm.\
StringParameter.from_string_parameter_attributes(self,
"ec2Ami",
parameter_name="/cdk/cttso-ica-to-pieriandx/batch/ami").string_value
# Image Name
image_name = ssm.\
StringParameter.from_string_parameter_attributes(self,
"imageName",
parameter_name="/cdk/cttso-ica-to-pieriandx/batch/docker-image-name").string_value
image_tag = ssm.\
StringParameter.from_string_parameter_attributes(self,
"imageTag",
parameter_name="/cdk/cttso-ica-to-pieriandx/batch/docker-image-tag").string_value
# Add batch service role
batch_service_role = iam.Role(
self,
'BatchServiceRole',
assumed_by=iam.ServicePrincipal('batch.amazonaws.com'),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSBatchServiceRole')
]
)
spotfleet_role = iam.Role(
self,
'AmazonEC2SpotFleetRole',
assumed_by=iam.ServicePrincipal('spotfleet.amazonaws.com'),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2SpotFleetTaggingRole')
]
)
# Create role for Batch instances
batch_instance_role = iam.Role(
self,
'BatchInstanceRole',
role_name=f'{cdk_attribute_prefix}BatchInstanceRole',
assumed_by=iam.CompositePrincipal(
iam.ServicePrincipal('ec2.amazonaws.com'),
iam.ServicePrincipal('ecs.amazonaws.com')
),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2RoleforSSM'),
iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2ContainerServiceforEC2Role'),
iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonECSTaskExecutionRolePolicy'),
iam.ManagedPolicy.from_aws_managed_policy_name('SecretsManagerReadWrite')
]
)
# Add list clusters ecs to policy
# FIXME - don't know why I need to do this - mybe I don't
# batch_instance_role.add_to_policy(
# iam.PolicyStatement(
# actions=[
# "ecs:ListClusters"
# ],
# resources=["*"]
# )
# )
# Turn the instance role into a Instance Profile
batch_instance_profile = iam.CfnInstanceProfile(
self,
'BatchInstanceProfile',
instance_profile_name=f'{cdk_attribute_prefix}BatchInstanceProfile',
roles=[batch_instance_role.role_name]
)
################################################################################
# Network
# (Import common infrastructure (maintained via TerraForm)
# VPC
vpc = ec2.Vpc.from_lookup(
self,
'UmccrMainVpc',
tags={'Name': 'main-vpc', 'Stack': 'networking'}
)
batch_security_group = ec2.SecurityGroup(
self,
"BatchSecurityGroup",
vpc=vpc,
description="Allow all outbound, no inbound traffic"
)
################################################################################
# Setup Batch compute resources
# Configure BlockDevice to expand instance disk space (if needed?)
# block_device_mappings = [
# {
# 'deviceName': '/dev/xvdf',
# 'ebs': {
# 'deleteOnTermination': True,
# 'encrypted': True,
# 'volumeSize': 2048,
# 'volumeType': 'gp2'
# }
# }
# ]
# Now create the actual UserData
# I.e. download the batch-user-data asset and run it with required parameters
# Set up local assets/files to be uploaded to S3 (so they are available when UserData requires them)
cttso_ica_to_pieriandx_wrapper_asset = assets.Asset(
self,
f'{cdk_attribute_prefix}WrapperAsset',
path=str(
Path(__file__).parent.joinpath(Path("../") / 'assets' / "cttso-ica-to-pieriandx-wrapper.sh").resolve())
)
cttso_ica_to_pieriandx_wrapper_asset.grant_read(batch_instance_role)
#user_data_asset = assets.Asset(
# self,
# 'UserDataAsset',
# path=str(Path(__file__).parent.joinpath(Path("../") / 'assets' / "batch-user-data.sh"))
#)
#user_data_asset.grant_read(batch_instance_role)
cw_agent_config_asset = assets.Asset(
self,
f'{cdk_attribute_prefix}CwAgentConfigAsset',
path=str(Path(__file__).parent.joinpath(Path("../") / 'assets' / "cw-agent-config-addon.json"))
)
cw_agent_config_asset.grant_read(batch_instance_role)
# Set up resources
# Add start of mime wrapper
user_data_mappings = {
"__S3_WRAPPER_SCRIPT_URL__": f"s3://{cttso_ica_to_pieriandx_wrapper_asset.bucket.bucket_name}/{cttso_ica_to_pieriandx_wrapper_asset.s3_object_key}",
"__S3_CWA_CONFIG_URL__": f"s3://{cw_agent_config_asset.bucket.bucket_name}/{cw_agent_config_asset.s3_object_key}"
}
with open(str((Path(__file__).parent.joinpath(Path("../") / 'assets' / "batch-user-data.sh")).resolve()), 'r') as user_data_h:
# Use a substitution
user_data_sub = Fn.sub(user_data_h.read(), user_data_mappings)
# Import substitution object into user_data set
user_data = ec2.UserData.custom(user_data_sub)
mime_wrapper = ec2.UserData.custom('MIME-Version: 1.0')
#
mime_wrapper.add_commands('Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="')
mime_wrapper.add_commands('')
mime_wrapper.add_commands('--==MYBOUNDARY==')
mime_wrapper.add_commands('Content-Type: text/x-shellscript; charset="us-ascii"')
#
# # Get batch user data asset
# with open(str(Path(__file__).parent.joinpath(Path("../") / "assets" / "batch-user-data.sh").resolve()), 'rb') as user_data_h:
# # Skip the first line (Shebang)
# _ = user_data_h.readline()
# # Read in user data
# mime_wrapper.add_commands(str(user_data_h.read(), 'utf-8'))
#
#
#
# Add user data to mime wrapper
mime_wrapper.add_commands(user_data.render())
#
# Add ending to mime wrapper
mime_wrapper.add_commands('--==MYBOUNDARY==--')
# Launch template
launch_template = ec2.LaunchTemplate(
self,
f'{cdk_attribute_prefix}BatchComputeLaunchTemplate',
launch_template_name=f'{cdk_attribute_prefix}BatchComputeLaunchTemplate',
user_data=mime_wrapper,
block_devices=[
ec2.BlockDevice(device_name='/dev/xvdf',
volume=ec2.BlockDeviceVolume.ebs(
volume_size=64, # GB
volume_type=ec2.EbsDeviceVolumeType.GP2,
encrypted=True,
delete_on_termination=True)
)
]
)
# Add in user data, see https://github.com/aws/aws-cdk/issues/6427#issuecomment-595626666
# launch_template.add_property_override(
# "LaunchTemplateData", {
# "UserData": Fn.base64(user_data.render())
# }
# )
# Launch template specs
launch_template_spec = batch.LaunchTemplateSpecification(
launch_template_name=f'{cdk_attribute_prefix}BatchComputeLaunchTemplate',
version=launch_template.version_number
)
# Compute resources
my_compute_res = batch.ComputeResources(
type=batch.ComputeResourceType.ON_DEMAND,
allocation_strategy=batch.AllocationStrategy.BEST_FIT,
desiredv_cpus=0,
maxv_cpus=32,
minv_cpus=0,
image=ec2.MachineImage.generic_linux(ami_map={'ap-southeast-2': compute_env_ami}),
launch_template=launch_template_spec,
spot_fleet_role=spotfleet_role,
instance_role=batch_instance_profile.instance_profile_name,
vpc=vpc,
vpc_subnets=ec2.SubnetSelection(
subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT,
availability_zones=["ap-southeast-2a"]
),
security_groups=[batch_security_group],
compute_resources_tags={
'Creator': 'Batch',
'Stack': cdk_attribute_prefix,
'Name': 'BatchWorker'
}
)
my_compute_env = batch.ComputeEnvironment(
self,
f'{cdk_attribute_prefix}BatchComputeEnv',
# compute_environment_name=f"{cdk_attribute_prefix}-batch-compute-env", # naming means unable to update
service_role=batch_service_role,
compute_resources=my_compute_res
)
# child = my_compute_env.node.default_child
# child_comp_res = child.compute_resources
# child_comp_res.tags = "{'Foo': 'Bar'}"
job_queue = batch.JobQueue(
self,
f'{cdk_attribute_prefix}JobQueue',
job_queue_name=f'cdk-{cdk_attribute_prefix}_job_queue',
compute_environments=[
batch.JobQueueComputeEnvironment(
compute_environment=my_compute_env,
order=1
)
],
priority=10
)
job_container = batch.JobDefinitionContainer(
image=ecs.ContainerImage.from_ecr_repository(
repository=ecr.Repository.from_repository_name(
self,
"cttso_ica_to_pieriandx_repository",
repository_name=image_name
),
tag=image_tag
),
vcpus=1,
user="cttso_ica_to_pieriandx_user:cttso_ica_to_pieriandx_group",
memory_limit_mib=1024,
command=[
"/opt/container/cttso-ica-to-pieriandx-wrapper.sh",
"--ica-workflow-run-id", "Ref::ica_workflow_run_id",
"--accession-json-base64-str", "Ref::accession_json_base64_str",
],
mount_points=[
ecs.MountPoint(
container_path='/work',
read_only=False,
source_volume='work'
),
ecs.MountPoint(
container_path='/opt/container',
read_only=True,
source_volume='container'
)
],
volumes=[
ecs.Volume(
name='container',
host=ecs.Host(
source_path='/opt/container'
)
),
ecs.Volume(
name='work',
host=ecs.Host(
source_path='/mnt'
)
)
],
)
job_definition = batch.JobDefinition(
self,
f'{cdk_attribute_prefix}JobDefinition',
job_definition_name=f'cdk-{cdk_attribute_prefix}-job-definition',
parameters={},
container=job_container,
retry_attempts=2,
timeout=Duration.hours(5)
)
################################################################################
# Set up job submission Lambda
lambda_role = iam.Role(
self,
f'{cdk_attribute_prefix}LambdaRole',
assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'),
# TODO - too much!
iam.ManagedPolicy.from_aws_managed_policy_name('AWSBatchFullAccess'),
iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMReadOnlyAccess')
]
)
runtime = aws_lambda.Runtime(
name="python3.9"
)
aws_lambda.Function(
self,
f'{cdk_attribute_prefix}Lambda',
function_name=f'{cdk_attribute_prefix}_batch_lambda',
handler='cttso_ica_to_pieriandx.lambda_handler',
runtime=runtime,
code=aws_lambda.Code.from_asset(str(Path(__file__).parent.joinpath(Path("../") / "lambdas" / "cttso_ica_to_pieriandx").resolve())),
environment={
'JOBDEF': job_definition.job_definition_name,
'JOBQUEUE': job_queue.job_queue_name,
'JOBNAME_PREFIX': "CTTSO_ICA_TO_PIERIANDX_",
'MEM': '1000',
'VCPUS': '1'
},
role=lambda_role
)
| 13,965 | 17 | 50 |
5ebe1cf11ac1ff1568bd99721166c755432a2301 | 11,460 | py | Python | bureau/personnel/tests/test_admin.py | clairempr/bureau | c9fd114e637829b4e9ff643459d15602cc2efc2f | [
"Apache-2.0"
] | 1 | 2019-02-15T09:05:35.000Z | 2019-02-15T09:05:35.000Z | bureau/personnel/tests/test_admin.py | clairempr/bureau | c9fd114e637829b4e9ff643459d15602cc2efc2f | [
"Apache-2.0"
] | null | null | null | bureau/personnel/tests/test_admin.py | clairempr/bureau | c9fd114e637829b4e9ff643459d15602cc2efc2f | [
"Apache-2.0"
] | null | null | null | import string
from partial_date import PartialDate
from django.contrib.admin import site
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.urls import reverse
from django.test import Client, RequestFactory, TestCase
from assignments.tests.factories import AssignmentFactory
from military.tests.factories import RegimentFactory
from places.tests.factories import PlaceFactory, RegionFactory
from personnel.admin import DateOfBirthFilledListFilter, EmployeeAdmin, EmploymentYearListFilter, FirstLetterListFilter, \
PlaceOfBirthFilledListFilter, USCTListFilter, YES_NO_LOOKUPS
from personnel.models import Employee
from personnel.tests.factories import EmployeeFactory
User = get_user_model()
class EmployeeAdminTestCase(TestCase):
"""
Test custom EmployeeAdmin functionality
"""
def test_bureau_state(self):
"""
Field bureau_state should contain a
list of Employee's bureau_states
"""
state1 = RegionFactory(name='Lower Alabama')
state2 = RegionFactory(name='Old Virginny')
state3 = RegionFactory(name='Sunshine State')
states = [state1, state2]
employee = EmployeeFactory()
employee.bureau_states.set(states)
for state in states:
self.assertIn(state.name, EmployeeAdmin.bureau_state(EmployeeAdmin, employee),
'State in Employee.bureau_states should be in EmployeeAdmin.bureau_state')
self.assertNotIn(state3.name, EmployeeAdmin.bureau_state(EmployeeAdmin, employee),
'State not in Employee.bureau_states should not be in EmployeeAdmin.bureau_state')
def test_save_model(self):
"""
If Employee is a member of a VRC unit, 'vrc' should be True
"""
# Set up superuser to log in to admin and create new Employee
User.objects.create_superuser('admin', 'admin@example.com', 'Password123')
self.client = Client()
self.client.login(username='admin', password='Password123')
# Oops, we're forgetting to set vrc to True, even though he's in a VRC unit! Hope save_model catches it...
self.client.post(
reverse('admin:personnel_employee_add'),
{'id': 1, 'last_name': 'Dodge', 'first_name': 'Charles', 'gender': 'M', 'vrc': False,
'regiments': [RegimentFactory(vrc=True).id],
'assignments-TOTAL_FORMS': '0',
'assignments-INITIAL_FORMS': '0',
'assignments-MAX_NUM_FORMS': '1',
'assignments-MIN_NUM_FORMS': '1'},
follow=True,
)
self.assertTrue(Employee.objects.first().vrc,
"Employee in VRC unit should have 'vrc' set to true after saving")
class EmployeeAdminFilterTestCase(TestCase):
"""
Base class for testing EmployeeAdmin filters
"""
class DateOfBirthFilledListFilterTestCase(EmployeeAdminFilterTestCase):
"""
Test list filter for whether date_of_birth is filled
"""
class EmploymentYearListFilterTestCase(EmployeeAdminFilterTestCase):
"""
Test EmploymentYearListFilter
"""
class FirstLetterListFilterTestCase(EmployeeAdminFilterTestCase):
"""
Test list filter for first letter of last name
"""
class PlaceOfBirthFilledListFilterTestCase(EmployeeAdminFilterTestCase):
"""
Test list filter for whether place_of_birth is filled
"""
class USCTListFilterTestCase(TestCase):
"""
Test list filter for membership in a USCT regiment
"""
| 40.4947 | 122 | 0.692496 | import string
from partial_date import PartialDate
from django.contrib.admin import site
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.urls import reverse
from django.test import Client, RequestFactory, TestCase
from assignments.tests.factories import AssignmentFactory
from military.tests.factories import RegimentFactory
from places.tests.factories import PlaceFactory, RegionFactory
from personnel.admin import DateOfBirthFilledListFilter, EmployeeAdmin, EmploymentYearListFilter, FirstLetterListFilter, \
PlaceOfBirthFilledListFilter, USCTListFilter, YES_NO_LOOKUPS
from personnel.models import Employee
from personnel.tests.factories import EmployeeFactory
User = get_user_model()
class EmployeeAdminTestCase(TestCase):
"""
Test custom EmployeeAdmin functionality
"""
def test_bureau_state(self):
"""
Field bureau_state should contain a
list of Employee's bureau_states
"""
state1 = RegionFactory(name='Lower Alabama')
state2 = RegionFactory(name='Old Virginny')
state3 = RegionFactory(name='Sunshine State')
states = [state1, state2]
employee = EmployeeFactory()
employee.bureau_states.set(states)
for state in states:
self.assertIn(state.name, EmployeeAdmin.bureau_state(EmployeeAdmin, employee),
'State in Employee.bureau_states should be in EmployeeAdmin.bureau_state')
self.assertNotIn(state3.name, EmployeeAdmin.bureau_state(EmployeeAdmin, employee),
'State not in Employee.bureau_states should not be in EmployeeAdmin.bureau_state')
def test_save_model(self):
"""
If Employee is a member of a VRC unit, 'vrc' should be True
"""
# Set up superuser to log in to admin and create new Employee
User.objects.create_superuser('admin', 'admin@example.com', 'Password123')
self.client = Client()
self.client.login(username='admin', password='Password123')
# Oops, we're forgetting to set vrc to True, even though he's in a VRC unit! Hope save_model catches it...
self.client.post(
reverse('admin:personnel_employee_add'),
{'id': 1, 'last_name': 'Dodge', 'first_name': 'Charles', 'gender': 'M', 'vrc': False,
'regiments': [RegimentFactory(vrc=True).id],
'assignments-TOTAL_FORMS': '0',
'assignments-INITIAL_FORMS': '0',
'assignments-MAX_NUM_FORMS': '1',
'assignments-MIN_NUM_FORMS': '1'},
follow=True,
)
self.assertTrue(Employee.objects.first().vrc,
"Employee in VRC unit should have 'vrc' set to true after saving")
class EmployeeAdminFilterTestCase(TestCase):
"""
Base class for testing EmployeeAdmin filters
"""
def setUp(self):
self.modeladmin = EmployeeAdmin(Employee, site)
self.user = AnonymousUser()
self.request_factory = RequestFactory()
class DateOfBirthFilledListFilterTestCase(EmployeeAdminFilterTestCase):
"""
Test list filter for whether date_of_birth is filled
"""
def test_lookups(self):
employee_dob = EmployeeFactory(last_name='Howard', date_of_birth='1830-11-08')
employee_no_dob = EmployeeFactory(last_name='Barker')
request = self.request_factory.get('/')
request.user = self.user
changelist = self.modeladmin.get_changelist_instance(request)
# Make sure that Yes and No are present in the list filter
filter = DateOfBirthFilledListFilter(request, params='', model=Employee, model_admin=self.modeladmin)
self.assertEqual(sorted(filter.lookup_choices), sorted(YES_NO_LOOKUPS))
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {employee_dob, employee_no_dob})
# Look for employees with date_of_birth filled
request = self.request_factory.get('/', {'date_of_birth': 'Yes'})
request.user = self.user
changelist = self.modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {employee_dob})
# Look for employees with date_of_birth not filled
request = self.request_factory.get('/', {'date_of_birth': 'No'})
request.user = self.user
changelist = self.modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {employee_no_dob})
class EmploymentYearListFilterTestCase(EmployeeAdminFilterTestCase):
"""
Test EmploymentYearListFilter
"""
def test_lookups(self):
employee_1865_1866 = EmployeeFactory()
employee_1867_1868 = EmployeeFactory()
AssignmentFactory(start_date=PartialDate('1865-10'), end_date=PartialDate('1866-01'),
employee=employee_1865_1866)
AssignmentFactory(start_date=PartialDate('1867-06'), end_date=PartialDate('1868-03'),
employee=employee_1867_1868)
request = self.request_factory.get('/')
request.user = self.user
changelist = self.modeladmin.get_changelist_instance(request)
# Lookups should be a range of years from earliest Assignment start_date to latest Assignment end_date
filter = EmploymentYearListFilter(request, params='', model=Employee, model_admin=self.modeladmin)
expected = [(year, year) for year in [1865, 1866, 1867, 1868]]
self.assertEqual(sorted(filter.lookup_choices), expected)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {employee_1865_1866, employee_1867_1868})
# Look for employees who worked in 1867
request = self.request_factory.get('/', {'employment_year': '1867'})
request.user = self.user
changelist = self.modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {employee_1867_1868})
class FirstLetterListFilterTestCase(EmployeeAdminFilterTestCase):
"""
Test list filter for first letter of last name
"""
def test_lookups(self):
employee_c = EmployeeFactory(last_name='Curren')
employee_h = EmployeeFactory(last_name='Howard')
request = self.request_factory.get('/')
request.user = self.user
changelist = self.modeladmin.get_changelist_instance(request)
# Make sure that all capital letters are present in the list filter
filter = FirstLetterListFilter(request, params='', model=Employee, model_admin=self.modeladmin)
expected = [(letter, letter) for letter in list(string.ascii_uppercase)]
self.assertEqual(sorted(filter.lookup_choices), sorted(expected))
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {employee_c, employee_h})
# Look for employees whose last name starts with C
request = self.request_factory.get('/', {'letter': 'C'})
request.user = self.user
changelist = self.modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {employee_c})
class PlaceOfBirthFilledListFilterTestCase(EmployeeAdminFilterTestCase):
"""
Test list filter for whether place_of_birth is filled
"""
def test_lookups(self):
employee_pob = EmployeeFactory(last_name='Ruby', first_name='George Thompson', place_of_birth=PlaceFactory())
employee_no_pob = EmployeeFactory(last_name='Weiss', first_name='Charles N.')
request = self.request_factory.get('/')
request.user = self.user
changelist = self.modeladmin.get_changelist_instance(request)
# Make sure that Yes and No are present in the list filter
filter = PlaceOfBirthFilledListFilter(request, params='', model=Employee, model_admin=self.modeladmin)
self.assertEqual(sorted(filter.lookup_choices), sorted(YES_NO_LOOKUPS))
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {employee_pob, employee_no_pob})
# Look for employees with place_of_birth filled
request = self.request_factory.get('/', {'place_of_birth': 'Yes'})
request.user = self.user
changelist = self.modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {employee_pob})
# Look for employees with place_of_birth not filled
request = self.request_factory.get('/', {'place_of_birth': 'No'})
request.user = self.user
changelist = self.modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {employee_no_pob})
class USCTListFilterTestCase(TestCase):
"""
Test list filter for membership in a USCT regiment
"""
def test_lookups(self):
usct_regiment = RegimentFactory(usct=True)
usct_employee = EmployeeFactory(last_name='Dodge')
usct_employee.regiments.add(usct_regiment)
vrc_regiment = RegimentFactory(vrc=True)
vrc_employee = EmployeeFactory(last_name='MacNulty')
vrc_employee.regiments.add(vrc_regiment)
modeladmin = EmployeeAdmin(Employee, site)
request_factory = RequestFactory()
user = AnonymousUser()
request = request_factory.get('/')
request.user = user
changelist = modeladmin.get_changelist_instance(request)
# Make sure that Yes and No are present in the list filter
filter = USCTListFilter(request, params='', model=Employee, model_admin=EmployeeAdmin)
self.assertEqual(sorted(filter.lookup_choices), sorted(YES_NO_LOOKUPS))
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {usct_employee, vrc_employee})
# Look for employees who were members of a USCT regiment
request = request_factory.get('/', {'usct': 'Yes'})
request.user = user
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {usct_employee})
# Look for employees who were not members of a USCT regiment
request = request_factory.get('/', {'usct': 'No'})
request.user = user
changelist = modeladmin.get_changelist_instance(request)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertSetEqual(set(queryset), {vrc_employee})
| 7,731 | 0 | 162 |
2e620446c23d1768bb18fd0c506b9482be42dd86 | 1,336 | py | Python | tech_driven/test_backstage_pass.py | Neppord/bdd-ddd-gilded-rose | 15b0b94a55bc9024b9e7b4b4746914f2fbd46380 | [
"MIT"
] | null | null | null | tech_driven/test_backstage_pass.py | Neppord/bdd-ddd-gilded-rose | 15b0b94a55bc9024b9e7b4b4746914f2fbd46380 | [
"MIT"
] | null | null | null | tech_driven/test_backstage_pass.py | Neppord/bdd-ddd-gilded-rose | 15b0b94a55bc9024b9e7b4b4746914f2fbd46380 | [
"MIT"
] | null | null | null | from gilded_rose import GildedRose, Item
| 36.108108 | 83 | 0.774701 | from gilded_rose import GildedRose, Item
def test_update_sell_in():
backstage_pass_item = Item("Backstage passes to a TAFKAL80ETC concert", 2, 10)
items = [backstage_pass_item]
GildedRose(items).update_quality()
assert backstage_pass_item.sell_in == 1
def test_it_increase_in_quality_at_normal_rate():
backstage_pass_item = Item("Backstage passes to a TAFKAL80ETC concert", 11, 10)
items = [backstage_pass_item]
GildedRose(items).update_quality()
assert backstage_pass_item.quality == 11
def test_it_increase_in_quality_at_double_rate_when_there_is_10_days_left():
backstage_pass_item = Item("Backstage passes to a TAFKAL80ETC concert", 10, 10)
items = [backstage_pass_item]
GildedRose(items).update_quality()
assert backstage_pass_item.quality == 12
def test_it_increase_in_quality_at_triple_rate_when_there_is_5_days_left():
backstage_pass_item = Item("Backstage passes to a TAFKAL80ETC concert", 5, 10)
items = [backstage_pass_item]
GildedRose(items).update_quality()
assert backstage_pass_item.quality == 13
def test_it_has_no_quality_after_its_sell_date():
backstage_pass_item = Item("Backstage passes to a TAFKAL80ETC concert", 0, 10)
items = [backstage_pass_item]
GildedRose(items).update_quality()
assert backstage_pass_item.quality == 0
| 1,175 | 0 | 115 |
333a801e8dbdaa87aeadd4a35ceab9206fd8d5bc | 3,237 | py | Python | analyze/analyze.py | larsdittert/EyeOfCharacteristicsPythonService | 91113883de1cc59b3d47f341415661503befc0a9 | [
"MIT"
] | null | null | null | analyze/analyze.py | larsdittert/EyeOfCharacteristicsPythonService | 91113883de1cc59b3d47f341415661503befc0a9 | [
"MIT"
] | 4 | 2020-01-28T23:00:45.000Z | 2022-02-10T00:36:41.000Z | analyze/analyze.py | larsdittert/EyeOfCharacteristicsPythonService | 91113883de1cc59b3d47f341415661503befc0a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Eye Of Characteristics Analyzer.
# Copyright (C) 2019 IBM Deutschland
# Author: Lars Dittert <lars.dittert@de.ibm.com>
#
# This file is the main file to analyze images and predict them.
#
import dlib
import numpy as np
import cv2
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.models import load_model
| 38.535714 | 266 | 0.693543 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the Eye Of Characteristics Analyzer.
# Copyright (C) 2019 IBM Deutschland
# Author: Lars Dittert <lars.dittert@de.ibm.com>
#
# This file is the main file to analyze images and predict them.
#
import dlib
import numpy as np
import cv2
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras.models import load_model
class Analysis:
    """Static façade bundling the pre-trained face/gender/age/fashion/emotion models.

    All four Keras models and the dlib face detector are loaded once, at
    class-definition time, so importing this module requires the .h5 files
    under ./saved_models to exist. Every prediction helper is a classmethod
    operating on those shared models.
    """
    # Filesystem locations of the pre-trained Keras models loaded below.
    GENDER_MODEL_PATH = './saved_models/gender_model.h5'
    AGE_MODEL_PATH = './saved_models/age_model.h5'
    DEEPFASHION_MODEL_PATH = './saved_models/deepfashion_model.h5'
    FACIAL_EXPRESSION_MODEL_PATH = './saved_models/facial_expression_model.h5'
    # NOTE: loading happens at import time; a missing file surfaces as an import error.
    GENDER_MODEL = load_model(GENDER_MODEL_PATH)
    AGE_MODEL = load_model(AGE_MODEL_PATH)
    DEEPFASHION_MODEL = load_model(DEEPFASHION_MODEL_PATH)
    FACIAL_EXPRESSION_MODEL = load_model(FACIAL_EXPRESSION_MODEL_PATH)
    # argmax-index -> human-readable label maps for each model's output.
    GENDER_DICT = {0: "female", 1: "male"}
    AGE_DICT = {0: "0-9", 1: "10-19", 2: "20-29", 3: "30-39", 4: "40-49", 5: "50-59", 6: "60+"}
    LABEL_DICT = {1: "short sleeve top", 2: "long sleeve top", 3: "short sleeve outwear", 4: "long sleeve outwear", 5: "vest", 6: "sling", 7: "shorts", 8: "trousers", 9: "skirt", 10: "short sleeve dress", 11: "long sleeve dress", 12: "vest dress", 13: "sling dress"}
    EMOTION_DICT = {0: "Angry", 1: "Disgust", 2: "Fear", 3: "Happy", 4: "Sad", 5: "Surprise", 6: "Neutral"}
    # dlib HOG-based frontal face detector, shared by detect_faces().
    HOG_FACE_DETECTOR = dlib.get_frontal_face_detector()
    def __init__(self):
        """No per-instance state; the class is used through its classmethods."""
        pass
    @classmethod
    def detect_faces(self, grayscale_image):
        """Return the face rectangles dlib finds in a grayscale image."""
        # NOTE(review): the first parameter of these classmethods is conventionally 'cls'.
        return Analysis.HOG_FACE_DETECTOR(grayscale_image)
    @classmethod
    def crop_image_gray(self, image, width, height):
        """Resize a single-channel image and add channel + batch axes -> (1, h, w, 1)."""
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(image, (width, height)), -1), 0)
        return cropped_img
    @classmethod
    def crop_image(self, image, width, height):
        """Resize a multi-channel image and add a batch axis -> (1, h, w, c)."""
        cropped_img = np.expand_dims(cv2.resize(image, (width, height)), 0)
        return cropped_img
    @classmethod
    def predict_gender(self, roi_gray):
        """Return 'female' or 'male' for the given ROI (normalizes roi_gray in place)."""
        # L2-normalize to float32 in place before feeding the model.
        cv2.normalize(roi_gray, roi_gray, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        prediction = Analysis.GENDER_MODEL.predict(roi_gray)
        label = Analysis.GENDER_DICT[int(np.argmax(prediction))]
        return label
    @classmethod
    def predict_age(self, roi_gray):
        """Return an age-bucket label (e.g. '20-29') for the ROI (normalizes roi_gray in place)."""
        cv2.normalize(roi_gray, roi_gray, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        prediction = Analysis.AGE_MODEL.predict(roi_gray)
        label = Analysis.AGE_DICT[int(np.argmax(prediction))]
        return label
    @classmethod
    def predict_fashion(self, frame):
        """Return a DeepFashion garment label for the frame (normalizes frame in place)."""
        cv2.normalize(frame, frame, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        prediction = Analysis.DEEPFASHION_MODEL.predict(frame)
        label = Analysis.LABEL_DICT[int(np.argmax(prediction))]
        return label
    @classmethod
    def predict_facial_expression(self, roi_gray):
        """Return an emotion label (e.g. 'Happy') for the ROI (normalizes roi_gray in place)."""
        cv2.normalize(roi_gray, roi_gray, alpha=0, beta=1, norm_type=cv2.NORM_L2, dtype=cv2.CV_32F)
        prediction = Analysis.FACIAL_EXPRESSION_MODEL.predict(roi_gray)
        label = Analysis.EMOTION_DICT[int(np.argmax(prediction))]
        return label
| 1,415 | 1,380 | 23 |
39f3abfc419b42dbcf3343ade9df24b9d2ba5af7 | 5,748 | py | Python | samples/misc/graphical_interfaces/kivy_demo_app/opencv_face_tracking.py | sintefneodroid/vision | a4e66251ead99f15f4697bfe2abd00e2f388e743 | [
"Apache-2.0"
] | null | null | null | samples/misc/graphical_interfaces/kivy_demo_app/opencv_face_tracking.py | sintefneodroid/vision | a4e66251ead99f15f4697bfe2abd00e2f388e743 | [
"Apache-2.0"
] | 1 | 2022-03-12T01:08:08.000Z | 2022-03-12T01:08:08.000Z | samples/misc/graphical_interfaces/kivy_demo_app/opencv_face_tracking.py | sintefneodroid/vision | a4e66251ead99f15f4697bfe2abd00e2f388e743 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import imageio
from kivy.app import App
from kivy.clock import Clock
from kivy.config import Config
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.dropdown import DropDown
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from neodroidvision import PROJECT_APP_PATH
__author__ = "Christian Heider Nielsen"
__doc__ = ""
Config.set("graphics", "resizable", 0)
Window.size = (600, 600)
Window.clearcolor = (0.9, 0.9, 0.9, 1)
class VideoStreamApp(App):
    """Kivy application shell for the face-tracking video stream.

    The whole UI is declared in the KV-language string ``layout_kv`` below:
    a MainLayout root with an Image widget (the camera frame) and three
    buttons wired to root.start() / root.settings() / root.close().
    """
    # KV layout string; the Image source is baked in from MainLayout._frame_name
    # at class-definition time via the f-string.
    layout_kv = f"""
MainLayout:
    BoxLayout:
        orientation: 'vertical'
        padding: root.width * 0.05, root.height * .05
        spacing: '5dp'
        BoxLayout:
            size_hint: [1,.85]
            Image:
                id: image_source
                source: '{MainLayout._frame_name}'
        BoxLayout:
            size_hint: [1,.15]
            GridLayout:
                cols: 3
                spacing: '10dp'
                Button:
                    id: status
                    text:'Start'
                    bold: True
                    background_normal: ''
                    background_color: (0.82, 0.82, 0.82, 1.0)
                    on_press: root.start()
                Button:
                    text: 'Setting'
                    bold: True
                    background_normal: ''
                    background_color: (0.82, 0.82, 0.82, 1.0)
                    on_press: root.settings()
                Button:
                    text: 'Close'
                    bold: True
                    background_normal: ''
                    background_color: (0.82, 0.82, 0.82, 1.0)
                    on_press: root.close()
"""
if __name__ == "__main__":
main()
| 28.455446 | 83 | 0.616388 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import imageio
from kivy.app import App
from kivy.clock import Clock
from kivy.config import Config
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.dropdown import DropDown
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from neodroidvision import PROJECT_APP_PATH
__author__ = "Christian Heider Nielsen"
__doc__ = ""
Config.set("graphics", "resizable", 0)
Window.size = (600, 600)
Window.clearcolor = (0.9, 0.9, 0.9, 1)
class MainLayout(BoxLayout):
    """Root widget: grabs webcam frames, runs Haar-cascade face detection and
    refreshes the on-screen Image, plus a Settings popup with a model dropdown.
    """
    # OpenCV capture handle; created in start_cam(), released in stop().
    _video_capture = None
    # Haar cascade classifier; created in start_cam().
    _face_cascade = None
    # Each processed frame is written to this file and reloaded by the Image widget.
    _frame_name = str(PROJECT_APP_PATH.user_cache / "face_detection_frame.jpg")
    def __init__(self, **kwargs):
        """Build the (initially hidden) settings popup on construction."""
        super().__init__(**kwargs)
        self.build()
    def build_dropdown(self):
        """Create the model-selection dropdown and return its anchor button."""
        dropdown_layout = DropDown()
        for index in range(10):
            # When adding widgets, we need to specify the height manually
            # (disabling the size_hint_y) so the dropdown can calculate
            # the area it needs.
            btn2 = Button(text=f"Model {index:d}", size_hint_y=None, height=20)
            # for each button, attach a callback that will call the select() method
            # on the dropdown. We'll pass the text of the button as the data of the
            # selection.
            btn2.bind(on_release=lambda btn: dropdown_layout.select(btn.text))
            # then add the button inside the dropdown
            dropdown_layout.add_widget(btn2)
        # create a big main button
        self._dropdown_btn = Button(text="Model", size_hint=(0.5, 0.1))
        # show the dropdown menu when the main button is released
        # note: all the bind() calls pass the instance of the caller (here, the
        # mainbutton instance) as the first argument of the callback (here,
        # dropdown.open.).
        self._dropdown_btn.bind(on_release=dropdown_layout.open)
        # one last thing, listen for the selection in the dropdown list and
        # assign the data to the button text.
        dropdown_layout.bind(on_select=self.on_select_model)
        return self._dropdown_btn
    def on_select_model(self, ins, model):
        """Remember the selected model name and mirror it on the anchor button."""
        self._selected_model = model
        self._dropdown_btn.text = model
        # setattr(self.dropdown_btn, 'text', model)
    def build(self):
        """Assemble the Settings popup (model dropdown + Apply button).

        NOTE(review): despite the name this is NOT kivy App.build(); it only
        constructs self._popup.
        """
        apply_btn = Button(text="Apply", bold=True)
        apply_btn.bind(on_press=self.settings_process)
        dropdown_btn = self.build_dropdown()
        kv_layout = GridLayout(cols=2)
        kv_layout.add_widget(Label(text="Model: ", bold=True))
        kv_layout.add_widget(dropdown_btn)
        settings_layout = BoxLayout(orientation="vertical")
        settings_layout.add_widget(kv_layout)
        settings_layout.add_widget(apply_btn)
        self._popup = Popup(
            title="Settings", content=settings_layout, size_hint=(0.6, 0.2)
        )
    def start(self):
        """Toggle capture: the status button text doubles as the state flag."""
        if self.ids.status.text == "Stop":
            self.stop()
        else:
            self.start_cam()
    def start_cam(self):
        """Open webcam 0, load the Haar cascade and kick off the update loop."""
        self.ids.status.text = "Stop"
        self._video_capture = cv2.VideoCapture(0)
        self._face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
        )
        Clock.schedule_once(self.update)
    def stop(self):
        """Stop the update loop and release the camera."""
        self.ids.status.text = "Start"
        Clock.unschedule(self.update)
        self._video_capture.release()
        cv2.destroyAllWindows()
    def update(self, dt):
        """Grab one frame, draw face boxes, persist it and re-schedule itself."""
        ret, frame = self._video_capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        try:
            faces = self._face_cascade.detectMultiScale(
                gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
            )
            for (x, y, w, h) in faces:
                cv2.rectangle(rgb, (x, y), (x + w, y + h), (0, 255, 0), 2)
        except Exception as e:
            # best-effort: a failed detection pass still shows the raw frame
            print(e)
        # The Image widget points at this file; rewriting + reload() shows the frame.
        imageio.imsave(self._frame_name, rgb)
        self.ids.image_source.reload()
        Clock.schedule_once(self.update)
    @staticmethod
    def close():
        """Shut down the whole kivy application."""
        App.get_running_app().stop()
    def settings(self):
        """Show the Settings popup."""
        self._popup.open()
    def settings_process(self, btn):
        """Apply the dropdown selection (if any was made) and close the popup."""
        try:
            self._current_model = self._selected_model
        except:
            # no selection was made yet; keep the previous model
            pass
        self._popup.dismiss()
class VideoStreamApp(App):
    """Kivy application shell for the face-tracking video stream.

    The whole UI is declared in the KV-language string ``layout_kv``:
    a MainLayout root with an Image widget (the camera frame) and three
    buttons wired to root.start() / root.settings() / root.close().
    """
    # KV layout string; the Image source is baked in from MainLayout._frame_name
    # at class-definition time via the f-string.
    layout_kv = f"""
MainLayout:
    BoxLayout:
        orientation: 'vertical'
        padding: root.width * 0.05, root.height * .05
        spacing: '5dp'
        BoxLayout:
            size_hint: [1,.85]
            Image:
                id: image_source
                source: '{MainLayout._frame_name}'
        BoxLayout:
            size_hint: [1,.15]
            GridLayout:
                cols: 3
                spacing: '10dp'
                Button:
                    id: status
                    text:'Start'
                    bold: True
                    background_normal: ''
                    background_color: (0.82, 0.82, 0.82, 1.0)
                    on_press: root.start()
                Button:
                    text: 'Setting'
                    bold: True
                    background_normal: ''
                    background_color: (0.82, 0.82, 0.82, 1.0)
                    on_press: root.settings()
                Button:
                    text: 'Close'
                    bold: True
                    background_normal: ''
                    background_color: (0.82, 0.82, 0.82, 1.0)
                    on_press: root.close()
"""
    def build(self):
        """Instantiate the KV layout, start the camera loop and return the root widget."""
        a = Builder.load_string(VideoStreamApp.layout_kv)
        a.start_cam()
        return a
def main():
    """Entry point: create and run the kivy video-stream application."""
    app = VideoStreamApp()
    app.run()
if __name__ == "__main__":
main()
| 3,543 | 453 | 73 |
66519ae77236e7c32472e7e367938d47cf6c51cd | 12,181 | py | Python | apps/annon/dataset/coco_to_aids.py | Roy-Tuhin/maskrcnn_sophisticate- | a5a2300abbe2633d66847cdbfa7ed2bc2f901ec3 | [
"Apache-2.0"
] | null | null | null | apps/annon/dataset/coco_to_aids.py | Roy-Tuhin/maskrcnn_sophisticate- | a5a2300abbe2633d66847cdbfa7ed2bc2f901ec3 | [
"Apache-2.0"
] | 14 | 2021-02-02T22:32:47.000Z | 2022-03-12T00:20:40.000Z | apps/annon/dataset/coco_to_aids.py | Boyetuhin/maskrcnn_sophisticate- | a5a2300abbe2633d66847cdbfa7ed2bc2f901ec3 | [
"Apache-2.0"
] | 1 | 2021-05-03T22:48:36.000Z | 2021-05-03T22:48:36.000Z | __author__ = 'mangalbhaskar'
__version__ = '1.0'
"""
## Description:
# --------------------------------------------------------
# Annotation Parser Interface for Annotation work flow.
# Upload the MS COCO dataset to MongoDB in Annon DB specification
#
# --------------------------------------------------------
# Copyright (c) 2020 mangalbhaskar
# Licensed under [see LICENSE for details]
# Written by mangalbhaskar
# --------------------------------------------------------
## Example:
# --------------------------------------------------------
## TODO:
# --------------------------------------------------------
## Future work:
# --------------------------------------------------------
"""
import os
import sys
import json
import time
import logging
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.append(this_dir)
APP_ROOT_DIR = os.getenv('AI_APP')
if APP_ROOT_DIR not in sys.path:
sys.path.append(APP_ROOT_DIR)
# if BASE_PATH_CFG not in sys.path:
# sys.path.append(BASE_PATH_CFG)
# this = sys.modules[__name__]
from Annon import ANNON
import common
log = logging.getLogger('__main__.'+__name__)
# from pycocotools.coco import COCO
# from pycocotools import mask as maskUtils
def coco_to_annon(subset, metadata, dataset):
    """Transform a parsed MS COCO annotation payload into the Annon format, in place.

    Mutates ``dataset['categories']``, ``dataset['images']`` and
    ``dataset['annotations']``, adding the Annon-specific fields, and stores
    the category-id -> label-name map in ``metadata['catid_lblid_map']``.

    Args:
        subset: split name ('train', 'val', ...) stamped onto every record.
        metadata: dict providing 'image_dir' and 'annotation_file'.
        dataset: COCO JSON dict with 'categories', 'images', 'annotations'.
    """
    log.info("-----------------------------")
    image_dir = metadata['image_dir']
    annotation_file = metadata['annotation_file']
    ## CLASSINFO
    categories = { cat['name']: cat for cat in dataset['categories'] }
    catid_lblid_map = { str(cat['id']):cat['name'] for cat in dataset['categories'] }
    log.info("categories: {}".format(categories))
    cats = list(categories.keys())
    cats.sort()
    log.info("cats: {}".format(cats))
    for i, cat in enumerate(cats):
        category = categories[cat]
        category['coco_id'] = category['id']
        ## Annon uses the human-readable category name as the label id.
        category['lbl_id'] = category['name']
        category['source'] = 'coco'
    metadata['catid_lblid_map'] = catid_lblid_map
    ## IMAGES
    ## instances, 2014 => ['license', 'file_name', 'coco_url', 'height', 'width', 'date_captured', 'flickr_url', 'id']
    images = dataset['images']
    for i, image in enumerate(images):
        if i==0:
            ## log the record schema once, not per record
            log.info("image.keys(): {}".format(image.keys()))
        uuid_img = common.createUUID('img')
        image['img_id'] = image['id']
        image['filename'] = image['file_name']
        image['subset'] = subset
        image['file_attributes'] = {
            'id': image['id']
            ,'uuid': uuid_img
        }
        image['size'] = 0
        image['modified_on'] = None
        image['base_dir'] = None
        image['dir'] = None
        image['file_id'] = None
        image['filepath'] = None
        image['rel_filename'] = None
    ## ANNOTATIONS
    ## instances, 2014 => ['segmentation', 'area', 'iscrowd', 'image_id', 'bbox', 'category_id', 'id']
    annotations = dataset['annotations']
    boxmode = 'XYWH_ABS'
    for i, annotation in enumerate(annotations):
        if i==0:
            ## Fix: schema is now logged only once; previously a second,
            ## unconditional log.info emitted one line per annotation
            ## (millions of lines on the full COCO train set).
            log.info("annotation.keys(): {}".format(annotation.keys()))
        uuid_ant = common.createUUID('ant')
        annotation['ant_id'] = annotation['id']
        annotation['img_id'] = annotation['image_id']
        ## crucial mapping: the Annon label id is the category *name*
        ## (the redundant assignment of the raw category_id, which was
        ## immediately overwritten, has been dropped)
        annotation['lbl_id'] = catid_lblid_map[str(annotation['category_id'])]
        ## BoxMode.XYWH_ABS
        _bbox = {
            "ymin": annotation['bbox'][1],
            "xmin": annotation['bbox'][0],
            "ymax": None,
            "xmax": None,
            "width": annotation['bbox'][2],
            "height": annotation['bbox'][3]
        }
        annotation['annon_index'] = -1
        annotation['annotation_rel_date'] = None
        annotation['annotation_tool'] = 'coco'
        annotation['annotator_id'] = 'coco'
        # annotation['ant_type'] = 'bbox'
        # annotation['ant_type'] = 'polygon'
        annotation['filename'] = annotation['id']
        annotation['subset'] = subset
        annotation['modified_on'] = None
        annotation['maskarea'] = -1
        annotation['_bbox'] = _bbox
        annotation['boxmode'] = boxmode
        annotation['bboxarea'] = annotation['area']
        annotation['region_attributes'] = {
            'id': annotation['id']
            ,'uuid': uuid_ant
            ,'iscrowd': annotation['iscrowd']
        }
        annotation['dir'] = None
        annotation['file_id'] = annotation['image_id']
        annotation['filepath'] = None
        annotation['rel_filename'] = annotation_file
        annotation['image_name'] = None
        annotation['image_dir'] = image_dir
        annotation['file_attributes'] = {}
def prepare_datasets(cfg, args, datacfg):
    """Load the COCO annotation files for each split and index them via ANNON.

    Refer: pycocotools/coco.py

    Mandatory arguments:
        args.from_path => directory containing the annotation json files,
            e.g. "/aimldl-dat/data-public/ms-coco-1/annotations"
        args.task => annotation task: 'instances', 'panoptic', ...
        args.year => year of publication (2014, 2017)

    Returns:
        (aids, datacfg): ``aids`` maps each split to its 'IMAGES',
        'ANNOTATIONS' and 'STATS'; ``datacfg`` is enriched in place with
        classinfo, per-split stats and a summary.
    """
    log.info("-----------------------------")
    if not args.from_path:
        raise Exception("--{} not defined".format('from'))
    if not args.task:
        raise Exception("--{} not defined".format('task'))
    if not args.year:
        raise Exception("--{} not defined".format('year'))
    from_path = args.from_path
    ## Fix: the old guard `not os.path.exists(p) and os.path.isfile(p)` was
    ## always False (a non-existing path is never a file), so invalid paths
    ## slipped through; require an existing directory instead.
    if not os.path.isdir(from_path):
        raise Exception('--from needs to be directory path')
    task = args.task
    year = args.year
    ## TODO: as user input
    splits = ['train','val']
    # splits = ['train']
    aids = {}
    stats = {}
    total_stats = {
        'total_images':0
        ,'total_annotations':0
        ,'total_labels':0
    }
    datacfg['id'] = 'coco'
    datacfg['name'] = 'coco'
    datacfg['problem_id'] = 'coco'
    datacfg['annon_type'] = 'coco'
    datacfg['splits'] = splits
    datacfg['classinfo'] = []
    for i, subset in enumerate(splits):
        log.info("subset: {}".format(subset))
        total_images = 0
        total_annotations = 0
        total_labels = 0
        if subset not in aids:
            aids[subset] = {
                'IMAGES':None
                ,'ANNOTATIONS': None
                ,'STATS':None
            }
        dataset, metadata = load_coco_data(from_path, task, subset, year)
        coco_to_annon(subset, metadata, dataset)
        annon = ANNON(datacfg=datacfg, subset=subset, images_data=dataset['images'], annotations_data=dataset['annotations'], classinfo=dataset['categories'])
        annon_stats = annon.getStats()
        annon_stats['metadata'] = metadata
        annon_stats['colors'] = None
        lbl_ids = annon.getCatIds()
        catIds = annon.getCatIds(catIds=lbl_ids)
        imgIds = annon.getImgIds(catIds=lbl_ids)
        annIds = annon.getAnnIds(imgIds=imgIds, catIds=lbl_ids)
        classinfo_split = annon.loadCats(ids=lbl_ids)
        aids[subset]['IMAGES'] = annon.loadImgs(ids=imgIds)
        aids[subset]['ANNOTATIONS'] = annon.loadAnns(ids=annIds)
        ## classinfo / categories should be unique names for all the splits taken together
        ## and should not to be differentiated splitwise - the differences should be captured in the per split stats
        classinfo = list({v['lbl_id']:v for v in classinfo_split}.values())
        datacfg['classinfo'] += classinfo
        ## Calculation for total_labels is incorrect because it does not take care of common labels amoung subsets
        total_labels += len(classinfo)
        total_annotations += len(annIds)
        total_images += len(imgIds)
        ## update total stats object
        total_stats['total_labels'] += total_labels
        total_stats['total_annotations'] += total_annotations
        total_stats['total_images'] += total_images
        ## update stats object
        annon_stats['labels'] = catIds.copy()
        annon_stats['classinfo'] = classinfo.copy()
        if subset not in stats:
            stats[subset] = annon_stats
        aids[subset]['STATS'] = [stats[subset]]
    datacfg['stats'] = stats
    datacfg['summary'] = total_stats
    return aids, datacfg
| 31.07398 | 154 | 0.620967 | __author__ = 'mangalbhaskar'
__version__ = '1.0'
"""
## Description:
# --------------------------------------------------------
# Annotation Parser Interface for Annotation work flow.
# Upload the MS COCO dataset to MongoDB in Annon DB specification
#
# --------------------------------------------------------
# Copyright (c) 2020 mangalbhaskar
# Licensed under [see LICENSE for details]
# Written by mangalbhaskar
# --------------------------------------------------------
## Example:
# --------------------------------------------------------
## TODO:
# --------------------------------------------------------
## Future work:
# --------------------------------------------------------
"""
import os
import sys
import json
import time
import logging
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.append(this_dir)
APP_ROOT_DIR = os.getenv('AI_APP')
if APP_ROOT_DIR not in sys.path:
sys.path.append(APP_ROOT_DIR)
# if BASE_PATH_CFG not in sys.path:
# sys.path.append(BASE_PATH_CFG)
# this = sys.modules[__name__]
from Annon import ANNON
import common
log = logging.getLogger('__main__.'+__name__)
# from pycocotools.coco import COCO
# from pycocotools import mask as maskUtils
def coco_to_annon(subset, metadata, dataset):
    """Transform a parsed MS COCO annotation payload into the Annon format, in place.

    Mutates ``dataset['categories']``, ``dataset['images']`` and
    ``dataset['annotations']``, adding the Annon-specific fields, and stores
    the category-id -> label-name map in ``metadata['catid_lblid_map']``.

    Args:
        subset: split name ('train', 'val', ...) stamped onto every record.
        metadata: dict providing 'image_dir' and 'annotation_file'.
        dataset: COCO JSON dict with 'categories', 'images', 'annotations'.
    """
    log.info("-----------------------------")
    image_dir = metadata['image_dir']
    annotation_file = metadata['annotation_file']
    ## CLASSINFO
    categories = { cat['name']: cat for cat in dataset['categories'] }
    catid_lblid_map = { str(cat['id']):cat['name'] for cat in dataset['categories'] }
    log.info("categories: {}".format(categories))
    cats = list(categories.keys())
    cats.sort()
    log.info("cats: {}".format(cats))
    for i, cat in enumerate(cats):
        category = categories[cat]
        category['coco_id'] = category['id']
        ## Annon uses the human-readable category name as the label id.
        category['lbl_id'] = category['name']
        category['source'] = 'coco'
    metadata['catid_lblid_map'] = catid_lblid_map
    ## IMAGES
    ## instances, 2014 => ['license', 'file_name', 'coco_url', 'height', 'width', 'date_captured', 'flickr_url', 'id']
    images = dataset['images']
    for i, image in enumerate(images):
        if i==0:
            ## log the record schema once, not per record
            log.info("image.keys(): {}".format(image.keys()))
        uuid_img = common.createUUID('img')
        image['img_id'] = image['id']
        image['filename'] = image['file_name']
        image['subset'] = subset
        image['file_attributes'] = {
            'id': image['id']
            ,'uuid': uuid_img
        }
        image['size'] = 0
        image['modified_on'] = None
        image['base_dir'] = None
        image['dir'] = None
        image['file_id'] = None
        image['filepath'] = None
        image['rel_filename'] = None
    ## ANNOTATIONS
    ## instances, 2014 => ['segmentation', 'area', 'iscrowd', 'image_id', 'bbox', 'category_id', 'id']
    annotations = dataset['annotations']
    boxmode = 'XYWH_ABS'
    for i, annotation in enumerate(annotations):
        if i==0:
            ## Fix: schema is now logged only once; previously a second,
            ## unconditional log.info emitted one line per annotation
            ## (millions of lines on the full COCO train set).
            log.info("annotation.keys(): {}".format(annotation.keys()))
        uuid_ant = common.createUUID('ant')
        annotation['ant_id'] = annotation['id']
        annotation['img_id'] = annotation['image_id']
        ## crucial mapping: the Annon label id is the category *name*
        ## (the redundant assignment of the raw category_id, which was
        ## immediately overwritten, has been dropped)
        annotation['lbl_id'] = catid_lblid_map[str(annotation['category_id'])]
        ## BoxMode.XYWH_ABS
        _bbox = {
            "ymin": annotation['bbox'][1],
            "xmin": annotation['bbox'][0],
            "ymax": None,
            "xmax": None,
            "width": annotation['bbox'][2],
            "height": annotation['bbox'][3]
        }
        annotation['annon_index'] = -1
        annotation['annotation_rel_date'] = None
        annotation['annotation_tool'] = 'coco'
        annotation['annotator_id'] = 'coco'
        # annotation['ant_type'] = 'bbox'
        # annotation['ant_type'] = 'polygon'
        annotation['filename'] = annotation['id']
        annotation['subset'] = subset
        annotation['modified_on'] = None
        annotation['maskarea'] = -1
        annotation['_bbox'] = _bbox
        annotation['boxmode'] = boxmode
        annotation['bboxarea'] = annotation['area']
        annotation['region_attributes'] = {
            'id': annotation['id']
            ,'uuid': uuid_ant
            ,'iscrowd': annotation['iscrowd']
        }
        annotation['dir'] = None
        annotation['file_id'] = annotation['image_id']
        annotation['filepath'] = None
        annotation['rel_filename'] = annotation_file
        annotation['image_name'] = None
        annotation['image_dir'] = image_dir
        annotation['file_attributes'] = {}
def get_metadata(from_path, task, subset, year):
    """Resolve the annotation file and image directory for one COCO split.

    Raises:
        Exception: if the resolved annotation file does not exist.
    Returns:
        dict with annotation_file, annotation_filepath, image_dir, task,
        year and base_from_path.
    """
    base_from_path = common.getBasePath(from_path)
    log.info("base_from_path: {}".format(base_from_path))
    ## TODO: fix the subset issue
    if task == "panoptic":
        annotation_file = "{}_{}{}.json".format(task + "_instances", subset, year)
        subset = "{}_{}".format(task, subset)
    else:
        annotation_file = "{}_{}{}.json".format(task, subset, year)
    log.info("annotation_file: {}".format(annotation_file))
    annotation_filepath = os.path.join(base_from_path, annotation_file)
    log.info("annotation_filepath: {}".format(annotation_filepath))
    if not os.path.exists(annotation_filepath):
        raise Exception("File: {} does not exists!".format(annotation_filepath))
    # minival / valminusminival share the val image directory
    if subset in ("minival", "valminusminival"):
        subset = "val"
    image_dir = "{}/{}{}".format(base_from_path, subset, year)
    log.info("image_dir: {}".format(image_dir))
    return {
        "annotation_file": annotation_file,
        "annotation_filepath": annotation_filepath,
        "image_dir": image_dir,
        "task": task,
        "year": year,
        "base_from_path": base_from_path,
    }
def load_coco_data(from_path, task, subset, year):
    """Read one COCO annotation JSON file and return (dataset, metadata)."""
    metadata = get_metadata(from_path, task, subset, year)
    log.info('loading annotations into memory...')
    started_at = time.time()
    with open(metadata['annotation_filepath'], 'r') as fr:
        dataset = json.load(fr)
    assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
    log.info('Done (t={:0.2f}s)'.format(time.time() - started_at))
    log.info("dataset.keys(): {}".format(dataset.keys()))
    return dataset, metadata
def prepare_datasets(cfg, args, datacfg):
    """Load the COCO annotation files for each split and index them via ANNON.

    Refer: pycocotools/coco.py

    Mandatory arguments:
        args.from_path => directory containing the annotation json files,
            e.g. "/aimldl-dat/data-public/ms-coco-1/annotations"
        args.task => annotation task: 'instances', 'panoptic', ...
        args.year => year of publication (2014, 2017)

    Returns:
        (aids, datacfg): ``aids`` maps each split to its 'IMAGES',
        'ANNOTATIONS' and 'STATS'; ``datacfg`` is enriched in place with
        classinfo, per-split stats and a summary.
    """
    log.info("-----------------------------")
    if not args.from_path:
        raise Exception("--{} not defined".format('from'))
    if not args.task:
        raise Exception("--{} not defined".format('task'))
    if not args.year:
        raise Exception("--{} not defined".format('year'))
    from_path = args.from_path
    ## Fix: the old guard `not os.path.exists(p) and os.path.isfile(p)` was
    ## always False (a non-existing path is never a file), so invalid paths
    ## slipped through; require an existing directory instead.
    if not os.path.isdir(from_path):
        raise Exception('--from needs to be directory path')
    task = args.task
    year = args.year
    ## TODO: as user input
    splits = ['train','val']
    # splits = ['train']
    aids = {}
    stats = {}
    total_stats = {
        'total_images':0
        ,'total_annotations':0
        ,'total_labels':0
    }
    datacfg['id'] = 'coco'
    datacfg['name'] = 'coco'
    datacfg['problem_id'] = 'coco'
    datacfg['annon_type'] = 'coco'
    datacfg['splits'] = splits
    datacfg['classinfo'] = []
    for i, subset in enumerate(splits):
        log.info("subset: {}".format(subset))
        total_images = 0
        total_annotations = 0
        total_labels = 0
        if subset not in aids:
            aids[subset] = {
                'IMAGES':None
                ,'ANNOTATIONS': None
                ,'STATS':None
            }
        dataset, metadata = load_coco_data(from_path, task, subset, year)
        coco_to_annon(subset, metadata, dataset)
        annon = ANNON(datacfg=datacfg, subset=subset, images_data=dataset['images'], annotations_data=dataset['annotations'], classinfo=dataset['categories'])
        annon_stats = annon.getStats()
        annon_stats['metadata'] = metadata
        annon_stats['colors'] = None
        lbl_ids = annon.getCatIds()
        catIds = annon.getCatIds(catIds=lbl_ids)
        imgIds = annon.getImgIds(catIds=lbl_ids)
        annIds = annon.getAnnIds(imgIds=imgIds, catIds=lbl_ids)
        classinfo_split = annon.loadCats(ids=lbl_ids)
        aids[subset]['IMAGES'] = annon.loadImgs(ids=imgIds)
        aids[subset]['ANNOTATIONS'] = annon.loadAnns(ids=annIds)
        ## classinfo / categories should be unique names for all the splits taken together
        ## and should not to be differentiated splitwise - the differences should be captured in the per split stats
        classinfo = list({v['lbl_id']:v for v in classinfo_split}.values())
        datacfg['classinfo'] += classinfo
        ## Calculation for total_labels is incorrect because it does not take care of common labels amoung subsets
        total_labels += len(classinfo)
        total_annotations += len(annIds)
        total_images += len(imgIds)
        ## update total stats object
        total_stats['total_labels'] += total_labels
        total_stats['total_annotations'] += total_annotations
        total_stats['total_images'] += total_images
        ## update stats object
        annon_stats['labels'] = catIds.copy()
        annon_stats['classinfo'] = classinfo.copy()
        if subset not in stats:
            stats[subset] = annon_stats
        aids[subset]['STATS'] = [stats[subset]]
    datacfg['stats'] = stats
    datacfg['summary'] = total_stats
    return aids, datacfg
def tdd():
    """Ad-hoc smoke test: load instances/train/2014 and build an ANNON index."""
    from_path = '/aimldl-dat/data-public/ms-coco-1/annotations'
    subset = 'train'
    dataset, mdata = load_coco_data(from_path, 'instances', subset, 2014)
    coco_to_annon(subset, mdata, dataset)
    annon = ANNON(datacfg=None, subset=subset, images_data=dataset['images'],
                  annotations_data=dataset['annotations'], classinfo=dataset['categories'])
    return dataset, mdata, annon
| 2,236 | 0 | 69 |
997ed027a962dc4d27343b4f36425053e5be6140 | 3,208 | py | Python | Python Advanced/3. Multidimensional Lists/Exercise/05. Alice in Wonderland.py | a-shiro/SoftUni-Courses | 7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77 | [
"MIT"
] | null | null | null | Python Advanced/3. Multidimensional Lists/Exercise/05. Alice in Wonderland.py | a-shiro/SoftUni-Courses | 7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77 | [
"MIT"
] | null | null | null | Python Advanced/3. Multidimensional Lists/Exercise/05. Alice in Wonderland.py | a-shiro/SoftUni-Courses | 7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77 | [
"MIT"
] | null | null | null |
# Alice walks on a square board until she collects 10 tea bags, steps off the
# board, or falls into the rabbit hole ('R').
dimensions = int(input())
matrix = read_matrix()
alice_row, alice_col = get_alice_coordinates()
rabbit_row, rabbit_col = get_rabbit_hole_coordinates()

# Row/column deltas per command; replaces four copy-pasted direction branches.
MOVES = {'up': (-1, 0), 'down': (1, 0), 'left': (0, -1), 'right': (0, 1)}

tea_bags = 0
in_wonderland = True
while in_wonderland and tea_bags < 10:
    direction = input()
    delta = MOVES.get(direction)
    if delta is not None:
        next_row = alice_row + delta[0]
        next_col = alice_col + delta[1]
        if not (0 <= next_row < dimensions and 0 <= next_col < dimensions):
            # Stepped outside the board: Alice leaves Wonderland.
            in_wonderland = False
        elif matrix[next_row][next_col] == 'R':
            # Fell into the rabbit hole.
            matrix[rabbit_row][rabbit_col] = '*'
            in_wonderland = False
        else:
            # Collect tea bags from any numbered cell, then move.
            if matrix[next_row][next_col] not in ('.', '*'):
                tea_bags += int(matrix[next_row][next_col])
            matrix[next_row][next_col] = 'A'
            matrix[alice_row][alice_col] = '*'
    if tea_bags >= 10:
        alice_row, alice_col = get_alice_coordinates()
        matrix[alice_row][alice_col] = '*'
    elif in_wonderland:
        alice_row, alice_col = get_alice_coordinates()

if not in_wonderland:
    print("Alice didn't make it to the tea party.")
else:
    print('She did it! She went to the party.')
[print(' '.join(row)) for row in matrix] | 33.768421 | 99 | 0.573566 | def read_matrix():
matrix = []
for row in range(dimensions):
col = [n for n in input().split(' ')]
matrix.append(col)
return matrix
def get_alice_coordinates():
    """Locate Alice ('A') on the global matrix.

    Returns:
        (row, col) of 'A', or None if Alice is not on the board.

    Fix: the original used matrix.index(row), which returns the index of the
    FIRST row equal to the matched one, so boards with duplicate rows could
    report wrong coordinates; enumerate() tracks the true row index.
    """
    for row_index, row in enumerate(matrix):
        if 'A' in row:
            return row_index, row.index('A')
def get_rabbit_hole_coordinates(grid=None):
    """Return the (row, col) position of 'R' in the grid, or None if absent.

    Defaults to the module-level ``matrix`` so existing zero-argument callers
    keep working. Uses ``enumerate`` instead of ``list.index(row)``, which
    would return the index of the first *equal* row rather than the row
    actually being inspected.
    """
    if grid is None:
        grid = matrix
    for row_idx, row in enumerate(grid):
        if 'R' in row:
            return row_idx, row.index('R')
dimensions = int(input())
matrix = read_matrix()
alice_row, alice_col = get_alice_coordinates()
rabbit_row, rabbit_col = get_rabbit_hole_coordinates()
# Row/column offsets for each supported command. The original code repeated
# the same bounds/rabbit/tea-bag logic four times, once per direction; a
# lookup table removes that quadruplication without changing behaviour.
moves = {'up': (-1, 0), 'down': (1, 0), 'left': (0, -1), 'right': (0, 1)}
tea_bags = 0
in_wonderland = True
while in_wonderland and tea_bags < 10:
    direction = input()
    if direction not in moves:
        continue  # unknown command: same no-op as the original elif chain
    delta_row, delta_col = moves[direction]
    next_row, next_col = alice_row + delta_row, alice_col + delta_col
    if not (0 <= next_row < dimensions and 0 <= next_col < dimensions):
        # Alice stepped off the board; her 'A' marker stays where it was.
        in_wonderland = False
    elif matrix[next_row][next_col] == 'R':
        # Alice fell into the rabbit hole; mark it and end the walk.
        matrix[rabbit_row][rabbit_col] = '*'
        in_wonderland = False
    else:
        cell = matrix[next_row][next_col]
        if cell != '.' and cell != '*':
            tea_bags += int(cell)  # pick up the tea bags on this cell
        matrix[next_row][next_col] = 'A'
        matrix[alice_row][alice_col] = '*'
        alice_row, alice_col = next_row, next_col
    if tea_bags >= 10:
        # Enough tea bags collected: Alice leaves the board for the party.
        matrix[alice_row][alice_col] = '*'
if not in_wonderland:
    print("Alice didn't make it to the tea party.")
else:
    print('She did it! She went to the party.')
[print(' '.join(row)) for row in matrix] | 474 | 0 | 68 |
e8d6adf3feed698cd713b5295d7b624411a09079 | 2,011 | py | Python | DecisionTreeClassifier.py | akashmittal18/Twitter-Sentimental-Analysis- | 68eb06c778c9d512d6da9da5c657a510913bc2c1 | [
"MIT"
] | null | null | null | DecisionTreeClassifier.py | akashmittal18/Twitter-Sentimental-Analysis- | 68eb06c778c9d512d6da9da5c657a510913bc2c1 | [
"MIT"
] | null | null | null | DecisionTreeClassifier.py | akashmittal18/Twitter-Sentimental-Analysis- | 68eb06c778c9d512d6da9da5c657a510913bc2c1 | [
"MIT"
] | 2 | 2020-10-02T18:55:37.000Z | 2020-10-18T10:59:42.000Z | <<<<<<< HEAD
# NOTE(review): resolved an unmerged VCS conflict here; both conflict sides
# listed the same five imports, so a single copy is kept.
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score
from sklearn.tree import DecisionTreeClassifier
return "Negative" | 47.880952 | 117 | 0.773744 | <<<<<<< HEAD
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score
from sklearn.tree import DecisionTreeClassifier
def DecisionTreeClassifierAlgo(x_train_vft, y_train, x_test_vft, y_test, vec):
print("Decision Tree Classifier")
dtc = DecisionTreeClassifier(criterion="gini", random_state=100, max_depth=3, min_samples_leaf=5)
dtc.fit(x_train_vft, y_train)
y_predict_class = dtc.predict(x_test_vft)
print("Confusion Matrix")
print(confusion_matrix(y_test, y_predict_class))
print('Accuracy Score :', accuracy_score(y_test, y_predict_class))
print('ROC(Receiver Operating Characteristic) and AUC(Area Under Curve)', roc_auc_score(y_test, y_predict_class))
print('Average Precision Score:', average_precision_score(y_test, y_predict_class))
if dtc.predict(vec) == [1]:
return "Positive"
else:
=======
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score
from sklearn.tree import DecisionTreeClassifier
def DecisionTreeClassifierAlgo(x_train_vft, y_train, x_test_vft, y_test, vec):
print("Decision Tree Classifier")
dtc = DecisionTreeClassifier(criterion="gini", random_state=100, max_depth=3, min_samples_leaf=5)
dtc.fit(x_train_vft, y_train)
y_predict_class = dtc.predict(x_test_vft)
print("Confusion Matrix")
print(confusion_matrix(y_test, y_predict_class))
print('Accuracy Score :', accuracy_score(y_test, y_predict_class))
print('ROC(Receiver Operating Characteristic) and AUC(Area Under Curve)', roc_auc_score(y_test, y_predict_class))
print('Average Precision Score:', average_precision_score(y_test, y_predict_class))
if dtc.predict(vec) == [1]:
return "Positive"
else:
>>>>>>> a8eac8957e283fe23b26e99d32eac0ba302a4a04
return "Negative" | 1,410 | 0 | 46 |
d72282ac0ecaa1924612fe755db9f4659bf7c5a0 | 468 | py | Python | custom/apps/wisepill/models.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | 1 | 2015-02-10T23:26:39.000Z | 2015-02-10T23:26:39.000Z | custom/apps/wisepill/models.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | custom/apps/wisepill/models.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | null | null | null | from couchdbkit.ext.django.schema import *
class WisePillDeviceEvent(Document):
    """
    One DeviceEvent is created each time a device sends data that is
    forwarded to the CommCareHQ WisePill API (/wisepill/device/).
    """
    domain = StringProperty()  # CommCareHQ domain -- presumably the owner of the event; confirm against callers
    data = StringProperty()  # raw payload string forwarded by the device
    received_on = DateTimeProperty()  # event timestamp (presumably receipt time -- confirm)
    case_id = StringProperty() # Document _id of the case representing the device that sent this data
    processed = BooleanProperty()  # NOTE(review): semantics inferred from the name -- whether the event has been handled
| 33.428571 | 104 | 0.722222 | from couchdbkit.ext.django.schema import *
class WisePillDeviceEvent(Document):
    """
    One DeviceEvent is created each time a device sends data that is
    forwarded to the CommCareHQ WisePill API (/wisepill/device/).
    """
    domain = StringProperty()  # CommCareHQ domain -- presumably the owner of the event; confirm against callers
    data = StringProperty()  # raw payload string forwarded by the device
    received_on = DateTimeProperty()  # event timestamp (presumably receipt time -- confirm)
    case_id = StringProperty() # Document _id of the case representing the device that sent this data
    processed = BooleanProperty()  # NOTE(review): semantics inferred from the name -- whether the event has been handled
| 0 | 0 | 0 |
106effb6c812af3d1703d47a62d2b744e0059446 | 362 | py | Python | bookworm/document/formats/__init__.py | xingkong0113/bookworm | 7214067f48e7a951198806a1f9170e3fd8fc0cce | [
"MIT"
] | 36 | 2020-11-15T03:21:39.000Z | 2022-03-05T01:11:26.000Z | bookworm/document/formats/__init__.py | xingkong0113/bookworm | 7214067f48e7a951198806a1f9170e3fd8fc0cce | [
"MIT"
] | 90 | 2020-10-06T14:46:07.000Z | 2022-03-31T03:03:34.000Z | bookworm/document/formats/__init__.py | xingkong0113/bookworm | 7214067f48e7a951198806a1f9170e3fd8fc0cce | [
"MIT"
] | 20 | 2020-09-30T17:40:44.000Z | 2022-03-17T19:59:53.000Z | # coding: utf-8
from .pdf import FitzPdfDocument
from .epub import EpubDocument
from .mobi import MobiDocument
from .plain_text import PlainTextDocument
from .html import FileSystemHtmlDocument, WebHtmlDocument
from .markdown import MarkdownDocument
from .word import WordDocument
from .powerpoint import PowerpointPresentation
from .odf import OdfTextDocument
| 30.166667 | 57 | 0.850829 | # coding: utf-8
from .pdf import FitzPdfDocument
from .epub import EpubDocument
from .mobi import MobiDocument
from .plain_text import PlainTextDocument
from .html import FileSystemHtmlDocument, WebHtmlDocument
from .markdown import MarkdownDocument
from .word import WordDocument
from .powerpoint import PowerpointPresentation
from .odf import OdfTextDocument
| 0 | 0 | 0 |
7ddb29e8075b8805d2a36edc9007f941a697f2a3 | 5,528 | py | Python | alsek/storage/backends/redis.py | TariqAHassan/alsek | a0d0f44d2eea1538c9d04530ae917695f624d269 | [
"MIT"
] | 1 | 2021-08-16T02:38:40.000Z | 2021-08-16T02:38:40.000Z | alsek/storage/backends/redis.py | TariqAHassan/alsek | a0d0f44d2eea1538c9d04530ae917695f624d269 | [
"MIT"
] | null | null | null | alsek/storage/backends/redis.py | TariqAHassan/alsek | a0d0f44d2eea1538c9d04530ae917695f624d269 | [
"MIT"
] | null | null | null | """
Redis Backend
"""
from __future__ import annotations
from typing import Any, Dict, Iterable, Optional, Union, cast
import dill
from redis import ConnectionPool, Redis
from alsek._defaults import DEFAULT_NAMESPACE
from alsek._utils.aggregation import gather_init_params
from alsek._utils.printing import auto_repr
from alsek.storage.backends import Backend, LazyClient
from alsek.storage.serialization import JsonSerializer, Serializer
class RedisBackend(Backend):
    """Redis Backend.
    Backend powered by Redis.
    Args:
        conn (str, Redis, LazyClient, optional): a connection url, ``Redis()`` object
            or ``LazyClient``.
        namespace (str): prefix to use when inserting
            names in the backend
        serializer (Serializer): tool for encoding and decoding
            values written into the backend.
    Warning:
        * If ``conn`` is a ``Redis()`` object, ``decode_responses``
          is expected to be set to ``True``.
    """
    # Fix: removed a stray ``@staticmethod`` that preceded ``conn`` and a
    # stray ``@classmethod`` that preceded ``exists``. They were leftovers of
    # methods removed from this copy and made both attributes unusable
    # (``conn`` became a staticmethod-wrapped property and ``exists`` would
    # have received the class as ``self``).
    @property
    def conn(self) -> Redis:
        """Connection to the backend."""
        if isinstance(self._conn, LazyClient):
            self._conn = self._conn.get()
        return cast(Redis, self._conn)
    def exists(self, name: str) -> bool:
        """Check if ``name`` exists in the Redis backend.
        Args:
            name (str): name of the item
        Returns:
            bool
        """
        return bool(self.conn.exists(self.full_name(name)))
    def set(
        self,
        name: str,
        value: Any,
        nx: bool = False,
        ttl: Optional[int] = None,
    ) -> None:
        """Set ``name`` to ``value`` in the Redis backend.
        Args:
            name (str): name of the item
            value (Any): value to set for ``name``
            nx (bool): if ``True`` the item must not exist prior to being set
            ttl (int, optional): time to live for the entry in milliseconds
        Returns:
            None
        Raises:
            KeyError: if ``nx`` is ``True`` and ``name`` already exists
        """
        response = self.conn.set(
            self.full_name(name),
            value=self.serializer.forward(value),
            px=ttl,
            nx=nx,
            keepttl=ttl is None,  # type: ignore
        )
        if nx and response is None:
            raise KeyError(f"Name '{name}' already exists")
    def get(self, name: str) -> Any:
        """Get ``name`` from the Redis backend.
        Args:
            name (str): name of the item
        Returns:
            Any
        """
        encoded = self.conn.get(self.full_name(name))
        return self.serializer.reverse(encoded)
    def delete(self, name: str, missing_ok: bool = False) -> None:
        """Delete a ``name`` from the Redis backend.
        Args:
            name (str): name of the item
            missing_ok (bool): if ``True``, do not raise for missing
        Returns:
            None
        Raises:
            KeyError: if ``missing_ok`` is ``False`` and ``name`` is not found.
        """
        found = self.conn.delete(self.full_name(name))
        if not missing_ok and not found:
            raise KeyError(f"No name '{name}' found")
    def scan(self, pattern: Optional[str] = None) -> Iterable[str]:
        """Scan the backend for matching names.
        Args:
            pattern (str): pattern to match against
        Returns:
            names_stream (Iterable[str]): a stream of matching name
        """
        match = self.full_name(pattern or "*")
        yield from map(self.short_name, self.conn.scan_iter(match))
| 29.248677 | 85 | 0.578148 | """
Redis Backend
"""
from __future__ import annotations
from typing import Any, Dict, Iterable, Optional, Union, cast
import dill
from redis import ConnectionPool, Redis
from alsek._defaults import DEFAULT_NAMESPACE
from alsek._utils.aggregation import gather_init_params
from alsek._utils.printing import auto_repr
from alsek.storage.backends import Backend, LazyClient
from alsek.storage.serialization import JsonSerializer, Serializer
class RedisBackend(Backend):
    """Redis Backend.
    Backend powered by Redis.
    Args:
        conn (str, Redis, LazyClient, optional): a connection url, ``Redis()`` object
            or ``LazyClient``.
        namespace (str): prefix to use when inserting
            names in the backend
        serializer (Serializer): tool for encoding and decoding
            values written into the backend.
    Warning:
        * If ``conn`` is a ``Redis()`` object, ``decode_responses``
          is expected to be set to ``True``.
    """
    def __init__(
        self,
        conn: Optional[Union[str, Redis, LazyClient]] = None,
        namespace: str = DEFAULT_NAMESPACE,
        serializer: Serializer = JsonSerializer(),
    ) -> None:
        super().__init__(namespace, serializer=serializer)
        self._conn = self._conn_parse(conn)
    @staticmethod
    def _conn_parse(
        conn: Optional[Union[str, Redis, LazyClient]]
    ) -> Union[Redis, LazyClient]:
        """Normalize ``conn`` (None, url string, ``Redis`` or ``LazyClient``)
        into a ``Redis`` client or a ``LazyClient`` wrapper."""
        if isinstance(conn, LazyClient):
            return conn
        if conn is None:
            # Default client; decode_responses matches the class-level warning.
            return Redis(decode_responses=True)
        elif isinstance(conn, Redis):
            return conn
        elif isinstance(conn, str):
            return Redis.from_url(conn, decode_responses=True)
        else:
            raise ValueError(f"Unsupported `conn` {conn}")
    @property
    def conn(self) -> Redis:
        """Connection to the backend."""
        # Resolve a LazyClient on first access and cache the real client.
        if isinstance(self._conn, LazyClient):
            self._conn = self._conn.get()
        return cast(Redis, self._conn)
    def __repr__(self) -> str:
        # Note: accessing self.conn here forces a LazyClient to resolve.
        return auto_repr(
            self,
            conn=self.conn,
            namespace=self.namespace,
            serializer=self.serializer,
        )
    def _encode(self) -> bytes:
        """Serialize this backend (class + init params) with dill; the
        connection pool settings are captured so ``_from_settings`` can
        rebuild an equivalent connection."""
        data: Dict[str, Any] = dict(
            backend=self.__class__,
            settings=gather_init_params(self, ignore=("conn",)),
        )
        data["settings"]["conn"] = dict(
            connection_class=self.conn.connection_pool.connection_class,
            max_connections=self.conn.connection_pool.max_connections,
            connection_kwargs=self.conn.connection_pool.connection_kwargs,
        )
        return cast(bytes, dill.dumps(data))
    @classmethod
    def _from_settings(cls, settings: Dict[str, Any]) -> RedisBackend:
        """Rebuild a backend from the settings dict produced by ``_encode``."""
        settings["conn"] = Redis(
            connection_pool=ConnectionPool(
                connection_class=settings["conn"]["connection_class"],
                max_connections=settings["conn"]["max_connections"],
                **settings["conn"]["connection_kwargs"],
            )
        )
        return cls(**settings)
    def exists(self, name: str) -> bool:
        """Check if ``name`` exists in the Redis backend.
        Args:
            name (str): name of the item
        Returns:
            bool
        """
        return bool(self.conn.exists(self.full_name(name)))
    def set(
        self,
        name: str,
        value: Any,
        nx: bool = False,
        ttl: Optional[int] = None,
    ) -> None:
        """Set ``name`` to ``value`` in the Redis backend.
        Args:
            name (str): name of the item
            value (Any): value to set for ``name``
            nx (bool): if ``True`` the item must not exist prior to being set
            ttl (int, optional): time to live for the entry in milliseconds
        Returns:
            None
        Raises:
            KeyError: if ``nx`` is ``True`` and ``name`` already exists
        """
        response = self.conn.set(
            self.full_name(name),
            value=self.serializer.forward(value),
            px=ttl,
            nx=nx,
            keepttl=ttl is None,  # type: ignore
        )
        if nx and response is None:
            raise KeyError(f"Name '{name}' already exists")
    def get(self, name: str) -> Any:
        """Get ``name`` from the Redis backend.
        Args:
            name (str): name of the item
        Returns:
            Any
        """
        encoded = self.conn.get(self.full_name(name))
        return self.serializer.reverse(encoded)
    def delete(self, name: str, missing_ok: bool = False) -> None:
        """Delete a ``name`` from the Redis backend.
        Args:
            name (str): name of the item
            missing_ok (bool): if ``True``, do not raise for missing
        Returns:
            None
        Raises:
            KeyError: if ``missing_ok`` is ``False`` and ``name`` is not found.
        """
        found = self.conn.delete(self.full_name(name))
        if not missing_ok and not found:
            raise KeyError(f"No name '{name}' found")
    def scan(self, pattern: Optional[str] = None) -> Iterable[str]:
        """Scan the backend for matching names.
        Args:
            pattern (str): pattern to match against
        Returns:
            names_stream (Iterable[str]): a stream of matching name
        """
        match = self.full_name(pattern or "*")
        yield from map(self.short_name, self.conn.scan_iter(match))
| 1,747 | 0 | 133 |
84454d7545f7d8140ad9375e76435e7665773cdf | 4,331 | py | Python | Test/Misc/data.py | ViliamVadocz/Bots | 092abc5bf92e9dab9d07499849d54a33b0b0c4f6 | [
"MIT"
] | null | null | null | Test/Misc/data.py | ViliamVadocz/Bots | 092abc5bf92e9dab9d07499849d54a33b0b0c4f6 | [
"MIT"
] | null | null | null | Test/Misc/data.py | ViliamVadocz/Bots | 092abc5bf92e9dab9d07499849d54a33b0b0c4f6 | [
"MIT"
] | null | null | null | '''Rocket League data processing.'''
from utils import Car, Ball, BoostPad, a3l, a3r, a3v, orient_matrix, turn_r
def setup(s, p):
    """Sets up the variables and classes for the hivemind.
    Arguments:
        s {BaseAgent} -- The hivemind bot helper process.
        p {GameTickPacket} -- Information about the game.
        fi {FieldInfoPacket} -- Information about the game field.
    """
    # NOTE(review): `fi` above is not a parameter; it is fetched from `s`
    # below via get_field_info().
    # Game info.
    fi = s.get_field_info()
    s.dt = 1 / 120.0  # initial tick-length guess; recomputed each tick in process()
    s.last_time = 0.0
    # Creates Car objects for all bots.
    s.teammates = []
    s.opponents = []
    for index in range(p.num_cars):
        if index == s.index:
            s.agent = Car(s.index)
        elif p.game_cars[index].team == s.team:
            s.teammates.append(Car(index))
        else:
            s.opponents.append(Car(index))
    s.agent.controller = None
    # Creates a Ball object.
    s.ball = Ball()
    # Creates Boostpad objects, split into large (full) and small pads.
    s.l_pads = []
    s.s_pads = []
    for i in range(fi.num_boosts):
        pad = fi.boost_pads[i]
        pad_type = s.l_pads if pad.is_full_boost else s.s_pads
        pad_obj = BoostPad(i, a3v(pad.location))
        pad_type.append(pad_obj)
    s.setup = True  # flag: initialisation complete
def process(s, p):
    """Processes the gametick packet.
    Arguments:
        s {BaseAgent} -- The agent which is processing the packet.
        p {GameTickPacket} -- The game packet being processed.
    """
    # NOTE(review): the agent / teammate / opponent sections below copy the
    # same nine fields from the packet; a shared helper would remove the
    # triplication.
    # Processing game info.
    s.time = p.game_info.seconds_elapsed
    s.dt = s.time - s.last_time  # seconds elapsed since the previous packet
    s.last_time = s.time
    s.r_active = p.game_info.is_round_active
    s.ko_pause = p.game_info.is_kickoff_pause
    s.m_ended = p.game_info.is_match_ended
    # Processing agent data.
    s.agent.pos = a3v(p.game_cars[s.agent.index].physics.location)
    s.agent.rot = a3r(p.game_cars[s.agent.index].physics.rotation)
    s.agent.vel = a3v(p.game_cars[s.agent.index].physics.velocity)
    s.agent.ang_vel = a3v(p.game_cars[s.agent.index].physics.angular_velocity)
    s.agent.on_g = p.game_cars[s.agent.index].has_wheel_contact
    s.agent.sonic = p.game_cars[s.agent.index].is_super_sonic
    s.agent.boost = p.game_cars[s.agent.index].boost
    s.agent.orient_m = orient_matrix(s.agent.rot)
    s.agent.turn_r = turn_r(s.agent.vel)
    # Processing teammates (same fields as the agent above).
    for teammate in s.teammates:
        teammate.pos = a3v(p.game_cars[teammate.index].physics.location)
        teammate.rot = a3r(p.game_cars[teammate.index].physics.rotation)
        teammate.vel = a3v(p.game_cars[teammate.index].physics.velocity)
        teammate.ang_vel = a3v(p.game_cars[teammate.index].physics.angular_velocity)
        teammate.on_g = p.game_cars[teammate.index].has_wheel_contact
        teammate.sonic = p.game_cars[teammate.index].is_super_sonic
        teammate.boost = p.game_cars[teammate.index].boost
        teammate.orient_m = orient_matrix(teammate.rot)
        teammate.turn_r = turn_r(teammate.vel)
    # Processing opponents (same fields again).
    for opponent in s.opponents:
        opponent.pos = a3v(p.game_cars[opponent.index].physics.location)
        opponent.rot = a3r(p.game_cars[opponent.index].physics.rotation)
        opponent.vel = a3v(p.game_cars[opponent.index].physics.velocity)
        opponent.ang_vel = a3v(p.game_cars[opponent.index].physics.angular_velocity)
        opponent.on_g = p.game_cars[opponent.index].has_wheel_contact
        opponent.sonic = p.game_cars[opponent.index].is_super_sonic
        opponent.boost = p.game_cars[opponent.index].boost
        opponent.orient_m = orient_matrix(opponent.rot)
        opponent.turn_r = turn_r(opponent.vel)
    # Processing Ball data.
    s.ball.pos = a3v(p.game_ball.physics.location)
    s.ball.vel = a3v(p.game_ball.physics.velocity)
    s.ball.ang_vel = a3v(p.game_ball.physics.angular_velocity)
    s.ball.predict = s.get_ball_prediction_struct()
    # Processing Boostpads; active pads are also collected into s.active_pads.
    s.active_pads = []
    for pad_type in (s.l_pads, s.s_pads):
        for pad in pad_type:
            pad.active = p.game_boosts[pad.index].is_active
            pad.timer = p.game_boosts[pad.index].timer
            if pad.active == True:
s.active_pads.append(pad) | 38.327434 | 87 | 0.636804 | '''Rocket League data processing.'''
from utils import Car, Ball, BoostPad, a3l, a3r, a3v, orient_matrix, turn_r
def setup(s, p):
    """Sets up the variables and classes for the hivemind.
    Arguments:
        s {BaseAgent} -- The hivemind bot helper process.
        p {GameTickPacket} -- Information about the game.
        fi {FieldInfoPacket} -- Information about the game field.
    """
    # NOTE(review): `fi` above is not a parameter; it is fetched from `s`
    # below via get_field_info().
    # Game info.
    fi = s.get_field_info()
    s.dt = 1 / 120.0  # initial tick-length guess; recomputed each tick in process()
    s.last_time = 0.0
    # Creates Car objects for all bots.
    s.teammates = []
    s.opponents = []
    for index in range(p.num_cars):
        if index == s.index:
            s.agent = Car(s.index)
        elif p.game_cars[index].team == s.team:
            s.teammates.append(Car(index))
        else:
            s.opponents.append(Car(index))
    s.agent.controller = None
    # Creates a Ball object.
    s.ball = Ball()
    # Creates Boostpad objects, split into large (full) and small pads.
    s.l_pads = []
    s.s_pads = []
    for i in range(fi.num_boosts):
        pad = fi.boost_pads[i]
        pad_type = s.l_pads if pad.is_full_boost else s.s_pads
        pad_obj = BoostPad(i, a3v(pad.location))
        pad_type.append(pad_obj)
    s.setup = True  # flag: initialisation complete
def process(s, p):
    """Processes the gametick packet.
    Arguments:
        s {BaseAgent} -- The agent which is processing the packet.
        p {GameTickPacket} -- The game packet being processed.
    """
    # NOTE(review): the agent / teammate / opponent sections below copy the
    # same nine fields from the packet; a shared helper would remove the
    # triplication.
    # Processing game info.
    s.time = p.game_info.seconds_elapsed
    s.dt = s.time - s.last_time  # seconds elapsed since the previous packet
    s.last_time = s.time
    s.r_active = p.game_info.is_round_active
    s.ko_pause = p.game_info.is_kickoff_pause
    s.m_ended = p.game_info.is_match_ended
    # Processing agent data.
    s.agent.pos = a3v(p.game_cars[s.agent.index].physics.location)
    s.agent.rot = a3r(p.game_cars[s.agent.index].physics.rotation)
    s.agent.vel = a3v(p.game_cars[s.agent.index].physics.velocity)
    s.agent.ang_vel = a3v(p.game_cars[s.agent.index].physics.angular_velocity)
    s.agent.on_g = p.game_cars[s.agent.index].has_wheel_contact
    s.agent.sonic = p.game_cars[s.agent.index].is_super_sonic
    s.agent.boost = p.game_cars[s.agent.index].boost
    s.agent.orient_m = orient_matrix(s.agent.rot)
    s.agent.turn_r = turn_r(s.agent.vel)
    # Processing teammates (same fields as the agent above).
    for teammate in s.teammates:
        teammate.pos = a3v(p.game_cars[teammate.index].physics.location)
        teammate.rot = a3r(p.game_cars[teammate.index].physics.rotation)
        teammate.vel = a3v(p.game_cars[teammate.index].physics.velocity)
        teammate.ang_vel = a3v(p.game_cars[teammate.index].physics.angular_velocity)
        teammate.on_g = p.game_cars[teammate.index].has_wheel_contact
        teammate.sonic = p.game_cars[teammate.index].is_super_sonic
        teammate.boost = p.game_cars[teammate.index].boost
        teammate.orient_m = orient_matrix(teammate.rot)
        teammate.turn_r = turn_r(teammate.vel)
    # Processing opponents (same fields again).
    for opponent in s.opponents:
        opponent.pos = a3v(p.game_cars[opponent.index].physics.location)
        opponent.rot = a3r(p.game_cars[opponent.index].physics.rotation)
        opponent.vel = a3v(p.game_cars[opponent.index].physics.velocity)
        opponent.ang_vel = a3v(p.game_cars[opponent.index].physics.angular_velocity)
        opponent.on_g = p.game_cars[opponent.index].has_wheel_contact
        opponent.sonic = p.game_cars[opponent.index].is_super_sonic
        opponent.boost = p.game_cars[opponent.index].boost
        opponent.orient_m = orient_matrix(opponent.rot)
        opponent.turn_r = turn_r(opponent.vel)
    # Processing Ball data.
    s.ball.pos = a3v(p.game_ball.physics.location)
    s.ball.vel = a3v(p.game_ball.physics.velocity)
    s.ball.ang_vel = a3v(p.game_ball.physics.angular_velocity)
    s.ball.predict = s.get_ball_prediction_struct()
    # Processing Boostpads; active pads are also collected into s.active_pads.
    s.active_pads = []
    for pad_type in (s.l_pads, s.s_pads):
        for pad in pad_type:
            pad.active = p.game_boosts[pad.index].is_active
            pad.timer = p.game_boosts[pad.index].timer
            if pad.active == True:
s.active_pads.append(pad) | 0 | 0 | 0 |
23813dfe1984bdbeac2ed1433c66117039d61cbd | 1,257 | py | Python | log.py | ninjawil/toolbox | 93763def83f40c1cabb23e5d942b491b8799b675 | [
"MIT"
] | null | null | null | log.py | ninjawil/toolbox | 93763def83f40c1cabb23e5d942b491b8799b675 | [
"MIT"
] | null | null | null | log.py | ninjawil/toolbox | 93763def83f40c1cabb23e5d942b491b8799b675 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''Provides a common logging set up for all scripts'''
#===============================================================================
# Import modules
#===============================================================================
import logging
import logging.handlers
import time
#===============================================================================
# Custom logger
#===============================================================================
| 31.425 | 80 | 0.431981 | #!/usr/bin/env python
'''Provides a common logging set up for all scripts'''
#===============================================================================
# Import modules
#===============================================================================
import logging
import logging.handlers
import time
#===============================================================================
# Custom logger
#===============================================================================
def setup(name, log_file):
formatter = logging.Formatter(
fmt='%(asctime)s [%(levelname)-8s] %(module)-15s : %(message)s')
logging.Formatter.converter = time.gmtime
fh = logging.handlers.TimedRotatingFileHandler(filename=log_file,
when='midnight',
backupCount=7,
utc=True)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(fh)
logger.addHandler(ch)
return logger
| 745 | 0 | 22 |
34b2f4084783208a5e9d272083c402d0807ee844 | 613 | py | Python | raspi-controller/espnetwork.py | alecu/ventilastation | db8cb6f4bbc77f8f9e03d26bd7cc6bd36bc34220 | [
"Apache-2.0"
] | 2 | 2019-04-03T01:10:00.000Z | 2019-11-18T15:31:32.000Z | raspi-controller/espnetwork.py | alecu/ventilastation | db8cb6f4bbc77f8f9e03d26bd7cc6bd36bc34220 | [
"Apache-2.0"
] | null | null | null | raspi-controller/espnetwork.py | alecu/ventilastation | db8cb6f4bbc77f8f9e03d26bd7cc6bd36bc34220 | [
"Apache-2.0"
] | 2 | 2019-03-02T20:27:31.000Z | 2019-11-17T23:21:50.000Z | UDP_THIS = "0.0.0.0", 5225
#UDP_OTHER = "127.0.0.1", 5005
UDP_OTHER = "192.168.4.1", 5005
import socket
sock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
sock.setblocking(False)
sock.bind(UDP_THIS)
sock_iterator = sock_generator()
| 21.892857 | 43 | 0.608483 | UDP_THIS = "0.0.0.0", 5225
#UDP_OTHER = "127.0.0.1", 5005
#UDP_OTHER = "127.0.0.1", 5005
UDP_OTHER = "192.168.4.1", 5005  # remote endpoint (presumably the ESP's softAP address -- confirm)
import socket
sock = socket.socket(socket.AF_INET,
                     socket.SOCK_DGRAM)  # UDP datagram socket
sock.setblocking(False)  # sendto/recvfrom raise BlockingIOError instead of waiting
sock.bind(UDP_THIS)
try:
sock.sendto(what, UDP_OTHER)
print('{:08b}'.format(what[0]))
except BlockingIOError:
print("error trying to send", what)
def sock_generator():
while True:
try:
data, _ = sock.recvfrom(1024)
yield data
except BlockingIOError:
yield None
sock_iterator = sock_generator()
| 306 | 0 | 46 |
f1a7a25d2b44e69ab0b3cf20ef431b057101e056 | 610 | py | Python | prototyping-board/driver/lighting/backlight.py | lnixdo2s/saiga | dba093e66696b556f97269585bc4bc0ef51c5763 | [
"MIT"
] | null | null | null | prototyping-board/driver/lighting/backlight.py | lnixdo2s/saiga | dba093e66696b556f97269585bc4bc0ef51c5763 | [
"MIT"
] | null | null | null | prototyping-board/driver/lighting/backlight.py | lnixdo2s/saiga | dba093e66696b556f97269585bc4bc0ef51c5763 | [
"MIT"
] | null | null | null | import config
from digitalio import DigitalInOut, Direction, Pull
| 25.416667 | 78 | 0.657377 | import config
from digitalio import DigitalInOut, Direction, Pull
class Backlight(Task):
    """Chases the backlight LEDs: each advance() toggles one LED in turn.

    NOTE(review): ``Task`` is not imported in this chunk -- presumably
    provided elsewhere in the project; confirm.
    """
    def __init__(self):
        # Setup background LED's
        self.leds = []
        for led_pin in config.BACKLIGHT.LED_PINS:
            led = DigitalInOut(led_pin)
            led.direction = Direction.OUTPUT  # drive the pin as an output
            self.leds.append(led)
        self.current_led_index = 0  # index of the next LED to toggle
    def advance(self, time_delta):
        # Toggle the current LED, then step to the next one, wrapping around.
        # `time_delta` is accepted for the Task interface but unused here.
        background_led = self.leds[self.current_led_index]
        background_led.value = not background_led.value
        self.current_led_index = (self.current_led_index + 1) % len(self.leds)
| 465 | 1 | 76 |
d7ef6897682b652698d55a7b48ce637b3753ad8e | 16,880 | py | Python | pait/api_doc/base_parse.py | elviva404/pait | bdb34e316398f1eb31bd82ec109eb4a450b99e95 | [
"Apache-2.0"
] | 19 | 2020-08-26T13:46:33.000Z | 2022-02-22T07:48:29.000Z | pait/api_doc/base_parse.py | elviva404/pait | bdb34e316398f1eb31bd82ec109eb4a450b99e95 | [
"Apache-2.0"
] | 1 | 2021-06-06T17:45:54.000Z | 2021-06-06T17:45:54.000Z | pait/api_doc/base_parse.py | elviva404/pait | bdb34e316398f1eb31bd82ec109eb4a450b99e95 | [
"Apache-2.0"
] | 1 | 2022-01-21T20:25:33.000Z | 2022-01-21T20:25:33.000Z | import inspect
import warnings
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union, get_type_hints
from pydantic import BaseModel
from pydantic.fields import Undefined
from typing_extensions import TypedDict
from pait.field import BaseField, Depends
from pait.model.core import PaitCoreModel
from pait.util import FuncSig, create_pydantic_model, get_func_sig, get_parameter_list_from_class
FieldDictType = Dict[Type[BaseField], List[FieldSchemaTypeDict]]
| 45.99455 | 119 | 0.538329 | import inspect
import warnings
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union, get_type_hints
from pydantic import BaseModel
from pydantic.fields import Undefined
from typing_extensions import TypedDict
from pait.field import BaseField, Depends
from pait.model.core import PaitCoreModel
from pait.util import FuncSig, create_pydantic_model, get_func_sig, get_parameter_list_from_class
class _IgnoreField(BaseField):
    """Sentinel ``BaseField`` placed in ``raw`` entries when the real field
    object cannot be recovered from a schema (see ``_parse_schema``)."""
    pass
class FieldSchemaRawTypeDict(TypedDict):
    """Raw, unflattened schema information kept alongside each parsed field."""
    param_name: str  # name as it appears in its own schema, without the parent prefix
    schema: dict  # the property's own schema dict
    parent_schema: dict  # the schema dict the property was found in
    annotation: Type  # placeholder (str) -- see the "can not parse" note in _parse_schema
    field: BaseField  # placeholder _IgnoreField instance when unrecoverable
class FieldSchemaTypeDict(TypedDict):
    """Flattened description of one parameter parsed from a pydantic schema."""
    param_name: str  # dotted path for nested params, e.g. "parent.child"
    description: str
    default: Any  # the undefined-sentinel when the schema has no default
    type: str  # schema type string, or "enum"/"object" fallbacks
    other: dict  # schema keys other than description/title/type/default
    raw: FieldSchemaRawTypeDict
FieldDictType = Dict[Type[BaseField], List[FieldSchemaTypeDict]]
class PaitBaseParse(object):
def __init__(self, pait_dict: Dict[str, PaitCoreModel], undefined: Any = Undefined):
self._undefined: Any = undefined
self._group_list: List[str] = []
self._group_pait_dict: Dict[str, List[PaitCoreModel]] = {}
self._init(pait_dict)
self.content: str = ""
self._content_type: str = ""
def _init(self, pait_dict: Dict[str, PaitCoreModel]) -> None:
"""read from `pait_id_dict` and write PaitMd attributes"""
for pait_id, pait_model in pait_dict.items():
if not pait_model.operation_id:
continue
group: str = pait_model.group
if group not in self._group_pait_dict:
self._group_pait_dict[group] = [pait_model]
else:
self._group_pait_dict[group].append(pait_model)
self._group_list = sorted(self._group_pait_dict.keys())
    def _parse_schema(
        self, schema_dict: dict, definition_dict: Optional[dict] = None, parent_key: str = ""
    ) -> List[FieldSchemaTypeDict]:
        """Flatten a pydantic ``BaseModel.schema()`` dict into a list of
        ``FieldSchemaTypeDict`` entries, recursing through ``$ref``, array
        ``items`` refs and ``allOf``; nested names are joined with ``.``.

        NOTE(review): the trailing ``field_dict_list.append(...)`` runs for
        every property, but ``default``/``_type`` are only assigned in the
        ``allOf``-enum and plain branches -- after a ``$ref``/``items``
        branch they are unbound (first iteration) or stale (later
        iterations). Confirm whether those branches should fall through.
        """
        field_dict_list: List[FieldSchemaTypeDict] = []
        # model property openapi dict
        # e.g. : {'code': {'title': 'Code', 'description': 'api code', 'default': 1, 'type': 'integer'}}
        property_dict: dict = schema_dict["properties"]
        # class schema in the parent schema
        if not definition_dict:
            definition_dict = schema_dict.get("definitions", {})
        for param_name, param_dict in property_dict.items():
            if parent_key:
                # Nested parameter: prefix with the parent's dotted path.
                all_param_name: str = f"{parent_key}.{param_name}"
            else:
                all_param_name = param_name
            if "$ref" in param_dict and definition_dict:
                # ref support
                key: str = param_dict["$ref"].split("/")[-1]
                if isinstance(definition_dict, dict):
                    field_dict_list.extend(self._parse_schema(definition_dict[key], definition_dict, all_param_name))
            elif "items" in param_dict and "$ref" in param_dict["items"]:
                # mad item ref support
                key = param_dict["items"]["$ref"].split("/")[-1]
                if isinstance(definition_dict, dict):
                    field_dict_list.extend(self._parse_schema(definition_dict[key], definition_dict, all_param_name))
            elif "allOf" in param_dict:
                for item in param_dict["allOf"]:
                    key = item["$ref"].split("/")[-1]
                    if not isinstance(definition_dict, dict):
                        continue
                    if "enum" in definition_dict[key]:
                        if len(param_dict["allOf"]) > 1:
                            raise RuntimeError("Not support")
                        default: Any = definition_dict[key].get("enum", self._undefined)
                        if default is not self._undefined:
                            default = f'Only choose from: {",".join(["`" + i + "`" for i in default])}'
                        _type: str = "enum"
            else:
                if "enum" in param_dict:
                    # enum support
                    default = param_dict.get("enum", self._undefined)
                    if default is not self._undefined:
                        default = f'Only choose from: {",".join(["`" + i + "`" for i in default])}'
                    _type = "enum"
                else:
                    default = param_dict.get("default", self._undefined)
                    _type = param_dict.get("type", "object")
            field_dict_list.append(
                {
                    "param_name": all_param_name,
                    "description": param_dict.get("description", ""),
                    "default": default,
                    "type": _type,
                    "other": {
                        key: value
                        for key, value in param_dict.items()
                        if key not in {"description", "title", "type", "default"}
                    },
                    "raw": {
                        "param_name": param_name,
                        "schema": param_dict,
                        "parent_schema": schema_dict,
                        # can not parse annotation and field
                        "annotation": str,
                        "field": _IgnoreField.i(),
                    },
                }
            )
        return field_dict_list
    def _parse_base_model_to_field_dict(
        self,
        field_dict: FieldDictType,
        _pydantic_model: Type[BaseModel],
        param_field_dict: Dict[str, BaseField],
    ) -> None:
        """
        write field_dict from _pydantic_model or param_field_dict
        :param field_dict: output accumulator, keyed by the concrete pait Field class
            e.g.
            {
                "Body": [
                    {
                        "param_name": "",
                        "description": "",
                        "default": "",
                        "type": "",
                        "other": {"": ""},
                        "raw": {
                            "param_name": "",
                            "schema": {"": ""},
                            "parent_schema": {"": ""},
                        },
                    }
                ]
            }
        :param _pydantic_model: pydantic.basemodel
        :param param_field_dict: python attribute name -> pait Field instance
            e.g.
            {
                'uid': Query(default=Ellipsis, description='user id', gt=10, lt=1000, extra={}),
                'user_name': Query(default=Ellipsis, description='user name', min_length=2, max_length=4, extra={}),
                'user_agent': Header(
                    default=Ellipsis, alias='user-agent', alias_priority=2, description='user agent', extra={}
                ),
                'age': Body(default=Ellipsis, description='age', gt=1, lt=100, extra={})
            }
        :return: None; results are appended into ``field_dict`` in place.
        """
        # TODO design like _parse_schema
        # Schema properties are keyed by alias when one is set; map aliases
        # back to the python attribute names so the pait Field can be found.
        param_name_alias_dict: Dict[str, str] = {
            value.alias: key for key, value in param_field_dict.items() if isinstance(value, BaseField) and value.alias
        }
        property_dict: Dict[str, Any] = _pydantic_model.schema()["properties"]
        for param_name, param_dict in property_dict.items():
            param_python_name: str = param_name_alias_dict.get(param_name, param_name)
            pait_field: BaseField = param_field_dict[param_python_name]
            pait_field_class: Type[BaseField] = pait_field.__class__
            # Chase one level of JSON-schema indirection so that param_dict
            # ends up being the concrete definition for this property.
            if "$ref" in param_dict:
                # ref support
                key: str = param_dict["$ref"].split("/")[-1]
                param_dict = _pydantic_model.schema()["definitions"][key]
            elif "items" in param_dict and "$ref" in param_dict["items"]:
                # mad item ref support
                key = param_dict["items"]["$ref"].split("/")[-1]
                param_dict = _pydantic_model.schema()["definitions"][key]
            elif "allOf" in param_dict:
                if len(param_dict["allOf"]) > 1:
                    # NOTE(review): schema property dicts normally have no
                    # 'param_name' key, so this warning would raise KeyError
                    # if it ever fired -- confirm.
                    warnings.warn(f"{param_dict['param_name']} only support 1 item")
                param_dict.update(param_dict["allOf"][0])
                key = param_dict["$ref"].split("/")[-1]
                param_dict = _pydantic_model.schema()["definitions"][key]
            if "enum" in param_dict:
                # enum support: render the members into a human readable
                # "default"; the description comes from the pait field because
                # the enum class schema carries none of its own.
                default: Any = param_dict.get("enum", self._undefined)
                if default is not self._undefined:
                    default = f'Only choose from: {",".join(["`" + i + "`" for i in default])}'
                _type: str = "enum"
                description: str = param_field_dict[param_python_name].description
            else:
                default = param_dict.get("default", self._undefined)
                _type = param_dict.get("type", self._undefined)
                description = param_dict.get("description")
            # NOTE: pydantic.Field(default=None) does not surface a default in
            # the generated schema, so for optional parameters fall back to
            # the pait field's own default.
            if default is self._undefined and param_name not in _pydantic_model.schema().get("required", []):
                default = param_field_dict[param_python_name].default
            _field_dict: FieldSchemaTypeDict = {
                "param_name": param_name,
                "description": description,
                "default": default,
                "type": _type,
                "other": {
                    key: value
                    for key, value in param_dict.items()
                    if key not in {"description", "title", "type", "default"}
                },
                "raw": {
                    "param_name": param_name,
                    "schema": param_dict,
                    "parent_schema": _pydantic_model.schema(),
                    "annotation": _pydantic_model.__annotations__[param_python_name],
                    "field": pait_field,
                },
            }
            # Group the parsed entry under its Field class (Body, Query, ...).
            if pait_field_class not in field_dict:
                field_dict[pait_field_class] = [_field_dict]
            else:
                field_dict[pait_field_class].append(_field_dict)
    def parameter_list_handle(
        self,
        parameter_list: List["inspect.Parameter"],
        field_dict: FieldDictType,
        single_field_list: List[Tuple[str, "inspect.Parameter"]],
        pait_model: PaitCoreModel,
    ) -> None:
        """parse parameter_list to field_dict and single_field_list

        Parameters whose default is a pait Field are either expanded property
        by property (when annotated with a BaseModel) or deferred into
        ``single_field_list`` (simple values). A Depends default recurses into
        the dependency function; a BaseModel annotation without a default is
        expanded from the model's own field_info objects.
        """
        for parameter in parameter_list:
            if parameter.default != parameter.empty:
                annotation: type = parameter.annotation
                pait_field: Union[BaseField, Depends] = parameter.default
                if (
                    inspect.isclass(annotation)
                    and issubclass(annotation, BaseModel)
                    and not isinstance(pait_field, Depends)
                ):
                    # support def test(pait_model_route: BaseModel = Body())
                    # Adapt each property of pydantic.BaseModel to pait.field:
                    # convert the pydantic model's Field info objects into
                    # Field instances of the genuine request type, such as
                    # Body, Query, Header, etc.
                    param_filed_dict: Dict[str, BaseField] = {
                        _param_name: pait_field.from_pydantic_field(
                            annotation.__fields__[_param_name].field_info  # type: ignore
                        )
                        for _param_name, _ in get_type_hints(annotation).items()
                    }
                    self._parse_base_model_to_field_dict(field_dict, annotation, param_filed_dict)
                else:
                    # def test(pait_model_route: int = Body())
                    if isinstance(pait_field, Depends):
                        # NOTE(review): dict.update() replaces the entry for a
                        # Field class already present instead of extending it
                        # -- confirm whether dependency fields can be dropped.
                        field_dict.update(self._parse_func_param_to_field_dict(pait_field.func, pait_model))
                    else:
                        field_name: str = pait_field.__class__.__name__.lower()
                        single_field_list.append((field_name, parameter))
                # parse link
                # TODO mv to gen tree
                if not isinstance(pait_field, Depends) and pait_field.link:
                    pait_field.link.register(pait_model, parameter.name, pait_field)
            elif issubclass(parameter.annotation, BaseModel):
                # def test(pait_model_route: PaitBaseModel)
                _pait_model: Type[BaseModel] = parameter.annotation
                param_filed_dict = {
                    key: model_field.field_info
                    for key, model_field in _pait_model.__fields__.items()
                    if isinstance(model_field.field_info, BaseField)
                }
                self._parse_base_model_to_field_dict(field_dict, _pait_model, param_filed_dict)
    def _parse_func_param_to_field_dict(self, func: Callable, pait_model: PaitCoreModel) -> FieldDictType:
        """gen field dict from func; the result has the shape
        {
            "Body": [
                {
                    'field': {
                        'param_name': str,
                        'description': str,
                        'default': str,
                        'type': type,
                        'other': dict,
                        'raw': {
                            'param_name': str,
                            'schema': dict,
                            'parent_schema': pydantic base model.schema(),
                            'annotation': annotation,
                            'field': basefield,
                        }
                    }
                }
            ]
        }
        """
        field_dict: FieldDictType = {}
        func_sig: FuncSig = get_func_sig(func)
        single_field_list: List[Tuple[str, "inspect.Parameter"]] = []
        # If func is a method, also pick up pait parameters declared on the
        # enclosing class: qualname minus the trailing attribute yields the
        # class name; for a plain function it resolves back to func itself,
        # which fails the isclass() check below.
        # NOTE(review): getattr on the module would raise for classes nested
        # inside another class -- confirm that cannot happen here.
        qualname = func.__qualname__.split(".<locals>", 1)[0].rsplit(".", 1)[0]
        class_ = getattr(inspect.getmodule(func), qualname)
        if inspect.isclass(class_):
            parameter_list: List["inspect.Parameter"] = get_parameter_list_from_class(class_)
            self.parameter_list_handle(parameter_list, field_dict, single_field_list, pait_model)
        self.parameter_list_handle(func_sig.param_list, field_dict, single_field_list, pait_model)
        if single_field_list:
            # Simple-value parameters are gathered into one synthetic pydantic
            # model so their schemas can be parsed the same way as BaseModel
            # parameters.
            annotation_dict: Dict[str, Tuple[Type, Any]] = {}
            _pait_field_dict: Dict[str, BaseField] = {}
            _column_name_set: Set[str] = set()
            for field_name, parameter in single_field_list:
                field: BaseField = parameter.default
                key: str = field.alias or parameter.name
                if key in _column_name_set:
                    # Since the same name cannot exist together in a Dict,
                    # it will be parsed directly when a Key exists
                    # fix
                    # class Demo(BaseModel):
                    #     header_token: str = Header(alias="token")
                    #     query_token: str = Query(alias="token")
                    _pydantic_model: Type[BaseModel] = create_pydantic_model(
                        {parameter.name: (parameter.annotation, field)}
                    )
                    self._parse_base_model_to_field_dict(field_dict, _pydantic_model, {parameter.name: field})
                else:
                    _column_name_set.add(key)
                    annotation_dict[parameter.name] = (parameter.annotation, field)
                    _pait_field_dict[parameter.name] = field
            _pydantic_model = create_pydantic_model(annotation_dict)
            self._parse_base_model_to_field_dict(field_dict, _pydantic_model, _pait_field_dict)
        return field_dict
def _parse_pait_model_to_field_dict(self, pait_model: PaitCoreModel) -> FieldDictType:
"""Extracting request and response information through routing functions"""
all_field_dict: FieldDictType = self._parse_func_param_to_field_dict(pait_model.func, pait_model)
for pre_depend in pait_model.pre_depend_list:
for field_class, field_dict_list in self._parse_func_param_to_field_dict(pre_depend, pait_model).items():
if field_class not in all_field_dict:
all_field_dict[field_class] = field_dict_list
else:
all_field_dict[field_class].extend(field_dict_list)
return all_field_dict
def output(self, filename: Optional[str], suffix: str = "") -> None:
if not suffix:
suffix = self._content_type
if not filename:
print(self.content)
else:
if not filename.endswith(suffix):
filename += suffix
with open(filename, mode="w") as f:
f.write(self.content)
| 659 | 15,638 | 92 |
d994184a02e321e22efb4c5fcfc857a6a35dbc02 | 5,838 | py | Python | sosia/processing/caching/tests/test_retrieving.py | sosia-dev/sosia | d4d2d5edb0cd1d085b5a457eb6d19bf8e9fea7f5 | [
"MIT"
] | 14 | 2019-03-12T22:07:47.000Z | 2022-03-08T14:05:05.000Z | sosia/processing/caching/tests/test_retrieving.py | sosia-dev/sosia | d4d2d5edb0cd1d085b5a457eb6d19bf8e9fea7f5 | [
"MIT"
] | 31 | 2018-10-15T16:02:44.000Z | 2021-04-09T08:13:44.000Z | sosia/processing/caching/tests/test_retrieving.py | sosia-dev/sosia | d4d2d5edb0cd1d085b5a457eb6d19bf8e9fea7f5 | [
"MIT"
] | 2 | 2020-01-09T06:47:09.000Z | 2020-12-05T13:21:03.000Z | # -*- coding: utf-8 -*-
"""Tests for processing.caching.retrieving module."""
from itertools import product
from nose.tools import assert_equal, assert_true
import numpy as np
from os.path import expanduser
import pandas as pd
from pybliometrics.scopus import ScopusSearch, AuthorSearch
from pandas.testing import assert_frame_equal
from sosia.establishing import connect_database, make_database
from sosia.processing import build_dict, insert_data, retrieve_authors,\
retrieve_author_info, retrieve_authors_from_sourceyear, robust_join,\
query_pubs_by_sourceyear
test_cache = expanduser("~/.sosia/test.sqlite")
refresh = 30
| 39.986301 | 80 | 0.683453 | # -*- coding: utf-8 -*-
"""Tests for processing.caching.retrieving module."""
from itertools import product
from nose.tools import assert_equal, assert_true
import numpy as np
from os.path import expanduser
import pandas as pd
from pybliometrics.scopus import ScopusSearch, AuthorSearch
from pandas.testing import assert_frame_equal
from sosia.establishing import connect_database, make_database
from sosia.processing import build_dict, insert_data, retrieve_authors,\
retrieve_author_info, retrieve_authors_from_sourceyear, robust_join,\
query_pubs_by_sourceyear
test_cache = expanduser("~/.sosia/test.sqlite")
refresh = 30
def test_retrieve_authors():
    """Fresh cache: every requested author must be reported as missing."""
    make_database(test_cache, drop=True)
    conn = connect_database(test_cache)
    author_ids = [53164702100, 57197093438]
    query_df = pd.DataFrame(author_ids, columns=["auth_id"], dtype="int64")
    incache, missing = retrieve_authors(query_df, conn)
    # Nothing is cached yet, but the frame must still expose the full schema.
    assert_equal(incache.shape[0], 0)
    expected_cols = ['auth_id', 'eid', 'surname', 'initials', 'givenname',
                     'affiliation', 'documents', 'affiliation_id', 'city',
                     'country', 'areas']
    assert_equal(incache.columns.to_list(), expected_cols)
    assert_equal(missing, author_ids)
def test_retrieve_authors_insert():
    """After caching two author profiles, only the uncached ID is missing."""
    make_database(test_cache, drop=True)
    conn = connect_database(test_cache)
    # Variables
    expected_auth = [53164702100, 57197093438]
    search_auth = [55317901900]
    expected_cols = ['auth_id', 'eid', 'surname', 'initials', 'givenname',
                     'affiliation', 'documents', 'affiliation_id', 'city',
                     'country', 'areas']
    # Insert data: fetch the two profiles from Scopus and cache them.
    q = f"AU-ID({robust_join(expected_auth, sep=') OR AU-ID(')})"
    res = pd.DataFrame(AuthorSearch(q, refresh=refresh).authors, dtype="int64")
    res["auth_id"] = res["eid"].str.split("-").str[-1]
    res = res[expected_cols]
    insert_data(res, conn, table="authors")
    # Retrieve data: ask for all three authors; only search_auth is uncached.
    df = pd.DataFrame(expected_auth + search_auth, columns=["auth_id"],
                      dtype="int64")
    incache, missing = retrieve_authors(df, conn)
    assert_equal(incache.shape[0], 2)
    assert_equal(missing, [55317901900])
def test_retrieve_author_info_authorpubs():
    """Rows inserted into author_pubs come back verbatim; nothing to search."""
    make_database(test_cache, drop=True)
    conn = connect_database(test_cache)
    table = "author_pubs"
    expected = pd.DataFrame(
        {"auth_id": [53164702100, 53164702100],
         "year": [2010, 2017],
         "n_pubs": [0, 6]},
        dtype="int64")
    # Insert the two rows one at a time, as the caching layer would.
    for idx in (0, 1):
        insert_data(expected.iloc[idx].values, conn, table=table)
    incache, tosearch = retrieve_author_info(expected[["auth_id", "year"]], conn, table)
    assert_frame_equal(incache, expected)
    assert_true(tosearch.empty)
def test_retrieve_author_info_authorncits():
    """Rows inserted into author_ncits come back verbatim; nothing to search."""
    make_database(test_cache, drop=True)
    conn = connect_database(test_cache)
    # Variables
    table = "author_ncits"
    data = {"auth_id": [53164702100, 53164702100],
            "year": [2010, 2017], "n_cits": [0, 6]}
    expected = pd.DataFrame(data, dtype="int64")
    # Insert data (whole frame at once, unlike the author_pubs test)
    insert_data(expected, conn, table=table)
    # Retrieve data using only the key columns
    cols = ["auth_id", "year"]
    incache, tosearch = retrieve_author_info(expected[cols], conn, table)
    assert_frame_equal(incache, expected)
    assert_true(tosearch.empty)
def test_retrieve_author_info_authoryear():
    """Cached author-year rows are returned; uncached pairs are listed as missing."""
    make_database(test_cache, drop=True)
    conn = connect_database(test_cache)
    # Variables
    table = "author_year"
    expected_auth = [53164702100, 57197093438]
    search_auth = [55317901900]
    year = 2016
    df2 = pd.DataFrame(expected_auth + search_auth,
                       columns=["auth_id"], dtype="int64")
    df2["year"] = year
    # Insert data: build the author-year rows from a live Scopus query
    fill = robust_join(expected_auth, sep=') OR AU-ID(')
    q = f"(AU-ID({fill})) AND PUBYEAR BEF {year+1}"
    d = build_dict(ScopusSearch(q, refresh=refresh).results, expected_auth)
    expected = pd.DataFrame.from_dict(d, orient="index", dtype="int64")
    expected = expected.sort_index().rename_axis('auth_id').reset_index()
    expected["year"] = year
    expected = expected[['auth_id', 'year', 'first_year', 'n_pubs', 'n_coauth']]
    insert_data(expected, conn, table=table)
    # Retrieve data: only search_auth was never cached for this year
    incache, missing = retrieve_author_info(df2, conn, table)
    assert_frame_equal(incache, expected)
    assert_equal(missing['auth_id'].tolist(), search_auth)
    assert_equal(missing['year'].tolist(), [year])
def test_retrieve_authors_from_sourceyear():
    """Only the cached (source, year) pair is returned; the other stays missing."""
    make_database(test_cache, drop=True)
    conn = connect_database(test_cache)
    # Variables
    expected_sources = [22900]
    expected_years = [2005, 2010]
    df = pd.DataFrame(product(expected_sources, expected_years),
                      columns=["source_id", "year"], dtype="int64")
    # Populate cache with the 2005 data only
    expected = query_pubs_by_sourceyear(expected_sources, expected_years[0],
                                        refresh=refresh)
    expected["source_id"] = expected["source_id"].astype(np.int64)
    expected["afid"] = expected["afid"].astype(int).astype(str)
    expected = expected.sort_values(["auids", "afid"]).reset_index(drop=True)
    expected = expected[['source_id', 'year', 'auids', 'afid']]
    expected["auids"] = expected["auids"].str.split(";")
    insert_data(expected, conn, table="sources_afids")
    # Retrieve from cache; normalize ordering before comparison
    incache, missing = retrieve_authors_from_sourceyear(df, conn)
    incache["afid"] = incache["afid"].astype(int).astype(str)
    incache = incache.sort_values(["auids", "afid"]).reset_index(drop=True)
    assert_frame_equal(incache, expected)
    # The 2010 pair (last row of df) was never cached
    assert_frame_equal(missing, df.tail(1).reset_index(drop=True))
| 5,057 | 0 | 138 |
ae6cb48200b509a82fd2a123306ed243842deb6c | 2,401 | py | Python | project3/module5/deeplearning/ann.py | pmitche/it3105-aiprogramming | 79f3b4a5f624d473b461548b263bcf7ecc0846dc | [
"MIT"
] | 3 | 2015-12-12T15:33:39.000Z | 2019-01-17T13:44:09.000Z | project3/module6/deeplearning/ann.py | pmitche/it3105-aiprogramming | 79f3b4a5f624d473b461548b263bcf7ecc0846dc | [
"MIT"
] | null | null | null | project3/module6/deeplearning/ann.py | pmitche/it3105-aiprogramming | 79f3b4a5f624d473b461548b263bcf7ecc0846dc | [
"MIT"
] | null | null | null | import theano
import theano.tensor as T
from project3.module6.deeplearning.layer import HiddenLayer
| 33.816901 | 104 | 0.568513 | import theano
import theano.tensor as T
from project3.module6.deeplearning.layer import HiddenLayer
class ANN(object):
    """Feed-forward neural network assembled from HiddenLayer blocks.

    Builds an arbitrary stack of hidden layers followed by a softmax output
    layer, then compiles Theano train/predict functions using mean
    categorical cross-entropy and plain SGD.
    """

    def __init__(self, num_in, hidden_list, act_list, num_out, learning_rate):
        """Construct the layer stack and compile the training functions."""
        self.X = T.dmatrix("X")
        self.layers = []
        self.params = []
        for i, width in enumerate(hidden_list):
            prev = self.layers[i - 1] if i > 0 else None
            layer = HiddenLayer(
                input=self.X if prev is None else prev.output,
                num_in=num_in if prev is None else prev.number_of_nodes,
                number_of_nodes=width,
                activation=act_list[i],
            )
            self.layers.append(layer)
            self.params += layer.params
        # The output layer is always softmax so predictions form a distribution.
        output_layer = HiddenLayer(
            input=self.layers[-1].output,
            num_in=self.layers[-1].number_of_nodes,
            number_of_nodes=num_out,
            activation=T.nnet.softmax,
        )
        self.layers.append(output_layer)
        self.params += output_layer.params
        self.train, self.predict = self.compile_model(self.X, self.layers, self.params, learning_rate)

    def compile_model(self, X, layers, params, learning_rate):
        """Build the symbolic graph and return (train, predict) functions."""
        Y = T.dmatrix("Y")

        def sgd_updates(cost_expr, parameters, lr):
            """One plain gradient-descent update pair per parameter."""
            grads = [T.grad(cost=cost_expr, wrt=p) for p in parameters]
            return [(p, p - lr * g) for p, g in zip(parameters, grads)]

        # Mean categorical cross-entropy between the softmax output and Y.
        cost = T.nnet.categorical_crossentropy(layers[-1].output, Y).mean()
        updates = sgd_updates(cost, params, learning_rate)
        prediction = layers[-1].output
        train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)
        predict = theano.function(inputs=[X], outputs=prediction, allow_input_downcast=True)
        return train, predict

    def blind_test(self, images):
        """Normalise raw pixel values into [0, 1] and classify each image.

        NOTE: *images* is modified in place (every value divided by 255).
        """
        for image in images:
            for idx in range(len(image)):
                image[idx] /= 255.0
        return [self.predict([image]) for image in images]
b12d14662f5bdff6c520d8b9801ec290a0a4e2ac | 666 | py | Python | Python/Tkinter/Code With Harry/Tkinter 1 - CWH 24 - Status Bar.py | omkarsutar1255/Python-Data | 169d0c54b23d9dd5a7f1aea41ab385121c3b3c63 | [
"CC-BY-3.0"
] | null | null | null | Python/Tkinter/Code With Harry/Tkinter 1 - CWH 24 - Status Bar.py | omkarsutar1255/Python-Data | 169d0c54b23d9dd5a7f1aea41ab385121c3b3c63 | [
"CC-BY-3.0"
] | null | null | null | Python/Tkinter/Code With Harry/Tkinter 1 - CWH 24 - Status Bar.py | omkarsutar1255/Python-Data | 169d0c54b23d9dd5a7f1aea41ab385121c3b3c63 | [
"CC-BY-3.0"
] | null | null | null | # ========== Status Bar ===========
from tkinter import *
root = Tk()
root.geometry('500x300')
root.title('status bar')
statusvar = StringVar()
statusvar.set('Ready') # first setting statusbar as ready
sbar = Label(root, textvariable=statusvar, relief=RIDGE, anchor='w')
sbar.pack(side=BOTTOM, fill=X)
Button(root, text='Upload', command=upload).pack()
root.mainloop()
# ========== Status Bar ===========
# Demo: a bottom status bar that reports progress while a fake upload runs.
from tkinter import *

root = Tk()
root.geometry('500x300')
root.title('status bar')


def upload():
    """Simulate a slow upload, flipping the status text around the work."""
    status_text.set('Busy..')
    status_label.update()  # force a redraw so 'Busy..' shows before we block
    import time
    time.sleep(2)  # stand-in for the real upload work
    status_text.set('Ready Now')


status_text = StringVar()
status_text.set('Ready')  # initial state

# Left-aligned, ridge-bordered label pinned to the bottom edge.
status_label = Label(root, textvariable=status_text, relief=RIDGE, anchor='w')
status_label.pack(side=BOTTOM, fill=X)

Button(root, text='Upload', command=upload).pack()

root.mainloop()
| 260 | 0 | 23 |
10972feb4d13c4af71ef7723212f3748423ef8b2 | 10,179 | py | Python | one_fm/jinja/print_format/methods.py | mohsinalimat/One-FM | ad9a5d8f785c4e69ca68ba1ef75dd26725e5c9c3 | [
"MIT"
] | null | null | null | one_fm/jinja/print_format/methods.py | mohsinalimat/One-FM | ad9a5d8f785c4e69ca68ba1ef75dd26725e5c9c3 | [
"MIT"
] | null | null | null | one_fm/jinja/print_format/methods.py | mohsinalimat/One-FM | ad9a5d8f785c4e69ca68ba1ef75dd26725e5c9c3 | [
"MIT"
] | null | null | null | import frappe
from datetime import date, datetime
class PrintFormat:
"""
Print format class
"""
def sic_attendance_absent_present(self, doc):
"""
Print format for absent/present in sales invoice
for Contracts
"""
# print format
template, context = sic_attendance_absent_present(doc)
return frappe.render_template(
template, context
)
def sic_single_invoice_separate_attendance(self, doc):
"""
Print format for absent/present in sales invoice
for Contracts
"""
# print format
template, context = sic_single_invoice_separate_attendance(doc)
return frappe.render_template(
template, context
)
pf = PrintFormat()
# ATTENDANCE MAPS
attendance_map = {
'Present': 'p',
'Absent': '',
'On Leave': 'o',
'Half Day': 'h',
'Work From Home': 'w'
}
| 40.392857 | 126 | 0.524217 | import frappe
from datetime import date, datetime
class PrintFormat:
    """Thin dispatcher that renders sales-invoice attendance print formats.

    Each method delegates to the module-level builder function of the same
    name and renders the returned (template, context) pair with frappe.
    """

    def sic_attendance_absent_present(self, doc):
        """Render the absent/present attendance format for a Contracts invoice."""
        template, context = sic_attendance_absent_present(doc)
        return frappe.render_template(template, context)

    def sic_single_invoice_separate_attendance(self, doc):
        """Render the per-site attendance format for a Contracts invoice."""
        template, context = sic_single_invoice_separate_attendance(doc)
        return frappe.render_template(template, context)
# ATTENDANCE MAPS
attendance_map = {
'Present': 'p',
'Absent': '',
'On Leave': 'o',
'Half Day': 'h',
'Work From Home': 'w'
}
def sic_attendance_absent_present(doc):
    """Build the context for the absent/present attendance print format.

    :param doc: a Sales Invoice document; only ``contracts`` and
        ``posting_date`` are read.
    :return: ``(template_path, context)``; both are empty when there is no
        linked Contracts document, no attendance data, or an error occurred.
    """
    context = {}
    try:
        if doc.contracts:
            contracts = frappe.get_doc('Contracts', doc.contracts)
            # %m is the month directive; the original '%Y-%M-%d' parsed the
            # month digits as *minutes*, so the month was always January.
            posting_date = datetime.strptime(str(doc.posting_date), '%Y-%m-%d')
            first_day = frappe.utils.get_first_day(doc.posting_date).day
            last_day = frappe.utils.get_last_day(doc.posting_date).day
            # Zero-padded date bounds of the invoice month, shared by queries.
            month_start = f"{posting_date.year}-{posting_date.month:02}-{first_day:02}"
            month_end = f"{posting_date.year}-{posting_date.month:02}-{last_day:02}"
            # SQL IN-list of the contract's sale items; the leading '' keeps
            # the list non-empty when the contract has no items.
            # NOTE(review): values are interpolated, not parameterized -- they
            # come from internal documents, but frappe.db.sql parameters
            # would be safer.
            sale_items = "(''" + "".join(f", '{i.item_code}'" for i in contracts.items) + ")"
            # get post_type in attendance
            post_types_query = frappe.db.sql(f"""
                SELECT pt.name, pt.post_name, pt.sale_item, at.post_type
                FROM `tabPost Type` pt JOIN `tabAttendance` at
                ON pt.name=at.post_type
                WHERE at.attendance_date BETWEEN '{month_start}'
                AND '{month_end}'
                AND at.project="{contracts.project}"
                AND at.docstatus=1 AND pt.sale_item IN {sale_items}
                GROUP BY pt.name
            ;""", as_dict=1)
            # IN-list of matching post types; with no matches use ('') so the
            # attendance query returns nothing (the original appended a stray
            # ')' in that case, producing malformed SQL).
            if post_types_query:
                post_types = "(''" + "".join(f", '{pt.name}'" for pt in post_types_query) + ")"
            else:
                post_types = "('')"
            attendances = frappe.db.sql(f"""
                SELECT at.employee, em.employee_id, em.employee_name,
                at.post_type, at.status, at.project, at.site, at.attendance_date
                FROM `tabAttendance` at JOIN `tabEmployee` em
                ON at.employee=em.name WHERE at.attendance_date
                BETWEEN '{month_start}'
                AND '{month_end}'
                AND at.project="{contracts.project}"
                AND at.docstatus=1 AND at.post_type IN {post_types}
                ORDER BY at.employee ASC
                ;
                """, as_dict=1)
            # First row is the table header; days are numbered columns.
            results = [
                {'sn': 'S/N', 'employee_id': 'Employee ID',
                 'employee_name': 'Employee Name',
                 'days_worked': [{i: i} for i in range(first_day, last_day + 1)]}
            ]
            # One record per employee with a day -> code map of attendance.
            employee_dict = {}
            for att in attendances:
                day_code = attendance_map.get(att.status)
                if employee_dict.get(att.employee):
                    employee_dict[att.employee]['days_worked'][att.attendance_date.day] = day_code
                else:
                    employee_dict[att.employee] = {**att, **{'days_worked': {att.attendance_date.day: day_code}}}
            # Days on/after the contract due date with no attendance record
            # are presumed present.
            due_date = int(contracts.due_date) or 28
            count_loop = 1
            for emp, rec in employee_dict.items():
                days_worked = []
                for month_day in range(first_day, last_day + 1):
                    code = rec['days_worked'].get(month_day)
                    if code:
                        days_worked.append(code)
                    elif month_day >= due_date:
                        days_worked.append('p')
                    else:
                        days_worked.append('')
                results.append({
                    'sn': count_loop, 'employee_id': rec.get('employee_id'),
                    'employee_name': rec.get('employee_name'),
                    'days_worked': days_worked
                })
                count_loop += 1
            if employee_dict:
                context = {'results': results}
                return 'one_fm/jinja/print_format/templates/sic_attendance_absent_present.html', context
            return '', context
        # No linked Contracts document: the original fell through here and
        # implicitly returned None, breaking tuple unpacking in callers.
        return '', context
    except Exception as e:
        frappe.log_error(str(e), 'Print Format')
        return '', {}
def sic_single_invoice_separate_attendance(doc):
    """Build the context for the per-site attendance print format.

    Same data gathering as sic_attendance_absent_present(), but employees are
    grouped by site so the template can print one attendance table per site.

    :param doc: a Sales Invoice document; only ``contracts`` and
        ``posting_date`` are read.
    :return: ``(template_path, context)``; both are empty when there is no
        linked Contracts document, no attendance data, or an error occurred.
    """
    context = {}
    try:
        if doc.contracts:
            contracts = frappe.get_doc('Contracts', doc.contracts)
            # %m is the month directive; the original '%Y-%M-%d' parsed the
            # month digits as *minutes*, so the month was always January.
            posting_date = datetime.strptime(str(doc.posting_date), '%Y-%m-%d')
            first_day = frappe.utils.get_first_day(doc.posting_date).day
            last_day = frappe.utils.get_last_day(doc.posting_date).day
            # Zero-padded date bounds of the invoice month, shared by queries.
            month_start = f"{posting_date.year}-{posting_date.month:02}-{first_day:02}"
            month_end = f"{posting_date.year}-{posting_date.month:02}-{last_day:02}"
            # SQL IN-list of the contract's sale items (leading '' keeps the
            # list non-empty). NOTE(review): interpolated, not parameterized.
            sale_items = "(''" + "".join(f", '{i.item_code}'" for i in contracts.items) + ")"
            # get post_type in attendance
            post_types_query = frappe.db.sql(f"""
                SELECT pt.name, pt.post_name, pt.sale_item, at.post_type
                FROM `tabPost Type` pt JOIN `tabAttendance` at
                ON pt.name=at.post_type
                WHERE at.attendance_date BETWEEN '{month_start}'
                AND '{month_end}'
                AND at.project="{contracts.project}"
                AND at.docstatus=1 AND pt.sale_item IN {sale_items}
                GROUP BY pt.name
            ;""", as_dict=1)
            # With no matching post types use ('') so the attendance query
            # returns nothing (the original appended a stray ')' in that
            # case, producing malformed SQL).
            if post_types_query:
                post_types = "(''" + "".join(f", '{pt.name}'" for pt in post_types_query) + ")"
            else:
                post_types = "('')"
            attendances = frappe.db.sql(f"""
                SELECT at.employee, em.employee_id, em.employee_name,
                at.post_type, at.status, at.project, at.site, at.attendance_date
                FROM `tabAttendance` at JOIN `tabEmployee` em
                ON at.employee=em.name WHERE at.attendance_date
                BETWEEN '{month_start}'
                AND '{month_end}'
                AND at.project="{contracts.project}"
                AND at.docstatus=1 AND at.post_type IN {post_types}
                ORDER BY at.employee ASC
                ;
                """, as_dict=1)
            # Header row shared by every site's table.
            header = [
                {'sn': 'S/N', 'employee_id': 'Employee ID',
                 'employee_name': 'Employee Name',
                 'days_worked': [{i: i} for i in range(first_day, last_day + 1)]}
            ]
            employee_dict = {}
            sites = {}
            for att in attendances:
                if not sites.get(att.site):
                    sites[att.site] = {'employees': [], 'sitename': att.site}
                day_code = attendance_map.get(att.status)
                if employee_dict.get(att.employee):
                    employee_dict[att.employee]['days_worked'][att.attendance_date.day] = day_code
                else:
                    employee_dict[att.employee] = {**att, **{'days_worked': {att.attendance_date.day: day_code}}}
            # Days on/after the contract due date with no attendance record
            # are presumed present.
            due_date = int(contracts.due_date) or 28
            count_loop = 1
            for emp, rec in employee_dict.items():
                days_worked = []
                for month_day in range(first_day, last_day + 1):
                    code = rec['days_worked'].get(month_day)
                    if code:
                        days_worked.append(code)
                    elif month_day >= due_date:
                        days_worked.append('p')
                    else:
                        days_worked.append('')
                sites[rec.get('site')]['employees'].append({
                    'sn': count_loop, 'employee_id': rec.get('employee_id'),
                    'site': rec.get('site'), 'post_type': rec.get('post_type'),
                    'employee_name': rec.get('employee_name'),
                    'days_worked': days_worked
                })
                count_loop += 1
            if employee_dict:
                context = {'header': header, 'sites': sites}
                return 'one_fm/jinja/print_format/templates/sic_single_invoice_separate_attendance.html', context
            return '', context
        # No linked Contracts document: the original fell through here and
        # implicitly returned None, breaking tuple unpacking in callers.
        return '', context
    except Exception as e:
        frappe.log_error(str(e), 'Print Format')
        return '', {}
| 9,200 | 0 | 46 |
ba3b7ed149d4dfe9c507caf3cf9fff861f5ae4be | 18,373 | py | Python | python/rearview/console.py | m3047/rear_view_rpz | a646d740325f58dbe50c44fa7fd61a1753d1c822 | [
"Apache-2.0"
] | 8 | 2021-12-28T20:13:54.000Z | 2022-02-22T02:17:36.000Z | python/rearview/console.py | m3047/rear_view_rpz | a646d740325f58dbe50c44fa7fd61a1753d1c822 | [
"Apache-2.0"
] | 3 | 2021-11-17T22:41:32.000Z | 2022-01-20T18:11:05.000Z | python/rearview/console.py | m3047/rear_view_rpz | a646d740325f58dbe50c44fa7fd61a1753d1c822 | [
"Apache-2.0"
] | 2 | 2021-12-28T20:13:47.000Z | 2022-01-05T05:20:49.000Z | #!/usr/bin/python3
# Copyright (c) 2021-2022 by Fred Morris Tacoma WA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interactive console.
This console is enabled by setting for example
CONSOLE = { 'host':'127.0.0.1', 'port':3047 }
in the configuration file.
The purpose of the console is to allow interactive examination of in-memory
data structures and caches.
The commands are synchronous with respect to the operation of the server, which
is to say the server isn't doing anything else until the underlying operation
has completed. This provides a better snapshot of the state at any given moment,
but can negatively impact data collection from a busy server.
IPv6 addresses are expected to be in the compressed rather than expanded format.
The following commands are supported:
Address to zone correlation
---------------------------
a2z
Perform a crosscheck of the addresses in db.RearView.associations and
rpz.RPZ.contents. Technically the former are addresses (1.2.3.4), while the
latter are PTR FQDNs (4.3.2.1.in-addr.arpa).
Address details
---------------
addr{ess} <some-address>
Get details regarding an address' resolutions and best resolution, and
whether this is reflected in the zone construct. The final line has a bunch
of cryptic legends and numbers. Those are:
* fs first seen delta from now (seconds)
* ls last seen delta from now (seconds)
* qc query count
* qt query trend
* h heuristic or score
Zone details
------------
entry <some-address>
Compares what is in the in-memory zone view to what is actually present in
the zone-as-served. NOTE THAT THE ACTUAL DNS REQUEST IS SYNCHRONOUS. This
command causes a separate DNS request to be issued outside of the TCP
connection, which negatively impacts performance of the agent.
Queue depth
-----------
qd
The depths of various processing queues.
Cache eviction queue
--------------------
cache [<|>] <number>
Display information about the entries (addresses) at the beginning (<)
or end (>) of the queue. The specified number of entries is displayed.
Cache Evictions
---------------
evict{ions} <number>
Displays a logic readout of the most recent "n" cache evictions. There is
an internal limit on the number of evictions which are retained for
review.
Zone Data Refresh
-----------------
refr{esh} <number>
Displays a logic readout of the most recent "n" zone refresh batches. Resolutions
which survive "sheep shearing" (cache eviction) are scheduled for having updated
information written back to the zone file in batches to minimize performance impacts;
if things are really busy everything may not get refreshed.
Batches go through three phases, at least for logging purposes:
1) The batch is created.
2) The batch is accumulating addresses to update with fresh information.
3) The batch is written to the zone as an update.
Quit
----
quit
Ends the console session; no other response occurs.
Response Codes
--------------
Each response line is prepended by one of these codes and an ASCII space.
200 Success, single line output.
210 Success, beginning of multi-line output.
212 Success, continuation line.
400 User error / bad request.
500 Not found or internal error.
"""
import time
import logging
import asyncio
from dns.resolver import Resolver
from .rpz import reverse_to_address, address_to_reverse
from .heuristic import heuristic_func
class Request(object):
    """Everything to do with processing a request.

    The idiom is generally Request(message).response and then do whatever is sensible
    with response. Response can be nothing, in which case there is nothing further
    to do.
    """
    # Maps each verb to the number of whitespace-separated tokens its command
    # line must contain (verb included); e.g. "cache > 10" has three tokens.
    COMMANDS = dict(a2z=1, address=2, entry=2, qd=1, cache=3, evictions=2, refresh=2, quit=1)
    # Verbs longer than four characters; per the module docstring these may
    # be abbreviated (addr{ess}, evict{ions}, refr{esh}) -- presumably
    # normalised during validation.
    ABBREVIATED = { k for k in COMMANDS.keys() if len(k) > 4 }
def dispatch_request(self, request):
"""Called by __init__() to dispatch the request."""
failed = self.validate_request(request)
if failed:
code,response = self.bad_request(failed)
else:
verb = request[0].lower()
code,response = getattr(self, verb)(request)
if self.quit_session:
response = ''
return
if len(response) == 1:
self.response = '{} {}\n'.format(code, response[0])
else:
self.response = '\n'.join(
( '{} {}'.format( line and 212 or 210, text )
for line,text in enumerate(response)
)
) + '\n'
return
def a2z(self, request):
"""a2z"""
addresses = sorted(self.rear_view.associations.addresses.keys())
zonekeys = sorted(
[
( reverse_to_address( zk ), zk )
for zk in self.rear_view.rpz.contents.keys()
]
)
response = []
addrs = 0
zks = 0
while addresses or zonekeys:
if addresses[0] < zonekeys[0][0]:
response.append('< {}'.format(addresses.pop(0)))
addrs += 1
elif addresses[0] > zonekeys[0][0]:
response.append('> {}'.format(zonekeys.pop(0)[1]))
zks += 1
else:
del addresses[0]
del zonekeys[0]
return 200, response
def address(self, request):
"""addr{ess} <some-address>
Kind of a hot mess, but here's what's going on:
* If there's no best resolution it could be that's because it was loaded
from the actual zone file, which we can tell if it has a depth > 1 and
the first entry is None.
* Other things.
"""
addr = request[1]
addresses = self.rear_view.associations.addresses
if addr not in addresses:
return 500, ['not found']
addr_rec = addresses[addr]
best = addr_rec.best_resolution
zone_key = address_to_reverse(addr)
if zone_key in self.rear_view.rpz.contents:
ptr = self.rear_view.rpz.contents[zone_key].ptr
ptr_chain = addr_rec.match(ptr)
else:
ptr = ptr_chain = None
if best is None:
best_chain = None
else:
best_chain = best.chain
response = []
if best is None:
if not (ptr_chain and ptr_chain[0] == None):
response.append('! no best resolution')
else:
if best.chain not in addr_rec.resolutions:
response.append('! best resolution not in chains')
for resolution in sorted(addr_rec.resolutions.values()):
response.append(
'{} {}'.format(
(best is not None and best == resolution) and '***' or ' ',
resolution.chain
)
)
now = time.time()
response.append(
' fs:{:0.1f} ls:{:0.1f} qc:{:d} qt:{:0.1f} h:{:0.1f}'.format(
resolution.first_seen-now, resolution.last_seen-now, resolution.query_count, resolution.query_trend,
heuristic_func(resolution)
)
)
zone_key = address_to_reverse(addr)
if zone_key in self.rear_view.rpz.contents:
response.append('-> {}'.format(self.rear_view.rpz.contents[zone_key].ptr))
else:
response.append('-> MISSING FROM ZONE CONTENTS')
return 200, response
def entry(self, request):
"""entry <some-address>"""
addr = request[1]
zone_key = address_to_reverse(addr)
rpz = self.rear_view.rpz
contents = rpz.contents
if zone_key not in contents:
memory_value = '** MISSING **'
else:
memory_value = contents[zone_key].ptr
try:
resolver = Resolver()
resolver.nameservers = [rpz.server]
answer = resolver.query(zone_key + '.' + rpz.rpz, 'PTR', source=rpz.server)
server_value = answer[0].target.to_text().rstrip('.')
except Exception as e:
server_value = '** ' + type(e).__name__ + ' **'
return 200, ['{} {}'.format(memory_value, server_value)]
def qd(self, request):
"""qd"""
response = []
response.append(
'association: {}'.format(self.rear_view.association_queue.qsize())
)
response.append(
'solver: {}'.format(self.rear_view.solver_queue.qsize())
)
response.append(
'eviction: {}'.format(self.rear_view.cache_eviction_scheduled)
)
response.append(
'zone updates: {}'.format(self.rear_view.rpz.task_queue.qsize())
)
return 200, response
def cache(self, request):
"""cache [<|>] <number>"""
which_end = request[1]
if which_end not in '<>':
return self.bad_request('expected "<" or ">"')
try:
n_addrs = int(request[2])
if n_addrs < 1:
raise ValueError
except:
return self.bad_request('expected a positive integer value')
associations = self.rear_view.associations
response = []
res_addrs = sum((len(a.resolutions) for a in associations.addresses.values()))
res_cache = associations.n_resolutions
response.append(
'Actual Resolutions in cache: {} actual: {}'.format(res_cache, res_addrs)
)
cache = associations.cache
if n_addrs > len(cache):
n_addrs = len(cache)
if which_end == '<':
i = 0
inc = 1
else:
i = -1
inc = -1
while n_addrs:
address = cache[i]
response.append(
'{} ({})'.format(address.address, len(address.resolutions))
)
i += inc
n_addrs -= 1
return 200, response
def evictions(self, request):
"""evictions <number>"""
try:
n_evicts = int(request[1])
if n_evicts < 1:
raise ValueError
except:
return self.bad_request('expected a positive integer value')
logger = self.rear_view.associations.logger
response = []
if n_evicts > len(logger):
n_evicts = len(logger)
base = n_evicts * -1
for n in range(n_evicts):
entry = logger.log[base + n]
response.append('** {:0.3f} **'.format(entry.timestamp - time.time()))
response.append(
'Resolutions:'
)
response.append(
' Overage:{:>6d} Target:{:>6d} Working:{:>6d} N After:{:>6d}'.format(
*[entry[k] for k in 'overage target_pool_size working_pool_size n_resolutions'.split()]
)
)
response.append(
'Addresses:'
)
n_addresses = entry['n_addresses']
response.append(
' Selected:{:>6d} {:>10s}:{:>6d} Affected:{:>6d} Deleted:{:>6d}'.format(
n_addresses,
(n_addresses > 1 and 'Recycled' or 'Single'),
(n_addresses > 1 and len(entry['recycled_addresses']) or entry['single_address']),
len(entry['affected_addresses']), len(entry['deleted_addresses'])
)
)
response.append(
'Affected:'
)
response += [' {}'.format(k) for k in sorted(entry['affected_addresses'])]
response.append(
'Deleted:'
)
response += [' {}'.format(k) for k in sorted(entry['deleted_addresses'])]
if n_addresses > 1:
response.append(
'Recycled:'
)
response += [' {}'.format(k) for k in sorted(entry['recycled_addresses'])]
response.append(
'Candidates:'
)
for candidate in sorted(entry['candidates']):
response.append(
' {:>8.1f} {:>3d} {}'.format(candidate[0], candidate[1], candidate[2].address)
)
response.append(
' {}'.format(candidate[3].chain)
)
return 200, response
def refresh(self, request):
"""refresh <number>"""
try:
n_updates = int(request[1])
if n_updates < 1:
raise ValueError
except:
return self.bad_request('expected a positive integer value')
logger = self.rear_view.rpz.batch_logger
response = []
if n_updates > len(logger):
n_updates = len(logger)
base = n_updates * -1
for n in range(n_updates):
# NOTE: The state will determine what can be displayed, and in turn is determined
# by the keys which are available if you read rpz.BatchLogger.
entry = logger.log[base + n]
state = logger.state(base + n)
response.append('** {:0.3f} {} **'.format(entry.timestamp - time.time(), state.upper()))
if logger.STATE[state] <= logger.STATE['accumulating']:
batch_size = (
logger.STATE[state] <= logger.STATE['complete']
and 'Batch Size:{:>4d}'.format(entry['batch_size'])
or ''
)
response.append(
'Add Calls:{:>4d} Total to Process:{:>4d} {}'.format(
entry['add_calls'], entry['to_process'], batch_size
)
)
good = entry.get('recycled_good', 0)
no_best = entry.get('recycled_no_best_resolution', 0)
no_resolutions = entry.get('recycled_no_resolutions', 0)
# TODO: This is looking for mangled sheep. It can be removed at some point
# in the future when nobody remembers what this is about.
if (good + no_best + no_resolutions) != entry.get('recycled', 0):
response.append('Sheep are disappearing! (hopefully nobody sees this)')
#
response.append(
'Recycled Good:{:>4d} No Best:{:>4d} No Resolutions:{:>4d}'.format(
good, no_best, no_resolutions
)
)
if logger.STATE[state] <= logger.STATE['complete']:
response.append(
'RCode:{:>3d} Wire Size Request:{:>5d} Response:{:>4d}'.format(
entry['update_rcode'], entry['wire_req_bytes'], entry['wire_resp_bytes']
)
)
if logger.STATE[state] <= logger.STATE['writing']:
processing = (
logger.STATE[state] <= logger.STATE['complete']
and 'Processing:{:>0.3f}'.format(entry['completion_timestamp']-entry['threshold_timestamp'])
or ''
)
response.append(
'Elapsed Accumulating:{:>0.3f} {}'.format(
entry['threshold_timestamp'] - entry.timestamp, processing
)
)
return 200, response
def quit(self, request):
"""quit"""
self.quit_session = True
return 200, []
def bad_request(self, reason):
"""A bad/unrecognized request."""
return 400, [reason]
class Context(object):
"""Context for the console."""
def __init__(self, dnstap=None):
"""Create a context object.
dnstap is normally set in code, but we pass it in with a default of
None to make its presence known.
"""
self.dnstap = dnstap
return
| 33.898524 | 128 | 0.549012 | #!/usr/bin/python3
# Copyright (c) 2021-2022 by Fred Morris Tacoma WA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interactive console.
This console is enabled by setting for example
CONSOLE = { 'host':'127.0.0.1', 'port':3047 }
in the configuration file.
The purpose of the console is to allow interactive examination of in-memory
data structures and caches.
The commands are synchronous with respect to the operation of the server, which
is to say the server isn't doing anything else until the underlying operation
has completed. This provides a better snapshot of the state at any given moment,
but can negatively impact data collection from a busy server.
IPv6 addresses are expected to be in the compressed rather than expanded format.
The following commands are supported:
Address to zone correlation
---------------------------
a2z
Perform a crosscheck of the addresses in db.RearView.associations and
rpz.RPZ.contents. Technically the former are addresses (1.2.3.4), while the
latter are PTR FQDNs (4.3.2.1.in-addr.arpa).
Address details
---------------
addr{ess} <some-address>
Get details regarding an address' resolutions and best resolution, and
whether this is reflected in the zone construct. The final line has a bunch
of cryptic legends and numbers. Those are:
* fs first seen delta from now (seconds)
* ls last seen delta from now (seconds)
* qc query count
* qt query trend
* h heuristic or score
Zone details
------------
entry <some-address>
Compares what is in the in-memory zone view to what is actually present in
the zone-as-served. NOTE THAT THE ACTUAL DNS REQUEST IS SYNCHRONOUS. This
command causes a separate DNS request to be issued outside of the TCP
connection, which negatively impacts performance of the agent.
Queue depth
-----------
qd
The depths of various processing queues.
Cache eviction queue
--------------------
cache [<|>] <number>
Display information about the entries (addresses) at the beginning (<)
or end (>) of the queue. The specified number of entries is displayed.
Cache Evictions
---------------
evict{ions} <number>
Displays a logic readout of the most recent "n" cache evictions. There is
an internal limit on the number of evictions which are retained for
review.
Zone Data Refresh
-----------------
refr{esh} <number>
Displays a logic readout of the most recent "n" zone refresh batches. Resolutions
which survive "sheep shearing" (cache eviction) are scheduled for having updated
information written back to the zone file in batches to minimize performance impacts;
if things are really busy everything may not get refreshed.
Batches go through three phases, at least for logging purposes:
1) The batch is created.
2) The batch is accumulating addresses to update with fresh information.
3) The batch is written to the zone as an update.
Quit
----
quit
Ends the console session; no other response occurs.
Response Codes
--------------
Each response line is prepended by one of these codes and an ASCII space.
200 Success, single line output.
210 Success, beginning of multi-line output.
212 Success, continuation line.
400 User error / bad request.
500 Not found or internal error.
"""
import time
import logging
import asyncio
from dns.resolver import Resolver
from .rpz import reverse_to_address, address_to_reverse
from .heuristic import heuristic_func
class Request(object):
    """Everything to do with processing a request.
    The idiom is generally Request(message).response and then do whatever is sensible
    with response. Response can be nothing, in which case there is nothing further
    to do.
    """
    # Verb -> number of whitespace-separated tokens a well-formed request
    # must have (the verb itself included).
    COMMANDS = dict(a2z=1, address=2, entry=2, qd=1, cache=3, evictions=2, refresh=2, quit=1)
    # Verbs longer than four characters may be abbreviated to any prefix of
    # at least four characters (expanded in validate_request()).
    ABBREVIATED = { k for k in COMMANDS.keys() if len(k) > 4 }
def __init__(self, message, dnstap):
self.rear_view = dnstap.rear_view
self.response = ""
self.quit_session = False
request = message.strip().split()
if not request:
return
self.dispatch_request(request)
return
def validate_request(self, request):
verb = request[0].lower()
if len(verb) >= 4:
for v in self.ABBREVIATED:
if v.startswith(verb):
verb = request[0] = v
if verb not in self.COMMANDS:
return 'unrecognized command'
if len(request) != self.COMMANDS[verb]:
return 'improperly formed request'
return ''
def dispatch_request(self, request):
"""Called by __init__() to dispatch the request."""
failed = self.validate_request(request)
if failed:
code,response = self.bad_request(failed)
else:
verb = request[0].lower()
code,response = getattr(self, verb)(request)
if self.quit_session:
response = ''
return
if len(response) == 1:
self.response = '{} {}\n'.format(code, response[0])
else:
self.response = '\n'.join(
( '{} {}'.format( line and 212 or 210, text )
for line,text in enumerate(response)
)
) + '\n'
return
def a2z(self, request):
"""a2z"""
addresses = sorted(self.rear_view.associations.addresses.keys())
zonekeys = sorted(
[
( reverse_to_address( zk ), zk )
for zk in self.rear_view.rpz.contents.keys()
]
)
response = []
addrs = 0
zks = 0
while addresses or zonekeys:
if addresses[0] < zonekeys[0][0]:
response.append('< {}'.format(addresses.pop(0)))
addrs += 1
elif addresses[0] > zonekeys[0][0]:
response.append('> {}'.format(zonekeys.pop(0)[1]))
zks += 1
else:
del addresses[0]
del zonekeys[0]
return 200, response
    def address(self, request):
        """addr{ess} <some-address>
        Kind of a hot mess, but here's what's going on:
        * If there's no best resolution it could be that's because it was loaded
        from the actual zone file, which we can tell if it has a depth > 1 and
        the first entry is None.
        * Other things.
        """
        addr = request[1]
        addresses = self.rear_view.associations.addresses
        if addr not in addresses:
            return 500, ['not found']
        addr_rec = addresses[addr]
        best = addr_rec.best_resolution
        # Pull the PTR currently published in the zone (if any) and the
        # chain in this record which matches it.
        zone_key = address_to_reverse(addr)
        if zone_key in self.rear_view.rpz.contents:
            ptr = self.rear_view.rpz.contents[zone_key].ptr
            ptr_chain = addr_rec.match(ptr)
        else:
            ptr = ptr_chain = None
        # NOTE(review): best_chain is computed but never read below.
        if best is None:
            best_chain = None
        else:
            best_chain = best.chain
        response = []
        if best is None:
            # A chain whose first entry is None indicates a record loaded
            # from the zone file (per the docstring), which legitimately has
            # no best resolution; only flag the other cases.
            if not (ptr_chain and ptr_chain[0] == None):
                response.append('! no best resolution')
        else:
            if best.chain not in addr_rec.resolutions:
                response.append('! best resolution not in chains')
        # One pair of lines per resolution; '***' marks the best one.
        for resolution in sorted(addr_rec.resolutions.values()):
            response.append(
                '{} {}'.format(
                    (best is not None and best == resolution) and '***' or '   ',
                    resolution.chain
                )
            )
            now = time.time()
            # Cryptic legends (see module docstring): fs/ls are first/last
            # seen deltas from now, qc query count, qt query trend, h the
            # heuristic score.
            response.append(
                '    fs:{:0.1f} ls:{:0.1f} qc:{:d} qt:{:0.1f} h:{:0.1f}'.format(
                    resolution.first_seen-now, resolution.last_seen-now, resolution.query_count, resolution.query_trend,
                    heuristic_func(resolution)
                )
            )
        # NOTE(review): zone_key is recomputed here although it cannot have
        # changed since the computation above.
        zone_key = address_to_reverse(addr)
        if zone_key in self.rear_view.rpz.contents:
            response.append('-> {}'.format(self.rear_view.rpz.contents[zone_key].ptr))
        else:
            response.append('-> MISSING FROM ZONE CONTENTS')
        return 200, response
def entry(self, request):
"""entry <some-address>"""
addr = request[1]
zone_key = address_to_reverse(addr)
rpz = self.rear_view.rpz
contents = rpz.contents
if zone_key not in contents:
memory_value = '** MISSING **'
else:
memory_value = contents[zone_key].ptr
try:
resolver = Resolver()
resolver.nameservers = [rpz.server]
answer = resolver.query(zone_key + '.' + rpz.rpz, 'PTR', source=rpz.server)
server_value = answer[0].target.to_text().rstrip('.')
except Exception as e:
server_value = '** ' + type(e).__name__ + ' **'
return 200, ['{} {}'.format(memory_value, server_value)]
def qd(self, request):
"""qd"""
response = []
response.append(
'association: {}'.format(self.rear_view.association_queue.qsize())
)
response.append(
'solver: {}'.format(self.rear_view.solver_queue.qsize())
)
response.append(
'eviction: {}'.format(self.rear_view.cache_eviction_scheduled)
)
response.append(
'zone updates: {}'.format(self.rear_view.rpz.task_queue.qsize())
)
return 200, response
def cache(self, request):
"""cache [<|>] <number>"""
which_end = request[1]
if which_end not in '<>':
return self.bad_request('expected "<" or ">"')
try:
n_addrs = int(request[2])
if n_addrs < 1:
raise ValueError
except:
return self.bad_request('expected a positive integer value')
associations = self.rear_view.associations
response = []
res_addrs = sum((len(a.resolutions) for a in associations.addresses.values()))
res_cache = associations.n_resolutions
response.append(
'Actual Resolutions in cache: {} actual: {}'.format(res_cache, res_addrs)
)
cache = associations.cache
if n_addrs > len(cache):
n_addrs = len(cache)
if which_end == '<':
i = 0
inc = 1
else:
i = -1
inc = -1
while n_addrs:
address = cache[i]
response.append(
'{} ({})'.format(address.address, len(address.resolutions))
)
i += inc
n_addrs -= 1
return 200, response
    def evictions(self, request):
        """evictions <number>

        Render a logic readout of the most recent <number> cache eviction
        log entries (capped at the number retained by the logger).
        """
        try:
            n_evicts = int(request[1])
            if n_evicts < 1:
                raise ValueError
        except:
            return self.bad_request('expected a positive integer value')
        logger = self.rear_view.associations.logger
        response = []
        # Clamp to the number of retained entries.
        if n_evicts > len(logger):
            n_evicts = len(logger)
        # Walk the most recent n entries, oldest of them first.
        base = n_evicts * -1
        for n in range(n_evicts):
            entry = logger.log[base + n]
            # Header: age of the entry, as a negative delta from now.
            response.append('** {:0.3f} **'.format(entry.timestamp - time.time()))
            response.append(
                'Resolutions:'
            )
            response.append(
                '  Overage:{:>6d} Target:{:>6d} Working:{:>6d} N After:{:>6d}'.format(
                    *[entry[k] for k in 'overage target_pool_size working_pool_size n_resolutions'.split()]
                )
            )
            response.append(
                'Addresses:'
            )
            n_addresses = entry['n_addresses']
            # With a single selected address the entry carries
            # 'single_address'; with several it carries 'recycled_addresses'.
            response.append(
                '  Selected:{:>6d} {:>10s}:{:>6d} Affected:{:>6d} Deleted:{:>6d}'.format(
                    n_addresses,
                    (n_addresses > 1 and 'Recycled' or 'Single'),
                    (n_addresses > 1 and len(entry['recycled_addresses']) or entry['single_address']),
                    len(entry['affected_addresses']), len(entry['deleted_addresses'])
                )
            )
            response.append(
                'Affected:'
            )
            response += ['  {}'.format(k) for k in sorted(entry['affected_addresses'])]
            response.append(
                'Deleted:'
            )
            response += ['  {}'.format(k) for k in sorted(entry['deleted_addresses'])]
            if n_addresses > 1:
                response.append(
                    'Recycled:'
                )
                response += ['  {}'.format(k) for k in sorted(entry['recycled_addresses'])]
            response.append(
                'Candidates:'
            )
            # Each candidate is a tuple; indexes 2 and 3 carry the address
            # record and a resolution respectively (per the attribute use).
            for candidate in sorted(entry['candidates']):
                response.append(
                    '  {:>8.1f} {:>3d} {}'.format(candidate[0], candidate[1], candidate[2].address)
                )
                response.append(
                    '      {}'.format(candidate[3].chain)
                )
        return 200, response
    def refresh(self, request):
        """refresh <number>

        Render a logic readout of the most recent <number> zone refresh
        batches; which fields print depends on how far the batch has
        progressed (created / accumulating / writing / complete).
        """
        try:
            n_updates = int(request[1])
            if n_updates < 1:
                raise ValueError
        except:
            return self.bad_request('expected a positive integer value')
        logger = self.rear_view.rpz.batch_logger
        response = []
        # Clamp to the number of retained entries.
        if n_updates > len(logger):
            n_updates = len(logger)
        base = n_updates * -1
        for n in range(n_updates):
            # NOTE: The state will determine what can be displayed, and in turn is determined
            # by the keys which are available if you read rpz.BatchLogger.
            entry = logger.log[base + n]
            state = logger.state(base + n)
            response.append('** {:0.3f} {} **'.format(entry.timestamp - time.time(), state.upper()))
            # NOTE(review): the <= comparisons imply lower STATE values mean
            # later phases -- confirm against rpz.BatchLogger.STATE ordering.
            if logger.STATE[state] <= logger.STATE['accumulating']:
                # Batch size is only known once the batch is complete.
                batch_size = (
                    logger.STATE[state] <= logger.STATE['complete']
                    and 'Batch Size:{:>4d}'.format(entry['batch_size'])
                    or ''
                )
                response.append(
                    'Add Calls:{:>4d} Total to Process:{:>4d} {}'.format(
                        entry['add_calls'], entry['to_process'], batch_size
                    )
                )
                good = entry.get('recycled_good', 0)
                no_best = entry.get('recycled_no_best_resolution', 0)
                no_resolutions = entry.get('recycled_no_resolutions', 0)
                # TODO: This is looking for mangled sheep. It can be removed at some point
                # in the future when nobody remembers what this is about.
                if (good + no_best + no_resolutions) != entry.get('recycled', 0):
                    response.append('Sheep are disappearing! (hopefully nobody sees this)')
                #
                response.append(
                    'Recycled Good:{:>4d} No Best:{:>4d} No Resolutions:{:>4d}'.format(
                        good, no_best, no_resolutions
                    )
                )
            if logger.STATE[state] <= logger.STATE['complete']:
                # DNS UPDATE outcome and wire sizes.
                response.append(
                    'RCode:{:>3d} Wire Size Request:{:>5d} Response:{:>4d}'.format(
                        entry['update_rcode'], entry['wire_req_bytes'], entry['wire_resp_bytes']
                    )
                )
            if logger.STATE[state] <= logger.STATE['writing']:
                # Processing time is only known once the batch is complete.
                processing = (
                    logger.STATE[state] <= logger.STATE['complete']
                    and 'Processing:{:>0.3f}'.format(entry['completion_timestamp']-entry['threshold_timestamp'])
                    or ''
                )
                response.append(
                    'Elapsed Accumulating:{:>0.3f} {}'.format(
                        entry['threshold_timestamp'] - entry.timestamp, processing
                    )
                )
        return 200, response
def quit(self, request):
"""quit"""
self.quit_session = True
return 200, []
def bad_request(self, reason):
"""A bad/unrecognized request."""
return 400, [reason]
class Context(object):
    """Context for the console.

    Holds the reference to the dnstap agent and serves console TCP
    sessions via handle_requests().
    """
    def __init__(self, dnstap=None):
        """Create a context object.
        dnstap is normally set in code, but we pass it in with a default of
        None to make its presence known.
        """
        self.dnstap = dnstap
        return
    async def handle_requests(self, reader, writer):
        """Serve one console session: prompt, read a line, dispatch, reply.

        The loop ends when the peer closes the stream, sends undecodable
        bytes, or issues the quit command.
        """
        remote_addr = writer.get_extra_info('peername')
        while True:
            writer.write('# '.encode())
            data = await reader.readline()
            try:
                message = data.decode()
            except UnicodeDecodeError:
                # BUG FIX: logging.warn() is a deprecated alias; use warning().
                logging.warning('Invalid characters in stream (UnicodeDecodeError), closing connection for {}'.format(remote_addr))
                break
            if not message:
                # EOF: readline() returns b'' once the peer disconnects.
                break
            request = Request(message, self.dnstap)
            if request.quit_session:
                break
            if not request.response:
                continue
            writer.write(request.response.encode())
            await writer.drain()
        writer.close()
        return
| 1,428 | 0 | 89 |
5cb702b5dabf7677aa8ce35bbd46abef7cfa833f | 450 | py | Python | src/resources.py | eskeB/codealong.rpg-sim | c165889525c8e22227ce89ca9517ede53945030c | [
"MIT"
] | null | null | null | src/resources.py | eskeB/codealong.rpg-sim | c165889525c8e22227ce89ca9517ede53945030c | [
"MIT"
] | null | null | null | src/resources.py | eskeB/codealong.rpg-sim | c165889525c8e22227ce89ca9517ede53945030c | [
"MIT"
] | null | null | null |
#imports
#global variables
#classes
from tkinter import N
#functions
#main code
| 14.0625 | 105 | 0.617778 |
#imports
#global variables
#classes
from tkinter import N
class Character:
    """A simple RPG character with name, health, damage and armor stats."""

    def __init__(self, name, health, damage, armor):
        """Store the character's stats as plain attributes."""
        self.name, self.health = name, health
        self.damage, self.armor = damage, armor

    def __str__(self):
        """Render the stats one per line (output format kept as-is)."""
        return f'Name:{self.name}\nHealth:{self.health}\nDamage : {self.damage}\n Armor : {self.armor}\n'
#functions
def hello():
    """Print a hello-world greeting to stdout."""
    print('Hello world')
#main code
| 256 | -5 | 104 |
ff3cb32318c8dc666e6d4f4e01c609d5d6e90fc4 | 102 | py | Python | src/python/modules/transit/transit_constants.py | brdimattia/SmartMirror | 1c2e58d78ae70303d0486c674bbc5394d7e50b0a | [
"MIT"
] | null | null | null | src/python/modules/transit/transit_constants.py | brdimattia/SmartMirror | 1c2e58d78ae70303d0486c674bbc5394d7e50b0a | [
"MIT"
] | 3 | 2021-06-02T00:56:25.000Z | 2022-01-13T02:05:12.000Z | src/python/modules/transit/transit_constants.py | brdimattia/SmartMirror | 1c2e58d78ae70303d0486c674bbc5394d7e50b0a | [
"MIT"
] | null | null | null | TITLE_TEXT_SIZE=30
NEXT_TRAIN_TEXT_SIZE=36
GREEN="#3ab54a"
TRANSIT_API_ENDPOINT="https://www.mbta.com" | 25.5 | 43 | 0.833333 | TITLE_TEXT_SIZE=30
NEXT_TRAIN_TEXT_SIZE=36  # font size for the next-train arrival text (presumably points -- confirm in UI code)
GREEN="#3ab54a"  # green accent color used by the transit widget
TRANSIT_API_ENDPOINT="https://www.mbta.com" | 0 | 0 | 0 |
09d1008b4b192925b5f7897553ba652bf418ff05 | 483 | py | Python | FLP/messagetypes/text/defs.py | TheXIFC/FL-Studio-Time-Calculator | 23815f22d48f30b43f3eebe631100627e71d1430 | [
"MIT"
] | 4 | 2021-11-16T11:05:32.000Z | 2021-12-19T09:34:07.000Z | FLP/messagetypes/text/defs.py | TheXIFC/FL-Studio-Time-Calculator | 23815f22d48f30b43f3eebe631100627e71d1430 | [
"MIT"
] | 1 | 2021-05-20T16:25:40.000Z | 2021-05-20T16:25:40.000Z | FLP/messagetypes/text/defs.py | TheXIFC/FL-Studio-Time-Calculator | 23815f22d48f30b43f3eebe631100627e71d1430 | [
"MIT"
] | 3 | 2021-11-23T00:29:11.000Z | 2022-01-27T02:29:14.000Z | '''
Created on 17 May 2021
@author: julianporter
'''
from .numeric import HexConverter,IntConverter
from .text import String8Converter,String16Converter
HEXConv = HexConverter()
BYTEConv = IntConverter(1)
WORDConv = IntConverter(2)
DWORDConv = IntConverter(4)
STR8Conv = String8Converter()
STR16Conv = String16Converter()
| 18.576923 | 52 | 0.712215 | '''
Created on 17 May 2021
@author: julianporter
'''
from .numeric import HexConverter,IntConverter
from .text import String8Converter,String16Converter
# Shared converter instances used by the message-type definitions.
HEXConv = HexConverter()         # raw hex rendering fallback
BYTEConv = IntConverter(1)       # integer converter, width 1 (presumably bytes -- confirm in .numeric)
WORDConv = IntConverter(2)       # integer converter, width 2
DWORDConv = IntConverter(4)      # integer converter, width 4
STR8Conv = String8Converter()    # 8-bit text converter
STR16Conv = String16Converter()  # 16-bit text converter
def SAFEConv(value):
    """Best-effort conversion of *value* (an indexable byte sequence).

    Multi-element values go through the 8-bit string converter, single
    elements through the one-byte integer converter; any conversion
    failure falls back to the plain string form of the value.
    """
    try:
        if len(value)>1: return STR8Conv(value)
        return BYTEConv(value[0])
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed by the fallback.
        return f'{value}'
| 128 | 0 | 23 |
94870628847ef8f5d0614681d7e454c506500fb9 | 163 | py | Python | slither/tests/captionTest.py | fjfzcjj/Forked-PySlither | d9f87c54f34e88d2e575fe35bf2df47baba9b43e | [
"MIT"
] | 13 | 2016-03-31T18:05:43.000Z | 2022-01-31T21:09:58.000Z | slither/tests/captionTest.py | fjfzcjj/Forked-PySlither | d9f87c54f34e88d2e575fe35bf2df47baba9b43e | [
"MIT"
] | 61 | 2016-03-30T19:56:41.000Z | 2019-01-09T22:16:40.000Z | slither/tests/captionTest.py | fjfzcjj/Forked-PySlither | d9f87c54f34e88d2e575fe35bf2df47baba9b43e | [
"MIT"
] | 9 | 2016-03-31T17:01:25.000Z | 2019-10-08T06:14:43.000Z | import slither
i = 0
slither.setup("")
slither.runMainLoop(run_a_frame)
| 12.538462 | 33 | 0.619632 | import slither
i = 0
def run_a_frame():
global i
slither.setCaption("-"*(i+1))
i = (i + 1) % 60
slither.setup("")
slither.runMainLoop(run_a_frame)
| 65 | 0 | 23 |
e443c5e29108dd45c61166fa4cc04e6f86b2dc53 | 2,226 | py | Python | scripts/ssc/evaluation/eval_WCAE_tripost.py | MrBellamonte/MT-VAEs-TDA | 8881b5db607c673fb558f7b74ece27f244b16b77 | [
"MIT"
] | null | null | null | scripts/ssc/evaluation/eval_WCAE_tripost.py | MrBellamonte/MT-VAEs-TDA | 8881b5db607c673fb558f7b74ece27f244b16b77 | [
"MIT"
] | 1 | 2020-09-22T13:04:58.000Z | 2020-09-22T13:05:23.000Z | scripts/ssc/evaluation/eval_WCAE_tripost.py | MrBellamonte/AEs-VAEs-TDA | 8881b5db607c673fb558f7b74ece27f244b16b77 | [
"MIT"
] | null | null | null | from fractions import Fraction
import matplotlib.pyplot as plt
import mpltern
import pandas as pd
import numpy as np
if __name__ == "__main__":
df_path = '/Users/simons/PycharmProjects/MT-VAEs-TDA/output/eval/WCAE/metrics_selected_processed.csv'
# get df with cols: eval metrics (tbd), uid, k, bs, mu_push
metrics = ['rmse_manifold_Z', 'training.loss.autoencoder', 'test_mean_Lipschitz_std_refZ',
'test_mean_Lipschitz_std_refZ']
# get uids, get mu_push, k out of uid
df = pd.read_csv(df_path)
metrics = ['rmse_manifold_Z', 'training.loss.autoencoder', 'test_mean_Lipschitz_std_refZ',
'test_mean_trustworthiness']
max_metrics = ['test_mean_trustworthiness']
metric = metrics[2]
df = df[df['metric'] == metric]
df = df[['batch_size', 'mu_push', 'k', 'value']]
if metric in max_metrics:
df = df.groupby(['batch_size', 'mu_push', 'k'], as_index=False).min()
else:
df = df.groupby(['batch_size', 'mu_push', 'k'], as_index=False).min()
print(df)
ax = plt.subplot(projection='ternary')
bs_replace = {
64 : 0,
128: 0.25,
256: 0.5,
512: 1
}
k_replace = {
1: 0,
2: 0.2,
3: 0.4,
4: 0.6,
5: 0.8,
6: 1,
}
mu_replace = {
1 : 0,
1.05: 0.2,
1.1 : 0.4,
1.15: 0.6,
1.2 : 0.8,
1.25: 1,
}
bs = list(df['batch_size'].replace(bs_replace))
k = list(df['k'].replace(k_replace))
mu = list(df['mu_push'].replace(mu_replace))
v = list(df['value'])
print(df)
vmin = 0.0
vmax = 1.2
levels = np.linspace(vmin, vmax, 7)
cs = ax.tricontourf(bs, k, mu, v)
#cs = ax.tripcolor(bs, k, mu, v, shading = 'gouraud', rasterized = True)
#cs = ax.tripcolor(bs, k, mu, v, shading='flat')
cax = ax.inset_axes([1.05, 0.1, 0.05, 0.9])
colorbar = plt.colorbar(cs, cax=cax)
colorbar.set_label('Length', rotation=270, va='baseline')
ax.set_tlabel('Batch size')
ax.set_llabel('k')
ax.set_rlabel('mu')
ax.taxis.set_label_position('tick1')
ax.laxis.set_label_position('tick1')
ax.raxis.set_label_position('tick1')
plt.show()
| 27.481481 | 105 | 0.585804 | from fractions import Fraction
import matplotlib.pyplot as plt
import mpltern
import pandas as pd
import numpy as np
if __name__ == "__main__":
    # Per-run evaluation metrics for the WCAE hyperparameter sweep.
    df_path = '/Users/simons/PycharmProjects/MT-VAEs-TDA/output/eval/WCAE/metrics_selected_processed.csv'
    # get df with cols: eval metrics (tbd), uid, k, bs, mu_push
    metrics = ['rmse_manifold_Z', 'training.loss.autoencoder', 'test_mean_Lipschitz_std_refZ',
               'test_mean_Lipschitz_std_refZ']
    # get uids, get mu_push, k out of uid
    df = pd.read_csv(df_path)
    # This second list supersedes the first (kept above unchanged).
    metrics = ['rmse_manifold_Z', 'training.loss.autoencoder', 'test_mean_Lipschitz_std_refZ',
               'test_mean_trustworthiness']
    # Metrics for which a larger value is better.
    max_metrics = ['test_mean_trustworthiness']
    metric = metrics[2]
    df = df[df['metric'] == metric]
    df = df[['batch_size', 'mu_push', 'k', 'value']]
    if metric in max_metrics:
        # BUG FIX: both branches previously aggregated with .min(); for the
        # larger-is-better metrics the best value per cell is the maximum.
        df = df.groupby(['batch_size', 'mu_push', 'k'], as_index=False).max()
    else:
        df = df.groupby(['batch_size', 'mu_push', 'k'], as_index=False).min()
    print(df)
    # Ternary (simplex) projection is provided by the mpltern package.
    ax = plt.subplot(projection='ternary')
    # Map each swept hyperparameter value onto [0, 1] ternary coordinates.
    bs_replace = {
        64 : 0,
        128: 0.25,
        256: 0.5,
        512: 1
    }
    k_replace = {
        1: 0,
        2: 0.2,
        3: 0.4,
        4: 0.6,
        5: 0.8,
        6: 1,
    }
    mu_replace = {
        1   : 0,
        1.05: 0.2,
        1.1 : 0.4,
        1.15: 0.6,
        1.2 : 0.8,
        1.25: 1,
    }
    bs = list(df['batch_size'].replace(bs_replace))
    k = list(df['k'].replace(k_replace))
    mu = list(df['mu_push'].replace(mu_replace))
    v = list(df['value'])
    print(df)
    # NOTE(review): vmin/vmax/levels are currently unused -- tricontourf is
    # called without a levels argument.
    vmin = 0.0
    vmax = 1.2
    levels = np.linspace(vmin, vmax, 7)
    cs = ax.tricontourf(bs, k, mu, v)
    #cs = ax.tripcolor(bs, k, mu, v, shading = 'gouraud', rasterized = True)
    #cs = ax.tripcolor(bs, k, mu, v, shading='flat')
    cax = ax.inset_axes([1.05, 0.1, 0.05, 0.9])
    colorbar = plt.colorbar(cs, cax=cax)
    colorbar.set_label('Length', rotation=270, va='baseline')
    ax.set_tlabel('Batch size')
    ax.set_llabel('k')
    ax.set_rlabel('mu')
    ax.taxis.set_label_position('tick1')
    ax.laxis.set_label_position('tick1')
    ax.raxis.set_label_position('tick1')
    plt.show()
| 0 | 0 | 0 |
443dcde4daaca2dbdafde3c9187c91ccfe2dedd6 | 48 | py | Python | virtual/lib/python3.6/site-packages/liked/__init__.py | alexomaset/I-made-instagram | 3c9141513727b86392e3668735613385f70b32be | [
"MIT"
] | 1 | 2018-01-07T09:27:00.000Z | 2018-01-07T09:27:00.000Z | virtual/lib/python3.6/site-packages/liked/__init__.py | alexomaset/I-made-instagram | 3c9141513727b86392e3668735613385f70b32be | [
"MIT"
] | 4 | 2021-03-19T00:58:37.000Z | 2021-06-10T21:29:57.000Z | liked/__init__.py | amyth/django-liked | 3973d6c85236e5357ef72d83b1ad3e40528f4e5c | [
"MIT"
] | null | null | null | default_app_config = "liked.app.LikedAppConfig"
| 24 | 47 | 0.833333 | default_app_config = "liked.app.LikedAppConfig"
| 0 | 0 | 0 |
e8badc22990250b62d5fda655e8de9bbbb6d9926 | 603 | py | Python | leads/urls.py | tmbyers1102/ourtsy_v1 | 23ace98c82b0677f9d6ef7ee1096286f78c10b7d | [
"MIT"
] | null | null | null | leads/urls.py | tmbyers1102/ourtsy_v1 | 23ace98c82b0677f9d6ef7ee1096286f78c10b7d | [
"MIT"
] | null | null | null | leads/urls.py | tmbyers1102/ourtsy_v1 | 23ace98c82b0677f9d6ef7ee1096286f78c10b7d | [
"MIT"
] | null | null | null | from django.urls import path
from leads.views import (
LeadListView,
LeadDetailView,
LeadDeleteView,
LeadCreateView,
LeadUpdateView,
lead_delete, lead_list, lead_detail, lead_create, lead_update
)
app_name = "leads"
urlpatterns = [
path('', LeadListView.as_view(), name='lead-list'),
path('<int:pk>/', LeadDetailView.as_view(), name='lead-detail'),
path('<int:pk>/update/', LeadUpdateView.as_view(), name='lead-update'),
path('<int:pk>/delete/', LeadDeleteView.as_view(), name='lead-delete'),
path('create/', LeadCreateView.as_view(), name='lead-create'),
] | 31.736842 | 75 | 0.684909 | from django.urls import path
from leads.views import (
    LeadListView,
    LeadDetailView,
    LeadDeleteView,
    LeadCreateView,
    LeadUpdateView,
    lead_delete, lead_list, lead_detail, lead_create, lead_update
)

# URL namespace: reverse with e.g. reverse("leads:lead-list").
app_name = "leads"

# CRUD routes for Lead objects; detail/update/delete take the primary key.
# Fix: the closing bracket carried fused dataset residue ("| 0 | 0 | 0 |")
# that made this statement a syntax error.
urlpatterns = [
    path('', LeadListView.as_view(), name='lead-list'),
    path('<int:pk>/', LeadDetailView.as_view(), name='lead-detail'),
    path('<int:pk>/update/', LeadUpdateView.as_view(), name='lead-update'),
    path('<int:pk>/delete/', LeadDeleteView.as_view(), name='lead-delete'),
    path('create/', LeadCreateView.as_view(), name='lead-create'),
]
2d2ba99da00e60b55321b61f4681892d54cf94df | 1,603 | py | Python | src/storage/filestore.py | pterodactal666/eorzeas-only-hope | 02c97730b44d757847ca3569b3b1e6b61bd6ddf3 | [
"BSD-2-Clause"
] | null | null | null | src/storage/filestore.py | pterodactal666/eorzeas-only-hope | 02c97730b44d757847ca3569b3b1e6b61bd6ddf3 | [
"BSD-2-Clause"
] | null | null | null | src/storage/filestore.py | pterodactal666/eorzeas-only-hope | 02c97730b44d757847ca3569b3b1e6b61bd6ddf3 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python3
# vim: ts=4 expandtab
from __future__ import annotations
from typing import Optional, Set, TextIO
from os.path import exists as path_exists
from .datastore import DataStore, RaiseType
class FileStore(DataStore):
    """A datastore of names of people who can save Eorzea, written to a file
    with one entry per line."""
    # Append-mode handle, kept open for the lifetime of the store.
    file_handle: TextIO
    def __init__(self: FileStore, file_name: str):
        """Sets up the datastore, pre-loading the dataset from the file if it exists."""
        from_storage: Optional[Set[str]] = None
        if path_exists(file_name):
            # Load previously stored names, one per line.
            with open(file_name, "r") as handle:
                from_storage = {line.strip() for line in handle}
        super().__init__(from_storage)
        # Opened in append mode so new values only ever add lines.
        self.file_handle = open(file_name, "a")
    def _write_append(self: FileStore, value: str) -> Optional[bool]:
        """Append a value to the underlying datastore this type implements.
        This function may be a no-op method, in which case it MUST return None.
        Otherwise, it should return whether the write succeeded.
        Values passed to this function SHOULD NOT exist in the store already,
        so the implementation does not need to consider de-duplication.
        """
        return self.file_handle.write("%s\n" % value) > 0
| 31.431373 | 80 | 0.671865 | #!/usr/bin/python3
# vim: ts=4 expandtab
from __future__ import annotations
from typing import Optional, Set, TextIO
from os.path import exists as path_exists
from .datastore import DataStore, RaiseType
class FileStore(DataStore):
    """Datastore of the names of people who can save Eorzea, persisted to a
    plain-text file holding one name per line."""

    # Append-mode handle, kept open for the lifetime of the store.
    file_handle: TextIO

    def __init__(self: FileStore, file_name: str):
        """Open the store, pre-loading any names already saved in *file_name*."""
        existing: Optional[Set[str]] = None
        if path_exists(file_name):
            with open(file_name, "r") as source:
                existing = {entry.strip() for entry in source}
        super().__init__(existing)
        self.file_handle = open(file_name, "a")

    def _write_append(self: FileStore, value: str) -> Optional[bool]:
        """Persist one new value by appending a single line to the file.

        Returns whether anything was actually written. Callers guarantee
        *value* is not already present, so no de-duplication happens here.
        """
        written = self.file_handle.write("%s\n" % value)
        return written > 0

    def _write_list(self: FileStore, value: Set[str]) -> Optional[bool]:
        """Bulk writes are a no-op for this store; ``None`` marks them as such."""
        return None

    def __exit__(
        self: FileStore, exception_type: RaiseType, message, traceback
    ) -> Optional[bool]:
        """Close the underlying file before delegating to the base class."""
        self.file_handle.close()
        return super().__exit__(exception_type, message, traceback)
| 257 | 0 | 54 |
0633d77a222dc1c895ac0c51e6f89a42291eafa0 | 4,236 | py | Python | src/text_cat.py | mihai-t/text-categorization-nlp | c9612cf934468d111b87e3a289d60305e35155f5 | [
"MIT"
] | null | null | null | src/text_cat.py | mihai-t/text-categorization-nlp | c9612cf934468d111b87e3a289d60305e35155f5 | [
"MIT"
] | null | null | null | src/text_cat.py | mihai-t/text-categorization-nlp | c9612cf934468d111b87e3a289d60305e35155f5 | [
"MIT"
] | null | null | null | import os
import matplotlib.pyplot as plot
import numpy as np
from sklearn import decomposition
from sklearn.svm import SVC
from analysis import DocumentAnalysis, WordIterator
from utils import list_files, PROJECT_PATH
WORD2VEC_ANALYSER = DocumentAnalysis(model_file_name=os.path.join(PROJECT_PATH, "models", "all.model"))
def create_plot(matrix, classes, svm, name):
    """
    Given a matrix of samples and their correct classes, plots the data on a 2d plot by performing a PCA analysis.
    Furthermore, plots the separating hyperplane computed by a SVM classifier
    :param matrix: Labeled points in the hyperplane
    :param classes: List of correct classes
    :param svm: Trained model
    :param name: name of the plot
    :return:
    """
    # Project the samples onto their first two principal components.
    pca = decomposition.PCA(n_components=2)
    principal_component = pca.fit_transform(matrix)
    plot.figure()
    labels = set(classes)
    colors = ["red", "blue", "cyan", "magenta"]
    # Sweep a square grid in the original feature space by walking along the
    # two principal axes, then classify every grid point with the SVM.
    multiples = np.arange(-0.005, 0.005, 0.0001)
    first = multiples[:, np.newaxis] * pca.components_[0, :]
    second = multiples[:, np.newaxis] * pca.components_[1, :]
    grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
    flat_grid = grid.reshape(-1, matrix[0].shape[0])
    z = svm.predict(flat_grid)
    z = z.reshape(grid.shape[:-1])
    # Map the string labels onto 0/1 so contourf can shade the two regions.
    z = np.vectorize(lambda x: 1 if x == "romanian_news" else 0)(z)
    plot.contourf(multiples, multiples, z, cmap=plot.cm.Paired)
    plot.axis('off')
    # Scatter the projected samples, one colour per class.
    for i, l in enumerate(labels):
        plot.scatter(principal_component[np.where(np.array(classes) == l), 0],
                     principal_component[np.where(np.array(classes) == l), 1], label=l, color=colors[i % len(colors)],
                     edgecolors="black")
    plot.legend(loc='best', numpoints=1)
    plot.title(name)
    plot.grid()
    # Persist the figure under <project>/pca/<name>_pca.png.
    if not os.path.exists(os.path.join(PROJECT_PATH, "pca")):
        os.mkdir(os.path.join(PROJECT_PATH, "pca"))
    plot.savefig(os.path.join(PROJECT_PATH, "pca", name + '_pca.png'), dpi=125)
def compute_document_vector(file_name, analyser=WORD2VEC_ANALYSER):
    """
    Computes the document vector of a given sample using the word2vec model.
    The document vector is defined as the mean of the word vectors of all
    in-vocabulary words of the text sample.
    :param file_name: given text sample
    :param analyser: trained word2vec model
    :return: the average vector of the document, or None when no word of the
             document is in the model's vocabulary
    """
    total = None
    contributing = 0
    # Single pass: accumulate the vectors of the in-vocabulary words.
    for word in WordIterator(file_name):
        vector = analyser.vector(word)
        if vector is None:
            # Out-of-vocabulary word: skip it so it does not dilute the mean.
            continue
        total = vector if total is None else total + vector
        contributing += 1
    if total is None:
        # Empty document or no known words: no meaningful vector exists
        # (previously this raised a TypeError via None / count).
        return None
    # Bug fix: the old implementation divided each word vector by the word
    # count *and* divided the accumulated sum by the count again, so it
    # returned mean/count instead of the mean. Divide exactly once here.
    return total / contributing
def build_training_set(analyser=WORD2VEC_ANALYSER):
    """
    Given a word2vec analyser, compute the document vectors of the labeled samples
    :param analyser: given word2vec model
    :return: annotated data set (list of document vectors, list of labels)
    """
    X = []
    Y = []
    # Prose samples are labeled "prose".
    for file in list_files(os.path.join(PROJECT_PATH, "text", "romanian_prose")):
        v = compute_document_vector(file, analyser)
        X.append(v)
        Y.append("prose")
    # News samples keep the directory name as their label.
    for file in list_files(os.path.join(PROJECT_PATH, "text", "romanian_news")):
        v = compute_document_vector(file, analyser)
        X.append(v)
        Y.append("romanian_news")
    return X, Y
def test(svm):
    """
    Classify unlabeled samples using the trained svm model
    :param svm: given trained model
    :return: document vectors of the classified documents, and their names
    """
    # Two fixed unlabeled documents shipped with the project.
    X = [compute_document_vector(os.path.join(PROJECT_PATH, "unlabeled", "institut.txt")),
         compute_document_vector(os.path.join(PROJECT_PATH, "unlabeled", "vizita.txt"))]
    Y = ["institut", "vizita"]
    # predict() expects a 2-D array, hence the reshape to a single row.
    print(Y[0] + " predicted as: " + svm.predict(X[0].reshape(1, -1))[0])
    print(Y[1] + " predicted as: " + svm.predict(X[1].reshape(1, -1))[0])
    return X, Y
# SVM kernel functions to compare against each other.
KERNELS = ["linear", "poly", "sigmoid", "rbf"]
if __name__ == "__main__":
    # Train one SVM per kernel on the labeled corpus, classify the unlabeled
    # samples, and plot each resulting decision boundary via PCA.
    X, Y = build_training_set()
    for k in KERNELS:
        svm = SVC(kernel=k)
        svm.fit(X, Y)
        X_test, Y_test = test(svm)
        create_plot(X + X_test, Y + Y_test, svm, k)
| 32.584615 | 118 | 0.65203 | import os
import matplotlib.pyplot as plot
import numpy as np
from sklearn import decomposition
from sklearn.svm import SVC
from analysis import DocumentAnalysis, WordIterator
from utils import list_files, PROJECT_PATH
WORD2VEC_ANALYSER = DocumentAnalysis(model_file_name=os.path.join(PROJECT_PATH, "models", "all.model"))
def create_plot(matrix, classes, svm, name):
    """Render a 2-D PCA projection of *matrix* coloured by class, overlaid on
    the decision regions of *svm*, and save it as <name>_pca.png.

    :param matrix: sample vectors, one row per document
    :param classes: ground-truth label for each row of ``matrix``
    :param svm: fitted classifier used to shade the decision regions
    :param name: plot title and output file stem
    :return: None
    """
    reducer = decomposition.PCA(n_components=2)
    projected = reducer.fit_transform(matrix)
    plot.figure()
    # Build a square grid in the original feature space by walking along the
    # two principal axes, then classify every grid point.
    steps = np.arange(-0.005, 0.005, 0.0001)
    axis_one = steps[:, np.newaxis] * reducer.components_[0, :]
    axis_two = steps[:, np.newaxis] * reducer.components_[1, :]
    mesh = axis_one[np.newaxis, :, :] + axis_two[:, np.newaxis, :]
    predictions = svm.predict(mesh.reshape(-1, matrix[0].shape[0]))
    region = np.where(predictions.reshape(mesh.shape[:-1]) == "romanian_news", 1, 0)
    plot.contourf(steps, steps, region, cmap=plot.cm.Paired)
    plot.axis('off')
    # Scatter the projected samples, one colour per class.
    class_array = np.array(classes)
    palette = ["red", "blue", "cyan", "magenta"]
    for index, label in enumerate(set(classes)):
        selected = np.where(class_array == label)
        plot.scatter(projected[selected, 0],
                     projected[selected, 1],
                     label=label,
                     color=palette[index % len(palette)],
                     edgecolors="black")
    plot.legend(loc='best', numpoints=1)
    plot.title(name)
    plot.grid()
    output_dir = os.path.join(PROJECT_PATH, "pca")
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    plot.savefig(os.path.join(output_dir, name + '_pca.png'), dpi=125)
def compute_document_vector(file_name, analyser=WORD2VEC_ANALYSER):
    """
    Computes the document vector of a given sample using the word2vec model.
    The document vector is defined as the mean of the word vectors of all
    in-vocabulary words of the text sample.
    :param file_name: given text sample
    :param analyser: trained word2vec model
    :return: the average vector of the document, or None when no word of the
             document is in the model's vocabulary
    """
    total = None
    contributing = 0
    # Single pass: accumulate the vectors of the in-vocabulary words.
    for word in WordIterator(file_name):
        vector = analyser.vector(word)
        if vector is None:
            # Out-of-vocabulary word: skip it so it does not dilute the mean.
            continue
        total = vector if total is None else total + vector
        contributing += 1
    if total is None:
        # Empty document or no known words: no meaningful vector exists
        # (previously this raised a TypeError via None / count).
        return None
    # Bug fix: the old implementation divided each word vector by the word
    # count *and* divided the accumulated sum by the count again, so it
    # returned mean/count instead of the mean. Divide exactly once here.
    return total / contributing
def build_training_set(analyser=WORD2VEC_ANALYSER):
    """Compute document vectors for all labeled corpus samples.

    :param analyser: trained word2vec model used for the document vectors
    :return: tuple of (document vectors, matching class labels)
    """
    samples = []
    targets = []
    for folder, label in (("romanian_prose", "prose"),
                          ("romanian_news", "romanian_news")):
        directory = os.path.join(PROJECT_PATH, "text", folder)
        for sample_file in list_files(directory):
            samples.append(compute_document_vector(sample_file, analyser))
            targets.append(label)
    return samples, targets
def test(svm):
    """Classify the two bundled unlabeled samples with *svm*.

    Prints each prediction and returns the document vectors together with
    the sample names.

    :param svm: fitted classifier
    :return: tuple of (document vectors, sample names)
    """
    vectors = [
        compute_document_vector(os.path.join(PROJECT_PATH, "unlabeled", "institut.txt")),
        compute_document_vector(os.path.join(PROJECT_PATH, "unlabeled", "vizita.txt")),
    ]
    names = ["institut", "vizita"]
    for sample_name, document_vector in zip(names, vectors):
        # predict() expects a 2-D array, hence the single-row reshape.
        prediction = svm.predict(document_vector.reshape(1, -1))[0]
        print(sample_name + " predicted as: " + prediction)
    return vectors, names
# SVM kernel functions to compare against each other.
KERNELS = ["linear", "poly", "sigmoid", "rbf"]
if __name__ == "__main__":
    # Train one SVM per kernel on the labeled corpus, classify the unlabeled
    # samples, and plot each resulting decision boundary via PCA.
    X, Y = build_training_set()
    for k in KERNELS:
        svm = SVC(kernel=k)
        svm.fit(X, Y)
        X_test, Y_test = test(svm)
        create_plot(X + X_test, Y + Y_test, svm, k)
| 0 | 0 | 0 |
bed2840553f751669371a2026945a014d023f7e2 | 205 | py | Python | variable_and_data_type/numbers_demo/random_number.py | pysga1996/python-basic-programming | 5fe817986fbef2649b4b03955f07b59d2a2035d8 | [
"MIT"
] | null | null | null | variable_and_data_type/numbers_demo/random_number.py | pysga1996/python-basic-programming | 5fe817986fbef2649b4b03955f07b59d2a2035d8 | [
"MIT"
] | null | null | null | variable_and_data_type/numbers_demo/random_number.py | pysga1996/python-basic-programming | 5fe817986fbef2649b4b03955f07b59d2a2035d8 | [
"MIT"
] | null | null | null | # Python does not have a random() function to make a random number,
# The built-in `random` module provides pseudo-random number helpers:
import random
# randrange(1, 10) returns an integer in [1, 10) — the upper bound is excluded.
print(random.randrange(1, 10))
| 29.285714 | 89 | 0.760976 | # Python does not have a random() function to make a random number,
# The built-in `random` module provides pseudo-random number helpers:
import random
# randrange(1, 10) returns an integer in [1, 10) — the upper bound is excluded.
print(random.randrange(1, 10))
| 0 | 0 | 0 |
eb70d58c97a55bb82b5f3f4d2050fae75cb58188 | 6,843 | py | Python | binderhub/repoproviders.py | rs2/binderhub | 7f0e6eaa5f44536c3756c1d07acc15e263322565 | [
"BSD-3-Clause"
] | null | null | null | binderhub/repoproviders.py | rs2/binderhub | 7f0e6eaa5f44536c3756c1d07acc15e263322565 | [
"BSD-3-Clause"
] | null | null | null | binderhub/repoproviders.py | rs2/binderhub | 7f0e6eaa5f44536c3756c1d07acc15e263322565 | [
"BSD-3-Clause"
] | null | null | null | """
Classes for Repo providers
Subclass the base class, ``RepoProvider``, to support different version
control services and providers.
"""
from datetime import timedelta
import json
import os
import time
from prometheus_client import Gauge
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.httputil import url_concat
from traitlets import Dict, Unicode, default
from traitlets.config import LoggingConfigurable
GITHUB_RATE_LIMIT = Gauge('binderhub_github_rate_limit_remaining', 'GitHub rate limit remaining')
def tokenize_spec(spec):
    """Tokenize a GitHub-style spec into parts, error if spec invalid.

    Splits "user/repo/ref" into ``[user, repo, ref]``; the ref itself may
    contain further "/" characters. Raises ``ValueError`` — with a hint to
    append "/master" — when fewer than three parts are present.
    """
    spec_parts = spec.split('/', 2)  # allow ref to contain "/"
    if len(spec_parts) != 3:
        msg = 'Spec is not of the form "user/repo/ref", provided: "{spec}".'.format(spec=spec)
        if len(spec_parts) == 2 and spec_parts[-1] != 'master':
            msg += ' Did you mean "{spec}/master"?'.format(spec=spec)
        raise ValueError(msg)
    return spec_parts
class RepoProvider(LoggingConfigurable):
"""Base class for a repo provider"""
name = Unicode(
help="""
Descriptive human readable name of this repo provider.
"""
)
spec = Unicode(
help="""
The spec for this builder to parse
"""
)
unresolved_ref = Unicode()
@gen.coroutine
class FakeProvider(RepoProvider):
"""Fake provider for local testing of the UI
"""
class GitHubRepoProvider(RepoProvider):
"""Repo provider for the GitHub service"""
name = Unicode('GitHub')
client_id = Unicode(config=True,
help="""GitHub client id for authentication with the GitHub API
For use with client_secret.
Loaded from GITHUB_CLIENT_ID env by default.
"""
)
@default('client_id')
client_secret = Unicode(config=True,
help="""GitHub client secret for authentication with the GitHub API
For use with client_id.
Loaded from GITHUB_CLIENT_SECRET env by default.
"""
)
@default('client_secret')
access_token = Unicode(config=True,
help="""GitHub access token for authentication with the GitHub API
Loaded from GITHUB_ACCESS_TOKEN env by default.
"""
)
@default('access_token')
auth = Dict(
help="""Auth parameters for the GitHub API access
Populated from client_id, client_secret, access_token.
"""
)
@default('auth')
@gen.coroutine
| 31.104545 | 97 | 0.611866 | """
Classes for Repo providers
Subclass the base class, ``RepoProvider``, to support different version
control services and providers.
"""
from datetime import timedelta
import json
import os
import time
from prometheus_client import Gauge
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.httputil import url_concat
from traitlets import Dict, Unicode, default
from traitlets.config import LoggingConfigurable
GITHUB_RATE_LIMIT = Gauge('binderhub_github_rate_limit_remaining', 'GitHub rate limit remaining')
def tokenize_spec(spec):
    """Split a "user/repo/ref" spec into its three components.

    The ref may itself contain "/" characters. Raises ``ValueError`` (with
    a hint to append "/master" when the ref looks missing) for any spec
    that does not have exactly three parts.
    """
    pieces = spec.split('/', 2)  # at most three fields; the ref may contain "/"
    if len(pieces) == 3:
        return pieces
    message = 'Spec is not of the form "user/repo/ref", provided: "{spec}".'.format(spec=spec)
    if len(pieces) == 2 and pieces[-1] != 'master':
        message += ' Did you mean "{spec}/master"?'.format(spec=spec)
    raise ValueError(message)
def strip_suffix(text, suffix):
    """Return *text* with a trailing *suffix* removed, if present.

    Guarding on a non-empty suffix fixes an edge-case bug: with
    ``suffix == ""`` the old code computed ``text[:-0]``, i.e. ``text[:0]``,
    and wrongly returned the empty string.

    :param text: string to trim
    :param suffix: suffix to remove from the end of ``text``
    :return: ``text`` without the suffix, or ``text`` unchanged
    """
    if suffix and text.endswith(suffix):
        return text[:-len(suffix)]
    return text
class RepoProvider(LoggingConfigurable):
    """Base class for a repo provider.

    Subclasses resolve a user-supplied spec to a concrete repository,
    revision and build slug for one hosting service.
    """
    name = Unicode(
        help="""
        Descriptive human readable name of this repo provider.
        """
    )
    spec = Unicode(
        help="""
        The spec for this builder to parse
        """
    )
    # Ref exactly as given in the spec, before resolution to a commit.
    unresolved_ref = Unicode()
    @gen.coroutine
    def get_resolved_ref(self):
        """Resolve the spec's ref to an immutable revision (coroutine)."""
        raise NotImplementedError("Must be overridden in child class")
    def get_repo_url(self):
        """Return the URL of the repository to build."""
        raise NotImplementedError("Must be overridden in the child class")
    def get_build_slug(self):
        """Return a string identifying this repository for image naming."""
        raise NotImplementedError("Must be overriden in the child class")
class FakeProvider(RepoProvider):
    """Fake provider for local testing of the UI.

    Returns canned values so the frontend can be exercised without
    contacting a real repository hosting service.
    """
    async def get_resolved_ref(self):
        """Return a fixed, fake commit hash."""
        return "1a2b3c4d5e6f"
    def get_repo_url(self):
        """Return a fixed, fake repository URL."""
        return "fake/repo"
    def get_build_slug(self):
        """Return a fixed, fake build slug."""
        return '{user}-{repo}'.format(user='Rick', repo='Morty')
class GitHubRepoProvider(RepoProvider):
    """Repo provider for the GitHub service.

    Resolves "user/repo/ref" specs against the GitHub commits API and
    tracks the API rate limit via logging and a Prometheus gauge.
    """
    name = Unicode('GitHub')
    client_id = Unicode(config=True,
        help="""GitHub client id for authentication with the GitHub API
        For use with client_secret.
        Loaded from GITHUB_CLIENT_ID env by default.
        """
    )
    @default('client_id')
    def _client_id_default(self):
        """Fall back to the GITHUB_CLIENT_ID environment variable."""
        return os.getenv('GITHUB_CLIENT_ID', '')
    client_secret = Unicode(config=True,
        help="""GitHub client secret for authentication with the GitHub API
        For use with client_id.
        Loaded from GITHUB_CLIENT_SECRET env by default.
        """
    )
    @default('client_secret')
    def _client_secret_default(self):
        """Fall back to the GITHUB_CLIENT_SECRET environment variable."""
        return os.getenv('GITHUB_CLIENT_SECRET', '')
    access_token = Unicode(config=True,
        help="""GitHub access token for authentication with the GitHub API
        Loaded from GITHUB_ACCESS_TOKEN env by default.
        """
    )
    @default('access_token')
    def _access_token_default(self):
        """Fall back to the GITHUB_ACCESS_TOKEN environment variable."""
        return os.getenv('GITHUB_ACCESS_TOKEN', '')
    auth = Dict(
        help="""Auth parameters for the GitHub API access
        Populated from client_id, client_secret, access_token.
        """
    )
    @default('auth')
    def _default_auth(self):
        """Collect the non-empty credential traits into query parameters."""
        auth = {}
        for key in ('client_id', 'client_secret', 'access_token'):
            value = getattr(self, key)
            if value:
                auth[key] = value
        return auth
    def __init__(self, *args, **kwargs):
        """Parse ``self.spec`` into user, repo and (unresolved) ref."""
        super().__init__(*args, **kwargs)
        self.user, self.repo, self.unresolved_ref = tokenize_spec(self.spec)
        # A trailing ".git" in the repo segment is tolerated and dropped.
        self.repo = strip_suffix(self.repo, ".git")
    def get_repo_url(self):
        """Return the public https URL of the repository."""
        return "https://github.com/{user}/{repo}".format(user=self.user, repo=self.repo)
    @gen.coroutine
    def get_resolved_ref(self):
        """Resolve the spec's ref to a commit sha via the GitHub API.

        Returns the sha (cached after the first call), or None when the
        ref does not exist (HTTP 404) or the response lacks a 'sha' key.
        Raises ValueError when the rate limit is exhausted.
        """
        if hasattr(self, 'resolved_ref'):
            # Cached from a previous resolution for this provider instance.
            return self.resolved_ref
        client = AsyncHTTPClient()
        api_url = "https://api.github.com/repos/{user}/{repo}/commits/{ref}".format(
            user=self.user, repo=self.repo, ref=self.unresolved_ref
        )
        self.log.debug("Fetching %s", api_url)
        if self.auth:
            # Add auth params. After logging, so credentials never hit the log!
            api_url = url_concat(api_url, self.auth)
        try:
            resp = yield client.fetch(api_url, user_agent="BinderHub")
        except HTTPError as e:
            # 403 with a zeroed x-ratelimit-remaining header means we are
            # rate limited rather than forbidden.
            if (
                e.code == 403
                and e.response
                and e.response.headers.get('x-ratelimit-remaining') == '0'
            ):
                rate_limit = e.response.headers['x-ratelimit-limit']
                reset_timestamp = int(e.response.headers['x-ratelimit-reset'])
                reset_seconds = int(reset_timestamp - time.time())
                self.log.error(
                    "GitHub Rate limit ({limit}) exceeded. Reset in {delta}.".format(
                        limit=rate_limit,
                        delta=timedelta(seconds=reset_seconds),
                    )
                )
                # round expiry up to nearest 5 minutes
                minutes_until_reset = 5 * (1 + (reset_seconds // 60 // 5))
                raise ValueError("GitHub rate limit exceeded. Try again in %i minutes."
                    % minutes_until_reset
                )
            elif e.code == 404:
                # Unknown repo or ref: signal "not found" with None.
                return None
            else:
                raise
        # record and log github rate limit
        remaining = int(resp.headers['x-ratelimit-remaining'])
        rate_limit = int(resp.headers['x-ratelimit-limit'])
        reset_timestamp = int(resp.headers['x-ratelimit-reset'])
        # record with prometheus
        GITHUB_RATE_LIMIT.set(remaining)
        # log at different levels, depending on remaining fraction
        fraction = remaining / rate_limit
        if fraction < 0.2:
            log = self.log.warning
        elif fraction < 0.5:
            log = self.log.info
        else:
            log = self.log.debug
        # str(timedelta) looks like '00:32'
        delta = timedelta(seconds=int(reset_timestamp - time.time()))
        log("GitHub rate limit remaining {remaining}/{limit}. Reset in {delta}.".format(
            remaining=remaining, limit=rate_limit, delta=delta,
        ))
        ref_info = json.loads(resp.body.decode('utf-8'))
        if 'sha' not in ref_info:
            # TODO: Figure out if we should raise an exception instead?
            return None
        # Cache the resolved sha on the instance for subsequent calls.
        self.resolved_ref = ref_info['sha']
        return self.resolved_ref
    def get_build_slug(self):
        """Return "<user>-<repo>" as the slug for image naming."""
        return '{user}-{repo}'.format(user=self.user, repo=self.repo)
| 3,930 | 0 | 395 |
8901795a24f1d916e1722a7335091cebf641f4eb | 8,439 | py | Python | drugstore/shop/tests/test_product.py | lexover/vue-django-webstore-example | 4710628000ac237319bce4aa64aed4fb75779cf2 | [
"MIT"
] | null | null | null | drugstore/shop/tests/test_product.py | lexover/vue-django-webstore-example | 4710628000ac237319bce4aa64aed4fb75779cf2 | [
"MIT"
] | null | null | null | drugstore/shop/tests/test_product.py | lexover/vue-django-webstore-example | 4710628000ac237319bce4aa64aed4fb75779cf2 | [
"MIT"
] | null | null | null | import os
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from shop.models import Product
from shop.models import ProductGroup
from .helper import create_user
from .helper import Factory
from .helper import ResponseFilter as rf
| 50.532934 | 119 | 0.659794 | import os
from django.conf import settings
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from shop.models import Product
from shop.models import ProductGroup
from .helper import create_user
from .helper import Factory
from .helper import ResponseFilter as rf
class ProductViewTestCase(TestCase):
    """API tests for the product endpoints: list/detail GET plus
    create/update/delete with admin, non-admin and anonymous users."""
    # Base name used to reverse the DRF routes ('products-list'/'products-detail').
    url = 'products'
    # Number of products used by the list test.
    items_number = 3
    def setUp(self) -> None:
        """Create one product (with its group) and the shared fixtures."""
        self.client = APIClient()
        self.group = ProductGroup.objects.create(**Factory.get_product_group())
        self.data = Factory.get_product(group=self.group.pk)
        self.instance = Product.objects.create(**{**self.data, **{'group': self.group}})
        self.created_images = []
        # A freshly created product starts with an empty rating.
        self.rating = {'value': 0, 'votes': 0}
    def tearDown(self) -> None:
        """Remove image files written to MEDIA_ROOT by create/update tests."""
        if self.created_images:
            for img in self.created_images:
                os.remove(img)
    # Remember the image file a successful create/update wrote, so that
    # tearDown can delete it from MEDIA_ROOT afterwards.
    def register_created_image(self, response):
        data = response.data['results'] if 'results' in response.data else response.data
        image_path = data.get('image', None)
        if image_path:
            # Get path from 'images/picture_01.png'
            img = image_path[(image_path.find('/images') + 1):]
            self.created_images.append(os.path.join(settings.MEDIA_ROOT, img))
    # ==================== GET ==================== #
    def test_get_all_products(self):
        """Listing returns every product, each with the default rating."""
        self.data['rating'] = self.rating
        data = [self.data]
        for item in range(1, self.items_number):
            val = (Factory.get_product(group=self.group.pk, slug=item))
            Product.objects.create(**{**val, **{'group': self.group}})
            val['rating'] = self.rating
            data.append(val)
        response = self.client.get(reverse(f'{self.url}-list'))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(data), self.items_number)
        self.assertEqual(rf.to_data(response.data, filters=[rf.filter_img]), data)
    def test_get_valid_product(self):
        """Detail view of an existing product returns its data."""
        response = self.client.get(reverse(f'{self.url}-detail', kwargs={'pk': self.instance.pk}))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.data['rating'] = self.rating
        self.assertEqual(rf.to_data(response.data, filters=[rf.filter_img]), self.data)
    def test_get_invalid_product(self):
        """Detail view of a non-existing pk yields 404."""
        response = self.client.get(reverse(f'{self.url}-detail', kwargs={'pk': self.instance.pk + 1}))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    # ==================== CREATE ==================== #
    # Helper: POST *data* (plus a sample image) as *user*, or anonymously.
    def create_product(self, data, user=None):
        if user is not None:
            self.client.credentials(HTTP_AUTHORIZATION=user['token'])
        with open(os.path.join(settings.MEDIA_ROOT, 'images', 'product_01.png'), 'rb') as image:
            return self.client.post(reverse(f'{self.url}-list'),
                                    data={**data, **{'image': image}},
                                    format='multipart')
    def test_create_valid_product_status_201(self):
        """Admins can create a product; response echoes the data."""
        data = Factory.get_product(group=self.group.pk, slug=self.items_number + 1)
        response = self.create_product(data, create_user(is_admin=True))
        self.register_created_image(response)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        data['rating'] = self.rating
        self.assertEqual(rf.to_data(response.data, filters=[rf.filter_img, rf.filter_img_hash]), data)
    # If sale price is not specified or set to 0.0 it should be replaced in DB by current price.
    def test_create_valid_product_without_sale_price_status_201(self):
        data = Factory.get_product(group=self.group.pk, slug=self.items_number + 1)
        # Remove 'sale_price' before save.
        del data['sale_price']
        response = self.create_product(data, create_user(is_admin=True))
        self.register_created_image(response)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Set 'sale_price' as current price before check.
        data['sale_price'] = data['price']
        data['rating'] = self.rating
        self.assertEqual(rf.to_data(response.data, filters=[rf.filter_img, rf.filter_img_hash]), data)
    def test_create_invalid_product_status_400(self):
        """A duplicate product name is rejected with 400."""
        data = {**Factory.get_product(group=self.group.pk, slug=self.items_number + 1), **{'name': self.data['name']}}
        response = self.create_product(data, create_user(is_admin=True))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_unauthorized_create_product_group_status_401(self):
        """Anonymous create attempts are rejected with 401."""
        data = Factory.get_product(group=self.group.pk, slug=self.items_number + 1)
        response = self.create_product(data)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_unauthorized_create_product_group_status_403(self):
        """Authenticated non-admin create attempts are rejected with 403."""
        data = Factory.get_product(group=self.group.pk, slug=self.items_number + 1)
        response = self.create_product(data, create_user(is_admin=False))
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    # ==================== UPDATE ==================== #
    # Helper: PUT *data* (plus a sample image) against the setUp instance.
    def update_product(self, data, user=None):
        if user is not None:
            self.client.credentials(HTTP_AUTHORIZATION=user['token'])
        with open(os.path.join(settings.MEDIA_ROOT, 'images', 'product_01.png'), 'rb') as image:
            return self.client.put(reverse(f'{self.url}-detail', kwargs={'pk': self.instance.pk}),
                                   data={**data, **{'image': image}},
                                   format='multipart')
    def test_update_valid_product_status_200(self):
        """Admins can update a product; response echoes the data."""
        data = Factory.get_product(group=self.group.pk, slug=self.items_number + 1)
        response = self.update_product(data, create_user(is_admin=True))
        self.register_created_image(response)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data['rating'] = self.rating
        self.assertEqual(rf.to_data(response.data, filters=[rf.filter_img, rf.filter_img_hash]), data)
    def test_update_invalid_product_status_400(self):
        """An empty name fails validation with 400."""
        data = Factory.get_product(group=self.group.pk, slug=self.items_number + 1)
        response = self.update_product({**data, **{'name': ''}}, create_user(is_admin=True))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_unauthorized_update_product_group_status_401(self):
        """Anonymous update attempts are rejected with 401."""
        data = Factory.get_product(group=self.group.pk, slug=self.items_number + 1)
        response = self.update_product(data)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_unauthorized_update_product_group_status_403(self):
        """Authenticated non-admin update attempts are rejected with 403."""
        data = Factory.get_product(group=self.group.pk, slug=self.items_number + 1)
        response = self.update_product(data, create_user(is_admin=False))
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    # ==================== DELETE ==================== #
    # Helper: DELETE the product with primary key *pk* as *user*.
    def delete_product(self, pk, user=None):
        if user is not None:
            self.client.credentials(HTTP_AUTHORIZATION=user['token'])
        return self.client.delete(reverse(f'{self.url}-detail', kwargs={'pk': pk}))
    def test_valid_delete_product_status_204(self):
        """Admins can delete an existing product (204)."""
        response = self.delete_product(pk=self.instance.pk, user=create_user(is_admin=True))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
    def test_invalid_delete_product_status_404(self):
        """Deleting a non-existing pk yields 404."""
        response = self.delete_product(pk=self.instance.pk+1, user=create_user(is_admin=True))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    def test_unauthorized_delete_product_status_401(self):
        """Anonymous delete attempts are rejected with 401."""
        response = self.delete_product(pk=self.instance.pk)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_unauthorized_delete_product_status_403(self):
        """Authenticated non-admin delete attempts are rejected with 403."""
        response = self.delete_product(pk=self.instance.pk, user=create_user())
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| 6,961 | 1,079 | 24 |
4a4971975035bcb2dc36e9fec35608a8d9bdb1e8 | 750 | py | Python | functional_programming/list_comprehensio.py | danielkpodo/python-zero-to-mastery | d39468f48211bc82e4e2613745d9107d433e05af | [
"MIT"
] | null | null | null | functional_programming/list_comprehensio.py | danielkpodo/python-zero-to-mastery | d39468f48211bc82e4e2613745d9107d433e05af | [
"MIT"
] | null | null | null | functional_programming/list_comprehensio.py | danielkpodo/python-zero-to-mastery | d39468f48211bc82e4e2613745d9107d433e05af | [
"MIT"
] | null | null | null | # They are actually grouped into three
# - list comprehensions
# - set comprehensions
# - dictionary comprehensions
# They are a quick way of creating a list, set or dictionary instead of looping and appending.
# General form: [expression for item in iterable]
# Example: collect all the letters of a string.
# using a list comprehension to get all letters in a string
word = "hello"
sord_strings = [char for char in word]
print(sord_strings)
# using a comprehension to list all numbers in a range
# random_range = [num for num in range(1, 101)]
# print(random_range)
# squaring the numbers 1..100 with a comprehension
squared_items = [num ** 2 for num in range(1, 101)]
print(squared_items)
# filtering odd numbers below 100 (NOTE: result is built but never printed)
only_odd = [num for num in range(100) if num % 2 != 0]
| 22.727273 | 113 | 0.741333 | # They are actually grouped into three
# - list comprehensions
# - set comprehensions
# - dictionary comprehensions
# They are a quick way of creating a list, set or dictionary instead of looping and appending.
# General form: [expression for item in iterable]
# Example: collect all the letters of a string.
# using a list comprehension to get all letters in a string
word = "hello"
sord_strings = [char for char in word]
print(sord_strings)
# using a comprehension to list all numbers in a range
# random_range = [num for num in range(1, 101)]
# print(random_range)
# squaring the numbers 1..100 with a comprehension
squared_items = [num ** 2 for num in range(1, 101)]
print(squared_items)
# filtering odd numbers below 100 (NOTE: result is built but never printed)
only_odd = [num for num in range(100) if num % 2 != 0]
| 0 | 0 | 0 |
a5d99f442f4b1c5a8e1c03517e964ce157869dde | 3,367 | py | Python | notifications/views.py | salazarpardo/redinnovacion | 3f7c13af0af1887112a0492aea7782871fba0129 | [
"CC-BY-3.0"
] | null | null | null | notifications/views.py | salazarpardo/redinnovacion | 3f7c13af0af1887112a0492aea7782871fba0129 | [
"CC-BY-3.0"
] | null | null | null | notifications/views.py | salazarpardo/redinnovacion | 3f7c13af0af1887112a0492aea7782871fba0129 | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
""" Views for the notifications application. """
# standard library
# django
from django.views.generic.base import TemplateView
# models
from .models import Notification
# views
from base.views import BaseListView
# forms
class NotificationListView(BaseListView):
    """
    View for displaying a list of notifications.
    """
    # Model the list view queries.
    model = Notification
    # Jade template that renders the notification list page.
    template_name = 'notifications/list.jade'
    # Newest notifications (highest id) first.
    ordering = ('-id',)
| 30.609091 | 78 | 0.650728 | # -*- coding: utf-8 -*-
""" Views for the notifications application. """
# standard library
# django
from django.views.generic.base import TemplateView
# models
from .models import Notification
# views
from base.views import BaseListView
# forms
class NotificationListView(BaseListView):
"""
View for displaying a list of notifications.
"""
model = Notification
template_name = 'notifications/list.jade'
ordering = ('-id',)
def get_queryset(self):
queryset = super(NotificationListView, self).get_queryset()
return queryset.filter(user=self.request.user)
def get_context_data(self, **kwargs):
context = super(NotificationListView, self).get_context_data(**kwargs)
notification_ids = []
for notification in context['object_list']:
notification_ids.append(notification.id)
# Get the unread notification list
unread_notifications = self.request.user.notification_set.filter(
read=False
)
# replace the unread notifications since we are marking them as read
context['unread_notifications'] = unread_notifications.count()
context['dropdown_notifications_list'] = list(
unread_notifications.order_by('-created_at')[:10]
)
context['body_class'] = 'notifications-list'
Notification.objects.filter(id__in=notification_ids).update(read=True)
return context
class LastNotificationView(TemplateView):
template_name = "notifications/includes/dropdown.jade"
def get_context_data(self, **kwargs):
context = super(LastNotificationView, self).get_context_data(**kwargs)
# anon users doesn't need this data
if (
not self.request.user.is_authenticated()
or self.request.user.is_pending()
):
return {}
# get unread notifications
unread_notifications = self.request.user.notification_set.filter(
read=False
)
# add unread notifications count to the context
context['unread_notifications'] = unread_notifications.count()
# get unread messages
unread_messages = self.request.user.received_email_messages.filter(
read=False
)
# add unread messages count to the context
context['unread_messages'] = unread_messages.count()
# get messages notifications
unread_notifications_messages_ids = (
self.request.user.notification_set.filter(
email_message__in=unread_messages
).values_list('id', flat=True)
)
user_notifications = self.request.user.notification_set.filter(
id__in=(
list(unread_notifications.values_list('id', flat=True))
+ list(unread_notifications_messages_ids)
)
).distinct()
# get first 10 notifications
user_notifications = list(
user_notifications.order_by('-updated_at')[:10]
)
# add first 10 notifications to the context
context['dropdown_notifications_list'] = user_notifications[:10]
# add total notifications count to the context
context['total_notifications'] = (
context['unread_notifications'] + context['unread_messages']
)
return context
| 2,728 | 107 | 77 |
f3661e8298cc1586815eb79028981a4ef84ff17a | 2,313 | py | Python | ML_model/mnist.py | Willjay90/DigitRecognition | 97db4606063327d7b55c35f8f8ce87d14d2daeab | [
"MIT"
] | 8 | 2019-04-06T19:46:50.000Z | 2021-05-28T13:41:50.000Z | ML_model/mnist.py | mohsinalimat/DigitRecognition | 97db4606063327d7b55c35f8f8ce87d14d2daeab | [
"MIT"
] | 2 | 2018-04-01T16:30:15.000Z | 2018-05-07T06:08:01.000Z | ML_model/mnist.py | mohsinalimat/DigitRecognition | 97db4606063327d7b55c35f8f8ce87d14d2daeab | [
"MIT"
] | 3 | 2019-04-19T19:11:52.000Z | 2020-11-03T06:33:14.000Z | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from keras.models import Sequential
from keras.utils import to_categorical
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam
def get_MNIST_data():
"""
https://www.tensorflow.org/get_started/mnist/beginners
The MNIST data is split into three parts:
- 55,000 data points of training data (mnist.train),
- 10,000 points of test data (mnist.test), and
- 5,000 points of validation data (mnist.validation)
"""
# using tensorflow api
return input_data.read_data_sets("MNIST_data/", one_hot=True)
# Initial setups
num_classes = 10
mnist = get_MNIST_data()
X = mnist.train.images.reshape(-1, 28, 28, 1)
y = mnist.train.labels
# model -- Deep MNIST from TensorFlow Tutorial (https://www.tensorflow.org/get_started/mnist/pros)
#####################################################
model = Sequential()
model.add(Conv2D(32, (5,5), padding='same', activation='relu', input_shape=X.shape[1:]))
model.add(MaxPooling2D())
model.add(Conv2D(64, (5,5), padding='same', activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
#####################################################
# summary of your network
model.summary()
# compile the model
model.compile(optimizer=Adam(1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
X_val = mnist.validation.images.reshape(-1, 28, 28, 1)
y_val = mnist.validation.labels
# training
model.fit(X, y, batch_size=64, epochs=1, validation_data=(X_val, y_val))
# check the score
X_test = mnist.test.images.reshape(-1, 28, 28, 1)
y_test = mnist.test.labels
score = model.evaluate(X_test, y_test)
print('Loss: %.3f' % score[0])
print('Accuracy: %.3f' % score[1])
# save your model! serialize model to JSON
#model_json = model.to_json()
#with open("keras_model.json", "w") as json_file:
# json_file.write(model_json)
# serialize weights to HDF5
#model.save_weights("keras_mnist_model.h5")
# Saving/loading whole models (architecture + weights + optimizer state)
model.save('keras_mnist_model.h5')
| 28.9125 | 98 | 0.700821 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from keras.models import Sequential
from keras.utils import to_categorical
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam
def get_MNIST_data():
"""
https://www.tensorflow.org/get_started/mnist/beginners
The MNIST data is split into three parts:
- 55,000 data points of training data (mnist.train),
- 10,000 points of test data (mnist.test), and
- 5,000 points of validation data (mnist.validation)
"""
# using tensorflow api
return input_data.read_data_sets("MNIST_data/", one_hot=True)
# Initial setups
num_classes = 10
mnist = get_MNIST_data()
X = mnist.train.images.reshape(-1, 28, 28, 1)
y = mnist.train.labels
# model -- Deep MNIST from TensorFlow Tutorial (https://www.tensorflow.org/get_started/mnist/pros)
#####################################################
model = Sequential()
model.add(Conv2D(32, (5,5), padding='same', activation='relu', input_shape=X.shape[1:]))
model.add(MaxPooling2D())
model.add(Conv2D(64, (5,5), padding='same', activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
#####################################################
# summary of your network
model.summary()
# compile the model
model.compile(optimizer=Adam(1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
X_val = mnist.validation.images.reshape(-1, 28, 28, 1)
y_val = mnist.validation.labels
# training
model.fit(X, y, batch_size=64, epochs=1, validation_data=(X_val, y_val))
# check the score
X_test = mnist.test.images.reshape(-1, 28, 28, 1)
y_test = mnist.test.labels
score = model.evaluate(X_test, y_test)
print('Loss: %.3f' % score[0])
print('Accuracy: %.3f' % score[1])
# save your model! serialize model to JSON
#model_json = model.to_json()
#with open("keras_model.json", "w") as json_file:
# json_file.write(model_json)
# serialize weights to HDF5
#model.save_weights("keras_mnist_model.h5")
# Saving/loading whole models (architecture + weights + optimizer state)
model.save('keras_mnist_model.h5')
| 0 | 0 | 0 |
ed4634cb70b0972f57d2914be36d83f7ce490d82 | 7,562 | py | Python | pyDRESCALk/dist_clustering.py | lanl/pyDRESCALk | 25af3796442e5ec87fb54caa4344e8871a2abd15 | [
"BSD-3-Clause"
] | 2 | 2021-12-04T05:20:13.000Z | 2021-12-06T17:30:28.000Z | pyDRESCALk/dist_clustering.py | lanl/pyDRESCALk | 25af3796442e5ec87fb54caa4344e8871a2abd15 | [
"BSD-3-Clause"
] | null | null | null | pyDRESCALk/dist_clustering.py | lanl/pyDRESCALk | 25af3796442e5ec87fb54caa4344e8871a2abd15 | [
"BSD-3-Clause"
] | 2 | 2021-12-04T06:23:21.000Z | 2021-12-08T20:53:12.000Z | # @Author: Manish Bhattarai, Erik Skau
from .utils import *
class custom_clustering():
r"""
Greedy algorithm to approximate a quadratic assignment problem to cluster vectors. Given p groups of k vectors, construct k clusters, each cluster containing a single vector from each of the p groups. This clustering approximation uses cos distances and mean centroids.
Args:
A_all (ndarray) : Order three tensor of shape m by k by p, where m is the ambient dimension of the vectors, k is the number of vectors in each group, and p is the number of groups of vectors.
R_all (ndarray) : Order three tensor of shape n by k by p, where n is the ambient dimension of the vectors, k is the number of vectors in each group, and p is the number of groups of vectors.
params (class) : Class object with communication parameters which comprises of grid information (p_r,p_c) , commincator (comm) and epsilon (eps).
"""
@comm_timing()
@comm_timing()
def normalize_by_A(self):
r'''Normalize the factors A and R'''
Wall_norm = (self.A_all * self.A_all).sum(axis=0)
if self.p_r != 1:
Wall_norm = self.comm1.allreduce(Wall_norm)
Wall_norm += self.eps
temp = self.np.sqrt(Wall_norm)
self.A_all /= temp.reshape(1, temp.shape[0], temp.shape[1])
self.R_all = temp.reshape(1,temp.shape[0], 1, temp.shape[1])*self.R_all*temp.reshape(1,1,temp.shape[0],temp.shape[1])
@comm_timing()
def mad(self, data, flag=1, axis=-1):
r'''Compute the median/mean absolute deviation'''
if flag == 1: # the median absolute deviation
return self.np.median(self.np.absolute(data - self.np.median(data, axis=axis, keepdims=True)), axis=axis)
else: # flag = 0 the mean absolute deviation
# return self.np.nanmean((self.np.absolute(data.T - self.np.nanmean(data, axis = dimf))).T,axis = dimf)
return self.np.mean(self.np.absolute(data - self.np.mean(data, axis=axis)), axis=axis)
@comm_timing()
def change_order(self, tens):
r'''change the order of features'''
ans = list(range(len(tens)))
for p in tens:
ans[p[0]] = p[1]
return ans
@comm_timing()
def greedy_lsa(self, A):
r"""Return the permutation order"""
X = A.copy()
pairs = []
for i in range(X.shape[0]):
minindex = np.argmax(X)
ind = np.unravel_index(minindex, X.shape)
pairs.append([ind[0].item(),ind[1].item()])
X[:, ind[1]] = -self.np.inf
X[ind[0], :] = -self.np.inf
return pairs
@comm_timing()
def dist_feature_ordering(self, centroids, W_sub):
r'''return the features in proper order'''
k = W_sub.shape[1]
dist = centroids.T @ W_sub
if self.p_r != 1:
dist = self.comm1.allreduce(dist)
#print(self.np.diag(dist))
tmp = self.greedy_lsa(dist)
j = self.change_order(tmp)
W_sub = W_sub[:, j]
return W_sub, j
@comm_timing()
def dist_custom_clustering(self, centroids=None, vb=0):
"""
Performs the distributed custom clustering
Parameters
----------
centroids : ndarray, optional
The m by k initialization of the centroids of the clusters. None corresponds to using the first slice, A_all[:,:,0], as the initial centroids. Defaults to None.
vb : bool, optional
Verbose to display intermediate results
Returns
-------
centroids : ndarray
The m by k centroids of the clusters
A_all :ndarray
Clustered organization of the vectors A_all
R_all : ndarray
Clustered organization of the vectors R_all
permute_order : list
Indices of the permuted features
"""
permute_order = []
self.normalize_by_A()
if centroids == None:
centroids = self.A_all[:, :, 0].copy()
'''dist = centroids.T @ self.A_all[:, :, 0]
if self.p_r != 1:
dist = self.comm1.allreduce(dist)'''
for i in range(100):
for p in range(self.A_all.shape[-1]):
A_ord, j = self.dist_feature_ordering(centroids, self.A_all[:, :, p])
permute_order.append(j)
self.A_all[:, :, p] = A_ord
self.R_all[:, :, :, p] = self.np.stack([self.R_all[:, k, :, p] for k in j],axis=1)
self.R_all[:, :, :, p] = self.np.stack([self.R_all[:, :, k, p] for k in j],axis=2)
centroids = self.np.median(self.A_all, axis=-1)
centroids_norm = (centroids ** 2).sum(axis=0)
if self.p_r != 1:
centroids_norm = self.comm1.allreduce(centroids_norm)
centroids_norm += self.eps
temp = self.np.sqrt(centroids_norm)
centroids /= temp
return centroids, self.A_all, self.R_all, permute_order
@comm_timing()
def dist_silhouettes(self):
"""
Computes the cosine distances silhouettes of a distributed clustering of vectors.
Returns
-------
sils : ndarray
The k by p array of silhouettes where sils[i,j] is the silhouette measure for the vector A_all[:,i,j]
"""
self.dist_custom_clustering()
N, k, n_pert = self.A_all.shape
W_flat = self.A_all.reshape(N, k * n_pert)
A_all2 = (W_flat.T @ W_flat).reshape(k, n_pert, k, n_pert)
if self.p_r != 1:
A_all2 = self.comm1.allreduce(A_all2)
distances = self.np.arccos(self.np.clip(A_all2, -1.0, 1.0))
(N, K, n_perts) = self.A_all.shape
if K == 1:
sils = self.np.ones((K, n_perts))
else:
a = self.np.zeros((K, n_perts))
b = self.np.zeros((K, n_perts))
for k in range(K):
for n in range(n_perts):
a[k, n] = 1 / (n_perts - 1) * self.np.sum(distances[k, n, k, :])
tmp = self.np.sum(distances[k, n, :, :], axis=1)
tmp[k] = self.np.inf
b[k, n] = 1 / n_perts * self.np.min(tmp)
sils = (b - a) / self.np.maximum(a, b)
return sils
@comm_timing()
def fit(self):
r"""
Calls the sub routines to perform distributed custom clustering and compute silhouettes
Returns
-------
centroids : ndarray
The m by k centroids of the clusters
CentStd : ndarray
Absolute deviation of the features from the centroid
A_all : ndarray
Clustered organization of the vectors A_all
R_all : ndarray
Clustered organization of the vectors R_all
S_avg : ndarray
mean Silhouette score
permute_order : list
Indices of the permuted features
"""
centroids, _, _, IDX_F2 = self.dist_custom_clustering()
CentStd = self.mad(self.A_all, axis=-1)
cluster_coefficients = self.dist_silhouettes()
S_avg = cluster_coefficients.flatten().mean()
result = [centroids, CentStd, self.R_all, cluster_coefficients.mean(axis=1), S_avg, IDX_F2]
return result
| 40.223404 | 273 | 0.581195 | # @Author: Manish Bhattarai, Erik Skau
from .utils import *
class custom_clustering():
r"""
Greedy algorithm to approximate a quadratic assignment problem to cluster vectors. Given p groups of k vectors, construct k clusters, each cluster containing a single vector from each of the p groups. This clustering approximation uses cos distances and mean centroids.
Args:
A_all (ndarray) : Order three tensor of shape m by k by p, where m is the ambient dimension of the vectors, k is the number of vectors in each group, and p is the number of groups of vectors.
R_all (ndarray) : Order three tensor of shape n by k by p, where n is the ambient dimension of the vectors, k is the number of vectors in each group, and p is the number of groups of vectors.
params (class) : Class object with communication parameters which comprises of grid information (p_r,p_c) , commincator (comm) and epsilon (eps).
"""
@comm_timing()
def __init__(self, Wall, Hall, params):
self.A_all = Wall
self.R_all = Hall
self.p_r, self.p_c = params.p_r, params.p_c
self.comm1 = params.comm1
self.eps = params.eps
self.p = self.p_r * self.p_c
self.np = params.np
@comm_timing()
def normalize_by_A(self):
r'''Normalize the factors A and R'''
Wall_norm = (self.A_all * self.A_all).sum(axis=0)
if self.p_r != 1:
Wall_norm = self.comm1.allreduce(Wall_norm)
Wall_norm += self.eps
temp = self.np.sqrt(Wall_norm)
self.A_all /= temp.reshape(1, temp.shape[0], temp.shape[1])
self.R_all = temp.reshape(1,temp.shape[0], 1, temp.shape[1])*self.R_all*temp.reshape(1,1,temp.shape[0],temp.shape[1])
@comm_timing()
def mad(self, data, flag=1, axis=-1):
r'''Compute the median/mean absolute deviation'''
if flag == 1: # the median absolute deviation
return self.np.median(self.np.absolute(data - self.np.median(data, axis=axis, keepdims=True)), axis=axis)
else: # flag = 0 the mean absolute deviation
# return self.np.nanmean((self.np.absolute(data.T - self.np.nanmean(data, axis = dimf))).T,axis = dimf)
return self.np.mean(self.np.absolute(data - self.np.mean(data, axis=axis)), axis=axis)
@comm_timing()
def change_order(self, tens):
r'''change the order of features'''
ans = list(range(len(tens)))
for p in tens:
ans[p[0]] = p[1]
return ans
@comm_timing()
def greedy_lsa(self, A):
r"""Return the permutation order"""
X = A.copy()
pairs = []
for i in range(X.shape[0]):
minindex = np.argmax(X)
ind = np.unravel_index(minindex, X.shape)
pairs.append([ind[0].item(),ind[1].item()])
X[:, ind[1]] = -self.np.inf
X[ind[0], :] = -self.np.inf
return pairs
@comm_timing()
def dist_feature_ordering(self, centroids, W_sub):
r'''return the features in proper order'''
k = W_sub.shape[1]
dist = centroids.T @ W_sub
if self.p_r != 1:
dist = self.comm1.allreduce(dist)
#print(self.np.diag(dist))
tmp = self.greedy_lsa(dist)
j = self.change_order(tmp)
W_sub = W_sub[:, j]
return W_sub, j
@comm_timing()
def dist_custom_clustering(self, centroids=None, vb=0):
"""
Performs the distributed custom clustering
Parameters
----------
centroids : ndarray, optional
The m by k initialization of the centroids of the clusters. None corresponds to using the first slice, A_all[:,:,0], as the initial centroids. Defaults to None.
vb : bool, optional
Verbose to display intermediate results
Returns
-------
centroids : ndarray
The m by k centroids of the clusters
A_all :ndarray
Clustered organization of the vectors A_all
R_all : ndarray
Clustered organization of the vectors R_all
permute_order : list
Indices of the permuted features
"""
permute_order = []
self.normalize_by_A()
if centroids == None:
centroids = self.A_all[:, :, 0].copy()
'''dist = centroids.T @ self.A_all[:, :, 0]
if self.p_r != 1:
dist = self.comm1.allreduce(dist)'''
for i in range(100):
for p in range(self.A_all.shape[-1]):
A_ord, j = self.dist_feature_ordering(centroids, self.A_all[:, :, p])
permute_order.append(j)
self.A_all[:, :, p] = A_ord
self.R_all[:, :, :, p] = self.np.stack([self.R_all[:, k, :, p] for k in j],axis=1)
self.R_all[:, :, :, p] = self.np.stack([self.R_all[:, :, k, p] for k in j],axis=2)
centroids = self.np.median(self.A_all, axis=-1)
centroids_norm = (centroids ** 2).sum(axis=0)
if self.p_r != 1:
centroids_norm = self.comm1.allreduce(centroids_norm)
centroids_norm += self.eps
temp = self.np.sqrt(centroids_norm)
centroids /= temp
return centroids, self.A_all, self.R_all, permute_order
@comm_timing()
def dist_silhouettes(self):
"""
Computes the cosine distances silhouettes of a distributed clustering of vectors.
Returns
-------
sils : ndarray
The k by p array of silhouettes where sils[i,j] is the silhouette measure for the vector A_all[:,i,j]
"""
self.dist_custom_clustering()
N, k, n_pert = self.A_all.shape
W_flat = self.A_all.reshape(N, k * n_pert)
A_all2 = (W_flat.T @ W_flat).reshape(k, n_pert, k, n_pert)
if self.p_r != 1:
A_all2 = self.comm1.allreduce(A_all2)
distances = self.np.arccos(self.np.clip(A_all2, -1.0, 1.0))
(N, K, n_perts) = self.A_all.shape
if K == 1:
sils = self.np.ones((K, n_perts))
else:
a = self.np.zeros((K, n_perts))
b = self.np.zeros((K, n_perts))
for k in range(K):
for n in range(n_perts):
a[k, n] = 1 / (n_perts - 1) * self.np.sum(distances[k, n, k, :])
tmp = self.np.sum(distances[k, n, :, :], axis=1)
tmp[k] = self.np.inf
b[k, n] = 1 / n_perts * self.np.min(tmp)
sils = (b - a) / self.np.maximum(a, b)
return sils
@comm_timing()
def fit(self):
r"""
Calls the sub routines to perform distributed custom clustering and compute silhouettes
Returns
-------
centroids : ndarray
The m by k centroids of the clusters
CentStd : ndarray
Absolute deviation of the features from the centroid
A_all : ndarray
Clustered organization of the vectors A_all
R_all : ndarray
Clustered organization of the vectors R_all
S_avg : ndarray
mean Silhouette score
permute_order : list
Indices of the permuted features
"""
centroids, _, _, IDX_F2 = self.dist_custom_clustering()
CentStd = self.mad(self.A_all, axis=-1)
cluster_coefficients = self.dist_silhouettes()
S_avg = cluster_coefficients.flatten().mean()
result = [centroids, CentStd, self.R_all, cluster_coefficients.mean(axis=1), S_avg, IDX_F2]
return result
| 251 | 0 | 26 |
132c8e61aab53c5f476262d03457945113a4bfb6 | 95 | py | Python | coding/learn_celery_02/celery_app/tasks.py | yatao91/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | 3 | 2021-05-25T16:58:52.000Z | 2022-02-05T09:37:17.000Z | coding/learn_celery_02/celery_app/tasks.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | coding/learn_celery_02/celery_app/tasks.py | yataosu/learning_road | e88dc43de98e35922bfc71c222ec71766851e618 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from celery_app import app
@app.task
| 11.875 | 26 | 0.589474 | # -*- coding: utf-8 -*-
from celery_app import app
@app.task
def add(x, y):
return x + y
| 10 | 0 | 22 |
7e79e0ff94b60c23342a586e521e5ede8f485c5f | 3,864 | py | Python | hargreaves/deals/clients.py | dastra/hargreaves-sdk-python | 8099d775c6a70ac415690c0322fe1b964356f6ff | [
"MIT"
] | null | null | null | hargreaves/deals/clients.py | dastra/hargreaves-sdk-python | 8099d775c6a70ac415690c0322fe1b964356f6ff | [
"MIT"
] | null | null | null | hargreaves/deals/clients.py | dastra/hargreaves-sdk-python | 8099d775c6a70ac415690c0322fe1b964356f6ff | [
"MIT"
] | null | null | null | import logging
from ..account.clients import IAccountClient
from ..deals.models import DealRequest, DealResult
from ..orders.manual.clients import IManualOrderClient
from ..orders.market.clients import IMarketOrderClient
from ..orders.market.errors import MarketClosedError, MarketOrderLiveQuoteError
from ..orders.models import OrderRequest
from ..search import InvestmentTypes
from ..search.clients import security_filter, ISecuritySearchClient
from requests_tracker.session import IWebSession
logger = logging.getLogger(__name__)
| 41.106383 | 97 | 0.712992 | import logging
from ..account.clients import IAccountClient
from ..deals.models import DealRequest, DealResult
from ..orders.manual.clients import IManualOrderClient
from ..orders.market.clients import IMarketOrderClient
from ..orders.market.errors import MarketClosedError, MarketOrderLiveQuoteError
from ..orders.models import OrderRequest
from ..search import InvestmentTypes
from ..search.clients import security_filter, ISecuritySearchClient
from requests_tracker.session import IWebSession
logger = logging.getLogger(__name__)
class DealClient():
_account_client: IAccountClient
_search_client: ISecuritySearchClient
_market_order_client: IMarketOrderClient
_manual_order_client: IManualOrderClient
def __init__(self,
account_client: IAccountClient,
search_client: ISecuritySearchClient,
market_order_client: IMarketOrderClient,
manual_order_client: IManualOrderClient
):
self._account_client = account_client
self._search_client = search_client
self._market_order_client = market_order_client
self._manual_order_client = manual_order_client
def execute_deal(self, web_session: IWebSession, deal_request: DealRequest) -> DealResult:
accounts = self._account_client.get_account_summary(web_session=web_session)
account_summary = next((account_summary for account_summary in accounts
if account_summary.account_id == deal_request.account_id), None)
account_detail = self._account_client.get_account_detail(
web_session=web_session, account_summary=account_summary)
account_value = account_detail.total_value
account_cash = account_detail.total_cash
logger.debug(f"Account Value = £ {account_value:,.2f}, "
f"Cash Available = £ {account_cash:,.2f}")
search_results = self._search_client.investment_search(
web_session=web_session,
search_string=deal_request.stock_ticker,
investment_types=InvestmentTypes.ALL)
print(f"Found {len(search_results)} results, let's filter to 1")
found_security = security_filter(
search_results=search_results,
stock_ticker=deal_request.stock_ticker,
sedol_code=deal_request.sedol_code)
order_request = OrderRequest(
sedol_code=found_security.sedol_code,
category_code=found_security.category,
position_type=deal_request.position_type,
position_percentage=deal_request.position_percentage,
account_id=deal_request.account_id,
account_value=account_value
)
logger.debug("Executing Smart Deal ...")
try:
order_confirmation = self._market_order_client.execute_order_flow(
web_session=web_session, order_request=order_request)
return DealResult(order_request=order_request, order_confirmation=order_confirmation)
except MarketClosedError as ex:
logger.warning("Market is closed ...")
if not (deal_request.allow_fill_or_kill and ex.can_place_fill_or_kill_order):
raise ex
order_confirmation = self._manual_order_client.execute_order_flow(
web_session=web_session, order_request=order_request)
return DealResult(order_request=order_request, order_confirmation=order_confirmation)
except MarketOrderLiveQuoteError:
logger.warning("Unable to retrieve live-quote ...")
order_confirmation = self._manual_order_client.execute_order_flow(
web_session=web_session, order_request=order_request)
return DealResult(order_request=order_request, order_confirmation=order_confirmation)
| 3,087 | 220 | 23 |
4a843d0ff446bdb6b9abfce1f3606a0a63f7b471 | 1,364 | py | Python | tests/unit/TestJinjaPillarGrainsGetFormatRule.py | sblaisot/salt-lint | 9b525b6c590535a995c97657c5ad09d9974f3aa8 | [
"MIT"
] | null | null | null | tests/unit/TestJinjaPillarGrainsGetFormatRule.py | sblaisot/salt-lint | 9b525b6c590535a995c97657c5ad09d9974f3aa8 | [
"MIT"
] | null | null | null | tests/unit/TestJinjaPillarGrainsGetFormatRule.py | sblaisot/salt-lint | 9b525b6c590535a995c97657c5ad09d9974f3aa8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2013-2018 Will Thames <will@thames.id.au>
# Copyright (c) 2018 Ansible by Red Hat
# Modified work Copyright (c) 2019 Jeffrey Bouter
import unittest
from saltlint.linter import RulesCollection
from saltlint.rules.JinjaPillarGrainsGetFormatRule import JinjaPillarGrainsGetFormatRule
from tests import RunFromText
GOOD_STATEMENT_LINE = '''
example_test:
file.managed:
- name: /etc/test
- user: root
- group: {{ salt['pillar.get']('item') }} test
- something: {{ grains['item'] }}
- content: |
{{ salt['pillar.get']('test') }}
'''
BAD_STATEMENT_LINE = '''
example_test:
file.managed:
- name: /etc/test
- user: root
- group: {{ pillar.get('item') }} test
- something: {{ grains.get('item')}}
- content: |
{{ salt['pillar.get']('test') }}
'''
| 27.836735 | 88 | 0.674487 | # -*- coding: utf-8 -*-
# Copyright (c) 2013-2018 Will Thames <will@thames.id.au>
# Copyright (c) 2018 Ansible by Red Hat
# Modified work Copyright (c) 2019 Jeffrey Bouter
import unittest
from saltlint.linter import RulesCollection
from saltlint.rules.JinjaPillarGrainsGetFormatRule import JinjaPillarGrainsGetFormatRule
from tests import RunFromText
GOOD_STATEMENT_LINE = '''
example_test:
file.managed:
- name: /etc/test
- user: root
- group: {{ salt['pillar.get']('item') }} test
- something: {{ grains['item'] }}
- content: |
{{ salt['pillar.get']('test') }}
'''
BAD_STATEMENT_LINE = '''
example_test:
file.managed:
- name: /etc/test
- user: root
- group: {{ pillar.get('item') }} test
- something: {{ grains.get('item')}}
- content: |
{{ salt['pillar.get']('test') }}
'''
class TestJinjaPillarGrainsGetFormatRule(unittest.TestCase):
collection = RulesCollection()
def setUp(self):
self.collection.register(JinjaPillarGrainsGetFormatRule())
self.runner = RunFromText(self.collection)
def test_statement_positive(self):
results = self.runner.run_state(GOOD_STATEMENT_LINE)
self.assertEqual(0, len(results))
def test_statement_negative(self):
results = self.runner.run_state(BAD_STATEMENT_LINE)
self.assertEqual(2, len(results))
| 344 | 155 | 23 |
a3278cf42543eeab20b3e70cef2d07f96c468ad4 | 904 | py | Python | leonardo/module/search/forms.py | timgates42/django-leonardo | c155f97fee9e2be1e0f508d47a1c205028253ecc | [
"BSD-3-Clause"
] | 102 | 2015-04-30T12:27:14.000Z | 2021-10-31T18:21:16.000Z | leonardo/module/search/forms.py | timgates42/django-leonardo | c155f97fee9e2be1e0f508d47a1c205028253ecc | [
"BSD-3-Clause"
] | 158 | 2015-04-30T22:42:34.000Z | 2019-09-07T15:37:22.000Z | leonardo/module/search/forms.py | timgates42/django-leonardo | c155f97fee9e2be1e0f508d47a1c205028253ecc | [
"BSD-3-Clause"
] | 64 | 2015-05-10T12:00:39.000Z | 2021-07-29T19:47:27.000Z |
from crispy_forms.bootstrap import (Accordion, AccordionGroup, FieldWithButtons,
StrictButton, Tab)
from crispy_forms.layout import Field, HTML, Layout, Submit
from crispy_forms.helper import FormHelper
from django.utils.translation import ugettext_lazy as _
from haystack.forms import ModelSearchForm
| 33.481481 | 105 | 0.65708 |
from crispy_forms.bootstrap import (Accordion, AccordionGroup, FieldWithButtons,
StrictButton, Tab)
from crispy_forms.layout import Field, HTML, Layout, Submit
from crispy_forms.helper import FormHelper
from django.utils.translation import ugettext_lazy as _
from haystack.forms import ModelSearchForm
class SearchForm(ModelSearchForm):
def _wrap_all(self):
# stylung
self.helper.filter(
str, max_level=4).wrap(
Field, css_class="form-control")
def __init__(self, *args, **kwargs):
super(SearchForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_tag = False
self.helper.form_show_labels = False
self.helper.layout = Layout(
FieldWithButtons('q', Submit('submit', _("Search...")), css_class="col-xs-6 col-md-offset-3")
)
| 473 | 13 | 77 |
14ecc68206b8d1801404c300feb8fed4ef817efa | 175 | py | Python | functions/Blur.py | tylerrussin/Road-Line_Detection | c2214907f47a2389c9cc4618962f308ad4309823 | [
"MIT"
] | null | null | null | functions/Blur.py | tylerrussin/Road-Line_Detection | c2214907f47a2389c9cc4618962f308ad4309823 | [
"MIT"
] | null | null | null | functions/Blur.py | tylerrussin/Road-Line_Detection | c2214907f47a2389c9cc4618962f308ad4309823 | [
"MIT"
] | null | null | null | import cv2 | 29.166667 | 82 | 0.708571 | import cv2
def blur(image):
# Adding Gaussian Blur (much more effective when applied after edge detection)
image = cv2.GaussianBlur(image, (5, 5), 0)
return image | 142 | 0 | 23 |
d9ef3f232f162c575016d84f2f8b286031ba4176 | 3,286 | py | Python | bases_2021_1S/Grupo 03/storage/AVL/DataAccessLayer/reports.py | dadu0699/tytus | e1920f6932c840859e3e79eb8756a1d3da88bd77 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | bases_2021_1S/Grupo 03/storage/AVL/DataAccessLayer/reports.py | dadu0699/tytus | e1920f6932c840859e3e79eb8756a1d3da88bd77 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | bases_2021_1S/Grupo 03/storage/AVL/DataAccessLayer/reports.py | dadu0699/tytus | e1920f6932c840859e3e79eb8756a1d3da88bd77 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | # AVL Mode Package
# Released under MIT License
# Copyright (c) 2020 TytusDb Team
# Developers: SG#16
import os
from storage.AVL.DataAccessLayer.handler import Handler
from storage.AVL.DataAccessLayer.tree_graph import TreeGraph
| 34.229167 | 115 | 0.521302 | # AVL Mode Package
# Released under MIT License
# Copyright (c) 2020 TytusDb Team
# Developers: SG#16
import os
from storage.AVL.DataAccessLayer.handler import Handler
from storage.AVL.DataAccessLayer.tree_graph import TreeGraph
def graphicTables(database: str):
try:
databases = Handler.rootinstance()
for i in databases:
if database == i.name:
Handler.init_DirReports()
fname = 'tmp/db-tables.png'
tables = open("tmp/tables.dot", "w")
temporal = 'digraph tables{\ngraph[bgcolor="#0f1319"] \nnode[style="filled",color="black", ' \
'fillcolor="#45c2c5",shape="box",fontcolor="black",fontname="Century Gothic", ' \
'fontsize=18];\n'
c = 0
for table in i.tablesName:
temporal += "node" + \
str(c) + "[label=\"" + \
str(table).replace(' ', '') + "\"];\n"
c += 1
temporal += "}"
tables.write(temporal)
tables.close()
os.system("dot -Tpng tmp/tables.dot -o " + fname)
os.remove('tmp/tables.dot')
return fname
except:
return None
def graphicDatabases():
    """Render every registered database name to a PNG via Graphviz.

    Returns 'tmp/databases.png' on success or None on any failure.
    """
    try:
        Handler.init_DirReports()
        databases = Handler.rootinstance()
        fname = 'tmp/databases.png'
        # One styled box node per database name.
        dot = 'digraph databases{\ngraph[bgcolor="#0f1319"] \nnode[style="filled",color="black", ' \
              'fillcolor="#45c2c5", shape="box",fontcolor="black", fontname="Century Gothic", fontsize=18];\n'
        for c, db in enumerate(databases):
            dot += "node" + str(c) + "[label=\"" + str(db.name).replace(' ', '') + "\"];\n"
        dot += "}"
        # 'with' guarantees the .dot file is closed before Graphviz reads it.
        with open("tmp/databases.dot", "w") as dot_file:
            dot_file.write(dot)
        os.system("dot -Tpng tmp/databases.dot -o " + fname)
        os.remove('tmp/databases.dot')
        return fname
    except Exception:
        # Narrowed from a bare 'except:' which also hid SystemExit/KeyboardInterrupt.
        return None
def graphAVL(database: str, table: str):
    """Export a Graphviz rendering of the AVL tree backing *table*.

    Returns 'tmp/grafo-avl.png' on success or None on failure.
    """
    try:
        Handler.init_DirReports()
        avl = Handler.tableinstance(database, table)
        # TreeGraph.export() presumably writes tmp/grafo-avl.png -- TODO confirm.
        TreeGraph(avl).export()
        return 'tmp/grafo-avl.png'
    except Exception:
        # Narrowed from a bare 'except:' which also hid SystemExit/KeyboardInterrupt.
        return None
def graphTuple(database: str, table: str, index):
    """Render the tuple stored under *index* in *table* as a PNG.

    Each value of the tuple becomes one node in the graph. Returns
    'tmp/tupla.png' on success or None on failure (including a missing key).
    """
    try:
        Handler.init_DirReports()
        avl = Handler.tableinstance(database, table)
        tupla = avl.search(index)
        fname = 'tmp/tupla.png'
        # One styled box node per value of the tuple.
        dot = 'digraph tables{\ngraph[bgcolor="#0f1319"] \nnode[style="filled",color="black", ' \
              'fillcolor="#45c2c5",shape="box",fontcolor="black",fontname="Century Gothic", fontsize=18];\n'
        for c, registro in enumerate(tupla):
            dot += "node" + str(c) + "[label=\"" + str(registro).replace(' ', '') + "\"];\n"
        dot += "}"
        # 'with' guarantees the .dot file is closed before Graphviz reads it.
        with open("tmp/tupla.dot", "w") as dot_file:
            dot_file.write(dot)
        os.system("dot -Tpng tmp/tupla.dot -o " + fname)
        os.remove('tmp/tupla.dot')
        return fname
    except Exception:
        # Narrowed from a bare 'except:' which also hid SystemExit/KeyboardInterrupt.
        return None
| 2,959 | 0 | 92 |
9499f00eaa6957dbbb45d152b61c998d9b191fa5 | 1,308 | py | Python | working_examples/testing_imports.py | Delosari/dazer | 7078895d7b2f3029944c354b687d739152fceef2 | [
"MIT"
] | null | null | null | working_examples/testing_imports.py | Delosari/dazer | 7078895d7b2f3029944c354b687d739152fceef2 | [
"MIT"
] | null | null | null | working_examples/testing_imports.py | Delosari/dazer | 7078895d7b2f3029944c354b687d739152fceef2 | [
"MIT"
] | null | null | null | import testing_constants as global_settings
# from . import settings as global_settings
#
# class Settings:
#
# def __init__(self):
# for setting in dir(global_settings):
# if setting.isupper():
# setattr(self, setting, getattr(global_settings, setting))
#
# def __setattr__(self, attr, value):
# if not getattr(self, attr, None):
# super().__setattr__(attr, value)
# else:
# raise TypeError("'constant' does not support item assignment")
#
#
# settings = Settings()
if __name__== "__main__":
print 'Initial'
print global_settings.myA
print global_settings.myB
settings = Settings()
print 'settings'
print settings.myA
print settings.myB
# class ModelIngredients(ImportModelData, SspFitter, NebularContinuaCalculator, EmissionComponents, ReddeningLaws, MCMC_printer):
#
# def __init__(self): | 24.679245 | 129 | 0.665138 | import testing_constants as global_settings
# from . import settings as global_settings
#
# class Settings:
#
# def __init__(self):
# for setting in dir(global_settings):
# if setting.isupper():
# setattr(self, setting, getattr(global_settings, setting))
#
# def __setattr__(self, attr, value):
# if not getattr(self, attr, None):
# super().__setattr__(attr, value)
# else:
# raise TypeError("'constant' does not support item assignment")
#
#
# settings = Settings()
class Settings:
    """Write-once snapshot of the UPPER-CASE names in ``global_settings``.

    Every attribute behaves like a constant: assigning to a name that is
    already set raises TypeError.
    """
    def __init__(self):
        # Copy every upper-case name from the settings module onto this
        # instance; lower-case names are treated as non-settings.
        for setting in dir(global_settings):
            if setting.isupper():
                setattr(self, setting, getattr(global_settings, setting))
    def __setattr__(self, attr, value):
        # Bug fix: the old truthiness check (getattr(..., None)) silently
        # allowed falsy constants (0, "", False, None) to be reassigned.
        # Test for presence instead of truthiness.
        if attr in self.__dict__:
            raise TypeError("'constant' does not support item assignment")
        super(Settings, self).__setattr__(attr, value)
if __name__== "__main__":
print 'Initial'
print global_settings.myA
print global_settings.myB
settings = Settings()
print 'settings'
print settings.myA
print settings.myB
# class ModelIngredients(ImportModelData, SspFitter, NebularContinuaCalculator, EmissionComponents, ReddeningLaws, MCMC_printer):
#
# def __init__(self): | 355 | -6 | 77 |
a768325066d042cb4f4790dd05d70a7d81d0d41b | 11,139 | py | Python | scripts/database-backup/postgres/remote_mysql.py | BoyanHH/training-projects | a7dc23e118fb78beec23992338e099f983c43e67 | [
"Apache-2.0"
] | null | null | null | scripts/database-backup/postgres/remote_mysql.py | BoyanHH/training-projects | a7dc23e118fb78beec23992338e099f983c43e67 | [
"Apache-2.0"
] | null | null | null | scripts/database-backup/postgres/remote_mysql.py | BoyanHH/training-projects | a7dc23e118fb78beec23992338e099f983c43e67 | [
"Apache-2.0"
] | null | null | null | import subprocess
import os
import sys
import psutil
import datetime
def check_dependencies():
"""Checks for dependencies
Mandatory dependencies are - mysqldump,mysqlshow,find,mkdir
Non-mandatory - gzip"""
try:
if subprocess.call('/usr/bin/mysqldump > /dev/null 2>&1', shell=True) == 127:
sys.stderr.write("ERROR:Missing dependancy: mysqldump")
sys.exit(1)
if subprocess.call('/usr/bin/mysqlshow > /dev/null 2>&1', shell=True) == 127:
sys.stderr.write("ERROR:Missing dependancy: mysqlshow")
sys.exit(1)
if subprocess.call('/usr/bin/find > /dev/null 2>&1', shell=True) == 127:
sys.stderr.write("ERROR:Missing dependancy: find")
sys.exit(1)
if subprocess.call('/bin/mkdir > /dev/null 2>&1', shell=True) == 127:
sys.stderr.write("ERROR:Missing dependancy: mkdir")
sys.exit(1)
if not gzip_enabled!="yes":
if subprocess.call('/bin/gzip -h > /dev/null 2>&1', shell=True) == 127:
sys.stderr.write("ERROR:Missing dependancy: gzip")
sys.exit(1)
except subprocess.CalledProcessError:
sys.stderr.write("ERROR:Unable to check for dependencies")
sys.exit(1)
def check_directories(backup_dir_daily,backup_dir_weekly,backup_dir_monthly):
"""Checks if needed directories exist and are writeable"""
command="/bin/mkdir -p "
if not os.access(backup_dir_daily, os.W_OK):
sys.stderr.write("WARNING:Cannot write to daily backupdir OR directory does not exist.\n "+backup_dir_daily+"\n")
try:
daily_command=command+backup_dir_daily
output = subprocess.check_output([daily_command],shell=True).decode('utf-8')
except subprocess.CalledProcessError:
sys.stderr.write("ERROR: Trying to create daily backup dir "+str(daily_command)+"\n")
sys.exit(1)
if not os.access(backup_dir_weekly, os.W_OK):
sys.stderr.write("WARNING:Cannot write to weekly backupdir OR directory does not exist.\n "+backup_dir_daily+"\n")
try:
weekly_command=command+backup_dir_weekly
output = subprocess.check_output([weekly_command],shell=True ).decode('utf-8')
except subprocess.CalledProcessError:
sys.stderr.write("ERROR: Trying to create weekly backup dir "+str(weekly_command)+"\n")
sys.exit(1)
if not os.access(backup_dir_monthly, os.W_OK):
sys.stderr.write("WARNING:Cannot write to weekly backupdir OR directory does not exist.\n "+backup_dir_daily+"\n")
try:
monthly_command=command+backup_dir_monthly
output = subprocess.check_output([monthly_command],shell=True).decode('utf-8')
except subprocess.CalledProcessError:
sys.stderr.write("ERROR: Trying to create monthly backup dir "+str(monthly_command)+"\n")
sys.exit(1)
#except:
# sys.stderr.write("CRITICAL:Main exception")
#try:
if __name__ == '__main__':
main()
#except:
# sys.stderr.write(" Main exception")
# sys.exit(-1)
| 42.353612 | 179 | 0.617829 | import subprocess
import os
import sys
import psutil
import datetime
def check_dependencies():
    """Checks for dependencies and exits with status 1 if one is missing.
    Mandatory dependencies are - mysqldump,mysqlshow,find,mkdir
    Non-mandatory - gzip (only probed when the module-level gzip_enabled == "yes")"""
    # Each probe runs the tool through the shell; /bin/sh reports exit
    # status 127 when the command itself cannot be found.
    try:
        if subprocess.call('/usr/bin/mysqldump > /dev/null 2>&1', shell=True) == 127:
            sys.stderr.write("ERROR:Missing dependancy: mysqldump")
            sys.exit(1)
        if subprocess.call('/usr/bin/mysqlshow > /dev/null 2>&1', shell=True) == 127:
            sys.stderr.write("ERROR:Missing dependancy: mysqlshow")
            sys.exit(1)
        if subprocess.call('/usr/bin/find > /dev/null 2>&1', shell=True) == 127:
            sys.stderr.write("ERROR:Missing dependancy: find")
            sys.exit(1)
        if subprocess.call('/bin/mkdir > /dev/null 2>&1', shell=True) == 127:
            sys.stderr.write("ERROR:Missing dependancy: mkdir")
            sys.exit(1)
        # 'not gzip_enabled != "yes"' is equivalent to gzip_enabled == "yes":
        # the gzip probe only runs when compression is switched on.
        if not gzip_enabled!="yes":
            if subprocess.call('/bin/gzip -h > /dev/null 2>&1', shell=True) == 127:
                sys.stderr.write("ERROR:Missing dependancy: gzip")
                sys.exit(1)
    except subprocess.CalledProcessError:
        # subprocess.call does not raise CalledProcessError; kept as a
        # defensive catch-all from the original author -- NOTE(review).
        sys.stderr.write("ERROR:Unable to check for dependencies")
        sys.exit(1)
def check_if_db_exists(db_name, db_user, ip, port):
    """List the server's databases via mysqlshow and exit(1) unless
    *db_name* appears in the listing (substring match on the output)."""
    show_cmd = f"/usr/bin/mysqlshow -u {db_user} -h {ip} --port={port} "
    try:
        listing = subprocess.check_output([show_cmd], shell=True, stderr=subprocess.PIPE).decode('utf-8')
    except subprocess.CalledProcessError:
        sys.stderr.write(f"ERROR:Unable to check if database exists: {db_name}. User or database does not exist\n")
        sys.exit(1)
    if db_name not in listing:
        sys.stderr.write(f"ERROR:Database does not exist: {db_name}\n")
        sys.exit(1)
def check_directories(backup_dir_daily, backup_dir_weekly, backup_dir_monthly):
    """Ensure the daily/weekly/monthly backup directories exist and are writable.

    A missing directory triggers a warning and is created ('mkdir -p'
    semantics); a directory that cannot be created aborts with exit status 1.

    Bug fixes vs. the original: the monthly branch printed "weekly backupdir",
    and every warning printed the *daily* path regardless of which directory
    actually failed; the three copy-pasted stanzas are folded into one loop.
    Uses os.makedirs instead of shelling out to /bin/mkdir.
    """
    for path, label in ((backup_dir_daily, "daily"),
                        (backup_dir_weekly, "weekly"),
                        (backup_dir_monthly, "monthly")):
        if os.access(path, os.W_OK):
            continue
        sys.stderr.write("WARNING:Cannot write to " + label +
                         " backupdir OR directory does not exist.\n " + path + "\n")
        try:
            os.makedirs(path, exist_ok=True)
        except OSError:
            sys.stderr.write("ERROR: Trying to create " + label + " backup dir " + path + "\n")
            sys.exit(1)
def type_of_backup(weekly_backup_day, monthly_backup_date):
    """Classify today's run as "weekly", "monthly" or "daily".

    Weekly wins when today's weekday name (e.g. "Monday") matches
    *weekly_backup_day*; otherwise monthly when today's day-of-month,
    rendered as a string, equals *monthly_backup_date*; otherwise daily.
    """
    now = datetime.datetime.now()
    if now.strftime("%A") == weekly_backup_day:
        return "weekly"
    if str(now.day) == monthly_backup_date:
        return "monthly"
    return "daily"
def daily_backup_procedure(backup_dir_daily,days_expire_daily,db_name,backup_name,db_user,mysql_dump_cmd):
    """Prune expired daily dumps, then write a fresh dump into the daily dir.

    *mysql_dump_cmd* arrives as a ready mysqldump command line; this function
    appends the output redirection (optionally through gzip) and runs it.
    Exits with status 1 on find/mysqldump failure; returns 0 on success.
    """
    # Find daily dumps older than the retention window (in days).
    command="/usr/bin/find "+backup_dir_daily+" -maxdepth 1 -type f -name \"*.sql*\" -mtime +"+days_expire_daily
    try:
        output = subprocess.check_output([command],shell=True).decode('utf-8')
    except subprocess.CalledProcessError:
        sys.stderr.write("ERROR: Failed to remove old daily backups ")
        sys.exit(1)
    # The last split element is the empty string after the trailing newline.
    for old_file in output.split('\n')[:-1]:
        try:
            os.remove(old_file)
        except FileNotFoundError:
            # Robustness fix: warn and keep pruning instead of aborting the
            # whole loop on the first vanished file.
            sys.stderr.write("WARNING: File not found= "+old_file+"when attempting to remove it")
    if gzip_enabled == "yes":  # equivalent to the old 'not gzip_enabled != "yes"'
        mysql_dump_cmd += "| /bin/gzip > "+backup_dir_daily+"/"+backup_name
    else:
        # Bug fix: this branch was missing the '/' separator, so the plain
        # .sql dump landed next to (not inside) the daily directory.
        mysql_dump_cmd += " > "+backup_dir_daily+"/"+backup_name
    try:
        subprocess.check_output([mysql_dump_cmd],shell=True).decode('utf-8')
    except subprocess.CalledProcessError:
        sys.stderr.write("ERROR:Failed to do daily backup \n")
        sys.exit(1)
    return 0
def weekly_backup_procedure(backup_dir_weekly,days_expire_weekly,db_name,backup_name,db_user,mysql_dump_cmd):
    """Prune expired weekly dumps, then write a fresh dump into the weekly dir.

    *mysql_dump_cmd* arrives as a ready mysqldump command line; the output
    redirection (optionally through gzip, per the module-level gzip_enabled)
    is appended here. Exits with status 1 on failure; returns 0 on success.
    """
    # Find weekly dumps older than the retention window (in days).
    command="/usr/bin/find "+backup_dir_weekly+" -maxdepth 1 -type f -name \"*.sql*\" -mtime +"+days_expire_weekly
    try:
        output = subprocess.check_output([command],shell=True).decode('utf-8')
    except subprocess.CalledProcessError:
        sys.stderr.write("WARNING:Failed to remove old weekly backups \n")
        sys.exit(1) #exit code fix later
    # Last split element is the empty string after the trailing newline.
    output_formatted=output.split('\n')
    try:
        for old_file in output_formatted[:-1]:
            os.remove(old_file)
    except FileNotFoundError:
        # NOTE(review): a single missing file aborts the remaining removals.
        sys.stderr.write("WARNING: File not found="+old_file)
    try:
        # 'not gzip_enabled != "yes"' is equivalent to gzip_enabled == "yes".
        if not gzip_enabled!="yes":
            mysql_dump_cmd+="| /bin/gzip > "+backup_dir_weekly+"/"+backup_name
        else:
            # backup_dir_weekly is expected to end with '/' (see main) -- TODO confirm.
            mysql_dump_cmd+=" > "+backup_dir_weekly+""+backup_name
        output = subprocess.check_output([mysql_dump_cmd],shell=True).decode('utf-8')
    except subprocess.CalledProcessError:
        sys.stderr.write("ERROR: Failed to do weekly backup\n")
        sys.exit(1)
    return 0
def monthly_backup_procedure(backup_dir_monthly,days_expire_monthly,db_name,backup_name,db_user,mysql_dump_cmd):
    """Prune expired monthly dumps, then write a fresh dump into the monthly dir.

    *mysql_dump_cmd* arrives as a ready mysqldump command line; the output
    redirection (optionally through gzip) is appended here. Exits with
    status 1 on failure; returns 0 on success.
    """
    # Find monthly dumps older than the retention window (in days).
    command="/usr/bin/find "+backup_dir_monthly+" -maxdepth 1 -type f -name \"*.sql*\" -mtime +"+days_expire_monthly
    try:
        output = subprocess.check_output([command],shell=True).decode('utf-8')
    except subprocess.CalledProcessError:
        sys.stderr.write("ERROR: Failed to remove old monthly backups\n")
        sys.exit(1)
    # Last split element is the empty string after the trailing newline.
    for old_file in output.split('\n')[:-1]:
        try:
            os.remove(old_file)
        except FileNotFoundError:
            # Warn and keep pruning instead of aborting on the first miss.
            sys.stderr.write("WARNING: File not found="+old_file)
    if gzip_enabled == "yes":  # equivalent to the old 'not gzip_enabled != "yes"'
        # Bug fix: the original used '=' instead of '+=' here, which threw
        # away the whole mysqldump command and left only '| /bin/gzip > file'
        # -- the gzip'd monthly dump was never actually taken.
        mysql_dump_cmd += "| /bin/gzip > "+backup_dir_monthly+"/"+backup_name
    else:
        # backup_dir_monthly ends with '/' (see main), so no separator needed.
        mysql_dump_cmd += "> "+backup_dir_monthly+""+backup_name
    try:
        subprocess.check_output([mysql_dump_cmd],shell=True).decode('utf-8')
    except subprocess.CalledProcessError:
        sys.stderr.write("ERROR: Failed to do monthly backup\n")
        sys.exit(1)
    return 0
def main(config,database):
    """Back up *database* according to *config* (configparser-style mapping).

    Reads everything from config["DEFAULT"]: connection details, directory
    layout, naming template and retention settings, then dispatches to the
    daily/weekly/monthly procedures. Weekly and monthly runs also perform
    the daily backup. Returns 0; the procedures sys.exit(1) on failure.
    """
    #try:
    # gzip_enabled is consumed by the backup procedures and check_dependencies.
    global gzip_enabled
    gzip_enabled = config["DEFAULT"]["gzip_enabled"]
    check_dependencies()
    hostname = config["DEFAULT"]["remote_ip"]
    # Layout: <backup_dir>/<host>/<database>/{daily,weekly/,monthly/}
    # NOTE(review): only the weekly/monthly paths carry a trailing slash.
    backup_dir_daily = config["DEFAULT"]["backup_dir"]+hostname+"/"+database+"/daily"
    backup_dir_weekly = config["DEFAULT"]["backup_dir"]+hostname+"/"+database+"/weekly/"
    backup_dir_monthly = config["DEFAULT"]["backup_dir"]+hostname+"/"+database+"/monthly/"
    # Interactive mode appends '-p' so mysqldump prompts for the password.
    if(config["DEFAULT"]["interactive"]=="yes"):
        mysql_dump_cmd = "/usr/bin/mysqldump --user="+config["DEFAULT"]["db_user"]+" --port="+config["DEFAULT"]["remote_port"]+" --host="+config["DEFAULT"]["remote_ip"]+" -p "
    else:
        mysql_dump_cmd = "/usr/bin/mysqldump --user="+config["DEFAULT"]["db_user"]+ " "+"--port="+config["DEFAULT"]["remote_port"]+" --host="+config["DEFAULT"]["remote_ip"]
    mysql_dump_cmd+=" "+database+" "
    check_if_db_exists(database,config["DEFAULT"]["db_user"],config["DEFAULT"]["remote_ip"],config["DEFAULT"]["remote_port"])
    # Resolve the backup file name: two '~...~' placeholder templates are
    # supported, anything else is used verbatim (with '.sql' appended).
    if(config["DEFAULT"]["backup_name"]=="~date~.sql"):
        backup_name = datetime.datetime.now().strftime("%d")+"-"+datetime.datetime.now().strftime("%m")+".sql"
    elif(config["DEFAULT"]["backup_name"]=="~dbname-date~.sql"):
        backup_name = database+"-"
        backup_name += datetime.datetime.now().strftime("%d")+"-"+datetime.datetime.now().strftime("%m")+".sql"
    else:
        backup_name = config["DEFAULT"]["backup_name"]
        if not (backup_name.endswith(".sql")):
            backup_name += ".sql"
    # 'not gzip_enabled != "yes"' is equivalent to gzip_enabled == "yes".
    if not gzip_enabled != "yes":
        if not (backup_name.endswith(".gz")):
            backup_name += ".gz"
    db_user = config["DEFAULT"]["db_user"]
    check_directories(backup_dir_daily,backup_dir_weekly,backup_dir_monthly)
    backup_type = type_of_backup(config["DEFAULT"]["weekly_backup_day"],config["DEFAULT"]["monthly_backup_date"])
    if backup_type == "daily":
        daily_backup_procedure(backup_dir_daily,config["DEFAULT"]["days_expire_daily"],database,backup_name,config["DEFAULT"]["db_user"],mysql_dump_cmd)
        return 0
    if backup_type == "weekly":
        weekly_backup_procedure(backup_dir_weekly,config["DEFAULT"]["days_expire_weekly"],database,backup_name,config["DEFAULT"]["db_user"],mysql_dump_cmd)
        daily_backup_procedure(backup_dir_daily,config["DEFAULT"]["days_expire_daily"],database,backup_name,config["DEFAULT"]["db_user"],mysql_dump_cmd)
        return 0
    else:
        monthly_backup_procedure(backup_dir_monthly,config["DEFAULT"]["days_expire_monthly"],database,backup_name,config["DEFAULT"]["db_user"],mysql_dump_cmd)
        daily_backup_procedure(backup_dir_daily,config["DEFAULT"]["days_expire_daily"],database,backup_name,config["DEFAULT"]["db_user"],mysql_dump_cmd)
        return 0
#except:
# sys.stderr.write("CRITICAL:Main exception")
#try:
if __name__ == '__main__':
main()
#except:
# sys.stderr.write(" Main exception")
# sys.exit(-1)
| 7,490 | 0 | 162 |
e1899c3d29087299f78a5582a03c652196e1ebbb | 160 | py | Python | cfgov/paying_for_college/forms.py | adebisi-aden/consumerfinance.gov | 8c0f5afac341823c59f73b0c6bd60592e0f5eaca | [
"CC0-1.0"
] | 37 | 2020-08-18T19:52:39.000Z | 2022-03-23T08:08:41.000Z | cfgov/paying_for_college/forms.py | adebisi-aden/consumerfinance.gov | 8c0f5afac341823c59f73b0c6bd60592e0f5eaca | [
"CC0-1.0"
] | 338 | 2020-08-14T20:46:36.000Z | 2022-03-31T20:49:32.000Z | cfgov/paying_for_college/forms.py | adebisi-aden/consumerfinance.gov | 8c0f5afac341823c59f73b0c6bd60592e0f5eaca | [
"CC0-1.0"
] | 14 | 2020-10-21T15:27:03.000Z | 2022-03-17T03:16:36.000Z | from django import forms
| 20 | 59 | 0.66875 | from django import forms
class FeedbackForm(forms.Form):
    """Free-text feedback form rendered as a single styled textarea."""
    # 'a-text-input' is presumably the site's CSS component class -- TODO confirm.
    message = forms.CharField(widget=forms.Textarea(attrs={
        'class': 'a-text-input',
    }))
| 0 | 111 | 23 |
62a6274e4ab46635bafe5141a202e2107793372e | 206 | py | Python | xmemeAPI/admin.py | sukanta-nandi/XMEME-Backend | a532cfca17ccde0aae9faf2a4b937644f61b9894 | [
"MIT"
] | null | null | null | xmemeAPI/admin.py | sukanta-nandi/XMEME-Backend | a532cfca17ccde0aae9faf2a4b937644f61b9894 | [
"MIT"
] | null | null | null | xmemeAPI/admin.py | sukanta-nandi/XMEME-Backend | a532cfca17ccde0aae9faf2a4b937644f61b9894 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import memeData
# Register your models here.
@admin.register(memeData)
| 20.6 | 38 | 0.728155 | from django.contrib import admin
from .models import memeData
# Register your models here.
@admin.register(memeData)
class memeDataAdmin(admin.ModelAdmin):
    """Django-admin configuration for memeData records."""
    # Columns shown in the admin change-list view.
    list_display = ['id', 'name']
    #pass
| 0 | 61 | 22 |
9085eec4b909adc583df9513ab3180e68764b29e | 2,159 | py | Python | example/app.py | KLMatlock/Flask-pyoidc-oda | 132cdbd56889e87a74655b22e8798067ee2af222 | [
"Apache-2.0"
] | null | null | null | example/app.py | KLMatlock/Flask-pyoidc-oda | 132cdbd56889e87a74655b22e8798067ee2af222 | [
"Apache-2.0"
] | 2 | 2020-08-26T19:02:11.000Z | 2021-03-29T20:47:46.000Z | example/app.py | KLMatlock/Flask-pyoidc-oda | 132cdbd56889e87a74655b22e8798067ee2af222 | [
"Apache-2.0"
] | null | null | null | import datetime
import flask
import logging
from flask import Flask, jsonify
import requests
from flask_session import Session
from flask_pyoidc import OIDCAuthentication
from flask_pyoidc.provider_configuration import ProviderConfiguration, ClientMetadata
from flask_pyoidc.user_session import UserSession
app = Flask(__name__)
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
# See http://flask.pocoo.org/docs/0.12/config/
app.config.update({
'SECRET_KEY': 'dev_key', # make sure to change this!!
'PERMANENT_SESSION_LIFETIME': datetime.timedelta(days=7).total_seconds(),
'DEBUG': True,
'SESSION_TYPE': 'filesystem',
'SESSION_FILE_DIR': '/tmp/sessions'})
app.config['OIDC_PROVIDERS'] = 'provider1'
app.config['provider1_ISSUER'] = '<auth base url>'
app.config['provider1_CLIENT'] = '<client id>'
app.config['provider1_SECRET'] = ''
app.config['OIDC_REQUIRED_ROLES'] = "admin"
app.config['OIDC_ROLE_CLAIM'] = "realm_access.roles"
Session(app)
auth = OIDCAuthentication( app=app)
@app.route('/')
@auth.oidc_auth(bearer = True)
@app.route('/bearer_test')
@auth.oidc_auth()
@app.route('/hello')
@auth.oidc_auth()
@app.route('/logout')
@auth.oidc_logout
@auth.error_view
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
auth.init_app(app)
if __name__ == '__main__':
app.run(host='0.0.0.0')
| 26.9875 | 103 | 0.718388 | import datetime
import flask
import logging
from flask import Flask, jsonify
import requests
from flask_session import Session
from flask_pyoidc import OIDCAuthentication
from flask_pyoidc.provider_configuration import ProviderConfiguration, ClientMetadata
from flask_pyoidc.user_session import UserSession
app = Flask(__name__)
import logging
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
# See http://flask.pocoo.org/docs/0.12/config/
app.config.update({
'SECRET_KEY': 'dev_key', # make sure to change this!!
'PERMANENT_SESSION_LIFETIME': datetime.timedelta(days=7).total_seconds(),
'DEBUG': True,
'SESSION_TYPE': 'filesystem',
'SESSION_FILE_DIR': '/tmp/sessions'})
app.config['OIDC_PROVIDERS'] = 'provider1'
app.config['provider1_ISSUER'] = '<auth base url>'
app.config['provider1_CLIENT'] = '<client id>'
app.config['provider1_SECRET'] = ''
app.config['OIDC_REQUIRED_ROLES'] = "admin"
app.config['OIDC_ROLE_CLAIM'] = "realm_access.roles"
Session(app)
auth = OIDCAuthentication( app=app)
@app.route('/')
@auth.oidc_auth(bearer = True)
def login1():
    """Authenticated landing page: echo the OIDC session's tokens as JSON."""
    user_session = UserSession(flask.session)
    return jsonify(access_token=user_session.access_token,
                   id_token=user_session.id_token,
                   userinfo=user_session.userinfo)
@app.route('/bearer_test')
@auth.oidc_auth()
def bearer_test():
    """Demonstrate forwarding the session's bearer token to a remote API."""
    user_session = UserSession(flask.session)
    # Ready-to-use header for the commented-out remote call below.
    headers = {'Authorization': 'Bearer ' + user_session.access_token}
    #diagnoses_response = requests.get('<remote_url_with_bearer_enabled>',
    #                                  headers=headers)
    print('done!')
    # Bug fix: a bare bool is not a valid Flask response type (Flask raises
    # TypeError when converting the view's return value); return a string.
    return 'done!'
@app.route('/hello')
@auth.oidc_auth()
def hello_auth():
    """Minimal endpoint that requires a valid OIDC login."""
    return 'Hello world!'
@app.route('/logout')
@auth.oidc_logout
def logout():
    """Confirmation page; the decorator performs the actual OIDC logout."""
    return "You've been successfully logged out!"
@auth.error_view
def error(error=None, error_description=None):
    """OIDC error handler: surface provider errors as a JSON payload."""
    return jsonify({'error': error, 'message': error_description})
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
auth.init_app(app)
if __name__ == '__main__':
app.run(host='0.0.0.0')
| 623 | 0 | 110 |
f456e29c3077711b21263f879b42e306461b0af3 | 2,701 | py | Python | onadata/apps/api/migrations/0001_initial.py | childhelpline/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 1 | 2018-07-15T13:13:43.000Z | 2018-07-15T13:13:43.000Z | onadata/apps/api/migrations/0001_initial.py | aondiaye/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 14 | 2018-07-10T12:48:46.000Z | 2022-03-11T23:24:51.000Z | onadata/apps/api/migrations/0001_initial.py | aondiaye/myhelpline | d72120ee31b6713cbaec79f299f5ee8bcb7ea429 | [
"BSD-3-Clause"
] | 5 | 2018-07-04T07:59:14.000Z | 2020-01-28T07:50:18.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
| 37.513889 | 76 | 0.504998 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('logger', '0001_initial'),
('main', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='OrganizationProfile',
fields=[
('userprofile_ptr', models.OneToOneField(
parent_link=True, auto_created=True, primary_key=True,
serialize=False, to='main.UserProfile')),
('is_organization', models.BooleanField(default=True)),
('creator', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'permissions': (
('can_add_xform',
'Can add/upload an xform to organization'),
('view_organizationprofile',
'Can view organization profile')),
},
bases=('main.userprofile',),
),
migrations.CreateModel(
name='Team',
fields=[
('group_ptr', models.OneToOneField(
parent_link=True, auto_created=True, primary_key=True,
serialize=False, to='auth.Group')),
('date_created', models.DateTimeField(
auto_now_add=True, null=True)),
('date_modified', models.DateTimeField(
auto_now=True, null=True)),
('created_by', models.ForeignKey(
related_name='team_creator', blank=True,
to=settings.AUTH_USER_MODEL, null=True)),
('organization',
models.ForeignKey(to=settings.AUTH_USER_MODEL)),
('projects', models.ManyToManyField(to='logger.Project')),
],
options={
'permissions': (('view_team', 'Can view team.'),),
},
bases=('auth.group',),
),
migrations.CreateModel(
name='TempToken',
fields=[
('key', models.CharField(max_length=40, serialize=False,
primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('user', models.OneToOneField(related_name='_user',
to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
]
| 0 | 2,538 | 23 |
9d1c1b779ce0314a41fea46de857cf536106ab9a | 2,657 | py | Python | admin01/guilin.py | ckjh/education | 9540c0550097db5742b66b7d831d15a3785c7629 | [
"MIT"
] | null | null | null | admin01/guilin.py | ckjh/education | 9540c0550097db5742b66b7d831d15a3785c7629 | [
"MIT"
] | null | null | null | admin01/guilin.py | ckjh/education | 9540c0550097db5742b66b7d831d15a3785c7629 | [
"MIT"
] | null | null | null | # 章节列表
# 序列化 ...
# 章节序列化
# 章节反序列化
| 27.968421 | 60 | 0.479488 | # 章节列表
class SectionView(APIView):
    """CRUD endpoints for course sections.

    Every handler answers HTTP 200 with an application-level status dict:
    'code' (200 on success, 10010/10020/10030/400 on failure) plus a
    human-readable 'message'/'msg' (Chinese, part of the API contract).
    """
    def get( self, request):
        # List all sections via the read-side ModelSerializer.
        mes = {}
        try:
            section = Section.objects.all()
            s = SectionSerializersModel(section,many=True)
            mes['code'] =200
            mes['message'] ='ok'
            mes['dataList'] =s.data
        except:
            # NOTE(review): bare except keeps the endpoint from 500ing but
            # also hides programming errors.
            mes['code'] =10010
            mes['message'] ='数据库请求失败'
        return Response(mes)
    def post( self, request):
        # Create a section; 'video' is first resolved to a storable URL/path.
        mes = {}
        data = request.data
        data[ 'video' ] = get_pic_url ( data[ 'video' ] )
        if data:
            s = SectionSerializers(data=data)
            if s.is_valid():
                s.save()
                mes[ 'code' ] = 200
                mes[ 'message' ] = 'ok'
            else:
                print(s.errors)
                mes[ 'code' ] = 10020
                mes[ 'message' ] = '添加失败'
        else:
            mes[ 'code' ] = 10030
            mes[ 'message' ] = '获取数据不全'
        return Response(mes)
    def put( self, request):
        # Full update of the section identified by data['id'].
        # copy() is needed because request.data may be immutable (QueryDict).
        data = request.data.copy ()
        data[ 'video' ] = get_pic_url ( data[ 'video' ] )
        print ( data )
        c1 = Section.objects.get ( id=data[ 'id' ] )
        ser = SectionSerializers ( c1, data=data )
        mes = {}
        if ser.is_valid ():
            ser.save ()
            mes[ 'code' ] = 200
            mes[ 'msg' ] = 'ok'
        else:
            print ( ser.errors )
            mes[ 'code' ] = 400
            mes[ 'msg' ] = '失败'
        return Response ( mes )
    def delete(self, request):
        # Delete by primary key; Section.objects.get raises (uncaught) when
        # the id does not exist -- NOTE(review).
        id = request.data['id']
        mes = {}
        if id:
            Section.objects.get(id=id).delete()
            mes['code'] = 200
            mes['msg'] = "删除成功"
        else:
            mes['code'] = 400
            mes['msg'] = "删除失败"
        return Response(mes)
# Serializers ...
# Section serializer (serialization / read side)
class SectionSerializersModel(serializers.ModelSerializer):
    """Read side: serialize every field of a Section row as-is."""
    class Meta:
        model = Section
        fields = '__all__'
# Section deserializer (write side)
class SectionSerializers(serializers.Serializer):
    """Write side for Section rows.

    Fields mirror the Section model: parent course id, section title,
    video URL/path and sort order.
    """
    course = serializers.IntegerField()
    section = serializers.CharField()
    video = serializers.CharField()
    sort = serializers.IntegerField()
    def create(self, data):
        # Insert a new Section row from the validated payload.
        m = Section.objects.create(**data)
        return m
    def update(self, instance, validated_data):
        # Bug fix: the original copied values onto name/desc/pic (fields of
        # a different model) and assigned 'pic' twice, so PUT never updated
        # the real columns. Map each validated field onto its own attribute,
        # matching the keys create() persists.
        instance.course = validated_data['course']
        instance.section = validated_data['section']
        instance.video = validated_data['video']
        instance.sort = validated_data['sort']
        instance.save()
        return instance
| 2,118 | 357 | 182 |
55f3670d95e12a8c9bbdb109da1cb86268a2f44f | 2,786 | py | Python | Zebrafish spinal locomotor circuit/Version 2/Single_coiling_MN_KO.py | Bui-lab/Code | 6ce5972a4bd0c059ab167522ab1d945f3b0f5707 | [
"MIT"
] | null | null | null | Zebrafish spinal locomotor circuit/Version 2/Single_coiling_MN_KO.py | Bui-lab/Code | 6ce5972a4bd0c059ab167522ab1d945f3b0f5707 | [
"MIT"
] | null | null | null | Zebrafish spinal locomotor circuit/Version 2/Single_coiling_MN_KO.py | Bui-lab/Code | 6ce5972a4bd0c059ab167522ab1d945f3b0f5707 | [
"MIT"
] | 2 | 2021-08-25T08:14:52.000Z | 2021-11-29T12:56:17.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 08:41:10 2017
@author: Yann Roussel and Tuan Bui
Edited by: Emine Topcu on Sep 2021
"""
from Single_coiling_model import Single_coil_base
| 56.857143 | 199 | 0.590811 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 08:41:10 2017
@author: Yann Roussel and Tuan Bui
Edited by: Emine Topcu on Sep 2021
"""
from Single_coiling_model import Single_coil_base
class Single_coil_MN_KO(Single_coil_base):
    """Single-coiling model with a motoneuron (MN) "knock-out" window:
    for tMNKOstart < t < tMNKOend the MNs are integrated with zero input,
    silencing both their synaptic and gap-junction drive.
    """
    # Knock-out window bounds, compared directly against the step index t
    # -- presumably time steps, not ms; TODO confirm against the base class.
    tMNKOstart = 50000
    tMNKOend = 100000
    def __init__(self, dt = 0.1, stim0 = 8, sigma = 0,
                    E_glu = 0, E_gly = -70, cv = 0.55, nIC = 5, nMN = 10, nV0d = 10, nMuscle = 10):
        # Base-class construction plus the fixed connectome weights/ranges
        # used by this variant (values copied from the single-coiling model).
        super().__init__(dt, stim0, sigma,
                            E_glu, E_gly, cv, nIC, nMN, nV0d, nMuscle)
        super().setWeightParameters(IC_IC_gap_weight = 0.001, IC_MN_gap_weight = 0.04, IC_V0d_gap_weight = 0.05,
                                        MN_MN_gap_weight = 0.1, V0d_V0d_gap_weight = 0.04, MN_V0d_gap_weight = 0.01,
                                        V0d_MN_syn_weight = 2.0, V0d_IC_syn_weight = 2.0, MN_Muscle_syn_weight = 0.015)
        super().setRangeParameters(rangeMin = 0.2, rangeIC_MN = 10, rangeIC_V0d = 10, rangeMN_MN = 6.5, rangeV0d_V0d = 3.5,
                                    rangeMN_V0d = 1.5, rangeV0d_MN = 8, rangeV0d_IC = 20, rangeMN_Muscle = 1)
    def calcMNPotentialsandResidues(self, t):
        # Advance every left/right MN one integration step and record its
        # membrane potential for step t.
        for k in range (0, self.nMN):
            if t < (self.tshutoff/self.dt): #Synaptic currents are shut off for the first 50 ms of the sims to let initial conditions subside
                IsynL= 0.0
                IsynR= 0.0
            else:
                # Crossed V0d -> MN synaptic drive (contralateral inputs).
                IsynL = sum(self.RSyn_V0d_MN[self.nMN*l+k,0]*self.RW_V0d_MN[l,k] for l in range (0, self.nV0d))
                IsynR = sum(self.LSyn_V0d_MN[self.nMN*l+k,0]*self.LW_V0d_MN[l,k] for l in range (0, self.nV0d))
            if t > self.tMNKOstart and t < self.tMNKOend:
                # Knock-out window: integrate the MNs with zero total input.
                self.resLMN[k,:] = self.L_MN[k].getNextVal(self.resLMN[k,0],self.resLMN[k,1], 0)
                self.resRMN[k,:] = self.R_MN[k].getNextVal(self.resRMN[k,0],self.resRMN[k,1], 0)
            else:
                #if k == 5: # this is to hyperpolarize a MN to observe periodic depolarizations and synaptic bursts
                #    IsynL = IsynL - 10
                # Net gap-junction current: outgoing couplings are subtracted,
                # incoming couplings added (IC, MN-MN and V0d partners).
                IgapL = - sum(self.LSGap_MN_IC[k,:]) + sum(self.LSGap_IC_MN[:,k]) - sum(self.LSGap_MN_MN[k,:]) + sum(self.LSGap_MN_MN[:,k]) - sum(self.LSGap_MN_V0d[k,:]) + sum(self.LSGap_V0d_MN[:,k])
                IgapR = - sum(self.RSGap_MN_IC[k,:]) + sum(self.RSGap_IC_MN[:,k]) - sum(self.RSGap_MN_MN[k,:]) + sum(self.RSGap_MN_MN[:,k]) - sum(self.RSGap_MN_V0d[k,:]) + sum(self.RSGap_V0d_MN[:,k])
                self.resLMN[k,:] = self.L_MN[k].getNextVal(self.resLMN[k,0],self.resLMN[k,1], IgapL + IsynL)
                self.resRMN[k,:] = self.R_MN[k].getNextVal(self.resRMN[k,0],self.resRMN[k,1], IgapR + IsynR)
            # Store this step's membrane potentials in the recording arrays.
            self.VLMN[k,t] = self.resLMN[k,0]
            self.VRMN[k,t] = self.resRMN[k,0]
| 2,429 | 121 | 23 |
2cb176a75a9d45bf615f1f167aa8d3cc9191262f | 1,978 | py | Python | src/utils/send_email.py | HaoJiangGuo/fp-server | 9c00b8f0ee64049eb9f214c3efe1fdee977542a6 | [
"MIT"
] | 173 | 2018-06-10T14:21:05.000Z | 2022-03-24T09:24:35.000Z | src/utils/send_email.py | HaoJiangGuo/fp-server | 9c00b8f0ee64049eb9f214c3efe1fdee977542a6 | [
"MIT"
] | 19 | 2018-06-17T08:33:04.000Z | 2021-02-09T07:10:41.000Z | src/utils/send_email.py | HaoJiangGuo/fp-server | 9c00b8f0ee64049eb9f214c3efe1fdee977542a6 | [
"MIT"
] | 45 | 2018-06-10T14:49:12.000Z | 2022-03-24T09:24:38.000Z | # -*- coding:utf-8 -*-
"""
发送邮件
"""
import email
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
if __name__ == "__main__":
test()
| 27.472222 | 78 | 0.600607 | # -*- coding:utf-8 -*-
"""
发送邮件
"""
import email
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class MailSender(object):
    """Small helper that sends one plain-text e-mail over SMTP + STARTTLS."""
    def __init__(self, host, username, password, to_emails, subject, content):
        """ Initialize the sender.
        @param host SMTP server host
        @param username sender account / login name
        @param password account password
        @param to_emails list of recipient addresses
        @param subject message subject
        @param content plain-text message body
        """
        self.host = host
        self.username = username
        self.password = password
        self.to_emails = to_emails
        self.subject = subject
        self.content = content
    def send_mail(self):
        """ Build the MIME message and deliver it via SMTP.
        """
        # multipart/related envelope with a multipart/alternative body
        # holding a single GB2312-encoded plain-text part.
        msgRoot = MIMEMultipart('related')
        msgRoot['Subject'] = self.subject
        msgRoot['From'] = self.username
        msgRoot['To'] = ",".join(self.to_emails)
        msgRoot['Date'] = email.utils.formatdate()
        msgRoot.preamble = 'This is a multi-part message in MIME format.'
        msgAlternative = MIMEMultipart('alternative')
        msgRoot.attach(msgAlternative)
        msgText = MIMEText(self.content, 'plain', 'GB2312')
        msgAlternative.attach(msgText)
        # msgText = MIMEText(self.content, 'html', 'GB2312')
        # msgAlternative.attach(msgText)
        # sending mail: connect on port 25, upgrade to TLS, authenticate.
        svr = smtplib.SMTP(self.host, 25)
        # svr = smtplib.SMTP(self.host)
        svr.set_debuglevel(0)
        svr.ehlo()
        svr.starttls()
        svr.login(self.username, self.password)
        svr.sendmail(self.username, self.to_emails, msgRoot.as_string())
        svr.quit()
def test():
    """Smoke-test MailSender with placeholder credentials (needs a real SMTP host)."""
    sender = MailSender(
        host='smtp.xxx.com',
        username='test@xxxx.com',
        password='123456',
        to_emails=['valesail7@gmail.com'],
        subject='Test Send Email',
        content="Just a test.",
    )
    sender.send_mail()


if __name__ == "__main__":
    test()
| 275 | 1,521 | 46 |
457fe4ab3be3a4a162e2f2354e33660cec704ad0 | 4,358 | py | Python | tests/bugs/cisco/test_cscvg76186.py | Dom932/device-bug-checker | f1b04813ac4bddc07cc0c3ce62640246fcaaf34f | [
"MIT"
] | null | null | null | tests/bugs/cisco/test_cscvg76186.py | Dom932/device-bug-checker | f1b04813ac4bddc07cc0c3ce62640246fcaaf34f | [
"MIT"
] | null | null | null | tests/bugs/cisco/test_cscvg76186.py | Dom932/device-bug-checker | f1b04813ac4bddc07cc0c3ce62640246fcaaf34f | [
"MIT"
] | null | null | null | import pytest
from bugs.cisco import CSCvg76186
from devices.cisco import CiscoIOS
unaffected_output1 = "Role: Client (SmartInstall Disabled)"
unaffected_output2 = "Capability: Client\nOper Mode: Disabled\nRole: NA"
affected_output1 = "Role: Client (SmartInstall enabled)"
affected_output2 = "Capability: Client\nOper Mode: Enabled\nRole: Client"
not_supported = "Command not supported on platform"
@pytest.fixture()
@pytest.fixture()
| 34.864 | 113 | 0.688619 | import pytest
from bugs.cisco import CSCvg76186
from devices.cisco import CiscoIOS
unaffected_output1 = "Role: Client (SmartInstall Disabled)"
unaffected_output2 = "Capability: Client\nOper Mode: Disabled\nRole: NA"
affected_output1 = "Role: Client (SmartInstall enabled)"
affected_output2 = "Capability: Client\nOper Mode: Enabled\nRole: Client"
not_supported = "Command not supported on platform"
@pytest.fixture()
def bug():
    """Yield a fresh CSCvg76186 checker instance for each test."""
    instance = CSCvg76186()
    return instance
@pytest.fixture()
def mock_connection():
    """Build a stand-in device connection whose 'vstack' output is configurable."""

    class FakeConnection:
        """Minimal stub mimicking a netmiko-style connection object."""

        def __init__(self):
            # starts "connected"; command output defaults to the unsupported message
            self.connection = True
            self.vstack_output = not_supported

        def is_alive(self):
            return self.connection

        def disconnect(self):
            self.connection = False

        def send_command(self, input):
            return self.vstack_output

    return FakeConnection()
class TestCSCvg76186:
    """Unit tests for the CSCvg76186 (Smart Install / CVE-2018-0171) bug checker."""

    def test_instance(self, bug):
        """ Test object instance """
        # was duplicated; one assertion is sufficient
        assert isinstance(bug, CSCvg76186)

    def test_bug_description(self):
        """ Test bug_description method. Note only tests that result is not null"""
        assert CSCvg76186.bug_description() is not None

    def test_bug_reference(self):
        """ Test bug_reference method. Note only tests that result is not null"""
        assert CSCvg76186.bug_reference() is not None

    def test_manufacture_bug_id(self):
        """ Test manufacture_bug_id method """
        assert CSCvg76186.manufacture_bug_id() == "CSCvg76186"

    def test_cve_id(self):
        """ Test cve_id method """
        assert CSCvg76186.cve_id() == "CVE-2018-0171"

    def test_bug_severity(self):
        """ Test bug_severity method """
        assert CSCvg76186.bug_severity() == "Critical"

    def test_connection_requirements(self):
        """ Test connection_requirements method """
        assert CSCvg76186.connection_requirements() == ['connection']

    def test_device_type_requirements(self):
        """ Test device_type_requirements method"""
        assert CSCvg76186.device_type_requirements() == ('cisco_ios', 'cisco_ios_ssh_telnet', 'cisco_ios_telnet')

    def test_manufacture(self):
        """ Test manufacture method """
        assert CSCvg76186.manufacture() == "Cisco"

    def test_enable_mode_required(self):
        """ Test if enable_mode_required method """
        assert CSCvg76186.enable_mode_required() is False

    def test_affected_devices(self):
        """ Test affected_devices method """
        assert CSCvg76186.affected_devices() == ['Switch']

    def test_remediate_implimented(self):
        """ Test remediate_implemented method """
        assert CSCvg76186.remediate_implemented() is False

    def test_check_bug_unaffected1(self, bug, mock_connection):
        """ Test check_bug method with an unaffected device """
        mock_connection.vstack_output = unaffected_output1
        result = bug.check_bug(mock_connection)
        assert result.impacted is False

    def test_check_bug_unaffected2(self, bug, mock_connection):
        """ Test check_bug method with an unaffected device """
        mock_connection.vstack_output = unaffected_output2
        result = bug.check_bug(mock_connection)
        assert result.impacted is False

    def test_check_bug_affected1(self, bug, mock_connection):
        """ Test check_bug method with an affected device """
        mock_connection.vstack_output = affected_output1
        result = bug.check_bug(mock_connection)
        assert result.impacted is True

    def test_check_bug_affected2(self, bug, mock_connection):
        """ Test check_bug method with an affected device """
        mock_connection.vstack_output = affected_output2
        result = bug.check_bug(mock_connection)
        assert result.impacted is True

    def test_check_bug_command_not_supported(self, bug, mock_connection):
        """ Check check_bug method output returns not supported when it is unable to get vstack output"""
        # mock_connection's default vstack_output is the not-supported message
        result = bug.check_bug(mock_connection)
        assert result.impacted is False
        assert result.output == not_supported

    def test_remediate(self, bug):
        """ Test remediate method not implemented """
        with pytest.raises(Exception):
            bug.remediate()
| 433 | 3,414 | 67 |
6ea84700ad1e51266607033beb6050680d87bcd8 | 1,394 | py | Python | pammfauthenticator/pammfauthenticator.py | cmd-ntrf/pammfauthenticator | 856b10282f560ae4a58fb3058350d51f6d257a89 | [
"MIT"
] | null | null | null | pammfauthenticator/pammfauthenticator.py | cmd-ntrf/pammfauthenticator | 856b10282f560ae4a58fb3058350d51f6d257a89 | [
"MIT"
] | 1 | 2020-11-09T16:24:04.000Z | 2020-11-11T21:56:25.000Z | pammfauthenticator/pammfauthenticator.py | cmd-ntrf/pammfauthenticator | 856b10282f560ae4a58fb3058350d51f6d257a89 | [
"MIT"
] | null | null | null | import pamela
from jupyterhub.auth import PAMAuthenticator
from jupyterhub.handlers.login import LoginHandler
from tornado.concurrent import run_on_executor
from tornado.escape import url_escape
from tornado.httputil import url_concat
| 35.74359 | 81 | 0.65208 | import pamela
from jupyterhub.auth import PAMAuthenticator
from jupyterhub.handlers.login import LoginHandler
from tornado.concurrent import run_on_executor
from tornado.escape import url_escape
from tornado.httputil import url_concat
class PAMMFALoginHandler(LoginHandler):
    """Login handler that renders a login page containing an extra OTP field."""

    def _render(self, login_error=None, username=None):
        """Render ``login_otp.html`` with the same context as the stock login page."""
        next_arg = self.get_argument('next', default='')
        auth_login_url = url_concat(
            self.authenticator.login_url(self.hub.base_url),
            {'next': next_arg},
        )
        return self.render_template(
            'login_otp.html',
            next=url_escape(next_arg),
            username=username,
            login_error=login_error,
            custom_html=self.authenticator.custom_html,
            login_url=self.settings['login_url'],
            authenticator_login_url=auth_login_url,
        )
class PAMMFAuthenticator(PAMAuthenticator):
    """PAM authenticator that forwards an optional one-time password (OTP)."""

    def authenticate(self, handler, data):
        """Authenticate with PAM, and return the username if login is successful.
        Return None otherwise.

        When an 'otp' field is present, the password is passed to PAM as a
        [password, otp] pair so a second PAM prompt can be answered.
        """
        username = data['username']
        credentials = data['password']
        if data.get('otp'):
            credentials = [credentials, data['otp']]
        return super().authenticate(
            handler, {'username': username, 'password': credentials}
        )

    def get_handlers(self, app):
        """Route /login to the OTP-aware login handler."""
        return [('/login', PAMMFALoginHandler),]
| 590 | 496 | 72 |