blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
88ebec75815792ee70d0fd841925d841c72a7b81 | 46d3b41f98164bd869ac7f987a644ba0d6839e2c | /accounts/api/views.py | e95bfbe80b8ff95bab7c8053668fe9576ac11920 | [] | no_license | Dimas4/Blog | dc22392c82ffb7050418d05b4f149d79c91bca30 | c9d6322bcf0a4b35f80c51830d1f4d5ad7f41bb3 | refs/heads/master | 2021-07-22T00:31:59.491338 | 2018-12-01T12:55:21 | 2018-12-01T12:55:21 | 135,898,131 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 839 | py | from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import UserCreateSerializer
class Register(APIView):
def post(self, request, *args, **kwargs):
serializer = UserCreateSerializer(data=request.data)
if serializer.is_valid():
username = serializer.data.get('username')
email = serializer.data.get('email')
password = serializer.data.get('password')
user = User.objects.create_user(username, email, password)
token = Token.objects.create(user=user)
context = {
'key': token.key
}
return Response(context)
return Response(data=serializer.errors)
| [
"vaniashatuhomail@mail.ru"
] | vaniashatuhomail@mail.ru |
21ce77bc33800933fde5ac7c54caaaf1d046f917 | 74e53273dc5aa71293a385512b3d239971099738 | /Data_structures_and_Algorithms/arrays/four_numbers_sum_k.py | 806cf72e986f8984ee790d73a21007df20d839de | [] | no_license | BJV-git/Data_structures_and_Algorithms | 3b240bf699e7091453f3a1459b06da1af050c415 | 393c504b2bb17b19e76f6d9d9cce948b4c12dbb2 | refs/heads/master | 2020-04-23T22:32:22.525542 | 2019-02-19T16:09:51 | 2019-02-19T16:09:51 | 171,504,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # logic: O(n3)
def four_sum_k(nums,k):
nums.sort()
lnums=len(nums)
for i in range(lnums-3):
for j in range(i+1, lnums-2):
l=j+1
r = lnums-1
while (l<r):
summ = nums[i]+nums[j] + nums[l] + nums[r]
if summ==k:
l+=1
r-=1
elif summ < k:
l+=1
else:
r-=1 | [
"noreply@github.com"
] | BJV-git.noreply@github.com |
7aa9c8bd9de4bc0f6e608c6eaeb06f35d347808a | 8d47d0bdf0f3bcc8c8f82e7624e391ba2353efe1 | /openstack_dashboard/dashboards/admin/projects/tables.py | 643ac828a0d5f95e54ed87ee06949a210829cd91 | [
"Apache-2.0"
] | permissive | cosgrid001/cosgrid_hh | 48328bbfae69f9978b82fe2c94799fbf8bc978b2 | 9b4dbf3c9c134f0c08c7d0330a3d0e69af12a8f4 | refs/heads/master | 2020-01-23T21:03:04.242315 | 2016-12-11T05:39:33 | 2016-12-11T05:39:33 | 74,579,908 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,882 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.exceptions import ValidationError # noqa
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from keystoneclient.exceptions import Conflict # noqa
from openstack_dashboard import api
from openstack_dashboard.api import keystone
class ViewMembersLink(tables.LinkAction):
name = "users"
verbose_name = _("Modify Users")
url = "horizon:admin:projects:update"
classes = ("ajax-modal", "btn-edit")
# policy_rules = (("identity", "identity:list_users"),
# ("identity", "identity:list_roles"))
def get_link_url(self, project):
step = 'update_members'
base_url = reverse(self.url, args=[project.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
class ViewGroupsLink(tables.LinkAction):
name = "groups"
verbose_name = _("Modify Groups")
url = "horizon:admin:projects:update"
classes = ("ajax-modal", "btn-edit")
def allowed(self, request, project):
return keystone.VERSIONS.active >= 3
def get_link_url(self, project):
step = 'update_group_members'
base_url = reverse(self.url, args=[project.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
class UsageLink(tables.LinkAction):
name = "usage"
verbose_name = _("View Usage")
url = "horizon:admin:projects:usage"
classes = ("btn-stats",)
# policy_rules = (("compute", "compute_extension:simple_tenant_usage:show"),)
class CreateProject(tables.LinkAction):
name = "create"
verbose_name = _("Create Project")
url = "horizon:admin:projects:create"
classes = ("btn-launch", "ajax-modal",)
# policy_rules = (('identity', 'identity:create_project'),)
def allowed(self, request, project):
return api.keystone.keystone_can_edit_project()
class UpdateProject(tables.LinkAction):
name = "update"
verbose_name = _("Edit Project")
url = "horizon:admin:projects:update"
classes = ("ajax-modal", "btn-edit")
# policy_rules = (('identity', 'identity:update_project'),)
def allowed(self, request, project):
return api.keystone.keystone_can_edit_project()
class ModifyQuotas(tables.LinkAction):
name = "quotas"
verbose_name = _("Modify Quotas")
url = "horizon:admin:projects:update"
classes = ("ajax-modal", "btn-edit")
# policy_rules = (('compute', "compute_extension:quotas:update"),)
def get_link_url(self, project):
step = 'update_quotas'
base_url = reverse(self.url, args=[project.id])
param = urlencode({"step": step})
return "?".join([base_url, param])
class DeleteTenantsAction(tables.DeleteAction):
data_type_singular = _("Project")
data_type_plural = _("Projects")
# policy_rules = (("identity", "identity:delete_project"),)
def allowed(self, request, project):
return api.keystone.keystone_can_edit_project()
def delete(self, request, obj_id):
api.keystone.tenant_delete(request, obj_id)
class TenantFilterAction(tables.FilterAction):
def filter(self, table, tenants, filter_string):
"""Really naive case-insensitive search."""
# FIXME(gabriel): This should be smarter. Written for demo purposes.
q = filter_string.lower()
def comp(tenant):
if q in tenant.name.lower():
return True
return False
return filter(comp, tenants)
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, project_id):
project_info = api.keystone.tenant_get(request, project_id,
admin=True)
return project_info
class UpdateCell(tables.UpdateAction):
def allowed(self, request, project, cell):
return api.keystone.keystone_can_edit_project()
def update_cell(self, request, datum, project_id,
cell_name, new_cell_value):
# inline update project info
try:
project_obj = datum
# updating changed value by new value
setattr(project_obj, cell_name, new_cell_value)
api.keystone.tenant_update(
request,
project_id,
name=project_obj.name,
description=project_obj.description,
enabled=project_obj.enabled)
except Conflict:
# Returning a nice error message about name conflict. The message
# from exception is not that clear for the users.
message = _("This name is already taken.")
raise ValidationError(message)
except Exception:
exceptions.handle(request, ignore=True)
return False
return True
class TenantsTable(tables.DataTable):
name = tables.Column('name', verbose_name=_('Name'),
form_field=forms.CharField(required=True,
max_length=64),
update_action=UpdateCell)
description = tables.Column(lambda obj: getattr(obj, 'description', None),
verbose_name=_('Description'),
form_field=forms.CharField(
widget=forms.Textarea(),
required=False),
update_action=UpdateCell)
id = tables.Column('id', verbose_name=_('Project ID'))
enabled = tables.Column('enabled', verbose_name=_('Enabled'), status=True,
form_field=forms.BooleanField(
label=_('Enabled'),
required=False),
update_action=UpdateCell)
class Meta:
name = "tenants"
verbose_name = _("Projects")
row_class = UpdateRow
row_actions = (ViewMembersLink, ViewGroupsLink, UpdateProject,
UsageLink, ModifyQuotas, DeleteTenantsAction)
table_actions = (TenantFilterAction, CreateProject,
DeleteTenantsAction)
pagination_param = "tenant_marker"
| [
"jayaprakash.r@cloudenablers.com"
] | jayaprakash.r@cloudenablers.com |
174bd9cd9df62a07c54b3f9e40b8fd97e11f162b | 009bf39bb082d175538d9c667009cadea367b005 | /erp_customization/config/desktop.py | ded7fbf7c1954c72dafd7cab5945bf8888bea260 | [
"MIT"
] | permissive | mbhavesh95863/erp_customization | 0d3c5c107a44b126c5bc085f8f7964fc8c7167fd | 33cb6c98b3b95a221462cea30033efdbca12cbed | refs/heads/master | 2020-05-17T10:07:15.373067 | 2019-04-30T07:41:50 | 2019-04-30T07:41:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "ERP Customization",
"color": "Blue",
"icon": "Bhavesh",
"type": "module",
"label": _("ERP Customization")
}
]
| [
"erpnextdeveloper1@gmail.com"
] | erpnextdeveloper1@gmail.com |
8e8ccb975ca50708e3d1a12dfc8247901cdc5286 | e3d268f17f9b0c43e14c080fca9052765de86614 | /musette/forms.py | bb0630f32e8069dd62e4cc9c5e4cb36c6120dd42 | [] | no_license | tawanda/django-musette | 401559e2670160d34a145f7eff80c2bba3674388 | 19a35531999db357df2dfa79b2d81a78ca15d7d3 | refs/heads/master | 2020-04-05T23:44:49.635378 | 2015-08-02T20:45:08 | 2015-08-02T20:45:08 | 40,992,561 | 1 | 0 | null | 2015-08-18T19:15:16 | 2015-08-18T19:15:15 | Python | UTF-8 | Python | false | false | 5,310 | py | # encoding:utf-8
from django import forms
from django.forms.widgets import ClearableFileInput, CheckboxInput
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from .utils import basename
from .models import Topic, Comment
from .widgets import TextareaWidget
class FormAdminTopic(forms.ModelForm):
'''
Form for topic cadmin
'''
class Meta:
model = Topic
exclude = ('slug', 'id_attachment')
widgets = {
'description': TextareaWidget,
}
class FormAddTopic(forms.ModelForm):
'''
Form for create one new topic
'''
class Meta:
model = Topic
exclude = (
'forum', "user", "slug", "date",
"id_attachment", "moderate", "is_top")
widgets = {
'description': TextareaWidget,
}
def __init__(self, *args, **kwargs):
super(FormAddTopic, self).__init__(*args, **kwargs)
class_css = 'form-control'
for key in self.fields:
if key != "attachment":
self.fields[key].required = True
self.fields[key].widget.attrs['ng-model'] = key
self.fields[key].widget.attrs['class'] = class_css
self.fields[key].widget.attrs['required'] = 'required'
else:
self.fields[key].required = False
class CustomClearableFileInput(ClearableFileInput):
'''
Changes order fields
'''
template_with_initial = (
'%(initial_text)s: <a href="%(initial_url)s">%(initial)s</a> '
'%(clear_template)s<br />%(input_text)s: %(input)s'
)
template_with_clear = ""
template_with_clear = '<br> <label for="%(clear_checkbox_id)s"> '
template_with_clear += ' %(clear_checkbox_label)s</label> %(clear)s'
def render(self, name, value, attrs=None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = '%(input)s'
substitutions['input'] = super(
ClearableFileInput, self).render(name, value, attrs)
if self.is_initial(value):
template = self.template_with_initial
substitutions.update(self.get_template_substitution_values(value))
values = self.get_template_substitution_values(value)
initial = basename(values['initial'])
if not self.is_required:
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
substitutions['clear_checkbox_name'] = conditional_escape(
checkbox_name)
substitutions['clear_checkbox_id'] = conditional_escape(
checkbox_id)
substitutions['clear'] = CheckboxInput().render(
checkbox_name, False, attrs={'id': checkbox_id})
clear_template = self.template_with_clear % substitutions
substitutions['clear_template'] = clear_template
substitutions['initial'] = conditional_escape(initial)
return mark_safe(template % substitutions)
class FormEditTopic(forms.ModelForm):
'''
Form for edit one new topic
'''
class Meta:
model = Topic
exclude = (
'forum', "user", "slug", "date",
"id_attachment", "moderate", "is_top")
widgets = {
'description': TextareaWidget,
'attachment': CustomClearableFileInput,
}
def __init__(self, *args, **kwargs):
super(FormEditTopic, self).__init__(*args, **kwargs)
class_css = 'form-control'
for key in self.fields:
if key != "attachment":
self.fields[key].required = True
self.fields[key].widget.attrs['ng-model'] = key
if key == 'title':
ng_init = key + "=" + "'" + \
str(kwargs['instance'].title) + "'"
self.fields[key].widget.attrs['ng-init'] = ng_init
elif key == 'description':
ng_init = key + "=" + "'" + \
str(kwargs['instance'].description) + "'"
self.fields[key].widget.attrs['ng-init'] = ng_init
self.fields[key].widget.attrs['class'] = class_css
self.fields[key].widget.attrs['required'] = 'required'
else:
self.fields[key].required = False
class FormAddComment(forms.ModelForm):
'''
Form for add comment to topic
'''
class Meta:
model = Comment
fields = ['description']
widgets = {
'description': TextareaWidget,
}
def __init__(self, *args, **kwargs):
super(FormAddComment, self).__init__(*args, **kwargs)
for key in self.fields:
if key == "description":
self.fields[key].required = True
self.fields[key].widget.attrs['style'] = "width: 100%"
self.fields[key].widget.attrs['ng-model'] = key
self.fields[key].widget.attrs['required'] = 'required'
| [
"martinpeveri@gmail.com"
] | martinpeveri@gmail.com |
b65e882d1259f051cce3a321b1d581c616225de0 | 0ddaa393ff22758621fd1123aa32e298974efa10 | /kblocks/experiments/__init__.py | d994a7ba587d2d4db839665d090d460aec93e623 | [
"MIT"
] | permissive | jackd/kblocks | d0a1d890f66d0783eec75247fd359ad9e2f0d135 | 0ae39f39ad0e797f19dcdc29417a928b11070097 | refs/heads/master | 2021-07-05T22:33:21.219466 | 2021-01-07T02:59:02 | 2021-01-07T02:59:02 | 218,962,609 | 0 | 1 | NOASSERTION | 2020-12-21T00:17:26 | 2019-11-01T10:16:15 | Python | UTF-8 | Python | false | false | 187 | py | from .callbacks import ExperimentCallback
from .core import Experiment, run
from .status import Status
__all__ = [
"Experiment",
"ExperimentCallback",
"Status",
"run",
]
| [
"thedomjack@gmail.com"
] | thedomjack@gmail.com |
26cd827693df39816b4e8f1cb6ead467289b3f8c | 2006a22faca0265cab5f7de486971103c478439e | /tmdbSearch/migrations/0001_initial.py | 195f99d71e4c2058e3519767365bb32f9c42951a | [] | no_license | GuardianBob/portfolio | 4b6060b1ed9b1f59047d21d90732f1fbb3271066 | 1296cb13632c619c18a3513d3064b16f4eab1f8b | refs/heads/main | 2023-04-20T10:08:23.715797 | 2021-04-30T00:46:21 | 2021-04-30T00:46:21 | 362,626,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | # Generated by Django 2.2.13 on 2021-04-20 20:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('desc', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"78508863+GuardianBob@users.noreply.github.com"
] | 78508863+GuardianBob@users.noreply.github.com |
e5a1ffe642b9b33247a8e70bb7902591cc486875 | 2407490f32568f4469a71172e74b4e41c7b8231e | /you_app/main/utils.py | 457f007022faed7fe7ef315f005ab778c9fda487 | [] | no_license | sidneyarcidiacono/you | d730695422a576628940374e8a14a52decc2ba54 | e09cba147eac37265611033ec84e5eae653d952c | refs/heads/main | 2023-02-01T12:34:08.407341 | 2020-12-20T05:19:49 | 2020-12-20T05:19:49 | 314,082,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | """Module and package import."""
import os
import secrets
from PIL import Image
from you_app import app
def save_image(form_image, size, folder):
"""Save avatar upload to static."""
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_image.filename)
image_filename = random_hex + f_ext
image_path = os.path.join(
app.root_path, f"static/{folder}", image_filename
)
output_size = (size, size)
i = Image.open(form_image)
i.thumbnail(output_size)
i.save(image_path)
return image_filename
| [
"sidneyarci@gmail.com"
] | sidneyarci@gmail.com |
68ff602265388da8fda949d0229e1b522e8295bf | 272ae95716e530d538937ded59ec5b6e0b6d4db8 | /섹션 4/5. 회의실 배정/AA.py | b83f646899b731a0afb42018fc6a94f735e513ac | [] | no_license | gogoheejun/algorithm | 83a1cb30bff5c349f53be16764e517a46e99cf1c | 39e999abf7170f434a7ac6e1f698f066e55aca03 | refs/heads/main | 2023-06-22T13:06:32.135917 | 2021-07-25T15:46:19 | 2021-07-25T15:46:19 | 383,379,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | import sys
# sys.stdin = open("input.txt", "r")
n = int(input())
meeting = []
for _ in range(n):
s, e = map(int, input().split())
meeting.append((s, e))
meeting.sort(key=lambda x: (x[1], x[0]))
et = 0
cnt = 0
for s, e in meeting:
if s >= et:
et = e
cnt += 1
print(cnt)
| [
"heejjuunn@gmail.com"
] | heejjuunn@gmail.com |
0dd2ad1bd7f6a0c7c6051ed6d96eaa5afe02632d | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/grr/grr/server/grr_response_server/gui/api_plugins/artifact_test.py | 098afe7f9d00f1413eca44a3d6ba8e71eb2e859a | [
"Apache-2.0",
"MIT"
] | permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 4,800 | py | #!/usr/bin/env python
"""This modules contains tests for artifact API handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os
from absl import app
from grr_response_core import config
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_server import artifact
from grr_response_server.gui import api_call_context
from grr_response_server.gui import api_test_lib
from grr_response_server.gui.api_plugins import artifact as artifact_plugin
from grr.test_lib import artifact_test_lib
from grr.test_lib import db_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
@db_test_lib.TestDatabases()
class ApiListArtifactsHandlerTest(flow_test_lib.FlowTestsBaseclass):
"""Test for ApiListArtifactsHandler."""
def setUp(self):
super().setUp()
self.handler = artifact_plugin.ApiListArtifactsHandler()
self.context = api_call_context.ApiCallContext("test")
@artifact_test_lib.PatchCleanArtifactRegistry
def testNoArtifacts(self, _):
result = self.handler.Handle(self.handler.args_type(), context=self.context)
self.assertEqual(result.total_count, 0)
self.assertEqual(result.items, [])
@artifact_test_lib.PatchDefaultArtifactRegistry
def testPrepackagedArtifacts(self, registry):
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifacts.json")
registry.AddFileSource(test_artifacts_file)
result = self.handler.Handle(self.handler.args_type(), context=self.context)
# Some artifacts are guaranteed to be returned, as they're defined in
# the test_data/artifacts/test_artifacts.json.
self.assertTrue(result.total_count)
# Check that FakeArtifact artifact exists. It's guaranteed to exist, since
# it's defined in test_data/artifacts/test_artifacts.json.
for item in result.items:
if item.artifact.name == "FakeArtifact":
fake_artifact = item
self.assertTrue(fake_artifact)
self.assertTrue(fake_artifact.HasField("is_custom"))
self.assertFalse(fake_artifact.is_custom)
self.assertTrue(fake_artifact.artifact.doc)
self.assertTrue(fake_artifact.artifact.labels)
self.assertTrue(fake_artifact.artifact.supported_os)
@db_test_lib.TestDatabases()
class ApiUploadArtifactHandlerTest(api_test_lib.ApiCallHandlerTest):
def setUp(self):
super().setUp()
self.handler = artifact_plugin.ApiUploadArtifactHandler()
@artifact_test_lib.PatchCleanArtifactRegistry
def testUpload(self, registry):
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifact.json")
with open(test_artifacts_file, "rb") as fd:
args = self.handler.args_type(artifact=fd.read())
with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
registry.GetArtifact("TestDrivers")
self.handler.Handle(args, context=self.context)
registry.GetArtifact("TestDrivers")
@db_test_lib.TestDatabases()
@artifact_test_lib.PatchDefaultArtifactRegistry
class ApiDeleteArtifactsHandlerTest(api_test_lib.ApiCallHandlerTest):
def setUp(self):
super().setUp()
self.handler = artifact_plugin.ApiDeleteArtifactsHandler()
def UploadTestArtifacts(self):
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifacts.json")
with io.open(test_artifacts_file, mode="r", encoding="utf-8") as fd:
artifact.UploadArtifactYamlFile(fd.read())
def testDeletesArtifactsWithSpecifiedNames(self, registry):
self.UploadTestArtifacts()
count = len(registry.GetArtifacts(reload_datastore_artifacts=True))
args = self.handler.args_type(
names=["TestFilesArtifact", "WMIActiveScriptEventConsumer"])
self.handler.Handle(args, context=self.context)
new_count = len(registry.GetArtifacts())
# Check that we deleted exactly 2 artifacts.
self.assertEqual(new_count, count - 2)
def testDeleteDependency(self, registry):
self.UploadTestArtifacts()
args = self.handler.args_type(names=["TestAggregationArtifact"])
with self.assertRaises(ValueError):
self.handler.Handle(args, context=self.context)
def testDeleteNonExistentArtifact(self, registry):
self.UploadTestArtifacts()
args = self.handler.args_type(names=["NonExistentArtifact"])
e = self.assertRaises(ValueError)
with e:
self.handler.Handle(args, context=self.context)
self.assertEqual(
str(e.exception),
"Artifact(s) to delete (NonExistentArtifact) not found.")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| [
"a.songer@protonmail.com"
] | a.songer@protonmail.com |
48ae08d55da5746ef76de8913b98bd278a9bbb20 | 23392f060c85b5fee645d319f2fd5560653dfd5c | /01_jumptopy/chap06/practice/Q1_duplicate_numbers.py | a45ed6b9decff9ed5cf440a352e3639d9c2df4e0 | [] | no_license | heyhello89/openbigdata | 65192f381de83e4d153c072ff09fa7574f003037 | b35ff237c32013c3e5380eee782085a64edb9d80 | refs/heads/master | 2021-10-22T04:29:00.852546 | 2019-03-08T02:14:34 | 2019-03-08T02:14:34 | 125,938,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | string=input()
list_str=string.split()
for index in list_str:
for num in range(0, 10):
match=index.count(str(num))
if match!=1:
print("False ", end="")
break
elif num==9 and match==1:
print("True ", end="") | [
"heyhello89@hanmail.net"
] | heyhello89@hanmail.net |
6f372e79944370fefdf90422131141fd69d2fbe5 | 3c41889e19235e5d272d4f184339b3a88d96a0c6 | /_utils/_behaviors.py | 7280b7b6a7049df13a6152d7634be433e425a8a0 | [] | no_license | manfreddiaz/safe-iil | b0908fc7be9fb772fae267f6f2f835fa90f9f0a2 | 9cd5007a7f8e8abd10e844351bec278bfd942387 | refs/heads/master | 2020-04-19T07:58:34.915693 | 2019-01-30T18:55:22 | 2019-01-30T18:55:22 | 168,062,938 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | # experiments configurations for submissions, define behavior of the environment after each episode
from ._settings import simulation
class Icra2019Behavior:
def __init__(self, env, at, routine):
self.env = env
self.at = at
self.routine = routine
self.routine.on_step_done(self)
def restart(self):
simulation(self.at, self.env, reset=False)
def reset(self):
simulation(self.at, self.env, reset=True)
def step_done(self, observation, action, reward, done, info):
pass
# if done:
# self.restart()
class Icra2019TestBehavior:
def __init__(self, env, starting_positions, routine):
self.env = env
self.starting = starting_positions
self.current_position = 0
self.routine = routine
self.routine.on_step_done(self)
def restart(self):
self.current_position += 1
self.current_position %= len(self.starting)
simulation(self.starting[self.current_position], self.env, reset=False)
def reset(self):
simulation(self.starting[self.current_position], self.env, reset=True)
def step_done(self, observation, action, reward, done, info):
if done: # if goes out of boundaries...
self.restart()
| [
"takeitallsource@gmail.com"
] | takeitallsource@gmail.com |
a7d2c811b546018c304f178a881df787cc8e2853 | cd40b7cc395f36740000ed4a4144b1c0666ab0fd | /hstrat/_auxiliary_lib/_jit_numpy_bool_t.py | 52bdb898f0c1d675364549536be667f6b30ab1ba | [
"MIT"
] | permissive | mmore500/hstrat | 94fd22c86a87a5707590b9398ef679444ed82d6d | b2d2caded1db5e2dc681d9f171d7c74b322c55c3 | refs/heads/master | 2023-08-31T03:36:44.457576 | 2023-08-25T14:39:29 | 2023-08-25T14:39:29 | 464,531,144 | 5 | 2 | NOASSERTION | 2023-08-25T13:07:52 | 2022-02-28T15:11:45 | Python | UTF-8 | Python | false | false | 541 | py | """Provides numba bool type or numpy bool type.
Type must be declared outside jit'ed function or numba fails.
"""
import numpy as np
from ._is_in_coverage_run import is_in_coverage_run
try:
import numba as nb
except (ImportError, ModuleNotFoundError):
jit_numpy_bool_t = np.bool_
else:
if is_in_coverage_run():
# when numba disabled for coverage testing,
# using nb.types.bool_ causes numpy TypeError
jit_numpy_bool_t = np.bool_
else: # pragma: no cover
jit_numpy_bool_t = nb.types.bool_
| [
"mmore500.login+gpg@gmail.com"
] | mmore500.login+gpg@gmail.com |
a12b81b7189ddaad2b23fb1cf3c39108553e9ad5 | 7b1b4ed8bd4c887362b367625a833c28aa919dd8 | /wpaudit/providers/aliyun/resources/ram/security_policy.py | b93d7eec5d765aa027031811d6f696ba0828c11d | [] | no_license | wperic/wpaudit | 6bbd557c803ce9bceb764c1451daeb5e440a3d9c | ed69c1eabcf85e80ed8fe5397d2d369fd3ff35d8 | refs/heads/main | 2023-07-16T21:36:57.528548 | 2021-09-03T10:35:43 | 2021-09-03T10:35:43 | 402,716,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | from wpaudit.providers.aliyun.resources.base import AliyunResources
from wpaudit.providers.aliyun.facade.base import AliyunFacade
class SecurityPolicy(AliyunResources):
def __init__(self, facade: AliyunFacade):
super().__init__(facade)
async def fetch_all(self):
raw_security_policy = await self.facade.ram.get_security_policy()
security_policy = self._parse_security_policy(raw_security_policy)
self.update(security_policy)
def _parse_security_policy(self, raw_security_policy):
security_policy_dict = {
'login_network_masks':
raw_security_policy.get('LoginProfilePreference', {}).get('LoginNetworkMasks'),
'login_session_duration':
raw_security_policy.get('LoginProfilePreference', {}).get('LoginSessionDuration'),
'enable_save_mfa_ticket':
raw_security_policy.get('LoginProfilePreference', {}).get('EnableSaveMFATicket'),
'allow_user_change_password':
raw_security_policy.get('LoginProfilePreference', {}).get('AllowUserToChangePassword'),
'allow_user_manage_access_keys':
raw_security_policy.get('AccessKeyPreference', {}).get('AllowUserToManageAccessKeys'),
'allow_user_manage_mfa_devices':
raw_security_policy.get('MFAPreference', {}).get('AllowUserToManageMFADevices'),
'allow_user_manage_public_keys':
raw_security_policy.get('PublicKeyPreference', {}).get('AllowUserToManagePublicKeys'),
}
if security_policy_dict['login_network_masks'] == '':
security_policy_dict['login_network_masks'] = None
return security_policy_dict
| [
"90035639+wperic@users.noreply.github.com"
] | 90035639+wperic@users.noreply.github.com |
703a51314294f29c9ed2fcd533c35ee9bf016b26 | c2584c3fbace76b253db1dfbc4fb435b1bbd1717 | /Python/PycharmProjects_1718/Final_Project/Final_Project/venv/Lib/site-packages/pyad/adcontainer.py | 8c3e92be9ea9571fea6c5720cd04f5a626b5389c | [] | no_license | absentee-neptune/Personal-Projects | 9c17e9112eca20a02ae8875c5790116db5170c45 | 5cb7649093fd420c5a6882051aa82f4c947dd667 | refs/heads/master | 2023-01-03T11:57:19.257253 | 2020-10-16T18:53:47 | 2020-10-16T18:53:47 | 304,392,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,068 | py | from __future__ import absolute_import
from .adobject import *
from .aduser import ADUser
from .adcomputer import ADComputer
from .adgroup import ADGroup
from . import pyadconstants
class ADContainer(ADObject):
def get_children_iter(self, recursive=False, filter_=None):
for com_object in self._ldap_adsi_obj:
q = ADObject.from_com_object(com_object)
q.adjust_pyad_type()
if q.type == 'organizationalUnit' and recursive:
for c in q.get_children_iter(recursive=recursive):
if not filter_ or c.__class__ in filter_:
yield c
if not filter_ or q.__class__ in filter_:
yield q
def get_children(self, recursive=False, filter_=None):
"Iterate over the children objects in the container."
return list(self.get_children_iter(recursive=recursive, filter_=filter_))
def __create_object(self, type_, name):
prefix = 'ou' if type_ == 'organizationalUnit' else 'cn'
prefixed_name = '='.join((prefix,name))
return self._ldap_adsi_obj.Create(type_, prefixed_name)
def create_user(self, name, password=None, upn_suffix=None, enable=True,optional_attributes={}):
"""Create a new user object in the container"""
try:
if not upn_suffix:
upn_suffix = self.get_domain().get_default_upn()
upn = '@'.join((name, upn_suffix))
obj = self.__create_object('user', name)
obj.Put('sAMAccountName', optional_attributes.get('sAMAccountName', name))
obj.Put('userPrincipalName', upn)
obj.SetInfo()
pyadobj = ADUser.from_com_object(obj)
if enable:
pyadobj.enable()
if password:
pyadobj.set_password(password)
pyadobj.update_attributes(optional_attributes)
return pyadobj
except pywintypes.com_error as e:
pyadutils.pass_up_com_exception(e)
def create_group(self, name, security_enabled=True, scope='GLOBAL', optional_attributes={}):
"""Create a new group object in the container"""
try:
obj = self.__create_object('group', name)
obj.Put('sAMAccountName',name)
val = pyadconstants.ADS_GROUP_TYPE[scope]
if security_enabled:
val = val | pyadconstants.ADS_GROUP_TYPE['SECURITY_ENABLED']
obj.Put('groupType',val)
obj.SetInfo()
pyadobj = ADGroup.from_com_object(obj)
pyadobj.update_attributes(optional_attributes)
return pyadobj
except pywintypes.com_error as e:
pyadutils.pass_up_com_exception(e)
def create_container(self, name, optional_attributes={}):
"""Create a new organizational unit in the container"""
try:
obj = self.__create_object('organizationalUnit', name)
obj.SetInfo()
pyadobj = ADContainer.from_com_object(obj)
pyadobj.update_attributes(optional_attributes)
return pyadobj
except pywintypes.com_error as e:
pyadutils.pass_up_com_exception(e)
def create_computer(self, name, enable=True,optional_attributes={}):
"""Create a new computer object in the container"""
try:
obj = self.__create_object('computer', name)
obj.Put('sAMAccountName', name)
obj.SetInfo()
pyadobj = ADComputer.from_com_object(obj)
if enable:
pyadobj.enable()
pyadobj.update_attributes(optional_attributes)
return pyadobj
except pywintypes.com_error as e:
pyadutils.pass_up_com_exception(e)
def remove_child(self, child):
"""Rremoves the child object from the domain"""
self._ldap_adsi_obj.Delete(child.type, child.prefixed_cn)
ADObject._py_ad_object_mappings['organizationalUnit'] = ADContainer
ADObject._py_ad_object_mappings['container'] = ADContainer
| [
"brianna.guest20@gmail.com"
] | brianna.guest20@gmail.com |
0a32952a073fb05025a29c20c191ba3dd7b7b545 | 26ec9889c37dfd41c151ca36e292a298f88d2210 | /debug/traverse.py | e9613ff1d568f9a6f25592518ba45ef2b1013202 | [] | no_license | mihai2014/mihai2014.github.io | d11988c42d5c38c53325455069b9de3bdc221f71 | bd479e8d6ffb86c01594259baac54fe5f7262642 | refs/heads/master | 2021-11-15T11:35:11.073938 | 2021-08-19T20:05:50 | 2021-08-19T20:05:50 | 185,383,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | import os
import re
from names import names
startStr = "https://nbviewer.jupyter.org/github/mihai2014/mihai2014.github.io/blob/master"
startStr = ""
string = ""
def traverse_dir(dirName):
global string
string += '<ul>\n'
for item in sorted(os.listdir(dirName)):
fullpath = os.path.join(dirName, item)
#==folder==
if os.path.isdir(fullpath):
string += ('<li>%s</li>\n' % item)
#==file==
else:
relativePath = startStr + fullpath
string += ('<li><a href="%s">%s</a></li>\n' % (relativePath,item))
if os.path.isdir(fullpath):
if os.listdir(fullpath) != []:
traverse_dir(fullpath)
string += '</ul>\n'
traverse_dir('.')
#print(string) | [
"mc200520052005@yahoo.com"
] | mc200520052005@yahoo.com |
5e2093794cc2accc1cbebc99ba054c968e19efa6 | 3581148ef2a3e96dda4b9dd1ae5bdb84577c5e57 | /package/diana/utils/endpoint/endpoint.py | 8f4dc487a922de1b0d9bc8c4771c1ba3d75264f8 | [
"MIT",
"LicenseRef-scancode-other-permissive"
] | permissive | devhliu/diana2 | 515c82f34f831507f4ad1da825a0ffe8ec4c6b13 | f266c7f5abbfa2a245714c569d090d6676864dfc | refs/heads/master | 2020-05-05T01:10:03.247825 | 2019-03-12T17:39:33 | 2019-03-12T17:39:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | """
CRUD Endpoint API
"""
from abc import ABC
from typing import Mapping, TypeVar, NewType, Union, Sequence, Iterator, Collection
import logging
import attr
Item = TypeVar('Item')
ItemID = NewType('ItemID', str)
Query = NewType('Query', Mapping)
@attr.s
class Endpoint(ABC):
"""
Generic CRUD endpoint API.
"""
name = attr.ib(type=str, default="Endpoint")
ctype = attr.ib(default=None, repr=False)
def check(self) -> bool:
"""Check endpoint health"""
raise NotImplementedError
# Create
def put(self, item: Item, **kwargs) -> ItemID:
"""Add or replace an item in the collection"""
raise NotImplementedError
# Retrieve
def get(self, item: Union[ItemID, Item], **kwargs) -> Item:
"""Retrieve item data"""
raise NotImplementedError
def find(self, query: Query, retrieve: bool=False, **kwargs) -> Union[ItemID, Sequence[ItemID],
Item, Sequence[Item]]:
"""Identify items and optionally retrieve data by query"""
raise NotImplementedError
def exists(self, item: Union[ItemID, Item, Query]) -> bool:
"""Check if an item exists by id or query"""
logger = logging.getLogger(self.name)
logger.debug("Checking exists on {}".format(item))
if isinstance(item, Mapping):
return self.find(item) is not None
else:
try:
return self.get(item) is not None
except Exception:
return False
# Update
def update(self, item: Union[ItemID, Item], data: Mapping, **kwargs) -> ItemID:
"""Update data for an item in the endpoint"""
raise NotImplementedError
def handle(self, item: Union[ItemID, Item], method: str, *args, **kwargs):
"""Call a class-specific method"""
func = self.__getattribute__(method)
return func(item, *args, **kwargs)
# Delete
def delete(self, item: Union[ItemID, Item], **kwargs) -> bool:
"""Remove an item from the endpoint"""
raise NotImplementedError
@attr.s
class BroadcastingEndpoint(Endpoint):
eps = attr.ib(type=Iterator[Endpoint], default=None)
def put(self, item: Item, **kwargs):
for ep in self.eps:
ep.put(item, **kwargs)
@attr.s
class SelectiveEndpoint(Endpoint):
eps = attr.ib(type=Collection[Endpoint], default=None)
def put(self, item: Item, selector=None, **kwargs):
ep = self.eps[selector]
ep.put(item, **kwargs)
| [
"derek_merck@brown.edu"
] | derek_merck@brown.edu |
08075ae1a6f80646aaf8c95c14d832ed43889e21 | 33f3ac31530417d1a901873f26c94f6e9e7e7e82 | /data_structures/graph/test_graph.py | 0c33fbcf5a46c2358516363e995ec991771b299e | [
"MIT"
] | permissive | jpchato/data-structures-and-algorithms-python | 9227b57951384e9518da073ee7124df4bd5efec2 | 5615d3637deea1b876760d981682f7fea0c4988f | refs/heads/master | 2022-11-13T13:40:33.984171 | 2020-06-28T19:10:10 | 2020-06-28T19:10:10 | 263,187,808 | 0 | 0 | MIT | 2020-07-02T15:35:42 | 2020-05-12T00:08:48 | Python | UTF-8 | Python | false | false | 1,833 | py | import pytest
from graph import Graph, Vertex
# 1. Node(vertex) can be successfully added to the graph
def test_add_vertex():
g = Graph()
expected = 'spam'
vertex = g.add_vertex('spam')
actual = vertex.value
assert actual == expected
# 2. An edge can be successfully added to the graph
def test_add_edge():
g = Graph()
apples = g.add_vertex('apples')
bananas = g.add_vertex('bananas')
g.add_edge(apples, bananas)
assert True, ('will be fully exercised in get_neighbors tests')
def test_add_edge_interloper():
g = Graph()
insider = g.add_vertex('insider')
outsider = Vertex('outsider')
with pytest.raises(KeyError):
g.add_edge(outsider, insider)
# 3. A collection of all nodes can be properly retrieved from the graph
def test_get_vertices():
# returns a collection of all vertices
g = Graph()
apples = g.add_vertex('apples')
bananas = g.add_vertex('bananas')
actual = g.get_vertices()
assert len(actual) == 2
# 4. All appropriate neighbors can be retrieved from the graph
def test_get_neighbors():
g = Graph()
apple = g.add_vertex('apple')
banana = g.add_vertex('banana')
g.add_edge(apple, banana)
neighbors = g.get_neighbors(apple)
assert len(neighbors) == 1
print('neighbors', neighbors)
neighbor = neighbors[0]
assert neighbors[0].vertex.value == 'banana'
assert neighbor.weight == 1
# 6. The proper size is returned, representing the number of nodes in the graph
def test_get_size():
g = Graph()
apple = g.add_vertex('apple')
banana = g.add_vertex('banana')
assert len(g) == 2
# 8. An empty graph properly returns null
def test_size_empty():
g = Graph()
expected = 0
actual = len(g)
assert actual == expected | [
"jpchato@gmail.com"
] | jpchato@gmail.com |
da54eaf11a2245398f65e46f3a8f604039b36659 | 30150c7f6ed7a10ac50eee3f40101bc3165ebf9e | /src/catalog/CatalogNotifyDialog.py | 889f471d6b029ae25ef8a038417f93ef6bb94674 | [] | no_license | toontown-restoration-project/toontown | c2ad0d552cb9d5d3232ae6941e28f00c11ca3aa8 | 9bef6d9f823b2c12a176b33518eaa51ddbe3fd2f | refs/heads/master | 2022-12-23T19:46:16.697036 | 2020-10-02T20:17:09 | 2020-10-02T20:17:09 | 300,672,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,125 | py | from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from direct.gui.DirectGui import *
from toontown.toonbase.ToontownModules import *
class CatalogNotifyDialog:
"""CatalogNotifyDialog:
Pops up to tell you when you have a new catalog, or a new delivery
from the catalog.
"""
notify = DirectNotifyGlobal.directNotify.newCategory("CatalogNotifyDialog")
def __init__(self, message):
self.message = message
self.messageIndex = 0
framePosX = 0.40
from toontown.toon import LocalToon # import here to stop cyclic import
if LocalToon.WantNewsPage:
framePosX += LocalToon.AdjustmentForNewsButton
self.frame = DirectFrame(
relief = None,
image = DGG.getDefaultDialogGeom(),
image_color = ToontownGlobals.GlobalDialogColor,
image_scale = (1.2, 1.0, 0.4),
text = message[0],
text_wordwrap = 16,
text_scale = 0.06,
text_pos = (-0.1, 0.1),
pos = (framePosX, 0, 0.78),
)
buttons = loader.loadModel(
'phase_3/models/gui/dialog_box_buttons_gui')
cancelImageList = (buttons.find('**/CloseBtn_UP'),
buttons.find('**/CloseBtn_DN'),
buttons.find('**/CloseBtn_Rllvr'))
okImageList = (buttons.find('**/ChtBx_OKBtn_UP'),
buttons.find('**/ChtBx_OKBtn_DN'),
buttons.find('**/ChtBx_OKBtn_Rllvr'))
self.nextButton = DirectButton(
parent = self.frame,
relief = None,
image = okImageList,
command = self.handleButton,
pos = (0, 0, -0.14),
)
self.doneButton = DirectButton(
parent = self.frame,
relief = None,
image = cancelImageList,
command = self.handleButton,
pos = (0, 0, -0.14),
)
if len(message) == 1:
self.nextButton.hide()
else:
self.doneButton.hide()
def handleButton(self):
self.messageIndex += 1
if self.messageIndex >= len(self.message):
# That was the last message.
self.cleanup()
return
# There's more text to display.
self.frame['text'] = self.message[self.messageIndex]
if self.messageIndex + 1 == len(self.message):
# That's the last message.
self.nextButton.hide()
self.doneButton.show()
def cleanup(self):
"""cleanup(self):
Cancels any pending request and removes the panel from the
screen, unanswered.
"""
if self.frame:
self.frame.destroy()
self.frame = None
if self.nextButton:
self.nextButton.destroy()
self.nextButton = None
if self.doneButton:
self.doneButton.destroy()
self.doneButton = None
def __handleButton(self, value):
self.cleanup()
| [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
a647f0d480891d488f49205feca83727cde55097 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/coghq/DistributedStageBattle.py | ec71350320aa213743339055c2aa1ec8c3b1e6aa | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 2,727 | py | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleBase import *
from toontown.coghq import DistributedLevelBattle
from direct.directnotify import DirectNotifyGlobal
from toontown.toon import TTEmote
from otp.avatar import Emote
from toontown.battle import SuitBattleGlobals
import random
from toontown.suit import SuitDNA
from direct.fsm import State
from direct.fsm import ClassicFSM, State
from toontown.toonbase import ToontownGlobals
class DistributedStageBattle(DistributedLevelBattle.DistributedLevelBattle):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedStageBattle')
def __init__(self, cr):
"""
cr is a ClientRepository.
"""
DistributedLevelBattle.DistributedLevelBattle.__init__(self,cr)
# Add a new reward state to the battle ClassicFSM
self.fsm.addState(State.State('StageReward',
self.enterStageReward,
self.exitStageReward,
['Resume']))
offState = self.fsm.getStateNamed('Off')
offState.addTransition('StageReward')
playMovieState = self.fsm.getStateNamed('PlayMovie')
playMovieState.addTransition('StageReward')
##### StageReward state #####
def enterStageReward(self, ts):
self.notify.debug('enterStageReward()')
self.disableCollision()
self.delayDeleteMembers()
if (self.hasLocalToon()):
NametagGlobals.setMasterArrowsOn(0)
if self.bossBattle:
messenger.send('localToonConfrontedStageBoss')
self.movie.playReward(ts, self.uniqueName('building-reward'),
self.__handleStageRewardDone)
def __handleStageRewardDone(self):
self.notify.debug('stage reward done')
if (self.hasLocalToon()):
self.d_rewardDone(base.localAvatar.doId)
self.movie.resetReward()
# Now request our local battle object enter the Resume state,
# which frees us from the battle. The distributed object may
# not enter the Resume state yet (it has to wait until all the
# toons involved have reported back up), but there's no reason
# we have to wait around for that.
self.fsm.request('Resume')
def exitStageReward(self):
self.notify.debug('exitStageReward()')
# In case we're observing and the server cuts us off
# this guarantees all final animations get started and things
# get cleaned up
self.movie.resetReward(finish=1)
self._removeMembersKeep()
NametagGlobals.setMasterArrowsOn(1)
| [
"66761962+satire6@users.noreply.github.com"
] | 66761962+satire6@users.noreply.github.com |
eaaaae0d74223cebb8f81036790229d3ab273d68 | f653f96c26501523d36f67330186e546b9067749 | /19/01/1.py | e812107b39defcf013b2d2980b8f1703d6e3cec8 | [
"CC0-1.0"
] | permissive | pylangstudy/201711 | 6d5fb40d3bf0b1be5310e6c85ac23c76a7f9db56 | be6222dde61373f67d25a2c926868b602463c5cc | refs/heads/master | 2022-11-13T22:11:52.127874 | 2017-11-29T23:32:17 | 2017-11-29T23:32:17 | 109,062,692 | 0 | 1 | null | 2022-10-20T07:22:56 | 2017-10-31T23:22:16 | Python | UTF-8 | Python | false | false | 194 | py | import argparse
parser = argparse.ArgumentParser(prog='PROG')
parser.add_argument('--foo', nargs='?', help='foo help')
parser.add_argument('bar', nargs='+', help='bar help')
parser.print_help()
| [
"pylangstudy@yahoo.co.jp"
] | pylangstudy@yahoo.co.jp |
8bfc3383fca13b78421259423948ab780bfdf222 | 65388597294f4174ad82c7ccc65d611ce278d2a8 | /tests/test_readthedocs_links.py | 8e737aa9758bb2ddd1be6735b82c9c4b253332db | [] | no_license | ThreatConnect-Inc/threatconnect-developer-docs | 5e13bafa14a7418d14c84f62fa0672690ebeb991 | 3c681a04cef29c5347090b687dfd4e1a28ba3efd | refs/heads/master | 2023-08-07T11:39:36.267492 | 2023-07-28T15:31:07 | 2023-07-28T15:31:07 | 78,577,108 | 8 | 19 | null | 2023-07-21T14:15:20 | 2017-01-10T21:46:46 | Python | UTF-8 | Python | false | false | 7,851 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Find and test each of the links in the docs to make sure they are working properly."""
import os
import re
import sys
from bs4 import BeautifulSoup
import requests
base_url = 'https://docs.threatconnect.com/'
pages = {
"https://docs.threatconnect.com/en/latest/rest_api/": [
'change_log.html',
'quick_start.html',
'overview.html',
'associations/associations.html',
'attributes/attributes.html',
'groups/groups.html',
'indicators/indicators.html',
'owners/owners.html',
'security_labels/security_labels.html',
'tags/tags.html',
'tasks/tasks.html',
'victims/victims.html',
'custom_metrics/custom_metrics.html'
],
"https://docs.threatconnect.com/en/latest/python/": [
'python_sdk.html',
'quick_start.html',
'groups/groups.html',
'indicators/indicators.html',
'owners/owners.html',
'tasks/tasks.html',
'victims/victims.html',
'advanced.html'
],
"https://docs.threatconnect.com/en/latest/javascript/": [
'javascript_sdk.html'
],
"https://docs.threatconnect.com/en/latest/java/": [
'java_sdk.html'
]
}
def _get_heading_ids(soup):
"""Get the href/id of all of the headings in the given html."""
headings = list()
headings.extend([link['href'] for link in soup.findAll('a', {'class': 'headerlink'})])
return headings
def test_links():
"""."""
bad_links = 0
excluded_patterns = ["readthedocs.com"]
for base_page, subpages in pages.items():
for subpage in subpages:
page = base_page + subpage
# request page
r = requests.get(page)
# find all hrefs
soup = BeautifulSoup(r.text, 'html.parser')
# get all of the links and headings from the page
links = soup.find_all('a')
headings = _get_heading_ids(soup)
for link in links:
href = link['href']
# ignore "mailto:" links
if href.startswith("mailto:"):
continue
# ignore links to external locations
elif href.startswith("//") or href.startswith("http"):
matches_exclusion_pattern = False
for pattern in excluded_patterns:
if pattern in href:
matches_exclusion_pattern = True
break
if matches_exclusion_pattern:
continue
if href.startswith("//"):
href = "http:" + href
r = requests.get(href)
if not r.ok:
print("\n\n>>> Reviewing {}".format(page))
print("{} error when requesting: {}".format(r.status_code, href))
bad_links += 1
# check links that are relative to the base url
elif href.startswith("/"):
target_url = base_url + href
r = requests.get(target_url)
if not r.ok:
print("\n\n>>> Reviewing {}".format(page))
print("{} error when requesting: {}".format(r.status_code, target_url))
bad_links += 1
# check links to locations on the current page
elif href.startswith("#"):
# skip any links that are just href="#"
if href == "#":
pass
elif href not in headings:
print("\n\n>>> Reviewing {}".format(page))
print("Link to {} does not exist".format(href))
bad_links += 1
# check links that are relative to the current page
else:
# create a target url by removing the file from the current page and adding the desired href
target_url = "/".join(page.split("/")[:-1]) + "/" + href
r = requests.get(target_url)
if str(r.status_code).startswith("4"):
print("\n\n>>> Reviewing {}".format(page))
print("{} error when requesting: {}".format(r.status_code, target_url))
bad_links += 1
if bad_links > 0:
print("\n\n{} bad links found".format(bad_links))
sys.exit(3)
def test_standard_script_heading_link():
"""Make sure the standard script heading is still in the docs so that the links in the code snippets will work.
See: https://docs.threatconnect.com/en/latest/python/python_sdk.html#standard-script-heading
"""
response = requests.get("https://docs.threatconnect.com/en/latest/python/quick_start.html#standard-script-heading")
soup = BeautifulSoup(response.text, "html.parser")
heading_ids = _get_heading_ids(soup)
heading_found = False
for heading in heading_ids:
if heading == "#standard-script-heading":
heading_found = True
break
if not heading_found:
raise RuntimeError("Unable to find the Standard Script Heading used in the code snippets.")
def test_no_dev_links():
"""Make sure there are no links to the dev version of the docs."""
dev_pattern = "en/dev/"
# iterate through the files in the /docs/ directory to make sure the are no links to the dev version of the documentation
for path, dirs, files in os.walk(os.path.abspath(os.path.join(os.path.dirname(__file__), "../docs"))):
# ignore all directories that start with "_"
if "/_" not in path:
for file in files:
# check to see if the dev pattern is in the file
with open("{}/{}".format(path, file), 'r') as f:
print("Checking: {}/{}".format(path, file))
file_text = f.read()
assert dev_pattern not in file_text
print("check passed\n")
def test_markdown_links():
"""Make sure there are no links to the dev version of the docs."""
markdown_link_pattern = '\[.*?\]\(.+\)'
errors = 0
for path, dirs, files in os.walk(os.path.abspath(os.path.join(os.path.dirname(__file__), "../docs"))):
# ignore all directories that start with "_"
if "/_" not in path:
for file in files:
if not file.startswith('.'):
# check to see if the dev pattern is in the file
with open("{}/{}".format(path, file), 'r') as f:
file_text = f.read()
try:
markdown_links = re.findall(markdown_link_pattern, file_text)
assert markdown_links == []
except AssertionError:
print("Found what appears to be a markdown link in {}/{}: {}".format(path, file, markdown_links))
errors += 1
assert errors == 0
# def test_no_broken_headings():
# """Make sure there are no links to the dev version of the docs."""
# broken_heading_pattern = 'id="id[0-9]'
# for base_page, subpages in pages.items():
# for subpage in subpages:
# page = base_page + subpage
# # request page
# r = requests.get(page)
# try:
# # check to see if there are any broken headings in the page
# assert len(re.findall(broken_heading_pattern, r.text)) == 0
# except AssertionError as e:
# print("\n\n>>> Reviewing {}".format(page))
# raise e
| [
"floyd.hightower27@gmail.com"
] | floyd.hightower27@gmail.com |
a40bd18fba15baaf84f5502990110a80b52f8914 | de5df8163e5507da37435e7d2b82dfdd5eb38be0 | /fabfile.py | 7d5be93f357e902d8af26a91363749bd520bc5fb | [] | no_license | csinchok/Twitter-Roulette | 27634be0b69b56bc88010b7dc111bdca0d658929 | 2cf7f49e5a1534aac172dcc9b375f5e92fce5a91 | refs/heads/master | 2021-01-23T02:28:51.070024 | 2011-09-03T17:52:00 | 2011-09-03T17:52:00 | 2,150,021 | 2 | 0 | null | null | null | null | UTF-8 | Python | true | false | 1,470 | py | from __future__ import with_statement
from fabric.api import *
from fabric.contrib.console import confirm
env.hosts = ['csinchok.webfactional.com']
env.user = 'csinchok'
webapp_path = '/home/csinchok/webapps/social_roulette'
def test():
local('./bin/test', capture=False)
def pack():
local('git archive --format zip master --output=social_roulette.zip', capture=False)
def prepare_deploy():
pack()
def deploy_prod_code():
prepare_deploy()
put('social_roulette.zip', '/home/csinchok/')
run('unzip -o /home/csinchok/social_roulette.zip -d %s' % webapp_path)
put('twitterroulette/social_keys.py', '/home/csinchok/webapps/social_roulette/twitterroulette/social_keys.py')
with cd(webapp_path):
run('touch bin/django.wsgi')
run('apache2/bin/restart')
run('rm /home/csinchok/social_roulette.zip')
def deploy_prod():
prepare_deploy()
put('./social_roulette.zip', '/home/csinchok/')
run('unzip -o /home/csinchok/social_roulette.zip -d %s' % webapp_path)
put('twitterroulette/social_keys.py', '/home/csinchok/webapps/social_roulette/twitterroulette/social_keys.py')
with cd(webapp_path):
run('python2.6 ./bootstrap.py')
run('bin/buildout -c production.cfg')
run('bin/django syncdb --noinput')
run('bin/django migrate')
run('touch bin/django.wsgi')
run('apache2/bin/restart')
run('rm /home/csinchok/social_roulette.zip') | [
"chris@sinchok.com"
] | chris@sinchok.com |
74fc6b57948f736f797cb83945d8a4829b3f3a69 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/test/test_smtpnet.py | 3897b365aea88acbec5ce7eab639e0c13496946a | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:08adc8ef5440c83129013a75e849ab9f5161edb3c4a891362c47afd72f5c22a8
size 2846
| [
"aqp1234@naver.com"
] | aqp1234@naver.com |
04dc80f99264954bfbccc015f965c642b2f59186 | c8c53b7eef6de1d7d8c236a5e6a4c1bf93dbd578 | /text editor/text editor project.py | 91a6aacaadb6678a68de849687881dd51a2df35b | [] | no_license | tanya-agarwal06/project-text-editor | d6e6e3b7d59a6cf86ee34fe3667b073ec22a7b62 | 311d49d767845b2df9257363812b42794a823ee1 | refs/heads/master | 2020-03-23T13:36:20.645485 | 2018-07-20T15:00:50 | 2018-07-20T15:00:50 | 141,627,256 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | import os
import tkinter
from tkinter.messagebox import *
from tkinter.filedialog import *
from tkinter import *
class texteditor:
def __init__(self, root):
self.file_path = None
self.menu = Menu(root)
root.config(self.menu)
self.Submenu = Menu(self.menu)
Menu = self.menu.add_cascade(label="File", menu = self.Submenu)
self.Submenu.add_command(label="New File" , command= self.t_1)
def t_1(self):
print("text edit")
root = Tk()
a = texteditor(root)
root.mainloop()
| [
"="
] | = |
d9809f2b9e5da68e9a544248dd175ab612a045f3 | c7603730fe2e0615cb8af85360f4270c6e519dcd | /eu-structural-funds/common/processors/currency_convert.py | 1d99a8e00a7a7bb1e7f4b8628efbb431f9d882df | [
"MIT"
] | permissive | transpresupuestaria/os-data-importers | b58266d03274901bf6104dc10ab725fa97a22d18 | 929e07aefc98ae4788e75c682d4c3adc014bf6ce | refs/heads/master | 2022-07-02T16:21:34.023556 | 2020-05-18T18:48:08 | 2020-05-18T18:48:08 | 112,221,613 | 0 | 0 | MIT | 2018-08-07T00:26:10 | 2017-11-27T16:40:20 | Python | UTF-8 | Python | false | false | 2,255 | py | import os
import json
from decimal import Decimal
from datapackage_pipelines.wrapper import ingest, spew
parameters_, datapackage_, resources_ = ingest()
column = parameters_['column']
currency = parameters_['currency']
currency_column = parameters_['currency-column']
date_columns = parameters_['date-columns']
missing = open('missing-keys.txt', 'a')
written = set()
currencies = json.load(open(os.path.join(os.path.dirname(__file__), 'currencies.json')))
def process(resources):
def process_single(resource):
for row in resource:
row[currency_column] = currency
ncv = row[column]
row[column] = None
if ncv is not None:
the_date = None
for date_column in date_columns:
the_date = row.get(date_column)
if the_date is not None:
break
if the_date is not None:
keys = ["%s-%s" % (currency, the_date.strftime('%Y-%m'))]
else:
funding_period = list(map(int, row['funding_period'].split('-')))
keys = ['%s-%d-06' % (currency, year) for year in range(funding_period[0], funding_period[1])]
assert len(keys)>0
all_rates = [(key, currencies.get(key)) for key in keys]
none_keys = map((lambda x: x[0]),
filter((lambda x: x[1] is None), all_rates))
rates = list(map((lambda x: x[1]),
filter((lambda x: x[1] is not None), all_rates)))
if len(rates) > 0:
rate = sum(rates) / len(rates)
amount = ncv * Decimal(rate)
row[column] = amount
for key in none_keys:
if key not in written:
missing.write(key+'\n')
written.add(key)
yield row
for resource_ in resources:
yield process_single(resource_)
for resource in datapackage_['resources']:
resource['schema']['fields'].append({
'name': currency_column,
'type': 'string'
})
spew(datapackage_, process(resources_))
missing.close() | [
"vitor@vitorbaptista.com"
] | vitor@vitorbaptista.com |
269809d7b627a90f5f11c5c9ace2c3f29fa30f4f | c5d68f58c9523257a8b41954553f5cff2cd5f487 | /Secao_08_Funcoes/08_50_entendo_**kwargs.py | d343ee19fbcd415be4e842c400608dec38b1b742 | [] | no_license | SouzaCadu/guppe | 04bfcde82d4404eb9ec795006c6931ba07dc72b6 | 1f8a672230c5c27712f522e1e34516591c012453 | refs/heads/master | 2023-03-13T01:32:51.019871 | 2021-02-25T17:02:59 | 2021-02-25T17:02:59 | 320,908,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,880 | py | """
Entendendo o **kwargs
É mais um parâmetro, diferentemente do *args que coloca os valores como tupla,
o **kwargs exige que utilizemos parâmetros nomeados, transformado-os em
um dicionário
Nas nossas funções podemos ter parametros obrigatórios, *args, default e **kwargs
nessa ordem obrigatoriamente.
# Exemplos
def cores_favoritas(a, b, c, **kwargs):
for pessoa, cor in kwargs.items():
print(f"A cor favorita de {pessoa} é {cor}.")
cores_favoritas(1, 2, 3, Marcos="Verde", Julia="Branco", Fernanda="Roxo")
def cumprimento_especial(**kwargs):
if "Geek" in kwargs and kwargs["Geek"] == "Python":
return "Você recebeu um cumprimento Pythonico Geek"
elif "Geek" in kwargs:
return f"{kwargs['Geek']} Geek"
return "Não tenho certeza de quem você é..."
print(cumprimento_especial())
print(cumprimento_especial(Geek="Python"))
print(cumprimento_especial(Geek="01"))
print(cumprimento_especial(Geek="Especial"))
def minha_funcao(idade, nome, *args, solteiro=False, **kwargs):
print(f"{nome} tem {idade} anos.")
print(args)
if solteiro:
print("Solteiro")
else:
print("Casado")
print(kwargs)
minha_funcao(18, "Ana")
minha_funcao(25, "Jones", 4, 5, 3, solteiro=False)
minha_funcao(39, "Bruce", eu="Não", você="Vai")
minha_funcao(45, "Charles", 9, 4, 3, java=False, pythpn=True)
# Desempacotar com **kwargs
def mostra_nomes(**kwargs):
return f"{kwargs['nome']} {kwargs['sobrenome']}"
nomes = {'nome': 'Charles', 'sobrenome': 'Xavier'}
print(mostra_nomes(**nomes))
def somatorio(a, b, c):
print(a + b + c)
lista = [1, 2, 3]
tupla = (1, 2, 3)
conjunto = {1, 2, 3}
dicionario = dict(a=1, b=2, c=3)
somatorio(*lista)
somatorio(*tupla)
somatorio(*conjunto)
somatorio(**dicionario)
Os nomes da chave em um dicionário devem ser os mesmos
dos parâmetros da função
"""
| [
"cadu.souza81@gmail.com"
] | cadu.souza81@gmail.com |
3cc0b9cb31a005ae94dc7f3acff88300027a6a1d | 8dfe4b53fae92795405d789d52148d1291836afa | /python/python学习/day1/day1.py | 65bc4a989f226b5bd262f50145244d2b1b28eb90 | [] | no_license | ymyjohnny/python | e07c54a88954e090cf3d30a4c6f6ac46353063fb | b483fd55e577d4dcceb5762bddf833df23874f3a | refs/heads/master | 2021-01-10T01:10:19.038424 | 2019-07-02T02:40:23 | 2019-07-02T02:40:23 | 45,223,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,697 | py | from twisted.python.filepath import FilePath
s = 'abc,def'
###
n = 10
m = 20
a = s.split(',')[0] * n +','+ s.split(',')[1] * m
print a
############
l = range (2,9)
target = 9
#print l
#print
l1 = []
print range(len(l))
print ""
for i in range(len(l)):
for a in range(i+1,len(l)):
# print i, a, l[i], l[a]
if l[i]+l[a] == target: print i,a
print ""
opposite = {}
for i, v in enumerate(l):
o = opposite.get(target -v)
if o is not None:
print o, i
opposite[v] = i # for j, v1 in list(enumerate)
import os
print os.listdir('.')
s = set()
l = []
for i in os.listdir('/home/ymy'):
if '-' in i:
x = i.split('-')[-1]
if x not in l:
l.append(x)
for i in os.listdir('/home/ymy'):
if '-' in i:
x = i.split('-')[-1]
if x not in l:
l.append(x)
print [i.split('-')[-1] for i in os.listdir('/home/ymy') if '-' in i]
print [i for i in os.listdir('/home/ymy') if '-' in i]
print [i.split('-')[-1] for i in os.listdir('/home/ymy') if '-' in i]
print set([i.split('-')[-1] for i in os.listdir('/home/ymy') if '-' in i])
d_size = {}
from os import path
for base, dirs, files in os.walk('/tmp'):
for filename in files:
print path.join(base, filename)
filepath = path.join(base, filename)
try :
st = os.stat(filepath)
except:
continue
print filepath
d_size.setdefault(st.st_size, []).append(filepath)
print st.st_size
for k,v in d_size.items():
if len(v) >1:
print k,v
#for i,v in enumerate(l): | [
"ymyjohnny@adsame.com"
] | ymyjohnny@adsame.com |
06b1d29b7b6c839e81497373b6d46b51d3c3dd6d | 63768dc92cde5515a96d774a32facb461a3bf6e9 | /jacket/api/storage/views/qos_specs.py | 42aa19da5215b83b075b3de08f6075637dade38b | [
"Apache-2.0"
] | permissive | ljZM33nd/jacket | 6fe9156f6f5789e5c24425afa7ce9237c302673d | d7ad3147fcb43131098c2a5210847634ff5fb325 | refs/heads/master | 2023-04-16T11:02:01.153751 | 2016-11-15T02:48:12 | 2016-11-15T02:48:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,489 | py | # Copyright (C) 2013 eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from jacket.api.storage import common
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
    """Build python-dict representations of QoS specs API responses."""
    _collection_name = "qos-specs"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary_list(self, request, qos_specs, qos_count=None):
        """Show a list of qos_specs without many details."""
        return self._list_view(self.detail, request, qos_specs, qos_count)

    def summary(self, request, qos_spec):
        """Generic, non-detailed view of a qos_specs."""
        # Summary and detail currently share the same payload shape.
        return self._spec_view(request, qos_spec)

    def detail(self, request, qos_spec):
        """Detailed view of a single qos_spec."""
        # TODO(zhiteng) Add associations to detailed view
        return self._spec_view(request, qos_spec)

    def associations(self, request, associates):
        """View of qos specs associations."""
        return {
            'qos_associations': associates
        }

    def _spec_view(self, request, qos_spec):
        """Common single-spec payload: the spec plus its resource links."""
        return {
            'qos_specs': qos_spec,
            'links': self._get_links(request, qos_spec['id']),
        }

    def _list_view(self, func, request, qos_specs, qos_count=None):
        """Provide a view for a list of qos_specs."""
        specs_list = []
        for specs in qos_specs:
            specs_list.append(func(request, specs)['qos_specs'])
        specs_links = self._get_collection_links(request, qos_specs,
                                                 self._collection_name,
                                                 qos_count)
        specs_dict = {'qos_specs': specs_list}
        if specs_links:
            specs_dict['qos_specs_links'] = specs_links
        return specs_dict
| [
"nkapotoxin@gmail.com"
] | nkapotoxin@gmail.com |
7eb014003bb70b5d650dfd37cad67df51eca45ad | 3367451615f082b21a9f65c0f3cf833b269082c0 | /Stripes/HadCRUT5/stripes_single_member_longitude.py | 612f1e91064785482e9aa2deb5a8a04f8526cc83 | [] | no_license | philip-brohan/Posters | dd20eb399f559b6c205c300f96d95d0f13c52808 | d030a926ea1163e7078b2c468dfca22866552f50 | refs/heads/master | 2022-10-16T01:08:39.589886 | 2022-09-25T16:41:08 | 2022-09-25T16:41:08 | 91,101,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,336 | py | #!/usr/bin/env python
# Make a poster showing HadCRUT5 monthly temperatures.
# Inspired by the climate stripes popularised by Ed Hawkins.
import os
import iris
import numpy
import datetime
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.patches import Rectangle
from pandas import qcut
# Choose one ensemble member (arbitrarily)
member = 87
# Load the 20CR data
h=iris.load_cube("/scratch/hadcc/hadcrut5/build/HadCRUT5/analysis/"+
                 "HadCRUT.5.0.0.0.analysis.anomalies.%d.nc" % member)
# Pick a random latitude at each month
# (reuse the latitude=0 slice as a (time, longitude) container, then fill
#  each cell from a randomly-chosen latitude of the full cube)
p=h.extract(iris.Constraint(latitude=0))
s=h.data.shape
for t in range(s[0]):
    for lon in range(s[2]):
        rand_l = numpy.random.randint(0,s[1])
        p.data[t,lon]=h.data[t,rand_l,lon]
h=p
ndata=h.data
# Plot the resulting array as a 2d colourmap
fig=Figure(figsize=(72,18), # Width, Height (inches)
           dpi=300,
           facecolor=(0.5,0.5,0.5,1),
           edgecolor=None,
           linewidth=0.0,
           frameon=False, # Don't draw a frame
           subplotpars=None,
           tight_layout=None)
#fig.set_frameon(False)
# Attach a canvas
canvas=FigureCanvas(fig)
matplotlib.rc('image',aspect='auto')
# Speckled grey background
s=ndata.shape
ax2 = fig.add_axes([0,0,1,1],facecolor='green')
ax2.set_axis_off() # Don't want surrounding x and y axis
nd2=numpy.random.rand(s[1],s[0])
clrs=[]
for shade in numpy.linspace(.42+.01,.36+.01):
    clrs.append((shade,shade,shade,1))
y = numpy.linspace(0,1,s[1])
x = numpy.linspace(0,1,s[0])
img = ax2.pcolormesh(x,y,nd2,
                     cmap=matplotlib.colors.ListedColormap(clrs),
                     alpha=1.0,
                     shading='gouraud',
                     zorder=10)
# Data layer on top of the speckle; cube-root stretch (numpy.cbrt)
# boosts contrast for anomalies near zero.
ax = fig.add_axes([0,0,1,1],facecolor='black',xlim=(0,1),ylim=(0,1))
ax.set_axis_off() # Don't want surrounding x and y axis
ndata=numpy.transpose(ndata)
s=ndata.shape
y = numpy.linspace(0,1,s[0])
x = numpy.linspace(0,1,s[1])
img = ax.pcolorfast(x,y,numpy.cbrt(ndata),
                    cmap='RdYlBu_r',
                    alpha=1.0,
                    vmin=-1.7,
                    vmax=1.7,
                    zorder=100)
fig.savefig('m%d_lon.pdf' % member)
| [
"philip@brohan.org"
] | philip@brohan.org |
0fe41fc67d558792e39af525d95f1f466e64b777 | 6820424467be6d77b5ff44c3f6a55d2273565e21 | /services/layer_initializer_service.py | 5bd94b4a04669f24198e552d2e9da827e30a8d46 | [] | no_license | gbattra/Generator_ML_Framework | ca70ce58c69109fc3a5f974ff1f57c53deea3bc7 | 29dae8ef4c1d83260abc9777735c5537425337be | refs/heads/master | 2021-06-19T10:00:36.191586 | 2021-01-11T19:43:15 | 2021-01-11T19:43:15 | 132,356,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | from models import *
class LayerInitializerService:
    """Builds the default layer stack for the classifier network."""

    @staticmethod
    def load_layers(num_classes: int, learning_rate: float):
        """Return the ordered layer list: FC(67500->50), ReLU,
        FC(50->num_classes), softmax."""
        hidden = fully_connected_layer_model.FullyConnectedLayerModel(
            67500, 50, 'fc1', learning_rate)
        hidden_activation = activation_layer_model.ActivationLayerModel(
            'relu', 'output_activation')
        output = fully_connected_layer_model.FullyConnectedLayerModel(
            50, num_classes, 'fc2', learning_rate)
        output_activation = activation_layer_model.ActivationLayerModel(
            'softmax', 'output_activation')
        # Order matters: forward pass runs the list front to back.
        return [hidden, hidden_activation, output, output_activation]
| [
"greg.attra@gmail.com"
] | greg.attra@gmail.com |
efa37b4b73a3d0a36c690472b2bf52257a42df6f | a09740e643d6277ada23c82d8e87853a1cd1a9e5 | /Z_ALL_FILE/Py1/fndatetime.py | a83056dcaaff17a4c8bb5663424924b5fd04f7f2 | [
"Apache-2.0"
] | permissive | FuckBrains/omEngin | c5fb011887c8b272f9951df3880a879456f202e8 | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | refs/heads/main | 2023-03-20T18:27:53.409976 | 2021-03-14T15:50:11 | 2021-03-14T15:50:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | import pandas as pd
import numpy as np
from datetime import *
nw = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
def add_col_df(df, colname, colval = False, indx=False):
    """Return *df* with a new column *colname* appended or inserted.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame to extend.
    colname : str
        Name of the new column.
    colval : scalar or sequence, optional
        Value(s) for the new column.  ``False`` (the default sentinel)
        means "not supplied" and fills the column with ``'NWC'``.
    indx : int, optional
        Position at which to insert the column.  ``False`` (the default
        sentinel) appends the column instead.

    Returns
    -------
    pandas.DataFrame
        A new frame when appending (``df.assign`` copies); the *same*
        frame, mutated in place, when ``indx`` is given.
    """
    # Identity checks (`is False`) instead of `== False`: 0, 0.0 and
    # `False`-equal values compare equal to False, so the old equality
    # tests silently replaced a legitimate colval of 0 with 'NWC' and
    # made inserting at position 0 impossible.
    value = 'NWC' if colval is False else colval
    if indx is False:
        ndf = df.assign(coln = value)
        ndf.rename(columns = {'coln': colname}, inplace = True)
        return ndf
    df.insert(indx, colname, value, allow_duplicates=False)
    return df
def timediff(df, c1, c2, newcol):
    """Return a copy of *df* with *newcol* = |df[c2] - df[c1]| in minutes.

    Both columns are coerced to datetimes *in place* on the caller's
    frame (preserved from the original implementation; ``datedif``
    relies only on the returned frame).  The result is a float number
    of minutes.
    """
    df[c1] = pd.to_datetime(df[c1])
    df[c2] = pd.to_datetime(df[c2])
    # Work on a copy so the new column does not leak into *df*.  The old
    # code obtained the copy from add_col_df(), which wrote a throwaway
    # 'NWC' placeholder column only to overwrite it immediately.
    df1 = df.copy()
    # timedelta64[ns] -> ns as int64 -> seconds -> minutes, in one pass
    # instead of three successive reassignments.
    df1[newcol] = (df1[c2] - df1[c1]).abs().astype("i8") / 1e9 / 60
    return df1
def timediff_2(df,c1,c2,newcol):
    # Variant of timediff(): same absolute column difference, but
    # converted via astype('timedelta64[m]'), which truncates to whole
    # minutes instead of returning fractional minutes.
    # NOTE(review): astype('timedelta64[m]') was deprecated and then
    # removed in pandas 2.x (only s/ms/us/ns units remain) -- confirm
    # the pinned pandas version before relying on this helper.
    df[c1] = pd.to_datetime(df[c1])
    df[c2] = pd.to_datetime(df[c2])
    df1 = add_col_df(df,newcol)
    df1[newcol] = abs(df1[c2] - df1[c1])
    df1[newcol] = df1[newcol].astype('timedelta64[m]')
    return df1
def datedif(ndf, nwcol, dt_col1, dt_col2 = False):
    """Add *nwcol* = minutes between two datetime columns of *ndf*.

    Blank/whitespace-only cells are first replaced with NaN.  When
    *dt_col2* is omitted, the difference is taken against the
    module-level timestamp ``nw`` ("minutes since script start").
    """
    cleaned = ndf.replace(r'^\s*$', np.nan, regex=True)
    if dt_col2 == False:  # noqa: E712 -- False is the "not supplied" sentinel
        with_now = add_col_df(cleaned, 'NOW', nw)
        diffed = timediff(with_now, dt_col1, 'NOW', nwcol)
        return diffed.drop(['NOW'], axis = 1)
    return timediff(cleaned, dt_col1, dt_col2, nwcol)
#ls = list(map (lambda x , y: ((datetime.strptime(x, "%d/%m/%Y %H:%M") - datetime.strptime(y, "%d/%m/%Y %H:%M")).total_seconds())/60 if ('1970' not in str(y)) else "0", clr,lst))
#df[nwcol] = np.nan
#df[nwcol] = np.array(ls)
#print('In Minutes')
def datediff(unit, Time1, Time2):
    """Debug stub: print the runtime types of both time arguments.

    *unit* is accepted for interface symmetry but currently unused.
    """
    for value in (Time1, Time2):
        print(type(value))
| [
"omi.kabirr@gmail.com"
] | omi.kabirr@gmail.com |
309efb252bd66f33562a67370f0b33b5353d3b3c | c5e8eb946f2bcad3658642b4895d86cd811efb36 | /src/biocommons/seqrepo/fastadir/_data/migrations/0001-initial.py | f0bedb0874caf0908ca60ae2db8e1ddd17817a40 | [
"Apache-2.0"
] | permissive | biocommons/biocommons.seqrepo | a96e4bdee79a4409a6d7927aeafe82510350a807 | 9ce861b5b86d1a1aa366fb62f6ed340186b4eed5 | refs/heads/main | 2023-05-15T10:03:17.856522 | 2023-05-08T17:48:08 | 2023-05-08T17:48:08 | 65,880,786 | 31 | 40 | Apache-2.0 | 2023-09-13T22:42:45 | 2016-08-17T06:11:21 | Python | UTF-8 | Python | false | false | 390 | py | from yoyo import step
step("""
create table seqinfo (
seq_id text primary key,
len integer not null,
alpha text not null,
added timestamp not null default current_timestamp,
relpath text not null
)""", """drop table seqinfo""")
step("""create unique index seqinfo_seq_id_idx on seqinfo(seq_id)""")
step("""update meta set value = '1' where key = 'schema version'""")
| [
"reecehart@gmail.com"
] | reecehart@gmail.com |
334b4650c630ade42418535c1e8d04189d672562 | 98cb2f2afbe57bdda9d6b8b1dd8cf624987d91bc | /torchdp/utils/module_modification.py | 4030d29505ea6abafd97f6595903eaefc6e859ca | [
"Apache-2.0"
] | permissive | jyhong836/pytorch-dp | 0e7613b01f09ceb2c3787284372f8e887bf0deb3 | e050b98d630d4db50cacc4fff82575daf345f012 | refs/heads/master | 2023-01-03T15:08:54.976598 | 2020-08-18T01:26:07 | 2020-08-18T01:27:02 | 260,974,801 | 0 | 0 | Apache-2.0 | 2020-05-03T16:40:11 | 2020-05-03T16:40:11 | null | UTF-8 | Python | false | false | 6,490 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
This module includes utils for modifying model layers, replacing layers etc.
"""
from typing import Callable, Type
from torch import nn
def _replace_child(
root: nn.Module, child_name: str, converter: Callable[[nn.Module], nn.Module]
) -> None:
"""
Converts a sub-module to a new module given a helper
function, the root module and a string representing
the name of the submodule to be replaced.
Parameters
----------
root: torch.nn.Module
Root module whose sub module must be replaced.
child_name: str
Name of submodule that must be replaced.
converter: Callable[[torch.nn.Module], torch.nn.Module]
Function or a lambda that takes a module (the submodule to
be replaced) and returns its replacement.
"""
# find the immediate parent
parent = root
nameList = child_name.split(".")
for name in nameList[:-1]:
parent = parent._modules[name]
# set to identity
parent._modules[nameList[-1]] = converter(parent._modules[nameList[-1]])
def replace_all_modules(
    root: nn.Module,
    target_class: Type[nn.Module],
    converter: Callable[[nn.Module], nn.Module],
) -> nn.Module:
    """Convert every submodule of *root* of type *target_class*.

    Useful for swapping out modules the Privacy Engine cannot handle.

    Parameters
    ----------
    root: torch.nn.Module
        Model instance, potentially with sub-modules.
    target_class: Type[torch.nn.Module]
        Class whose instances must be replaced.
    converter: Callable[[torch.nn.Module], torch.nn.Module]
        Maps each matching instance to its replacement module.

    Returns
    -------
    torch.nn.Module
        *root*, mutated in place — unless *root* itself is an instance
        of *target_class*, in which case ``converter(root)`` is returned
        (there is no parent to rewire).

    Example
    -------
    >>> from torchvision.models import resnet18
    >>> from torch import nn
    >>> model = resnet18()
    >>> model = replace_all_modules(model, nn.BatchNorm2d,
    ...                             lambda _: nn.Identity())
    >>> print(model.layer1[0].bn1)
    Identity()
    """
    if isinstance(root, target_class):
        return converter(root)
    for submodule_name, submodule in root.named_modules():
        if isinstance(submodule, target_class):
            _replace_child(root, submodule_name, converter)
    return root
def _batchnorm_to_instancenorm(module: nn.modules.batchnorm._BatchNorm) -> nn.Module:
"""
Converts a BatchNorm module to the corresponding InstanceNorm module
Parameters
----------
module: torch.nn.modules.batchnorm._BatchNorm
BatchNorm module to be replaced
Returns
-------
torch.nn.Module
InstanceNorm module that can replace the BatchNorm module provided
"""
def matchDim():
if isinstance(module, nn.BatchNorm1d):
return nn.InstanceNorm1d
elif isinstance(module, nn.BatchNorm2d):
return nn.InstanceNorm2d
elif isinstance(module, nn.BatchNorm3d):
return nn.InstanceNorm3d
return matchDim()(module.num_features)
def _batchnorm_to_groupnorm(module: nn.modules.batchnorm._BatchNorm) -> nn.Module:
"""
Converts a BatchNorm ``module`` to GroupNorm module.
This is a helper function.
Parameters
----------
module: torch.nn.modules.batchnorm._BatchNorm
BatchNorm module to be replaced
Returns
-------
torch.nn.Module
GroupNorm module that can replace the BatchNorm module provided
Notes
-----
A default value of 32 is chosen for the number of groups based on the
paper *Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour*
https://arxiv.org/pdf/1706.02677.pdf
"""
return nn.GroupNorm(min(32, module.num_features), module.num_features, affine=True)
def nullify_batchnorm_modules(root: nn.Module) -> nn.Module:
    """Replace every BatchNorm submodule of *root* with ``nn.Identity``.

    Covers :class:`torch.nn.BatchNorm1d`/``2d``/``3d`` via their common
    ``_BatchNorm`` base.

    Returns
    -------
    torch.nn.Module
        *root* with all BatchNorm submodules nulled out (mutated in
        place and returned).

    Notes
    -----
    Dropping normalization outright usually hurts convergence badly;
    prefer :func:`convert_batchnorm_modules` where possible.
    """
    # pyre-fixme[6]: Expected `Type[nn.Module]` for 2nd param but got
    # `Type[nn.modules.batchnorm._BatchNorm]`.
    return replace_all_modules(
        root, nn.modules.batchnorm._BatchNorm, lambda _: nn.Identity()
    )
def convert_batchnorm_modules(
    model: nn.Module,
    converter: Callable[
        [nn.modules.batchnorm._BatchNorm], nn.Module
    ] = _batchnorm_to_groupnorm,
) -> nn.Module:
    """Replace every BatchNorm submodule of *model* with a privacy
    compliant module produced by *converter* (GroupNorm by default).

    Parameters
    ----------
    model: torch.nn.Module
        Module instance, potentially with sub-modules.
    converter: Callable[[torch.nn.modules.batchnorm._BatchNorm], torch.nn.Module]
        Maps each BatchNorm instance to its replacement; defaults to
        :meth:`~torchdp.utils.module_modification._batchnorm_to_groupnorm`.

    Returns
    -------
    torch.nn.Module
        *model* with all BatchNorm submodules converted.

    Example
    -------
    >>> from torchvision.models import resnet18
    >>> model = convert_batchnorm_modules(resnet18())
    >>> print(model.layer1[0].bn1)
    GroupNorm(...)
    """
    # pyre-fixme[6]: Expected `Type[nn.Module]` for 2nd param but got
    # `Type[nn.modules.batchnorm._BatchNorm]`.
    return replace_all_modules(model, nn.modules.batchnorm._BatchNorm, converter)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
f4af02d72276d26a7b42cf74021b8cacbefbc203 | 15e6385746ccf4b8eb6c6e302aca236021bb8781 | /LintcodePartII/li431_ConnectedComponentInUndirectedGraph.py | 0aaef6d8f48f26c5fde250d842fb4cbed53b8f0b | [] | no_license | akb46mayu/Data-Structures-and-Algorithms | 11c4bbddc9b4d286e1aeaa9481eb6a620cd54746 | de98494e14fff3e2a468da681c48d60b4d1445a1 | refs/heads/master | 2021-01-12T09:51:32.618362 | 2018-05-16T16:37:18 | 2018-05-16T16:37:18 | 76,279,268 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | """
Find the number connected component in the undirected graph.
Each node in the graph contains a label and a list of its neighbors.
(a connected component (or just component) of an undirected graph is a subgraph in which any two vertices are connected to each other by paths, and which is connected to no additional vertices in the supergraph.)
Notice
Each connected component should sort by label.
Have you met this question in a real interview? Yes
Clarification
Learn more about representation of graphs
Example
Given graph:
A------B C
\ | |
\ | |
\ | |
\ | |
D E
Return {A,B,D}, {C,E}. Since there are two connected component which is {A,B,D}, {C,E}
"""
# Definition for a undirected graph node
# class UndirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution:
    """Connected components of an undirected graph (Lintcode 431)."""
    # @param {UndirectedGraphNode[]} nodes a array of undirected graph node
    # @return {int[][]} a connected set of a undirected graph
    def connectedSet(self, nodes):
        """Return each component as a sorted list of labels.

        Components appear in order of their first node in *nodes*;
        labels within a component are sorted, as the problem requires.
        Handles the empty graph (returns []).
        """
        self.v = {node.label: False for node in nodes}
        res = []
        for node in nodes:
            if not self.v[node.label]:
                tmp = []
                self.dfs(node, tmp)
                res.append(sorted(tmp))
        return res

    def dfs(self, x, tmp):
        """Collect the labels of x's component into *tmp*.

        Iterative (explicit stack) instead of recursive so that a large
        or path-shaped component cannot blow Python's recursion limit.
        Marks nodes visited in ``self.v`` as it goes.
        """
        stack = [x]
        self.v[x.label] = True
        while stack:
            cur = stack.pop()
            tmp.append(cur.label)
            for nd in cur.neighbors:
                if not self.v[nd.label]:
                    # Mark on push (not on pop) so a node is never
                    # stacked twice.
                    self.v[nd.label] = True
                    stack.append(nd)
| [
"noreply@github.com"
] | akb46mayu.noreply@github.com |
7acb418e83413fd1754a421433ce2a02e64ce114 | 0f38e4df503f9e72982d96d0db415caf68e9d44a | /pytraj/make_meta.py | 8320991e14f2e2022f2f6c2caa1e4e6eeaed09bb | [
"BSD-2-Clause"
] | permissive | hainm/conda-recipes | df0946bc0789252fb112beba23f3f44c2b2c5980 | 79268c30c15370f6d7bf3e802f57852476865bb7 | refs/heads/master | 2020-04-04T20:03:27.563895 | 2016-05-29T06:06:56 | 2016-05-29T06:06:56 | 42,761,866 | 0 | 0 | null | 2016-03-08T06:41:43 | 2015-09-19T05:59:53 | Shell | UTF-8 | Python | false | false | 2,045 | py | #!/usr/bin/env python
# conda-build meta.yaml template; {placeholders} are filled from the CLI
# arguments below.  libcpptraj/libnetcdf are pinned at both build and
# run time.
meta_template = """
package:
  name: {pytraj_status}
  version: !!str {pytraj_version}
source:
  {source}
requirements:
  build:
    - python
    - cython
    - libcpptraj {libcpptraj_version}
    - libnetcdf
  run:
    - python
    - libcpptraj {libcpptraj_version}
    - libnetcdf
    - numpy
test:
  commands:
    - python -c 'import pytraj as pt; pt.show_versions(); from pytraj.testing import get_remd_fn'
about:
  home: http://github.com/amber-md/pytraj
  license: GPL v3
  summary: Python API for cpptraj - a package for data analysis of MD simulations
"""
# Source block for dev builds: track master on GitHub.
source_git = """
git_url: https://github.com/amber-md/pytraj.git
git_tag: master
"""
# Source block for released builds: a versioned tarball.
source_pypi = """
fn: pytraj-{pytraj_version}.tar.gz
# url: https://pypi.python.org/packages/source/p/pytraj/pytraj-{pytraj_version}.tar.gz
url: https://anaconda.org/hainm/pytraj/1.0.6/download/pytraj-{pytraj_version}.tar.gz
"""
if __name__ == '__main__':
    import sys
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-lc',
                        '--libcpptraj-version',
                        default='4.3.1',
                        help='libcpptraj version')
    parser.add_argument('-r',
                        '--release',
                        action='store_true')
    parser.add_argument('--version',
                        help='pytraj version',
                        default='1.0.6')
    args = parser.parse_args(sys.argv[1:])
    is_released = args.release
    # Dev builds get a '.dev' version suffix and the 'pytraj-dev' name.
    pytraj_version_str = args.version if is_released else args.version + '.dev'
    pytraj_status = 'pytraj' if is_released else 'pytraj-dev'
    source_ = source_pypi if is_released else source_git
    # Only the pypi source block contains a {pytraj_version} placeholder.
    source = source_.format(pytraj_version=pytraj_version_str) if '{pytraj_version}' in source_ else source_
    meta_str = meta_template.format(
        libcpptraj_version=args.libcpptraj_version,
        pytraj_status=pytraj_status,
        source=source,
        pytraj_version=pytraj_version_str)
    # Echo the recipe and write it where conda-build expects it.
    print(meta_str)
    with open('pytraj/meta.yaml', 'w') as fh:
        fh.write(meta_str)
| [
"hainm.comp@gmail.com"
] | hainm.comp@gmail.com |
e495f8bef89008e00064387e3a8ce29270e3524b | 449da7b08bb82654028967aa0fa8efce8b2b10d2 | /apps/hadoop/migrations/0014_clusterinfo_app.py | cfa385a4f7bfc57faa5012ce6414b735cdab2bbd | [] | no_license | sdgdsffdsfff/bk-dop | f1ae15f858f6236405e50e9453554026d2bcfd21 | 97cfac2ba94d67980d837f0b541caae70b68a595 | refs/heads/master | 2023-08-31T22:24:30.616269 | 2021-10-19T17:56:36 | 2021-10-19T17:56:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | # Generated by Django 2.2.6 on 2021-05-10 11:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the ``app`` CharField to
    # ClusterInfo (verbose_name '业务' = "business"), defaulting to ''.
    # Do not hand-edit applied migrations.

    dependencies = [
        ('hadoop', '0013_delete_hadooptaskrecord'),
    ]
    operations = [
        migrations.AddField(
            model_name='clusterinfo',
            name='app',
            field=models.CharField(default='', max_length=100, verbose_name='业务'),
        ),
    ]
| [
"1297650644@qq.com"
] | 1297650644@qq.com |
0d3762371ed81433d3be36430298dfdd8ae1f5ab | ba7e9a424362ad1fe6d78592de4ffafdf8f778dd | /text_parser/test_utils.py | 9e49ac2fd034db9587bc7acb1dc11959dd7e1c63 | [] | no_license | MrLYC/TextParser | f854b45775f42d153cf616ec53f5bf806772e06d | a0e32ad6831237f01466435e897002a19e9b6364 | refs/heads/master | 2021-01-23T11:32:00.344723 | 2017-06-09T07:23:41 | 2017-06-09T07:23:41 | 93,141,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 374 | py | # encoding: utf-8
from unittest import TestCase
from text_parser import utils
class TestUtils(TestCase):
    """Unit tests for text_parser.utils helpers."""

    def test_force_text(self):
        """force_text coerces ints, utf-8 bytes and unicode to unicode."""
        cases = [
            (1, u"1"),
            (u"测试".encode("utf-8"), u"测试"),
            (u"测试", u"测试"),
        ]
        for raw, expected in cases:
            self.assertEqual(utils.force_text(raw), expected)
| [
"imyikong@gmail.com"
] | imyikong@gmail.com |
01fef293d0d99d60ac6171d39112bcfc35dfe9bd | 0c0a6a41b5bb15e74f2e938218a971d6036dfd0d | /drf40/api/views.py | abb37cb3888df52bf1e5872828714372fff4e2bf | [] | no_license | kamal0072/API-s-based-on-drf-and-python | 54067cd1b364a50ace2c3f4b35cccaafc977d39f | b31299ff2bc32f836c85f402dbe2cfa34f34dd69 | refs/heads/master | 2023-03-25T16:51:36.511505 | 2021-03-24T16:27:46 | 2021-03-24T16:27:46 | 351,147,386 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | from django.shortcuts import render
from rest_framework import viewsets
from .serializers import StudentSerializer
from .models import Student
from rest_framework.filters import OrderingFilter
from rest_framework.pagination import CursorPagination
class Mycursorpagination(CursorPagination):
    """Cursor pagination: 5 students per page, cursored on ``name``."""
    # Number of results per page.
    page_size=5
    # Field used both for ordering and as the cursor key.
    ordering='name'
class StudentViewSet(viewsets.ModelViewSet):
    """CRUD API for Student records, paginated with Mycursorpagination."""
    queryset=Student.objects.all()
    serializer_class=StudentSerializer
    # filter_backends=[OrderingFilter]
    # ordering_fields=['name']
    pagination_class=Mycursorpagination
| [
"hasan.kamaal0072@gmail.com"
] | hasan.kamaal0072@gmail.com |
824ad691679beddca24f539abae8266669640813 | f0e190ae8eff009ca2243551f06c84b56ab27b46 | /q44.py | ff0c11065bd3a98a87e734dbbda42871ffc0574f | [] | no_license | VinayHaryan/Array | 7d042c57619cc1a89bef0eff1a019d66b5a6203c | c19d36054b5d0b39a29673966e14a3b6329da9da | refs/heads/main | 2023-05-29T23:41:07.612633 | 2021-06-17T08:38:17 | 2021-06-17T08:38:17 | 377,736,580 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,864 | py | '''
MAJORITY ELEMENT
Write a function which takes an array and prints the majority element
(if it exists), otherwise prints “No Majority Element”.
A majority element in an array A[] of size n is an element that appears more than
n/2 times (and hence there is at most one such element).
Input : {3, 3, 4, 2, 4, 4, 2, 4, 4}
Output : 4
Explanation: The frequency of 4 is 5 which is greater
than the half of the size of the array size.
Input : {3, 3, 4, 2, 4, 4, 2, 4}
Output : No Majority Element
Explanation: There is no element whose frequency is
greater than the half of the size of the array size.
'''
# Python program for finding the majority
# element in an array
# def findMajority(arr, size):
# m = {}
# for i in range(size):
# if arr[i] in m:
# m[arr[i]] += 1
# else:
# m[arr[i]] = 1
# count = 0
# for key in m:
# if m[key] > size/2:
# count = 1
# print("majority found : ",key)
# break
# if (count == 0):
# print("no majority element")
# # Driver code
# arr = [2,2,2,2,5,5,2,3,3]
# n = len(arr)
# findMajority(arr,n)
def GETMEJORITY(ARR, SIZE):
    """Print the element occurring more than SIZE/2 times in ARR[:SIZE].

    Prints "NO MEJORITY" when no such element exists.  Single pass to
    count frequencies (O(n) time, O(n) space), then one scan of the
    counts; at most one majority element can exist.
    """
    counts = {}
    for idx in range(SIZE):
        value = ARR[idx]
        counts[value] = counts.get(value, 0) + 1
    for key, freq in counts.items():
        if freq > SIZE / 2:
            print('MEJORITY OF ELEMENT IS: ', key)
            return
    print("NO MEJORITY")
# DRIVER MODE
if __name__ == '__main__':
    # Sample run: 22 appears 5 times out of 9 (> 9/2), so 22 is printed.
    ARR = [22,22,22,22,55,55,22,33,33]
    SIZE = len(ARR)
    GETMEJORITY(ARR,SIZE)
'''
TIME COMPLEXITY O(n)
ONE TRAVERSAL OF THE ARRAY IS NEEDED, SO THE TIME COMPLEXITY IS LINEAR
AUXILIARY SPACE O(n)
SINCE A HASHMAP LINEAR SPACE
''' | [
"noreply@github.com"
] | VinayHaryan.noreply@github.com |
dd440a9f5411f168d6a47869bc44632c78ce7bf6 | 49217cbcb100c1e92ab707065dfa7c1c684e6440 | /spacegraphcats/search/test_workflow.py | ea63248fc443ff0e9c9af48ec34a2344f24a292d | [
"LicenseRef-scancode-biopython",
"BSD-3-Clause"
] | permissive | bluegenes/spacegraphcats | 5bba5e6ff63ecd0f632803a6d6103a0888eb250b | 35f8057068e4fe79ab83ac4efe91d1b0f389e1ea | refs/heads/master | 2022-04-12T15:37:13.372545 | 2018-12-18T03:17:35 | 2018-12-18T03:17:35 | 160,425,418 | 0 | 0 | NOASSERTION | 2018-12-18T03:19:14 | 2018-12-04T22:11:30 | Standard ML | UTF-8 | Python | false | false | 4,683 | py | import os.path
import tempfile
import shutil
import screed
from spacegraphcats.catlas import catlas
from spacegraphcats.index import index_contigs_by_kmer
from spacegraphcats.search import extract_nodes_by_query
from spacegraphcats.search import characterize_catlas_regions
from spacegraphcats.search import extract_unassembled_nodes
from spacegraphcats.search import catlas_info
from spacegraphcats.search import extract_contigs
from spacegraphcats.search import estimate_query_abundance
from spacegraphcats.utils import make_bgzf
from spacegraphcats.cdbg import label_cdbg
from spacegraphcats.search import extract_reads
import sourmash_lib
class TempDirectory(object):
    """Context manager yielding a fresh temp directory, removed on exit.

    ``with TempDirectory() as path:`` gives the path (a str); the
    directory and its contents are deleted when the block exits,
    whether or not an exception occurred.
    """
    def __init__(self):
        # Created eagerly so the path exists before __enter__.
        self.tempdir = tempfile.mkdtemp(prefix='sgc_test')

    def __enter__(self):
        return self.tempdir

    def __exit__(self, exc_type, exc_value, traceback):
        # ignore_errors=True already suppresses removal failures, so the
        # old try/except OSError around this call was dead code.
        shutil.rmtree(self.tempdir, ignore_errors=True)
        # Returning a falsy value never suppresses exceptions from the
        # with-body — same observable behavior as the old
        # `if exc_type: return False`.
        return False
def relative_filename(filename):
    """Resolve *filename* relative to the package root.

    The package root is taken to be two directories above this module;
    the result is returned as an absolute, normalized path.
    """
    pkgdir = os.path.join(os.path.dirname(__file__), '../..')
    return os.path.abspath(os.path.join(pkgdir, filename))
class Args(object):
    """Bare attribute container standing in for an argparse namespace."""
def test_dory():
    """End-to-end pipeline test on the small 'dory' dataset.

    Runs bcalm->gxt conversion, catlas build, k-mer indexing, a query
    search, and the downstream extraction/labelling tools, asserting on
    the files each stage produces.  Stages depend on earlier stages'
    outputs, so the order below matters.

    NOTE(review): `location` is never used — all paths are relative to
    the current working directory, not the temp dir; confirm whether
    outputs are meant to be isolated.
    """
    with TempDirectory() as location:
        from spacegraphcats.cdbg import bcalm_to_gxt
        # make the output directory
        try:
            os.mkdir('dory_k21_r1')
        except FileExistsError:
            pass
        # convert the bcalm file to gxt
        args = ['-k', '-21', '-P',
                relative_filename('dory/bcalm.dory.k21.unitigs.fa'),
                'dory_k21_r1/cdbg.gxt',
                'dory_k21_r1/contigs.fa.gz']
        bcalm_to_gxt.main(args)
        # build catlas
        args = Args()
        args.no_checkpoint = True
        args.level = 0
        args.radius = 1
        args.project = 'dory_k21_r1'
        catlas.main(args)
        # make k-mer search index
        args = '-k 21 dory_k21_r1'.split()
        index_contigs_by_kmer.main(args)
        # do search!!
        args='dory_k21_r1 dory_k21_r1_search_oh0 --query data/dory-head.fa -k 21 --overhead=0.0'.split()
        try:
            extract_nodes_by_query.main(args)
        except SystemExit as e:
            assert e.code == 0, str(e)
        # check output!
        output_path = 'dory_k21_r1_search_oh0/'
        assert os.path.exists(output_path + 'command.txt')
        assert os.path.exists(output_path + 'dory-head.fa.frontier.txt.gz')
        assert os.path.exists(output_path + 'dory-head.fa.cdbg_ids.txt.gz')
        assert os.path.exists(output_path + 'dory-head.fa.response.txt')
        assert os.path.exists(output_path + 'dory-head.fa.contigs.sig')
        assert os.path.exists(output_path + 'results.csv')
        # results.csv: header plus exactly one query row with known stats
        with open(output_path + 'results.csv') as fp:
            lines = fp.readlines()
            assert len(lines) == 2
            last_line = lines[-1].strip()
            assert last_line == 'data/dory-head.fa,1.0,1.0,1671,2,21,1631,1.0,0.0,0.0'
        # run characterize_catlas_regions
        args = 'dory_k21_r1 dory_k1_r1.vec'.split()
        characterize_catlas_regions.main(args)
        # run extract_unassembled_regions
        args = 'dory_k21_r1 data/dory-head.fa dory.regions -k 21'.split()
        extract_unassembled_nodes.main(args)
        # run catlas info
        catlas_info.main(['dory_k21_r1'])
        # run extract_contigs
        args = ['dory_k21_r1',
                'dory_k21_r1_search_oh0/dory-head.fa.cdbg_ids.txt.gz',
                '-o',
                'dory_k21_r1_search_oh0/dory-head.fa.cdbg_ids.contigs.fa.gz']
        extract_contigs.main(args)
        assert os.path.exists('dory_k21_r1_search_oh0/dory-head.fa.cdbg_ids.contigs.fa.gz')
        # run make_bgzf
        args = ['data/dory-subset.fa', '-o', 'dory/dory.reads.bgz']
        make_bgzf.main(args)
        # run label_cdbg
        args = ['dory_k21_r1',
                'dory/dory.reads.bgz', 'dory_k21_r1/reads.bgz.labels']
        label_cdbg.main(args)
        # run extract_reads
        args = ['dory/dory.reads.bgz',
                'dory_k21_r1/reads.bgz.labels',
                'dory_k21_r1_search_oh0/dory-head.fa.cdbg_ids.txt.gz',
                '-o',
                'dory_k21_r1_search_oh0/dory-head.fa.cdbg_ids.reads.fa.gz']
        extract_reads.main(args)
        # calculate query abundances
        args = 'dory_k21_r1 data/dory-head.fa -o abundances.csv -k 21'.split()
        estimate_query_abundance.main(args)
        abunds = open('abundances.csv', 'rt').read()
        assert 'data/dory-head.fa,1.0,1.05' in abunds
| [
"titus@idyll.org"
] | titus@idyll.org |
c19e4d29857ba91dcc6287fda9784e3aa0622f9a | e5b9d8327634bd3a0cb6baa94738013fc01e14a4 | /extras/examples/example.py | edcd2c8d66e24d10dce933cffdc328fba4b7c289 | [
"Apache-2.0"
] | permissive | google/tmppy | 42ef444eb11675f10f3433180414d6818a602342 | faf67af1213ee709f28cc5f492ec4903c51d4104 | refs/heads/master | 2023-09-02T19:20:45.172571 | 2020-06-01T02:31:28 | 2020-06-01T02:31:28 | 105,428,926 | 32 | 10 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tmppy import Type
class MyError(Exception):
    """Example TMPPy exception carrying a fixed message and a bool payload."""
    def __init__(self, b: bool):
        # NOTE(review): TMPPy's restricted subset appears to require this
        # exact ``self.message = ...`` form (Exception.__init__ is not
        # called) — confirm against the TMPPy docs before "fixing".
        self.message = 'Something went wrong'
        self.b = b
def f(x: Type):
    """Return True, unless *x* is Type('float'): then raise MyError(True)."""
    if x == Type('float'):
        raise MyError(True)
    return True
| [
"poletti.marco@gmail.com"
] | poletti.marco@gmail.com |
25030ec11209b9e14609ffce77d412fbd91f108e | b7eed26cf8a0042a61f555eed1e9bf0a3227d490 | /students/piotrowski_stanislaw/lesson_02_flow_control/how_many_zeroes.py | 8379f81dc8394b9c4067827762a18bd7cb6915a7 | [] | no_license | jedzej/tietopythontraining-basic | e8f1ac5bee5094c608a2584ab19ba14060c36dbe | a68fa29ce11942cd7de9c6bbea08fef5541afa0f | refs/heads/master | 2021-05-11T11:10:05.110242 | 2018-08-20T12:34:55 | 2018-08-20T12:34:55 | 118,122,178 | 14 | 84 | null | 2018-08-24T15:53:04 | 2018-01-19T12:23:02 | Python | UTF-8 | Python | false | false | 240 | py | # https://snakify.org/lessons/for_loop_range/problems/how_many_zeroes/
# piotrsta
number_of_numbers = int(input())
zeros = 0
for i in range(number_of_numbers):
number = int(input())
if number == 0:
zeros += 1
print(zeros)
| [
"32517941+kadilak83@users.noreply.github.com"
] | 32517941+kadilak83@users.noreply.github.com |
3fe182a7305979372bdb7c5590c323baa6dda99f | dea8cfa596d52d5db0e28ac43504e7212b43081b | /python/AtCoder Beginner Contest 152/D_new.py | 16f861824d8037f0975636f52839b592b1e36b9d | [] | no_license | Yuta123456/AtCoder | 9871a44f12a8fca87b0e2863a999b716128de1ac | ca04422699719563e311f7d973459ba1dc238c2c | refs/heads/master | 2023-01-04T22:33:54.120454 | 2020-11-04T05:20:37 | 2020-11-04T05:20:37 | 286,409,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | n = int(input())
memo = [[0 for i in range(10)] for j in range(10)]
for i in range(1,n+1):
i_first = int(str(i)[0])
i_last = int(str(i)[-1])
memo[i_first][i_last] += 1
ans = 0
for i in range(1,n+1):
i_first = int(str(i)[0])
i_last = int(str(i)[-1])
ans += memo[i_last][i_first]
print(ans) | [
"yuuta09090530@icloud.com"
] | yuuta09090530@icloud.com |
91f058583cc024417caa7cd714d9ec19a965205b | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow_nightly/source2.7/tensorflow/contrib/cmake/tools/create_def_file.py | 53c2285699a6ca94e1e6b147080338b507f4d768 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 6,305 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""create_def_file.py - tool to create a windows def file.
The def file can be used to export symbols from the tensorflow dll to enable
tf.load_library().
Because the linker allows only 64K symbols to be exported per dll
we filter the symbols down to the essentials. The regular expressions
we use for this are specific to tensorflow.
TODO: this works fine but there is an issue with exporting
'const char * const' and importing it from a user_ops. The problem is
on the importing end and using __declspec(dllimport) works around it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import codecs
import os
import re
import subprocess
import sys
import tempfile
# External tools we use that come with visual studio sdk and
# we assume that the caller has the correct PATH to the sdk
UNDNAME = "undname.exe"
DUMPBIN = "dumpbin.exe"
# Exclude if matched
EXCLUDE_RE = re.compile(r"RTTI|deleting destructor|::internal::")
# Include if matched before exclude
INCLUDEPRE_RE = re.compile(r"google::protobuf::internal::ExplicitlyConstructed|"
r"tensorflow::internal::LogMessage|"
r"tensorflow::internal::LogString|"
r"tensorflow::internal::CheckOpMessageBuilder|"
r"tensorflow::internal::PickUnusedPortOrDie|"
r"tensorflow::internal::ValidateDevice|"
r"tensorflow::ops::internal::Enter|"
r"tensorflow::strings::internal::AppendPieces|"
r"tensorflow::strings::internal::CatPieces|"
r"tensorflow::io::internal::JoinPathImpl")
# Include if matched after exclude
INCLUDE_RE = re.compile(r"^(TF_\w*)$|"
r"^(TFE_\w*)$|"
r"tensorflow::|"
r"functor::|"
r"nsync_|"
r"perftools::gputools")
# We want to identify data members explicitly in the DEF file, so that no one
# can implicitly link against the DLL if they use one of the variables exported
# from the DLL and the header they use does not decorate the symbol with
# __declspec(dllimport). It is easier to detect what a data symbol does
# NOT look like, so doing it with the below regex.
DATA_EXCLUDE_RE = re.compile(r"[)(]|"
r"vftable|"
r"vbtable|"
r"vcall|"
r"RTTI|"
r"protobuf::internal::ExplicitlyConstructed")
def get_args():
  """Parse the command line.

  Returns:
    argparse.Namespace with:
      input: list of library paths (the semicolon-separated --input value).
      output: path of the .def file to write.
      target: name of the target dll.
  """
  def filename_list(value):
    """Split a semicolon-separated CLI value into a list of paths."""
    return value.split(";")

  parser = argparse.ArgumentParser()
  parser.add_argument("--input", type=filename_list,
                      help="paths to input libraries separated by semicolons",
                      required=True)
  parser.add_argument("--output", help="output deffile", required=True)
  parser.add_argument("--target", help="name of the target", required=True)
  args = parser.parse_args()
  return args
def main():
  """Extract exportable symbols from the input .lib files into a .def file.

  Returns:
    0 on success, the failing tool's exit code otherwise.
  """
  args = get_args()
  # Pipe dumpbin to extract all linkable symbols from libs.
  # Good symbols are collected in candidates and also written to
  # a temp file.
  candidates = []
  tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
  for lib_path in args.input:
    proc = subprocess.Popen([DUMPBIN, "/nologo", "/linkermember:1", lib_path],
                            stdout=subprocess.PIPE)
    for line in codecs.getreader("utf-8")(proc.stdout):
      cols = line.split()
      # Skip headers/blank lines that are not an "<offset> <symbol>" pair.
      if len(cols) < 2:
        continue
      sym = cols[1]
      tmpfile.file.write(sym + "\n")
      candidates.append(sym)
    exit_code = proc.wait()
    if exit_code != 0:
      print("{} failed, exit={}".format(DUMPBIN, exit_code))
      return exit_code
  tmpfile.file.close()
  # Run the symbols through undname to get their undecorated name
  # so we can filter on something readable.
  with open(args.output, "w") as def_fp:
    # track dupes
    taken = set()
    # Header for the def file.
    def_fp.write("LIBRARY " + args.target + "\n")
    def_fp.write("EXPORTS\n")
    def_fp.write("\t ??1OpDef@tensorflow@@UEAA@XZ\n")
    # Each symbols returned by undname matches the same position in candidates.
    # We compare on undname but use the decorated name from candidates.
    dupes = 0
    proc = subprocess.Popen([UNDNAME, tmpfile.name], stdout=subprocess.PIPE)
    for idx, line in enumerate(codecs.getreader("utf-8")(proc.stdout)):
      decorated = candidates[idx]
      if decorated in taken:
        # Symbol is already in output, done.
        dupes += 1
        continue
      # Keep a symbol if it matches INCLUDEPRE_RE outright, or survives the
      # EXCLUDE_RE / INCLUDE_RE filter pair.
      if not INCLUDEPRE_RE.search(line):
        if EXCLUDE_RE.search(line):
          continue
        if not INCLUDE_RE.search(line):
          continue
      if "deleting destructor" in line:
        # Some of the symbols covered by INCLUDEPRE_RE export deleting
        # destructor symbols, which is a bad idea.
        # So we filter out such symbols here.
        continue
      # Symbols that do NOT match DATA_EXCLUDE_RE look like data members and
      # get the DATA keyword (see the comment above DATA_EXCLUDE_RE).
      if DATA_EXCLUDE_RE.search(line):
        def_fp.write("\t" + decorated + "\n")
      else:
        def_fp.write("\t" + decorated + " DATA\n")
      taken.add(decorated)
    exit_code = proc.wait()
    if exit_code != 0:
      print("{} failed, exit={}".format(UNDNAME, exit_code))
      return exit_code
  os.unlink(tmpfile.name)
  print("symbols={}, taken={}, dupes={}"
        .format(len(candidates), len(taken), dupes))
  return 0
if __name__ == "__main__":
sys.exit(main())
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
d6b1d62fefd154fc216753ece44b8a604cc0fd8a | e567b06c895054d88758366e769de77ee693a568 | /SciComputing with Python/lesson_05-01/sample.py | e0cab4169f719e7eeb254d1eea52b6d3135cd176 | [
"MIT"
] | permissive | evtodorov/aerospace | 68986b4ae772e1de8cc7982b4f8497b6423ac8cc | 54a1b58c3c0b02c0eaa3aef14d0e732d7f867566 | refs/heads/main | 2023-01-19T17:52:29.520340 | 2020-11-29T13:23:31 | 2020-11-29T13:23:31 | 315,653,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | import sys, pygame
# NOTE(review): requires the third-party pygame package and a "ball.gif"
# asset in the working directory.
pygame.init()
# Window geometry: `size` is (width, height); the chained assignment also
# binds the two names used in the edge tests below.
size = width, height = 320, 240
speed = [2, 2]  # per-frame displacement in pixels: [dx, dy]
black = 0, 0, 0  # RGB background colour
screen = pygame.display.set_mode(size)
ball = pygame.image.load("ball.gif")
ballrect = ball.get_rect()
# Main loop: move the ball each frame and bounce it off the window edges.
while 1:
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()
    ballrect = ballrect.move(speed)
    # Reverse horizontal direction at the left/right edges.
    if ballrect.left < 0 or ballrect.right > width:
        speed[0] = -speed[0]
    # Reverse vertical direction at the top/bottom edges.
    if ballrect.top < 0 or ballrect.bottom > height:
        speed[1] = -speed[1]
    screen.fill(black)
    screen.blit(ball, ballrect)
    pygame.display.flip()
"evgeni.todorov@tum.de"
] | evgeni.todorov@tum.de |
a3505eec5c244a934af89056da0444db4381f88d | 4fb4899758f3d8c5f1db07e8bc405b13ab4eb0b8 | /kth-largest-element-in-an-array.py | 94892abc4fb04595156d8e98c265ae30e953475a | [] | no_license | stephenosullivan/LT-Code | e70df6c52c4944c56bb604109c2fc5b537e28ae6 | b93e46f6526be7a92ebe16f45998527e2b1dc494 | refs/heads/master | 2020-04-05T14:39:57.569699 | 2016-09-14T16:01:22 | 2016-09-14T16:01:22 | 15,329,855 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | __author__ = 'stephenosullivan'
class Solution:
    """Quickselect solution for LeetCode 215 (kth largest element in an array)."""

    # @param {integer[]} nums
    # @param {integer} k
    # @return {integer}
    def findKthLargest(self, nums, k):
        # The k-th largest value sits at index len(nums)-k in sorted order.
        target = len(nums) - k
        return self.recursiveFind(nums, 0, len(nums) - 1, target)

    def recursiveFind(self, nums, left, right, k):
        # Quickselect over nums[left..right] for the element of sorted rank k.
        # Partitions the array in place; each round discards one side.
        while left < right:
            pivotIndex = int((left + right + 1) / 2)  # deterministic middle pivot
            pivotIndex = self.partition(nums, left, right, pivotIndex)
            if k == pivotIndex:
                return nums[k]
            if k < pivotIndex:
                right = pivotIndex - 1
            else:
                left = pivotIndex + 1
        return nums[left]

    def partition(self, nums, left, right, pivot):
        # Lomuto partition around nums[pivot]; returns the pivot's final index.
        pivot_value = nums[pivot]
        nums[pivot], nums[right] = nums[right], nums[pivot]  # park pivot at the end
        store = left  # boundary of the "< pivot" region
        for scan in range(left, right):
            if nums[scan] < pivot_value:
                nums[scan], nums[store] = nums[store], nums[scan]
                store += 1
        nums[store], nums[right] = nums[right], nums[store]  # restore pivot
        return store
"osullisg@gmail.com"
] | osullisg@gmail.com |
e9d0c5ae757e80692fa2dc5b31504379db90cd78 | 2dadc071440a236c32645e5af81d0aa72d68050f | /statistics/estimate_time_visit.py | c0a81f14a1fa6fb9114a84a2e5f8c36978262938 | [] | no_license | sunary/data-science | a58d572d05c7c14b7ab5d281889d7206838d97ab | ac5d2af680948e76cf21248797a7014b27e96703 | refs/heads/master | 2023-04-12T08:42:13.950495 | 2023-03-28T01:05:32 | 2023-03-28T01:05:32 | 43,934,997 | 4 | 3 | null | 2023-03-31T14:29:53 | 2015-10-09T05:31:13 | Python | UTF-8 | Python | false | false | 6,795 | py | __author__ = 'sunary'
from datetime import datetime, timedelta
class BehaviorEstimate():
    '''
    Estimate next time visit page in REST crawler.

    Maintains a circular time-of-day histogram (LENGTH_HISTOGRAM buckets over
    24h) of when messages appeared, plus a rolling window of per-day visit
    counts, and derives the next crawl time from both.
    '''
    # Number of equal buckets the 24h day is divided into.
    LENGTH_HISTOGRAM = 50
    # Length (days) of the rolling window used to average visit frequency.
    CUMULATIVE_DAYS_VISIT = 14
    # Percent of histogram mass shifted toward the current bucket per update.
    PERCENT_SUBTRACT = 2.0

    def __init__(self, min_times=0.2, max_times=10.0, scale_times=3.0):
        # Bounds and scale factor for the estimated visits/day rate.
        self.MIN_TIMES_VISIT_PER_DAY = min_times
        self.MAX_TIMES_VISIT_PER_DAY = max_times
        self.SCALE_TIMES_VISIT = scale_times
        # Uniform prior: every bucket holds an equal share of 100%.
        self.histogram = [100.0/self.LENGTH_HISTOGRAM] * self.LENGTH_HISTOGRAM
        self.average_times_visit_per_day = self.SCALE_TIMES_VISIT
        self.activated_date = None
        self.range_day = 0
        # Per-day visit counters; far-past dates mark free slots (a few spare
        # slots beyond the window length).
        self.date_visit = [{'date': datetime(2015, 1, 1), 'times_visit': 0} for _ in range(self.CUMULATIVE_DAYS_VISIT + 6)]
        self.time_msg = None
        self.date_has_message = None

    def set(self, data):
        '''
        Set estimate data
        Args:
            data (dict): {'histogram': , 'activated_date': , 'date_visit': }
        '''
        self.histogram = data.get('histogram') if data.get('histogram') else self.histogram
        self.activated_date = data.get('activated_date') if data.get('activated_date') else self.activated_date
        self.date_visit = data.get('date_visit') if data.get('date_visit') else self.date_visit

    def get(self):
        '''
        Dump estimator state plus the computed next crawl time.
        Returns:
            dict: {'histogram', 'activated_date', 'date_visit', 'next_crawl_time'}
        '''
        data_update = {}
        data_update['histogram'] = self.histogram
        data_update['activated_date'] = self.activated_date
        data_update['date_visit'] = self.date_visit
        data_update['next_crawl_time'] = self._next_time_visit()
        return data_update

    def has_messages(self, time_msg):
        '''
        Change status if page have a message
        Args:
            time_msg: time message was created
        '''
        self.time_msg = time_msg
        # Midnight of the message's day.
        self.date_has_message = self.time_msg.replace(hour= 0, minute= 0, second= 0, microsecond= 0)
        if not self.activated_date or self.activated_date > self.time_msg:
            self.activated_date = self.date_has_message
        elif self.range_day < self.CUMULATIVE_DAYS_VISIT:
            # Days since activation, capped at the window length.
            self.range_day = (self.time_msg - self.activated_date).days
            self.range_day = self.CUMULATIVE_DAYS_VISIT if self.range_day > self.CUMULATIVE_DAYS_VISIT else self.range_day
        # Count the visit in today's slot, or recycle a slot older than the window.
        for i in range(len(self.date_visit)):
            if self.date_visit[i]['date'] == self.date_has_message:
                self.date_visit[i]['times_visit'] += 1
                break
        else:
            for i in range(len(self.date_visit)):
                if self.date_visit[i]['date'] < (self.date_has_message - timedelta(days=self.CUMULATIVE_DAYS_VISIT)):
                    self.date_visit[i]['date'] = self.date_has_message
                    self.date_visit[i]['times_visit'] = 1
                    break
        self._update()

    def _update(self):
        # Total visits within the trailing window, excluding today.
        total_visit = 0
        for i in range(len(self.date_visit)):
            if self.date_visit[i]['date'] < self.date_has_message and self.date_visit[i]['date'] >= (self.date_has_message - timedelta(days=self.CUMULATIVE_DAYS_VISIT)):
                total_visit += self.date_visit[i]['times_visit']
        if self.range_day == 0:
            # First observed day: fall back to today's count (at least the scale value).
            for i in range(len(self.date_visit)):
                if self.date_visit[i]['date'] == self.date_has_message:
                    self.average_times_visit_per_day = max(self.date_visit[i]['times_visit'], self.SCALE_TIMES_VISIT)
                    break
        else:
            self.average_times_visit_per_day = total_visit*self.SCALE_TIMES_VISIT/self.range_day
        # Clamp the rate to the configured bounds.
        self.average_times_visit_per_day = self.MAX_TIMES_VISIT_PER_DAY if (self.average_times_visit_per_day > self.MAX_TIMES_VISIT_PER_DAY)\
            else self.average_times_visit_per_day
        self.average_times_visit_per_day = self.MIN_TIMES_VISIT_PER_DAY if (self.average_times_visit_per_day < self.MIN_TIMES_VISIT_PER_DAY)\
            else self.average_times_visit_per_day
        # Decay every bucket by PERCENT_SUBTRACT%, then credit the current
        # bucket, keeping the histogram total at ~100.
        for i in range(len(self.histogram)):
            self.histogram[i] -= self.histogram[i]*self.PERCENT_SUBTRACT/100
        self.histogram[self._order_histogram()] += self.PERCENT_SUBTRACT

    def _next_time_visit(self):
        '''
        Get next time visit page by status
        Returns:
            (datetime) next time visit
        '''
        # Walk buckets forward from "now", accumulating expected visits, until
        # one whole visit is expected; that bucket offset is the wait time.
        num_unit = 0
        order_histogram = self._order_histogram()
        probability_visit = 0
        while probability_visit < 1:
            num_unit += 1
            probability_visit += self.histogram[order_histogram]*self.average_times_visit_per_day/100
            order_histogram = (order_histogram + 1) % self.LENGTH_HISTOGRAM
        return datetime.today() + timedelta(minutes=num_unit*24*60/self.LENGTH_HISTOGRAM)

    def _order_histogram(self):
        '''
        get order histogram to next time visit
        Returns:
            int: order of histogram
        '''
        if not self.time_msg:
            self.time_msg = datetime.utcnow()
        minutes = self.time_msg.hour*60 + self.time_msg.minute
        # BUGFIX: floor division keeps the result an int under Python 3
        # (a float here crashes the list indexing in _update()).
        return minutes*self.LENGTH_HISTOGRAM//(24*60)

    def _change_len_histogram(self, new_len_histogram):
        # Resample the histogram to a new bucket count, then renormalise to 100.
        new_histogram = [0]*new_len_histogram
        for i in range(len(new_histogram)):
            new_histogram[i] = self.histogram[int(round(i*self.LENGTH_HISTOGRAM*1.0/new_len_histogram))]
        sum_new_histogram = 0
        for i in range(len(new_histogram)):
            sum_new_histogram += new_histogram[i]
        for i in range(len(new_histogram)):
            new_histogram[i] *= 100.0/sum_new_histogram
        self.histogram = new_histogram
        self.LENGTH_HISTOGRAM = new_len_histogram

    @staticmethod
    def datetime_from_string(str_date, date_format='iso'):
        '''
        convert string to datetime
        Examples:
            >>> BehaviorEstimate.datetime_from_string('2015-07-17 06:07:22.375866')
            datetime.datetime(2015, 7, 17, 6, 7, 22)
            >>> BehaviorEstimate.datetime_from_string('Wed Oct 07 15:49:44 +0000 2009', 'twitter')
            datetime.datetime(2009, 10, 7, 15, 49, 44)
        '''
        if date_format == 'iso':
            # BUGFIX: strip fractional seconds so the documented example
            # parses; strings without a '.' are unaffected.
            str_date = str_date.split('.')[0]
            return datetime.strptime(str_date, '%Y-%m-%d %H:%M:%S')
        elif date_format == 'mongo':
            str_date = str_date.split('.')[0]
            return datetime.strptime(str_date, '%Y-%m-%d %H:%M:%S')
        elif date_format == 'twitter':
            # Drop the "+0000" offset column; these timestamps are always UTC.
            str_date = str_date.split(' ')
            del str_date[4]
            str_date = ' '.join(str_date)
            return datetime.strptime(str_date, '%a %b %d %H:%M:%S %Y')
        return None
if __name__ == '__main__':
    # NOTE(review): Python 2 demo (uses the `print` statement); simulates one
    # message and prints the resulting next crawl time.
    actor = BehaviorEstimate()
    actor.set({})
    actor.has_messages(datetime(2015, 6, 3))
    print actor.get()['next_crawl_time']
"v2nhat@gmail.com"
] | v2nhat@gmail.com |
517b023f89130276edb683420abccce55bff849d | 9cd180fc7594eb018c41f0bf0b54548741fd33ba | /sdk/python/pulumi_azure_nextgen/apimanagement/v20191201/get_content_item.py | 666f2b1d836dc8b4fea2b9349128bcf7cba80097 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | MisinformedDNA/pulumi-azure-nextgen | c71971359450d03f13a53645171f621e200fe82d | f0022686b655c2b0744a9f47915aadaa183eed3b | refs/heads/master | 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,753 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetContentItemResult',
'AwaitableGetContentItemResult',
'get_content_item',
]
@pulumi.output_type
class GetContentItemResult:
"""
Content type contract details.
"""
def __init__(__self__, name=None, type=None):
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
class AwaitableGetContentItemResult(GetContentItemResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetContentItemResult(
name=self.name,
type=self.type)
def get_content_item(content_item_id: Optional[str] = None,
content_type_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetContentItemResult:
"""
Use this data source to access information about an existing resource.
:param str content_item_id: Content item identifier.
:param str content_type_id: Content type identifier.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['contentItemId'] = content_item_id
__args__['contentTypeId'] = content_type_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/v20191201:getContentItem', __args__, opts=opts, typ=GetContentItemResult).value
return AwaitableGetContentItemResult(
name=__ret__.name,
type=__ret__.type)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
6238116c5732ba605c7c4205d8ae65222be4acd1 | a9f7e40e18c935fb004fe813f98e298ded0581af | /JOI/JOI_2007_Camp/JOI_2007_Camp_G.py | a7007ccc9f95f65bb2a0d47716a06309f9ed71d5 | [] | no_license | happa64/AtCoder_Beginner_Contest | 2eb350f500f4bd65f5491b98cdf002ac9b174165 | 2526e72de9eb19d1e1c634dbd577816bfe39bc10 | refs/heads/master | 2023-07-08T15:13:51.449555 | 2021-08-11T14:18:09 | 2021-08-11T14:18:09 | 255,601,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | # https://atcoder.jp/contests/joisc2007/submissions/17078836
# anagram - アナグラム (Anagram)
import sys
from collections import Counter
sys.setrecursionlimit(10 ** 7)
f_inf = float('inf')
mod = 10 ** 9 + 7
def resolve():
    # Prints the 1-indexed rank of S among the lexicographically sorted
    # distinct permutations (anagrams) of its letters.
    S = input()
    n = len(S)
    D = Counter(S)  # multiset of letters still unplaced while scanning S
    # fact[i] = i!, precomputed up to n.
    fact = [1, 1]
    for i in range(2, n + 1):
        fact.append(fact[-1] * i)
    res = 1
    for i in range(n):
        small = 0  # remaining letters strictly smaller than S[i]
        dup = 1    # product of factorials of the remaining multiplicities
        for k, v in D.items():
            if k < S[i]:
                small += v
            dup *= fact[v]
        # Distinct permutations of the remaining letters that would start with
        # a smaller letter at position i: small * (n-i-1)! / prod(counts!).
        # The division is exact (multinomial coefficient), so // is safe.
        res += small * fact[n - (i + 1)] // dup
        D[S[i]] -= 1
    print(res)
if __name__ == '__main__':
    resolve()
| [
"happa_iidx@yahoo.co.jp"
] | happa_iidx@yahoo.co.jp |
0a9e88f11e6bbce3e103e9e4c169699781bddd66 | 32cf9c3099c36a46804e393dd1491a8954f50263 | /2019.05.14 - 팀프로젝트(3)/final_project/workspace/Movies/movies/admin.py | ccf1bccecced7ac7c71989de629aae9c701e9d50 | [] | no_license | ash92kr/s_code | ce3bda6a403600892750e181dca5ed8c4caebcb1 | 92eace551d132b91ee91db6c0afd38b93f9b647b | refs/heads/master | 2020-04-12T00:27:07.043091 | 2019-05-21T08:17:39 | 2019-05-21T08:17:39 | 162,200,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | from django.contrib import admin
from .models import Movie, Actor, Genre
# Register your models here.
# Django admin registrations for the movies app: expose Movie, Actor and
# Genre with custom change-list columns.
class MovieAdmin(admin.ModelAdmin):
    # Columns shown on the Movie change-list page.
    list_display =['movie_name', 'time', 'year', 'nation', 'director', 'company',]
admin.site.register(Movie, MovieAdmin)
class ActorAdmin(admin.ModelAdmin):
    # Columns shown on the Actor change-list page.
    list_display =['actor_id', 'name',]
admin.site.register(Actor, ActorAdmin)
class GenreAdmin(admin.ModelAdmin):
    # Columns shown on the Genre change-list page.
    list_display =['name',]
admin.site.register(Genre, GenreAdmin)
"ash92kr@gmail.com"
] | ash92kr@gmail.com |
72d0ef308a76fcc82ad27bdfc8e1eddc24236f9e | 88b4b883c1a262b5f9ca2c97bf1835d6d73d9f0b | /src/api/python/hce/app/LogFormatter.py | b175cd80a595a9ebd2b83b0161c0e00772cde50d | [] | no_license | hce-project/hce-bundle | 2f93dc219d717b9983c4bb534884e4a4b95e9b7b | 856a6df2acccd67d7af640ed09f05b2c99895f2e | refs/heads/master | 2021-09-07T22:55:20.964266 | 2018-03-02T12:00:42 | 2018-03-02T12:00:42 | 104,993,955 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | '''
HCE project, Python bindings.
Logging messages formatters classes.
@package: dtm
@author bgv bgv.hce@gmail.com
@link: http://hierarchical-cluster-engine.com/
@copyright: Copyright © 2013-2014 IOIX Ukraine
@license: http://hierarchical-cluster-engine.com/license/
@since: 0.1
'''
##Log formatter event, defines the object to format message string
#
#The LogFormatterEvent object used to create log message string representation according with special format.
class LogFormatterEvent(object):
    ##Log message string builder: dumps the event, its payload object and any
    #extra objects, then appends the free-form description text.

    OBJECT_DUMP_DELIMITER = "\n"
    MESSAGE_PREFIX = ""

    ##constructor
    #initialize fields
    #
    #@param event Event object instance to dump itself and eventObj inside
    #@param objectsList List of objects to dump
    #@param descriptionText free-form text appended after the dumps
    def __init__(self, event, objectsList, descriptionText):
        self.event = event
        self.objects = objectsList
        self.description = descriptionText

    def __str__(self):
        delim = self.OBJECT_DUMP_DELIMITER
        parts = [self.MESSAGE_PREFIX + str(vars(self.event))]
        # Dump the event payload only when it is truthy and introspectable.
        payload = self.event.eventObj
        if payload and hasattr(payload, "__dict__"):
            parts.append(delim + str(vars(payload)) + delim)
        for extra in self.objects:
            parts.append(delim + str(vars(extra)) + delim)
        parts.append(self.description + delim)
        return "".join(parts)
| [
"bgv@bgv-d9"
] | bgv@bgv-d9 |
c18961ccd3a21b980f7e0491796ac9a3161fe109 | fe9b840db1f522c5fbf92d36bd00717549ec39e0 | /Classify/mobilenet.py | 7e2b70829f0e59d7ffdd4c611f6586023cc4d7d2 | [] | no_license | Interesting6/video_query | 82ae32ed75766ead8ac1a8ae7554811535b51454 | 139df472669ff658a787ddd35e87e8e27bec50a9 | refs/heads/master | 2021-01-05T05:51:48.625492 | 2020-05-22T12:41:53 | 2020-05-22T12:41:53 | 240,904,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,340 | py | from torch import nn
# from .utils import load_state_dict_from_url
__all__ = ['MobileNetV2', 'mobilenet_v2']
model_urls = {
'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
}
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class ConvBNReLU(nn.Sequential):
    # Conv2d -> BatchNorm2d -> ReLU6 block. Padding keeps the spatial size
    # for stride 1 with odd kernels ("same" padding).
    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        padding = (kernel_size - 1) // 2
        # bias=False: the following BatchNorm supplies the affine shift.
        super(ConvBNReLU, self).__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
            nn.BatchNorm2d(out_planes),
            nn.ReLU6(inplace=True)
        )
class InvertedResidual(nn.Module):
    # MobileNetV2 building block: 1x1 pointwise expansion -> 3x3 depthwise
    # conv -> linear 1x1 projection, with a residual shortcut when the block
    # preserves shape (stride 1 and equal in/out channels).
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        # Width of the expanded (hidden) representation.
        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup
        layers = []
        if expand_ratio != 1:
            # pw (skipped when expand_ratio == 1: nothing to expand)
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        layers.extend([
            # dw: depthwise conv (groups == channels)
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
            # pw-linear: projection with no activation
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ])
        self.conv = nn.Sequential(*layers)
    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)
class MobileNetV2(nn.Module):
    def __init__(self,
                 num_classes=1000,
                 width_mult=1.0,
                 inverted_residual_setting=None,
                 round_nearest=8,
                 block=None):
        """
        MobileNet V2 main class
        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
        """
        super(MobileNetV2, self).__init__()
        if block is None:
            block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            # t: expansion factor, c: output channels, n: number of repeats,
            # s: stride of the first block in each group.
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]
        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))
        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        # Head width never shrinks below 1280 even for width_mult < 1.
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # Only the first block of each group downsamples.
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)
        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, num_classes),
        )
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
    def _forward_impl(self, x):
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        x = self.features(x)
        # Global average pooling over the spatial dimensions (H, W).
        x = x.mean([2, 3])
        x = self.classifier(x)
        return x
    def forward(self, x):
        return self._forward_impl(x)
def mobilenet_v2(pretrained=False, progress=True, **kwargs):
    """
    Constructs a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = MobileNetV2(**kwargs)
    if pretrained:
        # BUGFIX: the relative import of load_state_dict_from_url is commented
        # out at the top of this file, so pretrained=True used to raise
        # NameError. Resolve the loader from torch.hub instead (imported
        # lazily so the offline path never needs it).
        from torch.hub import load_state_dict_from_url
        state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model
| [
"cym1106515697@outlook.com"
] | cym1106515697@outlook.com |
be414c6d9c8a4fa38608306f213f02dc4f0b0ddb | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /tests/opcodes/cases/test_set_car_201.py | fa6ea91aeb693b4a4e81f6c6103373dac6100b92 | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 859 | py | from unittest import TestCase
from tests import abspath
from pytezos.repl.interpreter import Interpreter
from pytezos.michelson.converter import michelson_to_micheline
from pytezos.repl.parser import parse_expression
class OpcodeTestset_car_201(TestCase):
    # Regression test for the Michelson SET_CAR opcode, driven through the
    # pytezos repl interpreter.

    def setUp(self):
        self.maxDiff = None  # show full diffs on assertion failure
        self.i = Interpreter(debug=True)

    def test_opcode_set_car_201(self):
        # Load the contract under test into the interpreter.
        res = self.i.execute(f'INCLUDE "{abspath("opcodes/contracts/set_car.tz")}"')
        self.assertTrue(res['success'])
        # Run with parameter "world" and initial storage (Pair "hello" 0).
        res = self.i.execute('RUN "world" (Pair "hello" 0)')
        self.assertTrue(res['success'])
        # SET_CAR must replace the pair's first element with the parameter.
        exp_val_expr = michelson_to_micheline('(Pair "world" 0)')
        exp_val = parse_expression(exp_val_expr, res['result']['storage'].type_expr)
        self.assertEqual(exp_val, res['result']['storage']._val)
| [
"mz@baking-bad.org"
] | mz@baking-bad.org |
ed66cc6768c9405f0f187a6c1e81ccb5a154645c | 38c10c01007624cd2056884f25e0d6ab85442194 | /third_party/chromite/appengine/cq_stats/deploy_app.py | 2b5698fd6f5dd69aab98fd2d0d4eaa2730b642c9 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 4,551 | py | # Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script to deploy the cq_stats app to our appengine instances."""
from __future__ import print_function
import os
import time
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
APP_INSTANCE_DEBUG = 'debug'
APP_INSTANCE_PROD = 'prod'
APP_INSTANCE_NAME = {
APP_INSTANCE_DEBUG: 'google.com:chromiumos-build-annotator-dbg',
APP_INSTANCE_PROD: 'google.com:chromiumos-build-annotator',
}
APP_INSTANCE_CIDB = {
APP_INSTANCE_DEBUG: 'debug-cidb',
APP_INSTANCE_PROD: 'cidb',
}
def _GetParser():
  """Get parser for deploy_app cli.

  Returns:
    commandline.ArgumentParser object to parse the commandline args.
  """
  parser = commandline.ArgumentParser()
  # Positional: which instance to push to (keys of APP_INSTANCE_NAME).
  parser.add_argument('instance', type=str,
                      choices=(APP_INSTANCE_DEBUG, APP_INSTANCE_PROD),
                      help='The app instance to deploy to')
  # Secret used by django to sign session cookies; required, never defaulted.
  parser.add_argument('--secret-key', type=str, required=True,
                      help='The secret key to sign django cookies.')
  return parser
def _GetDeploySettings(options):
  """Build the autogenerated part of the django settings module.

  Returns:
    python "code" as str to be written to the settings file.
  """
  generator = os.path.basename(__file__)
  lines = []
  lines.append('# DO NOT EDIT! Autogenerated by %s.' % generator)
  lines.append('DEBUG = False')
  lines.append('TEMPLATE_DEBUG = False')
  lines.append('SECRET_KEY = "%s"' % options.secret_key)
  lines.append('CIDB_PROJECT_NAME = "cosmic-strategy-646"')
  lines.append('CIDB_INSTANCE_NAME = "%s"' % APP_INSTANCE_CIDB[options.instance])
  return '\n'.join(lines)
def _DeployApp(basedir):
  """Deploy the prepared app from basedir.

  Args:
    basedir: The base directory where the app has already been prepped.
  """
  # Collect django static assets into the deployable tree.
  cros_build_lib.RunCommand(
      ['./ae_shell', 'cq_stats', '--',
       'python', 'cq_stats/manage.py', 'collectstatic', '--noinput'],
      cwd=basedir)
  # Remove sensitive files that are needed to run tools locally to prepare the
  # deploy directory, but that we don't want to push to AE.
  cidb_cred_path = os.path.join(basedir, 'cq_stats', 'annotator_cidb_creds')
  osutils.SafeUnlink(os.path.join(cidb_cred_path, 'client-cert.pem'))
  osutils.SafeUnlink(os.path.join(cidb_cred_path, 'client-key.pem'))
  osutils.SafeUnlink(os.path.join(cidb_cred_path, 'server-ca.pem'))
  # Push the app with the appengine SDK's appcfg tool (OAuth2 auth).
  cros_build_lib.RunCommand(
      ['./ae_shell', 'cq_stats', '--',
       'appcfg.py', '--oauth2', 'update', 'cq_stats'],
      cwd=basedir)
def _Hang(tempdir):
  """How else will you ever work on this script?

  Debug helper: loops forever so the staged tempdir stays alive for
  inspection instead of being cleaned up on exit.

  Args:
    tempdir: The directory prepared for deploying the app.
  """
  logging.info('All the real stuff\'s done. Tempdir: %s', tempdir)
  while True:
    logging.info('Sleeping... Hit Ctrl-C to exit.')
    time.sleep(30)
def main(argv):
  """Stage the cq_stats app into a temp dir, patch it, and deploy it."""
  parser = _GetParser()
  options = parser.parse_args(argv)
  options.Freeze()
  with osutils.TempDir() as tempdir:
    # This is rsync in 'archive' mode, but symlinks are followed to copy actual
    # files/directories.
    rsync_cmd = ['rsync', '-qrLgotD', '--exclude=\'*/*.pyc\'']
    # Three dirname hops up from this file: the parent of the chromite tree.
    chromite_dir = os.path.dirname(
        os.path.dirname(
            os.path.dirname(
                os.path.abspath(__file__))))
    # Copy the appengine tree itself (minus the bundled SDK).
    cmd = rsync_cmd + [
        'chromite/appengine/', tempdir,
        '--exclude=google_appengine_*',
    ]
    cros_build_lib.RunCommand(cmd, cwd=os.path.dirname(chromite_dir))
    # Vendor chromite into the app so its libraries are importable on AE.
    cmd = rsync_cmd + [
        'chromite', os.path.join(tempdir, 'cq_stats'),
        '--exclude=appengine',
        '--exclude=third_party',
        '--exclude=ssh_keys',
        '--exclude=contrib',
        '--exclude=.git',
    ]
    cros_build_lib.RunCommand(cmd, cwd=os.path.dirname(chromite_dir))
    # Write the generated settings (secret key, cidb instance) into the app.
    osutils.WriteFile(os.path.join(tempdir, 'cq_stats', 'cq_stats',
                                   'deploy_settings.py'),
                      _GetDeploySettings(options))
    # update the instance we're updating.
    # Use absolute path. Let's not update sourcedir by mistake.
    app_yaml_path = os.path.join(tempdir, 'cq_stats', 'app.yaml')
    regex = (r's/^application:[ \t]*[a-zA-Z0-9_-\.:]\+[ \t]*$'
             '/application: %s/')
    cmd = [
        'sed', '-i',
        '-e', regex % APP_INSTANCE_NAME[options.instance],
        app_yaml_path,
    ]
    cros_build_lib.RunCommand(cmd, cwd=tempdir)
    _DeployApp(tempdir)
    # _Hang(tempdir)
| [
"zeno.albisser@hemispherian.com"
] | zeno.albisser@hemispherian.com |
3c1b311d02211bb08bdd94372cca6e663346bf05 | 0f9c9e4c60f28aa00aff8b80e1e4c142c61d24ce | /Python/LeetCode/242_isAnagram.py | 3e5cd108a2aaca80b538583e09a2f80aca6d9f97 | [] | no_license | shouliang/Development | c56fcc69e658393c138b63b507b96c48232128d5 | b7e3b02c50d54515e584cb18dff83109224245d0 | refs/heads/master | 2020-03-22T09:14:51.070228 | 2019-08-29T02:50:26 | 2019-08-29T02:50:26 | 139,825,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | '''
Valid Anagram
242. Valid Anagram: https://leetcode.com/problems/valid-anagram/description/
Idea: use two hash tables to record the count of each letter in the two strings, then check whether the two tables are equal.
'''
class Solution:
    """LeetCode 242 - Valid Anagram, three frequency-counting variants."""

    def isAnagram(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: bool
        """
        # EAFP tallying into a per-string frequency table each.
        freq_s, freq_t = {}, {}
        for ch in s:
            try:
                freq_s[ch] += 1
            except KeyError:
                freq_s[ch] = 1
        for ch in t:
            try:
                freq_t[ch] += 1
            except KeyError:
                freq_t[ch] = 1
        return freq_s == freq_t

    def isAnagram_01(self, s, t):
        # Same comparison built from count() over each string's distinct chars.
        freq_s = {ch: s.count(ch) for ch in set(s)}
        freq_t = {ch: t.count(ch) for ch in set(t)}
        return freq_s == freq_t

    def isAnagram_02(self, s, t):
        # Fixed-size tables for the 26 lowercase ASCII letters.
        base = ord('a')
        counts_s = [0] * 26
        counts_t = [0] * 26
        for ch in s:
            counts_s[ord(ch) - base] += 1
        for ch in t:
            counts_t[ord(ch) - base] += 1
        return counts_s == counts_t
# Demo: all three variants agree that 'abcd' and 'bdca' are anagrams.
s = 'abcd'
t = 'bdca'
solution = Solution()
flag = solution.isAnagram(s, t)
print(flag)
flag = solution.isAnagram_01(s, t)
print(flag)
flag = solution.isAnagram_02(s, t)
print(flag)
| [
"git@git.dxl.cc:node/hunqing.git"
] | git@git.dxl.cc:node/hunqing.git |
717fe40bd5b19f91cfcf1f8aedd7b268368f6a91 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03722/s574255848.py | cb0707f0d06dcc5e4c5b8d71974a4054c8626065 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | N, M = map(int, input().split())
edges = [None]*M
INF = 10**18
for i in range(M):
a, b, c = map(int, input().split())
edges[i] = (a-1, b-1, -c)
def bellman(n, edges, r, inf=10**18):
    """Bellman-Ford over negated edge costs (AtCoder ABC061-D style).

    ``edges`` holds ``(u, v, c)`` triples whose cost ``c`` is the *negated*
    score, so minimising distance maximises the original score.

    :param n: number of vertices, labelled 0 .. n-1
    :param edges: iterable of (u, v, cost) triples
    :param r: source vertex
    :param inf: sentinel for "unreachable"; generalised from the old
        module-level ``INF`` constant (default keeps the original value)
    :return: maximal score from ``r`` to vertex ``n-1``, or the string
        ``"inf"`` when a score-increasing cycle can still improve it
    """
    dist = [inf] * n
    dist[r] = 0
    # n relaxation rounds: one more than a cycle-free graph needs, so an
    # improvement of the target on round n-1 proves an unbounded score.
    for rnd in range(n):
        for u, v, c in edges:
            if dist[u] != inf and dist[u] + c < dist[v]:
                dist[v] = dist[u] + c
                if rnd == n - 1 and v == n - 1:
                    return "inf"
    return -dist[-1]
ans = bellman(N, edges, 0)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
890c89205dc57f63f4cc67df2d9cc8e40adbd865 | 760f598fef1f9a75c4f900c055162387ecbb2e90 | /tests/__init__.py | a3ecd56802b0474657bf520da4d90bc201b491d5 | [
"MIT"
] | permissive | kernel1983/Fukei | 5bb418b1d923c36aa8b241377f39796a0158e084 | c2ab6a59166e8b84d19db0774009db39066a1764 | refs/heads/master | 2021-01-18T10:59:54.528768 | 2016-01-28T14:07:25 | 2016-01-28T14:07:25 | 50,578,445 | 2 | 0 | null | 2016-01-28T11:29:31 | 2016-01-28T11:29:30 | null | UTF-8 | Python | false | false | 197 | py | #!/usr/bin/env python
import unittest
from test_crypto import TestCrypto
from fukei.utils import log_config
if __name__ == '__main__':
    # enable logging for the 'test' context before handing over to unittest
    log_config('test', True)
    unittest.main(verbosity=2)
| [
"lyanghwy@gmail.com"
] | lyanghwy@gmail.com |
d1f9cd3c08607ebe0da6416f4b5e42eb2652fd12 | 2634f30c03e5d57574c4d9b29011159bc50cb141 | /pythonds/map/node.py | a556c740f57ad91a0aa9105d5bbd8859e43b0401 | [] | no_license | ZhouPan1998/DataStructures_Algorithms | 800e5066cdd20929b9a365af349ea9616134f148 | b2981a7323ff0b42a1e16f5488d5812c5e93dce4 | refs/heads/master | 2023-07-14T21:18:15.128089 | 2021-08-26T15:47:12 | 2021-08-26T15:47:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | # -*- coding: utf-8 -*-
from typing import Union
class HeaderNode:
    """Head node for one level of a skip list.

    Holds only two links: ``next`` to the first data node on this level
    and ``down`` to the head node of the level below.
    """

    def __init__(self):
        # Internal link slots; all access goes through the properties below.
        self.__forward = None
        self.__below = None

    @property
    def next(self):
        """First data node on this level, or None when the level is empty."""
        return self.__forward

    @next.setter
    def next(self, node):
        self.__forward = node

    @property
    def down(self):
        """Head of the level below, or None on the bottom level."""
        return self.__below

    @down.setter
    def down(self, node):
        self.__below = node
class DataNode(HeaderNode):
    """Skip-list node that stores one key/value pair.

    The key is fixed at construction time; the value can be replaced
    through the ``value`` setter.
    """

    def __init__(self, key, val):
        super().__init__()
        self.__stored_key = key
        self.__stored_value = val

    @property
    def key(self):
        """The immutable lookup key."""
        return self.__stored_key

    @property
    def value(self):
        """The payload currently associated with the key."""
        return self.__stored_value

    @value.setter
    def value(self, new_value):
        self.__stored_value = new_value
| [
"2435128850@qq.com"
] | 2435128850@qq.com |
54e61c7d98c653417ee8363334fe3665ac157d65 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/compute_operation_value_paged.py | 06165ad1a5048168557c142ad220a768e0f20a82 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 997 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ComputeOperationValuePaged(Paged):
    """
    A paging container for iterating over a list of :class:`ComputeOperationValue <azure.mgmt.compute.v2019_03_01.models.ComputeOperationValue>` object
    """
    # Maps Python attribute names to the wire-format JSON keys consumed by
    # the msrest deserializer; 'nextLink' drives fetching of the next page.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[ComputeOperationValue]'}
    }
    def __init__(self, *args, **kwargs):
        # No extra state of its own; defer entirely to the Paged base class.
        super(ComputeOperationValuePaged, self).__init__(*args, **kwargs)
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
e090552291bf9bbc3c1b7220ea1a12fc8ad7e716 | 2118f244be2e09508e3c89dee432d4a75343b430 | /CSV/csv_ex1.py | 308dfa85d7e93e0b02c3b1f1aecdf46cc0bddaf4 | [] | no_license | RamiJaloudi/Python-Scripts | 91d139093a95f9498a77b1df8ec2f790c4f4dd4c | 37e740a618ae543a02c38dc04a32ef95202ff613 | refs/heads/master | 2020-04-29T14:55:41.108332 | 2019-03-18T05:42:06 | 2019-03-18T05:42:06 | 176,212,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # From csv.py
def next(self):
    """Return the next non-blank row of the underlying reader as a dict.

    Copied from the stdlib ``csv.DictReader`` (Python 2 iterator protocol).
    Keys come from ``self.fieldnames``; surplus cells are collected under
    ``self.restkey`` and missing cells are filled with ``self.restval``.
    """
    if self.line_num == 0:
        # Used only for its side effect.
        self.fieldnames
    row = self.reader.next()
    self.line_num = self.reader.line_num
    # unlike the basic reader, we prefer not to return blanks,
    # because we will typically wind up with a dict full of None
    # values
    while row == []:
        row = self.reader.next()
    d = dict(zip(self.fieldnames, row))
    lf = len(self.fieldnames)
    lr = len(row)
    if lf < lr:
        # more cells than field names: stash the overflow under restkey
        d[self.restkey] = row[lf:]
    elif lf > lr:
        # short row: pad the missing fields with restval
        for key in self.fieldnames[lr:]:
            d[key] = self.restval
    return d
| [
"rjaloudi@gmail.com"
] | rjaloudi@gmail.com |
8a624bb6c252d01e91e85c01db568090a196805a | d33fa69a6948e4f7ec95e9fe8f3ff2befbd51779 | /mmdet/core/bbox/samplers/pseudo_sampler.py | 6c7189c5902ed0fe0f3155c91af02fd1c86a6bd9 | [
"Apache-2.0"
] | permissive | chengdazhi/mmdetection | 83848d7d26940ca982460e6a5c54f625b94f47db | 08cb54216479e59b4e4fad19ea2c9b3c72fb0405 | refs/heads/master | 2020-04-10T13:46:51.481551 | 2018-12-25T13:01:27 | 2018-12-25T13:01:27 | 161,059,155 | 2 | 0 | Apache-2.0 | 2018-12-09T16:32:58 | 2018-12-09T16:32:58 | null | UTF-8 | Python | false | false | 789 | py | import torch
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
class PseudoSampler(BaseSampler):
    """A "sampler" that keeps every assigned bbox instead of subsampling.

    Positives are boxes matched to a ground truth (gt_inds > 0); negatives
    are boxes explicitly assigned to background (gt_inds == 0).
    """
    def __init__(self):
        pass
    def _sample_pos(self):
        # BaseSampler's abstract sampling hooks are never used here.
        raise NotImplementedError
    def _sample_neg(self):
        raise NotImplementedError
    def sample(self, assign_result, bboxes, gt_bboxes):
        """Wrap all positive/negative box indices into a SamplingResult."""
        # nonzero(...).squeeze(-1) yields a 1-D index tensor; unique()
        # removes duplicate indices.
        pos_inds = torch.nonzero(
            assign_result.gt_inds > 0).squeeze(-1).unique()
        neg_inds = torch.nonzero(
            assign_result.gt_inds == 0).squeeze(-1).unique()
        # No ground-truth boxes are injected as samples, so all flags are 0.
        gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
        sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
                                         assign_result, gt_flags)
        return sampling_result
| [
"chenkaidev@gmail.com"
] | chenkaidev@gmail.com |
33db64af4b6ad37a8ec321099aa706a1b8b4384c | 07f837d8c5236fe5e75ef510cd296814452370ce | /py/testdir_single_jvm/test_mixed_causes_NA.py | a24a12cd046b8e8886ec2a49e8b3b2181c4a941f | [
"Apache-2.0"
] | permissive | vkuznet/h2o | 6f9006a5186b964bac266981d9082aec7bc1067c | e08f7014f228cbaecfb21f57379970e6a3ac0756 | refs/heads/master | 2021-08-28T11:37:52.099953 | 2021-08-10T22:43:34 | 2021-08-10T22:43:34 | 20,032,996 | 0 | 0 | Apache-2.0 | 2021-08-10T22:43:35 | 2014-05-21T18:46:27 | Java | UTF-8 | Python | false | false | 1,074 | py | import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i
class Basic(unittest.TestCase):
    """Python 2 H2O integration test: parsing a mixed-type column."""
    def tearDown(self):
        # fail fast if H2O wrote errors into its sandbox logs
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        # build a local single-JVM cloud, or a multi-host cloud otherwise
        global localhost
        localhost = h2o.decide_if_localhost()
        if (localhost):
            h2o.build_cloud()
        else:
            h2o_hosts.build_cloud_with_hosts()
    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()
    def test_mixed_causes_NA(self):
        # parse a single-column file mixing numbers and strings, then verify
        # that no values were coerced to NAs
        csvFilename = 'mixed_causes_NA.csv'
        parseResult = h2i.import_parse(bucket='smalldata', path=csvFilename, timeoutSecs=15, schema='put')
        inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
        missingValuesList = h2o_cmd.infoFromInspect(inspect, csvFilename)
        print missingValuesList
        self.assertEqual(sum(missingValuesList), 0,
            "Single column of data with mixed number/string should not have NAs")
if __name__ == '__main__':
    h2o.unit_main()
| [
"kevin@0xdata.com"
] | kevin@0xdata.com |
34b846756d284503d3d3b0c6ba934e8b12c9642d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_studding.py | 6678ad5abd0de12b25008f628d0975d1f26a230f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py |
#calss header
class _STUDDING():
def __init__(self,):
self.name = "STUDDING"
self.definitions = stud
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['stud']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
911f6962a4e6534eb1a2956b6024c1d1ae530963 | 87ced16167203723557f75dc005c3aaae7e3f404 | /online-judges/leetcode/design-add-and-search-words-data-structure.py | 2f71bb888414a1be337580044ae83a8a74ae8f26 | [] | no_license | joao-conde/competitive-programming | 87e0c46f06bc017eea2701b9be860ee614c0e159 | 0d2d7375f0603142febab69707496d3b5b985054 | refs/heads/master | 2023-08-07T01:47:19.864827 | 2023-07-25T11:43:39 | 2023-07-25T11:43:39 | 132,962,490 | 6 | 3 | null | 2020-04-20T23:15:25 | 2018-05-10T22:55:01 | C++ | UTF-8 | Python | false | false | 1,304 | py | # https://leetcode.com/problems/design-add-and-search-words-data-structure/
class Trie:
def __init__(self) -> None:
self.terminal = False
self.children = dict()
class WordDictionary:
def __init__(self):
self.root = Trie()
def addWord(self, word: str) -> None:
cur = self.root
for c in word:
if c not in cur.children:
cur.children[c] = Trie()
cur = cur.children[c]
cur.terminal = True
def search(self, word: str) -> bool:
return self.search_dfs(self.root, word)
def search_dfs(self, cur: Trie, word: str) -> bool:
if len(word) == 0:
return cur.terminal
c = word[0]
if c == ".":
terminal = False
for child in cur.children.values():
terminal = terminal or self.search_dfs(child, word[1:])
return terminal
if c not in cur.children:
return False
return self.search_dfs(cur.children[c], word[1:])
# Tests
word_dict = WordDictionary()
word_dict.addWord("bad")
word_dict.addWord("dad")
word_dict.addWord("mad")
assert word_dict.search("pad") == False
assert word_dict.search("bad") == True
assert word_dict.search(".ad") == True
assert word_dict.search("b..") == True
| [
"joaodiasconde@gmail.com"
] | joaodiasconde@gmail.com |
666a7c5bf826c9d8442621b3bb6f45d2ab638655 | 90e089c800d3ac6c9b338618574ce21a306f1399 | /core/migrations/0001_initial.py | 307b6fd284f8d2f50bffe710697675fced609aba | [] | no_license | Matroskins/fortypes | 962e8f28297a565884703ed68ba8a0b93c8e16b3 | 708a7cc67f6e510152b796759f7e556d96a93522 | refs/heads/master | 2021-04-27T17:13:41.959941 | 2018-02-22T14:48:57 | 2018-02-22T14:48:57 | 122,317,047 | 0 | 0 | null | 2018-02-21T09:38:05 | 2018-02-21T09:38:05 | null | UTF-8 | Python | false | false | 667 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-26 10:04
from __future__ import unicode_literals
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ImageObj',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image_original', models.ImageField(storage=django.core.files.storage.FileSystemStorage(location='media/'), upload_to='')),
],
),
]
| [
"chepe4pi@gmail.com"
] | chepe4pi@gmail.com |
3644ea56d205d33828d5068c5f09c042d86d7eaa | 4a63e96d7015e3e13d9b5204fc0261c05f600d3b | /Standard Library/argparse/Tutorial/06.1_getting_Advanced.py | afa833359fb3417e73abc11605b031434e4b5584 | [
"Apache-2.0"
] | permissive | shubhamnag14/Python-Documents | 0e38f58298d35b4df5b61adb361d720337148a00 | d3fee0ad90232b413f6ac1b562588fb255b79e42 | refs/heads/master | 2023-06-08T23:51:26.089840 | 2021-06-20T15:07:44 | 2021-06-20T15:07:44 | 380,832,776 | 0 | 0 | Apache-2.0 | 2021-06-27T20:33:08 | 2021-06-27T20:31:41 | null | UTF-8 | Python | false | false | 654 | py | import argparse
parser = argparse.ArgumentParser()
parser.add_argument("x",
type=int,
help="The Base")
parser.add_argument("y",
type=int,
help="The Exponent")
parser.add_argument("-v",
"--verbosity",
action="count",
default=0,
help="Increase Output Verbosity")
args = parser.parse_args()
answer = args.x**args.y
if args.verbosity >= 2:
print(f"{args.x} to the power {args.y} equals {answer}")
elif args.verbosity >= 1:
print(f"{args.x}^{args.y} == {answer} ")
else:
print(answer)
| [
"subhadeep@klizos.com"
] | subhadeep@klizos.com |
41782d3bc803c312156388e6359672049401a10e | e62d5b3e9cb8e3549c711067e3f17d7c761f1b2a | /Namedtuple.py | 88513e75c97b7868d741eab0904caf7a3f50ff1b | [] | no_license | UncleBob2/MyPythonCookBook | 5fe658b3e281861015e154493480753d169764a7 | 131051c884415cde789a487acb7e7d1f60aea3ac | refs/heads/master | 2023-01-03T04:26:15.493832 | 2020-10-29T20:09:45 | 2020-10-29T20:09:45 | 259,091,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from collections import namedtuple
# a good compromise between tuple and dictionary
color = {'red': 55, 'green': 155, 'blue': 255}
print('Using dictionary for color', color['red'])
Color = namedtuple('Color', ['red', 'green', 'blue'])
color = Color(55, 155, 255)
white = Color(255, 255, 255)
print('Using namedtuple for color', color[0])
print('Using namedtuple for color', color.red) # making the code more readable
print(white.blue)
| [
"trohoang@yahoo.com"
] | trohoang@yahoo.com |
e9d586534bf615faad2f0f3aa15943c1fd224d86 | 875bb84440094ce058a2ec25a661a7da6bb2e129 | /algo_py/boj/bj11652.py | 86054ecf9b20fc9613ea6167f4bd7af1a8400746 | [] | no_license | shg9411/algo | 150e4291a7ba15990f17ca043ae8ab59db2bf97b | 8e19c83b1dbc0ffde60d3a3b226c4e6cbbe89a7d | refs/heads/master | 2023-06-22T00:24:08.970372 | 2021-07-20T06:07:29 | 2021-07-20T06:07:29 | 221,694,017 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | card = dict()
for tmp in map(int, __import__('sys').stdin.read().split()[1:]):
if tmp not in card:card[tmp] = 1
else:card[tmp] += 1
print(sorted(card.items(), key=lambda a: (-a[1], a[0]))[0][0]) | [
"shg9411@naver.com"
] | shg9411@naver.com |
fae30accf7693ee02154afb6779d5637a7101c64 | 4a255da2c8d4c4589ab2c38c378282ea4a65f1d9 | /tests/deprecated_api/__init__.py | 91c7ef1c1f8801ce7e8c8f96c9d7a65ac3e8d502 | [
"Apache-2.0"
] | permissive | wandb/pytorch-lightning | 7a2fcdd03d67757c7eb5be7fffc090f55d7927ef | fe34bf2a653ebd50e6a3a00be829e3611f820c3c | refs/heads/master | 2022-02-10T12:09:00.469103 | 2022-01-24T08:19:57 | 2022-01-24T08:19:57 | 223,654,630 | 3 | 0 | Apache-2.0 | 2021-06-16T10:34:53 | 2019-11-23T21:19:50 | Python | UTF-8 | Python | false | false | 1,752 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test deprecated functionality which will be removed in vX.Y.Z."""
import sys
from contextlib import contextmanager
from typing import Optional, Type
import pytest
def _soft_unimport_module(str_module):
# once the module is imported e.g with parsing with pytest it lives in memory
if str_module in sys.modules:
del sys.modules[str_module]
@contextmanager
def no_warning_call(expected_warning: Type[Warning] = UserWarning, match: Optional[str] = None):
    """Assert that the wrapped code emits no matching warning.

    With ``match=None`` any recorded warning of ``expected_warning`` fails;
    otherwise only a warning whose message contains ``match`` fails.
    """
    with pytest.warns(None) as record:
        yield
    if match is None:
        try:
            w = record.pop(expected_warning)
        except AssertionError:
            # no warning raised
            return
    else:
        for w in record.list:
            if w.category is expected_warning and match in w.message.args[0]:
                break
        else:
            # for/else: no matching warning was recorded -> success
            return
    msg = "A warning" if expected_warning is None else f"`{expected_warning.__name__}`"
    raise AssertionError(f"{msg} was raised: {w}")
@contextmanager
def no_deprecated_call(match: Optional[str] = None):
    """Assert that the wrapped code emits no (matching) DeprecationWarning."""
    with no_warning_call(expected_warning=DeprecationWarning, match=match):
        yield
| [
"noreply@github.com"
] | wandb.noreply@github.com |
20dd0349455e1e9388db2d86e19ca31baca89048 | 1a758ef862f733d98ddd8ebc8ade5cefd95c24f2 | /coffees/migrations/0008_auto_20160814_0003.py | 5e41ec9efc4be1c908ae538d5189a08874945ae6 | [] | no_license | ajajul/ReactJS_Python | f116b35394666c5b3f2419eb5d8d7aeb077d4a24 | 08310d56fa88f326ddbfdd4b189f2a3a71f76d99 | refs/heads/master | 2020-03-19T03:16:57.510672 | 2018-06-01T10:36:36 | 2018-06-01T10:36:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds a dedicated one-off price to CoffeeType and sets the regular
    # amount's default to 14.
    dependencies = [
        ('coffees', '0007_sharedcoffeesticker'),
    ]
    operations = [
        migrations.AddField(
            model_name='coffeetype',
            name='amount_one_off',
            field=models.DecimalField(default=18, verbose_name=b'Amount for One-off', max_digits=6, decimal_places=2),
        ),
        migrations.AlterField(
            model_name='coffeetype',
            name='amount',
            field=models.DecimalField(default=14, verbose_name=b'Amount', max_digits=6, decimal_places=2),
        ),
    ]
| [
"web.expert@aol.com"
] | web.expert@aol.com |
dc8f2cec24f68baa30d471ab0336fd32849e72b3 | b3e3284f3d7b66f237e60fdfb1a37db706363139 | /RST/app/ventas/migrations/0006_auto_20181115_1448.py | 2c7d3cc5a4d2ca91049f8615f3723445a2e45792 | [] | no_license | corporacionrst/administracion | 4caf1545c313eb36408850bb4506bbd0bf43d6e6 | 7405442b4f14a589d75a5e04250be123403180ec | refs/heads/master | 2020-04-11T00:04:06.017147 | 2018-12-11T21:46:49 | 2018-12-11T21:46:49 | 161,374,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # Generated by Django 2.1.2 on 2018-11-15 14:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Alters Orden.autoriza: form-optional (blank=True), DO_NOTHING on
    # delete of the related usuario.Usuario.
    dependencies = [
        ('ventas', '0005_auto_20181115_2140'),
    ]
    operations = [
        migrations.AlterField(
            model_name='orden',
            name='autoriza',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='autoriza_compra', to='usuario.Usuario'),
        ),
    ]
| [
"admin@corporacionrst.com"
] | admin@corporacionrst.com |
892133e6a10f09894aeb9a8d4cde9b7b9621cc7d | 0be2afad29a71785d64a0c493fcd2cec803464d8 | /train_visualization/plot.py | 5b32d22cd95c63aa4fffc276e43fc9384d490180 | [
"Apache-2.0"
] | permissive | hereismari/ajna | 7bf9fe58a4b64bc34b953e38936e23992d5e1f49 | e5db9a1cde88aba20e7b5738d8c434b9086721d5 | refs/heads/master | 2021-09-20T14:14:56.677711 | 2018-08-10T16:12:13 | 2018-08-10T16:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,723 | py | import matplotlib.pyplot as plt
import csv
import os
def read_and_preprocess_data(filename):
    """Read a TensorBoard CSV export and average the loss values per step.

    Returns a dict mapping each integer 'Step' to the mean of its 'Value'
    entries.  Raises when the file does not exist (error message kept in
    Portuguese, matching the rest of this script).
    """
    if not os.path.exists(filename):
        raise Exception('Arquivo %s não existe' % filename)
    # Accumulate (count, running sum) per step, then collapse to the mean.
    totals = {}
    with open(filename) as csv_file:
        for row in csv.DictReader(csv_file):
            step = int(row['Step'])
            value = float(row['Value'])
            count, running = totals.get(step, (0, 0.0))
            totals[step] = (count + 1, running + value)
    return {step: running / count
            for step, (count, running) in totals.items()}
def plot_loss(data, loss):
    """Plot train/validation curves for one loss tag and show the figure.

    *data* is a pair of dicts (train, validation), each mapping
    step -> loss value; *loss* is used as the plot title.
    """
    def _plot_loss(d, label):
        # split the {step: loss} dict into parallel x/y lists
        steps, losses = [], []
        for (step, loss) in d.items():
            steps.append(step)
            losses.append(loss)
        plt.plot(steps, losses, label=label)
    _plot_loss(data[0], 'Treino')
    _plot_loss(data[1], 'Validação')
    leg = plt.legend(loc='best', ncol=2, shadow=True, fancybox=True)
    leg.get_frame().set_alpha(0.5)
    plt.title(loss)
    plt.show()
def main():
    """Plot train/test curves for each exported TensorBoard loss tag."""
    # file names follow TensorBoard's CSV export naming convention
    base_filename = 'run_%s-tag-%s.csv'
    for loss in ['heatmaps_mse', 'radius_mse']:
        plot_data = []
        for dataset in ['train', 'test']:
            plot_data.append(read_and_preprocess_data(base_filename % (dataset, loss)))
        plot_loss(plot_data, loss)
if __name__ == '__main__':
    main()
| [
"mariannelinharesm@gmail.com"
] | mariannelinharesm@gmail.com |
e61371dd76bda43a1630895f7d5de5b4dcc87d4d | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-sql/azure/mgmt/sql/models/operation_display.py | a5b2f841200641e4d361e2bfe4953bfad84d2a10 | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 1,798 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationDisplay(Model):
    """Display metadata associated with the operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar provider: The localized friendly form of the resource provider name.
    :vartype provider: str
    :ivar resource: The localized friendly form of the resource type related
     to this action/operation.
    :vartype resource: str
    :ivar operation: The localized friendly name for the operation.
    :vartype operation: str
    :ivar description: The localized friendly description for the operation.
    :vartype description: str
    """
    # All four fields are server-populated; msrest rejects client writes.
    _validation = {
        'provider': {'readonly': True},
        'resource': {'readonly': True},
        'operation': {'readonly': True},
        'description': {'readonly': True},
    }
    # Wire-format mapping used by the msrest (de)serializer.
    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }
    def __init__(self):
        super(OperationDisplay, self).__init__()
        # readonly attributes start unset; the server fills them on response
        self.provider = None
        self.resource = None
        self.operation = None
        self.description = None
| [
"autorestci@microsoft.com"
] | autorestci@microsoft.com |
6c5e15a223ab40fd0c865b6b2acf3b6f0832efc0 | 4b7791aa4a93ccfa6e2c3ffb10dfcbe11d042c66 | /estudos/estudo_02.py | cf804e6be1490fbd787b503f0869c96b7be1f3df | [] | no_license | Dev33Renan/Python-Exercises | bbf895f241d4142a6172b911228227cd4a6fe6ab | ffc73bc0b912b41564034e7740ea29a9f5249553 | refs/heads/main | 2023-06-11T05:44:11.902322 | 2021-06-20T19:11:28 | 2021-06-20T19:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,049 | py | frase = "O joão Foi andar ,de bicicleta."
frase_02 = 'A maria não gosta de bicicleta'
# selection by position (index)
print(frase[5])
# selection by interval (start index and end index)
print(frase[2:6])
# selection by interval (start, end and step)
print(frase[5:1:-1])
# lower() converts the whole string to lower case
print(frase.lower())
# upper() converts the whole string to upper case
print(frase.upper())
# title() upper-cases the first letter of every word in the string
print(frase.title())
# capitalize() lower-cases the sentence and upper-cases only its first letter
print(frase.capitalize())
# swapcase() inverts the case: upper becomes lower and vice versa
print(frase.swapcase())
# ljust() pads the string with spaces on the right up to the given width
frase = frase.ljust(50)
frase += "!" # string concatenation (same as frase = frase + "!")
print(frase)
# split() breaks the string into a list of items
print(frase.split())
# replace() substitutes one substring for another
print(frase.replace(",","."))
# strip() removes the given characters from the ends of the string
print(frase.strip("O, "))
# center() pads with spaces on both sides up to the given width
print(frase.center(100))
# find() searches for a value and returns its position
print(frase.find("a"))
# startswith() returns True/False: does the string start with the value?
print(frase)
print(frase.startswith('O'))
# endswith() same, for whether the string ends with the value
print(frase.endswith('!'))
# islower() returns True when every cased character is lower case
print(frase.lower().islower())
# isupper() same, for upper case
print(frase.upper().isupper())
# type() returns the type of a value
print(type(frase))
print(type(1))
# count() returns how many times the item occurs
print(frase.count('o'))
# len() returns the length of the string
print(len(frase))
| [
"hikarofcarvalho@gmail.com"
] | hikarofcarvalho@gmail.com |
c4fc32732a38e01ea2ca2aacee54f77ae3b99850 | 2b791f6d4cf4b18fc8bcc2b6e3cb3a516c59077d | /test/examples/test_pysc2.py | 2aa8a6b259f0b00abc9a8180e1b2168cc65e29f9 | [
"MIT"
] | permissive | osu-xai/abp | 9c99c66de1d9c91da360d133900e29e7b85d45d2 | cd83eaa2810a1c5350c849303d61639576c0bb0d | refs/heads/master | 2021-06-11T16:01:30.210471 | 2020-11-30T23:05:50 | 2020-11-30T23:05:50 | 155,762,226 | 0 | 1 | MIT | 2020-11-30T23:05:52 | 2018-11-01T19:01:12 | Python | UTF-8 | Python | false | false | 584 | py | import sys
import unittest
class PySC2Tests(unittest.TestCase):
    """End-to-end runs of the collect-shards examples via the task runner."""
    def test_pysc2_shards_dqn(self):
        # the task runner parses its options from sys.argv, so fake a CLI call
        sys.argv = ['',
                    '--task', 'abp.examples.pysc2.collect_shards.dqn',
                    '--folder', 'test/tasks/pysc2_collect_shards_dqn']
        from abp.trainer.task_runner import main
        main()
    def test_pysc2_shards_hra(self):
        sys.argv = ['',
                    '--task', 'abp.examples.pysc2.collect_shards.hra',
                    '--folder', 'test/tasks/pysc2_collect_shards_hra']
        from abp.trainer.task_runner import main
        main()
| [
"nealla@lwneal.com"
] | nealla@lwneal.com |
7f60604b39fc937cd4de9b6d0a1569ae01f700be | 0f099ceacd4afabc92874dc9ab836c6baa4f0dbc | /mrbelvedereci/testresults/admin.py | 25b2c02d8a522a2454a6c730c4181ffebf878eed | [
"BSD-3-Clause"
] | permissive | davidjray/mrbelvedereci | 84d6c317a7d395602c421496d7dc6196f3b19258 | a5cfebdad2fafff648f705785df95739023f2af8 | refs/heads/master | 2020-05-18T14:22:48.037304 | 2017-02-25T00:40:59 | 2017-02-25T00:40:59 | 84,244,028 | 0 | 0 | null | 2017-03-07T20:40:57 | 2017-03-07T20:40:57 | null | UTF-8 | Python | false | false | 563 | py | from django.contrib import admin
from mrbelvedereci.testresults.models import TestResult
from mrbelvedereci.testresults.models import TestMethod
class TestResultAdmin(admin.ModelAdmin):
    # changelist columns and sidebar filters for test results
    list_display = ('build_flow', 'method', 'duration', 'outcome')
    list_filter = ('build_flow__build__repo', 'method', 'method__testclass')
admin.site.register(TestResult, TestResultAdmin)
class TestMethodAdmin(admin.ModelAdmin):
    # changelist columns and sidebar filters for test methods
    list_display = ('name', 'testclass')
    list_filter = ('testclass__repo', 'testclass')
admin.site.register(TestMethod, TestMethodAdmin)
| [
"jlantz@salesforce.com"
] | jlantz@salesforce.com |
111f2a2d1b3b762a31d45f54fa8a37bcc757338e | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/timeseries/periodograms/lombscargle/implementations/tests/test_utils.py | a0bb22fd6950e860070e9bbb8178f5ff5be32784 | [
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 2,191 | py |
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy.timeseries.periodograms.lombscargle.implementations.utils import extirpolate, bitceil, trig_sum
@pytest.mark.parametrize('N', 2 ** np.arange(1, 12))
@pytest.mark.parametrize('offset', [-1, 0, 1])
def test_bitceil(N, offset):
assert_equal(bitceil(N + offset),
int(2 ** np.ceil(np.log2(N + offset))))
@pytest.fixture
def extirpolate_data():
rng = np.random.RandomState(0)
x = 100 * rng.rand(50)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate(N, M, extirpolate_data):
x, y, f = extirpolate_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat))
@pytest.fixture
def extirpolate_int_data():
rng = np.random.RandomState(0)
x = 100 * rng.rand(50)
x[:25] = x[:25].astype(int)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate_with_integers(N, M, extirpolate_int_data):
x, y, f = extirpolate_int_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat))
@pytest.fixture
def trig_sum_data():
rng = np.random.RandomState(0)
t = 10 * rng.rand(50)
h = np.sin(t)
return t, h
@pytest.mark.parametrize('f0', [0, 1])
@pytest.mark.parametrize('adjust_t', [True, False])
@pytest.mark.parametrize('freq_factor', [1, 2])
@pytest.mark.parametrize('df', [0.1])
def test_trig_sum(f0, adjust_t, freq_factor, df, trig_sum_data):
t, h = trig_sum_data
tfit = t - t.min() if adjust_t else t
S1, C1 = trig_sum(tfit, h, df, N=1000, use_fft=True,
f0=f0, freq_factor=freq_factor, oversampling=10)
S2, C2 = trig_sum(tfit, h, df, N=1000, use_fft=False,
f0=f0, freq_factor=freq_factor, oversampling=10)
assert_allclose(S1, S2, atol=1E-2)
assert_allclose(C1, C2, atol=1E-2)
| [
"nicolas.holzschuch@inria.fr"
] | nicolas.holzschuch@inria.fr |
d2da12a87c1b55b04a218cf8a15a54b23fea5b08 | 3d65a2d72e65083c752281368cf040ae977e4757 | /generate_empty_data_directory.py | 5537758e68a618c65078169d960cab67860cdd18 | [] | no_license | florisvb/OdorAnalysis | 6b4b2c32979b9139856aee20cc63c34cfe63819e | 18beae8d3c6be271f171b1c36c9fd932a8a404ba | refs/heads/master | 2020-06-03T14:48:34.962795 | 2012-10-23T22:28:21 | 2012-10-23T22:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | import sys, os
from optparse import OptionParser
def main(path, config):
os.mkdir(os.path.join(path, config.data_path))
os.mkdir(os.path.join(path, config.raw_datasets_path))
os.mkdir(os.path.join(path, config.culled_datasets_path))
os.mkdir(os.path.join(path, config.h5_path))
os.mkdir(os.path.join(path, config.tmp_data_path))
os.mkdir(os.path.join(path, config.odor_control_path))
os.mkdir(os.path.join(path, config.figure_path))
for fig in config.figures:
os.mkdir(os.path.join(path, config.figure_path, fig))
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--path", type="str", dest="path", default='',
help="path to empty data folder, where you have a configuration file")
(options, args) = parser.parse_args()
path = options.path
sys.path.append(path)
import analysis_configuration
config = analysis_configuration.Config()
main(path, config)
| [
"florisvb@gmail.com"
] | florisvb@gmail.com |
a5721a4ecf125350389de8fc1870448f3186c310 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/containerservice/v20200101/list_managed_cluster_access_profile.py | 5a33bdd1c4b4c49b22b00ddb4f2856092048bf12 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 4,186 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListManagedClusterAccessProfileResult',
'AwaitableListManagedClusterAccessProfileResult',
'list_managed_cluster_access_profile',
]
@pulumi.output_type
class ListManagedClusterAccessProfileResult:
    """
    Managed cluster Access Profile.
    """
    # Generated output type: __init__ type-checks each field and stores it via
    # pulumi.set; reads go through the @pulumi.getter properties below.
    def __init__(__self__, kube_config=None, location=None, name=None, tags=None, type=None):
        if kube_config and not isinstance(kube_config, str):
            raise TypeError("Expected argument 'kube_config' to be a str")
        pulumi.set(__self__, "kube_config", kube_config)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="kubeConfig")
    def kube_config(self) -> Optional[str]:
        """
        Base64-encoded Kubernetes configuration file.
        """
        return pulumi.get(self, "kube_config")

    @property
    @pulumi.getter
    def location(self) -> str:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type
        """
        return pulumi.get(self, "type")
class AwaitableListManagedClusterAccessProfileResult(ListManagedClusterAccessProfileResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable without suspending: the dead `yield`
        # marks this as a generator, which immediately returns a plain copy.
        if False:
            yield self
        return ListManagedClusterAccessProfileResult(
            kube_config=self.kube_config,
            location=self.location,
            name=self.name,
            tags=self.tags,
            type=self.type)
def list_managed_cluster_access_profile(resource_group_name: Optional[str] = None,
                                        resource_name: Optional[str] = None,
                                        role_name: Optional[str] = None,
                                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListManagedClusterAccessProfileResult:
    """
    Use this data source to access information about an existing resource.

    :param str resource_group_name: The name of the resource group.
    :param str resource_name: The name of the managed cluster resource.
    :param str role_name: The name of the role for managed cluster accessProfile resource.
    """
    # Generated invoke wrapper: marshal the arguments, call the provider RPC,
    # and unpack the typed result.
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    __args__['roleName'] = role_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:containerservice/v20200101:listManagedClusterAccessProfile', __args__, opts=opts, typ=ListManagedClusterAccessProfileResult).value

    return AwaitableListManagedClusterAccessProfileResult(
        kube_config=__ret__.kube_config,
        location=__ret__.location,
        name=__ret__.name,
        tags=__ret__.tags,
        type=__ret__.type)
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
6649df6a8169655213193e4015de1facfed50fec | 7357d367b0af4650ccc5b783b7a59090fdde47bb | /models/research/attention_ocr/python/metrics.py | 83f67809c7f18bd28777139ee752b99c790da44b | [
"MIT"
] | permissive | BarracudaPff/code-golf-data-python | fb0cfc74d1777c4246d56a5db8525432bf37ab1a | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | refs/heads/main | 2023-05-29T05:52:22.856551 | 2020-05-23T22:12:48 | 2020-05-23T22:12:48 | 378,832,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,742 | py | """Quality metrics for the model."""
import tensorflow as tf
def char_accuracy(predictions, targets, rej_char, streaming=False):
    """Computes character level accuracy.
    Both predictions and targets should have the same shape
    [batch_size x seq_length].
    Args:
      predictions: predicted characters ids.
      targets: ground truth character ids.
      rej_char: the character id used to mark an empty element (end of sequence).
      streaming: if True, uses the streaming mean from the slim.metric module.
    Returns:
      a update_ops for execution and value tensor whose value on evaluation
      returns the total character accuracy.
    """
    with tf.variable_scope("CharAccuracy"):
        predictions.get_shape().assert_is_compatible_with(targets.get_shape())

        targets = tf.to_int32(targets)
        # Padding positions (== rej_char) get weight 0 and are excluded.
        const_rej_char = tf.constant(rej_char, shape=targets.get_shape())
        weights = tf.to_float(tf.not_equal(targets, const_rej_char))
        correct_chars = tf.to_float(tf.equal(predictions, targets))
        # Per-example accuracy over non-padding positions only.
        # NOTE(review): divides by the per-row weight sum, so a row that is
        # all padding would divide by zero — presumably inputs always contain
        # at least one real character; confirm with callers.
        accuracy_per_example = tf.div(tf.reduce_sum(tf.multiply(correct_chars, weights), 1), tf.reduce_sum(weights, 1))
        if streaming:
            return tf.contrib.metrics.streaming_mean(accuracy_per_example)
        else:
            return tf.reduce_mean(accuracy_per_example)
def sequence_accuracy(predictions, targets, rej_char, streaming=False):
    """Computes sequence level accuracy.
    Both input tensors should have the same shape: [batch_size x seq_length].
    Args:
      predictions: predicted character classes.
      targets: ground truth character classes.
      rej_char: the character id used to mark empty element (end of sequence).
      streaming: if True, uses the streaming mean from the slim.metric module.
    Returns:
      a update_ops for execution and value tensor whose value on evaluation
      returns the total sequence accuracy.
    """
    with tf.variable_scope("SequenceAccuracy"):
        predictions.get_shape().assert_is_compatible_with(targets.get_shape())

        targets = tf.to_int32(targets)
        const_rej_char = tf.constant(rej_char, shape=targets.get_shape(), dtype=tf.int32)
        include_mask = tf.not_equal(targets, const_rej_char)
        # Force predictions at padding positions to rej_char, so padding can
        # never cause a mismatch against the (also rej_char) target.
        include_predictions = tf.to_int32(tf.where(include_mask, predictions, tf.zeros_like(predictions) + rej_char))
        correct_chars = tf.to_float(tf.equal(include_predictions, targets))
        correct_chars_counts = tf.cast(tf.reduce_sum(correct_chars, reduction_indices=[1]), dtype=tf.int32)
        target_length = targets.get_shape().dims[1].value
        target_chars_counts = tf.constant(target_length, shape=correct_chars_counts.get_shape())
        # A sequence is correct only if every position (padding included) matches.
        accuracy_per_example = tf.to_float(tf.equal(correct_chars_counts, target_chars_counts))
        if streaming:
            return tf.contrib.metrics.streaming_mean(accuracy_per_example)
        else:
            return tf.reduce_mean(accuracy_per_example)
"sokolov.yas@gmail.com"
] | sokolov.yas@gmail.com |
89b7174617d0b48116861a28f2447630f2a8f87e | 9e1b884b94a0570e0c0781a7f7023e8ec482b5b4 | /codes/T20.py | 3eed41f3258b1e41601825e791c60a4addaa61a0 | [] | no_license | sunjunee/offer_book_python_codes | 568579434d82a7231074e41c67476c3ab8b9f181 | ecc852a5d38c8a02b9c2d0473065579363035f83 | refs/heads/master | 2020-03-12T09:49:40.587404 | 2018-06-10T13:55:24 | 2018-06-10T13:55:24 | 130,560,256 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | # -*- coding: utf-8 -*-
"""
@ Author: Jun Sun {Python3}
@ E-mail: sunjunee@qq.com
@ Date: 2018-04-22 15:03:02
"""
#表示数值的字符串:实现一个函数来判断一个字符串是否表示数值
#如+100, 5e2, -123, 3.1415, -1e-16
#{+-}{digits}.{digits}[eE]{+-}{digits}
def judgeString(string):
    """Return True if *string* represents a number: {+-}A[.B][eE{+-}C].

    The mantissa is valid when digits appear on at least one side of the
    decimal point ("3.", ".5" and "3.5" are all numbers, "." is not), and an
    exponent, when present, must be an optionally signed run of digits.

    Fixes over the original version:
      * ``getInteger`` no longer indexes past the end of the string, so
        inputs such as "12e" or "" no longer raise IndexError.
      * A missing integer part is accepted when fraction digits exist
        (".5"), and a missing fraction is accepted after a dot ("3."),
        matching the canonical problem specification.
    """
    if not string:
        return False
    index, has_int = getInteger(string, 0)
    saw_dot = index < len(string) and string[index] == "."
    has_frac = False
    if saw_dot:
        index, has_frac = getUsignInteger(string, index + 1)
    # Digits must appear before or after the dot for a valid mantissa.
    mantissa_ok = has_int or (saw_dot and has_frac)
    exponent_ok = True
    if index < len(string) and (string[index] == "e" or string[index] == "E"):
        index, exponent_ok = getInteger(string, index + 1)
    # Valid only if the whole string was consumed.
    return mantissa_ok and exponent_ok and index == len(string)


def getUsignInteger(string, index):
    """Scan ASCII digits from *index*; return (stop_index, saw_a_digit)."""
    p = index
    while p < len(string) and '0' <= string[p] <= '9':
        p += 1
    return p, p > index


def getInteger(string, index):
    """Scan an optionally signed run of digits starting at *index*.

    Returns (stop_index, saw_a_digit); a lone sign yields False.  Safe to
    call with index == len(string).
    """
    if index < len(string) and (string[index] == '-' or string[index] == "+"):
        index += 1
    return getUsignInteger(string, index)
if __name__ == "__main__":
    # Manual smoke test: a mix of valid and invalid numeric strings;
    # prints one True/False per case.
    testCase = ["+100", "5e2", "-123", "3.145678", "-12.56e+23",
                "-0.13e.w", ".e2", "+", "-.23", "5e0.2"]
    print(list(map(judgeString, testCase)))
"sunjunee@qq.com"
] | sunjunee@qq.com |
f8b441fb5a799a8054e3d99e1f9a9577ca133ded | bd87d8947878ccb2f5b720e70a22493b00868fd3 | /fluent/11_interfaces/monkey_patching.py | 376ef7d9da46e44a141c6e9d89cca318520fdec9 | [] | no_license | damiansp/completePython | 4cbf12ef682a1d4a5498f77e407dc02e44a7d7ac | 3f5e2f14d79c93df5147b82d901190c054535158 | refs/heads/master | 2023-09-01T20:50:03.444440 | 2023-08-28T00:27:57 | 2023-08-28T00:27:57 | 99,197,610 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 744 | py | import collections
from random import shuffle
Card = collections.namedtuple('Card', ['rank', 'suit'])
class FrenchDeck:
ranks = [str(n) for n in range(2, 11)] + list('JQKA')
suits = 'spades diamonds clubs hearts'.split()
def __init__(self):
self._cards = [Card(rank, suit) for suit in self.suits
for rank in self.ranks]
def __len__(self):
return len(self._cards)
def __getitem__(self, position):
return self._cards[position]
deck = FrenchDeck()
print(deck[:5])
# shuffle(deck) # error: FrenchDeck does not support item assignment
def set_card(deck, position, card):
deck._cards[position] = card
FrenchDeck.__setitem__ = set_card
shuffle(deck)
print(deck[:5])
| [
"damiansp@gmail.com"
] | damiansp@gmail.com |
99b46dc88fa2141bbc84b499ca4f5cd6a537b7f9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02576/s456985906.py | 8c3a47e877dfd91f6009c8645fef18b303b19a4f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | import sys
read = sys.stdin.read
#readlines = sys.stdin.readlines
from math import ceil
def main():
n, x, t = map(int, input().split())
print(ceil(n / x) * t)
if __name__ == '__main__':
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
cda854759b81092d95dc44d05842714eb9b494ba | e483b0515cca39f4ddac19645f03fc1695d1939f | /google/ads/google_ads/v1/proto/enums/product_bidding_category_status_pb2.py | 077aaca8546340153b855c91a6e7ebd3e2e0b8bb | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | BrunoWMello/google-ads-python | 0af63d2ca273eee96efd8a33252d27112c049442 | 9b074a037d10f0c1208a00d5d41a8e5e25405f28 | refs/heads/master | 2020-05-27T04:37:47.669144 | 2019-05-24T17:07:31 | 2019-05-24T17:07:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,249 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/enums/product_bidding_category_status.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
# ---------------------------------------------------------------------------
# protoc-generated descriptor tables (see the "DO NOT EDIT" header): the
# serialized blobs below are the compiled .proto file and must not be
# hand-modified — regenerate from the .proto source instead.
# ---------------------------------------------------------------------------
DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/ads/googleads_v1/proto/enums/product_bidding_category_status.proto',
  package='google.ads.googleads.v1.enums',
  syntax='proto3',
  serialized_options=_b('\n!com.google.ads.googleads.v1.enumsB!ProductBiddingCategoryStatusProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v1/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V1.Enums\312\002\035Google\\Ads\\GoogleAds\\V1\\Enums\352\002!Google::Ads::GoogleAds::V1::Enums'),
  serialized_pb=_b('\nIgoogle/ads/googleads_v1/proto/enums/product_bidding_category_status.proto\x12\x1dgoogle.ads.googleads.v1.enums\x1a\x1cgoogle/api/annotations.proto\"z\n ProductBiddingCategoryStatusEnum\"V\n\x1cProductBiddingCategoryStatus\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\n\n\x06\x41\x43TIVE\x10\x02\x12\x0c\n\x08OBSOLETE\x10\x03\x42\xf6\x01\n!com.google.ads.googleads.v1.enumsB!ProductBiddingCategoryStatusProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v1/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V1.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V1\\Enums\xea\x02!Google::Ads::GoogleAds::V1::Enumsb\x06proto3')
  ,
  dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])

# Enum value table: UNSPECIFIED/UNKNOWN/ACTIVE/OBSOLETE.
_PRODUCTBIDDINGCATEGORYSTATUSENUM_PRODUCTBIDDINGCATEGORYSTATUS = _descriptor.EnumDescriptor(
  name='ProductBiddingCategoryStatus',
  full_name='google.ads.googleads.v1.enums.ProductBiddingCategoryStatusEnum.ProductBiddingCategoryStatus',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNSPECIFIED', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='UNKNOWN', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ACTIVE', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='OBSOLETE', index=3, number=3,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=174,
  serialized_end=260,
)
_sym_db.RegisterEnumDescriptor(_PRODUCTBIDDINGCATEGORYSTATUSENUM_PRODUCTBIDDINGCATEGORYSTATUS)

# Wrapper message that holds the nested enum above.
_PRODUCTBIDDINGCATEGORYSTATUSENUM = _descriptor.Descriptor(
  name='ProductBiddingCategoryStatusEnum',
  full_name='google.ads.googleads.v1.enums.ProductBiddingCategoryStatusEnum',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _PRODUCTBIDDINGCATEGORYSTATUSENUM_PRODUCTBIDDINGCATEGORYSTATUS,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=138,
  serialized_end=260,
)

_PRODUCTBIDDINGCATEGORYSTATUSENUM_PRODUCTBIDDINGCATEGORYSTATUS.containing_type = _PRODUCTBIDDINGCATEGORYSTATUSENUM
DESCRIPTOR.message_types_by_name['ProductBiddingCategoryStatusEnum'] = _PRODUCTBIDDINGCATEGORYSTATUSENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Build the concrete Python message class from the descriptor.
ProductBiddingCategoryStatusEnum = _reflection.GeneratedProtocolMessageType('ProductBiddingCategoryStatusEnum', (_message.Message,), dict(
  DESCRIPTOR = _PRODUCTBIDDINGCATEGORYSTATUSENUM,
  __module__ = 'google.ads.googleads_v1.proto.enums.product_bidding_category_status_pb2'
  ,
  __doc__ = """Status of the product bidding category.
  """,
  # @@protoc_insertion_point(class_scope:google.ads.googleads.v1.enums.ProductBiddingCategoryStatusEnum)
  ))
_sym_db.RegisterMessage(ProductBiddingCategoryStatusEnum)

DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
"noreply@github.com"
] | BrunoWMello.noreply@github.com |
faf060d7b38be5fea1712230d9bbb6f91e45b7f9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy3275.py | b7544f2ecf5b480e932e0f8dbee5116b5707548d | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,327 | py | # qubit number=4
# total number=43
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR the bit strings *s* and *t* position-wise; return the result reversed."""
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Inner product of the bit strings *s* and *t*, modulo 2, as "0"/"1"."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f on *n* control qubits plus one target.

    For every n-bit input where f evaluates to "1", the target is flipped
    via a multi-controlled Toffoli ('noancilla' mode); X gates temporarily
    invert the zero bits of the pattern so the controls fire on it.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")

    for value in range(2 ** n):
        pattern = np.binary_repr(value, n)
        if f(pattern) != "1":
            continue
        zero_positions = [j for j in range(n) if pattern[j] == "0"]
        for j in zero_positions:
            oracle.x(controls[j])
        oracle.mct(controls, target[0], None, mode='noancilla')
        # Undo the inversions so the next pattern starts from a clean state.
        for j in zero_positions:
            oracle.x(controls[j])
    return oracle
def make_circuit(n: int, f) -> QuantumCircuit:
    """Build the n-qubit benchmark circuit: a Deutsch-Jozsa-style H sandwich
    around the oracle for *f*, followed by an auto-generated tail of gates
    (the `# number=NN` tags are the generator's gate ids), then measure all
    qubits into the classical register."""
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # H-CZ-H is equivalent to CNOT(0 -> 3).
    prog.h(input_qubit[3])  # number=31
    prog.cz(input_qubit[0], input_qubit[3])  # number=32
    prog.h(input_qubit[3])  # number=33
    prog.x(input_qubit[3])  # number=29
    prog.cx(input_qubit[0], input_qubit[3])  # number=30
    prog.h(input_qubit[1])  # number=2
    prog.h(input_qubit[2])  # number=3
    prog.h(input_qubit[3])  # number=4
    prog.h(input_qubit[0])  # number=5

    # Oracle on the first n-1 qubits, targeting the last.
    oracle = build_oracle(n - 1, f)
    prog.append(oracle.to_gate(), [input_qubit[i] for i in range(n - 1)] + [input_qubit[n - 1]])
    prog.h(input_qubit[1])  # number=6
    prog.h(input_qubit[2])  # number=7
    prog.h(input_qubit[3])  # number=8
    prog.cx(input_qubit[0], input_qubit[2])  # number=37
    prog.x(input_qubit[2])  # number=38
    prog.h(input_qubit[2])  # number=40
    prog.cz(input_qubit[0], input_qubit[2])  # number=41
    prog.h(input_qubit[2])  # number=42
    prog.y(input_qubit[1])  # number=19
    prog.h(input_qubit[0])  # number=9
    prog.y(input_qubit[2])  # number=10
    prog.y(input_qubit[2])  # number=11
    prog.y(input_qubit[3])  # number=20
    prog.y(input_qubit[1])  # number=12
    prog.rx(-2.158274153016188, input_qubit[3])  # number=24
    prog.h(input_qubit[0])  # number=16
    prog.cz(input_qubit[2], input_qubit[0])  # number=17
    prog.h(input_qubit[0])  # number=18
    prog.cx(input_qubit[1], input_qubit[0])  # number=21
    prog.z(input_qubit[1])  # number=22
    prog.cx(input_qubit[1], input_qubit[0])  # number=23
    prog.h(input_qubit[0])  # number=25
    prog.cz(input_qubit[2], input_qubit[0])  # number=26
    prog.h(input_qubit[0])  # number=27
    prog.x(input_qubit[0])  # number=35
    prog.x(input_qubit[0])  # number=36
    # circuit end

    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    a = "111"
    b = "0"
    # f(x) = (a . x) XOR b — a Bernstein-Vazirani style oracle function.
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4, f)
    backend = FakeVigo()
    sample_shot = 8000

    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()  # NOTE(review): re-created; first instance only served execute() above
    circuit1 = transpile(prog, backend, optimization_level=2)

    # Dump measurement counts, transpiled depth and circuit to the data file.
    writefile = open("../data/startQiskit_noisy3275.csv", "w")
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
34adc483132c9caf762c7cacb07b61a87b41f63f | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /tNFTY9ggpTkeHvBaN_8.py | a1976203fc818883d4a044522788b64c44d4a5b8 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py |
def total_volume(*args):
sum = 0
for each_list in args:
mul=1
for num in each_list:
mul = mul*num
sum = sum + mul
return sum
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
128e264329fe51285f1f6f5e2510c05123196b45 | 46244bb6af145cb393846505f37bf576a8396aa0 | /leetcode/152.maximum_product_subarray.py | 0fc1bda3b921136bc8e52fbde9679d2808d39979 | [] | no_license | aoeuidht/homework | c4fabfb5f45dbef0874e9732c7d026a7f00e13dc | 49fb2a2f8a78227589da3e5ec82ea7844b36e0e7 | refs/heads/master | 2022-10-28T06:42:04.343618 | 2022-10-15T15:52:06 | 2022-10-15T15:52:06 | 18,726,877 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,379 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
class Solution:
# @param A, a list of integers
# @return an integer
def maxProduct(self, A):
al = len(A)
if al < 1:
return 0
elif al == 1:
return A[0]
max_product = A[0]
neg_lo, neg_hi = None, None
lo, hi = 0, None
_p = 0
for idx, a in enumerate(A):
if a == 0:
max_product = max(max_product, 0)
hi = idx - 1
if _p < 0:
max_product = self.calc_negative(A, lo, hi,
neg_lo, neg_hi,
_p, max_product)
lo = idx + 1
neg_lo, neg_hi = None, None
_p = 0
continue
if a < 0:
if neg_lo is None:
neg_lo = idx
else:
neg_hi = idx
_p = a if (_p == 0) else (_p * a)
max_product = _p if (_p > max_product) else max_product
hi = idx
if _p < 0:
max_product = self.calc_negative(A, lo, hi,
neg_lo, neg_hi,
_p, max_product)
return max_product
def calc_negative(self, A, lo, hi,
neg_lo, neg_hi,
p, max_product):
if hi == lo:
return max_product
elif (hi - lo) == 1:
return max(max_product, A[lo], A[hi])
print lo, hi, neg_lo, neg_hi, p, max_product
# try to remove the first part
__p = p
for i in range(neg_lo, -1, -1):
if A[i] != 0:
__p /= A[i]
else:
break
max_product = __p if (__p > max_product) else max_product
# try to remove the right part
neg_hi = neg_hi if not (neg_hi is None) else neg_lo
__p = p
for i in range(neg_hi, len(A)):
if A[i] != 0:
__p /= A[i]
else:
break
max_product = __p if (__p > max_product) else max_product
return max_product
if __name__ == '__main__':
    s = Solution()
    # Accept a comma-separated list on the command line, e.g. "2,3,-2,4".
    # A list comprehension (rather than map) keeps this valid on Python 3,
    # where map() returns a non-subscriptable iterator; print() is the
    # cross-version form of the old `print r` statement.
    r = s.maxProduct([int(x) for x in sys.argv[1].split(',')])
    print(r)
| [
"sockpuppet.lea@gmail.com"
] | sockpuppet.lea@gmail.com |
ff897b58afc4069f5b6d6e1cdadf44950299b683 | dddd18198df381e61f5dd71985edd362d19001c4 | /djstell/reactor/models.py | ed65438e73460408a7b344e10df947a5ed0d71be | [] | no_license | nedbat/nedbatcom | 1b148e3cf30cff3c121dbe5412017e8a7f42a30a | d460a606c77ebf856557ccdee29dd27a69455896 | refs/heads/master | 2023-09-01T09:28:37.368419 | 2023-08-29T18:14:38 | 2023-08-29T18:14:38 | 7,422,053 | 8 | 6 | null | 2022-07-10T23:28:37 | 2013-01-03T11:52:06 | HTML | UTF-8 | Python | false | false | 1,753 | py | from django.conf import settings
from django.db import models
from django.urls import reverse
from .tools import md5
class Comment(models.Model):
    """A reader comment attached to a blog entry (identified by entryid)."""
    entryid = models.CharField(max_length=40, db_index=True)  # id/slug of the entry commented on
    name = models.CharField(max_length=60)
    email = models.CharField(max_length=100, null=True)
    website = models.CharField(max_length=100, null=True)
    posted = models.DateTimeField(db_index=True)
    body = models.TextField()
    notify = models.BooleanField()  # commenter wants follow-up notifications

    def __str__(self):
        return f"Comment from {self.name} on {self.entryid}"

    def admin_url(self):
        """Absolute URL of this comment's change page in the Django admin."""
        aurl = reverse(f'admin:{self._meta.app_label}_{self._meta.model_name}_change', args=(self.pk,))
        return settings.EXT_BASE + aurl

    def gravatar_url(self):
        """Gravatar URL for the commenter, with a deterministic local fallback."""
        # Fallback avatar index (0-281) derived from a hash of email+website,
        # so each commenter without a gravatar still gets a stable picture.
        # (md5 here is the project helper from .tools, not hashlib directly.)
        anum = int(md5(self.email, self.website)[:4], 16) % 282
        email_hash = md5(self.email)
        avhost = "https://nedbatchelder.com"
        default_url = f"{avhost}/pix/avatar/a{anum}.jpg"
        url = f"https://www.gravatar.com/avatar/{email_hash}.jpg?default={default_url}&size=80"
        return url

    def fixed_website(self):
        """Ancient comments might be missing http://, so add it."""
        if self.website and "://" not in self.website:
            return "http://" + self.website
        else:
            return self.website
class ReactorRouter:
    """Database router that pins every model in the ``reactor`` app to the
    ``reactor`` database connection and leaves all other apps alone."""

    def db_for_read(self, model, **hints):
        """Route reads of reactor models to the reactor database."""
        return "reactor" if model._meta.app_label == "reactor" else None

    # Writes follow exactly the same routing rule as reads.
    db_for_write = db_for_read

    def allow_relation(self, obj1, obj2, **hints):
        """No opinion on cross-object relations."""
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        """Only the reactor database may take reactor migrations."""
        if app_label != "reactor":
            return None
        return db == "reactor"
| [
"ned@nedbatchelder.com"
] | ned@nedbatchelder.com |
730a99c3692fb4c357608ee9bc85fb75f8fc6ec0 | 9141e27444128fa8474235f63225f8c121c42878 | /tests/extensions/test_registry.py | 0a0de60d4e5c7a1b0cfcc2229d21f243b13e32de | [
"BSD-3-Clause"
] | permissive | gc-ss/pyapp | 00a8ab7c517fe6613049640991e27880a00887fb | 1fa2651d8b42f6e28b0c33b2b4fd287affd3a88f | refs/heads/master | 2023-04-08T01:51:43.374810 | 2020-09-14T00:22:07 | 2020-09-14T00:22:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,257 | py | from unittest import mock
import pytest
from pyapp.extensions.registry import ExtensionDetail
from pyapp.extensions.registry import ExtensionEntryPoints
from pyapp.extensions.registry import ExtensionRegistry
from pyapp.extensions.registry import pkg_resources
from tests.sample_ext import SampleExtension
from tests.sample_ext_simple import SampleSimpleExtension
class TestExtensionDetail:
    """Tests for ExtensionDetail path resolution and lifecycle hooks."""

    @pytest.fixture
    def target(self):
        # Extension module that uses the relative-path conventions and
        # defines both optional hooks.
        return ExtensionDetail(
            SampleExtension(), "SampleExtension", "Sample Extension", "1.1"
        )

    @pytest.fixture
    def target_simple(self):
        # "Simple" extension: absolute paths and no optional hooks defined.
        return ExtensionDetail(
            SampleSimpleExtension(),
            "SampleSimpleExtension",
            "Sample Simple Extension",
            "1.2",
        )

    def test_default_settings(self, target: ExtensionDetail):
        assert target.default_settings == "tests.sample_ext.default_settings"

    def test_default_settings__absolute(self, target_simple: ExtensionDetail):
        assert target_simple.default_settings == "tests.sample_ext.default_settings"

    def test_checks_module(self, target: ExtensionDetail):
        assert target.checks_module == "tests.sample_ext.checks"

    def test_checks_module__absolute(self, target_simple: ExtensionDetail):
        assert target_simple.checks_module == "tests.sample_ext.checks"

    def test_register_commands(self, target: ExtensionDetail):
        target.register_commands("abc")
        assert target.extension.register_commands_called == "abc"

    def test_checks_module__not_defined(self, target_simple: ExtensionDetail):
        # NOTE(review): despite the name, this exercises register_commands on
        # an extension that does not define the hook (it stays un-called).
        target_simple.register_commands("abc")
        assert target_simple.extension.register_commands_called is False

    def test_ready(self, target: ExtensionDetail):
        target.ready()
        assert target.extension.ready_called is True

    def test_ready__not_defined(self, target_simple: ExtensionDetail):
        target_simple.ready()
        assert target_simple.extension.ready_called is False
def _make_entry_point(name, project_name, version):
mock_entry_point = mock.Mock()
mock_entry_point.name = name
mock_entry_point.dist.project_name = project_name
mock_entry_point.dist.version = version
mock_entry_point.resolve.return_value = "ExtensionInstance"
return mock_entry_point
class TestExtensionEntryPoints:
    """Tests for discovery of extensions via pkg_resources entry points."""

    @pytest.fixture
    def patchentrypoints(self, monkeypatch):
        # Replace pkg_resources.iter_entry_points with two canned entry points.
        entry_points = (
            _make_entry_point("FooExtension", "foo-extension", "0.1.2"),
            _make_entry_point("BarExtension", "bar-extension", "3.2.1"),
        )
        mock_iter_entry_points = mock.Mock(return_value=entry_points)
        monkeypatch.setattr(pkg_resources, "iter_entry_points", mock_iter_entry_points)

    @pytest.fixture
    def target(self, patchentrypoints):
        return ExtensionEntryPoints()

    def test_entry_points(self, target: ExtensionEntryPoints):
        actual = [ep.name for ep in target._entry_points()]
        assert ["FooExtension", "BarExtension"] == actual

    def test_entry_points__with_white_list(self, patchentrypoints):
        # Only white-listed extension names are yielded.
        target = ExtensionEntryPoints(white_list=("BarExtension",))
        actual = [ep.name for ep in target._entry_points()]
        assert ["BarExtension"] == actual

    def test_extensions(self, target: ExtensionEntryPoints):
        actual = list(target.extensions())
        assert [
            ExtensionDetail(
                "ExtensionInstance", "FooExtension", "foo-extension", "0.1.2"
            ),
            ExtensionDetail(
                "ExtensionInstance", "BarExtension", "bar-extension", "3.2.1"
            ),
        ] == actual

    def test_extensions__no_load(self, target: ExtensionEntryPoints):
        # With load=False the entry point is not resolved -> extension is None.
        actual = list(target.extensions(False))
        assert [
            ExtensionDetail(None, "FooExtension", "foo-extension", "0.1.2"),
            ExtensionDetail(None, "BarExtension", "bar-extension", "3.2.1"),
        ] == actual
class TestExtensionRegistry:
    """Tests for ExtensionRegistry aggregation and lifecycle fan-out."""

    @pytest.fixture
    def target(self):
        # Registry pre-seeded with a single extension.
        return ExtensionRegistry(
            [
                ExtensionDetail(
                    SampleExtension(), "SampleExtension", "Sample Extension", "1.1"
                )
            ]
        )

    def test_load_from(self, target: ExtensionRegistry):
        target.load_from(
            [
                ExtensionDetail(
                    SampleSimpleExtension(),
                    "SampleSimpleExtension",
                    "Sample Simple Extension",
                    "1.2",
                )
            ]
        )
        assert len(target) == 2

    def test_register_commands(self, target: ExtensionRegistry):
        # register_commands must be fanned out to every registered extension.
        mock_extension = mock.Mock()
        target.append(
            ExtensionDetail(mock_extension, "MockExtension", "Mock Extension", "1.1")
        )
        target.register_commands("abc")
        mock_extension.register_commands.assert_called_with("abc")

    def test_ready(self, target: ExtensionRegistry):
        # ready() must be fanned out to every registered extension.
        mock_extension = mock.Mock()
        target.append(
            ExtensionDetail(mock_extension, "MockExtension", "Mock Extension", "1.1")
        )
        target.ready()
        mock_extension.ready.assert_called()
"tim@savage.company"
] | tim@savage.company |
6ba578146ce74c3dceec8a819b6e7c89264220a3 | 23d962a8e36b4a58e63e15f3c61a88b537a80f6e | /test/unit/mongo_class/repsetcoll_ins_doc.py | 8a75acee80fa5504bdf2fc7ed82626d1c2633972 | [
"MIT"
] | permissive | deepcoder42/mongo-lib | 3a893d38edb3e03decff0cfbcbf29339026909f9 | fa2b65587ab88ee90c9d85f12dd642c6295e0d94 | refs/heads/master | 2023-06-14T10:10:12.032877 | 2021-07-13T15:22:17 | 2021-07-13T15:22:17 | 337,179,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | #!/usr/bin/python
# Classification (U)
"""Program: repsetcoll_ins_doc.py
Description: Unit testing of RepSetColl.ins_doc in mongo_class.py.
Usage:
test/unit/mongo_class/repsetcoll_ins_doc.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import mongo_class
import version
__version__ = version.__version__
class InsDoc(object):
    """Class: InsDoc

    Description: Stub collection object standing in for RepSetColl.db_coll.
        Records the document handed to insert_one.

    Methods:
        __init__
        insert_one
    """

    def __init__(self):
        """Function: __init__

        Description: Start with no document captured.

        Arguments:
        """
        self.doc = None

    def insert_one(self, doc):
        """Function: insert_one

        Description: Capture *doc* and report success, mimicking pymongo's
            insert_one.

        Arguments:
            (input) doc -> Document
        """
        self.doc = doc
        return True
class UnitTest(unittest.TestCase):
    """Class: UnitTest

    Description: Class which is a representation of a unit testing.

    Methods:
        setUp
        test_ins_doc
    """

    def setUp(self):
        """Function: setUp

        Description: Initialization for unit testing.

        Arguments:
        """
        self.name = "Mongo_Server"
        self.user = "mongo_user"
        self.japd = "mongo_pd"
        self.host = "host_server"
        self.port = 27017
        self.dbs = "test"
        self.coll = None
        self.db_auth = None
        self.repset = "mongo_repset"
        self.doc = {"Document"}  # NOTE(review): set literal, not a dict — serves as an opaque payload

    def test_ins_doc(self):
        """Function: test_ins_doc

        Description: Test ins_doc method.

        Arguments:
        """
        mongo = mongo_class.RepSetColl(
            self.name, self.user, self.japd, self.host, self.port,
            repset=self.repset)
        # Swap in the stub so no real MongoDB connection is needed.
        mongo.db_coll = InsDoc()

        # ins_doc presumably returns None (falsy) after delegating to
        # insert_one — TODO confirm against mongo_class.RepSetColl.
        self.assertFalse(mongo.ins_doc(self.doc))
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| [
"deepcoder42@gmail.com"
] | deepcoder42@gmail.com |
648e57155207f80d8186777a0f154c09b450a3b0 | 0cf704d61352737ba7a7698043375446ee1f7b03 | /travis/waitdb | 1574aee0701962e71aab9108ede02fe2ebadeb5d | [
"BSD-2-Clause-Views"
] | permissive | experimentAccount0/c2cgeoportal | 4be327e7be6c39b631e4500118507d9a8823122e | 5f7f781cf18fe396d0e940bc3dc6a5f928e030cd | refs/heads/master | 2021-01-15T21:33:58.169936 | 2017-08-07T07:51:41 | 2017-08-07T07:51:41 | 99,875,202 | 0 | 0 | null | 2017-08-10T03:04:56 | 2017-08-10T03:04:55 | null | UTF-8 | Python | false | false | 401 | #!/usr/bin/python
import sys
import time

import sqlalchemy

# Seconds to sleep between attempts; doubled after every failure
# (exponential backoff) so a slow-starting DB is not hammered.
sleep_time = 1

# Wait for the DB to be up: keep trying to open a connection with the
# SQLAlchemy URL given as the first command-line argument until one
# succeeds, then exit with status 0.
while True:
    print("Waiting for the DB to be reachable")
    try:
        sqlalchemy.create_engine(sys.argv[1]).connect()
        # sys.exit is always available, unlike the site-provided exit().
        sys.exit(0)
    except Exception as e:
        # Connection failed -- report the error, back off, and retry.
        # (The original also re-printed the "Waiting" banner here, so
        # every failed attempt logged it twice.)
        print(str(e))
        time.sleep(sleep_time)
        sleep_time *= 2
| [
"stephane.brunner@camptocamp.com"
] | stephane.brunner@camptocamp.com | |
4d971e6b6f550716ebcddbf6d61b9b7b7ab4e0aa | 546b8c3e1b876aab272e587765951e8acd7b3122 | /irlc/ex00/listcomp.py | b68fff096aa522d77f88184abdb258b84db06642 | [] | no_license | natashanorsker/RL_snakes | 2b8a9da5dd1e794e832830ab64e57ab7d4b0d6c3 | be8c75d1aa7a5ba7a6af50a0a990a97b0242c49d | refs/heads/main | 2023-04-21T14:08:30.840757 | 2021-05-11T17:33:35 | 2021-05-11T17:33:35 | 358,572,447 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | """
This file may not be shared/redistributed without permission. Please read copyright notice in the git repo. If this file contains other copyright notices disregard this text.
"""
# Demonstration of list comprehensions: map, filter, and both combined.
nums = [1, 2, 3, 4, 5, 6]
# Map: add one to every element.
plusOneNums = [x + 1 for x in nums]
# Filter: keep only the odd numbers.
oddNums = [x for x in nums if x % 2 == 1]
print(oddNums)
# Map + filter combined: add one to each odd number.
oddNumsPlusOne = [x + 1 for x in nums if x % 2 == 1]
print(oddNumsPlusOne)
"""
Dictionary comprehension. We make a new dictionary where both the keys and values may be changed
"""
toy_cost = {'toy car': 10, 'skipping rope': 6, 'toy train': 20}
print(toy_cost)
# Transform values only: double every price.
double_cost = {k: 2*v for k, v in toy_cost.items()}
print(double_cost)
# Transform keys and values at once.
bad_toys = {"broken "+k: v//2 for k, v in toy_cost.items()}
print(bad_toys)
# Filter entries: keep only toys costing at least 10.
expensive_toys = {k: v for k, v in toy_cost.items() if v >= 10}
print(expensive_toys)
| [
"natashanorsker@gmail.com"
] | natashanorsker@gmail.com |
7673db1213b13fbf378ed98baa8e76b8dc967d4b | 74be814f7cd10d3c91a53460bd6698aa8bc95704 | /AcWing算法基础课/记忆化搜索/901. 滑雪.py | aa5469dd0aafe27d88af53c536c5fa9b8188db37 | [] | no_license | weiyuyan/LeetCode | 7202f7422bc3bef6bd35ea299550b51905401656 | 19db0e78826d3e3d27d2574abd9d461eb41458d1 | refs/heads/master | 2020-12-03T17:10:53.738507 | 2020-05-27T08:28:36 | 2020-05-27T08:28:36 | 231,402,839 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,204 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:ShidongDu time:2020/4/1
'''
AcWing 901. 滑雪 (Skiing)

Given an R x C grid of heights (1 <= R, C <= 300, heights 0..10000), find
the length of the longest path that repeatedly moves up/down/left/right to
a strictly lower neighbouring cell.

Sample:
    5 5
    1 2 3 4 5
    16 17 18 19 6
    15 24 25 20 7
    14 23 22 21 8
    13 12 11 10 9
Answer: 25 (the spiral 25-24-...-2-1).

Approach (memoized search): dp[i][j] is the longest run starting at cell
(i, j); each cell is computed exactly once, so total work is O(R*C).
'''
import sys

# Neighbour offsets: up, right, down, left.
DX = (-1, 0, 1, 0)
DY = (0, 1, 0, -1)


def longest_ski(r, c, heights):
    """Return the longest strictly decreasing path length in the grid.

    heights is 0-indexed: heights[i][j] is the height of cell (i, j).
    """
    dp = [[0] * c for _ in range(r)]  # 0 means "not computed yet"

    def dfs(i, j):
        if dp[i][j]:
            return dp[i][j]
        best = 1  # the path consisting of (i, j) alone
        for d in range(4):
            a, b = i + DX[d], j + DY[d]
            # Only slide onto in-bounds cells that are strictly lower.
            if 0 <= a < r and 0 <= b < c and heights[a][b] < heights[i][j]:
                best = max(best, dfs(a, b) + 1)
        dp[i][j] = best
        return best

    return max(dfs(i, j) for i in range(r) for j in range(c))


def main():
    """Read the grid from stdin and print the answer."""
    # BUG FIX: a path can visit up to R*C = 90000 cells, far beyond
    # Python's default recursion limit (~1000), which made the original
    # crash with RecursionError on large inputs.
    sys.setrecursionlimit(300 * 300 + 1000)
    data = sys.stdin.read().split()
    r, c = int(data[0]), int(data[1])
    values = list(map(int, data[2:2 + r * c]))
    heights = [values[i * c:(i + 1) * c] for i in range(r)]
    print(longest_ski(r, c, heights))


if __name__ == '__main__':
    main()
# res = 0
# for i in range(1, r+1):
# for j in range(1, c+1):
# res = max(res, dfs(i,j))
# print(res) | [
"244128764@qq.com"
] | 244128764@qq.com |
5bb0f1b8c1083657bb7ed66ce5158451716ad0f7 | 4ea675fdab4b7aab8b84fd8b6127ff452e4e4a8f | /dc_mosaic.py | 6ec9f6214c790cb63e89745bd668433d2386bb32 | [
"Apache-2.0"
] | permissive | v0lat1le/data_cube_utilities | f1819ed5686a92c0ca20a40972afb161600d2462 | 5015da91c4380925ce7920e18a7d78268a989048 | refs/heads/master | 2020-05-18T15:10:35.361663 | 2017-03-08T00:18:58 | 2017-03-08T00:18:58 | 84,257,036 | 0 | 0 | null | 2017-03-07T23:36:53 | 2017-03-07T23:36:53 | null | UTF-8 | Python | false | false | 8,063 | py |
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gdal, osr
import collections
import gc
import numpy as np
import xarray as xr
from datetime import datetime
import collections
from collections import OrderedDict
import datacube
from . import dc_utilities as utilities
# Author: KMF
# Creation date: 2016-06-14
# Modified by: AHDS
# Last modified date:
def create_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None, **kwargs):
    """
    Description:
      Creates a most recent - oldest mosaic of the input dataset. If no clean mask is given,
      the 'cf_mask' variable must be included in the input dataset, as it will be used
      to create a clean mask.
    -----
    Inputs:
      dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube; should contain
        coordinates: time, latitude, longitude
        variables: variables to be mosaicked
        If user does not provide a clean_mask, dataset_in must also include the cf_mask
        variable. NOTE: dataset_in is masked in place.
    Optional Inputs:
      clean_mask (nd numpy array with dtype boolean) - true for values user considers clean;
        if user does not provide a clean mask, one will be created using cfmask
      no_data (int/float) - no data pixel value; default: -9999
      intermediate_product (xarray.Dataset) - partial mosaic from a previous call to
        continue filling in.
      reverse_time (bool, via kwargs) - if truthy, iterate time slices newest-first.
    Output:
      dataset_out (xarray.Dataset) - mosaicked data with
        coordinates: latitude, longitude
        variables: same as dataset_in
    """
    # Create clean_mask from cfmask if none given
    if clean_mask is None:
        cfmask = dataset_in.cf_mask
        clean_mask = utilities.create_cfmask_clean_mask(cfmask)

    # Mask the data with clean_mask: every pixel that is not clean is set
    # to the no-data value (in place).
    for key in list(dataset_in.data_vars):
        dataset_in[key].values[np.invert(clean_mask)] = no_data

    if intermediate_product is not None:
        dataset_out = intermediate_product.copy(deep=True)
    else:
        dataset_out = None

    # BUG FIX: the original indexed kwargs['reverse_time'], which raised
    # KeyError whenever any other keyword was passed without it; .get()
    # defaults to a falsy value safely.
    time_slices = reversed(range(len(clean_mask))) if kwargs.get('reverse_time') else range(len(clean_mask))
    for index in time_slices:
        dataset_slice = dataset_in.isel(time=index).astype("int16").drop('time')
        if dataset_out is None:
            dataset_out = dataset_slice.copy(deep=True)
            # Clear out the params as they can't be written to nc.
            dataset_out.attrs = OrderedDict()
        else:
            for key in list(dataset_in.data_vars):
                # BUG FIX: compare against the no_data parameter rather
                # than a hard-coded -9999 so custom fill values work.
                empty = dataset_out[key].values == no_data
                dataset_out[key].values[empty] = dataset_slice[key].values[empty]
    return dataset_out
def create_median_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None, **kwargs):
    """
    Description:
      Method for calculating the per-pixel median value across time for a dataset.
    -----
    Input:
      dataset_in (xarray dataset) - the set of data with clouds and no data removed.
        If no clean_mask is given, must contain the 'cf_mask' variable.
    Optional Inputs:
      clean_mask (boolean numpy array) - true for pixels the user considers clean.
      no_data (int/float) - no data value; default: -9999.
      intermediate_product - unused here; accepted for a uniform mosaic API.
    Output:
      dataset_out (xarray dataset, int16) - per-pixel median composite;
        pixels with no clean observations hold no_data.
    """
    # Create clean_mask from cfmask if none given
    if clean_mask is None:
        cfmask = dataset_in.cf_mask
        clean_mask = utilities.create_cfmask_clean_mask(cfmask)

    # float64 is required so unclean pixels can be flagged with np.nan
    # below (astype returns a copy, so the caller's dataset is untouched).
    dataset_in = dataset_in.astype("float64")
    for key in list(dataset_in.data_vars):
        dataset_in[key].values[np.invert(clean_mask)] = no_data

    dataset_out = dataset_in.isel(time=0).drop('time').copy(deep=True)
    # Clear the attrs as they can't be written to nc.
    dataset_out.attrs = OrderedDict()

    # Loop over every data variable, taking the median over time while
    # ignoring the NaN-flagged (unclean/no-data) observations.
    for key in list(dataset_in.data_vars):
        dataset_in[key].values[dataset_in[key].values == no_data] = np.nan
        dataset_out[key].values = np.nanmedian(dataset_in[key].values, axis=0)
        # BUG FIX: the original tested `values == np.nan`, which is always
        # False (NaN never compares equal to anything), so pixels with no
        # clean observations stayed NaN and overflowed the int16 cast.
        nan_pixels = np.isnan(dataset_out[key].values)
        dataset_out[key].values[nan_pixels] = no_data
    return dataset_out.astype('int16')
def create_max_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None, **kwargs):
    """
    Description:
      Builds a composite where each pixel is taken from the acquisition with
      the highest NDVI value at that location.
    -----
    Input:
      dataset_in (xarray dataset) - the set of data with clouds and no data removed.
        Must contain 'nir' and 'red' data variables; if no clean_mask is
        supplied it must also contain 'cf_mask'. NOTE: masked in place.
    Optional Inputs:
      clean_mask (boolean numpy array) - true for pixels considered clean.
      no_data (int/float) - no data value.
      intermediate_product (xarray dataset) - partial composite from a
        previous call to continue filling in.
    Output:
      dataset_out (xarray dataset) - per-pixel max-NDVI composite; carries an
        extra 'ndvi' data variable used for the comparison.
    """
    # Create clean_mask from cfmask if none given
    if clean_mask is None:
        cfmask = dataset_in.cf_mask
        clean_mask = utilities.create_cfmask_clean_mask(cfmask)
    #dataset_in = dataset_in.drop('cf_mask')
    # Flag every unclean pixel with the no-data value (in place).
    for key in list(dataset_in.data_vars):
        dataset_in[key].values[np.invert(clean_mask)] = no_data
    if intermediate_product is not None:
        dataset_out = intermediate_product.copy(deep=True)
    else:
        dataset_out = None
    for timeslice in range(clean_mask.shape[0]):
        dataset_slice = dataset_in.isel(time=timeslice).astype("float64").drop('time')
        ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
        # Push unclean pixels to a huge negative NDVI so they can never win
        # the per-pixel maximum below.
        ndvi.values[np.invert(clean_mask)[timeslice,::]] = -1000000000
        dataset_slice['ndvi'] = ndvi
        if dataset_out is None:
            dataset_out = dataset_slice.copy(deep=True)
            #clear out the params as they can't be written to nc.
            dataset_out.attrs = OrderedDict()
        else:
            for key in list(dataset_slice.data_vars):
                # Take this acquisition's value wherever its NDVI beats the
                # best value seen so far at that pixel.
                dataset_out[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values > dataset_out.ndvi.values]
    return dataset_out
def create_min_ndvi_mosaic(dataset_in, clean_mask=None, no_data=-9999, intermediate_product=None, **kwargs):
    """
    Description:
      Builds a composite where each pixel is taken from the acquisition with
      the lowest NDVI value at that location.
    -----
    Input:
      dataset_in (xarray dataset) - the set of data with clouds and no data removed.
        Must contain 'nir' and 'red' data variables; if no clean_mask is
        supplied it must also contain 'cf_mask'. NOTE: masked in place.
    Optional Inputs:
      clean_mask (boolean numpy array) - true for pixels considered clean.
      no_data (int/float) - no data value.
      intermediate_product (xarray dataset) - partial composite from a
        previous call to continue filling in.
    Output:
      dataset_out (xarray dataset) - per-pixel min-NDVI composite; carries an
        extra 'ndvi' data variable used for the comparison.
    """
    # Create clean_mask from cfmask if none given
    if clean_mask is None:
        cfmask = dataset_in.cf_mask
        clean_mask = utilities.create_cfmask_clean_mask(cfmask)
    #dataset_in = dataset_in.drop('cf_mask')
    # Flag every unclean pixel with the no-data value (in place).
    for key in list(dataset_in.data_vars):
        dataset_in[key].values[np.invert(clean_mask)] = no_data
    if intermediate_product is not None:
        dataset_out = intermediate_product.copy(deep=True)
    else:
        dataset_out = None
    for timeslice in range(clean_mask.shape[0]):
        dataset_slice = dataset_in.isel(time=timeslice).astype("float64").drop('time')
        ndvi = (dataset_slice.nir - dataset_slice.red) / (dataset_slice.nir + dataset_slice.red)
        # Push unclean pixels to a huge positive NDVI so they can never win
        # the per-pixel minimum below.
        ndvi.values[np.invert(clean_mask)[timeslice,::]] = 1000000000
        dataset_slice['ndvi'] = ndvi
        if dataset_out is None:
            dataset_out = dataset_slice.copy(deep=True)
            #clear out the params as they can't be written to nc.
            dataset_out.attrs = OrderedDict()
        else:
            for key in list(dataset_slice.data_vars):
                # Take this acquisition's value wherever its NDVI is below
                # the best (lowest) value seen so far at that pixel.
                dataset_out[key].values[dataset_slice.ndvi.values < dataset_out.ndvi.values] = dataset_slice[key].values[dataset_slice.ndvi.values < dataset_out.ndvi.values]
    return dataset_out
| [
"alfredo.h.delos_santos@ama-inc.com"
] | alfredo.h.delos_santos@ama-inc.com |
2b3195ba262ba0da86a95bc47374e0749c55102d | 083ca3df7dba08779976d02d848315f85c45bf75 | /BinaryTreePostorderTraversal3.py | 58cab01e0197a9503d8d6e5bbc2ec068195ac364 | [] | no_license | jiangshen95/UbuntuLeetCode | 6427ce4dc8d9f0f6e74475faced1bcaaa9fc9f94 | fa02b469344cf7c82510249fba9aa59ae0cb4cc0 | refs/heads/master | 2021-05-07T02:04:47.215580 | 2020-06-11T02:33:35 | 2020-06-11T02:33:35 | 110,397,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py | class TreeNode:
    def __init__(self, x):
        # Node payload; left/right children start empty and are attached
        # later by the caller.
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def postorderTraversal(self, root):
        """Return the post-order traversal of a binary tree iteratively.

        :type root: TreeNode
        :rtype: List[int]

        Visits nodes in root-right-left order with an explicit stack and
        reverses the result, which yields post-order (left-right-root).
        Building with append plus one final reverse is O(n) overall; the
        original prepended via list concatenation, which copies the whole
        list on every node and is O(n^2).
        """
        if not root:
            return []
        stack = [root]
        reverse_post = []  # collects values in root-right-left order
        while stack:
            cur = stack.pop()
            reverse_post.append(cur.val)
            # Push left first so the right child is popped (visited) first.
            if cur.left:
                stack.append(cur.left)
            if cur.right:
                stack.append(cur.right)
        reverse_post.reverse()
        return reverse_post
if __name__=='__main__':
    # Build the sample tree:
    #       1
    #      / \
    #     2   3
    #    /
    #   4
    #    \
    #     5
    a = TreeNode(1)
    b = TreeNode(2)
    c = TreeNode(3)
    d = TreeNode(4)
    e = TreeNode(5)
    a.left = b;
    a.right = c;
    b.left = d;
    d.right = e;
    solution = Solution()
    # Post-order (left, right, root) of this tree is [5, 4, 2, 3, 1].
    print(solution.postorderTraversal(a))
| [
"jiangshen95@163.com"
] | jiangshen95@163.com |
664a8438d8355afbb35e1f4a504b4cf54c2ed6bd | 266947fd84eed629ed0c21f6d91134239512afd9 | /BeginnerContest_A/078.py | d4f876810de243df887f94455a3fc46497abefcc | [] | no_license | SkiMsyk/AtCoder | c86adeec4fa470ec14c1be7400c9fc8b3fb301cd | 8102b99cf0fb6d7fa304edb942d21cf7016cba7d | refs/heads/master | 2022-09-03T01:23:10.748038 | 2022-08-15T01:19:55 | 2022-08-15T01:19:55 | 239,656,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | X, Y = input().split()
d = {'A':1, 'B':2, 'C':3,
'D':4, 'E':5, 'F':6}
if d[X] < d[Y]:
print('<')
elif d[X] == d[Y]:
print('=')
else:
print('>')
| [
"sakaimasayuki@sakaimasayukinoMacBook-puro.local"
] | sakaimasayuki@sakaimasayukinoMacBook-puro.local |
061c123ad1b7b5a44b4b25024ccb37b6e4d35897 | 1f7c4413d1e03bb431fd3efd34127693b192c7f2 | /tests/actions/save_group.py | 45cdeccef99723d54170a5f520498d8b644866e8 | [
"MIT"
] | permissive | blueshed/blueshed-micro | 8c6f8f572561f09b18f48a7b8a0466d9531ad16b | d52b28aaabf0ac6d43594155561d6cb3fce64a80 | refs/heads/master | 2020-12-25T17:13:36.458520 | 2016-11-18T17:57:27 | 2016-11-18T17:57:27 | 58,062,960 | 0 | 0 | null | 2016-10-31T11:28:17 | 2016-05-04T15:26:17 | Python | UTF-8 | Python | false | false | 724 | py | from blueshed.micro.orm.orm_utils import serialize
from tests.actions import model
def save_group(context: 'micro_context',
               name: str,
               id: int=None):
    '''
    Insert a new group when no id is supplied,
    otherwise rename the existing group; in both
    cases broadcast the change and return the
    serialized row.
    '''
    with context.session as session:
        if not id:
            # No id -> brand new group.
            group = model.Group(name=name)
            signal = "group-added"
        else:
            # Existing group -> rename it.
            group = session.query(model.Group).get(id)
            group.name = name
            signal = "group-changed"
        session.add(group)
        session.commit()
        result = serialize(group)
        context.broadcast(signal, result)
        return result
| [
"pete@blueshed.co.uk"
] | pete@blueshed.co.uk |
294e69324ddea04aaeb771cab56347d49297d63a | eb12c383dca56dcd4a1e362bc02588bbd6e2dda6 | /racecar-ws/build/racecar/racecar/catkin_generated/pkg.develspace.context.pc.py | 9069f579dfa2ee8ea219e891d4f79361ff3f0e4e | [] | no_license | 174high/race-car | 2e5b403900496753bb71d5297cc3d1177620622a | 9fa772a84fee1489e9b52670f07e30d54db35fe8 | refs/heads/master | 2020-03-20T08:32:48.984720 | 2018-06-14T05:48:56 | 2018-06-14T05:48:56 | 137,311,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated pkg-config context for the catkin devel space;
# do not edit by hand -- regenerate via the catkin build instead.
CATKIN_PACKAGE_PREFIX = ""
# An empty template value maps to an empty list (never ['']).
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
# Run-time ROS package dependencies, space-separated for pkg-config.
PROJECT_CATKIN_DEPENDS = "razor_imu_9dof;tf;tf2_ros;urg_node;joy;rosbag;rostopic;rviz;gmapping;hector_mapping;robot_pose_ekf".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "racecar"
PROJECT_SPACE_DIR = "/home/ubuntu/racecar-ws/devel"
PROJECT_VERSION = "0.0.1"
| [
"you@example.com"
] | you@example.com |
c00b80fb096539a331e5b7ea6a66d592b34a175c | e64e88c5561698555381d9ed48d17dadbbcd753b | /crimefiles/migrations/0058_auto_20200711_1856.py | c3e7241fa313325d5d4020271b65ce47d68acb8c | [] | no_license | shreyasharma98/MS336_TechDhronas | 393979e67c3d5bbf5ff7a4924c71a637cc1337ea | cb2580cb64de8a52b9d1ddc4225ab077415ade7e | refs/heads/master | 2022-11-30T21:37:16.275763 | 2020-08-17T17:07:21 | 2020-08-17T17:07:21 | 284,041,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | # Generated by Django 2.0 on 2020-07-11 13:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: re-declare the ``complaintid``
    foreign key on several models with ``default=None`` and CASCADE
    deletion, so dependent rows are removed with their Complaint.
    """

    dependencies = [
        ('crimefiles', '0057_auto_20200711_1814'),
    ]

    operations = [
        # Each AlterField keeps the FK pointing at Complaint while setting
        # default=None and on_delete=CASCADE.
        migrations.AlterField(
            model_name='caseclose',
            name='complaintid',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='crimefiles.Complaint'),
        ),
        migrations.AlterField(
            model_name='casestatus',
            name='complaintid',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='crimefiles.Complaint'),
        ),
        migrations.AlterField(
            model_name='copstatus',
            name='complaintid',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='crimefiles.Complaint'),
        ),
        migrations.AlterField(
            model_name='declinereason',
            name='complaintid',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='crimefiles.Complaint'),
        ),
        migrations.AlterField(
            model_name='fir',
            name='complaintid',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='crimefiles.Complaint'),
        ),
    ]
"65544777+aakriti1435@users.noreply.github.com"
] | 65544777+aakriti1435@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.