blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
84e191ba583ee19dc3188ec624ee40a7210a743b | 3468fe20cd1128eb8e18354c30490421e504e4af | /portal/context_processors.py | 0a47dca0d4d1b2ed920de4681821c9ea8c8b7f4c | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | djpeluca/utopia-cms | 7da45422ffc4f1f397f385ea37243f2745a758de | 1e444afea565fdc734abf449b8ebe9b7c2c47d80 | refs/heads/main | 2023-08-19T23:04:44.666527 | 2021-10-27T01:55:11 | 2021-10-27T01:55:11 | 387,323,009 | 0 | 0 | BSD-3-Clause | 2021-07-19T03:03:48 | 2021-07-19T03:03:48 | null | UTF-8 | Python | false | false | 4,157 | py | # -*- coding: utf-8 -*-
import pycountry
from django_mobile import get_flavour
from django.conf import settings
from django.contrib.sites.models import Site
from core.models import Publication, Category
def urls(request):
    """Expose every settings attribute ending in ``_URL`` to templates.

    Any ``%s`` placeholder is stripped from each value; attributes whose
    value has no ``replace`` method are silently skipped.  ``URL_SCHEME``
    is always included.
    """
    context = {}
    url_settings = [name for name in dir(settings) if name.endswith('_URL')]
    for name in url_settings:
        try:
            context[name] = getattr(settings, name).replace('%s', '')
        except AttributeError:
            # Value is not string-like; leave it out of the context.
            pass
    context['URL_SCHEME'] = settings.URL_SCHEME
    return context
def gtm(request):
    """Provide the Google Tag Manager container ids (web and AMP) to templates."""
    return {
        'GTM_CONTAINER_ID': settings.GTM_CONTAINER_ID,
        'GTM_AMP_CONTAINER_ID': settings.GTM_AMP_CONTAINER_ID,
    }
def site(request):
    """Template context with the current Site plus robots/meta information."""
    site = Site.objects.get_current()
    # 'noindex' when any robots rule reports '/' among its disallowed URLs
    # (presumably a rule blocking the whole site); otherwise allow indexing.
    meta_robots_content = 'noindex' if any(['/' in r.disallowed_urls() for r in site.rule_set.all()]) else 'all'
    return {
        'site': site, 'meta_robots_content': meta_robots_content,
        # NOTE(review): modern pycountry uses the ``alpha_2`` keyword; ``alpha2``
        # implies an older pinned pycountry release -- confirm before upgrading.
        'country_name': pycountry.countries.get(alpha2=settings.LOCAL_COUNTRY).name,
        'site_description': getattr(settings, 'HOMEV3_SITE_DESCRIPTION', site.name)}
def publications(request):
    """Template context describing the default and secondary publications.

    Adds, per non-default publication ``p``: ``<SLUG>_SUB`` -> slug and
    ``<slug>_pub`` -> the Publication object, plus header/footer template
    names and a set of optional settings passed through verbatim.
    """
    DEFAULT_PUB = settings.DEFAULT_PUB
    try:
        default_pub = Publication.objects.get(slug=DEFAULT_PUB)
    except Publication.DoesNotExist:
        # Tolerate a missing default publication (e.g. fresh database).
        default_pub = None
    result = {
        'BASE_SUB': settings.BASE_SUB,
        'DEFAULT_PUB': DEFAULT_PUB,
        'default_pub': default_pub,
        'custom_icons_publications': getattr(settings, 'CORE_CUSTOM_ICONS_PUBLICATIONS', None),
    }
    for p in Publication.objects.exclude(slug=DEFAULT_PUB).iterator():
        result.update({p.slug.upper() + '_SUB': p.slug, p.slug + '_pub': p})
    # AMP pages get their own extra-header template, if configured.
    if get_flavour(request) == 'amp':
        result['extra_header_template'] = getattr(settings, 'HOMEV3_EXTRA_HEADER_TEMPLATE_AMP', None)
    else:
        result['extra_header_template'] = getattr(settings, 'HOMEV3_EXTRA_HEADER_TEMPLATE', None)
    result['footer_template'] = settings.HOMEV3_FOOTER_TEMPLATE
    # use this context processor to load also some other useful variables configured in settings
    result.update(
        (
            (var, getattr(settings, var, None)) for var in (
                'HOMEV3_CUSTOM_CSS',
                'HOMEV3_CUSTOM_PRINT_CSS',
                'HOMEV3_LOGO',
                'HOMEV3_LOGO_WIDTH',
                'HOMEV3_SECONDARY_LOGO',
                'HOMEV3_LOGO_FOOTER',
                'HOMEV3_LOGO_FOOTER_WIDTH',
                'HOMEV3_LOGO_PRINTABLE',
                'HOMEV3_LOGO_PRINTABLE_WIDTH',
                'HOMEV3_LOGO_ALT_TEXT',
                'HOMEV3_TWITTER_SITE_META',
                'HOMEV3_EXTRA_META',
                'CORE_ARTICLE_DETAIL_PUBLISHER_META',
                'PWA_MANIFEST_STATIC_PATH',
            )
        )
    )
    return result
def main_menus(request):
    """Build the context used to render the main menus and several UX modules."""
    nav_ths = 4 + getattr(settings, 'HOMEV3_MOBILE_NAV_EXTRA_THS', 0)
    context = {
        'MENU_CATEGORIES': Category.objects.filter(order__isnull=False),
        'CORE_ENABLE_PODCAST': getattr(settings, 'CORE_ENABLE_PODCAST', False),
        'MOBILE_NAV_EXTRA_TEMPLATE': getattr(settings, 'HOMEV3_MOBILE_NAV_EXTRA_TEMPLATE', None),
        'LOGIN_NO_REDIRECT_URLPATHS': ['/usuarios/sesion-cerrada/', '/usuarios/error/login/', '/admin/logout/'],
    }
    latest_links = getattr(settings, 'HOMEV3_LATEST_ARTICLE_LINKS', ())
    if latest_links:
        context['MENU_LATEST_ARTICLE_LINKS'] = latest_links
        nav_ths += 1
        if len(latest_links) > 1:
            context['MENU_LATEST_ARTICLE_LINKS_DROPDOWN'] = getattr(
                settings, 'HOMEV3_LATEST_ARTICLE_LINKS_DROPDOWN', 'latest'
            )
    try:
        menu_publications = Publication.objects.filter(public=True).exclude(
            slug__in=getattr(settings, 'HOMEV3_EXCLUDED_MENU_PUBLICATIONS', ()))
    except Exception:
        # Keep rendering menus even if the publications query fails.
        menu_publications = "no-menu"
    context['MENU_PUBLICATIONS'] = menu_publications
    context['mobile_nav_ths'] = nav_ths
    return context
| [
"apacheco@ladiaria.com.uy"
] | apacheco@ladiaria.com.uy |
08ce49dff12d8cd48db82a6193beb842cf53e16a | 11f7499cc543ee0704ddd79728c92ac9e550ccab | /frontend/__init__.py | 2e8d088b48426ee69192d33af989cec91174f8af | [
"MIT"
] | permissive | lampwins/orangengine-ui | daeef1eb7ff062cd74bd2b7304129bd76afd0ba2 | 8c864cd297176aa0ff9ead9682f2085f9fd3f1c0 | refs/heads/develop | 2021-01-11T14:35:02.138278 | 2017-04-25T00:38:01 | 2017-04-25T00:38:01 | 80,165,960 | 1 | 4 | null | 2017-04-20T22:40:19 | 2017-01-26T23:30:20 | CSS | UTF-8 | Python | false | false | 211 | py |
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
    """Render the application's landing page."""
    return render_template('index.html')
if __name__ == '__main__':
    # disable directly running this module; it is meant to be imported as a package
    pass
| [
"lampwins@gmail.com"
] | lampwins@gmail.com |
0bf3f23db7e2058769e4e2f2eeb45303b302ee3a | 6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a | /azure-mgmt-monitor/azure/mgmt/monitor/models/metric_alert_status_properties_py3.py | 5990828126e4ce5a1944e301d027d2e3f9f2d076 | [
"MIT"
] | permissive | ashirey-msft/azure-sdk-for-python | d92381d11c48f194ec9f989f5f803db614fb73f2 | e04778e13306dad2e8fb044970215bad6296afb6 | refs/heads/master | 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 | MIT | 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null | UTF-8 | Python | false | false | 1,278 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MetricAlertStatusProperties(Model):
    """An alert status properties.
    :param dimensions:
    :type dimensions: dict[str, str]
    :param status: status value
    :type status: str
    :param timestamp: UTC time when the status was checked.
    :type timestamp: datetime
    """
    # msrest (de)serialization map: python attribute -> wire key and type.
    # NOTE: this class is AutoRest-generated; hand edits are lost on regeneration.
    _attribute_map = {
        'dimensions': {'key': 'dimensions', 'type': '{str}'},
        'status': {'key': 'status', 'type': 'str'},
        'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
    }
    def __init__(self, *, dimensions=None, status: str=None, timestamp=None, **kwargs) -> None:
        # All fields are optional; extra kwargs are forwarded to msrest's Model.
        super(MetricAlertStatusProperties, self).__init__(**kwargs)
        self.dimensions = dimensions
        self.status = status
        self.timestamp = timestamp
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
cb59d89b368e5ee2b874cd455bb6ac8c130f4f51 | ce6f72cdc8b72d05fad900d2aaaffdbdf05b1d05 | /0x05-python-exceptions/101-main.py | 187409e6c781b65b8d5bd19da5deaafb8efe771f | [] | no_license | Cu7ious/holbertonschool-higher_level_programming | 4ae7f145f88b333cbd93e3a151c8a1892f942adb | b86439b7c2e4b3d199dbd638888524579aa69de9 | refs/heads/master | 2020-03-09T13:11:00.370612 | 2018-09-09T02:17:26 | 2018-09-09T02:17:26 | 128,804,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | #!/usr/bin/python3
safe_function = __import__('101-safe_function').safe_function
def my_div(a, b):
    """Return ``a / b``; lets ZeroDivisionError propagate when ``b`` is zero."""
    quotient = a / b
    return quotient
result = safe_function(my_div, 10, 2)
print("result of my_div: {}".format(result))
result = safe_function(my_div, 10, 0)
print("result of my_div: {}".format(result))
def print_list(my_list, len):
    """Print the first ``len`` items of ``my_list`` and return ``len``.

    NOTE: the second parameter shadows the ``len`` builtin; the name is
    kept for backward compatibility with existing callers.
    """
    for position in range(len):
        print(my_list[position])
    return len
result = safe_function(print_list, [1, 2, 3, 4], 10)
print("result of print_list: {}".format(result))
| [
"328@holbertonschool.com"
] | 328@holbertonschool.com |
1b775ed02fe183a9ce73135aef444f8280749b75 | 677b9b1aab74d998fa0dd26b0309ed5bbe8775e9 | /T53/webapp/traders/admin.py | 22a67f4b9eb4c73f951d58e37fc1992fd92d55f9 | [
"MIT"
] | permissive | University-of-Petroleum-Energy-Studies/Hackathon_5.0 | db7291a8d69569322dd3dbd7547e74d6460b9275 | 6a5e6547c4aaf9a1fc2cb8ed49fa5fd54ad7c351 | refs/heads/master | 2023-03-11T20:22:44.770520 | 2021-02-22T17:47:58 | 2021-02-22T17:47:58 | 340,362,356 | 2 | 69 | MIT | 2021-03-03T17:16:38 | 2021-02-19T12:27:01 | HTML | UTF-8 | Python | false | false | 168 | py | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Cart)
admin.site.register(CartItem)
admin.site.register(Order)
| [
"500077185@stu.upes.ac.in"
] | 500077185@stu.upes.ac.in |
64547fc947b04b977f1f3b6781322b2e9c136185 | 45da9cd96c91be944ecbd0d5b0b1a7dfc975ed8f | /octavia/tests/unit/api/v2/types/test_health_monitors.py | d4a706f7cca992d2bc714d30f654a00078b0a461 | [
"Apache-2.0"
] | permissive | mail2nsrajesh/octavia | 7635be2f69de18c1d10d218c3c2f3122f343490a | 7466016ae982af2a560a94327f9e63a7e7151cc5 | refs/heads/master | 2023-08-28T15:22:00.984583 | 2017-06-29T22:04:35 | 2017-06-29T22:04:35 | 96,141,893 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,946 | py | # Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from wsme import exc
from wsme.rest import json as wsme_json
from wsme import types as wsme_types
from octavia.api.v2.types import health_monitor as hm_type
from octavia.common import constants
from octavia.tests.unit.api.v2.types import base
class TestHealthMonitor(object):
    """Shared negative-input checks for health monitor API types.

    Mixin: subclasses set ``_type`` to the wsme type under test; POST
    payloads additionally need the mandatory ``type`` and ``pool_id`` keys.
    """
    # Set by the concrete subclass (HealthMonitorPOST / HealthMonitorPUT).
    _type = None
    def test_invalid_type(self):
        body = {"delay": 1, "timeout": 1, "max_retries": 1}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": 1, "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)
    def test_invalid_delay(self):
        body = {"delay": "one", "timeout": 1, "max_retries": 1}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
    def test_invalid_timeout(self):
        body = {"delay": 1, "timeout": "one", "max_retries": 1}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
    def test_invalid_max_retries_down(self):
        body = {"delay": 1, "timeout": 1, "max_retries": "one"}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
    def test_invalid_max_retries(self):
        body = {"delay": 1, "timeout": 1, "max_retries": "one"}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(ValueError, wsme_json.fromjson, self._type, body)
    def test_invalid_http_method(self):
        body = {"delay": 1, "timeout": 1, "max_retries": 1,
                "http_method": 1}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)
    def test_invalid_url_path(self):
        body = {"delay": 1, "timeout": 1, "max_retries": 1, "url_path": 1}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)
    def test_invalid_expected_codes(self):
        body = {"delay": 1, "timeout": 1, "max_retries": 1,
                "expected_codes": "lol"}
        if self._type is hm_type.HealthMonitorPOST:
            body.update({"type": constants.PROTOCOL_HTTP,
                         "pool_id": uuidutils.generate_uuid()})
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)
class TestHealthMonitorPOST(base.BaseTypesTest, TestHealthMonitor):
    """POST-specific validation: mandatory fields, defaults, project_id."""
    _type = hm_type.HealthMonitorPOST
    def test_health_monitor(self):
        # Minimal valid payload deserializes and enables the monitor by default.
        body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
                "timeout": 1, "max_retries_down": 1, "max_retries": 1,
                "pool_id": uuidutils.generate_uuid()}
        hm = wsme_json.fromjson(self._type, body)
        self.assertTrue(hm.admin_state_up)
    def test_type_mandatory(self):
        body = {"delay": 80, "timeout": 1, "max_retries": 1,
                "pool_id": uuidutils.generate_uuid()}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)
    def test_delay_mandatory(self):
        body = {"type": constants.HEALTH_MONITOR_HTTP, "timeout": 1,
                "max_retries": 1, "pool_id": uuidutils.generate_uuid()}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)
    def test_timeout_mandatory(self):
        body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
                "max_retries": 1, "pool_id": uuidutils.generate_uuid()}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)
    def test_max_retries_mandatory(self):
        body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
                "timeout": 1, "pool_id": uuidutils.generate_uuid()}
        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
                          body)
    def test_default_health_monitor_values(self):
        # http_method = 'GET'
        # url_path = '/'
        # expected_codes = '200'
        # max_retries_down = 3
        # admin_state_up = True
        # The above are not required but should have the above example defaults
        body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
                "timeout": 1, "max_retries": 1,
                "pool_id": uuidutils.generate_uuid()}
        hmpost = wsme_json.fromjson(self._type, body)
        self.assertEqual('GET', hmpost.http_method)
        self.assertEqual('/', hmpost.url_path)
        self.assertEqual('200', hmpost.expected_codes)
        self.assertEqual(3, hmpost.max_retries_down)
        self.assertTrue(hmpost.admin_state_up)
    def test_non_uuid_project_id(self):
        # project_id is passed through as-is, even when it is not a UUID.
        body = {"type": constants.HEALTH_MONITOR_HTTP, "delay": 1,
                "timeout": 1, "max_retries_down": 1, "max_retries": 1,
                "project_id": "non-uuid",
                "pool_id": uuidutils.generate_uuid()}
        hm = wsme_json.fromjson(self._type, body)
        self.assertEqual(hm.project_id, body['project_id'])
class TestHealthMonitorPUT(base.BaseTypesTest, TestHealthMonitor):
    """PUT-specific checks for the health monitor API type."""
    _type = hm_type.HealthMonitorPUT
    def test_health_monitor(self):
        # Fields omitted from a PUT payload stay Unset rather than defaulted.
        payload = {"http_method": constants.HEALTH_MONITOR_HTTP_METHOD_HEAD}
        monitor = wsme_json.fromjson(self._type, payload)
        self.assertEqual(wsme_types.Unset, monitor.admin_state_up)
| [
"flux.adam@gmail.com"
] | flux.adam@gmail.com |
ce730cf624457dbefcf7601c397a5f443a058437 | 5f3c8eddb8c5a14fb3b5931f332d401207666036 | /src/core/prometheus/server/multiserver.py | dd17e6f81fe23066c61aa76845e3af8a1a2d5313 | [
"Apache-2.0"
] | permissive | hwinther/lanot | dec8fe48efb6245af009bedf65b2bc089e92efa0 | f6700cacb3946535081624467b746fdfd38e021d | refs/heads/master | 2021-03-24T12:02:47.530833 | 2019-05-01T11:56:05 | 2019-05-01T11:56:05 | 91,605,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | # coding=utf-8
import gc
gc.collect()
class WrappedServer(object):
    """Pair a server instance with the keyword arguments used to drive it."""
    def __init__(self, server, kwargs):
        """Store the server object and the kwargs dict forwarded to its loop hooks."""
        self.kwargs = kwargs
        self.server = server
class MultiServer(object):
    """Drive several wrapped servers through one shared cooperative loop."""
    def __init__(self):
        # Servers are registered with add() and serviced round-robin in start().
        self.wrapped_servers = []
    def add(self, server, **kwargs):
        """Register *server*; the kwargs are forwarded to each of its loop hooks."""
        self.wrapped_servers.append(WrappedServer(server, kwargs))
    def start(self):
        """Run pre_loop everywhere, tick all servers until one deactivates, then post_loop."""
        for wrapped in self.wrapped_servers:
            wrapped.server.pre_loop(**wrapped.kwargs)
            wrapped.server.loop_active = True
        running = True
        while running:
            for wrapped in self.wrapped_servers:
                wrapped.server.loop_tick(**wrapped.kwargs)
                if not wrapped.server.loop_active:
                    # One server asked to stop: end the whole loop.
                    running = False
                    break
            # Collect each pass -- presumably for memory-constrained targets.
            gc.collect()
        for wrapped in self.wrapped_servers:
            wrapped.server.post_loop(**wrapped.kwargs)
| [
"hc@wsh.no"
] | hc@wsh.no |
d3b5341008c433c4c9e272a4682211eb0a7769b7 | b993df6e8d563a75ea6f6ce40a41aa3e59675dbf | /20190826/swea 1959 teacher.py | fda05257747cebbaa7d780eed6d9350a18fa3e62 | [] | no_license | BuankerC/projectswea | 7d6a5f35b73e553037efd01d6a57811a9778834d | ea169d2352f3785ff2ce47475d203c3f751716cb | refs/heads/master | 2020-07-03T11:31:43.283821 | 2020-01-04T03:19:25 | 2020-01-04T03:19:25 | 201,892,552 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | def f(X, Y): # X 긴 리스트, Y 짧은 리스트
maxV = 0
for i in range(0, len(X)-len(Y)+1): # 긴 리스트에서 곱의 합을 구할 구간의 시작
s = 0
for j in range(0, len(Y)): # 짧은 리스트의 인덱스
s += X[i+j]*Y[j]
if maxV < s:
maxV = s
return maxV
T = int(input())
for tc in range(1, T+1):
N, M = map(int, input().split())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
if N > M:
r = f(A, B)
else:
r = f(B, A)
print('#{} {}'.format(tc, r)) | [
"ergaster91@gmail.com"
] | ergaster91@gmail.com |
8abdea02a3e0be361bce98471f2e44687fb8b1f8 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/costmanagement/azure-mgmt-costmanagement/generated_samples/mca_billing_account_query_grouping.py | 3e3dffe50f130a730bac03849e93ecc7ec43a642 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,873 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.costmanagement import CostManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-costmanagement
# USAGE
python mca_billing_account_query_grouping.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Query last month's usage for an MCA billing account, grouped by resource group."""
    # DefaultAzureCredential reads AZURE_CLIENT_ID / AZURE_TENANT_ID /
    # AZURE_CLIENT_SECRET from the environment (see the module docstring).
    client = CostManagementClient(
        credential=DefaultAzureCredential(),
    )
    # Sum PreTaxCost over TheLastMonth, grouped by the ResourceGroup dimension.
    response = client.query.usage(
        scope="providers/Microsoft.Billing/billingAccounts/12345:6789",
        parameters={
            "dataset": {
                "aggregation": {"totalCost": {"function": "Sum", "name": "PreTaxCost"}},
                "granularity": "None",
                "grouping": [{"name": "ResourceGroup", "type": "Dimension"}],
            },
            "timeframe": "TheLastMonth",
            "type": "Usage",
        },
    )
    print(response)
# x-ms-original-file: specification/cost-management/resource-manager/Microsoft.CostManagement/stable/2022-10-01/examples/MCABillingAccountQueryGrouping.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
9230dacc7da1f64be7a93e490cfe38d4a197efb9 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_1_1/IvanJobs/A.py | cdbbafacd6ac8a847e937d62c39bffd9717cbf23 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 530 | py | in_file = 'A-large.in'
out_file = 'A-small-attempt0.out'
def solve(fin, fout):
    """Solve GCJ 'Last Word': build the lexicographically largest string.

    Reads a case count and one word per case from *fin*; writes one
    ``Case #i: ...`` line per case to *fout*.
    """
    case_count = int(fin.readline().strip())
    for case_no in range(1, case_count + 1):
        word = fin.readline().strip()
        built = [word[0]]
        for ch in word[1:]:
            # A letter >= the current first letter goes in front, else at the back.
            if ch >= built[0]:
                built.insert(0, ch)
            else:
                built.append(ch)
        fout.write('Case #%d: %s\n' % (case_no, ''.join(built)))
with open(in_file, 'r') as fin, open(out_file, 'w') as fout:
solve(fin, fout) | [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
53b85586e8e6200bf65b2c761bf26cd0697947c3 | 3e276ce46afcdaf365fd62b45ceba19327535f14 | /src/plugins/github/libs/repo.py | bcb8e7fb11a49342478d116e5e4c779dd2d7f605 | [
"MIT"
] | permissive | 17Y9E81/QQ-GitHub-Bot | 1ca28ccc4b1a2bbbbb24419271389599dcd8ceb4 | 35c20d28aafaedc1813c6213ede9f2f51e56d5a2 | refs/heads/master | 2023-07-13T12:26:33.201661 | 2021-08-25T09:17:20 | 2021-08-25T09:17:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author : yanyongyu
@Date : 2021-03-12 15:36:14
@LastEditors : yanyongyu
@LastEditTime : 2021-06-15 22:16:20
@Description : None
@GitHub : https://github.com/yanyongyu
"""
__author__ = "yanyongyu"
from typing import Optional
from src.libs.github import Github
from .. import github_config as config
from src.libs.github.models import Repository
async def get_repo(owner: str,
                   repo_name: str,
                   token: Optional[str] = None) -> Repository:
    """Fetch ``owner/repo_name`` from GitHub.

    Authentication preference: an explicit ``token`` first, then the
    configured OAuth app credentials, then anonymous access.
    """
    if token:
        client = Github(token)
    elif config.github_client_id and config.github_client_secret:
        client = Github(config.github_client_id, config.github_client_secret)
    else:
        client = Github()
    async with client:
        return await client.get_repo(f"{owner}/{repo_name}", False)
| [
"yanyongyu_1@126.com"
] | yanyongyu_1@126.com |
c20cc8afb23cffd3e7ae7a58f51025f5392d97e7 | 8a1686aeeefa80afeb0aa9f45ed72a75883458c4 | /dit/other/tests/test_lautum_information.py | 704b2bffab2cb42985e37083b41e312dfa12d31b | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | heleibin/dit | 70afd57f31be346e48b9b28c67fd6e019132ac36 | ebd0c11600e559bf34cf12a6b4e451057838e324 | refs/heads/master | 2020-09-27T07:42:15.991500 | 2019-11-23T06:10:11 | 2019-11-23T06:10:11 | 226,466,522 | 1 | 0 | BSD-3-Clause | 2019-12-07T06:26:50 | 2019-12-07T06:26:50 | null | UTF-8 | Python | false | false | 550 | py | """
Tests for dit.multivariate.lautum_information.
"""
from __future__ import division
import pytest
from dit import Distribution as D
from dit.other import lautum_information as L
def test_lm1():
    """Check lautum information totals on an eight-outcome joint distribution."""
    outcomes = ['000', '001', '010', '011', '100', '101', '110', '111']
    pmf = [weight / 16 for weight in (3, 1, 1, 3, 1, 3, 3, 1)]
    dist = D(outcomes, pmf)
    expected = 0.20751874963942196
    assert L(dist) == pytest.approx(expected)
    assert L(dist, [[0], [1]]) == pytest.approx(0)
    assert L(dist, [[0], [1]], [2]) == pytest.approx(expected)
| [
"ryangregoryjames@gmail.com"
] | ryangregoryjames@gmail.com |
354fd35838836b5e7f00b6bfabc282a025f8a154 | c6e23f02b28604d43ad37cb339738560e8fefdee | /python3/python_open_file.py | b9b0275a6444dabcc5ba7fdefa3314ebc9c11899 | [] | no_license | testdata6/python-test | 5fcf3449c3ed84be3f0d526048bb2a26baf4fa7f | 3539ddfc70416943b28691c2cac9f7f03aad5991 | refs/heads/master | 2020-04-09T07:07:26.503612 | 2019-10-02T10:57:17 | 2019-10-02T10:57:17 | 160,141,238 | 0 | 3 | null | 2019-10-02T10:57:19 | 2018-12-03T06:10:20 | Python | UTF-8 | Python | false | false | 1,587 | py | #!/usr/bin/python
## Reading files
## Use open() function to read your files from the outside of python.
## We can open this file with below modes
## r = read the information
## w = write the information
## a = Append information
## r+ = read and write
## close() Function is use to close your open file.
#-------------------
## In this example,try to read file "employees.txt"
#open("employees.txt", "r")
## store in variable.
employee_file = open("hello-string.py", "r")
print(employee_file.readable()) # True when the file is opened in a readable mode.
print(employee_file.read()) # Reads the whole file and prints its contents.
print(employee_file.readline()) # Empty string: read() already moved the cursor to end-of-file.
employee_file.close()
#-------------------
print("---------------")
employee_file = open("hello-string.py", "r")
print(employee_file.readline()) # Prints the first line of the file.
employee_file.close()
#-------------------
print("---------------")
employee_file = open("hello-string.py", "r")
print(employee_file.readline()) # Prints the first line of the file.
print(employee_file.readline()) # Prints the second line (readline advances the cursor).
employee_file.close()
#-------------------
print("---------------")
employee_file = open("hello-string.py", "r")
print(employee_file.readlines()) # Prints every line collected into a list.
employee_file.close()
#-------------------
print("---------------")
employee_file = open("hello-string.py", "r")
print(employee_file.readlines()[1]) # Prints one element of that list (the second line).
employee_file.close()
"amit.ganvir6@gmail.com"
] | amit.ganvir6@gmail.com |
1c20b1148f8f1df51f01782f03d855c1f2c26a1b | 82aace1431e0af949b1294d979a16f8dc18f48c2 | /Python_OOP_Softuni/Decorators_Exercises/venv/even_params.py | 9ac76d3d246628be691e00ddf5d93d62b584cc0b | [
"MIT"
] | permissive | borisboychev/SoftUni | 6778450417f889f8e89c709897b9e26c7129dbf6 | 22062312f08e29a1d85377a6d41ef74966d37e99 | refs/heads/master | 2023-03-27T13:11:17.378197 | 2021-03-26T09:14:25 | 2021-03-26T09:14:25 | 295,463,442 | 1 | 0 | null | 2020-10-12T12:54:13 | 2020-09-14T15:46:12 | Python | UTF-8 | Python | false | false | 520 | py | def even_parameters(func):
def wrapper(*args):
evens = [x for x in args if isinstance(x, int) and x % 2 == 0]
if len(evens) != len(args):
return f"Please use only even numbers!"
return func(*args)
return wrapper
@even_parameters
def add(a, b):
return a + b
print(add(2, 4))
print(add("Peter", 1))
@even_parameters
def multiply(*nums):
result = 1
for num in nums:
result *= num
return result
print(multiply(2, 4, 6, 8))
print(multiply(2, 4, 9, 8))
| [
"borisboychev007@Gmail.com"
] | borisboychev007@Gmail.com |
a956f55dd99c58bf49160ba1899bf027d6403a56 | 4e006370c50da435941297e940b742d5c5ef124c | /reference-code/puppy/model/query.py | 2afd810ab94bac1c6c14627b11ba119b5a4f29ef | [
"MIT"
] | permissive | Granvanoeli/ifind | 12328bec166483f2c6a0c29d2026bfca0097bb53 | ed72aee466649bd834d5b4459eb6e0173df6e2ec | refs/heads/master | 2021-01-17T23:16:39.122348 | 2015-03-11T11:37:35 | 2015-03-11T11:37:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,659 | py | # -*- coding: utf8 -*-
import urllib
class Query(object):
    """
    OpenSearch Query.
    Models an OpenSearch Query element.
    See: http://www.opensearch.org/Specifications/OpenSearch/1.1#OpenSearch_Query_element
    """
    def __init__(self, search_terms):
        """
        Constructor for Query.
        Parameters:
        * search_terms (str): the search terms of the query
        """
        super(Query, self).__init__()
        self.search_terms = search_terms
        # Paging/result metadata, all defaulted; callers populate as needed.
        self.count = 0
        self.start_index = 0
        self.start_page = 0
        self.language = ''
        self.service = ''
        self.suggestions = {}
        # Deferred import so merely importing this module does not pull in
        # the tokenizer package (and its dependencies).
        from puppy.query.tokenizer import BasicTokenizer
        self.tokenizer = BasicTokenizer()
    def __eq__(self, q):
        # Compares by search_terms; accepts either another Query or a raw value.
        a = self.search_terms
        if isinstance(q, Query):
            b = q.search_terms
        else:
            b = q
        return a == b
    def __hash__(self):
        return hash(self.search_terms)
    def url_quote(self):
        # NOTE(review): urllib.quote is Python 2 only; on Python 3 this would
        # need urllib.parse.quote.
        return urllib.quote(self.search_terms)
    def lower(self):
        # Returns a NEW Query with lower-cased terms; self is unchanged.
        return Query(self.search_terms.lower())
    def __str__(self):
        return self.search_terms
    def tokenize(self):
        # Delegates to the BasicTokenizer instance created in __init__.
        return self.tokenizer(self.search_terms)
    def write_xml(self):
        """
        Creates XML for OpenSearch Query.
        Returns:
        * query_xml (str): OpenSearch Query as XML
        TODO code Query.write_xml()
        """
        pass
    @staticmethod
    def parse_xml(self, oss_xml):
        """
        Parse OpenSearch Query XML.
        Parameters:
        * oss_xml (str): OpenSearch Query XML
        Returns:
        * puppy.model.OpenSearch.Query
        TODO code Query.parse_xml()
        """
        # NOTE(review): a @staticmethod taking ``self`` looks like a mistake --
        # probably meant to be a @classmethod or to drop the parameter.
        pass
| [
"leifos@acm.org"
] | leifos@acm.org |
34885a081c4a4f125fe7154617bfd4f10dd4e75c | ff294d3e93cba8d03bfc7ac9e3d99e8a7d04acb4 | /wukong.py | 7be67d3e98163eb46ca73e328417b6b836b08235 | [
"MIT"
] | permissive | weixiaopassking/wukong-robot | 65dd1aa2822bee112ec2611af2b20e6e68bcd8c8 | 3a9732472ec64504b2879ba8f47d73e1bac03612 | refs/heads/master | 2020-06-02T11:11:28.663600 | 2019-02-23T13:24:19 | 2019-02-23T13:24:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,641 | py | # -*- coding: utf-8-*-
from snowboy import snowboydecoder
from robot import config, utils, constants, logging, statistic, Player
from robot.Updater import Updater
from robot.ConfigMonitor import ConfigMonitor
from robot.Conversation import Conversation
from server import server
from watchdog.observers import Observer
from subprocess import call
import sys
import os
import signal
import yaml
import requests
import hashlib
import os
import fire
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logger = logging.getLogger(__name__)
class Wukong(object):
    # Top-level application object: wires together configuration, the
    # Conversation engine, snowboy hotword detection, the web server and
    # the CLI commands exposed through python-fire.
    def init(self):
        """Initialise config, the Conversation, the greeting and config-file watchers."""
        # NOTE(review): ``conversation`` is never assigned in this method;
        # the global declaration looks vestigial.
        global conversation
        self.detector = None
        self._interrupted = False
        print('''
********************************************************
* wukong-robot - 中文语音对话机器人 *
* (c) 2019 潘伟洲 <m@hahack.com> *
* https://github.com/wzpan/wukong-robot.git *
********************************************************
如需退出,可以按 Ctrl-4 组合键。
''')
        config.init()
        self._conversation = Conversation()
        self._conversation.say('{} 你好!试试对我喊唤醒词叫醒我吧'.format(config.get('first_name', '主人')), True)
        # Watch the config and data directories so edits take effect live.
        self._observer = Observer()
        event_handler = ConfigMonitor(self._conversation)
        self._observer.schedule(event_handler, constants.CONFIG_PATH, False)
        self._observer.schedule(event_handler, constants.DATA_PATH, False)
        self._observer.start()
    def _signal_handler(self, signal, frame):
        """SIGINT handler: flag the main loop to stop and shut down watchers."""
        self._interrupted = True
        utils.clean()
        self._observer.stop()
    def _detected_callback(self):
        """Hotword detected: start recording unless muted or already recording."""
        if not utils.is_proper_time():
            logger.warning('勿扰模式开启中')
            return
        if self._conversation.isRecording:
            logger.warning('正在录音中,跳过')
            return
        Player.play(constants.getData('beep_hi.wav'))
        logger.info('开始录音')
        self._conversation.interrupt()
        self._conversation.isRecording = True;
    def _do_not_bother_on_callback(self):
        """Hotword callback: enable do-not-disturb mode."""
        utils.do_not_bother = True
        Player.play(constants.getData('off.wav'))
        logger.info('勿扰模式打开')
    def _do_not_bother_off_callback(self):
        """Hotword callback: disable do-not-disturb mode."""
        utils.do_not_bother = False
        Player.play(constants.getData('on.wav'))
        logger.info('勿扰模式关闭')
    def _interrupt_callback(self):
        # Polled by the detector loop; True once SIGINT was received.
        return self._interrupted
    def run(self):
        """Entry point: initialise, install signal handling, start server and detector."""
        self.init()
        # capture SIGINT signal, e.g., Ctrl+C
        signal.signal(signal.SIGINT, self._signal_handler)
        # site
        server.run(self._conversation, self)
        statistic.report(0)
        self.initDetector()
    def initDetector(self):
        """(Re)create the snowboy hotword detector and enter its blocking loop."""
        if self.detector is not None:
            self.detector.terminate()
        # Three models: wake word, do-not-disturb on, do-not-disturb off --
        # order must match the detected_callback list below.
        models = [
            constants.getHotwordModel(config.get('hotword', 'wukong.pmdl')),
            constants.getHotwordModel(utils.get_do_not_bother_on_hotword()),
            constants.getHotwordModel(utils.get_do_not_bother_off_hotword())
        ]
        self.detector = snowboydecoder.HotwordDetector(models, sensitivity=config.get('sensitivity', 0.5))
        # main loop
        try:
            self.detector.start(detected_callback=[self._detected_callback,
                                                   self._do_not_bother_on_callback,
                                                   self._do_not_bother_off_callback],
                                audio_recorder_callback=self._conversation.converse,
                                interrupt_check=self._interrupt_callback,
                                silent_count_threshold=config.get('silent_threshold', 15),
                                recording_timeout=config.get('recording_timeout', 5) * 4,
                                sleep_time=0.03)
            self.detector.terminate()
        except Exception as e:
            logger.critical('离线唤醒机制初始化失败:{}'.format(e))
    def md5(self, password):
        """Return the hex MD5 digest of *password* (UTF-8 encoded)."""
        return hashlib.md5(password.encode('utf-8')).hexdigest()
    def update(self):
        """Run a self-update and return its result."""
        updater = Updater()
        return updater.update()
    def fetch(self):
        """Fetch update information without applying it."""
        updater = Updater()
        updater.fetch()
    def restart(self):
        """Replace the current process with a fresh interpreter running the same argv."""
        logger.critical('程序重启...')
        python = sys.executable
        os.execl(python, python, * sys.argv)
if __name__ == '__main__':
if len(sys.argv) == 1:
wukong = Wukong()
wukong.run()
else:
fire.Fire(Wukong)
| [
"m@hahack.com"
] | m@hahack.com |
0c16a7a90a73e965be94888d55b81d775af1d692 | c1267fbec95318184e7388cddf9b7085f797d514 | /2022/11 November/db11292022.py | 24d6448cf5f12453bf1a06dd12662031c34363b4 | [
"MIT"
] | permissive | vishrutkmr7/DailyPracticeProblemsDIP | 1aedfd2e173847bf22989a6b0ec550acebb2bd86 | 2c365f633a1e1bee281fbdc314969f03b17ac9ec | refs/heads/master | 2023-05-31T23:49:52.135349 | 2023-05-28T09:32:12 | 2023-05-28T09:32:12 | 199,596,248 | 10 | 4 | MIT | 2022-11-02T21:31:59 | 2019-07-30T07:12:46 | Python | UTF-8 | Python | false | false | 1,371 | py | """
You are Given an image represented as a matrix. Each value in the matrix represents the color of
an individual pixel. Given a new color represented as an integer and a starting row and column,
transform every adjacent pixel to the starting pixel that has the same color to the new color.
Note: This is effectively implementing a “bucket fill” in a software like Microsoft paint.
Ex: Given the following image, row, column, and color…
image = [
[0,1,1],
[0,1,0],
[1,1,1]
], row = 1, column = 1, color = 3 modify image to be as follows...
image = [
[0, 3, 3],
[0, 3, 0],
[3, 3, 3],
].
"""
class Solution:
def floodFill(
self, image: list[list[int]], sr: int, sc: int, newColor: int
) -> list[list[int]]:
color = image[sr][sc]
if color == newColor:
return image
def dfs(r, c):
if image[r][c] != color:
return
image[r][c] = newColor
if r >= 1:
dfs(r - 1, c)
if r + 1 < len(image):
dfs(r + 1, c)
if c >= 1:
dfs(r, c - 1)
if c + 1 < len(image[0]):
dfs(r, c + 1)
dfs(sr, sc)
return image
# Test Cases
if __name__ == "__main__":
solution = Solution()
print(solution.floodFill([[0, 1, 1], [0, 1, 0], [1, 1, 1]], 1, 1, 3))
| [
"vishrutkmr7@gmail.com"
] | vishrutkmr7@gmail.com |
7a72814e45a60165eb83e2f468fe15e8a6e6f252 | 02952fc67147a2f11a9ed8c4eb29210bec5672ed | /business/service/controllers/asset.py | f8689535b840944700446821a8c74e7c96365255 | [] | no_license | cuijianzhe/cow | b110a70398b09a401dadc7d3ed24dfe2bae50f5b | 3539cab6e73571f84b7f17391d9a363a756f12e1 | refs/heads/main | 2023-06-04T10:33:33.975885 | 2021-06-19T10:40:36 | 2021-06-19T10:40:36 | 340,634,448 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | from django.db import transaction
from django.db.models import Q
from base import errors
from base import controllers as base_ctl
from business.service.models import ServiceAssetModel
from asset.manager.models import AssetModel
def create_service_asset(obj_id, asset_id, operator=None):
'''
创建服务关联资产模块
'''
query = {
'service_id': obj_id,
'asset_id': asset_id,
}
if ServiceAssetModel.objects.filter(**query).exists():
raise errors.CommonError('服务已关联此资产模块')
data = query
obj = base_ctl.create_obj(ServiceAssetModel, data, operator)
data = obj.to_dict()
return data
def delete_service_asset(obj_id, asset_id, operator=None):
'''
删除服务关联资产模块
'''
query = {
'service_id': obj_id,
'asset_id': asset_id,
}
obj = ServiceAssetModel.objects.filter(**query).first()
if not obj:
raise errors.CommonError('服务未关联此资产模块')
base_ctl.delete_obj(ServiceAssetModel, obj.id, operator)
def get_service_assets(obj_id, page_num=None, page_size=None, operator=None):
'''
获取服务关联资产模块列表
'''
batch_ids = ServiceAssetModel.objects.filter(service_id=obj_id)\
.values_list('asset_id', flat=True).all()
base_query = AssetModel.objects.filter(id__in=batch_ids)
total = base_query.count()
objs = base_ctl.query_objs_by_page(base_query, page_num, page_size)
data_list = []
for obj in objs:
data = obj.to_dict()
data_list.append(data)
data = {
'total': total,
'data_list': data_list,
}
return data
| [
"598941324@qq.com"
] | 598941324@qq.com |
df96f22537c11dc40db74f536f445dbf791445d9 | b1bc75cd16aaf393920c4b49f19696c14e762a4d | /src/Comprehensions_Exercise/08_Heroes_Inventory.py | 71fa4895f59a7ae8aaccaae8b621e4efd45def4b | [] | no_license | ivelinakaraivanova/SoftUniPythonAdvanced | 848ef5bd4332438a8b061ff5fb2d425103a1d4a4 | 0a90f30b724f716303dc33b2a6a6234f78218c5e | refs/heads/main | 2023-01-08T19:03:15.719055 | 2020-11-08T12:25:10 | 2020-11-08T12:25:10 | 311,052,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | names = input().split(", ")
inventory = {name: {} for name in names}
while True:
info = input()
if info == "End":
break
else:
split_info = info.split("-")
name = split_info[0]
item = split_info[1]
cost = int(split_info[2])
if item not in inventory[name]:
inventory[name][item] = cost
for name, items in inventory.items():
items_cost = 0
for cost in inventory[name].values():
items_cost += cost
print(f"{name} -> Items: {len(inventory[name])}, Cost: {items_cost}")
# [print(f"{name} -> Items: {len(inventory[name])}, Cost: {sum(inventory[name].values())}") for name in inventory]
| [
"73067985+ivelinakaraivanova@users.noreply.github.com"
] | 73067985+ivelinakaraivanova@users.noreply.github.com |
9b7f78505b524d6179cf5728fe0c1145ea7b6752 | d8346eaf1c910ff02c7b243692a2766b8b089f06 | /for-post/python-standard-library-inspect/s1-is-what/isfunction.py | 7024a5e72808a4b90f8fa453d4d5dbed0ed797ee | [] | no_license | dustinpfister/examples-python | 55304c99ba3af82cd8784ee98745546632155c68 | a9910ee05d4df524f951f61b6d9778531a58ccbf | refs/heads/master | 2023-03-06T21:10:18.888654 | 2021-02-26T20:32:52 | 2021-02-26T20:32:52 | 318,595,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | import inspect
def foo():
return 'bar'
print(inspect.isfunction(foo)) # True
print( inspect.isfunction('') ) # False
print( inspect.isfunction(42) ) # False
print( inspect.isfunction([]) ) # False
| [
"dustin.pfister@gmail.com"
] | dustin.pfister@gmail.com |
ce0b323cb50ab3b2911cba7b87a7286982cf0a08 | 85f5dff291acf1fe7ab59ca574ea9f4f45c33e3b | /api/tacticalrmm/accounts/migrations/0020_role_can_manage_roles.py | 2768c2e431be1755bae81a13959ab0f191794daa | [
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | sadnub/tacticalrmm | a4ecaf994abe39244a6d75ed2166222abb00d4f4 | 0af95aa9b1084973642da80e9b01a18dcacec74a | refs/heads/develop | 2023-08-30T16:48:33.504137 | 2023-04-10T22:57:44 | 2023-04-10T22:57:44 | 243,405,684 | 0 | 2 | MIT | 2020-09-08T13:03:30 | 2020-02-27T01:43:56 | Python | UTF-8 | Python | false | false | 387 | py | # Generated by Django 3.2.1 on 2021-05-11 17:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0019_user_role'),
]
operations = [
migrations.AddField(
model_name='role',
name='can_manage_roles',
field=models.BooleanField(default=False),
),
]
| [
"dcparsi@gmail.com"
] | dcparsi@gmail.com |
70c795892317f9d87e01e06f716470718f0d8793 | 2305ce053d16652d31823bd07faf38553b4f9b63 | /books/AutomateTheBoringStuffWithPython/Chapter15/P06_multithreading.py | 07eaa3a08feacddc40392fc2cca9fa838ed80fa3 | [
"MIT"
] | permissive | leihuagh/python-tutorials | cff3c5e250a152252d4b725bca19f55721483249 | 33831b983d7bd1491e367b6c7654e687d5ba709b | refs/heads/master | 2020-03-29T17:59:31.226400 | 2018-09-24T08:41:26 | 2018-09-24T08:41:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | # This program uses the threading module to manipulate threads
import threading
# Passing Arguments to the Thread's Target Function
threadObj = threading.Thread(target=print, args=['Cats', 'Dogs', 'Frogs'], kwargs={'sep': ' & '})
threadObj.start()
| [
"jose@JoseALerma.com"
] | jose@JoseALerma.com |
fff3485b667977e7f7ca2a6ff3f4a59c0275e427 | fde4ec12ffb460d8c952923571c25ff7e231e274 | /lib/support.py | 86fe69ac2a2e6a77842060b848371d5780cefead | [] | no_license | alexdawn/final-lib-mod-tools | 0b8188c44a053acb90ab5d5ab3156f97beb0d636 | 3ba136fee9db43237e87eb48724f1a2aff02fafb | refs/heads/main | 2023-05-06T11:29:41.450438 | 2021-05-22T16:32:47 | 2021-05-22T16:32:47 | 347,744,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,553 | py | from utilities import check_eof
from flags import nationalities, unit_types, support_flags
def read_support(file):
supports = []
header = file.read(13)
if header != b'SUPPORT\x00\x001.0\x00':
raise RuntimeError("File is not a SUPPORT.DAT")
count = int.from_bytes(file.read(4), 'little')
for i in range(count):
cid = int.from_bytes(file.read(2), 'little')
flags = int.from_bytes(file.read(4), 'little')
if flags & (0xffff - 0x600b) != 0:
raise RuntimeError(f"Unknown flag, {cid} {flags}")
flag_text = []
for f, v in support_flags.items():
if f & flags:
flag_text.append(v)
licon = int.from_bytes(file.read(4), 'little')
ricon = int.from_bytes(file.read(4), 'little')
nation = int.from_bytes(file.read(4), 'little')
string_length1 = int.from_bytes(file.read(1), 'little')
string1 = bytes(file.read(string_length1)).decode('utf-8')
string_length2 = int.from_bytes(file.read(1), 'little')
string2 = bytes(file.read(string_length2)).decode('utf-8')
cost = int.from_bytes(file.read(2), 'little')
breakpoint = int.from_bytes(file.read(1), 'little')
# TODO handle special morale values
#define CLAN_MORALE -1
#define MORALE_SPECIAL -2
#define ALLWAYS_PASS_MORALE 0
morale = int.from_bytes(file.read(1), 'little', signed=True)
vp = int.from_bytes(file.read(1), 'little')
u_count = int.from_bytes(file.read(1), 'little')
allow_nation = int.from_bytes(file.read(4), 'little') # allow nat flag
nation_flags = []
for f, v in nationalities.items():
if f & allow_nation:
nation_flags.append(v)
allow_type = int.from_bytes(file.read(4), 'little') # allow type flag
allow_type_flags = []
for f, v in unit_types.items():
if f & allow_type:
allow_type_flags.append(v)
turns_to_build = int.from_bytes(file.read(2), 'big')
uids = [
int.from_bytes(file.read(2), 'little')
for i in range(u_count)
]
#unames = [unit_lookup[x] for x in uids]
#detachments[cid] = string1
supports.append({
"id": cid,
"name1": string1,
"name2": string2,
"cost": cost,
"breakpoint": breakpoint,
"morale": morale,
"victory_points": vp,
"units": uids,
"flags": flag_text,
"licon": licon,
"ricon": ricon,
"nation": nation,
"allow_nation": nation_flags,
"type": allow_type_flags,
"time_to_build": turns_to_build
})
check_eof(file)
return supports
def write_support(file, data):
support_flags_lookups = {v: k for k, v in support_flags.items()}
nationalities_lookup = {v: k for k, v in nationalities.items()}
unit_types_lookup = {v: k for k, v in unit_types.items()}
file.write(b'SUPPORT\x00\x001.0\x00')
file.write(len(data).to_bytes(4, 'little'))
for d in data:
file.write(d['id'].to_bytes(2, 'little'))
#encode flags
flag_value = 0
for x in d['flags']:
flag_value += support_flags_lookups[x]
file.write(flag_value.to_bytes(4, 'little'))
file.write(d['licon'].to_bytes(4, 'little'))
file.write(d['ricon'].to_bytes(4, 'little'))
file.write(d['nation'].to_bytes(4, 'little'))
file.write(len(d['name1']).to_bytes(1, 'little'))
file.write(d['name1'].encode('ascii'))
file.write(len(d['name2']).to_bytes(1, 'little'))
file.write(d['name2'].encode('ascii'))
file.write(d['cost'].to_bytes(2, 'little'))
file.write(d['breakpoint'].to_bytes(1, 'little'))
file.write(d['morale'].to_bytes(1, 'little', signed=True))
file.write(d['victory_points'].to_bytes(1, 'little'))
file.write(len(d['units']).to_bytes(1, 'little'))
# nation flag
flag_value = 0
for x in d['allow_nation']:
flag_value += nationalities_lookup[x]
file.write(flag_value.to_bytes(4, 'little'))
# allow type flags
flag_value = 0
for x in d['type']:
flag_value += unit_types_lookup[x]
file.write(flag_value.to_bytes(4, 'little'))
file.write(d['time_to_build'].to_bytes(2, 'big'))
for u in d['units']:
file.write(u.to_bytes(2, 'little'))
| [
"al_4242@hotmail.co.uk"
] | al_4242@hotmail.co.uk |
8d07e5610c20f8a30a00db726be0afc5b8e0e99d | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc087/C/2448233.py | 0025206cebd9820324b3c8fdd297e25509fe70e7 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | N, L = map(int, input().split())
make = lambda:[None, None, 0]
root = make()
def construct(s):
n = root
for i in s:
if n[i] is None:
n[i] = n = make()
else:
n = n[i]
n[2] = 1
for i in range(N):
s = map(int, input())
construct(s)
caps = {}
st = [(root, 0, 0)]
while st:
n, i, l = st.pop()
if i:
if n[1] is None:
caps[L - l] = caps.get(L - l, 0) + 1
else:
if not n[1][2]:
st.append((n[1], 0, l+1))
else:
st.append((n, 1, l))
if n[0] is None:
caps[L - l] = caps.get(L - l, 0) + 1
else:
if not n[0][2]:
st.append((n[0], 0, l+1))
ans = 0
for v in caps:
k = caps[v]
if k % 2 == 0:
continue
v -= 1
r = 1
while v % 4 == 3:
v //= 4
r *= 4
if v % 4 == 1:
ans ^= r * 2
else:
ans ^= r
print('Alice' if ans else 'Bob') | [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
8d2737a017cfb3ed8e38396bf711a649d34cd224 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/pybites/intermediate/290_v2/test_class_rosters.py | 867ffcc720b31176660563cf54d6c02a576c0193 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,294 | py | # _______ p__
#
# ____ ? _______ ?
#
# full """
# 17409,"Matheson, Rick",,,,,,,,,,
# 36283,"Jones, Tom",SCI09-4 - SU,MATH09-2 - PH,TA09-1 - AB,IS09-4 - LM,SCI09-3 - NdN,MATH09-2 - RB,DE09-3 - KmQ,ENG09-3 - KaR,PE09-3 - PS
# 99415,"Blake, Arnold",,,,,,,,,,
# """ # noqa E501
# p.. """
# 17409,"Jones, Tom",,,,,,,,,,
# 17409,"Matheson, Rick",,IS09-1 - BR,,SCI09-4 - SU,MATH09-2 - RB,,ENG09-4 - LE,,PE09-1 - MR,
# 99415,"Blake, Arnold",,,,,,,,,,
# """ # noqa E501
# empty """
# 99415,"Blake, Arnold",,,,,,,,,,
# 21692,"Prest, Phil",,,,,,,,,,
# 36283,"Jones, Tom",,,,,,,,,,
# """ # noqa E501
#
#
# ?p__.m__.p. "content, expected", [
# (? 'SCI09-4,2020,36283',
# 'MATH09-2,2020,36283',
# 'TA09-1,2020,36283',
# 'IS09-4,2020,36283',
# 'SCI09-3,2020,36283',
# 'MATH09-2,2020,36283',
# 'DE09-3,2020,36283',
# 'ENG09-3,2020,36283',
# 'PE09-3,2020,36283' ),
# (p.., 'IS09-1,2020,17409',
# 'SCI09-4,2020,17409',
# 'MATH09-2,2020,17409',
# 'ENG09-4,2020,17409',
# 'PE09-1,2020,17409' ),
# (? # list),
#
# ___ test_class_rosters content e.. tmp_path
# csvfile ? / "content"
# ?.w.. ?.l..
# a.. ? ?
# ... a.. __ e..
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
67079c5188c8255386f1a779167f04afdd8b0222 | a5103b7d5066138ac1a9aabc273361491a5031cd | /daily/8/pytorch_tutoral/nmt/utils.py | 18f8ce03f52f5026e6e9c64dfede2228fae0242d | [] | no_license | mckjzhangxk/deepAI | 0fa2f261c7899b850a4ec432b5a387e8c5f13e83 | 24e60f24b6e442db22507adddd6bf3e2c343c013 | refs/heads/master | 2022-12-13T18:00:12.839041 | 2021-06-18T03:01:10 | 2021-06-18T03:01:10 | 144,862,423 | 1 | 1 | null | 2022-12-07T23:31:01 | 2018-08-15T14:19:10 | Jupyter Notebook | UTF-8 | Python | false | false | 1,250 | py | import torch
def subseqenceMask(x):
'''
x:(N,T)
return:
(1,T,T)
'''
T=x.size(-1)
return torch.tril(torch.ones((T,T))).to(x.device).unsqueeze(0).byte()
def standardMask(x,paddingidx):
'''
x:(N,T)
paddingidx:set coresponding mask=0 when occur
paddingidx
return:(N,1,T)
'''
return (x!=paddingidx).unsqueeze(1).byte()
def makeMask(x,y,paddingidx):
xmask=standardMask(x,paddingidx)
ymask=standardMask(y,paddingidx)&subseqenceMask(y)
return xmask,ymask
def translation(x,parser,eos):
'''
x:(N)
'''
ret=[]
N=x.shape[0]
for line in x:
wordlist=[parser.vocab.itos[w] for w in line]
if eos in wordlist:
wordlist=wordlist[:wordlist.index(eos)]
l=' '.join(wordlist)
ret.append(l)
return ret
if __name__=='__main__':
import matplotlib.pyplot as plt
N,T=38,25
X=torch.randint(0,10,(N,T))
mask1=standardMask(X,0)
mask2=subseqenceMask(X)
assert mask1.shape==(N,1,T)
assert mask2.shape==(1,T,T)
plt.figure()
plt.imshow(mask1[0],cmap='gray')
plt.figure()
plt.imshow(mask2[0],cmap='gray')
plt.figure()
plt.imshow(mask1[0]&mask2[0],cmap='gray')
plt.show()
| [
"mckj_zhangxk@163.com"
] | mckj_zhangxk@163.com |
9b7bcbbd5ad71852ef309eabb6c4791ae139d2ec | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R2/benchmark/startQiskit243.py | fd8225ab38340539504426ed59c0d2a4ae4171d8 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,565 | py | # qubit number=3
# total number=48
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.cx(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.cx(input_qubit[0],input_qubit[2]) # number=45
prog.x(input_qubit[2]) # number=46
prog.cx(input_qubit[0],input_qubit[2]) # number=47
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.cx(input_qubit[0],input_qubit[2]) # number=33
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit243.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
7e348e9e9eefeca6a65cfeb167a6469c154902b8 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/66188be9f74aff684d30eb43f897a3a8f4f5af3aMultipleBrokersVirtualTopic.py | 66188be9f74aff684d30eb43f897a3a8f4f5af3a | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 3,763 | py | #!/usr/bin/env python
# Massimo Paladin
# Massimo.Paladin@cern.ch
import os
from MultipleProducerConsumer import MultipleProducerConsumer, TimeoutException
import sys
import time
from utils.Timer import Timer
import logging
logging.basicConfig()
log = logging.getLogger(__file__)
class MultipleBrokersVirtualTopic(MultipleProducerConsumer):
def __init__(self, mainBrokerName, mainBrokerHost, otherBrokers, port=6163, destination='test.virtualtopic', vtPrefix='Consumer', hostcert=None, hostkey=None, messages=10, timeout=15):
MultipleProducerConsumer.__init__(self)
self.mainBrokerName = mainBrokerName
self.mainBrokerHost = mainBrokerHost
self.otherBrokers = otherBrokers
self.port = port
self.destination = destination
self.vtPrefix = vtPrefix
self.hostcert = hostcert
self.hostkey = hostkey
self.messages = messages
self.timeout = timeout
def setup(self):
self.destinationTopic = '/topic/%s' % self.destination
if self.hostcert and self.hostkey:
self.setSSLAuthentication(self.hostcert, self.hostkey)
self.createBroker(self.mainBrokerName, self.mainBrokerHost, self.port)
for name, host in self.otherBrokers.items():
self.createBroker(name, host, self.port)
def run(self):
timer = Timer(self.timeout)
''' Starting consumers '''
for name, host in self.otherBrokers.items():
self.createConsumer(name,
'/queue/%s.%s.%s' % (self.vtPrefix, name, self.destination),
timer.left)
time.sleep(1)
''' Creating producer and sending messages '''
self.createProducer(self.mainBrokerName, self.destinationTopic, timer.left)
for i in range(self.messages):
self.sendMessage(self.mainBrokerName,
self.destinationTopic,
{'persistent':'true'},
'testing-%s' % i)
self.waitForMessagesToBeSent(self.mainBrokerName,
self.destinationTopic,
self.messages)
for broker in self.otherBrokers:
self.waitForMessagesToArrive(broker, '/queue/%s.%s.%s' % (self.vtPrefix, broker, self.destination), self.messages, timer.left)
''' Wait a couple of seconds to see if we get duplicated '''
time.sleep(2)
for broker in self.otherBrokers:
self.assertMessagesNumber(broker, '/queue/%s.%s.%s' % (self.vtPrefix, broker, self.destination), self.messages)
def stop(self):
self.destroyAllBrokers()
if __name__ == '__main__':
log.setLevel(logging.INFO)
logging.getLogger('MultipleProducerConsumer').setLevel(logging.INFO)
broker = 'vtb-71'
brokerHost = 'vtb-generic-71'
brokers = {'vtb-71':'vtb-generic-71',
'vtb-72':'vtb-generic-72',
'vtb-73':'vtb-generic-73',
'vtb-74':'vtb-generic-74'}
# broker = 'gridmsg1'
# brokerHost = 'gridmsg101.cern.ch'
# brokers = {'gridmsg1':'gridmsg101.cern.ch',
# 'gridmsg2':'gridmsg102.cern.ch',
# 'auth':'broker.afroditi.hellasgrid.gr',
# 'srce':'msg.cro-ngi.hr'}
mbvt = MultipleBrokersVirtualTopic(broker, brokerHost, brokers, 6163)
mbvt.setup()
try:
mbvt.start()
except KeyboardInterrupt:
print "keyboard interrupt"
except TimeoutException, e:
print '%s' % e
except AssertionError, e:
print '%s' % e
mbvt.stop()
print 'Test passed!'
| [
"aliostad+github@gmail.com"
] | aliostad+github@gmail.com |
5e6ec243dd20422d54285f93ee30bdd41b8f52bb | 73e147e1d49656fafba5d4bf84df5ded2c4dca73 | /team_9/cocos/test/test_scene_add_rotated.py | 6147c4160ead9a0f710d5cc59d65fce542588dd8 | [
"LGPL-2.1-only",
"CC-BY-NC-4.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-SA-2.0",
"BSD-3-Clause"
] | permissive | Donnyvdm/dojo19 | 2278747366c57bfc80eb9ee28ca617ec0a79bae3 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | refs/heads/master | 2020-07-26T12:22:15.882800 | 2019-09-15T20:34:36 | 2019-09-15T20:34:36 | 208,642,183 | 1 | 0 | BSD-3-Clause | 2019-09-15T18:57:53 | 2019-09-15T18:57:52 | null | UTF-8 | Python | false | false | 1,083 | py | from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "t 0.1, s, q"
tags = "Scene, rotation"
import cocos
from cocos.director import director
from cocos.sprite import Sprite
from cocos.layer import *
import pyglet
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
x,y = director.get_window_size()
sprite1 = Sprite( 'grossini.png' , (x//4, y//2) )
sprite2 = Sprite( 'grossinis_sister1.png', (x//2, y//2) )
sprite3 = Sprite( 'grossinis_sister2.png', (x/(4/3.0), y//2) )
self.add( sprite2 )
self.add( sprite1 )
self.add( sprite3 )
def main():
director.init()
main_scene = cocos.scene.Scene()
main_scene.add( ColorLayer( 255, 0, 0, 255 ) )
l = TestLayer()
l.rotation = 45
main_scene.add( l )
director.run (main_scene)
if __name__ == '__main__':
main()
| [
"a.campello@wellcome.ac.uk"
] | a.campello@wellcome.ac.uk |
a03527cfea5f7c60f818ab1ab24315d3cf61bff2 | 7554a16508c7f57afe7e99a0ee1ddec5a7150018 | /test/email_tut.py | 65d4c1db9959fd303f315fb6fd5d25ac514706c0 | [] | no_license | csrgxtu/Hummable | 05414f6c8bc856d10d39af9997029f04293c79a9 | 17444c3f9a98149cf409fa72d3dc710e3957bf9f | refs/heads/master | 2021-01-23T00:14:46.615266 | 2017-04-13T16:36:26 | 2017-04-13T16:36:26 | 85,707,147 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,264 | py | import imaplib, struct, time
import email as Email
class Mail():
def __init__(self):
self.user = '*'
self.password = 'fuck'
# self.ser = serial.Serial('/dev/tty.usbmodem621', 9600)
self.M = imaplib.IMAP4_SSL('imap.gmail.com', '993')
self.M.login(self.user, self.password)
def get_emails(self, email_ids):
data = []
for e_id in email_ids:
_, response = self.M.fetch(e_id, '(UID BODY[TEXT])')
data.append(response[0][1])
return data
def get_subjects(self, email_ids):
subjects = []
for e_id in email_ids:
_, response = self.M.fetch(e_id, '(body[header.fields (from)])')
print('From', str(response[0][1]).split(' <')[1][0:-10])
_, response = self.M.fetch(e_id, '(body[header.fields (subject)])')
print('Subject', str(response[0][1][9:])[2:-9])
subjects.append(response[0][1][9:])
return subjects
def emails_from(self, name):
'''Search for all mail from name'''
status, response = self.M.search(None, '(FROM "%s")' % name)
email_ids = [e_id for e_id in response[0].split()]
print('Number of emails from %s: %i. IDs: %s' % (name, len(email_ids), email_ids))
return email_ids
def checkMail(self):
self.M.select('INBOX')
status, response = self.M.status('INBOX', "(UNSEEN)")
status, email_ids = self.M.search(None, '(UNSEEN)')
print(str(email_ids[0])[1:].replace("'", "").split(' '))
emails = self.get_subjects(str(email_ids[0])[1:].replace("'", "").split(' '))
for email in emails:
print(email)
# self.unRead = self.M.search(None, '(UNSEEN)')
# print(self.unRead[0].split())
# print(self.unRead[1][0].split())
# # return len(self.unRead[1][0].split())
# return self.unRead[1][0].split()[-1]
def sendData(self):
self.numMessages = self.checkMail()
# turn the string into packed binary data to send int
self.ser.write(struct.pack('B', self.numMessages))
email = Mail()
# check for new mail every minute
while 1:
mid = email.checkMail()
# result, data = email.M.uid('fetch', mid, '(RFC822)')
# print(result)
# # b = Email.message_from_string(data[0][1])
# b = Email.message_from_bytes(data[0][1])
# print(b['From'])
# print(b['Subject'])
# payloads = b.get_payload()
# for payload in payloads:
# # print(payload)
# print(payload.get_payload())
time.sleep(60) | [
"1246506786@qq.com"
] | 1246506786@qq.com |
5d5b6f3ee3813a35cf978786bddba84d270686ee | 7a3389ad132779916574091e3b909fed0b93ace7 | /pom/asker/asker_home_page.py | 2d9a3b14e47f57b210bdfa24a3a7ed1c9f9eef2e | [] | no_license | bomcon123456/GI_TestAutomationOnboard | 793f594e5209cff365d3089d3103fe83af253114 | dc8b1c0a8a8ac05a23aefc42052251febc8160d5 | refs/heads/master | 2020-09-04T05:34:38.936616 | 2019-11-05T11:14:16 | 2019-11-05T11:14:16 | 219,668,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | import time
from .base_asker_page import BaseAskerPage
class AskerHomePage(BaseAskerPage):
query_field = 'textarea[name=\'text\']'
start_query_button = '.gi-Button.gi-Button--accent.gi-Button--lg.u-width-100'
def login_and_query(self):
login_locator = '#test-login-button'
user = 'input[name=\'email\']'
password = 'input[name=\'password\']'
login_button = 'button#login-button'
self.asker.find_element(login_locator).click()
time.sleep(1)
self.asker.find_element(user).send_keys('askerSelenium2@gmail.com')
self.asker.find_element(password).send_keys('MotConVit123!@')
self.asker.find_element(login_button).click()
self.query()
def query(self):
problem_text = 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. ' \
'Donec bibendum, turpis dignissim lobortis dapibus, ' \
'libero arcu cursus elit, a luctus felis lacus et orci. ' \
'Aenean cursus, risus non sodales blandit, ' \
'dui nunc sagittis mi, ac gravida sapien magna at ipsum.'
self.asker.get_waited_visible_element(self.query_field).send_keys(problem_text)
self.asker.get_waited_clickable_element(self.start_query_button).click()
| [
"bomcon123456@gmail.com"
] | bomcon123456@gmail.com |
07fe709101a5b5e53ea109ffbf256a9ac0e00b8a | b76d4db5f996a431615af302a087bada90e7e802 | /Matplotlib_Learn/直方图均衡化.py | cab646d486f3d95af897b56a965c9317f805b3b4 | [] | no_license | budaLi/AI_learn | 1571b27810f004b4a00f9fe0f3dfe7472fa6ee65 | c8b1bf0d5dde7b02ff8b3d0121fd7b6b0ab06862 | refs/heads/master | 2020-09-20T05:42:55.405552 | 2019-12-03T04:42:13 | 2019-12-03T04:42:13 | 224,390,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,579 | py | # @Time : 2019/12/3 10:43
# @Author : Libuda
# @FileName: 直方图均衡化.py
# @Software: PyCharm
#直方图均衡化是指将一副图像的灰度直方图变平 使变换后的图像中每个灰度值的分布概率都相同
#目的是增强图像的对比度
# 如果一幅图像的灰度直方图几乎覆盖了整个灰度的取值范围,并且除了个别灰度值的个数较为突出,
# 整个灰度值分布近似于均匀分布,那么这幅图像就具有较大的灰度动态范围和较高的对比度,
# 同时图像的细节更为丰富。已经证明,仅仅依靠输入图像的直方图信息,就可以得到一个变换函数,
# 利用该变换函数可以将输入图像达到上述效果,该过程就是直方图均衡化。
from PIL import Image
from pylab import *
image_path=r'C:\Users\lenovo\PycharmProjects\AI_learn\Matplotlib_Learn\1.jpg'
im = array(Image.open(image_path).convert('L'))
def histeq(im,nbr_bins =256):
"""
对一副灰度图像进行直方图均值化
:param im:
:param nbr_bins:
:return:
"""
#计算图像的直方图
#histrgram 直方统计图函数
# histogram(a, bins=10, range=None, weights=None, density=False);
# a是待统计数据的数组;
# bins指定统计的区间个数;
# range是一个长度为2的元组,表示统计范围的最小值和最大值,默认值None,表示范围由数据的范围决定
# weights为数组的每个元素指定了权值, histogram()
# 会对区间中数组所对应的权值进行求和
# density为True时,返回每个区间的概率密度;为False,返回每个区间中元素的个数
print(im.shape)
imhist, bins = histogram(im.flatten(),nbr_bins,density=False)
print(imhist) #每个像素出现的次数
print(bins) #像素
#累计分布函数 (cumulative distribution function,简写为 cdf,将像素值的范围映射到目标范围的归一化操作
#cumsum 累加函数 https://blog.csdn.net/feng_jlin/article/details/82790746
cdf = imhist.cumsum()
#归一化
cdf = 255*cdf /cdf[-1]
#使用累计分布函数的线性插值 计算新的像素值
#flatten 将图片多维向量转化为一维向量
# interp 线性插值函数 https://blog.csdn.net/hfutdog/article/details/87386901
im2 = interp(im.flatten(),bins[:-1],cdf)
im2 = im2.reshape(im.shape)
new_image = Image.fromarray(uint8(im2))
# new_image.save("直方图均衡化图.jpg")
new_image.show()
if __name__ == '__main__':
histeq(im) | [
"1364826576@qq.com"
] | 1364826576@qq.com |
dc2732a1e7b3eaca2dd341a07f466ef1b8af2a2a | f972e22df004b419d23b4b03d3c7e42e604a2e2b | /compute/wps/tests/test_backend_ophidia.py | e3b0b3f1d98a35e239a5cba845155eb7e3ba2d2b | [] | no_license | OphidiaBigData/esgf-compute-wps | 9ec663b1701f2336f08117a6fb0725d71adfe078 | 8dd26dde385fbe861c78e432e0954725d7bf9b18 | refs/heads/master | 2020-04-28T10:20:49.718253 | 2019-02-04T09:46:43 | 2019-02-04T09:46:43 | 175,198,536 | 0 | 0 | null | 2019-03-12T11:39:20 | 2019-03-12T11:39:19 | null | UTF-8 | Python | false | false | 1,713 | py | import unittest
import cwt
import mock
from django import test
from django.conf import settings
from wps import models
from wps import WPSError
from wps.backends import ophidia
#class OphidiaBackendTestCase(test.TestCase):
# fixtures = ['servers.json', 'users.json', 'processes.json']
#
# def setUp(self):
# self.backend = ophidia.Ophidia()
#
# self.user = models.User.objects.first()
#
# def test_execute_missing_operation(self):
# mock_job = mock.MagicMock()
#
# variables = {
# 'v0': cwt.Variable('file:///test.nc', 'tas', name='v0'),
# }
#
# domains = {'d0': cwt.Domain([cwt.Dimension('time', 0, 200)])}
#
# with self.assertRaises(WPSError) as e:
# self.backend.execute('Oph.max', variables, domains, {}, job=mock_job, user=self.user)
#
# def test_execute(self):
# mock_job = mock.MagicMock()
#
# variables = {
# 'v0': cwt.Variable('file:///test.nc', 'tas', name='v0'),
# }
#
# domains = {'d0': cwt.Domain([cwt.Dimension('time', 0, 200)])}
#
# operation = cwt.Process(identifier='Oph.max', name='max')
#
# operation.inputs = variables.values()
#
# operation.domain = domains['d0']
#
# result = self.backend.execute('Oph.max', variables, domains, {'max': operation}, job=mock_job, user=self.user)
#
# self.assertIsNotNone(result)
#
# def test_populate_processes(self):
# process_count = len(ophidia.PROCESSES)
#
# self.backend.populate_processes()
#
# self.assertEqual(len(self.backend.processes), process_count)
#
# def test_initialize(self):
# with self.assertNumQueries(0):
# self.backend.initialize()
| [
"boutte.jason@gmail.com"
] | boutte.jason@gmail.com |
aaf5614c687126755584a1a474e49dc112cd19ea | 2e6fbadca5364eb3c7271cef68c4745fde59259a | /dmu_utils/sqlachemy/types.py | 34b4f9958bf0fa293f9226789a8dca4379b3b6c9 | [] | no_license | dmugtasimov/dmu-utils | b25369c264f426800d916580af4ebb43c2c9f408 | c4f91fb43675a4bcc54cc7e5ea52a789e46f6439 | refs/heads/master | 2021-01-23T03:37:42.182066 | 2017-11-10T07:44:38 | 2017-11-10T07:44:38 | 86,107,402 | 0 | 0 | null | 2017-04-09T16:06:14 | 2017-03-24T20:21:17 | Python | UTF-8 | Python | false | false | 1,852 | py | from six import iteritems
from schematics import types as sch_types
from schematics.undefined import Undefined
from sqlalchemy import types as sa_types, Column
from dmu_utils.schematics.types import NonUnicodeStringType, JSONType, CustomDecimalType
SCHEMATICS_TO_SQLALCHEMY_TYPE_MAP = {
sch_types.StringType: sa_types.Unicode,
NonUnicodeStringType: sa_types.String,
sch_types.IntType: sa_types.Integer,
sch_types.DateTimeType: sa_types.DateTime,
# TODO(dmu) HIGH: Is sch_types.DecimalType really supported?
sch_types.DecimalType: sa_types.Numeric,
CustomDecimalType: sa_types.Numeric,
sch_types.FloatType: sa_types.Float,
sch_types.BooleanType: sa_types.Boolean,
JSONType: sa_types.JSON,
sch_types.ModelType: sa_types.JSON,
sch_types.ListType: sa_types.JSON,
}
def get_sqlalchemy_type(schematics_type):
sqlalchemy_type = SCHEMATICS_TO_SQLALCHEMY_TYPE_MAP.get(schematics_type)
if sqlalchemy_type:
return sqlalchemy_type
for from_type, to_type in iteritems(SCHEMATICS_TO_SQLALCHEMY_TYPE_MAP):
if issubclass(schematics_type, from_type):
return to_type
raise ValueError('Unsupported schematics type: {}'.format(schematics_type))
def schematics_field_to_sqlalchemy_column(field):
schematics_type = type(field)
sqlalchemy_type = get_sqlalchemy_type(schematics_type)
kwargs = {}
if issubclass(schematics_type, sch_types.StringType):
kwargs['length'] = field.max_length
if issubclass(schematics_type, CustomDecimalType):
kwargs['precision'] = field.precision
kwargs['scale'] = field.scale
column_kwargs = {
'nullable': not field.required
}
if field._default is not Undefined:
column_kwargs['default'] = field._default
return Column(sqlalchemy_type(**kwargs), **column_kwargs)
| [
"dmugtasimov@gmail.com"
] | dmugtasimov@gmail.com |
719073220d1c46074a1cbeaab8a80b7b66eec773 | c53b3e120c59557daaa2fa5b7626413105eb5965 | /tendenci/libs/storage.py | a9fe3d80c705775eb3eb7fb84553790eb2a4c435 | [] | no_license | chendong0444/ams | 8483334d9b687708d533190b62c1fa4fd4690f2c | f2ac4ecc076b223c262f2cde4fa3b35b4a5cd54e | refs/heads/master | 2021-05-01T03:59:18.682836 | 2018-07-23T06:33:41 | 2018-07-23T06:33:41 | 121,194,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | from django.conf import settings
from django.core.files.storage import get_storage_class
def get_default_storage():
"""
Get a default storage class.
"""
return get_storage_class(settings.DEFAULT_FILE_STORAGE)()
def get_static_storage():
"""
Get a static storage class.
"""
return get_storage_class(settings.STATICFILES_STORAGE)()
def get_file_content(name, storage_type='default'):
"""
Get the file content from the specified storage.
"""
if storage_type == 'static':
storage = get_static_storage()
else:
storage = get_default_storage()
f = storage.open(name)
content = f.read()
f.close()
return content
def save_file_content(name, content, storage_type='default'):
"""
Save the file content to the specified storage.
"""
if storage_type == 'static':
storage = get_static_storage()
else:
storage = get_default_storage()
return storage.save(name, content)
| [
"chendong@shinezone.com"
] | chendong@shinezone.com |
0722c3f10d8375fc61deb3496915dc5b7d77272f | cef8e528087eeb0ece37bb908ffea7cf81ade0db | /tp2.py | 50cfce313755a1f4d1eaea18bbb02fb979cbd83c | [] | no_license | ashrafulemon/python | f03ef07c77885bd00e73674e8767796c9bcac058 | 5d257288a52195500cf1f6b5cc1e017dae0fdcc0 | refs/heads/main | 2023-01-31T11:08:17.985674 | 2020-12-16T14:59:39 | 2020-12-16T14:59:39 | 322,011,688 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # 11. basic syntax
# 12. identifiers
# 13. keywords
# 14.lines and indentation
# 15. multiline statements
# 19.command line argument
import sys
print("number arg",len(sys.argv),"argument")
print("ar list ",str(sys.argv))
x=int(sys.argv[1])
y=int(sys.argv[2])
z=x+y
print(x,y,z) | [
"emon118.bd@gmail.com"
] | emon118.bd@gmail.com |
30f2d70ef4d8c8ff6ec88003d1820c4e03b22fe7 | b3f40953c1876f5916a2dbd2d7b9903a635e512e | /Dojo_Python/Django_Level_2/dojo_secrets/apps/secrets/migrations/0006_auto_20170227_2020.py | 135d75a24c6fc12637b6702c966810ef92428ab4 | [] | no_license | rudietuesdays/dojo_python | c9d9907096995d6e247b6b7732a908ece246b466 | 5e1fd46c45c90dfcd046f0fe922c64f96625a6c0 | refs/heads/master | 2021-01-11T13:28:56.029473 | 2017-03-07T02:49:48 | 2017-03-07T02:49:48 | 81,473,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-28 01:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('secrets', '0005_secret_liked'),
]
operations = [
migrations.AlterField(
model_name='liked',
name='liked',
field=models.BooleanField(default=True),
),
]
| [
"amanda.gaines@gmail.com"
] | amanda.gaines@gmail.com |
fee337d6bdd75a68b697954d025a3b015ea33b3f | 87ddcf61c2faaaa795b9c25af334a76018337f62 | /frictionless/formats/sql/adapter.py | 5f49b7b4b5ee8c675554de78cbb65104b38cb5dd | [
"MIT"
] | permissive | frictionlessdata/frictionless-py | 17d663ad34c18964113c97e4d657004610fe0df0 | 740319edeee58f12cc6956a53356f3065ff18cbb | refs/heads/main | 2023-08-26T16:24:25.353929 | 2023-08-04T07:55:37 | 2023-08-04T07:55:37 | 28,409,905 | 295 | 79 | MIT | 2023-09-04T05:01:33 | 2014-12-23T17:11:11 | Python | UTF-8 | Python | false | false | 6,870 | py | from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, List, Optional
from ... import models
from ...package import Package
from ...platform import platform
from ...resource import Resource
from ...system import Adapter
from . import settings
from .control import SqlControl
from .mapper import SqlMapper
if TYPE_CHECKING:
from sqlalchemy import MetaData, Table
from sqlalchemy.engine import Engine
from ...report import Report
from ...resources import TableResource
from ...schema import Schema
from ...table import IRowStream, Row
class SqlAdapter(Adapter):
"""Read and write data from/to SQL database"""
engine: Engine
control: SqlControl
mapper: SqlMapper
metadata: MetaData
def __init__(self, engine: Engine, *, control: Optional[SqlControl] = None):
sa = platform.sqlalchemy
self.engine = engine
self.control = control or SqlControl()
self.mapper = SqlMapper(self.engine.dialect.name)
with self.engine.begin() as conn:
# It will fail silently if this function already exists
if self.engine.dialect.name.startswith("sqlite"):
conn.connection.create_function("REGEXP", 2, regexp) # type: ignore
self.metadata = sa.MetaData(schema=self.control.namespace)
self.metadata.reflect(conn, views=True)
# Delete
def delete_resource(self, table_name: str) -> None:
with self.engine.begin() as conn:
table = self.metadata.tables[table_name]
self.metadata.drop_all(conn, tables=[table])
# Read
def read_package(self) -> Package:
package = Package(resources=[])
for table in self.metadata.sorted_tables:
name = str(table.name)
control = SqlControl(table=name)
path = self.engine.url.render_as_string(hide_password=False)
schema = self.mapper.read_schema(table)
resource = Resource(path, name=name, schema=schema, control=control)
package.add_resource(resource)
return package
def read_schema(self, table_name: str) -> Schema:
table = self.metadata.tables[table_name]
return self.mapper.read_schema(table, with_metadata=self.control.with_metadata)
def read_cell_stream(self, control: SqlControl) -> Generator[List[Any], None, None]:
sa = platform.sqlalchemy
table = self.metadata.tables[control.table] # type: ignore
with self.engine.begin() as conn:
# Prepare columns
columns = table.c
if self.control.with_metadata:
columns = [
column
for column in table.c
if column.name not in settings.METADATA_IDENTIFIERS
]
# Prepare query
# Streaming could be not working for some backends:
# http://docs.sqlalchemy.org/en/latest/core/connections.html
query = sa.select(*columns).execution_options(stream_results=True)
if control.order_by:
query = query.order_by(sa.text(control.order_by))
if control.where:
query = query.where(sa.text(control.where))
# Stream cells
result = conn.execute(query)
yield list(result.keys())
for item in result:
cells = list(item)
yield cells
# Write
def write_package(self, package: Package):
with self.engine.begin() as conn:
tables: List[Table] = []
for res in package.resources:
assert res.name
table = self.mapper.write_schema(res.schema, table_name=res.name)
table = table.to_metadata(self.metadata)
tables.append(table)
self.metadata.create_all(conn, tables=tables)
for table in self.metadata.sorted_tables:
if package.has_table_resource(table.name):
resource = package.get_table_resource(table.name)
with resource:
self.write_row_stream(resource.row_stream, table_name=table.name)
return models.PublishResult(
url=self.engine.url.render_as_string(hide_password=True),
context=dict(engine=self.engine),
)
def write_schema(
self,
schema: Schema,
*,
table_name: str,
force: bool = False,
with_metadata: bool = False,
) -> None:
with self.engine.begin() as conn:
if force:
existing_table = self.metadata.tables.get(table_name)
if existing_table is not None:
self.metadata.drop_all(conn, tables=[existing_table])
self.metadata.remove(existing_table)
table = self.mapper.write_schema(
schema, table_name=table_name, with_metadata=with_metadata
)
table = table.to_metadata(self.metadata)
self.metadata.create_all(conn, tables=[table])
def write_row_stream(
self,
row_stream: IRowStream,
*,
table_name: str,
on_row: Optional[Callable[[Row], None]] = None,
) -> None:
sa = platform.sqlalchemy
with self.engine.begin() as conn:
buffer: List[Dict[str, Any]] = []
table = self.metadata.tables[table_name]
for row in row_stream:
buffer.append(self.mapper.write_row(row))
if len(buffer) > settings.BUFFER_SIZE:
conn.execute(sa.insert(table), buffer)
buffer.clear()
on_row(row) if on_row else None
if len(buffer):
conn.execute(sa.insert(table), buffer)
def write_resource_with_metadata(
self,
resource: TableResource,
*,
table_name: str,
on_row: Optional[Callable[[Row], None]] = None,
) -> Report:
sa = platform.sqlalchemy
with self.engine.begin() as conn:
# Write row
def process_row(row: Row):
buffer.append(self.mapper.write_row(row, with_metadata=True))
if len(buffer) > settings.BUFFER_SIZE:
conn.execute(sa.insert(table), buffer)
buffer.clear()
on_row(row) if on_row else None
# Validate/iterate
buffer: List[Dict[str, Any]] = []
table = self.metadata.tables[table_name]
report = resource.validate(on_row=process_row)
if len(buffer):
conn.execute(sa.insert(table), buffer)
return report
# Internal
def regexp(expr: str, item: str):
reg = re.compile(expr)
return reg.search(item) is not None
| [
"noreply@github.com"
] | frictionlessdata.noreply@github.com |
828c6edc99064b737f2531777d5caca2b67c08a4 | 2057ba21d3cfc17c88692c62b35e9c1697bbd720 | /digit_string.py | 3de66a0d9c558e84f386d48ddbee0ebe16948a02 | [] | no_license | crobil/project | bd35bce5e1e57c72dc7ac7747b0f646c2331f78b | 6f91eda5c961893a67a0b8ced08304e07e184a84 | refs/heads/master | 2020-03-07T08:44:03.205807 | 2018-06-08T08:57:13 | 2018-06-08T08:57:13 | 124,166,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | def DashInsert(input):
res = ''
for idx, var in enumerate(input):
if idx == 0:
res += '%s' % (var)
elif (int(var) % 2 == int(input[idx-1]) % 2) and int(var) % 2 == 1:
res += '-%s' % (var)
elif (int(var) % 2 == int(input[idx-1]) % 2) and int(var) % 2 == 0:
res += '+%s' % (var)
else:
res += '%s' % (var)
return res
print DashInsert('4546793')
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
c6aa63ffe81952ad736edec1545d368a4a989eec | e2f0806ca1cdd887ea40d050a19fa2710427bd38 | /기본 문제/03주차_정렬/2750_수 정렬하기/강승훈.py | 44cb4a0940575efcd28ce0ca7b4fc870ebdb5276 | [] | no_license | JY-Dev/AlgorithmStudy-1 | 001f94d80097c850c79eeb2bc86971a01aa5bd5d | 2ad1df0fd65c72a6f6d1feeba09f889000ff8c15 | refs/heads/main | 2023-08-21T18:38:18.235994 | 2021-09-28T07:07:11 | 2021-09-28T07:07:11 | 406,208,087 | 1 | 0 | null | 2021-09-14T03:14:32 | 2021-09-14T03:14:31 | null | UTF-8 | Python | false | false | 604 | py | from sys import stdin
# 입력
n = int(stdin.readline())
arr = [int(stdin.readline()) for _ in range(n)]
# 버블정렬
for i in range(n-1): # 조건식에서 +1 시키기 때문에, n-1 까지만 반복.
for j in range(n-1):
if arr[j] > arr[j+1]: # 만약 현재 요소가, 바로 다음 요소보다 크면, 두개의 위치를 바꿔줌.
tmp_1 = arr[j]; tmp_2 = arr[j+1] # tmp_1에 앞에 요소, tmp_2에 바로 다음 요소를 저장.
arr[j] = tmp_2; arr[j+1] = tmp_1 # 그리고 반대로 다시 재할당 시킴.
# 출력
for crnt in arr:
print(crnt)
| [
"noreply@github.com"
] | JY-Dev.noreply@github.com |
2d16134229da9c97663682df26d2177427bf90f0 | de725b742e69f38318c04cd44ac970e7135857a5 | /assets/myauth.py | 7af5b88d8abcaff8d9dd6b4c10a35cc8225f7d33 | [] | no_license | haochenxiao666/itelftool | e5c0811b48e01d0eeff13d15d33b89960091960a | 8558dce6d97e7443c95513aa1389910c3902043f | refs/heads/master | 2020-04-14T22:55:46.732111 | 2018-10-18T09:00:44 | 2018-10-18T09:00:44 | 164,183,750 | 1 | 0 | null | 2019-01-05T05:05:32 | 2019-01-05T05:05:31 | null | UTF-8 | Python | false | false | 4,431 | py | #!/usr/bin/env python
#coding:utf-8
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser,Group,PermissionsMixin
)
import django
from django import utils
from accounts.models import RoleList
class UserManager(BaseUserManager):
def create_user(self, email, name, password=None):
"""
Creates and saves a User with the given email, date of
birth and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
name=name,
#token=token,
#department=department,
#tel=tel,
#memo=memo,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name ,password):
"""
Creates and saves a superuser with the given email, date of
birth and password.
"""
user = self.create_user(email,
password=password,
name=name,
#token=token,
#department=department,
#tel=tel,
#memo=memo,
)
user.is_admin = True
user.save(using=self._db)
return user
#class UserProfile(AbstractBaseUser):
class UserProfile(AbstractBaseUser,PermissionsMixin):
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
# on_delete=models.SET_NULL,只有当null=True的时候 ,被删除的时候将内容置空
role = models.ForeignKey(RoleList, null=True, blank=True, on_delete=models.SET_NULL)
name = models.CharField(max_length=32)
token = models.CharField(u'token', max_length=128,default=None,blank=True,null=True)
department = models.CharField(u'部门', max_length=32,default=None,blank=True,null=True)
#business_unit = models.ManyToManyField(BusinessUnit)
tel = models.CharField(u'座机', max_length=32,default=None,blank=True,null=True)
mobile = models.CharField(u'手机', max_length=32,default=None,blank=True,null=True)
memo = models.TextField(u'备注', blank=True,null=True,default=None)
date_joined = models.DateTimeField(blank=True, auto_now_add=True)
#valid_begin = models.DateTimeField(blank=True, auto_now=True)
# 秋飞修改
# valid_begin_time = models.DateTimeField(default=django.utils.timezone.now)
valid_begin_time = models.DateTimeField(default=utils.timezone.now)
valid_end_time = models.DateTimeField(blank=True,null=True)
groups = models.ManyToManyField
USERNAME_FIELD = 'email'
#REQUIRED_FIELDS = ['name','token','department','tel','mobile','memo']
REQUIRED_FIELDS = ['name']
def get_full_name(self):
# The user is identified by their email address
return self.email
def get_short_name(self):
# The user is identified by their email address
return self.email
def __str__(self): # __unicode__ on Python 2
return self.email
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_perms(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
@property
def is_staff(self):
"Is the user a member of staff?"
# Simplest possible answer: All admins are staff
return self.is_admin
class Meta:
verbose_name = u'用户信息'
verbose_name_plural = u"用户信息"
def __unicode__(self):
return self.name
objects = UserManager()
class LoginRecord(models.Model):
name = models.ForeignKey(
UserProfile,
null=False,
blank=False,
verbose_name=u"登录用户"
)
logintime = models.DateTimeField(u'登录时间', auto_now_add=True)
loginsource = models.GenericIPAddressField(u'登录来源IP', blank=True, null=True)
| [
"420521738@qq.com"
] | 420521738@qq.com |
79622b9129c74f9d947d06ef504aa39e5b5e0023 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/cctbx/source_generators/eltbx/generate_henke_cpp.py | 7bae6088b98f86b7eb183ef1711372e11162e216 | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 5,440 | py | from __future__ import absolute_import, division, print_function
from scitbx.source_generators.utils import join_open
from scitbx.source_generators.utils import write_this_is_auto_generated
import libtbx.load_env
import string
import os
from six.moves import range
this = "cctbx.source_generators.eltbx.generate_henke_cpp"
reference_tables_directory = libtbx.env.under_dist(
"cctbx", "reference/henke/tables")
def print_header(f):
write_this_is_auto_generated(f, this)
print("""\
#include <cctbx/eltbx/henke.h>
namespace cctbx { namespace eltbx { namespace henke {
""", file=f)
def print_ftp_info(f):
print("""\
/*
Henke Tables
The original data can be found at:
ftp://grace.lbl.gov/pub/sf/
From ftp://grace.lbl.gov/pub/sf/read.me:
Low-Energy X-ray Interaction Coefficients:
Photoabsorption, Scattering, and Reflection
E = 30-30,000 eV, Z = 1-92
B. L. Henke, E. M. Gullikson, and J. C. Davis
Center for X-Ray Optics, 2-400
Lawrence Berkeley Laboratory
Berkeley, California 94720
Reference: B. L. Henke, E. M. Gullikson, and J. C. Davis,
Atomic Data and Nuclear Data Tables Vol. 54 No. 2 (July 1993).
*/
""", file=f)
def collect_tables():
nff_files = []
for file in os.listdir(reference_tables_directory):
fn = file.lower().capitalize()
if (fn[-4:] == ".nff"): nff_files.append(file)
tables = [0] * 120
for file in nff_files:
f = join_open(reference_tables_directory, file, "r")
header = f.readline()
table = f.readlines()
f.close()
Symbol = header[1:3].strip()
Z = int(header[7:9])
assert len(Symbol) > 0
assert Symbol[0] in 'abcdefghijklmnopqrstuvwxyz'
assert Symbol[-1] in 'abcdefghijklmnopqrstuvwxyz'
assert Z > 0 and Z < len(tables)
assert tables[Z] == 0
Symbol = Symbol.capitalize()
tables[Z] = (Symbol, table)
Z = tables[1:].index(0) + 1
rest = tables[Z:]
assert rest == [0] * len(rest)
tables = tables[:Z]
return tables
def print_table_block(f, tables, Z_begin, Z_end, define_noval=0):
print("namespace table_data {", file=f)
print(file=f)
print("using anomalous::e_fp_fdp;", file=f)
print(file=f)
# Visual C++ 7.0 compilation is very slow with define_noval=1
if (define_noval): print("#define NOVAL fp_fdp_undefined", file=f)
for Z in range(Z_begin, Z_end):
tab = tables[Z]
print("e_fp_fdp " + tab[0].lower() \
+ "[] = { /* Z = " + str(Z) + " */", file=f)
for line in tab[1]:
flds = line.split()
assert len(flds) == 3
if (define_noval and flds[1] == "-9999.00"): flds[1] = "NOVAL"
print("{%s, %s, %s}," % tuple(flds), file=f)
print("{0, 0, 0}", file=f)
print("};", file=f)
print(file=f)
if (define_noval):
print("#undef NOVAL", file=f)
print(file=f)
print("} // namespace table_data", file=f)
print(file=f)
print("}}} // namespace cctbx::eltbx::henke", file=f)
def print_henke_cpp(f, tables):
print("namespace table_data {", file=f)
print(file=f)
print("using anomalous::e_fp_fdp;", file=f)
print(file=f)
for tab in tables[1:]:
print("extern e_fp_fdp " + tab[0].lower() + "[];", file=f)
print(file=f)
print("static const anomalous::label_z_e_fp_fdp all[] = {", file=f)
i = 0
for tab in tables[1:]:
i += 1
print("{\"" + tab[0] + "\", " + str(i) + ", " + tab[0].lower() + "},", file=f)
print("{0, 0, 0}", file=f)
print("};", file=f)
print("""
} // namespace table_data
table::table(
std::string const& label,
bool exact,
bool exception_if_no_match)
{
std::string work_label = basic::strip_label(label, exact);
label_z_e_fp_fdp_ = anomalous::find_entry(
table_data::all, work_label, exact, exception_if_no_match);
}
fp_fdp
table::at_ev(double energy) const
{
fp_fdp raw = anomalous::interpolate(label_z_e_fp_fdp_, energy);
if (!raw.is_valid_fp()) return raw;
// subtract the number of electrons
return fp_fdp(raw.fp() - label_z_e_fp_fdp_->z, raw.fdp());
}
table_iterator::table_iterator()
:
current_("H", true)
{}
table
table_iterator::next()
{
table result = current_;
if (current_.is_valid()) current_.label_z_e_fp_fdp_++;
return result;
}
}}} // namespace cctbx::eltbx::henke""", file=f)
def collect_points(lines):
points = []
for line in lines:
points.append(line.split()[0])
return points
def collect_tab_points(tables):
tab_points = []
for tab in tables[1:]:
tab_points.append(collect_points(tab[1]))
return tab_points
def compare_points(tables):
tab_points = collect_tab_points(tables)
for i in range(len(tab_points)-1):
for j in range(i+1, len(tab_points)):
if (tab_points[i] == tab_points[j]):
print("points %d==%d" % (i+1,j+1))
def run(target_dir):
tables = collect_tables()
compare_points(tables) # establish that each list of data points is unique
f = join_open(target_dir, "henke.cpp", "w")
print_header(f)
print_ftp_info(f)
print_henke_cpp(f, tables)
f.close()
Z_block = 12
for Z_begin in range(1, len(tables), Z_block):
Z_end = min(len(tables), Z_begin + Z_block)
f = join_open(
target_dir, "henke_tables_%02d_%02d.cpp" % (Z_begin, Z_end-1), "w")
print_header(f)
print_table_block(f, tables, Z_begin, Z_end)
f.close()
if (__name__ == "__main__"):
run(".")
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
0d1fc572c58a6bfa42ebe16a77abc85250542703 | 88c1f9ccb62e91d6b0574bcde1043921bdeb0126 | /test_utilities/src/d1_test/test_files.py | 297b4ed0ef73c4895d00055f25c2d0d454ce1cef | [
"Apache-2.0"
] | permissive | jevans97utk/d1_python | 83b8de8780287c655779844f367b9189413da074 | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | refs/heads/master | 2020-05-21T01:16:50.677816 | 2019-04-22T16:09:44 | 2019-04-22T16:09:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for loading test files."""
import codecs
import json
import logging
import os
# Path to the test document root, relative to this file
import d1_common
import d1_common.types
import d1_common.types.dataoneTypes
import d1_common.util
import d1_common.utils.filesystem
def get_abs_test_file_path(rel_path):
return os.path.join(d1_common.utils.filesystem.abs_path('./test_docs'), rel_path)
def load_bin(rel_path):
bin_path = get_abs_test_file_path(rel_path)
with open(bin_path, 'rb') as f:
return f.read()
def load_utf8_to_str(rel_path):
"""Load file, decode from UTF-8 and return as str."""
logging.debug('Loading test file. rel_path="{}"'.format(rel_path))
utf8_path = get_abs_test_file_path(rel_path)
with codecs.open(utf8_path, encoding='utf-8', mode='r') as f:
unicode_str = f.read()
return unicode_str
def load_xml_to_pyxb(filename):
return d1_common.types.dataoneTypes.CreateFromDocument(load_xml_to_str(filename))
def load_xml_to_str(filename):
return load_utf8_to_str(os.path.join('xml', filename))
def load_json(filename):
return json.loads(load_utf8_to_str(os.path.join('json', filename)))
def load_cert(filename):
return load_bin(os.path.join('cert', filename))
def load_jwt(filename):
return load_bin(os.path.join('jwt', filename))
def save(obj_str, rel_path, encoding='utf-8'):
with open(get_abs_test_file_path(rel_path), 'w', encoding=encoding) as f:
return f.write(obj_str)
| [
"git@dahlsys.com"
] | git@dahlsys.com |
61fd076ee0bec381a73beaf4c8fc9e62f16688e9 | 8606267410dabfeacb4b7ff285a8d2250c139acc | /store/views/home.py | 2a799439f55833dad0516d1fccc5b42d0acb7181 | [] | no_license | Taraltinu/chopping-Site | a5e6f6eeeecb4fef92f90770a3c2493eca0f0bde | 1b722d53de1baaa5780701416f78dab62ef7d057 | refs/heads/master | 2022-12-20T07:06:16.602476 | 2020-10-02T18:07:31 | 2020-10-02T18:07:31 | 300,697,693 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | from django.shortcuts import render,redirect
from store.models import Product,Category,Costomer
from django.views import View
class Home(View):
    """Product listing view backed by a session-based shopping cart.

    The cart lives in ``request.session['cart']`` as a mapping of the posted
    product identifier to its quantity.
    """

    def post(self, request):
        """Add one unit of the posted product to the cart, or remove one unit
        when 'remove' is also posted; then redirect back to the listing."""
        # NOTE: the cart is keyed by the raw POST value, presumably a product
        # id — confirm against the template that submits this form.
        product_id = request.POST.get('product')
        removing = request.POST.get('remove')
        cart = request.session.get('cart')
        if not cart:
            # First item ever added for this session.
            cart = {product_id: 1}
        else:
            current = cart.get(product_id)
            if not current:
                cart[product_id] = 1
            elif removing:
                # Removing the last unit drops the product entirely.
                if current <= 1:
                    cart.pop(product_id)
                else:
                    cart[product_id] = current - 1
            else:
                cart[product_id] = current + 1
        request.session['cart'] = cart
        return redirect('/')

    def get(self, request):
        """Render the product listing, optionally filtered by category."""
        # Make sure a cart always exists for the session.
        if not request.session.get('cart'):
            request.session['cart'] = {}
        categories = Category.get_all_category()
        selected_category = request.GET.get('category')
        if selected_category:
            shown_products = Product.get_all_product_by_category_id(selected_category)
        else:
            shown_products = Product.get_all_products()
        context = {'product': shown_products, 'category': categories}
        return render(request, 'store/index.html', context)
"tinu1316@gmail.com"
] | tinu1316@gmail.com |
c34ed3439f3d7661d4b7129f79fe9f80006dc229 | 7087a5dd1772c9456f098bc024a894dcaeef5432 | /calbin/build/calkube/kubernetes-6.0.0_snapshot-py2.7.egg/kubernetes/client/models/v1beta1_volume_attachment_status.py | dd12c23922f449d21303a3a99e4861fc7618ae31 | [] | no_license | santhoshchami/kubecctl-python | 5be7a5a17cc6f08ec717b3eb1c11719ef7653aba | cd45af465e25b0799d65c573e841e2acb983ee68 | refs/heads/master | 2021-06-23T11:00:43.615062 | 2019-07-10T16:57:06 | 2019-07-10T16:57:06 | 145,669,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,370 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1VolumeAttachmentStatus(object):
    """Status of a VolumeAttachment request (Kubernetes v1beta1 storage API).

    NOTE: This model was generated by swagger-codegen and then cleaned up by
    hand: the third-party `six.iteritems` helper was replaced with the
    built-in ``dict.items()`` / plain dict iteration, which behaves
    identically here and drops the dependency.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    swagger_types = {
        'attach_error': 'V1beta1VolumeError',
        'attached': 'bool',
        'attachment_metadata': 'dict(str, str)',
        'detach_error': 'V1beta1VolumeError'
    }

    attribute_map = {
        'attach_error': 'attachError',
        'attached': 'attached',
        'attachment_metadata': 'attachmentMetadata',
        'detach_error': 'detachError'
    }

    def __init__(self, attach_error=None, attached=None, attachment_metadata=None, detach_error=None):
        """V1beta1VolumeAttachmentStatus - a model defined in Swagger.

        Only ``attached`` is required; its setter rejects ``None``.
        """
        self._attach_error = None
        self._attached = None
        self._attachment_metadata = None
        self._detach_error = None
        self.discriminator = None  # no polymorphic subtypes for this model

        if attach_error is not None:
            self.attach_error = attach_error
        # Required field: assigned unconditionally so the setter can validate.
        self.attached = attached
        if attachment_metadata is not None:
            self.attachment_metadata = attachment_metadata
        if detach_error is not None:
            self.detach_error = detach_error

    @property
    def attach_error(self):
        """The last error encountered during the attach operation, if any.

        This field must only be set by the entity completing the attach
        operation, i.e. the external-attacher.

        :rtype: V1beta1VolumeError
        """
        return self._attach_error

    @attach_error.setter
    def attach_error(self, attach_error):
        """Set the last attach error (see the property docstring)."""
        self._attach_error = attach_error

    @property
    def attached(self):
        """Whether the volume is successfully attached (required field).

        This field must only be set by the entity completing the attach
        operation, i.e. the external-attacher.

        :rtype: bool
        """
        return self._attached

    @attached.setter
    def attached(self, attached):
        """Set the attached flag; ``None`` is rejected because the field is required."""
        if attached is None:
            raise ValueError("Invalid value for `attached`, must not be `None`")
        self._attached = attached

    @property
    def attachment_metadata(self):
        """Information returned by a successful attach operation.

        Must be passed into subsequent WaitForAttach or Mount calls. This
        field must only be set by the entity completing the attach operation,
        i.e. the external-attacher.

        :rtype: dict(str, str)
        """
        return self._attachment_metadata

    @attachment_metadata.setter
    def attachment_metadata(self, attachment_metadata):
        """Set the attach metadata (see the property docstring)."""
        self._attachment_metadata = attachment_metadata

    @property
    def detach_error(self):
        """The last error encountered during the detach operation, if any.

        This field must only be set by the entity completing the detach
        operation, i.e. the external-attacher.

        :rtype: V1beta1VolumeError
        """
        return self._detach_error

    @detach_error.setter
    def detach_error(self, detach_error):
        """Set the last detach error (see the property docstring)."""
        self._detach_error = detach_error

    def to_dict(self):
        """Return the model's properties as a dict, recursively serializing
        any nested models (anything exposing a ``to_dict`` method)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two statuses are equal iff they are the same type with equal fields."""
        if not isinstance(other, V1beta1VolumeAttachmentStatus):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__ (needed explicitly on Python 2)."""
        return not self == other
| [
"root@kube-node02.local"
] | root@kube-node02.local |
68e77021a73f93d497900e999f534e311be2a3ad | 5ead804c0fc2afb510de84293da9c7003626777d | /p2p/nat.py | b25c7b42fe600af71ec460d1949a0f593ae8f004 | [
"MIT"
] | permissive | YaoyaoBae/py-evm | 2501437a4227ddffe4ffa584a2b9ec705dad35bf | 4a3e8dfa15d81841fde434db352bcb7721ad5c04 | refs/heads/master | 2020-03-21T01:55:09.261489 | 2018-06-20T01:02:05 | 2018-06-20T01:02:05 | 137,969,468 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,621 | py | import asyncio
from concurrent.futures import (
ThreadPoolExecutor,
)
import ipaddress
from typing import (
AsyncGenerator,
NamedTuple,
)
from urllib.parse import urlparse
from p2p.cancel_token import (
CancelToken,
)
from p2p.exceptions import (
NoInternalAddressMatchesDevice,
OperationCancelled,
)
import netifaces
from p2p.service import BaseService
import upnpclient
# UPnP discovery can take a long time, so use a loooong timeout here.
UPNP_DISCOVER_TIMEOUT_SECONDS = 30
class PortMapping(NamedTuple):
    """A NAT port mapping: the internal (LAN) and external (WAN) endpoints."""
    internal: str  # "ip:port" on the local network, e.g. "192.2.3.4:56"
    external: str  # "ip:port" as seen from the internet, e.g. "192.2.3.4:56"
def find_internal_ip_on_device_network(upnp_dev: upnpclient.upnp.Device) -> str:
    """
    Return this host's IP address that lies on the same network as the given
    UPnP device, for use as the internal side of a NAT mapping.

    :raises NoInternalAddressMatchesDevice: if no local interface address is
        on the device's network.
    """
    device_host = urlparse(upnp_dev.location).hostname
    # NOTE(review): assumes the device's LAN is a /24 — confirm behaviour for
    # networks with a different prefix length.
    device_net = ipaddress.ip_network(device_host + '/24', strict=False)
    for iface in netifaces.interfaces():
        iface_addrs = netifaces.ifaddresses(iface)
        # TODO: Support IPv6 addresses as well.
        for entry in iface_addrs.get(netifaces.AF_INET, []):
            if ipaddress.ip_address(entry['addr']) in device_net:
                return entry['addr']
    raise NoInternalAddressMatchesDevice(device_hostname=device_host)
class UPnPService(BaseService):
    """
    Generate a mapping of external network IP address/port to internal IP address/port,
    using the Universal Plug 'n' Play standard.
    """
    # Lease duration of the UPnP mapping and the refresh interval, in seconds
    # (30 minutes): mappings are registered with this lifetime and must be
    # re-registered before they expire.
    _nat_portmap_lifetime = 30 * 60

    def __init__(self, port: int, token: CancelToken = None) -> None:
        """
        :param port: The port that a server wants to bind to on this machine, and
            make publicly accessible.
        :param token: cancel token forwarded to BaseService to stop the service.
        """
        super().__init__(token)
        self.port = port
        # Last successfully registered mapping; None until add_nat_portmap()
        # succeeds (current_mapping() hides the None behind a placeholder).
        self._mapping: PortMapping = None  # when called externally, this never returns None

    async def _run(self):
        """Run an infinite loop refreshing our NAT port mapping.

        On every iteration we configure the port mapping with a lifetime of 30 minutes and then
        sleep for that long as well.
        """
        # NOTE(review): the loop sleeps *before* mapping, so the initial
        # registration is presumably made by an external add_nat_portmap()
        # call — confirm against this service's callers.
        while self.is_running:
            try:
                # Wait for the port mapping lifetime, and then try registering it again
                await self.wait(asyncio.sleep(self._nat_portmap_lifetime))
                await self.add_nat_portmap()
            except OperationCancelled:
                break
            except Exception:
                # Keep the refresh loop alive across unexpected errors.
                self.logger.exception("Failed to setup NAT portmap")

    async def _cleanup(self):
        # Nothing to release when the service stops.
        pass

    async def add_nat_portmap(self):
        """
        Set up the port mapping

        Tries each discovered UPnP device in turn and returns on the first
        success; when no device yields a mapping, the cached mapping is
        cleared.

        :return: the IP address of the new mapping (or None if failed)
        """
        self.logger.info("Setting up NAT portmap...")
        try:
            async for upnp_dev in self._discover_upnp_devices():
                try:
                    external_ip = await self._add_nat_portmap(upnp_dev)
                except NoInternalAddressMatchesDevice as exc:
                    self.logger.info(
                        "No internal addresses were managed by the UPnP device at %s",
                        exc.device_hostname,
                    )
                    continue
                else:
                    # Success: keep the first device that accepted the mapping.
                    return external_ip
        except upnpclient.soap.SOAPError as e:
            if e.args == (718, 'ConflictInMappingEntry'):
                # An entry already exists with the parameters we specified. Maybe the router
                # didn't clean it up after it expired or it has been configured by other piece
                # of software, either way we should not override it.
                # https://tools.ietf.org/id/draft-ietf-pcp-upnp-igd-interworking-07.html#errors
                self.logger.info("NAT port mapping already configured, not overriding it")
            else:
                self.logger.exception("Failed to setup NAT portmap")
        # Reached only when no device produced a mapping: drop any stale one.
        self._mapping = None

    def current_mapping(self) -> PortMapping:
        # Before the first successful registration, return placeholder
        # endpoints of the form ":<port>" with no IP address.
        if self._mapping is None:
            unbound = ':%d' % self.port
            return PortMapping(unbound, unbound)
        else:
            return self._mapping

    async def _add_nat_portmap(self, upnp_dev: upnpclient.upnp.Device) -> str:
        """Register TCP and UDP mappings for self.port on the given device and
        return the device's external IP address.

        :raises NoInternalAddressMatchesDevice: if this host has no address on
            the device's network.
        """
        # Detect our internal IP address (which raises if there are no matches)
        internal_ip = find_internal_ip_on_device_network(upnp_dev)
        external_ip = upnp_dev.WANIPConn1.GetExternalIPAddress()['NewExternalIPAddress']
        # Map the port for both protocols (TCP for p2p, UDP for discovery),
        # with a lease matching the refresh interval.
        for protocol, description in [('TCP', 'ethereum p2p'), ('UDP', 'ethereum discovery')]:
            upnp_dev.WANIPConn1.AddPortMapping(
                NewRemoteHost=external_ip,
                NewExternalPort=self.port,
                NewProtocol=protocol,
                NewInternalPort=self.port,
                NewInternalClient=internal_ip,
                NewEnabled='1',
                NewPortMappingDescription=description,
                NewLeaseDuration=self._nat_portmap_lifetime,
            )
        self._mapping = PortMapping(
            '%s:%d' % (internal_ip, self.port),
            '%s:%d' % (external_ip, self.port),
        )
        self.logger.info("NAT port forwarding successfully set up: %r", self._mapping)
        return external_ip

    async def _discover_upnp_devices(self) -> AsyncGenerator[upnpclient.upnp.Device, None]:
        """Yield discovered UPnP devices that expose a WANIPConn1 service."""
        loop = asyncio.get_event_loop()
        # Use loop.run_in_executor() because upnpclient.discover() is blocking and may take a
        # while to complete. We must use a ThreadPoolExecutor() because the
        # response from upnpclient.discover() can't be pickled.
        try:
            devices = await self.wait(
                loop.run_in_executor(ThreadPoolExecutor(max_workers=1), upnpclient.discover),
                timeout=UPNP_DISCOVER_TIMEOUT_SECONDS,
            )
        # NOTE(review): this catches the builtin TimeoutError; confirm that
        # self.wait() does not raise asyncio.TimeoutError instead (distinct
        # classes before Python 3.11).
        except TimeoutError:
            self.logger.info("Timeout waiting for UPNP-enabled devices")
            return
        # If there are no UPNP devices we can exit early
        if not devices:
            self.logger.info("No UPNP-enabled devices found")
            return
        # Now we loop over all of the devices until we find one that we can use.
        for device in devices:
            try:
                device.WANIPConn1
            except AttributeError:
                # Device does not offer a WAN IP connection service; skip it.
                continue
            yield device
| [
"ut96caarrs@snkmail.com"
] | ut96caarrs@snkmail.com |
6018f93cdb60ae3b44f7efc5dadd97b15e2df5d7 | a80e9eb7ade3d43ce042071d796c00dd10b93225 | /ch_5/plot_Gaussian.py | 4c8f99e7999d1500ef7b8aa4a47a74dd10962194 | [] | no_license | ksjpswaroop/python_primer | 69addfdb07471eea13dccfad1f16c212626dee0a | 99c21d80953be3c9dc95f3a316c04b0c5613e830 | refs/heads/master | 2020-07-14T17:37:45.923796 | 2014-06-06T22:30:48 | 2014-06-06T22:30:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | # Exercise 5.4
from numpy import sqrt, exp, pi, linspace
from matplotlib.pyplot import plot, show, xlabel, ylabel
def h(x):
    """Standard normal density (mean 0, variance 1) evaluated at x.

    Works elementwise on NumPy arrays as well as scalars.
    """
    norm_const = 1 / sqrt(2 * pi)
    return norm_const * exp(-0.5 * x * x)
# Evaluate h on a uniform grid of 41 points spanning [-4, 4] (step 0.2).
xlist = linspace(-4, 4, 41)
hlist = h(xlist)
# Plot the Gaussian curve, label both axes, and display the figure.
plot(xlist, hlist)
xlabel('x')
ylabel('h')
show()
| [
"noahwaterfieldprice@gmail.com"
] | noahwaterfieldprice@gmail.com |
ea85dfc55a49d9f7394cf5204d62a669202be72b | 518bf342bc4138982af3e2724e75f1d9ca3ba56c | /solutions/1553. Minimum Number of Days to Eat N Oranges/1553.py | ae0d574b19fee8a7c8ccd112c60ba1dde29433ef | [
"MIT"
] | permissive | walkccc/LeetCode | dae85af7cc689882a84ee5011f0a13a19ad97f18 | a27be41c174565d365cbfe785f0633f634a01b2a | refs/heads/main | 2023-08-28T01:32:43.384999 | 2023-08-20T19:00:45 | 2023-08-20T19:00:45 | 172,231,974 | 692 | 302 | MIT | 2023-08-13T14:48:42 | 2019-02-23T15:46:23 | C++ | UTF-8 | Python | false | false | 209 | py | class Solution:
@functools.lru_cache(None)
def minDays(self, n: int) -> int:
if n <= 1:
return n
return 1 + min(self.minDays(n // 3) + n % 3,
self.minDays(n // 2) + n % 2)
| [
"me@pengyuc.com"
] | me@pengyuc.com |
868718ebefe1069ff6fe9964fe7e073242170198 | cf47ecc26210fd51caab0a1ea927ac768f388eb8 | /app/requests.py | 397faf3ab5d12a0a4ecec31234ca8b51e38bf458 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | BrendaMwiza/News | 3eb55d324b40f089d8c3110d3792af4a66341a2d | b68d53d0aefac78fae3de557c145cefd69e03c6b | refs/heads/master | 2022-09-28T17:47:48.972932 | 2019-09-20T09:01:01 | 2019-09-20T09:01:01 | 208,076,219 | 0 | 0 | null | 2022-09-16T18:09:55 | 2019-09-12T14:53:45 | Python | UTF-8 | Python | false | false | 3,234 | py | import urllib.request, json
from .models import Sources, Articles
#get api key
api_key = None
#get the news base url
base_source_url = None
base_article_url = None
def config_request(app):
    """Cache the News API key and endpoint URL templates from the app config."""
    global api_key, base_article_url, base_source_url
    config = app.config
    api_key = config['NEWS_API_KEY']
    base_source_url = config['SOURCE_API_BASE_URL']
    base_article_url = config['ARTICLES_API_BASE_URL']
def get_sources():
    '''
    Fetch the available news sources from the API.

    Returns:
        A list of Sources objects, or None when the response contains no
        sources.
    '''
    source_url = base_source_url.format(api_key)
    print(source_url)
    with urllib.request.urlopen(source_url) as response:
        payload = json.loads(response.read())
        if payload['sources']:
            return process_source(payload['sources'])
    return None
def process_source(source_list):
    '''
    Transform raw source dicts from the API into Sources model objects.

    Args:
        source_list: A list of dictionaries that contain source details

    Returns :
        source_results: A list of source objects
    '''
    source_results = []
    for item in source_list:
        source = Sources(
            item.get('id'),
            item.get('name'),
            item.get('description'),
            item.get('url'),
            item.get('category'),
            item.get('language'),
            item.get('country'),
        )
        source_results.append(source)
    return source_results
def get_article(id):
    '''
    Fetch the articles for the news source with the given id.

    Returns:
        A list of Articles objects, or None when the response contains no
        articles.
    '''
    article_url = base_article_url.format(id, api_key) + "&sources="
    with urllib.request.urlopen(article_url) as response:
        payload = json.loads(response.read())
        if payload['articles']:
            return process_articles(payload['articles'])
    return None
def process_articles(article_list):
    '''
    Transform raw article dicts from the API into Articles model objects.

    Args:
        article_list: A list of dictionaries that contain article details

    Returns :
        article_results: A list of article objects
    '''
    return [
        Articles(
            item.get('id'),
            item.get('name'),
            item.get('author'),
            item.get('title'),
            item.get('description'),
            item.get('url'),
            item.get('urlToImage'),
            item.get('publishedAt'),
        )
        for item in article_list
    ]
"brendabrizy@gmail.com"
] | brendabrizy@gmail.com |
159e90f30c0888505f00ac771b040dfe89acf3ac | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/project_info.py | 84c0c398d075ddeabe08545806afd80eb148187b | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,718 | py | # coding: utf-8
import pprint
import re
import six
class ProjectInfo:
    """IAM project information (huaweicloud SDK model).

    NOTE: This model was generated by the huaweicloud SDK code generator and
    then cleaned up by hand: the third-party `six.iteritems` helper was
    replaced with plain dict iteration, which behaves identically here and
    drops the dependency.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """
    # Attribute names whose values are masked as "****" in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'domain': 'DomainInfo',
        'id': 'str',
        'name': 'str'
    }

    attribute_map = {
        'domain': 'domain',
        'id': 'id',
        'name': 'name'
    }

    def __init__(self, domain=None, id=None, name=None):
        """ProjectInfo - a model defined in huaweicloud sdk.

        :param domain: owning account information (optional).
        :param id: project id (optional).
        :param name: project name (assigned unconditionally; not validated
            here).
        """
        self._domain = None
        self._id = None
        self._name = None
        self.discriminator = None  # no polymorphic subtypes for this model

        if domain is not None:
            self.domain = domain
        if id is not None:
            self.id = id
        self.name = name

    @property
    def domain(self):
        """Owning account information of this project.

        :rtype: DomainInfo
        """
        return self._domain

    @domain.setter
    def domain(self, domain):
        """Set the owning account information."""
        self._domain = domain

    @property
    def id(self):
        """project id

        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Set the project id."""
        self._id = id

    @property
    def name(self):
        """project name

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Set the project name."""
        self._name = name

    def to_dict(self):
        """Return the model's properties as a dict, recursively serializing
        nested models and masking attributes listed in ``sensitive_list``."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Two ProjectInfo objects are equal iff all their fields are equal."""
        if not isinstance(other, ProjectInfo):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
43510b7202af8f2f999f3f9b7e402ff45eede68e | 5864e86954a221d52d4fa83a607c71bacf201c5a | /testfixtures/tests/test_should_raise.py | 351daad7e26d3622750b3f95c8d9a93e46200e43 | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,104 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\stdlib\testfixtures\tests\test_should_raise.py
from testfixtures import Comparison as C, ShouldRaise, should_raise
from unittest import TestCase
from .compat import py_33_plus
class TestShouldRaise(TestCase):
    """Exercises testfixtures.should_raise / ShouldRaise: the decorator and
    context-manager forms, argument forwarding to the wrapped callable, and
    the exact failure messages produced when the raised exception does not
    match the expectation.

    NOTE: this file targets Python 2 (see the `print repr(e)` statement in
    test_identical_reprs).
    """
    # --- should_raise() used as a callable wrapper -------------------------
    def test_no_params(self):
        def to_test():
            raise ValueError('wrong value supplied')
        should_raise(ValueError('wrong value supplied'))(to_test)()
    def test_no_exception(self):
        def to_test():
            pass
        try:
            should_raise(ValueError())(to_test)()
        except AssertionError as e:
            self.assertEqual(e, C(AssertionError('None raised, ValueError() expected')))
        else:
            self.fail('No exception raised!')
    def test_wrong_exception(self):
        def to_test():
            raise ValueError('bar')
        try:
            should_raise(ValueError('foo'))(to_test)()
        except AssertionError as e:
            self.assertEqual(e, C(AssertionError("ValueError('bar',) raised, ValueError('foo',) expected")))
        else:
            self.fail('No exception raised!')
    def test_only_exception_class(self):
        def to_test():
            raise ValueError('bar')
        should_raise(ValueError)(to_test)()
    def test_no_supplied_or_raised(self):
        def to_test():
            pass
        try:
            should_raise()(to_test)()
        except AssertionError as e:
            self.assertEqual(e, C(AssertionError('No exception raised!')))
        else:
            self.fail('No exception raised!')
    # --- positional / keyword arguments are forwarded to the wrapped call --
    def test_args(self):
        def to_test(*args):
            raise ValueError('%s' % repr(args))
        should_raise(ValueError('(1,)'))(to_test)(1)
    def test_kw_to_args(self):
        def to_test(x):
            raise ValueError('%s' % x)
        should_raise(ValueError('1'))(to_test)(x=1)
    def test_kw(self):
        def to_test(**kw):
            raise ValueError('%r' % kw)
        should_raise(ValueError("{'x': 1}"))(to_test)(x=1)
    def test_both(self):
        def to_test(*args, **kw):
            raise ValueError('%r %r' % (args, kw))
        should_raise(ValueError("(1,) {'x': 2}"))(to_test)(1, x=2)
    # --- bound methods and classes also work as wrapped targets ------------
    def test_method_args(self):
        class X:
            def to_test(self, *args):
                self.args = args
                raise ValueError()
        x = X()
        should_raise(ValueError)(x.to_test)(1, 2, 3)
        self.assertEqual(x.args, (1, 2, 3))
    def test_method_kw(self):
        class X:
            def to_test(self, **kw):
                self.kw = kw
                raise ValueError()
        x = X()
        should_raise(ValueError)(x.to_test)(x=1, y=2)
        self.assertEqual(x.kw, {'x': 1,
                                'y': 2})
    def test_method_both(self):
        class X:
            def to_test(self, *args, **kw):
                self.args = args
                self.kw = kw
                raise ValueError()
        x = X()
        should_raise(ValueError)(x.to_test)(1, y=2)
        self.assertEqual(x.args, (1,))
        self.assertEqual(x.kw, {'y': 2})
    def test_class_class(self):
        class Test:
            def __init__(self, x):
                pass
        should_raise(TypeError)(Test)()
    # --- ShouldRaise context-manager form ----------------------------------
    def test_raised(self):
        with ShouldRaise() as s:
            raise ValueError('wrong value supplied')
        self.assertEqual(s.raised, C(ValueError('wrong value supplied')))
    def test_catch_baseexception_1(self):
        with ShouldRaise(SystemExit):
            raise SystemExit()
    def test_catch_baseexception_2(self):
        with ShouldRaise(KeyboardInterrupt):
            raise KeyboardInterrupt()
    def test_with_exception_class_supplied(self):
        with ShouldRaise(ValueError):
            raise ValueError('foo bar')
    def test_with_exception_supplied(self):
        with ShouldRaise(ValueError('foo bar')):
            raise ValueError('foo bar')
    def test_with_exception_supplied_wrong_args(self):
        try:
            with ShouldRaise(ValueError('foo')):
                raise ValueError('bar')
        except AssertionError as e:
            self.assertEqual(e, C(AssertionError("ValueError('bar',) raised, ValueError('foo',) expected")))
        else:
            self.fail('No exception raised!')
    def test_neither_supplied(self):
        with ShouldRaise():
            raise ValueError('foo bar')
    def test_with_no_exception_when_expected(self):
        try:
            with ShouldRaise(ValueError('foo')):
                pass
        except AssertionError as e:
            self.assertEqual(e, C(AssertionError("None raised, ValueError('foo',) expected")))
        else:
            self.fail('No exception raised!')
    def test_with_no_exception_when_neither_expected(self):
        try:
            with ShouldRaise():
                pass
        except AssertionError as e:
            self.assertEqual(e, C(AssertionError('No exception raised!')))
        else:
            self.fail('No exception raised!')
    def test_with_getting_raised_exception(self):
        with ShouldRaise() as s:
            raise ValueError('foo bar')
        self.assertEqual(C(ValueError('foo bar')), s.raised)
    # --- special exception types -------------------------------------------
    def test_import_errors_1(self):
        # ImportError message format changed in Python 3.3.
        if py_33_plus:
            message = "No module named 'textfixtures'"
        else:
            message = 'No module named textfixtures.foo.bar'
        with ShouldRaise(ImportError(message)):
            import textfixtures.foo.bar
    def test_import_errors_2(self):
        with ShouldRaise(ImportError('X')):
            raise ImportError('X')
    def test_custom_exception(self):
        class FileTypeError(Exception):
            def __init__(self, value):
                self.value = value
        with ShouldRaise(FileTypeError('X')):
            raise FileTypeError('X')
    def test_assert_keyerror_raised(self):
        # __getattr__ raising KeyError instead of AttributeError must be
        # reported as a mismatch, not masked.
        expected = "KeyError('foo',) raised, AttributeError('foo',) expected"
        class Dodgy(dict):
            def __getattr__(self, name):
                return self[name]
        try:
            with ShouldRaise(AttributeError('foo')):
                Dodgy().foo
        except AssertionError as e:
            self.assertEqual(C(AssertionError(expected)), e)
        else:
            self.fail('No exception raised!')
    def test_decorator_usage(self):
        @should_raise(ValueError('bad'))
        def to_test():
            raise ValueError('bad')
        to_test()
    # --- the `unless` flag inverts the expectation -------------------------
    def test_unless_false_okay(self):
        with ShouldRaise(unless=False):
            raise AttributeError()
    def test_unless_false_bad(self):
        try:
            with ShouldRaise(unless=False):
                pass
        except AssertionError as e:
            self.assertEqual(e, C(AssertionError('No exception raised!')))
        else:
            self.fail('No exception raised!')
    def test_unless_true_okay(self):
        with ShouldRaise(unless=True):
            pass
    def test_unless_true_not_okay(self):
        try:
            with ShouldRaise(unless=True):
                raise AttributeError('foo')
        except AssertionError as e:
            self.assertEqual(e, C(AssertionError("AttributeError('foo',) raised, no exception expected")))
        else:
            self.fail('No exception raised!')
    def test_unless_decorator_usage(self):
        @should_raise(unless=True)
        def to_test():
            pass
        to_test()
    def test_identical_reprs(self):
        # Exceptions with identical reprs but differing attributes must still
        # be reported as unequal, with the attribute diff in the message.
        class AnnoyingException(Exception):
            def __init__(self, **kw):
                self.other = kw.get('other')
        try:
            with ShouldRaise(AnnoyingException(other='bar')):
                raise AnnoyingException(other='baz')
        except AssertionError as e:
            print repr(e)
            self.assertEqual(C(AssertionError("AnnoyingException() raised, AnnoyingException() expected, attributes differ:\n other:'bar' != 'baz'")), e)
        else:
            self.fail('No exception raised!')
| [
"le02005@163.com"
] | le02005@163.com |
5044b8ef4ace4287373c06bc7916c93db1ba3994 | caeea08f9d0362609ccae39ed0ac73345af87494 | /s3/s3_1_budget.py | f62dbfc6fe2a4aa9734fa82f5df2017821cc2860 | [] | no_license | barcern/cfg-python | 9133709fd0139894a80aed2fd95dd28371cef495 | aad9cf4feb4e2447bc442d6b90adebfadf1ed6a1 | refs/heads/master | 2023-03-11T20:19:13.051492 | 2021-03-01T23:31:31 | 2021-03-01T23:31:31 | 295,569,412 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # You have a budget of £10 and want to write a program to decide which burger restaurant to go to.
# - Read the burger price with input()
# - Compare it against the 10.00 budget with <=
# - Print the result as: "Burger is within budget: True"

BUDGET = 10.0

# Ask the user for the price and compare it against the budget.
price = float(input("How much does this burger cost? "))
within_budget = price <= BUDGET

print(f"Burger is within budget: {within_budget}")
"bcernakova01@gmail.com"
] | bcernakova01@gmail.com |
c3a0938f836e7c0c1fac968c208fadfad3b06696 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/004_trees/_exercises/templates/16 Trees/Tree Representation Implementation (Nodes and References).py | 725c8abb61a726933d9508e885547cc1571944ae | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,309 | py | # # Nodes and References Implementation of a Tree
# #
# # In this notebook is the code corresponding to the lecture for implementing the representation of a Tree as a class with nodes and references!
#
# c_ BinaryTree o..
# ___ - rootObj
# key _ ?
# leftChild _ N..
# rightChild _ N..
#
# ___ insertLeft newNode
# __ l.. __ N..
# l.. _ ? ?
# ____
# t _ ? ?
# t.l.. _ l..
# l.. _ t
#
# ___ insertRight newNode
# __ r.. __ N..
# r.. _ ? ?
# ____
# t _ ? ?
# t.r.. _ r..
# r.. _ t
#
#
# ___ getRightChild
# r_ ?
#
# ___ getLeftChild
# r_ ?
#
# ___ setRootVal obj
# key _ ?
#
# ___ getRootVal
# r_ k..
#
# # We can see some examples of creating a tree and assigning children. Note that some outputs are Trees themselves!
# #
# # from __future__ import print_function
# #
# r _ ?('a')
# print(?.gRV..
# print(?.gLC..
# ?.iL.. ('b')
# print(?.gLC..
# print(?.gLC__.gRV..
# ?.iR.. 'c')
# print(?.gRC..
# print(?.gRC__.gRV..
# ?.gRC__.sRV.. 'hello')
# print(?.gRC__.gRV..
#
# # a
# # N..
# # <__main__.BinaryTree object at 0x104779c10>
# # b
# # <__main__.BinaryTree object at 0x103b42c50>
# # c
# # hello | [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
543dadfc2ac4efbea7de63620013b771b4ad04ff | 79e1d04867c4298b23c907f92c7119e4bea8ef02 | /ParlAI/parlai/agents/vsepp_caption/modules.py | d94d1f268f6c0b8bab8e2ea1c86bf295d82acfd2 | [
"MIT",
"Apache-2.0"
] | permissive | ethanjperez/convince | 53db0bcd978831799c68fe63ecb0c91473ec40c4 | ccf60824b28f0ce8ceda44a7ce52a0d117669115 | refs/heads/master | 2023-01-08T09:12:16.722614 | 2021-11-03T18:50:30 | 2021-11-03T18:50:30 | 205,189,291 | 27 | 8 | Apache-2.0 | 2023-01-05T22:43:12 | 2019-08-29T15:03:34 | Python | UTF-8 | Python | false | false | 6,876 | py | #!/usr/bin/env python3
# This file is covered under the Apache 2.0 License listed here
# <https://github.com/fartashf/vsepp/blob/master/LICENSE> as it is a
# Derivative Work of the repo.
import torch
from torch import optim
from torch.nn.utils.rnn import pack_padded_sequence
import torch.nn as nn
import torchvision.models as models
import numpy as np
class VSEpp(nn.Module):
    """
    Joint visual-semantic embedding model based on:
    - VSE++: Improving Visual-Semantic Embeddings with Hard Negatives
    `(Faghri et al. 2017) <arxiv.org/abs/1707.05612>`
    Original Implementation found here: <https://github.com/fartashf/vsepp>
    """

    def __init__(self, opt, dict):
        # NOTE: the `dict` parameter name shadows the builtin but is part of
        # the public interface, so it is kept as-is.
        super().__init__()
        self.opt = opt
        self.dict = dict
        # Image branch: CNN features projected into the joint embedding space.
        self.img_enc = EncoderImage(
            embed_size=opt['embed_size'],
            finetune=opt['finetune'],
            cnn_type=opt['cnn_type'],
            no_imgnorm=opt['no_imgnorm'],
        )
        # Text branch: token sequences encoded into the same space.
        self.txt_enc = EncoderText(
            vocab_size=len(self.dict.tok2ind),
            word_dim=opt['word_dim'],
            embed_size=opt['embed_size'],
            num_layers=opt['num_layers'],
        )

    def forward(self, images, captions, lengths):
        """Embed images and captions; a None input yields a None embedding."""
        img_emb = None if images is None else self.img_enc(images)
        cap_emb = None if captions is None else self.txt_enc(captions, lengths)
        return img_emb, cap_emb

    def get_optim(self):
        """Build an Adam optimizer over the trainable parameters."""
        params = list(self.txt_enc.parameters())
        params += list(self.img_enc.fc.parameters())
        if self.opt['finetune']:
            # Also fine-tune the CNN backbone when requested.
            params += list(self.img_enc.cnn.parameters())
        return optim.Adam(params, lr=float(self.opt['learning_rate']), amsgrad=True)
def dot_sim(im, s):
    """
    Dot product similarity between all the image and sentence pairs.

    Entry (i, j) of the result is the inner product of image embedding
    ``im[i]`` with sentence embedding ``s[j]``.
    """
    return im @ s.t()
def l2norm(X):
    """
    L2-normalize columns of X.

    Each row of ``X`` is divided by its Euclidean norm, so every row of
    the result has unit length.
    """
    row_norms = X.pow(2).sum(dim=1, keepdim=True).sqrt()
    return X / row_norms
class ContrastiveLoss(nn.Module):
    """
    Compute contrastive loss.

    Hinge-based triplet ranking loss over an image/caption similarity
    matrix (the VSE++ objective).  Retrieval ranks and top-1 indices are
    returned as a side product.
    """
    def __init__(self, use_cuda, margin=0, max_violation=True):
        # use_cuda: move the diagonal mask to GPU and route .detach()
        #   through .cpu() before converting scores to numpy.
        # margin: hinge margin for the ranking loss.
        # max_violation: if True, use only the hardest negative per query
        #   (the "++" in VSE++) instead of summing over all negatives.
        super().__init__()
        self.use_cuda = use_cuda
        self.margin = margin
        self.sim = dot_sim
        self.max_violation = max_violation
    def forward(self, im, caps, offset=0):
        # Compute the similarity of each image/caption pair
        scores = self.sim(im, caps)
        # Diagonal holds the matched (positive) pair scores; broadcast it
        # along rows (d1) and columns (d2) for the two retrieval directions.
        diagonal = scores.diag().view(im.shape[0], 1)
        d1 = diagonal.expand(scores.size())
        d2 = diagonal.t().expand(scores.size())
        # Caption retrieval score
        cost_cap = (self.margin + scores - d1).clamp(min=0)
        # image retrieval score
        cost_im = (self.margin + scores - d2).clamp(min=0)
        # Zero out the positive-pair (diagonal) entries so they do not
        # count as violations.
        mask = torch.eye(im.shape[0]) > 0.5
        if self.use_cuda:
            mask = mask.cuda()
        cost_cap = cost_cap.masked_fill(mask, 0)
        cost_im = cost_im.masked_fill(mask, 0)
        # Compute the metrics (ranks, top1)
        if self.use_cuda:
            sorted_ranks = np.flip(np.argsort(scores.detach().cpu().numpy()), 1)
        else:
            sorted_ranks = np.flip(np.argsort(scores.detach().numpy()), 1)
        top1 = sorted_ranks[:, 0]
        ranks = []
        # Rank of the ground-truth caption for each image; `offset` shifts
        # the expected index when scoring a slice of a larger batch.
        for idx in range(im.shape[0]):
            ranks.append(np.where(sorted_ranks[idx, :] == (idx + offset))[0][0])
        # keep the maximum violating negative for each query
        if self.max_violation:
            cost_cap = cost_cap.max(1)[0]
            cost_im = cost_im.max(0)[0]
        return cost_cap.sum() + cost_im.sum(), ranks, top1
class EncoderImage(nn.Module):
    """Image-side encoder: a pretrained CNN backbone followed by a linear
    projection into the joint visual-semantic embedding space."""
    def __init__(self, embed_size, finetune=False, cnn_type='resnet152',
                 no_imgnorm=False):
        """Load pretrained CNN and replace top fc layer."""
        super().__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        # Load a pre-trained backbone.
        self.cnn = self.get_cnn(cnn_type)
        # Freeze (or unfreeze, when finetuning) the backbone weights.
        for param in self.cnn.parameters():
            param.requires_grad = finetune
        # Swap the classifier head for a projection into the joint space.
        if cnn_type.startswith('vgg'):
            in_features = self.cnn.classifier._modules['6'].in_features
            self.fc = nn.Linear(in_features, embed_size)
            trimmed = list(self.cnn.classifier.children())[:-1]
            self.cnn.classifier = nn.Sequential(*trimmed)
        elif cnn_type.startswith('resnet'):
            self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)
            self.cnn.module.fc = nn.Sequential()
        self.init_weights()
    def get_cnn(self, arch):
        """Load a pretrained CNN and parallelize over GPUs
        """
        print("=> using pre-trained model '{}'".format(arch))
        model = models.__dict__[arch](pretrained=True)
        # VGG/AlexNet only parallelize the convolutional features;
        # other architectures wrap the whole model.
        if arch.startswith('alexnet') or arch.startswith('vgg'):
            model.features = nn.DataParallel(model.features)
        else:
            model = nn.DataParallel(model)
        return model
    def init_weights(self):
        """Xavier initialization for the fully connected layer
        """
        bound = np.sqrt(6.) / np.sqrt(self.fc.in_features + self.fc.out_features)
        self.fc.weight.data.uniform_(-bound, bound)
        self.fc.bias.data.fill_(0)
    def forward(self, images):
        """Extract image feature vectors."""
        # Normalize in CNN feature space, project, then (optionally)
        # normalize again in the joint embedding space.
        features = l2norm(self.cnn(images))
        features = self.fc(features)
        if not self.no_imgnorm:
            features = l2norm(features)
        return features
class EncoderText(nn.Module):
    """Text-side encoder: word embeddings fed through a GRU; the final
    hidden state, L2-normalized, is the caption embedding."""
    def __init__(self, vocab_size, word_dim, embed_size, num_layers):
        super().__init__()
        self.embed_size = embed_size
        # Word embedding table.
        self.embed = nn.Embedding(vocab_size, word_dim)
        # Caption encoder.
        self.rnn = nn.GRU(word_dim, embed_size, num_layers, batch_first=True)
        self.init_weights()
    def init_weights(self):
        # Small uniform init for the embedding table only; the GRU keeps
        # its default initialization.
        self.embed.weight.data.uniform_(-0.1, 0.1)
    def forward(self, x, lengths):
        """Handles variable size captions
        """
        embedded = self.embed(x)
        packed = pack_padded_sequence(embedded, lengths, batch_first=True)
        # Keep only the final hidden state as the caption representation.
        _, hidden = self.rnn(packed)
        # NOTE(review): squeeze(0) assumes num_layers == 1 -- confirm.
        return l2norm(hidden.squeeze(0))
| [
"ethanperez18@gmail.com"
] | ethanperez18@gmail.com |
ce8b24e9eb6af166554ad5e15d4b7dfdc7662b72 | 87ad372898e793faf1ad89f4bb3b6e84a8002131 | /tests/unit/FundManager/test_remove_strategy_from_queue.py | 3aaa736133d80b2747e318a8d05899b695b809e4 | [] | no_license | atsignhandle/unagii-vault-v2 | 6a9a96c11d34257bc3fdae57455ec3b2f9c0029a | 548f715f34329eb5abebffe40acbeb56a31cb6f3 | refs/heads/main | 2023-08-27T00:59:48.080152 | 2021-09-28T02:47:36 | 2021-09-28T02:47:36 | 413,448,825 | 0 | 0 | null | 2021-10-04T14:07:37 | 2021-10-04T14:07:36 | null | UTF-8 | Python | false | false | 1,123 | py | import brownie
from brownie import ZERO_ADDRESS
import pytest
def test_remove_strategy_from_queue(fundManager, admin, testStrategy, user):
    strategy = testStrategy
    timeLock = fundManager.timeLock()

    # Register the strategy and place it in the withdrawal queue with a
    # debt ratio of 123.
    fundManager.approveStrategy(strategy, {"from": timeLock})
    fundManager.addStrategyToQueue(strategy, 123, 0, 0, {"from": admin})

    # Only authorized accounts may remove a strategy.
    with brownie.reverts("!auth"):
        fundManager.removeStrategyFromQueue(strategy, {"from": user})

    totalDebtRatioBefore = fundManager.totalDebtRatio()
    tx = fundManager.removeStrategyFromQueue(strategy, {"from": admin})

    # The strategy record is deactivated and its debt ratio released.
    record = fundManager.strategies(strategy)
    assert not record["active"]
    assert record["debtRatio"] == 0
    assert fundManager.totalDebtRatio() == totalDebtRatioBefore - 123
    assert fundManager.queue(0) == ZERO_ADDRESS
    assert tx.events["RemoveStrategyFromQueue"].values() == [strategy]

    # A strategy may only be removed while active.
    with brownie.reverts("!active"):
        fundManager.removeStrategyFromQueue(strategy, {"from": admin})
"tsk.nakamura@gmail.com"
] | tsk.nakamura@gmail.com |
3ab1e9c75667769ee77fc58051c71e5a4e31100c | 8242bc1caacbc1d50a3d0458760457ddb45e9df3 | /post/migrations/0003_auto_20200915_1353.py | 9dad5265249e16d5e30700d66c45c85f0e33e9d7 | [] | no_license | ajy720/Outstagram | d0ad1318ff588811070d67b85e844b2770c6596e | bedad304011fcace1e7a589628c5f68753aad147 | refs/heads/master | 2022-12-31T14:36:33.223069 | 2020-10-21T01:43:04 | 2020-10-21T01:43:04 | 295,584,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # Generated by Django 3.1.1 on 2020-09-15 04:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: switches Post.picture uploads into
    # the "postings/" media subdirectory.
    # NOTE(review): generated file -- normally left unedited.
    dependencies = [
        ('post', '0002_auto_20200915_1143'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='picture',
            field=models.ImageField(upload_to='postings/'),
        ),
    ]
| [
"ajy720@gmail.com"
] | ajy720@gmail.com |
b4e98124357bf95756dce5ab0ea3eccabdcb4f80 | 6e322ec7477a0105d5b95055dfafb75ba00a3f43 | /example/manage.py | fa318242823951e3cc2177feba6ab8aa36b03288 | [] | no_license | tomatohater/Django-API-Playground | ca79dbed28701e214c41f20b7bdc7fd5fa09d5a7 | bc950a65bd6f65211e250e71e9777ec865f5d6f6 | refs/heads/master | 2021-01-21T01:39:51.368593 | 2012-11-07T05:15:58 | 2012-11-07T05:15:58 | 6,568,919 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Make the parent directory importable so "example.settings" resolves
    # when the script is run from inside the example/ folder.
    sys.path.append("../")
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
    # Imported lazily so the settings module is configured first.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"fatiherikli@gmail.com"
] | fatiherikli@gmail.com |
47259b4d09cadfbf3fc1ac65f5944bd815ba5c23 | 45c170fb0673deece06f3055979ece25c3210380 | /toontown/coghq/CashbotMintGearRoom_Battle00_Cogs.py | 317a112ac560c39265cefffa3d94bb97f0dac647 | [] | no_license | MTTPAM/PublicRelease | 5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f | 825f562d5021c65d40115d64523bb850feff6a98 | refs/heads/master | 2021-07-24T09:48:32.607518 | 2018-11-13T03:17:53 | 2018-11-13T03:17:53 | 119,129,731 | 2 | 6 | null | 2018-11-07T22:10:10 | 2018-01-27T03:43:39 | Python | UTF-8 | Python | false | false | 1,185 | py | #Embedded file name: toontown.coghq.CashbotMintGearRoom_Battle00_Cogs
from toontown.coghq.SpecImports import *
from toontown.toonbase import ToontownGlobals
CogParent = 10000
BattleCellId = 0
BattleCells = {BattleCellId: {'parentEntId': CogParent,
'pos': Point3(0, 0, 0)}}
CogData = [{'parentEntId': CogParent,
'boss': 0,
'level': ToontownGlobals.CashbotMintCogLevel,
'battleCell': BattleCellId,
'pos': Point3(-6, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0},
{'parentEntId': CogParent,
'boss': 0,
'level': ToontownGlobals.CashbotMintCogLevel + 1,
'battleCell': BattleCellId,
'pos': Point3(-2, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0},
{'parentEntId': CogParent,
'boss': 0,
'level': ToontownGlobals.CashbotMintCogLevel,
'battleCell': BattleCellId,
'pos': Point3(2, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0},
{'parentEntId': CogParent,
'boss': 0,
'level': ToontownGlobals.CashbotMintCogLevel + 1,
'battleCell': BattleCellId,
'pos': Point3(6, 0, 0),
'h': 180,
'behavior': 'stand',
'path': None,
'skeleton': 0}]
ReserveCogData = []
| [
"linktlh@gmail.com"
] | linktlh@gmail.com |
92adebbd1164f1aa26247842f6b9790d98d0c262 | 3d8027f2ef3f723e13b31e056d0c03da4ed74aa8 | /09-09-2020(Day15)/EmailSend/EmailSend/wsgi.py | 1f3a4b5a820f4d8ae86f13ff4fde0be1c4f2cf32 | [] | no_license | satyavani462/Django-Batch5 | 2efbc99223008954896667dee46d2606b6559c82 | 1b975bc21e7fdeed11bef7505d22d4fed126656c | refs/heads/master | 2022-12-08T19:57:33.996903 | 2020-09-10T14:23:15 | 2020-09-10T14:23:15 | 294,688,262 | 1 | 0 | null | 2020-09-11T12:22:16 | 2020-09-11T12:22:15 | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for EmailSend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'EmailSend.settings')
application = get_wsgi_application()
| [
"nivas0803@gmail.com"
] | nivas0803@gmail.com |
dd815bb4dafa784e93eeaed5ca27ace6491cc7b3 | f3eebcb7800bf2bfba537fc017a3ad3bfe9f9264 | /1495.py | ea2f0bf4d5e8a02e479b1474c86f22adffffad06 | [] | no_license | do-park/baekjoon | c7a881f8eb50a8303cf8fa05bd0fef8d68e87de7 | 767a98f743f2fb304b091affe5a9f1c54e0946b8 | refs/heads/master | 2020-12-19T11:42:03.188674 | 2020-12-13T07:17:55 | 2020-12-13T07:17:55 | 235,723,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | # BOJ 1495 메모리 초과
from collections import deque

# BOJ 1495: breadth-first enumeration of every volume reachable after
# each song.  (Known to exceed the memory limit: duplicates are kept.)
N, S, M = map(int, input().split())
V = list(map(int, input().split()))

# reachable[i]: volumes attainable after applying the first i changes.
reachable = [[] for _ in range(N + 1)]
reachable[0].append(S)

queue = deque([[S, 0]])
while queue:
    volume, idx = queue.popleft()
    if idx >= N:
        continue
    # Try lowering, then raising, the volume by V[idx].
    for candidate in (volume - V[idx], volume + V[idx]):
        if 0 <= candidate <= M:
            reachable[idx + 1].append(candidate)
            queue.append([candidate, idx + 1])

print(max(reachable[-1]) if reachable[-1] else -1)
"dohee.pa@gmail.com"
] | dohee.pa@gmail.com |
04bf10e4a7b62ec6d9e671d32a689f37802b8256 | bba02b96608e53bed25eae8fcc30334f238b6a6b | /tests/test_parse_url.py | e4f8b7a62dec7df5d512ede72f1d49a7b9c98898 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | wkentaro/gdown | de1a3ce1058b3457ed4a3087b70cf620d85e9c5a | 5c7507f02718048899b85d4010a6ed93316cbf27 | refs/heads/main | 2023-07-25T11:25:03.088818 | 2023-04-19T19:39:39 | 2023-04-22T06:02:17 | 44,421,756 | 3,266 | 319 | MIT | 2023-09-12T20:53:04 | 2015-10-17T03:01:23 | Python | UTF-8 | Python | false | false | 1,079 | py | import pytest
from gdown.parse_url import parse_url
def test_parse_url():
    file_id = "0B_NiLAzvehC9R2stRmQyM3ZiVjQ"

    # Each case: (url, expected (file_id, is_download_link), should_warn).
    cases = [
        ("https://drive.google.com/open?id={}".format(file_id),
         (file_id, False), True),
        ("https://drive.google.com/uc?id={}".format(file_id),
         (file_id, True), False),
        ("https://drive.google.com/file/d/{}/view?usp=sharing".format(file_id),
         (file_id, False), True),
        ("https://drive.google.com/a/jsk.imi.i.u-tokyo.ac.jp/uc?id={}&export=download".format(  # NOQA
            file_id),
         (file_id, True), False),
    ]
    for url, expected, should_warn in cases:
        if should_warn:
            # Non-download links emit a UserWarning while still parsing.
            with pytest.warns(UserWarning):
                assert parse_url(url) == expected
        else:
            assert parse_url(url) == expected
| [
"www.kentaro.wada@gmail.com"
] | www.kentaro.wada@gmail.com |
055ee322157756c9560886ef28e16fc0bbf63495 | 6710c52d04e17facbc9fb35a7df313f7a2a7bd53 | /1381. Design a Stack With Increment Operation.py | 2e7491daea2c04801a5525ecdf77a3a73cd84aec | [] | no_license | pwang867/LeetCode-Solutions-Python | 535088fbe747a453360457728cc22cf336020bd2 | 188befbfb7080ba1053ee1f7187b177b64cf42d2 | refs/heads/master | 2022-11-13T16:20:28.211707 | 2020-06-28T06:01:14 | 2020-06-28T06:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,302 | py | class CustomStack(object):
def __init__(self, maxSize):
"""
:type maxSize: int
"""
self.stack = []
self.increase = [] # same size as self.stack
self.maxSize = maxSize
def push(self, x):
"""
:type x: int
:rtype: None
"""
if len(self.stack) < self.maxSize:
self.stack.append(x)
self.increase.append(0)
def pop(self):
"""
:rtype: int
"""
if not self.stack:
return -1
res = self.stack[-1] + self.increase[-1]
self.stack.pop()
inc = self.increase.pop()
if self.increase:
self.increase[-1] += inc
return res
def increment(self, k, val):
"""
:type k: int
:type val: int
:rtype: None
"""
k = min(k, len(self.stack))
if k-1 >= 0:
self.increase[k-1] += val
# Your CustomStack object will be instantiated and called as such:
# obj = CustomStack(maxSize)
# obj.push(x)
# param_2 = obj.pop()
# obj.increment(k,val)
"""
Design a stack which supports the following operations.
Implement the CustomStack class:
CustomStack(int maxSize) Initializes the object with maxSize which is the
maximum number of elements in the stack or do nothing if the stack reached the maxSize.
void push(int x) Adds x to the top of the stack if the stack hasn't reached the maxSize.
int pop() Pops and returns the top of stack or -1 if the stack is empty.
void inc(int k, int val) Increments the bottom k elements of the stack by val.
If there are less than k elements in the stack, just increment all the elements in the stack.
Example 1:
Input
["CustomStack","push","push","pop","push","push","push","increment",
"increment","pop","pop","pop","pop"]
[[3],[1],[2],[],[2],[3],[4],[5,100],[2,100],[],[],[],[]]
Output
[null,null,null,2,null,null,null,null,null,103,202,201,-1]
Explanation
CustomStack customStack = new CustomStack(3); // Stack is Empty []
customStack.push(1); // stack becomes [1]
customStack.push(2); // stack becomes [1, 2]
customStack.pop(); // return 2 --> Return top of the stack 2, stack becomes [1]
customStack.push(2); // stack becomes [1, 2]
customStack.push(3); // stack becomes [1, 2, 3]
customStack.push(4); // stack still [1, 2, 3], Don't add another elements as size is 4
customStack.increment(5, 100); // stack becomes [101, 102, 103]
customStack.increment(2, 100); // stack becomes [201, 202, 103]
customStack.pop(); // return 103 --> Return top of the stack 103, stack becomes [201, 202]
customStack.pop(); // return 202 --> Return top of the stack 102, stack becomes [201]
customStack.pop(); // return 201 --> Return top of the stack 101, stack becomes []
customStack.pop(); // return -1 --> Stack is empty return -1.
Constraints:
1 <= maxSize <= 1000
1 <= x <= 1000
1 <= k <= 1000
0 <= val <= 100
At most 1000 calls will be made to each method of increment, push and pop each separately.
"""
| [
"wzhou007@ucr.edu"
] | wzhou007@ucr.edu |
0c4ab292fbcdcac73da82da08c133ce1417f5d29 | 78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227 | /1058.py | 3aab8b5c6e234d2664ad3c6d742f7112a9165bee | [] | no_license | GenryEden/kpolyakovName | 97db13ef93061a8c2afc6cc5acd91337f79063f1 | c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9 | refs/heads/master | 2023-05-23T21:22:51.983756 | 2021-06-21T08:56:49 | 2021-06-21T08:56:49 | 350,466,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | def check(a):
    # Exhaustively test every pair (x, y) in [1, 1023] x [1, 1023]; the
    # candidate value `a` is accepted only if the predicate holds for all
    # of them.
    for x in range(1, 1<<10):
        for y in range(1, 1<<10):
            res = (x*y < 4*a) or (x >= 21) or (x < 4*y)
            if not res:
                # Counterexample found: this `a` fails.
                return False
    return True
for a in range(1, 1<<20):
if check(a):
break
print(a) | [
"a926788@gmail.com"
] | a926788@gmail.com |
ae33221e90b454202350927d0a8ce65837dc6a9c | 2fcf738ee1aa51697739f83d93af5481ac1d8881 | /24pts(1).py | db26abd65bcf6b46bae06779bbc2668195535bf9 | [] | no_license | fanzhangg/24-points | 9c87f0a0b90b75bef3668188809ae1ec2bf571b5 | b1af45e4e664665efe2cc6551c73c30d7de9fe9e | refs/heads/master | 2020-04-09T08:18:06.248872 | 2018-12-03T12:40:39 | 2018-12-03T12:40:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,301 | py | import random
from Twenty_Four import Game
def tran(s):
    # NOTE(review): dead/incomplete helper -- never called in this module;
    # the branch only binds a local that is immediately discarded, so the
    # function always returns None.
    if s == '+':
        a = 1
def cal(a, b, c, d):
    """Search for an arithmetic expression over a, b, c, d equal to 24.

    Tries every ordering of the four numbers, every choice of the three
    binary operators (+, -, *, /) and every parenthesization shape.  The
    first matching expression is printed and True is returned; otherwise
    "No solution" is printed and 0 is returned (the original truthy/falsy
    contract, relied on by callers that test ``!= 0`` / ``is True``).

    Fixes over the previous version:
    - values are compared with a small tolerance instead of exact float
      equality, so solutions that require division (e.g. 8/(3-8/3) for
      3,3,8,8) are no longer missed;
    - the operator table is complete (the old hand-written list omitted
      many triples such as '---'), and more nesting shapes are tried;
    - division by zero is handled by skipping the expression instead of
      pre-filtering with partial guard expressions.
    """
    from itertools import permutations, product

    ops = "+-*/"
    # Positional slots: {0}..{3} are the numbers, {4}..{6} the operators.
    templates = [
        "{0}{4}{1}{5}{2}{6}{3}",
        "({0}{4}{1}){5}{2}{6}{3}",
        "{0}{4}({1}{5}{2}){6}{3}",
        "{0}{4}{1}{5}({2}{6}{3})",
        "({0}{4}{1}){5}({2}{6}{3})",
        "({0}{4}{1}{5}{2}){6}{3}",
        "{0}{4}({1}{5}{2}{6}{3})",
        "(({0}{4}{1}){5}{2}){6}{3}",
        "{0}{4}(({1}{5}{2}){6}{3})",
        "{0}{4}({1}{5}({2}{6}{3}))",
        "({0}{4}({1}{5}{2})){6}{3}",
    ]
    for w, x, y, z in permutations((str(a), str(b), str(c), str(d))):
        for p, q, r in product(ops, repeat=3):
            for tpl in templates:
                exp = tpl.format(w, x, y, z, p, q, r)
                try:
                    # Safe: the expression is built only from the digits of
                    # the four inputs, operators and parentheses.
                    value = eval(exp)
                except ZeroDivisionError:
                    continue
                # Tolerance absorbs float round-off from division.
                if abs(value - 24) < 1e-6:
                    print(exp)
                    return True
    print("No solution")
    return 0
if __name__ == '__main__':
# nums_collection = get_nums_collection(1000)
#
# test(nums_collection)
#
# count = 0
#
# for i in nums_collection:
# a = i[0]
# b = i[1]
# c = i[2]
# d = i[3]
#
# if cal(a, b, c, d) != 0:
# count += 1
#
#
# print("lv's Game has"+str(count)+"solutions")
if __name__ == "__main__":
count = 1
for a in range(1, 11):
for b in range(1, 11):
for c in range(1, 11):
for d in range(1, 11):
print("Given numbers : " + str(a) + ',' + str(b) + ',' + str(c) + ',' + str(d))
if cal(a, b, c, d) is True:
count += 1
print("Number of Solutions : " + str(count))
# if __name__ == '__main__':
# i = 0
# count = 0
# li = [0,0,0,0,0]
# dic = {'+++':0, '*++':0, '+*+':0, '**+':0, '*+*':0, '***':0, '++*':0, '+**':0,
# '-++':0, '/++':0, '-*+':0, '/*+':0, '/+*':0, '/**':0, '-+*':0, '-**':0,
# '+-+':0, '*-+':0, '+/+':0, '*/+':0, '*-*':0, '*/*':0, '+-*':0, '+/*':0,
# '++-':0, '*+-':0, '+*-':0, '**-':0, '*+/':0, '**/':0, '++/':0, '+*/':0,
# '+--':0, '*--':0, '+/-':0, '*/-':0, '*-/':0, '*//':0, '+-/':0, '+//':0,
# '-+-':0, '/+-':0, '-*-':0, '/*-':0, '/+/':0, '/*/':0, '-+/':0, '-*/':0,
# '--+':0, '/-+':0, '-/+':0, '//+':0, '/-*':0, '//*':0, '--*':0, '-/*':0,
# '---':0, '/--':0, '-/-':0, '//-':0, '/-/':0, '///':0, '--/':0, '-//':0
# }
# while i < 1000:
# a = random.randint(1, 10)
# b = random.randint(1, 10)
# c = random.randint(1, 10)
# d = random.randint(1, 10)
# print("Given numbers : " + str(a) + ',' + str(b) + ',' + str(c) + ',' + str(d))
# m = cal(a, b, c, d)
# if m != 0:
# li[m-1] += 1
# count += 1
# i += 1
# print("Number of Solutions : " + str(count))
# print(li)
# for a in range(1, 11):
# for b in range(1, 11):
# for c in range(1, 11):
# for d in range(1, 11):
# print("Given numbers : " + str(a) + ',' + str(b) + ',' + str(c) + ',' + str(d))
# m = cal(a, b, c, d)
# if m in dic:
# count += 1
# dic[m] += 1
# print("Number of Solutions : " + str(count))
# print(dic)
| [
"vanadiumzhang@gmail.com"
] | vanadiumzhang@gmail.com |
677f423defa59332ac699b790fc922baf8137943 | 99f43f4591f63d0c57cd07f07af28c0b554b8e90 | /python/beckjun/A형 특훈/14889 스타트와 링크 itertools.py | 04ec3ba1f53a5ef03e836f75486e2e84597a49e4 | [] | no_license | SINHOLEE/Algorithm | 049fa139f89234dd626348c753d97484fab811a7 | 5f39d45e215c079862871636d8e0306d6c304f7e | refs/heads/master | 2023-04-13T18:55:11.499413 | 2023-04-10T06:21:29 | 2023-04-10T06:21:29 | 199,813,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 900 | py | '''
탐색종료시점을 잘 생각하자.
if min_score ==0:
break
이거 하나로 몇배나 빨라졌다.
'''
from itertools import combinations
n = int(input())
mat = [list(map(int, input().split())) for _ in range(n)]
def cal(lis1):
lis2 = list(filter(lambda x: x not in lis1, range(n)))
l = len(lis1)
a_val = 0
b_val = 0
visited = [0] * l
for i in range(l):
visited[i] = 1
for j in range(i+1, l):
if j > i and visited[j] == 0:
a_val = a_val + mat[lis1[i]][lis1[j]] + mat[lis1[j]][lis1[i]]
b_val = b_val + mat[lis2[i]][lis2[j]] + mat[lis2[j]][lis2[i]]
visited[i] = 0
return abs(a_val - b_val)
min_score = 9876543211
for comb in combinations(range(n), n//2):
sub_val = cal(comb)
if min_score > sub_val:
min_score = sub_val
if min_score == 0:
break
print(min_score)
| [
"dltlsgh5@naver.com"
] | dltlsgh5@naver.com |
9dd7045bbccedcd5b763377bf2a8b5edb9d5cc0a | 49b54185d467fdcdf51fb4f363c09026cef29c5e | /code_examples/4.1B_subscribing_to_an_observable.py | 118da04aed8f7e446bc5d953f45c779a4e9b5f79 | [] | no_license | kinowarrior/oreilly_reactive_python_for_data | ef0f580856aff0a398315a4f7c5f7e22533a686e | 5206fd3cc7e40d871dcfea6b4d30022f8d06c16c | refs/heads/master | 2020-05-19T03:08:05.634115 | 2019-05-06T12:07:38 | 2019-05-06T12:07:38 | 184,793,899 | 0 | 0 | null | 2019-05-03T17:21:01 | 2019-05-03T17:21:00 | null | UTF-8 | Python | false | false | 383 | py | from rx import Observable, Observer
letters = Observable.from_(["Alpha","Beta","Gamma","Delta","Epsilon"])
class MySubscriber(Observer):
    # Observer that prints every event it receives from the stream.
    def on_next(self, value):
        # Called once per emitted item.
        print(value)
    def on_completed(self):
        # Called when the source observable terminates normally.
        print("Completed!")
    def on_error(self, error):
        # Called if the source observable fails; receives the exception.
        print("Error occured: {0}".format(error))
letters.subscribe(MySubscriber())
| [
"thomasnield@live.com"
] | thomasnield@live.com |
f9601982ec27c65ea0ec27744fa3fdefebf9e866 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /wEDHiAcALvS2KuRBJ_17.py | ea2deab4f3a764fd9d1a287738403df66623257f | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py |
class StackCalc:
def __init__(self):
self.s = []
self.err = False
def run(self, instructions):
for i in instructions.split():
if not self.err:
if i.isdigit():
self.s += [int(i)]
elif i == '+':
self.s += [self.s.pop() + self.s.pop()]
elif i == '-':
self.s += [self.s.pop() - self.s.pop()]
elif i == '*':
self.s += [self.s.pop() * self.s.pop()]
elif i == '/':
self.s += [int(self.s.pop() / self.s.pop())]
elif i == 'DUP':
self.s += [self.s[-1]]
elif i == 'POP':
self.s.pop()
else:
self.ret = 'Invalid instruction: '+i
self.err = True
def getValue(self):
if self.err:
return self.ret
if not self.s:
return 0
return self.s[-1]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
dd7db101067a85f5d04748535f2d184c404bdd04 | 8b9d3fa48e87579a74b187abf781d5916b6b47df | /geoutils/bin/geogdelt | 01cea63360e9ada367f374463fcfb621578c4749 | [] | no_license | loum/geoutils | 3f34b10bfaff8978af09f01de03723b71cd8be4f | 034787d9a54856dac12988aaa05c366c5da4d7ec | refs/heads/master | 2021-01-19T08:54:54.983763 | 2014-12-01T04:59:50 | 2014-12-01T04:59:50 | 22,494,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | #!/usr/bin/python
"""Load GDELT files into the Accumulo datastore.
"""
import os
import inspect
import sys
import daemoniser
import geoutils
CONF = os.path.join(os.sep, 'etc', 'geoutils', 'conf', 'geoutils.conf')
def main():
    """Script entry point.

    Parses daemoniser service options, resolves the configuration file
    (command-line value first, then the system default CONF), builds a
    GdeltDaemon and hands it to the service launcher.
    """
    service = daemoniser.Service()
    service.parser.add_option(
        '-f', '--file', dest='file',
        help='file to process inline (start only)')
    service.parser.add_option(
        '-r', '--remove', dest='remove', action='store_true',
        help='delete file after processing')
    script_name = os.path.basename(inspect.getfile(inspect.currentframe()))
    service.check_args(script_name)

    # Optional inline file and delete-after-processing flag.
    command_line_file = service.options.file if service.options.file else None
    remove_file = bool(service.options.remove)

    # Resolve the configuration source; bail out if none is available.
    config_file = service.options.config
    if config_file is None and os.path.exists(CONF):
        config_file = CONF
    if config_file is None:
        sys.exit('Unable to source the geoutils.conf')
    conf = geoutils.GdeltConfig(config_file)
    conf.parse_config()

    # OK, start processing.
    gdeltd = geoutils.GdeltDaemon(
        pidfile=service.pidfile,
        filename=command_line_file,
        dry=service.dry,
        batch=service.batch,
        conf=conf,
        delete=remove_file)
    service.launch_command(gdeltd, script_name)
main()
| [
"lou.markovski@gmail.com"
] | lou.markovski@gmail.com | |
dc99239af8da0845d4171c157e926644349a9b68 | d54cb7a8a26dbf57423511a14cbb6f150ea4bafb | /setup.py | 195fdc163936b3bc230a2a38da75f82c5744e50c | [] | no_license | inclement/vivarium-old | 8957bfbf0073d0b772e47820969b585e627518cf | e9f826edcba35caad3856fc7093f728975d190bd | refs/heads/master | 2021-06-16T02:05:46.221998 | 2017-05-07T21:49:37 | 2017-05-07T21:49:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py |
from setuptools import setup, find_packages
from os import walk
from os.path import join, dirname, sep
import os
import glob
packages = find_packages()
package_data = {'pywlc': ['*.py',
'*_cdef.h',
'wlc.c'], }
data_files = []
setup(name='pywm',
version='0.1',
description='An experimental Wayland compositor using wlc.',
author='Alexander Taylor',
author_email='alexanderjohntaylor@gmail.com',
url='https://github.com/inclement/pywm',
license='MIT',
# install_requires=['cffi>=1.0.0'],
# cffi_modules=['pywm/make_callbacks.py:ffibuilder'],
packages=packages,
package_data=package_data,
)
| [
"alexanderjohntaylor@gmail.com"
] | alexanderjohntaylor@gmail.com |
1e90815b46685bf505b7aaac11595e9b89aa21eb | 28a124b6a2f22a53af3b6bb754e77af88b4138e1 | /DJANGO/ecommerce/ecommerce/settings.py | 83512d1e10f2ad147fb23f05983f4075b7b7c344 | [] | no_license | mebaysan/LearningKitforBeginners-Python | f7c6668a9978b52cad6cc2b969990d7bbfedc376 | 9e1a47fb14b3d81c5b009b74432902090e213085 | refs/heads/master | 2022-12-21T03:12:19.892857 | 2021-06-22T11:58:27 | 2021-06-22T11:58:27 | 173,840,726 | 18 | 4 | null | 2022-12-10T03:00:22 | 2019-03-04T23:56:27 | Python | UTF-8 | Python | false | false | 3,215 | py | """
ADMIN USER
username -> admin
password -> 123
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y_-l5@jh%s_s%01gkb6jj-le7a17k@vqquy$8$v@95%38ob(vt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'store.apps.StoreConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'store.context_processor.menu_links',
'store.context_processor.counter',
],
},
},
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "staticfiles"),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media')
MEDIA_URL = '/media/'
| [
"menesbaysan@gmail.com"
] | menesbaysan@gmail.com |
d75e2100383263a57b5092934b0d2aafcf1626e4 | 211618a491e8031551c7bffbc867b9f85134f650 | /dao/tdkspider.py | ccd1261db75f18506d2ca7701dfdefb82ab8398e | [] | no_license | xjl12322/xinnetSpiderCode | 260c90cee89e070e4cba24d21f80f836440b435a | 3afdccd6797ad61a2de29fb39cc28cb1cc916bb1 | refs/heads/master | 2020-03-26T08:09:58.521521 | 2018-08-14T07:07:46 | 2018-08-14T07:07:46 | 144,689,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,788 | py | #! /usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = "X"
__date__ = "2017/11/6 20:09"
import requests,redis
import json,sys
import re
from lxml import etree
dict_encode = {"utf-8": "utf-8", "GB2312": "gbk", "Windows-": "utf-8"}
# def return_code(url):
# data = {
# "domain": url,
# "title": "",
# "description": "",
# "keywords": ""
# }
# jsondata = json.dumps(data)
# return -1, jsondata
def get_list_url(url, es):
    """Fetch ``http://www.<url>``, extract the page's TDK data
    (title / description / keywords) and store it via ``es.c_tdk``.

    Args:
        url: bare domain name, e.g. ``"meituan.com"``.
        es: storage object exposing ``c_tdk(record) -> bool``.

    Returns:
        None -- both on success and on any request/encoding failure
        (failures simply abort early).
    """
    header = {
        "user-agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"}
    try:
        response = requests.get(url="http://www.{}".format(url), headers=header, timeout=6)
    except Exception:
        return None
    if response.status_code != 200:
        return None
    try:
        judgment = response.apparent_encoding
        if judgment in dict_encode:
            response.encoding = dict_encode[judgment]
        elif judgment.startswith("ISO-"):
            # Fixed: dict_encode has no "gbk" key, so the original
            # dict_encode["gbk"] raised KeyError here and every ISO-* page
            # silently returned None. ISO-* detections are treated as GBK.
            response.encoding = "gbk"
        elif judgment.startswith("Windows-"):
            response.encoding = "utf-8"
        else:
            return None
    except Exception:
        return None
    # Parse the detail page.
    try:
        selector = etree.HTML(response.text)
    except Exception:
        return None
    try:
        title = selector.xpath("//title/text()")[0].strip()
    except Exception:
        title = ""
    try:
        keywords = selector.xpath("//*[contains(@name,'keywords')]/@content|//*[contains(@name,'Keywords')]/@content")[0].strip()
    except Exception:
        keywords = ""
    try:
        description = selector.xpath("//*[contains(@name,'Description')]/@content|//*[contains(@name,'description')]/@content")[0].strip()
    except Exception:
        description = ""
    # Normalise the various separators to commas and escape quotes.
    keywords = keywords.replace("\xa0", "").replace("\r", "").replace("\n", "").replace("\t", "").replace("_", ",")\
        .replace("、", ",").replace("|", ",").replace(",", ",").replace(" ", ",").replace("'", r"\'").replace('"', r"\"")
    description = description.replace("\xa0", "").replace("\r", "").replace("\n", "") \
        .replace("\t", "").replace(",", ",").replace(" ", ",").strip()
    jsondata = {
        "domainName": url,
        "title": title,
        "description": description,
        "keywords": keywords
    }
    boolean = es.c_tdk(jsondata)
    if boolean == True:
        print("插入成功")
if __name__ =="__main__":
    # Dead code below: a former punycode conversion for Chinese domain names.
    # domainname = "hao123.com"
    # domainname = domainname.strip()
    # realdomain = domainname
    # try:
    #     c = re.search(u"[\u4e00-\u9fa5]+", domainname).group()
    #     if c:
    #         domainname = domainname.split(".")
    #         realdomain = domainname[0].encode("utf-8").encode("punycode")
    #         index = 1
    #         while index < len(domainname):
    #             realdomain += "."
    #             c1 = re.search(u"[\u4e00-\u9fa5]+", domainname[index]).group()
    #             if c1:
    #                 realdomain1 = domainname[index].encode("utf-8")
    #                 realdomain = realdomain + "xn--" + realdomain1
    #             else:
    #                 realdomain = realdomain + domainname[index]
    #             index += 1
    #         realdomain = "xn--" + realdomain
    # except:
    #     pass
    # domain = realdomain
    domain = "meituan.com"
    # NOTE(review): get_list_url takes (url, es) and always returns None, but
    # this call passes a single argument and unpacks two return values --
    # running the module directly raises TypeError. Presumably a stale demo
    # driver; confirm before relying on it.
    rtcode, jsondata = get_list_url(domain)
    print(jsondata)
| [
"xjl12322@126.com"
] | xjl12322@126.com |
c82702ec10d3658cf6351e71d23074e94c6fdda0 | d460dc3e406e8458d0c5384026dd84bb400e0598 | /COJ/python/2382.py | b08cd7d114188885155f5ae1cefa63524ce7abb0 | [] | no_license | schiob/OnlineJudges | 8490b5a6ed31c97820f6fb494d22862a54a1df28 | f29f57e100f0f7c04d007285768cb60d43df4016 | refs/heads/master | 2023-06-23T03:55:52.600689 | 2023-06-08T01:48:35 | 2023-06-08T01:48:35 | 29,932,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py |
# coding: utf-8
# In[11]:
# Read three whitespace-separated tokens from one input line.
num = input().split(" ")
# In[16]:
# NOTE(review): int(num[0], int(num[1])) parses the first token as a number
# written in base num[1]; presumably the task asks whether that value is
# divisible by num[2] -- confirm against the judge's problem statement.
print('YES') if int(num[0], int(num[1])) % int(num[2]) == 0 else print('NO')
# In[ ]:
| [
"schiob4@gmail.com"
] | schiob4@gmail.com |
de9b2cb84c2a897ff015137e6d2acac906582dda | 8396606fcb98d8ab2d5e5a139da83e7ce0880324 | /rps/bots_open/multiStrategies5.py | 4f545ea10cfb38b9e33fc6f57d4721c5a5837860 | [] | no_license | bopopescu/Learn2Mine-Main | 87b3d6d7fa804568a93c4c7e8324c95574afc819 | acc0267b86ad6a9e5e1619d494c20407d4710e90 | refs/heads/master | 2021-05-29T17:48:17.844094 | 2015-09-14T16:19:36 | 2015-09-14T16:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | #multiStrategies5
STRATEGY_ONE = 'forecastMyMove'
STRATEGY_TWO = 'forecastOponentMove'
def defeatMove(obj):
    """Return the move that beats *obj*: 'R' -> 'P', 'P' -> 'S', anything else -> 'R'."""
    winners = {'R': 'P', 'P': 'S'}
    return winners.get(obj, 'R')
def lostLastMatche(myMove, oponentMove):
    """Return True when *myMove* neither beat nor tied *oponentMove* (i.e. we lost)."""
    won = myMove == defeatMove(oponentMove)
    tied = myMove == oponentMove
    return not (won or tied)
def mostPlayed(dic):
    """Return which of 'R'/'P'/'S' has the highest count in *dic*.

    Tie-breaking mirrors the original logic exactly: 'R' wins only when it is
    strictly greater than both others; otherwise 'P' wins only when strictly
    greater than 'S', else 'S'.
    """
    rock, paper, scissors = dic['R'], dic['P'], dic['S']
    if rock > paper and rock > scissors:
        return 'R'
    return 'P' if paper > scissors else 'S'
if input == '':
    # First round: the tournament framework injects `input` as '' -- set up
    # persistent state. NOTE(review): this assumes the framework preserves
    # module globals between rounds -- confirm against the runner's contract.
    oponentMoves = {'R': 0, 'P': 0, 'S': 0}
    myMoves = {'R': 0, 'P': 0, 'S': 0}
    consecutiveLoses = 0
    strategy = STRATEGY_ONE
    output = mylastMove = 'R'
else:
    # `input` holds the opponent's previous move ('R', 'P' or 'S').
    oponentMoves[input] += 1
    if lostLastMatche(mylastMove, input):
        consecutiveLoses += 1
        if consecutiveLoses == 2: # swap strategies after two losses in a row
            # NOTE(review): consecutiveLoses is never reset after a win, so
            # this triggers only once, at exactly 2 -- confirm intended.
            strategy = STRATEGY_TWO if strategy == STRATEGY_ONE else STRATEGY_ONE
    # Strategy one:
    # 1 calculate my most frequent move
    # 2 assume that the oponent would try to defeat this move
    # 3 defeat this move
    if strategy == STRATEGY_ONE:
        myMostPlayed = mostPlayed(myMoves)
        move = defeatMove(defeatMove(myMostPlayed))
    # Strategy two:
    # 1 calculate my oponent's most frequent move
    # 2 defeat this move
    else:
        oponentMostPlayed = mostPlayed(oponentMoves)
        move = defeatMove(oponentMostPlayed)
    myMoves[move] += 1
    # `output` is read back by the framework as this bot's move.
    output = mylastMove = move
"pauleanderson@gmail.com"
] | pauleanderson@gmail.com |
97ac4863627042b6aa3459be2e50fbd99441e66d | 06a7dc7cc93d019e4a9cbcf672b23a0bbacf8e8b | /2013_mescog/db_clinic/00_MISSING_CADASIL.py | 6494bf29bfd92f231c3b55162526c88ffe412081 | [] | no_license | neurospin/scripts | 6c06cd218a5f32de9c3c2b7d1d8bda3f3d107458 | f14a2c9cf2cd7f5fbea767b017c3faf36d170bdb | refs/heads/master | 2021-07-11T22:55:46.567791 | 2021-07-02T13:08:02 | 2021-07-02T13:08:02 | 10,549,286 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 3 10:44:49 2013
Add DATEINCL and DATENAIS an compute AGE_AT_INCLUSION in CADASIL subjects
INPUT
-----
"base_commun.csv"
"france2012.csv" => date DATEINCL and DATENAIS for french
"CAD_Munich_Dates.txt" => date DATEINCL and DATENAIS for german
OUTPUT
------
"base_commun_20140109.csv" == "base_commun.csv" + Date (from "france2012.csv" + CAD_Munich_Dates.txt)
"""
##############################################################################
## Add DATEINCL and DATENAIS in CADASIL
# 1) Check presence in france2012.csv
# 2) Compare base_commun.csv vs france2012.csv
# 3) Count missing in base_commun.csv
##############################################################################
import os.path
import string
import pandas as pd
import numpy as np
WD = "/neurospin/mescog"
INPUT_cadasil_base_commun_filepath = os.path.join(WD, "clinic", "base_commun.csv")
INPUT_cadasil_france2012_filepath = os.path.join(WD, "clinic", "france2012.csv")
INPUT_cadasil_munich_date_filepath = os.path.join(WD, "clinic", "CAD_Munich_Dates.txt")
OUTPUT = os.path.join(WD, "clinic", "base_commun_20140109.csv")
cadasil_base_commun = pd.read_table(INPUT_cadasil_base_commun_filepath, header=0, sep=",").replace("-", np.nan)
cadasil_france2012 = pd.read_table(INPUT_cadasil_france2012_filepath, header=0, sep=",").replace("-", np.nan)
cadasil_munich_date = pd.read_table(INPUT_cadasil_munich_date_filepath, header=0)
# Look for differences between cadasil_base_commun & cadasil_france2012
cadasil_base_commun.columns = [s.upper() for s in cadasil_base_commun.columns]
cadasil_france2012.columns = [s.upper() for s in cadasil_france2012.columns]
# drop DATEINCL and DATENAIS of cadasil_base_commun
# (positional second argument: drop(..., 1) means axis=1, i.e. drop columns)
cadasil_base_commun = cadasil_base_commun.drop(["DATEINCL", "DATENAIS"], 1)
# 1) Recode date in cadasil_france2012
# NOTE(review): for "1/28/10 12:00 AM" with pref="20", format_date actually
# returns "201-28-10", not "2010-28-01" as originally claimed here --
# confirm the intended source field order before trusting these dates.
def format_date(dates, pref="20"):
    """Reformat each 'a/b/c [time...]' string as '<pref><a>-<bb>-<cc>'.

    The second and third '/' fields are zero-padded to two digits; the first
    is emitted verbatim right after *pref*.
    NOTE(review): for "1/28/10 12:00 AM" with pref="20" this yields
    "201-28-10" -- confirm that the first field really is the year.
    """
    formatted = []
    for entry in dates:
        first, second, third = entry.split()[0].split("/")
        formatted.append("%s%s-%02d-%02d" % (pref, first, int(second), int(third)))
    return formatted
cadasil_france2012.ID  # NOTE(review): bare expression, no effect -- interactive-session leftover
cadasil_france2012.DATEINCL = format_date(cadasil_france2012.DATEINCL, pref="")
cadasil_france2012.DATENAIS = format_date(cadasil_france2012.DATENAIS, pref="")
cadasil_france2012[["ID", "DATENAIS", "DATEINCL"]]  # no effect (interactive leftover)
# 2) reformat cadasil_munich_date.ID
#cadasil_munich_date.ID contains "CAD_" remove, it will be added later
cadasil_munich_date.ID = [int(s.replace('CAD_', '')) for s in cadasil_munich_date.ID]
cadasil_munich_date.ID  # no effect (interactive leftover)
# 3) Merge to cadasil_base_commun
cada_dates = cadasil_france2012[["ID", "DATENAIS", "DATEINCL"]].append(cadasil_munich_date, ignore_index=True)
# Age = inclusion year minus birth year; assumes the first '-'-separated
# field of DATEINCL/DATENAIS is the year -- TODO confirm field order.
age = pd.DataFrame(dict(AGE_AT_INCLUSION=[int(cada_dates.DATEINCL[i].split("-")[0]) - int(cada_dates.DATENAIS[i].split("-")[0])
                        for i in xrange(cada_dates.shape[0])]))
cada_dates = pd.concat([cada_dates, age], axis=1)
# Python 2 syntax below (print statement, xrange) -- this script targets Python 2.
print cada_dates.to_string()
# Merge with cadasil_base_commun and save
merge = pd.merge(cada_dates, cadasil_base_commun, on="ID")
merge.to_csv(OUTPUT, sep=",", index=False)
| [
"edouard.duchesnay@gmail.com"
] | edouard.duchesnay@gmail.com |
ba079ef7005657c485428d1ab73b3303ac4578e5 | ab28f1bb7e77309fe5afbc0ef3c57d8cd226d0ef | /0x07-python-test_driven_development/4-print_square.py | 55dce02c1a10e4db40711ebeff01ac6de5bf7a5f | [] | no_license | rakiasomai/holbertonschool-higher_level_programming | aebc5245fc0e6d30faf24b1e55dad93ae71fb537 | f07fedb05405f4ca6b45354ca1ccb90c1f48a215 | refs/heads/master | 2020-12-10T14:45:19.245460 | 2020-05-14T16:58:43 | 2020-05-14T16:58:43 | 233,623,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py | #!/usr/bin/python3
'''
This function called "Print Square"
this function prints a square using this symbol "#"
The arguments must be an integer
'''
def print_square(size):
    """Print a size-by-size square made of the '#' character.

    Raises:
        TypeError: when size is not an int/float (bools rejected), or is a
            negative float.
        ValueError: when size is a negative int.
    """
    is_number = isinstance(size, (int, float)) and not isinstance(size, bool)
    if not is_number:
        raise TypeError("size must be an integer")
    if isinstance(size, float) and size < 0:
        raise TypeError("size must be an integer")
    if size < 0:
        raise ValueError("size must be >= 0")
    side = int(size)
    row = "#" * side
    for _ in range(side):
        print(row)
| [
"somai.rakia@hotmail.fr"
] | somai.rakia@hotmail.fr |
46cfb3ce8fe6fdffa1f4bec06b4dcf8eefd0b399 | fc90b69b4381a8696d77e5c98976888c93e939f7 | /gsee/trigon.py | a99744e61cfae49b7e2a214ef37027813a90e4a7 | [
"BSD-3-Clause"
] | permissive | ManiaGary/gsee | aaa9aeae89dc492101fb924b940a86f72db7a789 | 468ba4fb3119f8303aeffb5f2cca5a9e62614a8c | refs/heads/master | 2021-01-23T07:50:52.010402 | 2016-09-06T08:34:33 | 2016-09-06T08:34:33 | 86,457,944 | 1 | 0 | null | 2017-03-28T12:35:15 | 2017-03-28T12:35:15 | null | UTF-8 | Python | false | false | 8,112 | py | """
Irradiance on an inclined plane
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Using trigonometry (Lambert's cosine law, etc).
"""
import datetime
import ephem
import numpy as np
import pandas as pd
def _sun_rise_set(datetime_index, obs):
    """Return one (sunrise, sunset) tuple per entry of *datetime_index*.

    The index is assumed to be daily. Either element of a tuple is None
    when the sun does not rise/set on that day at the observer's location.
    """
    sun = ephem.Sun()

    def _rise_set_for(day):
        # Mutates the shared observer's date, then recomputes the sun for it.
        obs.date = day
        sun.compute(obs)
        rise = None if sun.rise_time is None else sun.rise_time.datetime()
        sset = None if sun.set_time is None else sun.set_time.datetime()
        return (rise, sset)

    return [_rise_set_for(day) for day in datetime_index]
def sun_rise_set_times(datetime_index, coords):
    """Return (sunrise, sunset) tuples for *datetime_index* at *coords*.

    The index is collapsed to its unique calendar days before computing.
    """
    obs = ephem.Observer()
    obs.lat = str(coords[0])
    obs.lon = str(coords[1])
    # Collapse to one entry per calendar day.
    unique_days = datetime_index.to_series().map(pd.Timestamp.date).unique()
    return _sun_rise_set(pd.DatetimeIndex(unique_days), obs)
def sun_angles(datetime_index, coords, rise_set_times=None):
    """Calculate sun angles. Returns a dataframe containing `sun_alt`,
    `sun_zenith`, `sun_azimuth` and `duration` over the passed datetime index.
    Parameters
    ----------
    datetime_index : pandas datetime index
        Handled as if they were UTC no matter what timezone info
        they may supply.
    coords : (float, float) or (int, int) tuple
        Latitude and longitude.
    rise_set_times : list, default None
        List of (sunrise, sunset) time tuples, if not passed, is computed
        here.
    """
    def _sun_alt_azim(sun, obs):
        # Recompute the sun for the observer's current date, then read angles.
        sun.compute(obs)
        return sun.alt, sun.az
    # Initialize ephem objects
    obs = ephem.Observer()
    obs.lat = str(coords[0])
    obs.lon = str(coords[1])
    sun = ephem.Sun()
    # Calculate daily sunrise/sunset times
    if not rise_set_times:
        rise_set_times = _sun_rise_set(datetime_index, obs)
    # Calculate hourly altitude, azimuth, and sunshine
    alts = []
    azims = []
    durations = []
    for index, item in enumerate(datetime_index):
        obs.date = item
        # rise/set times are indexed by day, so need to scale the index.
        # NOTE(review): this assumes `datetime_index` is hourly and starts at
        # hour 0 of its first day -- confirm for other index layouts.
        rise_time, set_time = rise_set_times[int(index / 24)]
        # Set angles, sun altitude and duration based on hour of day:
        if rise_time is not None and item.hour == rise_time.hour:
            # Special case for sunrise hour:
            # `duration` = sunlit minutes within this hour; evaluate the
            # angles at the midpoint of that sunlit part.
            duration = 60 - rise_time.minute - (rise_time.second / 60.0)
            obs.date = rise_time + datetime.timedelta(minutes=duration / 2)
            sun_alt, sun_azimuth = _sun_alt_azim(sun, obs)
        elif set_time is not None and item.hour == set_time.hour:
            # Special case for sunset hour (sunlit part precedes the set time)
            duration = set_time.minute + set_time.second / 60.0
            obs.date = item + datetime.timedelta(minutes=duration / 2)
            sun_alt, sun_azimuth = _sun_alt_azim(sun, obs)
        else:
            # All other hours: evaluate angles at the half-hour mark
            duration = 60
            obs.date = item + datetime.timedelta(minutes=30)
            sun_alt, sun_azimuth = _sun_alt_azim(sun, obs)
            if sun_alt < 0:  # If sun is below horizon
                sun_alt, sun_azimuth, duration = 0, 0, 0
        alts.append(sun_alt)
        azims.append(sun_azimuth)
        durations.append(duration)
    df = pd.DataFrame({'sun_alt': alts, 'sun_azimuth': azims,
                       'duration': durations},
                      index=datetime_index)
    # Zenith angle is the complement of the altitude.
    df['sun_zenith'] = (np.pi / 2) - df.sun_alt
    return df
def _incidence_fixed(sun_alt, tilt, azimuth, sun_azimuth):
return np.arccos(np.sin(sun_alt) * np.cos(tilt)
+ np.cos(sun_alt) * np.sin(tilt)
* np.cos(azimuth - sun_azimuth))
def _incidence_single_tracking(sun_alt, tilt, azimuth, sun_azimuth):
if tilt == 0:
return np.arccos(np.sqrt(1 - np.cos(sun_alt) ** 2
* np.cos(sun_azimuth - azimuth) ** 2))
else:
return np.arccos(np.sqrt(1 - (np.cos(sun_alt + tilt) * np.cos(tilt)
* np.cos(sun_alt) * (1 - np.cos(sun_azimuth
- azimuth))) ** 2))
def _tilt_single_tracking(sun_alt, tilt, azimuth, sun_azimuth):
if tilt == 0:
return np.arctan(np.sin(sun_azimuth - azimuth) / np.tan(sun_alt))
else:
return np.arctan((np.cos(sun_alt) * np.sin(sun_azimuth - azimuth))
/ (np.sin(sun_alt - tilt) + np.sin(tilt)
* np.cos(sun_alt) * (1 - np.cos(sun_azimuth)
- azimuth)))
def aperture_irradiance(direct, diffuse, coords,
                        tilt=0, azimuth=0, tracking=0, albedo=0.3,
                        dni_only=False, angles=None):
    """
    Compute direct and diffuse irradiance on a (possibly tracking) plane.
    Args:
        direct : a series of direct horizontal irradiance with a datetime index
        diffuse : a series of diffuse horizontal irradiance with the same
                  datetime index as for direct
        coords : (lat, lon) tuple of location coordinates
        tilt : angle of panel relative to the horizontal plane, 0 = flat
        azimuth : deviation of the tilt direction from the meridian,
                  0 = towards pole, going clockwise, 3.14 = towards equator
        tracking : 0 (none, default), 1 (tilt), or 2 (tilt and azimuth).
                   If 1, azimuth is the orientation of the tilt axis, which
                   can be horizontal (tilt=0) or tilted.
        albedo : reflectance of the surrounding surface
        dni_only : only calculate and directly return a DNI time series
                   (ignores tilt, azimuth, tracking and albedo arguments)
        angles : solar angles, if default (None), is computed here
    Returns:
        pandas DataFrame with 'direct' and 'diffuse' columns (or the DNI
        series when dni_only is True).
    Raises:
        ValueError: when `tracking` is not 0, 1 or 2.
    """
    # 0. Correct azimuth if we're on southern hemisphere, so that 3.14
    # points north instead of south
    if coords[0] < 0:
        azimuth = azimuth + np.pi
    # 1. Calculate solar angles
    if angles is None:
        sunrise_set_times = sun_rise_set_times(direct.index, coords)
        angles = sun_angles(direct.index, coords, sunrise_set_times)
    # 2. Calculate direct normal irradiance
    # (duration is in minutes, hence the division by 60)
    dni = (direct * (angles['duration'] / 60)) / np.cos(angles['sun_zenith'])
    if dni_only:
        return dni
    # 3. Calculate appropriate aperture incidence angle
    if tracking == 0:
        # Fixed panel.
        incidence = _incidence_fixed(angles['sun_alt'], tilt, azimuth,
                                     angles['sun_azimuth'])
        panel_tilt = tilt
    elif tracking == 1:
        # 1-axis tracking with horizontal or tilted tracking axis
        incidence = _incidence_single_tracking(angles['sun_alt'],
                                               tilt, azimuth,
                                               angles['sun_azimuth'])
        panel_tilt = _tilt_single_tracking(angles['sun_alt'], tilt, azimuth,
                                           angles['sun_azimuth'])
    elif tracking == 2:
        # 2-axis tracking means incidence angle is zero
        # Assuming azimuth/elevation tracking for tilt/azimuth angles
        incidence = 0
        panel_tilt = angles['sun_zenith']
        azimuth = angles['sun_azimuth']
    else:
        raise ValueError('Invalid setting for tracking: {}'.format(tracking))
    # 4. Compute direct and diffuse irradiance on plane
    # (isotropic sky-diffuse term plus ground-reflected albedo term)
    plane_direct = (dni * np.cos(incidence)).fillna(0)
    plane_diffuse = (diffuse * ((1 + np.cos(panel_tilt)) / 2)
                     + albedo * (direct + diffuse)
                     * ((1 - np.cos(panel_tilt)) / 2)).fillna(0)
    return pd.DataFrame({'direct': plane_direct, 'diffuse': plane_diffuse})
| [
"stefan@pfenninger.org"
] | stefan@pfenninger.org |
054a786ca85332a9c5e410c27396930e714de064 | 5419d8d02a7fdec15f542498840d19af939fa130 | /LeetCode30DaysChallenge/Day_26_LongestCommonSubsequence.py | 50b86c08a66089e50faf5d0ac9108d508a4030c0 | [] | no_license | foolchauhan/DataStructureAndAlgorithms | 3cf1d4927b6b11592789cb8d1a6800c9de4822e2 | d53281a724a8b58ccc67ebe8c0e0af6c4ae63c4a | refs/heads/master | 2022-10-10T08:29:14.461248 | 2020-06-10T16:18:36 | 2020-06-10T16:18:36 | 244,270,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | def lcs(X , Y):
m = len(X)
n = len(Y)
L = [[None]*(n+1) for i in range(m+1)]
for i in range(m+1):
for j in range(n+1):
if i == 0 or j == 0 :
L[i][j] = 0
elif X[i-1] == Y[j-1]:
L[i][j] = L[i-1][j-1]+1
else:
L[i][j] = max(L[i-1][j] , L[i][j-1])
return L[m][n]
X = "abcde"
Y = "ace"
print ("Length of LCS is ", lcs(X, Y) )
| [
"chauhanchetan82@gmail.com"
] | chauhanchetan82@gmail.com |
b27c407b97b61db14f0ef5557a0a98f303bcd9a6 | afbae26b958b5ef20548402a65002dcc8e55b66a | /release/stubs.min/Revit/Application.py | 13621cf5daeedceac8a610c432a9699fff210b93 | [
"MIT"
] | permissive | gtalarico/ironpython-stubs | d875cb8932c7644f807dc6fde9dd513d159e4f5c | c7f6a6cb197e3949e40a4880a0b2a44e72d0a940 | refs/heads/master | 2023-07-12T01:43:47.295560 | 2022-05-23T18:12:06 | 2022-05-23T18:12:06 | 95,340,553 | 235 | 88 | NOASSERTION | 2023-07-05T06:36:28 | 2017-06-25T05:30:46 | Python | UTF-8 | Python | false | false | 1,032 | py | # encoding: utf-8
# module Revit.Application calls itself Application
# from RevitNodes,Version=1.2.1.3083,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class Document(object):
    """ A Revit Document """
    # NOTE: auto-generated API stub ("by generator 1.145") -- the property
    # getters return placeholder object() instances and the setters/deleters
    # are no-ops; only the signatures and docstrings carry meaning.
    ActiveView=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Get the active view for the document
    Get: ActiveView(self: Document) -> View
    """
    FilePath=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """The full path of the Document.
    Get: FilePath(self: Document) -> str
    """
    IsFamilyDocument=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Is the Document a Family?
    Get: IsFamilyDocument(self: Document) -> bool
    """
    Location=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Extracts Latitude and Longitude from Revit
    Get: Location(self: Document) -> Location
    """
    # Class attribute; presumably populated by the host at runtime -- the
    # stub default is None.
    Current=None
| [
"gtalarico@gmail.com"
] | gtalarico@gmail.com |
f5e93ea4f56961c6f7fe468c04989918d124c81b | 97be97cfc56fb2170b60b91063dbfe5f1449e3c0 | /python/ARC118/A.py | 8433baa2c379b11ff6a0977f6734dd8082f1d73f | [] | no_license | iWonder118/atcoder | 73d965a0a9ade189733808e47634f2b7776aad4b | 3ab7271e838a2903ff0e07f94015ef13c59577e1 | refs/heads/master | 2022-01-25T10:10:55.007340 | 2021-12-31T14:04:54 | 2021-12-31T14:04:54 | 245,155,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | t, N = map(int, input().split())
# Mark every post-tax price reachable from base prices 1..99
# (price i becomes int(i * (100 + t) / 100) with t% tax added).
handled = [False] * (100 + t)
for i in range(1, 100):
    handled[int(i * (100 + t) / 100)] = True
# Collect the prices in [1, 100 + t) that no base price maps to.
unhandled = []
for i in range(1, 100 + t):
    if not handled[i]:
        unhandled.append(i)
# Unreachable prices repeat with period (100 + t); print the N-th one.
# NOTE(review): int(i * (100 + t) / 100) relies on float division being
# exact at these magnitudes -- i * (100 + t) // 100 would avoid floats.
x, y = divmod(N - 1, len(unhandled))
print((100 + t) * x + unhandled[y])
| [
"52240372+iWonder118@users.noreply.github.com"
] | 52240372+iWonder118@users.noreply.github.com |
90563c027e3c2d7b242cddfec0fee30ac8093d2c | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /9Kuah39g997SvZmex_1.py | ed002987b0ece5b4a8e68e0089af9c017d6ed7be | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | """
Create a function that takes in a sentence as input and returns the **most
common last vowel** in the sentence as a single character string.
### Examples
common_last_vowel("Hello World!") ➞ "o"
common_last_vowel("Watch the characters dance!") ➞ "e"
common_last_vowel("OOI UUI EEI AAI") ➞ "i"
### Notes
* There will only be one common last vowel in each sentence.
* If the word has one vowel, treat the vowel as the last one **even if it is at the start of the word**.
* The question asks you to compile all of the last vowels of each word and returns the vowel in the list which appears most often.
* **y** won't count as a vowel.
* Return outputs in **lowercase**.
"""
from collections import Counter
def common_last_vowel(txt):
    """Return the most common last vowel across the words of *txt*.

    Only 'aeiou' count as vowels ('y' is excluded); the result is a single
    lowercase character. Words containing no vowel are ignored (the
    original raised IndexError on them); returns '' when no word in *txt*
    contains a vowel.
    """
    last_vowels = []
    for word in txt.lower().split():
        vowels = [c for c in word if c in "aeiou"]
        if vowels:  # skip vowel-less words instead of crashing
            last_vowels.append(vowels[-1])
    if not last_vowels:
        return ""
    return Counter(last_vowels).most_common(1)[0][0]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
e5715d9c0c8cfb7d4476e1ce2ced6e8132fecf58 | 4580dcd5eeb99b88716721625e243f888ddbca08 | /src/Ch10/age_lambda_filter.py | 3daeb3c0ca1163577da41b096aab792e330c0566 | [] | no_license | dongupak/Prime-Python | c9c7a211e61f06fe518719b499b01eeb34c1e827 | 767cc9cb9f47fda5efa78776c1d1aa68b43dc6c4 | refs/heads/master | 2022-02-08T13:10:06.797737 | 2022-01-18T02:01:29 | 2022-01-18T02:01:29 | 224,655,276 | 23 | 23 | null | 2020-07-30T05:06:33 | 2019-11-28T13:05:28 | Jupyter Notebook | UTF-8 | Python | false | false | 275 | py | # 코드 10-5 : 람다 함수를 이용한 간략화된 필터
## "으뜸 파이썬", p. 579
ages = [34, 39, 20, 18, 13, 54]
print('성년 리스트 :')
for a in filter(lambda x: x >= 19, ages): # filter() 함수를 사용한 ages의 필터
print(a, end = ' ')
| [
"dongupak@gmail.com"
] | dongupak@gmail.com |
c01e3c700ea2c7a4bda0f7afd1829a96df5727cb | a35d07b11f013a26901942f730d4b720f4e27355 | /warmup1/diff21.py | a8ddabe0c6331fafd4d549bd53c89af715e1e6f7 | [] | no_license | PMiskew/codingbat_solutions_python | 7cbbf293fb6b230e274a8cee373a2222a5a27e8d | 6e62fd0080c2a9bcd59fd4f803cc7966a2cb88d1 | refs/heads/master | 2022-11-13T13:24:53.078833 | 2020-07-14T18:38:06 | 2020-07-14T18:38:06 | 255,197,455 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,123 | py | '''
Question
Given an int n, return the absolute difference between n and 21,
except return double the absolute difference if n is over 21.

diff21(19) → 2
diff21(10) → 11
diff21(25) → 8
'''
def diff21(n):
    """Return the absolute difference between n and 21.

    If n is greater than 21, return double the absolute difference.

    Fixed: the original kept every candidate implementation inside
    triple-quoted string literals (the ''' / #''' toggle pattern), so the
    function body was a no-op and always returned None.
    """
    diff = abs(n - 21)
    return diff * 2 if n > 21 else diff
"paul.miskew@gmail.com"
] | paul.miskew@gmail.com |
6c06011d81adf54862a37378abd3221e37f6e4bc | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_212/ch57_2020_06_19_14_53_17_093175.py | c97fcb636614fa2a80d8da7e6fef8d15eb100590 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | def verifica_progressao (lista):
pa = False
pg =False
if len(lista) == 0:
pa = True
pg = True
else:
aumento = lista[1] - lista[0]
razao = lista[1]/lista[0]
for i in range (len(lista)-1):
if lista[i+1] - lista[i] != aumento:
pa = False
elif lista[i+1] / lista[i] != razao:
pg = False
if pa == True and pg == False:
return ("PA")
elif pa == False and pg ==True:
return ("PG")
elif pa== False and pg==False:
return("NA")
elif pa==True and pg== True:
return ("AG")
| [
"you@example.com"
] | you@example.com |
86ed2728cf8554c1b83eace130b6ded059e02e00 | 2a21e9469d52c9eb2c794efa85227ca81cd39be8 | /app/classes/Database.py | 09b9d6d7950135fec164015f1031afc1cd45993b | [] | no_license | CL0456/WebsiteProject | b1f9b0f531eb5afaa21ac8b6c951be8c2c6a1bd0 | 5e31a7e03d69afe036b0c0f1851fb7f276388cc6 | refs/heads/master | 2023-01-01T20:19:02.874794 | 2020-10-29T01:17:22 | 2020-10-29T01:17:22 | 271,406,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,717 | py | import os
import tempfile
import pyrebase
import requests
import json
from collections import OrderedDict
from flask import current_app as flask_app
from app import SITE_ROOT
class Database():
    """
    Database Class.
    Class to interact with Firebase Realtime Database.
    Wraps a pyrebase app: `self.auth` for authentication, `self.db` for the
    realtime database. Most methods funnel failures through process_error(),
    which re-raises with a human-readable message.
    """
    def __init__(self):
        """
        Initialise class with configuration
        """
        # Load Firebase config data, including Service Account file
        # NOTE(review): the open() handle is never closed -- json.load(open(...))
        # leaks the descriptor until garbage collection.
        firebase_config_file = os.path.join(SITE_ROOT, 'firebase.json')
        firebase_config = json.load(open(firebase_config_file))
        firebase_config["serviceAccount"] = os.path.join(SITE_ROOT, 'firebase.admin.json')
        # Initialize Firebase auth and database
        self.firebase = pyrebase.initialize_app(firebase_config)
        self.auth = self.firebase.auth()
        self.db = self.firebase.database()
        # Create readable errors based on Firebase errors
        self.readable_errors = {
            "INVALID_PASSWORD": "This is an invalid password",
            "EMAIL_NOT_FOUND": "This email has not been registered",
            "EMAIL_EXISTS": "This email already exists. Try logging in instead.",
            "TOO_MANY_ATTEMPTS_TRY_LATER": "Too many attempts, please try again later",
            "USER_DISABLED": "This account has been disabled by an administrator.",
        }
    # Image Requests
    def get_images(self, limit=20, user_id=False):
        """
        - Pulls last 20 images from Firebase.
        - Organises them by User ID or from all users
        Returns the pyrebase result when any images exist, else False.
        """
        try:
            if (user_id):
                images = self.db.child("images").order_by_child("user_id").equal_to(user_id).limit_to_first(limit).get()
            else:
                images = self.db.child("images").order_by_child("user_id").limit_to_first(limit).get()
            # NOTE(review): leftover debug logging of the full result payload.
            flask_app.logger.info('####################### images val #####################')
            flask_app.logger.info(images.val())
            # Non-empty results come back as an OrderedDict.
            if isinstance(images.val(), OrderedDict):
                return images
            else:
                return False
        except Exception as err:
            self.process_error(err)
    def get_category_images(self, category, limit=20):
        """
        Requests all data regarding images in a category
        Returns the pyrebase result when any images exist, else False.
        """
        try:
            images = self.db.child("images").order_by_child("category").equal_to(category).limit_to_first(limit).get()
            if isinstance(images.val(), OrderedDict):
                return images
            else:
                return False
        except Exception as err:
            self.process_error(err)
    def get_image(self, image_id):
        """
        Requests all data regarding an image
        """
        # NOTE(review): unlike the other methods, this re-raises the raw
        # error instead of going through process_error() -- confirm intended.
        error = None
        image = False
        try:
            image = self.db.child("images").child(image_id).get()
        except Exception as err:
            flask_app.logger.info(err)
            error = err
        if error:
            raise Exception(error)
        else:
            return image.val()
    def save_image(self, image_data, image_id):
        """Store *image_data* under the given image id."""
        # NOTE(review): leftover debug logging of the full payload.
        flask_app.logger.info('####################### image_data #####################')
        flask_app.logger.info(image_data)
        try:
            self.db.child("images").child(image_id).set(image_data)
        except Exception as err:
            self.process_error(err)
    def delete_image(self, image_id):
        """Remove the image record with the given id."""
        try:
            self.db.child("images").child(image_id).remove()
        except Exception as err:
            self.process_error(err)
    def remove_matching_value(self, data, value):
        """Return a copy of *data* without the entries equal to *value*."""
        return_data = []
        for key in data:
            if key != value:
                return_data.append(key)
        return return_data
    # User and Account Requests
    def register(self, user_data, password):
        """
        User Attempts to register
        Creates the auth user, then mirrors the profile under /users.
        """
        try:
            user_auth = self.auth.create_user_with_email_and_password(user_data['email'], password)
            user_data['localId'] = user_auth['localId']
            self.db.child("users").child(user_auth['localId']).set(user_data)
            return user_auth
        except Exception as err:
            self.process_error(err)
    def login(self, email, password):
        """
        User attempts to login
        Returns the stored /users profile for the authenticated user.
        """
        try:
            user_auth = self.auth.sign_in_with_email_and_password(email, password)
            user = self.db.child("users").child(user_auth['localId']).get().val()
            return user
        except Exception as err:
            self.process_error(err)
    def update_user(self, user_data):
        """
        User makes a request to change their account details
        `user_data` must contain 'localId' identifying the user record.
        """
        try:
            self.db.child("users").child(user_data['localId']).update(user_data)
            return
        except Exception as err:
            self.process_error(err)
    def process_error(self, error):
        """
        Takes firebase error and turns it into a readable error.
        Always raises; callers do not continue past this point.
        """
        flask_app.logger.info(error)
        readable_error = self.get_readable_error(error)
        raise Exception(readable_error)
    def get_readable_error(self, error):
        """
        Presents an error message if a request error occurs
        Assumes error.args[1] holds the Firebase JSON error payload --
        TODO confirm for all pyrebase exception types.
        """
        error_json = error.args[1]
        error_messsage = json.loads(error_json)['error']['message']
        if error_messsage in self.readable_errors.keys():
            return self.readable_errors[error_messsage]
        else:
            return "There was a problem with your request."
"you@example.com"
] | you@example.com |
d2cb0fdb41bd08f4584135491fc11aca0f8532ba | a154f6985195429b6f839f452576593f09dea6c7 | /cartpole_fitness.py | 2acd4e600600c2d28c05e369da6e0ba0dfcac22f | [] | no_license | mcx/evolved_cartpole | 56e96cfc3e4f92a47d2c330c352672148a4c7f25 | 1c33f656f7eb3cad4d12de1e1a9f5285dae91df1 | refs/heads/master | 2022-02-19T08:56:44.839769 | 2019-09-13T12:45:19 | 2019-09-13T12:45:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | # given an agent, eval fitness against cartpole
import gym
from PIL import Image
class CartPoleFitness(object):
    """Evaluates an agent's fitness on the CartPole-v0 environment.

    An agent must expose ``decide_action(observation) -> action``.
    Fitness is the total reward accumulated over a number of episodes.
    """

    def __init__(self, render=False):
        # Prepare the cart pole environment once; reused across evaluations.
        self.env = gym.make('CartPole-v0')
        self.render = render

    def fitness(self, agent, num_trials=10):
        """Return the total reward *agent* earns over *num_trials* episodes.

        Generalised: the trial count was previously hard-coded to 10
        (the source carried a '# num_trials ?' comment); it is now a
        parameter with the same default, so existing callers are unaffected.
        """
        total_reward = 0
        for _ in range(num_trials):
            observation = self.env.reset()
            done = False
            episode_reward = 0
            while not done:
                action = agent.decide_action(observation)
                observation, reward, done, _info = self.env.step(action)
                episode_reward += reward
                if self.render:
                    self.env.render()
            total_reward += episode_reward
        return total_reward
| [
"matthew.kelcey@gmail.com"
] | matthew.kelcey@gmail.com |
a032d70b9c31a56477988fc39f040166ff30a2bb | bdc635a69a9893d01804044d4458fe6af302e586 | /ciphercode.py | 3decd2774a9a6d707c90eaa886d5f55c398155af | [] | no_license | sidv1905/Python-programs | 55ed197bd0b6b01a6c2e1b06d1f13a76f520dbe5 | 796381324ddeebc65a778b22eb2ba07d60038698 | refs/heads/master | 2021-06-16T01:54:20.606829 | 2019-05-23T21:10:20 | 2019-05-23T21:10:20 | 188,306,140 | 1 | 0 | null | 2021-05-06T19:34:12 | 2019-05-23T21:02:15 | Python | UTF-8 | Python | false | false | 264 | py | S=input()
sf=''
k=int(input())
for i in S:
if i.isupper():
sf += chr(65+(ord(i)+k-65) % 26)
elif i.islower():
sf += chr(97+(ord(i)+k-97) % 26)
elif i.isnumeric():
sf += str((int(i) + k) % 10)
else:
sf += i
print(sf)
| [
"sidvarangaonkar1905@gmail.com"
] | sidvarangaonkar1905@gmail.com |
6a243b740ac60525a548af49025da95c955bd002 | f6703b2afca284bf75e0dbf8f61d77e5251f905c | /euler362.py | a8c1a82abc396cc3badb55813c2bc8abd0374d9d | [] | no_license | rwieckowski/project-euler-python | 2a7aa73670b4684f076ad819bfc464aa0778f96c | be9a455058b20adfd32c814effd8753cc9d39890 | refs/heads/master | 2021-01-10T21:10:44.875335 | 2015-06-23T13:29:58 | 2015-06-23T13:29:58 | 37,920,684 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | """
<P>Consider the number 54<BR />54 can be factored in 7 distinct ways
into one or more factors larger than 1<BR />54 2times27 3times18 6
times9 3times3times6 2times3times9 and 2times3times3times3<BR />If we
require that the factors are all squarefree only two ways remain 3
times3times6 and 2times3times3times3</P><P>Lets call Fsf<var>n</var>
the number of ways <var>n</var> can be factored into one or more
squarefree factors larger than 1 soFsf542</P><P>Let S<var>n</var> be
sumFsf<var>k</var> for <var>k</var>2 to <var>n</var></P><P>S100193</P>
<P>Find S10 000 000 000 </P>
"""
def euler362():
"""
>>> euler362()
'to-do'
"""
pass
if __name__ == "__main__":
import doctest
doctest.testmod() | [
"rwieckowski@ivmx.pl"
] | rwieckowski@ivmx.pl |
110872ad79c95d39290a9a65b9e28178a3a65cdf | 200ec10b652f9c504728890f6ed7d20d07fbacae | /migrations/versions/c782b3c7ff30_.py | 3e3707f526106deafa59f0bf3a3ba1ecc69a68cd | [] | no_license | Ks-Ksenia/flask_shop | f4edc17669c29ae02a89e836c3c48230147ae84f | 9eb44fd22bf99913c9824ea35e3922cb14ef2451 | refs/heads/master | 2023-03-01T13:55:20.749127 | 2021-02-14T09:29:04 | 2021-02-14T09:29:04 | 338,767,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,769 | py | """empty message
Revision ID: c782b3c7ff30
Revises:
Create Date: 2021-01-28 10:59:15.555312
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c782b3c7ff30'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=100), nullable=True),
sa.Column('body', sa.String(length=100), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=200), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=100), nullable=True),
sa.Column('username', sa.String(length=100), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('is_superuser', sa.Boolean(), nullable=True),
sa.Column('created', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
op.create_table('roles_users',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], )
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('roles_users')
op.drop_table('user')
op.drop_table('role')
op.drop_table('post')
# ### end Alembic commands ###
| [
"demag74@mail.ru"
] | demag74@mail.ru |
e24e038928e1c3e8008a338b693451cfac3c780f | f82e67dd5f496d9e6d42b4fad4fb92b6bfb7bf3e | /scripts/client/gui/shared/soundeffectsid.py | 2e8f0e577fd6cd3f4b4408c7c2436eddfc45b327 | [] | no_license | webiumsk/WOT0.10.0 | 4e4413ed4e7b00e22fb85d25fdae9400cbb4e76b | a84f536c73f86d9e8fab559e97f88f99f2ad7e95 | refs/heads/master | 2021-01-09T21:55:00.662437 | 2015-10-23T20:46:45 | 2015-10-23T20:46:45 | 44,835,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,871 | py | # Embedded file name: scripts/client/gui/shared/SoundEffectsId.py
from debug_utils import LOG_DEBUG
import FMOD
class SoundEffectsId(object):
if FMOD.enabled:
SPEND_CREDITS_GOLD = 'spend_credits_and_gold'
SPEND_CREDITS = 'spend_credits'
SPEND_GOLD = 'spend_gold'
EARN_CREDITS_GOLD = 'earn_credits_and_gold'
EARN_CREDITS = 'earn_credits'
EARN_GOLD = 'earn_gold'
TRANSPORT_ENTER = 'transport_enter'
TRANSPORT_EXIT = 'transport_exit'
TRANSPORT_FIRST_STEP = 'transport_first_step'
TRANSPORT_NEXT_STEP = 'transport_next_step'
TRANSPORT_APPROVE = 'transport_approve'
FORT_CREATE = 'fort_create'
FORT_ENTERED_FOUNDATION_STATE = 'fort_entered_foundation_state'
FORT_FIXED_IN_BUILDING = 'fort_fixed_in_building'
FORT_UPGRADE_BUILDING = 'fort_upgrade_building'
FORT_DEMOUNT_BUILDING = 'fort_demount_building'
FORT_ORDER_INPROGRESS = 'fort_order_inprogress'
FORT_ORDER_ISREADY = 'fort_order_isready'
FORT_DIRECTION_CREATE = 'direction_create'
FORT_DIRECTION_CLOSE = 'direction_close'
ACTIVATE_REQUISITION = 'activate_requisition'
ACTIVATE_EVACUATION = 'activate_evacuation'
ACTIVATE_HEAVY_TRUCKS = 'activate_heavyTrucks'
ACTIVATE_MILITARY_MANEUVERS = 'activate_militaryManeuvers'
ACTIVATE_ADDITIONAL_BRIEFING = 'activate_additionalBriefing'
ACTIVATE_TACTICAL_TRAINING = 'activate_tacticalTraining'
ACTIVATE_BATTLE_PAYMENTS = 'activate_battlePayments'
ACTIVATE_SPECIALMISSION = 'activate_specialMission'
END_BUILDING_PROCESS_POSTFIX = '_endPrcBld'
ACTIVATE_DEFENCE_PERIOD = 'activate_defencePeriod'
DEACTIVATE_DEFENCE_PERIOD = 'deactivate_defencePeriod'
ENEMY_DIRECTION_SELECTED = 'enemyDirection_selected'
ENEMY_DIRECTION_HOVER = 'enemyDirection_hover'
MY_DIRECTION_SELECTED = 'myDirection_selected'
FORT_CLAN_WAR_DECLARED = 'fortClanWar_declared'
BATTLE_ROOM_TIMER_ALERT = 'battleRoom_timerAlert'
FORT_CLAN_WAR_RESULT_WIN = 'fortClanWarResult_win'
FORT_CLAN_WAR_RESULT_LOSE = 'fortClanWarResult_lose'
FORT_CLAN_WAR_RESULT_DRAW = 'fortClanWarResult_draw'
CS_ANIMATION_LEAGUE_UP = 'cs_animation_league_up'
CS_ANIMATION_LEAGUE_DOWN = 'cs_animation_league_down'
CS_ANIMATION_DIVISION_UP = 'cs_animation_division_up'
CS_ANIMATION_DIVISION_UP_ALT = 'cs_animation_division_up_alt'
CS_ANIMATION_DIVISION_DOWN = 'cs_animation_division_down'
DYN_SQUAD_STARTING_DYNAMIC_PLATOON = 'dyn_squad_starting_dynamic_platoon'
RUDY_DOG = 'rody_dog'
@classmethod
def getEndBuildingProcess(cls, buildingID):
if FMOD.enabled:
result = buildingID + cls.END_BUILDING_PROCESS_POSTFIX
return result
| [
"info@webium.sk"
] | info@webium.sk |
78de0ffe83efea63c213df8dbd4adbca5a3209bd | 315f126df2d863cb269a021956dc6c92f60794e7 | /09waimaii/app/models/member.py | f27dd3c295a55be873674419f3fb68a450725dd0 | [] | no_license | OCC111/1809flask | 6759cdac5dea2208e38a204987c9271cf3fc69c5 | 7902e1083a558db2eb6ac47c2c8bf2aac9a669d3 | refs/heads/master | 2020-05-30T16:13:26.117557 | 2019-06-12T12:31:10 | 2019-06-12T12:31:10 | 189,841,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | from app import db
from app.models.baseModel import BaseModel
class Member(BaseModel, db.Model):
__tablename__ = 'member'
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(100), nullable=False, default='')
mobile = db.Column(db.String(11), nullable=False, default='')
gender = db.Column(db.Integer, nullable=False, default=0)
avatar = db.Column(db.String(200), nullable=False, default='')
salt = db.Column(db.String(32), nullable=False, default='')
reg_ip = db.Column(db.String(100), nullable=False, default='')
status = db.Column(db.Integer, nullable=False, default=1) # 1有效 0无效
@property
def status_desc(self):
return self.status
@property
def sex_desc(self):
sex_mapping = {
"0": "未知",
"1": "男",
"2": "女"
}
return sex_mapping[str(self.gender)]
class OauthMemberBind(BaseModel, db.Model):
__tablename__ = 'oauth_member_bind'
id = db.Column(db.Integer, primary_key=True)
client_type = db.Column(db.String(20), nullable=False, default='') # 客户端来源类型。qq,weibo,weixin
type = db.Column(db.Integer, nullable=False, default=0) # 类型 type 1:wechat ,
openid = db.Column(db.String(80), nullable=False, default='') # 第三方id
unionid = db.Column(db.String(100), nullable=False, default='')
extra = db.Column(db.Text, nullable=False, default='') # 额外字段
member_id = db.Column(db.Integer, db.ForeignKey('member.id'), nullable=False)
| [
"859899882@qq.com"
] | 859899882@qq.com |
cdc0fc730c4fec7b7f356cae21e10e857bdc8879 | 36c0532bd632a7ec3c06ac1d03f3ba350b7c140b | /projects/serializer.py | 52f61f6f9e7a6884c87b132dbacf57c9822e4dc7 | [
"MIT"
] | permissive | apwao/Awwards | bab22f458655692ba02f278b8b929f6df3ea2339 | 521f98a8ecb9e9d03e2357f391d0611dd7173409 | refs/heads/master | 2022-11-30T01:07:35.120927 | 2019-08-28T09:25:13 | 2019-08-28T09:25:13 | 194,293,236 | 0 | 0 | null | 2022-11-22T03:55:14 | 2019-06-28T15:09:46 | Python | UTF-8 | Python | false | false | 356 | py | from rest_framework import serializers
from .models import Project
class ProjectSerializer(serializers.ModelSerializer):
"""
Class profile serializers to convert the Profile django model into a JSON object
"""
class Meta:
model=Project
fields=('project_title','project_image','project_description','live_link','posted_by')
| [
"sapwao@gmail.com"
] | sapwao@gmail.com |
e3aa334a68217191e7b035f4851912fe116d78c5 | 8c917dc4810e2dddf7d3902146280a67412c65ea | /v_7/NISS/shamil_v3/mrp_custom/report/__init__.py | 3b93f73ef14b14530c20a4b225e509228dff8fb0 | [] | no_license | musabahmed/baba | d0906e03c1bbd222d3950f521533f3874434b993 | 0b997095c260d58b026440967fea3a202bef7efb | refs/heads/master | 2021-10-09T02:37:32.458269 | 2018-12-20T06:00:00 | 2018-12-20T06:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # -*- coding: utf-8 -*-
#############################################################################
#
# NCTR, Nile Center for Technology Research
# Copyright (C) 2011-2012 NCTR (<http://www.nctr.sd>).
#
#############################################################################
#import production_costs_report
import production_cost
import production_order
import sale_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"bakry@exp-sa.com"
] | bakry@exp-sa.com |
be0a6fb0d5da06a658cbb803dde555f11cb92df9 | bc441bb06b8948288f110af63feda4e798f30225 | /data_ops_analysis_sdk/model/inspection/metric_group_pb2.pyi | 11016ea24bf79dbc2a3001776ac332e255bbb812 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,015 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from data_ops_analysis_sdk.model.inspection.dim_pb2 import (
InspectionDim as data_ops_analysis_sdk___model___inspection___dim_pb2___InspectionDim,
)
from data_ops_analysis_sdk.model.inspection.val_pb2 import (
InspectionVal as data_ops_analysis_sdk___model___inspection___val_pb2___InspectionVal,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class InspectionMetricGroup(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
id = ... # type: typing___Text
name = ... # type: typing___Text
category = ... # type: typing___Text
memo = ... # type: typing___Text
@property
def dims(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[data_ops_analysis_sdk___model___inspection___dim_pb2___InspectionDim]: ...
@property
def vals(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[data_ops_analysis_sdk___model___inspection___val_pb2___InspectionVal]: ...
def __init__(self,
*,
id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
category : typing___Optional[typing___Text] = None,
dims : typing___Optional[typing___Iterable[data_ops_analysis_sdk___model___inspection___dim_pb2___InspectionDim]] = None,
vals : typing___Optional[typing___Iterable[data_ops_analysis_sdk___model___inspection___val_pb2___InspectionVal]] = None,
memo : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> InspectionMetricGroup: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> InspectionMetricGroup: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"category",b"category",u"dims",b"dims",u"id",b"id",u"memo",b"memo",u"name",b"name",u"vals",b"vals"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
df441ecebb831006b1216e31a0f500c020819031 | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/2201. Count Artifacts That Can Be Extracted.py | d7667b34133a848ec2001bd66bec971255732dc6 | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | class Solution:
def digArtifacts(self, n: int, artifacts: List[List[int]], dig: List[List[int]]) -> int:
d = {}
for i, a in enumerate(artifacts):
r1, c1, r2, c2 = a
for x in range(r1, r2 + 1):
for y in range(c1, c2 + 1):
d[x, y] = i
for x, y in dig:
if (x, y) in d:
del d[x, y]
return len(artifacts) - len(set(d.values()))
| [
"19241008o"
] | 19241008o |
0a9868f457eb1aa068c047e33bbec3e8c024ff98 | 26d5c795d8aa83bf5cb3f228675ff51e2f704f57 | /tests/testbed_datastores.py | f87d239f1e36a2101a4c9c72df6982ffb0a085e3 | [] | no_license | binarymachines/mercury | 8e13bb10c67a056fe88e02f558d73f1f1b95d028 | db3e2425f4e77a44a97c740f7fff90312a1bd33f | refs/heads/master | 2023-07-08T11:35:26.867494 | 2023-06-25T00:46:23 | 2023-06-25T00:46:23 | 94,708,610 | 2 | 6 | null | 2023-02-15T21:50:06 | 2017-06-18T19:31:50 | Python | UTF-8 | Python | false | false | 701 | py | #!/usr/bin/env python
from mercury import datamap as dmap
from mercury.dataload import DataStore, DataStoreRegistry, RecordBuffer, checkpoint
class TestDatastore(DataStore):
def __init__(self, service_object_registry, *channels, **kwargs):
super.__init__(service_object_registry, *channels, **kwargs)
self.init_values = kwargs
self.num_bulk_writes = 0
self.num_record_writes = 0
@property
def init_param_fields(self):
return [ key for key, value in self.init_values.items()]
def _write(self, recordset, **kwargs):
for record in recordset:
self.num_record_writes += 1
self.num_bulk_writes += 1
| [
"binarymachineshop@gmail.com"
] | binarymachineshop@gmail.com |
e962c04487db5b0daafe715ec6e115ccd8014031 | 9da8754002fa402ad8e6f25659978bd269bbcec8 | /src/139B/cdf_139B.py | a343873acece54f65ada08084ba5a15efaecf9d8 | [
"MIT"
] | permissive | kopok2/CodeforcesSolutionsPython | a00f706dbf368ba0846c8ae86d4145b5dd3e1613 | 35bec0dbcff47765b123b5fe60476014376153df | refs/heads/master | 2023-02-02T03:08:22.097651 | 2020-12-17T22:00:50 | 2020-12-17T22:00:50 | 196,035,812 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | import math
class CodeforcesTask139BSolution:
def __init__(self):
self.result = ''
self.n = 0
self.walls = []
self.m = 0
self.papers = []
def read_input(self):
self.n = int(input())
for x in range(self.n):
self.walls.append([int(y) for y in input().split(" ")])
self.m = int(input())
for x in range(self.m):
self.papers.append([int(y) for y in input().split(" ")])
def process_task(self):
tot_cost = 0
mx = 500 ** 4 + 1
for wall in self.walls:
mn = mx
cov = wall[0] * 2 + wall[1] * 2
for paper in self.papers:
paper_cov = paper[1] * (paper[0] // wall[2])
if paper_cov:
cost = math.ceil(cov / paper_cov) * paper[2]
mn = min(mn, cost)
tot_cost += mn
self.result = str(tot_cost)
def get_result(self):
return self.result
if __name__ == "__main__":
Solution = CodeforcesTask139BSolution()
Solution.read_input()
Solution.process_task()
print(Solution.get_result())
| [
"oleszek.karol@gmail.com"
] | oleszek.karol@gmail.com |
d94e85b910aeaff80552f9e68d32a295d877586b | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /RWLWKmGcbp6drWgKB_23.py | 5adf0badb29c6006de4951b4c8c6af9fb7c7a1cc | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | """
Atticus has been invited to a dinner party, and he decides to purchase a
bottle of wine. However, he has little knowledge of how to choose a good
bottle. Being a very frugal gentleman (yet disliking looking like a
cheapskate), he decides to use a very simple rule. In any selection of **two
or more wines** , he will always buy the second-cheapest.
Given a list of wine dictionaries, write a function that returns the name of
the wine he will buy for the party. If given an empty list, return `None`. If
given a list of only one, Atticus will buy that wine.
### Examples
chosen_wine([
{ "name": "Wine A", "price": 8.99 },
{ "name": "Wine 32", "price": 13.99 },
{ "name": "Wine 9", "price": 10.99 }
]) ➞ "Wine 9"
chosen_wine([{ "name": "Wine A", "price": 8.99 }]) ➞ "Wine A"
chosen_wine([]) ➞ None
### Notes
All wines will be different prices, so there is no confusion in the ordering.
"""
def chosen_wine(wines):
dic = {}
for wine in wines:
dic[wine['name']] = wine['price']
l = sorted(dic.values())
if len(l) == 0: return None
else:
if len(l) == 1: x = l[0]
if len(l) > 1: x = l[1]
for k,v in dic.items():
if v == x: return k
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
8d7a781bfef3a52b7a074199f5ca4714b0e6c872 | 72f6f274a9e4937f99e61eebe14f9b2f301a83f5 | /utils/vocab.py | 377f7baad879bb3d432bda34d57e6de391eff6af | [] | no_license | studio-ousia/textent | e466f8ef4f6910a0f4270014fa29c18aa5f329e0 | 2a73ef2f6a0d29d4d1c1085a75fa0b7592bdd376 | refs/heads/master | 2021-03-22T04:45:57.582737 | 2018-06-03T07:18:28 | 2018-06-03T07:18:28 | 93,811,887 | 20 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,790 | py | # -*- coding: utf-8 -*-
import joblib
import logging
from collections import Counter
from marisa_trie import Trie
from tokenizer import RegexpTokenizer
logger = logging.getLogger(__name__)
class Vocab(object):
def __init__(self, dic, start_index=0):
if isinstance(dic, Trie):
self._dic = dic
else:
self._dic = Trie(dic)
self._start_index = start_index
@property
def size(self):
return len(self)
def __len__(self):
return len(self._dic)
def __iter__(self):
return iter(self._dic)
def __contains__(self, key):
return key in self._dic
def get_index(self, key, default=None):
try:
return self._dic.key_id(key) + self._start_index
except KeyError:
return default
def get_key_by_index(self, index):
return self._dic.restore_key(index - self._start_index)
def save(self, out_file):
joblib.dump(self.serialize(), out_file)
def serialize(self):
return dict(dic=self._dic.tobytes(), start_index=self._start_index)
class WordVocab(Vocab):
def __init__(self, dic, lowercase, start_index=0):
super(WordVocab, self).__init__(dic, start_index)
self._lowercase = lowercase
def __contains__(self, word):
if self._lowercase:
word = word.lower()
return word in self._dic
def get_index(self, word, default=None):
if self._lowercase:
word = word.lower()
return super(WordVocab, self).get_index(word, default)
@staticmethod
def build(description_db, start_index, min_count, lowercase, target_vocab=None):
counter = Counter()
tokenizer = RegexpTokenizer()
for (title, text, _) in description_db.iterator():
if target_vocab is not None and title not in target_vocab:
continue
if lowercase:
counter.update([t.text.lower() for t in tokenizer.tokenize(text)])
else:
counter.update([t.text for t in tokenizer.tokenize(text)])
dic = Trie([w for (w, c) in counter.iteritems() if c >= min_count])
return WordVocab(dic, lowercase, start_index)
def save(self, out_file):
joblib.dump(self.serialize(), out_file)
def serialize(self):
return dict(dic=self._dic.tobytes(), lowercase=self._lowercase,
start_index=self._start_index)
@staticmethod
def load(input):
if isinstance(input, dict):
obj = input
else:
obj = joblib.load(input)
dic = Trie()
dic.frombytes(obj['dic'])
return WordVocab(dic, obj['lowercase'], obj.get('start_index', 0))
class EntityVocab(Vocab):
@staticmethod
def build(description_db, entity_db, white_list, start_index, min_inlink_count,
target_vocab=None):
counter = Counter()
db_titles = set()
for (title, _, titles) in description_db.iterator():
if target_vocab is not None and title not in target_vocab:
continue
counter.update(titles)
db_titles.add(title)
title_list = [t for (t, c) in counter.iteritems() if c >= min_inlink_count]
white_list = [entity_db.resolve_redirect(t) for t in white_list]
white_list = [t for t in white_list if t in db_titles]
title_list = set(title_list + white_list)
return EntityVocab(Trie(title_list), start_index)
@staticmethod
def load(input):
if isinstance(input, dict):
obj = input
else:
obj = joblib.load(input)
dic = Trie()
dic.frombytes(obj['dic'])
return EntityVocab(dic, obj.get('start_index', 0))
| [
"ikuya@ikuya.net"
] | ikuya@ikuya.net |
236d1bfb3fea28dc75f25ad86df9ae345d5ab611 | 8b060d38c63993a3259a80b072768206b558772b | /BlogApp/migrations/0002_article_article_text.py | d99bdd629d6dc36f5b7359e87521a9f2e0f14e77 | [] | no_license | mortadagzar/Simple-Python-feedingTable | d8b0a2a06c1b3d78167241a6f60a2bb00fa9c4ce | 716c68e6b9c55bd2dc8299ca14ccf39431cf0efb | refs/heads/master | 2020-03-30T19:07:16.027807 | 2018-10-14T15:05:28 | 2018-10-14T15:05:28 | 151,529,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # Generated by Django 2.1.1 on 2018-09-22 10:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('BlogApp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='article',
name='article_text',
field=models.TextField(blank=True, null=True),
),
]
| [
"mortadagzar@gmail.com"
] | mortadagzar@gmail.com |
900da8b9e0f950261448fb6dca13409fad7822ae | 31a7c0fa71fa9f7b75406fc6868c698acd714804 | /sucai/views.py | 57f3850c2423baad3f2191892bb4bdeb1640f41f | [] | no_license | cc8848/AiChuangZuoBackground | adb65fc6af937257e4867e95068bf66320a62611 | 72c77f9569f8739a00a82dfe298db8797f04f228 | refs/heads/master | 2020-06-05T02:07:49.050905 | 2018-06-28T10:28:25 | 2018-06-28T10:28:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,642 | py | # -*- coding: utf-8 -*-
from django.http import HttpResponse
import json
import models
# 转换成json形式
def toDicts(objs):
obj_arr = []
for o in objs:
obj_arr.append(o.toDict())
return obj_arr
# 查询平台类型
def deck_list(request):
all_type = models.deck_type.objects.all()
all_dicts = toDicts(all_type)
all_json = json.dumps(all_dicts, ensure_ascii=False)
return HttpResponse(all_json)
# 查询平台领域类型
def field_list(request):
deck = request.GET.get('deck_type')
print deck
deck_id = models.deck_type.objects.get(pingtai_name=deck)
all_objs = models.field_type.objects.filter(deck_type_id=deck_id) # 获取列表用filter,获取单个值用get
all_dicts = toDicts(all_objs)
all_json = json.dumps(all_dicts, ensure_ascii=False)
return HttpResponse(all_json)
# 筛选消息
def list(request):
deck = request.GET.get('deck')
filters = request.GET.get('filters')
print deck
print filters
deck_id = models.deck_type.objects.get(pingtai_name=deck)
filter_id = models.field_type.objects.get(deck_type_id=deck_id, lingyu_name=filters)
all_objs = models.message.objects.filter(deck_type_id=deck_id, field_type_id=filter_id) # 获取列表用filter,获取单个值用get
all_dicts = toDicts(all_objs)
all_json = json.dumps(all_dicts, ensure_ascii=False)
return HttpResponse(all_json)
# 查询所有消息
def all(request):
all_objs = models.message.objects.all() # 获取列表
all_dicts = toDicts(all_objs)
all_json = json.dumps(all_dicts, ensure_ascii=False)
return HttpResponse(all_json)
| [
"shuo.du@edaibu.net"
] | shuo.du@edaibu.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.