blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e33fe0145613768d16866c5fc41bc2560e783bf5
|
70bee1e4e770398ae7ad9323bd9ea06f279e2796
|
/test/test_istio_authorization_policy_source.py
|
d06474312ad2007728f5c1f1dbe3e96ba1395147
|
[] |
no_license
|
hi-artem/twistlock-py
|
c84b420b1e582b3c4cf3631eb72dac6d659d4746
|
9888e905f5b9d3cc00f9b84244588c0992f8e4f4
|
refs/heads/main
| 2023-07-18T07:57:57.705014
| 2021-08-22T04:36:33
| 2021-08-22T04:36:33
| 398,637,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,731
|
py
|
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.istio_authorization_policy_source import IstioAuthorizationPolicySource # noqa: E501
from openapi_client.rest import ApiException
class TestIstioAuthorizationPolicySource(unittest.TestCase):
    """Unit test stubs for the IstioAuthorizationPolicySource model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a test IstioAuthorizationPolicySource instance.

        When *include_optional* is True the optional fields are populated
        as well; when False only required parameters are passed.
        """
        if not include_optional:
            return IstioAuthorizationPolicySource()
        return IstioAuthorizationPolicySource(
            namespaces=[''],
            principals=[''],
        )

    def testIstioAuthorizationPolicySource(self):
        """Construct the model both with and without optional fields."""
        self.make_instance(include_optional=False)
        self.make_instance(include_optional=True)


if __name__ == '__main__':
    unittest.main()
|
[
"aakatev@virtru.com"
] |
aakatev@virtru.com
|
a9b556949473408521e5fae46b690dbc52cc4f55
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/pybites/bitesofpy-master/!201-300/204/test_pomodoro.py
|
ed5d098ac2af44caaaf4144782768d028d668cea
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,331
|
py
|
from typing import Union
import pytest
from pomodoro import break_time, lunch_time, main, session, work_time
@pytest.mark.asyncio
async def test_break_time(capfd):
    """break_time: verify its annotations and the printed break message."""
    anno = break_time.__annotations__
    # Signature contract: delay is int|float, loop is an int counter.
    assert anno["delay"] == Union[int, float]
    assert anno["loop"] == int
    assert anno["return"] is None
    delay = 0.0001
    await break_time(delay, 1)
    output = capfd.readouterr()[0].strip()
    assert "[1]" in output
    assert f"Time for a {int(delay/60)} min break!" in output
@pytest.mark.asyncio
async def test_lunch_time(capfd):
    """lunch_time: verify its annotations and the printed lunch message."""
    anno = lunch_time.__annotations__
    assert anno["delay"] == Union[int, float]
    assert anno["return"] is None
    delay = 0.06
    await lunch_time(delay)
    output = capfd.readouterr()[0].strip()
    assert "Time for lunch!" in output
@pytest.mark.asyncio
async def test_work_time(capfd):
    """work_time: verify its annotations and the printed work prompt."""
    anno = work_time.__annotations__
    assert anno["delay"] == Union[int, float]
    assert anno["return"] is None
    delay = 0.0025
    await work_time(delay, 3)
    output = capfd.readouterr()[0].strip()
    assert "[3]" in output
    assert "Time to work!" in output
@pytest.mark.asyncio
async def test_session(capfd):
    """session: one work/short-break cycle; lunch must not appear."""
    anno = session.__annotations__
    assert anno["work_length"] == Union[int, float]
    assert anno["short_break_length"] == Union[int, float]
    assert anno["long_break_length"] == Union[int, float]
    assert anno["return"] is None
    await session(0.0025, 0.0005, 0.003)
    output = capfd.readouterr()[0].strip()
    assert "Time to work!" in output
    assert "min break!" in output
    assert "Time for lunch!" not in output
    # A single session is expected to print exactly 8 lines.
    assert len(output.splitlines()) == 8
@pytest.mark.asyncio
async def test_main(capfd):
    """main: full day — start/end banners, work, breaks and lunch."""
    anno = main.__annotations__
    assert anno["work_length"] == Union[int, float]
    assert anno["short_break_length"] == Union[int, float]
    assert anno["long_break_length"] == Union[int, float]
    assert anno["lunch_length"] == Union[int, float]
    assert anno["return"] is None
    await main(0.0025, 0.0005, 0.003, 0.01)
    output = capfd.readouterr()[0].strip()
    assert "Pomodor timer started at" in output
    assert "Time to work!" in output
    assert "min break!" in output
    assert "Time for lunch!" in output
    assert "Work day completed at" in output
    # A complete work day prints 45 lines in total.
    assert len(output.splitlines()) == 45
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
98c7e860f3e9b72be38d65d6434b2f524d8aef87
|
28ec3ee4daab919ef005e5913498be3fb96b19a4
|
/polyorg/tests.py
|
d4e1c78d9034a910e93000b9baa3e029a3b0b5b4
|
[
"BSD-2-Clause"
] |
permissive
|
danielbraun/open-shot
|
2bd5b0af9c8b6c32bc0b244edfafa1a871e85972
|
5e7507b57912a047e460b32927412f43df154def
|
refs/heads/master
| 2020-12-25T12:07:26.287583
| 2013-09-15T10:17:16
| 2013-09-15T10:17:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from django.contrib.auth.models import User
from models import Candidate, CandidateList
class CreationTest(TestCase):
    """Exercise creation of Candidate/CandidateList and the status
    filtering performed by CandidateList.get_candidates()."""

    def setUp(self):
        # Keep the users in a list as well so tearDown can clean them up
        # uniformly; named attributes are preserved for the tests.
        self.users = [
            User.objects.create(username='Linus'),
            User.objects.create(username='Guido'),
            User.objects.create(username='Jacob'),
        ]
        self.linus, self.guido, self.jacob = self.users

    def test_candidatelist(self):
        """
        Tests the creation of CandidateList and its basic methods.

        Only candidates with status 'V' should be returned by
        get_candidates(); the initial status and 'X' are excluded.
        """
        cl1 = CandidateList.objects.create(name="Imagine", ballot="I")
        c = Candidate.objects.create(candidate_list=cl1, user=self.jacob, ordinal=1)
        self.assertFalse(cl1.get_candidates())
        c.status = 'V'
        c.save()
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(cl1.get_candidates().count(), 1)
        c.status = 'X'
        c.save()
        self.assertFalse(cl1.get_candidates())
        cl1.delete()

    def tearDown(self):
        # BUG FIX: was misspelled `teardown` (so unittest never called it)
        # and referenced a `self.users` attribute that was never set.
        for u in self.users:
            u.delete()
|
[
"bennydaon@gmail.com"
] |
bennydaon@gmail.com
|
49256118e79555242d05bc0d7a022c34619aa4ae
|
c86cd75be4f5b4eef605fb0f40743406ae19685f
|
/core/ui_test.py
|
cd1ce62099cf077a55dbf0934f3f6763c20bac3b
|
[
"Apache-2.0"
] |
permissive
|
jyn514/oil
|
3de53092c81e7f9129c9d12d51a8dfdbcacd397b
|
42adba6a1668ff30c6312a6ce3c3d1f1acd529ec
|
refs/heads/master
| 2022-02-23T08:12:48.381272
| 2019-03-15T08:54:31
| 2019-03-15T08:54:31
| 176,316,917
| 0
| 0
|
Apache-2.0
| 2019-03-18T15:36:14
| 2019-03-18T15:36:13
| null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
#!/usr/bin/python -S
from __future__ import print_function
"""
ui_test.py: Tests for ui.py
"""
import unittest
from core import ui # module under test
class UiTest(unittest.TestCase):
    """Smoke test for core.ui."""
    def testFoo(self):
        # No assertion — this only checks that usage() runs without raising.
        ui.usage('oops')
if __name__ == '__main__':
    unittest.main()
|
[
"andy@oilshell.org"
] |
andy@oilshell.org
|
f4430e22cc2f6c99418d9e381141e4def5bbadbe
|
07504838d12c6328da093dce3726e8ed096cecdb
|
/pylon/resources/properties/loadGroupName.py
|
91317396187f0aba508194373e1dc407e7c35dc1
|
[] |
no_license
|
lcoppa/fiat-lux
|
9caaa7f3105e692a149fdd384ec590676f06bf00
|
7c166bcc08768da67c241078b397570de159e240
|
refs/heads/master
| 2020-04-04T02:47:19.917668
| 2013-10-10T10:22:51
| 2013-10-10T10:22:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,416
|
py
|
"""loadGroupName userdefined property type, originally defined in resource
file set iot 90:00:00:05:00:00:00:00-1."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:15.
import pylon.resources.base
from pylon.resources.userdefined import userdefined
import pylon.resources.enumerations.char_encoding_t
class loadGroupName(pylon.resources.base.Structure):
    """loadGroupName userdefined property type. Text load group name. Name
    for a load group to be used by optional user interface applications;
    used to create an array of load group names."""
    def __init__(self):
        # Property identity: key 10 in scope 1 (mirrored again below in
        # _property_scope/_property_key).
        super().__init__(
            key=10,
            scope=1
        )
        # Character-encoding field for the name buffer.
        self.__encoding = pylon.resources.enumerations.char_encoding_t.char_encoding_t(
        )
        self._register(('encoding', self.__encoding))
        # Fixed-length name buffer: 120 unsigned 8-bit scalars (0..255).
        self.__name = pylon.resources.base.Array(
            [
                pylon.resources.base.Scaled(
                    size=1,
                    signed=False,
                    minimum=0,
                    maximum=255
                ) for i in range(120)
            ]
        )
        self._register(('name', self.__name))
        # Default value: 121 zero bytes (1 encoding byte + 120 name bytes),
        # matching __len__() below.
        self._default_bytes = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        self._original_name = 'UCPTloadGroupName'
        self._property_scope, self._property_key = 1, 10
        self._definition = userdefined.add(self)
    def __set_encoding(self, v):
        self.__encoding._value = v
    encoding = property(
        lambda self: self.__encoding._value,
        __set_encoding,
        None,
        """."""
    )
    def __set_name(self, v):
        self.__name._value = v
    name = property(
        lambda self: self.__name._value,
        __set_name,
        None,
        """."""
    )
    def __set(self, v):
        # Whole-structure assignment: only another loadGroupName may be
        # copied in; both fields are then assigned member-wise.
        if not isinstance(v, type(self)):
            raise TypeError(
                'Expected instance of {0}, got {1}'.format(
                    type(self),
                    type(v)
                )
            )
        self.__set_encoding(v.__encoding)
        self.__set_name(v.__name)
    _value = property(lambda self: self, __set)
    def __len__(self):
        """Return the length of the type, in bytes."""
        return 121
if __name__ == '__main__':
    # unit test code.
    item = loadGroupName()
    pass
|
[
"lcoppa@rocketmail.com"
] |
lcoppa@rocketmail.com
|
595a9e74a588b9a31577ba1c84a3e2bd2e99a3bc
|
e4c798246339e765f04424d727106e80e810f47c
|
/Medium/iNo008.py
|
7c70fbb6da2f54341b2bef3bbcc9b1e6fae85c2f
|
[] |
no_license
|
kikihiter/LeetCode
|
3a61dc4ee3223d634632e30b97c30a73e5bbe253
|
62b5ae50e3b42ae7a5a002efa98af5ed0740a37f
|
refs/heads/master
| 2021-05-26T08:05:00.126775
| 2019-05-21T09:18:37
| 2019-05-21T09:18:37
| 127,999,978
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
class Solution(object):
    def myAtoi(self, str):
        """Convert *str* to a 32-bit signed integer (LeetCode 8, atoi).

        Leading whitespace is skipped, an optional single '+'/'-' sign is
        consumed, then the longest run of leading ASCII digits is parsed.
        The result is clamped to [-2147483648, 2147483647]; input that
        does not start with a number yields 0.

        :type str: str
        :rtype: int
        """
        # NOTE: the parameter shadows the builtin `str`; kept for
        # interface compatibility with the original signature.
        s = str.lstrip()
        if s == "":
            return 0
        negative = False
        if s[0] == '-':
            negative = True
            s = s[1:]
        elif s[0] == '+':
            s = s[1:]
        # The first character after the sign must be an ASCII digit.
        if not s or s[0] not in '0123456789':
            return 0
        digits = ""
        for ch in s:
            if ch not in '0123456789':
                break
            digits = digits + ch
        digits = digits.lstrip('0')
        if digits == "":
            return 0
        # BUG FIX: removed a stray Python 2 debug statement (`print rStr`)
        # that made this file a SyntaxError under Python 3.
        if negative:
            return max(-int(digits), -2147483648)
        return min(int(digits), 2147483647)
|
[
"noreply@github.com"
] |
kikihiter.noreply@github.com
|
be4bc8669b12545f0c578c87d72131ebfc8489d0
|
947273c16f8984a20cd002b99b52facd6e63e43b
|
/server/authentication/urls.py
|
dacfd5c43349691a7bc454b922558db58c2608aa
|
[] |
no_license
|
ecuaappgye/App
|
8e3b50b4f7a8b9c50876d24343781e8f53a51bbc
|
2df7be6fd206d012f6a83acd0aa0cb75cf6d5937
|
refs/heads/master
| 2023-07-05T00:48:24.341021
| 2021-07-31T17:02:12
| 2021-07-31T17:02:12
| 385,267,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,384
|
py
|
from django.urls import include, path
from .apis import (UserEmailChange, UserGetApi, UserLoginApi, UserLogoutApi,
UserPasswordChange, UserPasswordReset,
UserPasswordResetCheck, UserRegisterApi,
UserRegisterVerifyApi, UserRegisterVerifyCheckApi,
UserUpdateApi)
# URL routing for the authentication app.
authentication_urls = [
    # Account registration and verification.
    path('register/', UserRegisterApi.as_view(), name='register'),
    path('register/verify/<int:user_id>/', UserRegisterVerifyApi.as_view(), name='register_verify'),
    path('register/verify_check/<int:user_id>/', UserRegisterVerifyCheckApi.as_view(), name='register_verify_check'),
    # Session management.
    path('login/', UserLoginApi.as_view(), name='login'),
    path('logout/', UserLogoutApi.as_view(), name='logout'),
    # Credential recovery and account changes.
    path('password_reset/', UserPasswordReset.as_view()),
    path('password_reset_check/', UserPasswordResetCheck.as_view()),
    path('password_change/<int:user_id>/', UserPasswordChange.as_view(), name='password_change'),
    path('email_change/<int:user_id>/', UserEmailChange.as_view(), name='email_change'),
    path('get/<int:user_id>/', UserGetApi.as_view(), name='get'),
]
# Driver-specific endpoints.
drivers_urls =[
    path('update/<int:user_id>/', UserUpdateApi.as_view(), name='update')
]
# Mount both groups under namespaced prefixes.
urlpatterns =[
    path('auth/', include((authentication_urls, 'auth'))),
    path('driver/', include((drivers_urls, 'driver')))
]
|
[
"italobarzola18@gmail.com"
] |
italobarzola18@gmail.com
|
46b52fe8e5c60205d2161d38dc9193d19d105f9e
|
cba90cdd06eced813be6ad80e6295587223c4600
|
/betfairlightweight/endpoints/navigation.py
|
8795b7d2b4a2e08e79350a3a78ae3dd5e1c20f13
|
[
"MIT"
] |
permissive
|
mberk/betfair
|
1a22528b881e02567626dbe7e8c4f0197809c38e
|
6b064a68c8d2afceda81b70d74b6a0ee9601f228
|
refs/heads/master
| 2023-03-07T02:33:06.443407
| 2022-08-16T08:06:10
| 2022-08-16T08:06:10
| 192,976,576
| 0
| 1
|
MIT
| 2023-03-01T12:03:37
| 2019-06-20T19:28:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,510
|
py
|
import requests
from ..exceptions import APIError, InvalidResponse
from ..utils import check_status_code
from .baseendpoint import BaseEndpoint
from ..compat import json
class Navigation(BaseEndpoint):
    """
    Navigation operations.
    """

    def list_navigation(self, session: requests.Session = None) -> dict:
        """
        This Navigation Data for Applications service allows the retrieval of the
        full Betfair market navigation menu from a compressed file.

        :param requests.session session: Requests session object
        :rtype: json
        """
        return self.request(session=session)

    def request(
        self, method: str = None, params: dict = None, session: requests.Session = None
    ) -> (dict, float):
        """Issue the GET request to the navigation URI and decode the JSON body."""
        http = session if session is not None else self.client.session
        try:
            response = http.get(
                self.url,
                headers=self.client.request_headers,
                timeout=(self.connect_timeout, self.read_timeout),
            )
        except requests.ConnectionError as e:
            raise APIError(None, method, params, e)
        except Exception as e:
            raise APIError(None, method, params, e)
        check_status_code(response)
        try:
            return json.loads(response.content.decode("utf-8"))
        except ValueError:
            raise InvalidResponse(response.text)

    @property
    def url(self) -> str:
        return self.client.navigation_uri
|
[
"paulingliam@gmail.com"
] |
paulingliam@gmail.com
|
42cd98f60f8637e2f8b57280dee6eeb14f3eac98
|
bb4dc40ec0b62e5d2fc3ce1234013aebd4e648d5
|
/src/modules/customised/payroll/hra/__init__.py
|
708a454f4468ac2e8c826538ed0f9f59fab6f7cf
|
[] |
no_license
|
kakamble-aiims/work
|
ba6cbaf4c525ff7bc28d0a407f16c829d0c35983
|
cd392bf0e80d71c4742568e9c1dd5e5211da56a9
|
refs/heads/master
| 2022-04-02T14:45:58.515014
| 2019-12-31T14:00:51
| 2019-12-31T14:00:51
| 199,015,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
from trytond.pool import Pool
from .hra import *
def register():
    """Register the HRA payroll models with the Tryton pool."""
    Pool.register(
        HRA_Allowance,
        module='hra', type_='model')
|
[
"kakamble.aiims@gmail.com"
] |
kakamble.aiims@gmail.com
|
dd8ff876cdff51683095b93c5c1e9985b5a29584
|
9732da539d940904cf09b4164a307cb1a58fbb35
|
/superhero/ability_and_armor.py
|
bb0e7c0ea30847095581385d460942d5d2e5ad75
|
[] |
no_license
|
makhmudislamov/fun_python_exercises
|
f3c7557fa6ed400ee196252a84ad7b6b23b913f1
|
21ab89540fb5f4f04dbdb80f361bf4febd694c11
|
refs/heads/master
| 2020-05-26T05:42:20.115833
| 2019-10-17T03:28:57
| 2019-10-17T03:28:57
| 188,125,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
from random import randint
class Ability:
    def __init__(self, name, max_damage):
        """Store the ability's display name and its maximum damage pool."""
        self.name = name
        self.max_damage = max_damage

    def __str__(self):
        return f'This ability is {self.name}'

    def ability_attack(self):
        """Roll a random attack value between 0 and the current maximum.

        The rolled amount is drained from the remaining damage pool
        before being returned.
        """
        rolled = randint(0, self.max_damage)
        self.max_damage -= rolled
        return rolled
class Weapon(Ability):
    def ability_attack(self):
        """Return a random value between one half and the full attack power.

        Unlike Ability.ability_attack, the weapon's damage pool is not
        depleted by attacking.
        """
        low = self.max_damage // 2
        return randint(low, self.max_damage)
class Armor():
    def __init__(self, name, max_block):
        """Store the armor's display name and its maximum block strength."""
        self.name = name
        self.max_block = max_block

    def block(self):
        """Return a random block value between 0 and max_block."""
        return randint(0, self.max_block)
# if __name__ == "__main__":
# pass
|
[
"sunnatovichvv@gmail.com"
] |
sunnatovichvv@gmail.com
|
edb363be7d18412f48d26946d0a265a266919f9e
|
9d43b8a3b53001f25a347fd96e5c49538b0c509a
|
/mxshop/apps/trade/views.py
|
30e854b8ad252b98ccda10e6bfe8ca3d67cb173a
|
[] |
no_license
|
w8833531/mxfresh
|
b81b7e4223536c6bedb049009386015935d33987
|
46b83fafdae8450491344c531de81a45ab5d8aae
|
refs/heads/master
| 2021-04-09T15:53:50.829921
| 2018-08-08T01:41:14
| 2018-08-08T01:41:14
| 125,793,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,746
|
py
|
import random, time
from datetime import datetime
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework import status
from rest_framework import permissions
from rest_framework import authentication
from rest_framework import mixins
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from utils.permissions import IsOwnerOrReadOnly
from utils.alipay import AliPay
from .serializers import ShopCartSerializer,ShopCartDetailSerializer, OrderSerializer, OrderDetailSerializer
from .models import ShoppingCart, OrderInfo, OrderGoods
from mxshop.settings import appid, private_key_path, alipay_pub_key_path, alipay_notify_url, alipay_return_url
# Create your views here.
class ShoppingCartViewset(viewsets.ModelViewSet):
    """
    Shopping-cart operations.
    list:
        List the items in the cart
    create:
        Add an item to the cart
    delete:
        Remove an item from the cart
    update:
        Update an item in the cart
    """
    authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
    # Cart rows are addressed by the goods id, not by the row's own pk.
    lookup_field = "goods_id"
    # override get_serializer_class method, if list return DetailSerializer
    def get_serializer_class(self, *args, **kwargs):
        if self.action == 'list':
            return ShopCartDetailSerializer
        else:
            return ShopCartSerializer
    def get_queryset(self):
        # Users only ever see their own cart rows.
        return ShoppingCart.objects.filter(user=self.request.user)
class OrderViewset(mixins.ListModelMixin, mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.DestroyModelMixin, viewsets.GenericViewSet):
    """
    Order management.
    List:
        List orders
    Delete:
        Delete an order
    Create:
        Create an order
    Retrieve:
        Get order details
    """
    authentication_classes = (JSONWebTokenAuthentication, authentication.SessionAuthentication)
    permission_classes = (permissions.IsAuthenticated, IsOwnerOrReadOnly)
    serializer_class = OrderSerializer
    def get_queryset(self):
        # Users can only operate on their own orders.
        return OrderInfo.objects.filter(user=self.request.user)
    def get_serializer_class(self):
        if self.action == 'retrieve':
            return OrderDetailSerializer
        else:
            return OrderSerializer
    # Generate the order number: current time + user id + random suffix.
    def generate_order_sn(self):
        random_int = random.Random()
        order_sn = "{time_str}{userid}{random_str}".format(time_str=time.strftime('%Y%m%d%H%M%S'),
            userid=self.request.user.id, random_str=random_int.randint(10, 99))
        return order_sn
    # On order creation, override perform_create to set order_sn in serializer.data.
    def perform_create(self, serializer):
        """
        When creating an order: attach the cart's goods to it, decrement
        the goods' stock accordingly, and empty the shopping cart.
        """
        # Save the current user's order.
        order = serializer.save(order_sn=self.generate_order_sn())
        # Fetch every item currently in this user's shopping cart.
        shop_carts = ShoppingCart.objects.filter(user=self.request.user)
        # Move goods and quantities into the order, reduce stock, clear the cart.
        for shop_cart in shop_carts:
            # Create the order-goods row.
            order_goods = OrderGoods()
            # Copy the goods and its quantity onto it.
            order_goods.goods = shop_cart.goods
            order_goods.goods_num = shop_cart.nums
            # Decrement the goods' stock by the ordered quantity.
            order_goods.goods.goods_num -= order_goods.goods_num
            order_goods.goods.save()
            # Attach to the order and persist.
            order_goods.order = order
            order_goods.save()
            # Remove the cart entry.
            shop_cart.delete()
        return order
    # On order deletion, override perform_destroy to restore goods stock.
    def perform_destroy(self, instance):
        if instance.pay_status != "TRADE_SUCCESS":
            # If the order was never paid, put each item's quantity back
            # into stock before deleting the order.
            order_goods = OrderGoods.objects.filter(order=instance.id)
            for order_good in order_goods:
                order_good.goods.goods_num += order_good.goods_num
                order_good.goods.save()
        instance.delete()
class AliPayViewset(APIView):
    def get(self, request):
        """
        Handle the Alipay return_url redirect (the user's browser comes
        back here after paying).
        :param request:
        :return:
        """
        processed_dict = {}
        for key, value in request.GET.items():
            processed_dict[key] = value
        # The signature must be removed from the params before verification.
        sign = processed_dict.pop("sign", None)
        alipay = AliPay(
            appid=appid,
            app_notify_url=alipay_notify_url,
            app_private_key_path=private_key_path,
            alipay_public_key_path=alipay_pub_key_path,  # Alipay's public key, used to verify messages sent back by Alipay — not your own public key
            debug=True,  # defaults to False
            return_url=alipay_return_url,
        )
        verify_re = alipay.verify(processed_dict, sign)
        # NOTE(review): when verification fails this method falls through
        # and returns None — confirm that is intended.
        if verify_re is True:
            # order_sn = processed_dict.get('out_trade_no', None)
            # trade_no = processed_dict.get('trade_no', None)
            # trade_status = processed_dict.get('trade_status', None)
            # existed_orders = OrderInfo.objects.filter(order_sn=order_sn)
            # for existed_order in existed_orders:
            # existed_order.pay_status = trade_status
            # existed_order.trade_no = trade_no
            # existed_order.pay_time = datetime.now()
            # existed_order.save()
            return Response("success")
    def post(self, request):
        """
        Handle the Alipay notify_url callback (server-to-server
        notification of the trade result).
        :param request:
        :return:
        """
        processed_dict = {}
        for key, value in request.POST.items():
            processed_dict[key] = value
            print(key, value)
        sign = processed_dict.pop("sign", None)
        alipay = AliPay(
            appid=appid,
            app_notify_url=alipay_notify_url,
            app_private_key_path=private_key_path,
            alipay_public_key_path=alipay_pub_key_path,  # Alipay's public key, used to verify messages sent back by Alipay — not your own public key
            debug=True,  # defaults to False
            return_url=alipay_return_url,
        )
        verify_re = alipay.verify(processed_dict, sign)
        if verify_re is True:
            order_sn = processed_dict.get('out_trade_no', None)
            trade_no = processed_dict.get('trade_no', None)
            trade_status = processed_dict.get('trade_status', None)
            existed_orders = OrderInfo.objects.filter(order_sn=order_sn)
            for existed_order in existed_orders:
                existed_order.pay_status = trade_status
                # On success, bump each ordered goods' sold count (note: this
                # need not be real-time; a background job would be better).
                if existed_order.pay_status == "TRADE_SUCCESS":
                    order_goods = existed_order.goods.all()
                    for order_good in order_goods:
                        order_good.goods.sold_num += order_good.goods_num
                        order_good.goods.save()
                existed_order.trade_no = trade_no
                existed_order.pay_time = datetime.now()
                existed_order.save()
            return Response("success")
|
[
"w8833531@hotmail.com"
] |
w8833531@hotmail.com
|
7d6c817fe544b5cc80a68b8c685ce92faf0c9ef5
|
a9d6a3b0fe418e4e5cc131ebc05f9b56c0e4543e
|
/chapter11-django/site02/site02/settings.py
|
1ba07484b03cf34c8252583125bc6c301d4cb224
|
[] |
no_license
|
Kianqunki/Python_CorePythonApplicationsProgramming
|
34a36ba64bdc303814de507c4fcfc3c81ff88b5f
|
77263c1fde0d02aade180f7e73d2cdee1d170d58
|
refs/heads/master
| 2021-05-07T02:41:44.567088
| 2014-10-27T17:43:51
| 2014-10-27T17:43:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,596
|
py
|
"""
Django settings for site02 project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y97upk5xk__c@j95sw4v-pf&#i45ir$cm6-ya)byzikor7+2sv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'approver',
'poster'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'site02.urls'
WSGI_APPLICATION = 'site02.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'TweetApprover.db'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# from this point on MY consts
TWEET_APPROVER_EMAIL = 'georstef@gmail.com'
# Outgoing-mail configuration (placeholder values).
EMAIL_HOST = 'smtp.mydomain.com'
EMAIL_HOST_USER = 'username'
EMAIL_HOST_PASSWORD = 'password'
DEFAULT_FROM_EMAIL = 'username@mydomain.com'
SERVER_EMAIL = 'username@mydomain.com'
# SECURITY NOTE(review): live-looking Twitter API credentials are committed
# here in plain text; they should be rotated and loaded from the environment
# (or a secrets store) instead of the repository.
TWITTER_CONSUMER_KEY = 'DeH9TfrfeV7UeRgK3OSGA'
TWITTER_CONSUMER_SECRET = 'sZGBB28VZcrRfcZvexYydj2Pc2uWW307kP8l7T7yiQo'
TWITTER_OAUTH_TOKEN = '2334856880-zYwvSu8kS7cGfH67lQ64vulTUbY7zxhc39bpnlG'
TWITTER_OAUTH_TOKEN_SECRET = 'RTQ7pzSytCIPsASCkA0Z5rubpHSWbvjvYR3c3hb9QhC3M'
|
[
"georstef@gmail.com"
] |
georstef@gmail.com
|
b642ce9125bc51b5a9f9d0ae69199d2d0bd1bf63
|
2e8ff2eb86f34ce2fc330766906b48ffc8df0dab
|
/tensorflow_probability/python/experimental/inference_gym/targets/__init__.py
|
a5ba67a6a9b68bf31372bf5990405fe49fbdf663
|
[
"Apache-2.0"
] |
permissive
|
wataruhashimoto52/probability
|
9613f9a3cc685ff1a20643c4a05a48f9cf0fe1ae
|
12e3f256544eadea6e863868da825614f4423eb0
|
refs/heads/master
| 2021-07-16T18:44:25.970036
| 2020-06-14T02:48:29
| 2020-06-14T02:51:59
| 146,873,495
| 0
| 0
|
Apache-2.0
| 2018-08-31T09:51:20
| 2018-08-31T09:51:20
| null |
UTF-8
|
Python
| false
| false
| 2,223
|
py
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Targets package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.experimental.inference_gym.targets.banana import Banana
from tensorflow_probability.python.experimental.inference_gym.targets.bayesian_model import BayesianModel
from tensorflow_probability.python.experimental.inference_gym.targets.ill_conditioned_gaussian import IllConditionedGaussian
from tensorflow_probability.python.experimental.inference_gym.targets.item_response_theory import ItemResponseTheory
from tensorflow_probability.python.experimental.inference_gym.targets.item_response_theory import SyntheticItemResponseTheory
from tensorflow_probability.python.experimental.inference_gym.targets.logistic_regression import GermanCreditNumericLogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.logistic_regression import LogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.model import Model
from tensorflow_probability.python.experimental.inference_gym.targets.sparse_logistic_regression import GermanCreditNumericSparseLogisticRegression
from tensorflow_probability.python.experimental.inference_gym.targets.sparse_logistic_regression import SparseLogisticRegression
__all__ = [
'Banana',
'BayesianModel',
'GermanCreditNumericLogisticRegression',
'GermanCreditNumericSparseLogisticRegression',
'IllConditionedGaussian',
'ItemResponseTheory',
'LogisticRegression',
'Model',
'SparseLogisticRegression',
'SyntheticItemResponseTheory',
]
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
748a3810da0b0659890ef170abef1ea0d6d32b5f
|
5961726d2e0d84c4ced32e5cd072c3c0c07153cb
|
/smart_schedule/line/handlers/__init__.py
|
48b1b8553fed5e192692650955bf0185450019e4
|
[] |
no_license
|
macinjoke/smart_schedule
|
46bc68d712646ffb45dcf1e8bd9d140d7a9fb84f
|
605c39f2d465cb8e56bedc941109f3b716608efa
|
refs/heads/master
| 2021-03-19T15:53:35.886128
| 2018-01-13T08:22:50
| 2018-01-13T08:22:50
| 76,947,986
| 7
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,214
|
py
|
from datetime import datetime
import flask
import urllib
import hashlib
import re
from linebot.models import TextSendMessage
from linebot import LineBotApi
from smart_schedule.settings import (
line_env, web_env, hash_env
)
# Single shared LINE Messaging API client, configured from the environment.
line_bot_api = LineBotApi(line_env['channel_access_token'])

# TODO: decide where the functions below should live; consider refactoring.
def reply_google_auth_message(event):
    """Reply to a LINE event with a Google OAuth link for this talk.

    The talk id (user/group/room id) is combined with a server-side seed
    into an MD5 digest so the OAuth callback can verify which talk the
    authentication belongs to.

    Raises:
        ValueError: if ``event.source.type`` is not user/group/room.
    """
    auth_url = flask.url_for('oauth2')
    source_type = event.source.type
    if source_type == 'user':
        talk_id = event.source.user_id
    elif source_type == 'group':
        talk_id = event.source.group_id
    elif source_type == 'room':
        talk_id = event.source.room_id
    else:
        # ValueError is an Exception subclass, so existing broad handlers
        # still catch it, while being more precise than bare Exception.
        raise ValueError('invalid `event.source` type: {}'.format(source_type))
    digest = hashlib.md5()
    digest.update(talk_id.encode('utf-8'))
    digest.update(hash_env['seed'].encode('utf-8'))
    params = urllib.parse.urlencode({'talk_id': talk_id, 'hash': digest.hexdigest()})
    url = '{}{}?{}'.format(web_env['host'], auth_url, params)
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text='このリンクから認証を行ってください\n{}'.format(url))
    )
def reply_refresh_error_message(event):
    """Reply with a (Japanese) message explaining a credential refresh error.

    Sent when the same Google account was authenticated in several talks;
    the text tells the user how to recover.
    """
    # Backslash-newlines inside the triple-quoted string join lines without
    # inserting a newline into the message.
    reply_text = '''認証情報の更新エラーが発生しました。同じGoogleアカウントで複数の\
認証を行っている場合にこの不具合が発生します。このトークでSmart Scheduleを使用したい場合\
は以下のいずれかを行った後で認証しなおしてください。
1. 同じアカウントで認証しているトークでlogoutコマンドを行う(オススメ)
2. 下記URLから手動でSmart Scheduleの認証を解除する\
https://myaccount.google.com/u/1/permissions'''
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text=reply_text)
    )
def reply_invalid_credential_error_message(event):
    """Reply with a (Japanese) message asking the user to re-authenticate.

    Sent when the stored Google credential is invalid (same-account,
    multiple-talk conflict).
    """
    reply_text = '''無効な認証情報です。同じGoogleアカウントで複数の認証を行っている\
場合にこの不具合が発生します。認証をやりなおしてください。'''
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text=reply_text)
    )
def generate_message_from_events(events, reply_text):
    """Append a Japanese summary of Google Calendar events to *reply_text*.

    Args:
        events: list of Google Calendar API event resources. Timed events
            carry ``start``/``end`` ``dateTime`` strings (assumed to be
            JST, +09:00 — the strptime format hard-codes that offset);
            all-day events carry only a ``date``.
        reply_text: header text the formatted entries are appended to.

    Returns:
        The header followed by one formatted entry per event.
    """
    day_of_week_strs = ["月", "火", "水", "木", "金", "土", "日"]

    def format_jst(dt):
        # 'YYYY年MM月DD日(曜日) HH時MM分'. Fix: the original used %S
        # (seconds) after 時, so the minutes always rendered as '00分';
        # %M is the minutes field.
        day_of_week = day_of_week_strs[dt.weekday()]
        return dt.strftime('%Y年%m月%d日({}) %H時%M分'.format(day_of_week))

    for e in events:
        summary = e['summary']
        start = e['start'].get('dateTime', e['start'].get('date'))
        # Timed events carry a full RFC3339 timestamp; all-day events only a date.
        # Raw string avoids the invalid-escape DeprecationWarning for '\d'.
        if re.match(r'\d+[-]\d+[-]\d+[T]\d+[:]\d+[:]\d+[+]\d+[:]\d+', start):
            start = format_jst(datetime.strptime(start, '%Y-%m-%dT%H:%M:%S+09:00'))
            end = e['end'].get('dateTime', e['end'].get('date'))
            end = format_jst(datetime.strptime(end, '%Y-%m-%dT%H:%M:%S+09:00'))
            reply_text += '\n\n{}\n{}\n |\n{}\n\n---------------------------'.format(
                summary, start, end)
        else:
            start_datetime = datetime.strptime(start, '%Y-%m-%d')
            start = start_datetime.strftime('%Y年%m月%d日')
            end = '終日'
            reply_text += '\n\n{}\n{} {}\n\n---------------------------'.format(
                summary, start, end)
    return reply_text
from .join_event_handler import JoinEventHandler
from .leave_event_handler import LeaveEventHandler
from .message_event_handler import MessageEventHandler
from .postback_event_handler import PostBackEventHandler
from .unfollow_event_handler import UnfollowEventHandler
|
[
"shunji.makino@gmail.com"
] |
shunji.makino@gmail.com
|
a4354d06907b766c2c8e2f23546b79efe0959e4f
|
06322e962c80f4c25838318e7d805ae88f0299e5
|
/lengths.py
|
f6546177e6a717d960717d0a920b2e6122347ee7
|
[
"BSD-2-Clause"
] |
permissive
|
unixpickle/uno-ai
|
6d4ec187e0c158c15cd4240ccf7e894cb599e071
|
3124afc8fa6b0cbcced95ef03ed9672cdb4f35a7
|
refs/heads/master
| 2020-04-21T10:20:07.310885
| 2019-08-06T15:27:45
| 2019-08-06T15:27:45
| 169,482,953
| 22
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
"""
Measure the lengths of random games.
"""
import random
from uno_ai.game import Game
def main():
    """Play random 4-player Uno games forever, printing each game's length."""
    while True:
        game = Game(4)
        moves = 0
        # Pick uniformly among the legal actions until someone wins.
        while game.winner() is None:
            game.act(random.choice(game.options()))
            moves += 1
        print(moves)
# Entry point; guarded so importing the module doesn't start the endless loop.
if __name__ == '__main__':
    main()
|
[
"unixpickle@gmail.com"
] |
unixpickle@gmail.com
|
571b5e21a17bb0386eb30bd81b021035a58c3802
|
5b56d0ec345d19c3e9c17764cdfa4ef8180f25e0
|
/2020-01-python/api.py
|
fd5f9add8cd66d0c4436d45b28fc09d9b3c73da0
|
[] |
no_license
|
suzuki-hoge/warikan
|
6e6d5f814fe4a9130b61a416f495326c316e2a8c
|
d47c32338421d4c6c88022a7d64a478e79708835
|
refs/heads/master
| 2020-12-04T08:54:07.960635
| 2020-02-07T03:29:52
| 2020-02-07T10:09:56
| 231,702,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
import functools
import json

from bottle import route, get, post, put, request, response, hook, run

import db, party
def handle(f):
    """Decorator wrapping a route handler's result in a JSON status envelope.

    Success: ``{'status': 'ok'[, 'result': <value>]}`` (``result`` omitted
    when the handler returns None). Failure: ``{'status': 'ng', 'error': msg}``.
    """
    # functools.wraps keeps the original function name so bottle's routes
    # (and debugging) don't all see an anonymous 'wrapper'.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            result = f(*args, **kwargs)
            return {'status': 'ok', 'result': result} if result is not None else {'status': 'ok'}
        # Exception, not BaseException: KeyboardInterrupt/SystemExit must
        # propagate instead of being reported as an application error.
        except Exception as e:
            # Fall back to str(e) for exceptions without a .message attribute
            # (all of them on Python 3; many on Python 2).
            return {'status': 'ng', 'error': getattr(e, 'message', str(e))}
    return wrapper
@hook('after_request')
def allow_cors():
    """Attach permissive CORS headers to every response."""
    for header, value in (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type'),
        ('Access-Control-Allow-Methods', 'GET, POST, PUT, OPTIONS'),
    ):
        response.headers[header] = value
# Catch-all CORS preflight handler: an empty JSON body is enough, since the
# after_request hook adds the Access-Control-* headers to every response.
@route('<any:path>', method = 'OPTIONS')
def options(**kwargs):
    return {}
@get('/party/<partyName>')
@handle
def find(partyName):
    """Look up a party by name and return it as a JSON-serializable dict."""
    def participant_dict(member):
        return {'participantName': member.participantName,
                'participantType': member.participantType,
                'paymentSection': member.paymentSection}

    found = db.read(partyName)
    return {'partyName': found.partyName,
            'partyHoldAt': found.partyHoldAt,
            'participants': [participant_dict(m) for m in found.participants],
            'billingAmount': found.billingAmount,
            'adjustingUnitAmount': found.adjustingUnitAmount}
@post('/party/plan')
@handle
def plan():
    """Create a new party from the JSON request body and persist it."""
    body = request.json
    new_party = party.Party.plan(
        body.get('partyName'),
        body.get('partyHoldAt'),
        body.get('secretaryName'),
        body.get('paymentSection'),
        body.get('billingAmount'),
        body.get('adjustingUnitAmount'),
    )
    db.write(new_party)
@put('/party/<partyName>/add')
@handle
def add(partyName):
    """Add a (non-secretary) participant to an existing party."""
    body = request.json
    newcomer = party.Participant(
        body.get('participantName'), 'NotSec', body.get('paymentSection'))
    db.write(db.read(partyName).add(newcomer))
@put('/party/<partyName>/remove')
@handle
def remove(partyName):
    """Remove a participant (named in the query/form params) from a party."""
    params = request.params
    db.write(db.read(partyName).remove(params.participantName))
@put('/party/<partyName>/change')
@handle
def change(partyName):
    """Update a party's adjusting unit amount from the JSON request body."""
    body = request.json
    db.write(db.read(partyName).change(body.get('adjustingUnitAmount')))
@get('/party/<partyName>/demand')
@handle
def demand(partyName):
    """Return each participant's name and stringified payment amount."""
    found = db.read(partyName)
    return [{'participantName': participantName,
             'paymentAmount': str(paymentAmount)}
            for participantName, paymentAmount in found.demand()]
# Start bottle's built-in development server (local use only).
run(host = 'localhost', port = 9000)
|
[
"user.ryo@gmail.com"
] |
user.ryo@gmail.com
|
01df404873ee9e3bba62ab69c2e05d7863ae98c4
|
2ce0c770b6ebf1122cfe2cc02b943101172920f4
|
/wwt_data_formats/tests/test_wtml_tree.py
|
56668db83d32b8c0c1913e626cf661c4e392067c
|
[
"MIT"
] |
permissive
|
WorldWideTelescope/wwt_data_formats
|
48269945ab835706f75fbf56801c5f19c38c1930
|
8f3a977b87d36c5a903e3bf63ff2ea89547447bb
|
refs/heads/master
| 2022-10-31T02:02:51.003406
| 2022-10-25T19:49:38
| 2022-10-25T19:49:38
| 225,955,212
| 2
| 4
|
MIT
| 2023-08-18T00:18:54
| 2019-12-04T20:54:27
|
Python
|
UTF-8
|
Python
| false
| false
| 833
|
py
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2019-2020 the .NET Foundation
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function
import os.path
from .. import cli
from . import tempdir
def test_cli(tempdir):
    "Simple smoke test to see if it runs at all."
    # NOTE(review): fetches a live WTML index over the network; the test
    # requires connectivity to web.wwtassets.org.
    prev_dir = os.getcwd()

    try:
        # Work inside the fixture's temp directory so the fetched tree
        # doesn't pollute the source checkout.
        os.chdir(tempdir)
        cli.entrypoint(
            [
                "tree",
                "fetch",
                "https://web.wwtassets.org/engine/assets/builtin-image-sets.wtml",
            ]
        )
        # Exercise the read-only subcommands against the fetched tree.
        cli.entrypoint(["tree", "summarize"])
        cli.entrypoint(["tree", "print-image-urls"])
        cli.entrypoint(["tree", "print-dem-urls"])
    finally:
        # Windows can't remove the temp tree unless we chdir out of it.
        os.chdir(prev_dir)
|
[
"peter@newton.cx"
] |
peter@newton.cx
|
505e01d16c4946a2cc61a71edd7d0ee2504ca6d6
|
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
|
/sdk/python/pulumi_azure_native/network/v20171001/get_virtual_network_gateway_bgp_peer_status.py
|
ce971110c0cb3c1a127751e2520bf66c4337635f
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
MisinformedDNA/pulumi-azure-native
|
9cbd75306e9c8f92abc25be3f73c113cb93865e9
|
de974fd984f7e98649951dbe80b4fc0603d03356
|
refs/heads/master
| 2023-03-24T22:02:03.842935
| 2021-03-08T21:16:19
| 2021-03-08T21:16:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,690
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetVirtualNetworkGatewayBgpPeerStatusResult',
'AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult',
'get_virtual_network_gateway_bgp_peer_status',
]
# Generated by the Pulumi SDK Generator — do not edit by hand.
@pulumi.output_type
class GetVirtualNetworkGatewayBgpPeerStatusResult:
    """
    Response for list BGP peer status API service call
    """
    def __init__(__self__, value=None):
        # Values are stored via pulumi.set so the @pulumi.output_type
        # machinery can translate between Python and provider names.
        if value and not isinstance(value, list):
            raise TypeError("Expected argument 'value' to be a list")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[Sequence['outputs.BgpPeerStatusResponseResult']]:
        """
        List of BGP peers
        """
        return pulumi.get(self, "value")
class AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(GetVirtualNetworkGatewayBgpPeerStatusResult):
    # Makes the (already-resolved) result awaitable: the unreachable `yield`
    # turns __await__ into a generator without ever actually suspending.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetVirtualNetworkGatewayBgpPeerStatusResult(
            value=self.value)
# Generated wrapper around the azure-native provider invoke.
def get_virtual_network_gateway_bgp_peer_status(peer: Optional[str] = None,
                                                resource_group_name: Optional[str] = None,
                                                virtual_network_gateway_name: Optional[str] = None,
                                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult:
    """
    Response for list BGP peer status API service call


    :param str peer: The IP address of the peer to retrieve the status of.
    :param str resource_group_name: The name of the resource group.
    :param str virtual_network_gateway_name: The name of the virtual network gateway.
    """
    __args__ = dict()
    __args__['peer'] = peer
    __args__['resourceGroupName'] = resource_group_name
    __args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's own version when the caller didn't pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20171001:getVirtualNetworkGatewayBgpPeerStatus', __args__, opts=opts, typ=GetVirtualNetworkGatewayBgpPeerStatusResult).value

    return AwaitableGetVirtualNetworkGatewayBgpPeerStatusResult(
        value=__ret__.value)
|
[
"noreply@github.com"
] |
MisinformedDNA.noreply@github.com
|
c7a3468c7cae4eb4836690dd475d98f13f9a6ac2
|
f854ef28002a3931a8d8b8d0b9cc691b8a449db3
|
/home-assistant/custom_components/hacs/helpers/classes/manifest.py
|
c0e43b9ba3f570e1740dbe3c9e52024391ae5891
|
[
"MIT"
] |
permissive
|
Burningstone91/smart-home-setup
|
030cdaa13d05fb19a82b28ea455614d3276522ab
|
c2f34cc8b8243bc6ce620b3f03e3e44ff28150ca
|
refs/heads/master
| 2023-02-23T06:25:04.476657
| 2022-02-26T16:05:02
| 2022-02-26T16:05:02
| 239,319,680
| 421
| 36
|
MIT
| 2023-02-08T01:16:54
| 2020-02-09T14:39:06
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
"""
Manifest handling of a repository.
https://hacs.xyz/docs/publish/start#hacsjson
"""
from typing import List
import attr
from custom_components.hacs.exceptions import HacsException
@attr.s(auto_attribs=True)
class HacsManifest:
    """HacsManifest class."""

    # Fields mirror the keys of a repository's hacs.json manifest.
    # NOTE(review): the mutable defaults ({} and []) rely on attrs'
    # auto_attribs turning them into per-instance factories — confirm
    # against the pinned attrs version.
    name: str = None
    content_in_root: bool = False
    zip_release: bool = False
    filename: str = None
    manifest: dict = {}
    hacs: str = None
    hide_default_branch: bool = False
    domains: List[str] = []
    country: List[str] = []
    homeassistant: str = None
    persistent_directory: str = None
    iot_class: str = None
    render_readme: bool = False

    @staticmethod
    def from_dict(manifest: dict):
        """Set attributes from dicts."""
        if manifest is None:
            raise HacsException("Missing manifest data")

        manifest_data = HacsManifest()
        manifest_data.manifest = manifest

        # Normalize a single country string to a one-element list.
        # NOTE(review): this mutates the caller's dict in place.
        if country := manifest.get("country"):
            if isinstance(country, str):
                manifest["country"] = [country]

        # Copy every manifest key onto the instance — including keys that
        # are not declared attributes above.
        for key in manifest:
            setattr(manifest_data, key, manifest[key])

        return manifest_data
|
[
"dimitri.steiner.gl@gmail.com"
] |
dimitri.steiner.gl@gmail.com
|
7343fb8defbea9a314d6f3be0e874c35f13e8940
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/dlmmin002/question3.py
|
7a33ac4f48f3eddf6202f2094e5bd3b2da9e4fde
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
# personal spam message
# nolwazi dlamini
# 3 march 2014

# Gather the recipient details that get substituted into the letter below.
name = input("Enter first name: \n")
surname = input("Enter last name: \n")
# NOTE(review): eval() on raw user input executes arbitrary code;
# int()/float() would be safer — left unchanged here.
money = eval(input("Enter sum of money in USD: \n"))
country = input("Enter country name: \n")

# Print the advance-fee scam letter with the details filled in.
print("\nDearest" ,name)
print("It is with a heavy heart that I inform you of the death of my father,")
print("General Fayk ",surname,", your long lost relative from Mapsfostol.",sep="")
print("My father left the sum of ", money,"USD for us, your distant cousins. ",sep="")
print("Unfortunately, we cannot access the money as it is in a bank in ",country,".",sep="")
print("I desperately need your assistance to access this money.")
print("I will even pay you generously, 30% of the amount - ",(money*0.3),"USD,",sep="")
print("for your help. Please get in touch with me at this email address asap.")
print("Yours sincerely")
print("Frank" ,surname)
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
fafedd086eb52ca3a26667cd17b01a87c8ca5b04
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_155/791.py
|
f4c857964fa46a84265cc71f3b483d20abda438d
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
__author__ = 'rrampage'

# Number of test cases, read from the first line of stdin.
t = int(input())

def input_format():
    # Each case line is "<N> <digit string>"; only the digit string matters.
    # Returns the per-shyness-level audience counts as a list of ints.
    s = input().split()[1]
    return [int(i) for i in s]
def ovation(aud):
    """Return the minimum number of friends needed for a full standing ovation.

    aud[i] is the number of audience members with shyness level i; a
    level-i member stands only once at least i others are already standing.
    Invited friends have shyness 0 and always stand.
    """
    friends_added = 0
    standing = 0
    for shyness, count in enumerate(aud):
        if count == 0:
            continue
        if standing < shyness:
            # Invite just enough friends to reach this shyness threshold.
            friends_added += shyness - standing
            standing = shyness
        standing += count
    return friends_added
# Code Jam output format: one "Case #i: answer" line per test case.
for x in range(t):
    print("Case #%d: %d" % (x+1, ovation(input_format())))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
1f0050636b553377350ef958e53062abe0a0aec4
|
2db7597686f33a0d700f7082e15fa41f830a45f0
|
/Python/String/266. 回文排列.py
|
2dba117a4cfd0caece5666e521229f85abe7fe4f
|
[] |
no_license
|
Leahxuliu/Data-Structure-And-Algorithm
|
04e0fc80cd3bb742348fd521a62bc2126879a70e
|
56047a5058c6a20b356ab20e52eacb425ad45762
|
refs/heads/master
| 2021-07-12T23:54:17.785533
| 2021-05-17T02:04:41
| 2021-05-17T02:04:41
| 246,514,421
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
'''
奇数个的char最多只能有一个
'''
from collections import defaultdict
class Solution:
    def canPermutePalindrome(self, s: str) -> bool:
        """Return True if some permutation of *s* is a palindrome.

        A string can be rearranged into a palindrome iff at most one
        character occurs an odd number of times.
        """
        if s == '':
            return True
        counts = defaultdict(int)
        for ch in s:
            counts[ch] += 1
        odd_counts = sum(1 for occurrences in counts.values() if occurrences % 2 == 1)
        return odd_counts < 2
|
[
"leahxuliu@gmail.com"
] |
leahxuliu@gmail.com
|
c439e8bc4823a5c6fc7da35db3637314de577c9c
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/say_big_person_of_fact/hand_and_case/try_able_company_up_week.py
|
dfd64790159e034f5c52cd28b6e4a81e19f11920
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
#! /usr/bin/env python
def different_place(str_arg):
    # Print str_arg (via way) followed by the literal 'thing'.
    way(str_arg)
    print('thing')
def way(str_arg):
    # Print the given string verbatim.
    print(str_arg)
# Demo entry point.
if __name__ == '__main__':
    different_place('know_right_world_over_year')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
9f8aaad6b22ea7ecc6945c8288570a353c7d7b8f
|
caaf1b0754db1e676c37a6f1e58f19183754e654
|
/sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2/async_samples/sample_classify_document_from_url_async.py
|
9e4775d42c58ae924f0d55dc072fb01011589d59
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
rdomenzain/azure-sdk-for-python
|
45dfb39121a0abda048c22e7309733a56259f525
|
58984255aeb904346b6958c5ba742749a2cc7d1b
|
refs/heads/master
| 2023-07-07T06:53:12.967120
| 2023-07-04T16:27:37
| 2023-07-04T16:27:37
| 258,050,134
| 0
| 0
|
MIT
| 2020-04-23T00:12:14
| 2020-04-23T00:12:13
| null |
UTF-8
|
Python
| false
| false
| 5,413
|
py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_classify_document_from_url_async.py
DESCRIPTION:
This sample demonstrates how to classify a document from a URL using a trained document classifier.
To learn how to build your custom classifier, see sample_build_classifier.py.
More details on building a classifier and labeling your data can be found here:
https://aka.ms/azsdk/formrecognizer/buildclassifiermodel
USAGE:
python sample_classify_document_from_url_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Form Recognizer resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) CLASSIFIER_ID - the ID of your trained document classifier
-OR-
CLASSIFIER_CONTAINER_SAS_URL - The shared access signature (SAS) Url of your Azure Blob Storage container with your training files.
A document classifier will be built and used to run the sample.
"""
import os
import asyncio
async def classify_document_from_url_async(classifier_id):
    """Classify a document fetched from a public URL using a trained classifier.

    The CLASSIFIER_ID environment variable, when set, overrides the argument.
    Requires AZURE_FORM_RECOGNIZER_ENDPOINT and AZURE_FORM_RECOGNIZER_KEY.
    """
    # [START classify_document_from_url_async]
    from azure.core.credentials import AzureKeyCredential
    from azure.ai.formrecognizer.aio import DocumentAnalysisClient

    endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
    key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
    classifier_id = os.getenv("CLASSIFIER_ID", classifier_id)

    document_analysis_client = DocumentAnalysisClient(
        endpoint=endpoint, credential=AzureKeyCredential(key)
    )
    # The async client is a context manager so its transport is closed cleanly.
    async with document_analysis_client:
        url = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/main/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/forms/IRS-1040.pdf"
        poller = await document_analysis_client.begin_classify_document_from_url(
            classifier_id, document_url=url
        )
        result = await poller.result()

        print("----Classified documents----")
        for doc in result.documents:
            print(
                f"Found document of type '{doc.doc_type or 'N/A'}' with a confidence of {doc.confidence} contained on "
                f"the following pages: {[region.page_number for region in doc.bounding_regions]}"
            )
    # [END classify_document_from_url_async]
async def main():
    """Build a classifier when only a container SAS URL is configured, then classify."""
    classifier_id = None
    # No ready-made classifier configured: train one from the blob container
    # before running the classification sample.
    if os.getenv("CLASSIFIER_CONTAINER_SAS_URL") and not os.getenv("CLASSIFIER_ID"):
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.formrecognizer.aio import DocumentModelAdministrationClient
        from azure.ai.formrecognizer import (
            ClassifierDocumentTypeDetails,
            AzureBlobContentSource,
        )

        endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
        key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
        blob_container_sas_url = os.environ["CLASSIFIER_CONTAINER_SAS_URL"]

        document_model_admin_client = DocumentModelAdministrationClient(
            endpoint=endpoint, credential=AzureKeyCredential(key)
        )
        async with document_model_admin_client:
            # Two document types, each trained from its own prefix in the container.
            poller = await document_model_admin_client.begin_build_document_classifier(
                doc_types={
                    "IRS-1040-A": ClassifierDocumentTypeDetails(
                        azure_blob_source=AzureBlobContentSource(
                            container_url=blob_container_sas_url,
                            prefix="IRS-1040-A/train",
                        )
                    ),
                    "IRS-1040-D": ClassifierDocumentTypeDetails(
                        azure_blob_source=AzureBlobContentSource(
                            container_url=blob_container_sas_url,
                            prefix="IRS-1040-D/train",
                        )
                    ),
                },
            )
            classifier = await poller.result()
            classifier_id = classifier.classifier_id

    await classify_document_from_url_async(classifier_id)
# Entry point demonstrating the recommended HttpResponseError handling pattern.
if __name__ == "__main__":
    from azure.core.exceptions import HttpResponseError

    try:
        asyncio.run(main())
    except HttpResponseError as error:
        print(
            "For more information about troubleshooting errors, see the following guide: "
            "https://aka.ms/azsdk/python/formrecognizer/troubleshooting"
        )
        # Examples of how to check an HttpResponseError
        # Check by error code:
        if error.error is not None:
            if error.error.code == "InvalidImage":
                print(f"Received an invalid image error: {error.error}")
            if error.error.code == "InvalidRequest":
                print(f"Received an invalid request error: {error.error}")
            # Raise the error again after printing it
            raise
        # If the inner error is None and then it is possible to check the message to get more information:
        if "Invalid request".casefold() in error.message.casefold():
            print(f"Uh-oh! Seems there was an invalid request: {error}")
            # Raise the error again
            raise
|
[
"noreply@github.com"
] |
rdomenzain.noreply@github.com
|
b51914fd7b3e6ca960cf28e6f04ff6f317fe58a5
|
66865b7ed119f42c8490bf3f8821602e1201eb0b
|
/tests/performance/time_mean.py
|
f6149a4c0aef131f24928bd33fcd8962974edd8b
|
[
"MIT"
] |
permissive
|
chanedwin/pandas-profiling
|
1a8a35f6d985a93f02a25af6e1c650b24e11218a
|
d9ee4a8a589e075cfced9fc71ca500a20e2a3e73
|
refs/heads/develop_spark_profiling
| 2023-08-01T19:53:31.340751
| 2021-01-07T15:59:22
| 2021-01-07T15:59:22
| 288,504,610
| 1
| 3
|
MIT
| 2021-04-26T14:09:43
| 2020-08-18T16:14:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 726
|
py
|
import timeit

# Setup executed once per timeit run: a 1000-element float series with ~20%
# of values replaced by NA, plus the four mean implementations under test.
testcode = """
import numpy as np
import pandas as pd
np.random.seed(12)
vals = np.random.random(1000)
series = pd.Series(vals)
series[series < 0.2] = pd.NA
def f1(series):
    arr = series.values
    arr_without_nan = arr[~np.isnan(arr)]
    return np.mean(arr_without_nan)
def f2(series):
    arr = series.values
    return np.nanmean(arr)
def f3(series):
    return series.mean()
def f4(series):
    return series[series.notna()].mean()
"""

# Time each candidate, printing one result per line in f1..f4 order.
for stmt in ("f1(series)", "f2(series)", "f3(series)", "f4(series)"):
    print(timeit.timeit(stmt, number=10, setup=testcode))
|
[
"sfbbrugman@gmail.com"
] |
sfbbrugman@gmail.com
|
d8327625f3951b94827154fcd1efc3bb31fd7e6a
|
a4e59c4f47873daf440374367a4fb0383194d2ce
|
/Python/987.py
|
071ba61e1dee050a891b2d02116afb3a3671fc25
|
[] |
no_license
|
maxjing/LeetCode
|
e37cbe3d276e15775ae028f99cf246150cb5d898
|
48cb625f5e68307390d0ec17b1054b10cc87d498
|
refs/heads/master
| 2021-05-23T17:50:18.613438
| 2021-04-02T17:14:55
| 2021-04-02T17:14:55
| 253,406,966
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    # NOTE(review): relies on the LeetCode harness providing TreeNode,
    # deque, defaultdict and List at module scope — none are imported here.
    def verticalTraversal(self, root: TreeNode) -> List[List[int]]:
        """LeetCode 987: vertical order traversal of a binary tree.

        BFS row by row; within a row, values sharing a column are sorted
        before being appended, giving the required ordering (top-to-bottom,
        same-row/same-column ties broken by value).
        """
        if not root:
            return []
        # Queue of (node, column): root is column 0, left child col-1, right col+1.
        q = deque([(root, 0)])
        res = defaultdict(list)  # column -> values in final order
        while q:
            level = defaultdict(list)  # columns seen on the current row only
            for _ in range(len(q)):
                node, col = q.popleft()
                level[col].append(node.val)
                if node.left:
                    q.append((node.left, col - 1))
                if node.right:
                    q.append((node.right, col + 1))
            # Sort per row/column so equal-position ties come out by value.
            for col in level:
                res[col].extend(sorted(level[col]))
        # Columns reported left to right.
        return [res[i] for i in sorted(res)]
|
[
"tvandcc@gmail.com"
] |
tvandcc@gmail.com
|
02f6df5ae4820400c31f0a44ab0af1722aff4957
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/api/datahub/databus/shippers/mysql/shipper.py
|
47bb186ba428a43fa955ca786b37cc8b70ff1a25
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,040
|
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from datahub.databus.settings import MODULE_SHIPPER
from datahub.databus.shippers.base_shipper import BaseShipper
class MysqlShipper(BaseShipper):
    """Shipper that delivers databus data from Kafka into a MySQL/TSpider table."""

    storage_type = "mysql"
    module = MODULE_SHIPPER

    def _get_shipper_task_conf(self, cluster_name):
        """Build the connector task config for this result table's MySQL sink."""
        # physical_table_name format: "dbname_123.table_name"
        arr = self.physical_table_name.split(".")
        if len(arr) == 1:
            # No explicit database part: derive the per-business default db name.
            db_name = "mapleleaf_%s" % (self.rt_info["bk_biz_id"])
            table_name = self.physical_table_name
        else:
            db_name = arr[0]
            table_name = arr[1]
        # Server-side prepared statements off + batched rewrites on for bulk loads.
        conn_url = (
            "jdbc:mysql://{}:{}/{}?autoReconnect=true&useServerPrepStmts=false&rewriteBatchedStatements=true".format(
                self.sink_storage_conn["host"],
                self.sink_storage_conn["port"],
                db_name,
            )
        )
        return self.config_generator.build_tspider_config_param(
            cluster_name,
            self.connector_name,
            self.rt_id,
            self.source_channel_topic,
            self.task_nums,
            conn_url,
            self.sink_storage_conn["user"],
            self.sink_storage_conn["password"],
            table_name,
        )

    @classmethod
    def _field_handler(cls, field, storage_params):
        # Collect fields flagged is_index so they become indexed columns.
        if field.get("is_index"):
            storage_params.indexed_fields.append(field["physical_field"])

    @classmethod
    def _get_storage_config(cls, params, storage_params):
        # Serialized storage config consumed downstream; only indexed fields here.
        return json.dumps(
            {
                "indexed_fields": storage_params.indexed_fields,
            }
        )
|
[
"terrencehan@tencent.com"
] |
terrencehan@tencent.com
|
10a476e13c38323dbe8b0c4072c8570fa256f26c
|
40fc1d38f2d4b643bc99df347c4ff3a763ba65e3
|
/examples/menus/basic1/data/states/menu2.py
|
6adb8054f7339f609ba0c3ea440473cc73fedab8
|
[] |
no_license
|
alecordev/pygaming
|
0be4b7a1c9e7922c63ce4cc369cd893bfef7b03c
|
35e479b703acf038f47c2151b3759ad852781e4c
|
refs/heads/master
| 2023-05-14T05:03:28.484678
| 2021-06-03T10:11:08
| 2021-06-03T10:11:08
| 372,768,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
import pygame as pg
from .. import tools
import random
class Menu(tools.States):
    """Second demo menu state; rendering/option plumbing lives in tools.States."""

    def __init__(self, screen_rect):
        tools.States.__init__(self)
        self.screen_rect = screen_rect
        # Pre-rendered title surface, centered horizontally, 75px from the top.
        self.title, self.title_rect = self.make_text(
            "Menu2 State", (75, 75, 75), (self.screen_rect.centerx, 75), 50
        )
        self.pre_render_options()
        # Option-list layout: first entry 200px from the bottom, 75px apart.
        self.from_bottom = 200
        self.spacer = 75

    def update(self, now, keys):
        # Only react to selection changes; no time-based animation here.
        self.change_selected_option()

    def const_event(self, keys):
        # No per-frame (held-key) behaviour for this menu.
        pass

    def cleanup(self):
        # Nothing to tear down when leaving this state.
        pass

    def entry(self):
        # Nothing to set up when entering this state.
        pass
|
[
"alecor.dev@gmail.com"
] |
alecor.dev@gmail.com
|
eababec9f6471e53a80fca79134347940be8d290
|
fe91ffa11707887e4cdddde8f386a8c8e724aa58
|
/components/schema_org/generate_schema_org_code_unittest.py
|
efe4f2b9872edd705ddf08553a7364cb1d9eefc1
|
[
"BSD-3-Clause"
] |
permissive
|
akshaymarch7/chromium
|
78baac2b45526031846ccbaeca96c639d1d60ace
|
d273c844a313b1e527dec0d59ce70c95fd2bd458
|
refs/heads/master
| 2023-02-26T23:48:03.686055
| 2020-04-15T01:20:07
| 2020-04-15T01:20:07
| 255,778,651
| 2
| 1
|
BSD-3-Clause
| 2020-04-15T02:04:56
| 2020-04-15T02:04:55
| null |
UTF-8
|
Python
| false
| false
| 5,946
|
py
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for generate_schema_org_code."""
import sys
import unittest
import generate_schema_org_code
from generate_schema_org_code import schema_org_id
import os
SRC = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
sys.path.append(os.path.join(SRC, 'third_party', 'pymock'))
import mock
_current_dir = os.path.dirname(os.path.realpath(__file__))
# jinja2 is in chromium's third_party directory
# Insert at front to override system libraries, and after path[0] == script dir
sys.path.insert(
1, os.path.join(_current_dir, *([os.pardir] * 2 + ['third_party'])))
import jinja2
class GenerateSchemaOrgCodeTest(unittest.TestCase):
    """Unit tests for generate_schema_org_code's schema.org parsing helpers."""

    def test_get_template_vars(self):
        # One rdfs:Class entity and one rdf:Property should each appear in
        # the template vars, keyed by their hashed names.
        schema = {
            "@graph": [{
                "@id": "http://schema.org/MediaObject",
                "@type": "rdfs:Class"
            },
            {
                "@id": "http://schema.org/propertyName",
                "@type": "rdf:Property"
            }]
        }
        names = {
            "http://schema.org/MediaObject": 1234,
            "MediaObject": 1235,
            "http://schema.org/propertyName": 2345,
            "propertyName": 2346
        }
        self.assertEqual(
            generate_schema_org_code.get_template_vars(schema, names), {
                'entities': [{
                    'name': 'MediaObject',
                    'name_hash': 1235
                }],
                'properties': [{
                    'name': 'propertyName',
                    'name_hash': 2346,
                    'thing_types': [],
                    'enum_types': []
                }],
                'enums': [],
                'entity_parent_lookup':
                [{
                    'name': 'MediaObject',
                    'name_hash': 1235,
                    'parents': [{
                        'name': 'MediaObject',
                        'name_hash': 1235
                    }]
                }]
            })

    def test_lookup_parents(self):
        # Brand -> Intangible -> Thing; the result includes the class itself.
        thing = {'@id': schema_org_id('Thing')}
        intangible = {
            '@id': schema_org_id('Intangible'),
            'rdfs:subClassOf': thing
        }
        structured_value = {
            '@id': schema_org_id('StructuredValue'),
            'rdfs:subClassOf': intangible
        }
        brand = {'@id': schema_org_id('Brand'), 'rdfs:subClassOf': intangible}
        schema = {'@graph': [thing, intangible, structured_value, brand]}
        self.assertSetEqual(
            generate_schema_org_code.lookup_parents(brand, schema, {}),
            set(['Thing', 'Intangible', 'Brand']))

    def test_get_root_type_thing(self):
        # For ordinary classes the root of the subClassOf chain is Thing.
        thing = {'@id': schema_org_id('Thing')}
        intangible = {
            '@id': schema_org_id('Intangible'),
            'rdfs:subClassOf': thing
        }
        structured_value = {
            '@id': schema_org_id('StructuredValue'),
            'rdfs:subClassOf': intangible
        }
        schema = {'@graph': [thing, intangible, structured_value]}
        self.assertEqual(
            generate_schema_org_code.get_root_type(structured_value, schema),
            thing)

    def test_get_root_type_datatype(self):
        # DataType subclasses (Integer -> Number) root at the DataType itself.
        number = {
            '@id': schema_org_id('Number'),
            '@type': [schema_org_id('DataType'), 'rdfs:Class']
        }
        integer = {'@id': schema_org_id('Integer'), 'rdfs:subClassOf': number}
        schema = {'@graph': [integer, number]}
        self.assertEqual(
            generate_schema_org_code.get_root_type(integer, schema), number)

    def test_get_root_type_enum(self):
        # Enumeration members root at their own class, not at Thing.
        thing = {'@id': schema_org_id('Thing')}
        intangible = {
            '@id': schema_org_id('Intangible'),
            'rdfs:subClassOf': thing
        }
        enumeration = {
            '@id': schema_org_id('Enumeration'),
            'rdfs:subClassOf': intangible
        }
        actionStatusType = {
            '@id': schema_org_id('ActionStatusType'),
            'rdfs:subClassOf': enumeration
        }
        schema = {'@graph': [thing, intangible, enumeration, actionStatusType]}
        self.assertEqual(
            generate_schema_org_code.get_root_type(actionStatusType, schema),
            actionStatusType)

    def test_parse_property_identifier(self):
        # A property whose range spans a Thing subclass plus numeric
        # datatypes reports the Thing type and sets has_number.
        thing = {'@id': schema_org_id('Thing')}
        intangible = {
            '@id': schema_org_id('Intangible'),
            'rdfs:subClassOf': thing
        }
        structured_value = {
            '@id': schema_org_id('StructuredValue'),
            'rdfs:subClassOf': intangible
        }
        property_value = {
            '@id': schema_org_id('PropertyValue'),
            'rdfs:subClassOf': structured_value
        }
        number = {
            '@id': schema_org_id('Number'),
            '@type': [schema_org_id('DataType'), 'rdfs:Class']
        }
        integer = {'@id': schema_org_id('Integer'), 'rdfs:subClassOf': number}
        identifier = {
            '@id': schema_org_id('Identifier'),
            schema_org_id('rangeIncludes'): [property_value, integer, number]
        }
        schema = {
            '@graph': [
                thing, intangible, structured_value, property_value, number,
                integer, identifier
            ]
        }
        names = {"http://schema.org/Identifier": 1234, "Identifier": 1235}
        self.assertEqual(
            generate_schema_org_code.parse_property(identifier, schema, names),
            {
                'name': 'Identifier',
                'name_hash': 1235,
                'has_number': True,
                'thing_types': [property_value['@id']],
                'enum_types': []
            })
# Allow running this test module directly via `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
36a7393b21a2682ca5683e48c739bc8a39c968ea
|
c6ed09339ff21fa70f154f34328e869f0dd8e394
|
/python/mysql-replication/binlog_rows_dump.py
|
255f603361ff6f90e670bf2990edb1f0b99845fd
|
[] |
no_license
|
fits/try_samples
|
f9b15b309a67f7274b505669db4486b17bd1678b
|
0986e22d78f35d57fe1dd94673b68a4723cb3177
|
refs/heads/master
| 2023-08-22T14:35:40.838419
| 2023-08-07T12:25:07
| 2023-08-07T12:25:07
| 642,078
| 30
| 19
| null | 2022-12-28T06:31:24
| 2010-05-02T02:23:55
|
Java
|
UTF-8
|
Python
| false
| false
| 3,022
|
py
|
import configparser
from datetime import date, datetime
import json
import os
import sys
import signal
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent)
class BinlogConfig:
    """Persist the MySQL binlog position (file name + offset) in an INI file."""

    def __init__(self, conf_file):
        self.conf_file = conf_file
        self.config = configparser.ConfigParser()

    def load(self):
        """Return (log_file, log_pos) from the INI file, or (None, None) if absent."""
        self.config.read(self.conf_file)
        if 'binlog' not in self.config:
            return (None, None)
        section = self.config['binlog']
        return (section['log_file'], int(section['log_pos']))

    def save(self, log_file, log_pos):
        """Write the current binlog position back to the INI file."""
        # configparser.read_dict (via __setitem__) stringifies values automatically.
        self.config['binlog'] = {'log_file': log_file, 'log_pos': log_pos}
        with open(self.conf_file, 'w') as handle:
            self.config.write(handle)
def to_bool(s):
    """Interpret common truthy strings ('true', 't', 'ok', 'yes', 'y', 'on', '1') as True."""
    truthy = {'true', 't', 'ok', 'yes', 'y', 'on', '1'}
    return s.lower() in truthy
def split_env(name):
    """Return the comma-separated env var *name* as a list, or None if unset."""
    raw = os.getenv(name)
    return None if raw is None else raw.split(',')
# --- Runtime configuration, all sourced from the environment ---
# Resume point (log_file, log_pos) is persisted in an INI file between runs.
ini_file = os.getenv('INI_FILE', 'binlog.ini')
bconf = BinlogConfig(ini_file)
(log_file, log_pos) = bconf.load()
# When true, the stream reader blocks and waits for new binlog events.
blocking = to_bool(os.getenv('BLOCKING', 'off'))
host = os.getenv('MYSQL_HOST', 'localhost')
port = int(os.getenv('MYSQL_PORT', '3306'))
user = os.getenv('MYSQL_USER')
password = os.getenv('MYSQL_PASSWORD')
# Optional comma-separated allow-lists; None means "no filter".
schemas = split_env('SCHEMAS')
tables = split_env('TABLES')
# Connection settings passed straight to BinLogStreamReader.
cfg = {'host': host, 'port': port, 'user': user, 'password': password}
def to_json(obj):
    """json.dumps fallback serializer: ISO-8601 for dates/datetimes, str() otherwise."""
    return obj.isoformat() if isinstance(obj, (datetime, date)) else str(obj)
def handle_signal(sig, frame):
    """SIGTERM handler: exit with status 1 so the `finally` cleanup below runs."""
    raise SystemExit(1)
# Open the replication stream, resuming from the persisted position when one
# was loaded (log_file/log_pos may be None on first run).
stream = BinLogStreamReader(
    connection_settings = cfg,
    server_id = 1,
    only_events = [WriteRowsEvent, UpdateRowsEvent, DeleteRowsEvent],
    only_schemas = schemas,
    only_tables = tables,
    resume_stream = True,
    log_file = log_file,
    log_pos = log_pos,
    blocking = blocking
)
try:
    # Install the SIGTERM handler only while streaming, so a TERM triggers the
    # cleanup in `finally` (close stream, persist position) via SystemExit.
    signal.signal(signal.SIGTERM, handle_signal)
    for ev in stream:
        for r in ev.rows:
            # Seed the output keys so every emitted record has the same shape.
            data = {'table': '', 'schema': '', 'event_type': ''}
            # Insert/delete events carry 'values'; update events carry
            # 'before_values'/'after_values' — we keep the post-update state.
            if 'values' in r:
                data.update(r['values'])
            if 'after_values' in r:
                data.update(r['after_values'])
            data['table'] = ev.table
            data['schema'] = ev.schema
            if isinstance(ev, WriteRowsEvent):
                data['event_type'] = 'insert'
            elif isinstance(ev, UpdateRowsEvent):
                data['event_type'] = 'update'
            elif isinstance(ev, DeleteRowsEvent):
                data['event_type'] = 'delete'
            # One JSON document per line; to_json handles date/datetime columns.
            print( json.dumps(data, default=to_json) )
finally:
    # Ignore further signals while persisting the resume position, then
    # restore default handlers.
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    stream.close()
    bconf.save(stream.log_file, stream.log_pos)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    signal.signal(signal.SIGINT, signal.SIG_DFL)
|
[
"wadays_wozx@nifty.com"
] |
wadays_wozx@nifty.com
|
cd966a58e69dc06f2d0a257a5dfbfcd40725bc3e
|
dec9ede4b28b8a5ac79ab5c89754f6ff5d65d8e1
|
/source/main/settings.py
|
a97452f77ebe21ed189fdfb51743c0d75bacf140
|
[] |
no_license
|
Beknasar/python_group_6_homework_57_Ulanbek_uulu_Beknasar
|
036f1eb2f84626344581bb7d864e63e40c3d2e4f
|
3bf5e4eaa7133955b1bbb0131ebf9f4732965b1f
|
refs/heads/master
| 2022-12-09T02:10:11.232216
| 2020-09-02T15:44:49
| 2020-09-02T15:44:49
| 292,327,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,240
|
py
|
"""
Django settings for main project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '83le51*6hai4mci%b-xtei(cms3smwhl9k4wy2m+l$8(^s=0qf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'accounts',
'webapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'main.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'main.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Bishkek'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
LOGIN_REDIRECT_URL = 'index'
LOGOUT_REDIRECT_URL = 'index'
LOGIN_URL = 'login'
|
[
"680633@gmail.com"
] |
680633@gmail.com
|
a4e2cd025347721566a4b4b6d33b1669cba139cf
|
93a720d9242c73c919ec30f6018d126a391f473f
|
/ShowUserNonOwnerDriveACLs.py
|
4611a76f24cca4f71c23283af816a2f0ad50292c
|
[] |
no_license
|
scottreleehw/GAM-Scripts3
|
c8fa4abddb64e47d8a3d30dd7e19e29634c9e965
|
7eab4f86214bfeb00ee4dd6131828a55f1f42c56
|
refs/heads/master
| 2023-01-09T06:08:08.093789
| 2020-11-05T19:36:14
| 2020-11-05T19:36:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,286
|
py
|
#!/usr/bin/env python3
"""
# Purpose: For a Google Drive User, get all drive file ACLs for files except those indicating the user as owner
# Note: This script can use Basic or Advanced GAM:
#	https://github.com/jay0lee/GAM
#	https://github.com/taers232c/GAMADV-XTD3
# 1: Use print filelist to get selected ACLs
#  Basic: gam user testuser@domain.com print filelist id title permissions owners > filelistperms.csv
#  Advanced: gam user testuser@domain.com print filelist fields id,title,permissions,owners.emailaddress > filelistperms.csv
# 2: From that list of ACLs, output a CSV file with headers "Owner,driveFileId,driveFileTitle,emailAddress"
#    that lists the driveFileIds/Titles for all ACLs except those indicating the user as owner
#  $ python3 ShowUserNonOwnerDriveACLs.py filelistperms.csv localperms.csv
"""

import csv
import re
import sys

# Drive API v3 names the file column 'name'; v2 used 'title' -- accept either.
FILE_NAME = 'name'
ALT_FILE_NAME = 'title'
QUOTE_CHAR = '"' # Adjust as needed
LINE_TERMINATOR = '\n' # On Windows, you probably want '\r\n'

# Matches the flattened ACL columns GAM emits, e.g. "permissions.3.type".
# Fix: dots are escaped so the pattern cannot match stray column names.
PERMISSIONS_N_TYPE = re.compile(r"permissions\.(\d+)\.type")

# Output: second argv arg (or '-'/absent for stdout).
if (len(sys.argv) > 2) and (sys.argv[2] != '-'):
    outputFile = open(sys.argv[2], 'w', encoding='utf-8', newline='')
else:
    outputFile = sys.stdout
outputCSV = csv.DictWriter(outputFile, ['Owner', 'driveFileId', 'driveFileTitle', 'emailAddress'], lineterminator=LINE_TERMINATOR, quotechar=QUOTE_CHAR)
outputCSV.writeheader()

# Input: first argv arg (or '-'/absent for stdin).
if (len(sys.argv) > 1) and (sys.argv[1] != '-'):
    inputFile = open(sys.argv[1], 'r', encoding='utf-8')
else:
    inputFile = sys.stdin

for row in csv.DictReader(inputFile, quotechar=QUOTE_CHAR):
    # Each file row carries N flattened permission entries; scan them all.
    for k, v in row.items():
        mg = PERMISSIONS_N_TYPE.match(k)
        if mg and v:
            permissions_N = mg.group(1)
            emailAddress = row.get(f'permissions.{permissions_N}.emailAddress', '')
            # Emit every ACL unless it is the owner's own 'user'/'owner' entry.
            if v != 'user' or row[f'permissions.{permissions_N}.role'] != 'owner' or emailAddress != row['owners.0.emailAddress']:
                outputCSV.writerow({'Owner': row['owners.0.emailAddress'],
                                    'driveFileId': row['id'],
                                    'driveFileTitle': row.get(FILE_NAME, row.get(ALT_FILE_NAME, 'Unknown')),
                                    'emailAddress': emailAddress})

if inputFile != sys.stdin:
    inputFile.close()
if outputFile != sys.stdout:
    outputFile.close()
|
[
"ross.scroggs@gmail.com"
] |
ross.scroggs@gmail.com
|
c0680485e5008a6554b28a45fbd927848f84b0a4
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/B/bwscrape/basic_twitter_scrapersefton_12.py
|
60ef63d50469e74904a0718aad24bf8abe06d460
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,490
|
py
|
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
# NOTE(review): Python 2 syntax (print statements) and the old unauthenticated
# search.twitter.com JSON endpoint -- presumably no longer functional; verify
# before reuse.
###################################################################################

import scraperwiki
import simplejson
import urllib2

# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'STEVEN Patton MANN'
RESULTS_PER_PAGE = '50'
LANGUAGE = 'en'
NUM_PAGES = 1500

# Page through the search results, saving each tweet keyed by its id.
for page in range(1, NUM_PAGES+1):
    base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
        % (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
    try:
        results_json = simplejson.loads(scraperwiki.scrape(base_url))
        for result in results_json['results']:
            #print result
            data = {}
            data['id'] = result['id']
            data['text'] = result['text']
            data['from_user'] = result['from_user']
            data['created_at'] = result['created_at']
            print data['from_user'], data['text']
            # Upsert into the scraperwiki sqlite store, unique on "id".
            scraperwiki.sqlite.save(["id"], data)
    # NOTE(review): bare except treats ANY failure (network, JSON, KeyError)
    # as "end of results" and stops paging -- consider narrowing.
    except:
        print 'Oh dear, faileddd to scrape %s' % base_url
        break
###################################################################################
# Twitter scraper - designed to be forked and used for more interesting things
# NOTE(review): this entire block is a verbatim duplicate of the scraper above --
# likely an accidental paste; consider deleting one copy.
###################################################################################

import scraperwiki
import simplejson
import urllib2

# Change QUERY to your search term of choice.
# Examples: 'newsnight', 'from:bbcnewsnight', 'to:bbcnewsnight'
QUERY = 'STEVEN Patton MANN'
RESULTS_PER_PAGE = '50'
LANGUAGE = 'en'
NUM_PAGES = 1500

for page in range(1, NUM_PAGES+1):
    base_url = 'http://search.twitter.com/search.json?q=%s&rpp=%s&lang=%s&page=%s' \
        % (urllib2.quote(QUERY), RESULTS_PER_PAGE, LANGUAGE, page)
    try:
        results_json = simplejson.loads(scraperwiki.scrape(base_url))
        for result in results_json['results']:
            #print result
            data = {}
            data['id'] = result['id']
            data['text'] = result['text']
            data['from_user'] = result['from_user']
            data['created_at'] = result['created_at']
            print data['from_user'], data['text']
            scraperwiki.sqlite.save(["id"], data)
    # NOTE(review): bare except swallows every failure and stops paging.
    except:
        print 'Oh dear, faileddd to scrape %s' % base_url
        break
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
9960074f78a8ff9d0d36b41eb50fb4ad3968e291
|
f0adf5afb93b7f0a67802e876a02e898cd92a172
|
/Tencent/GNN/metapath2vec/Generate_metapaths.py
|
18ff11473dcf9de3cb3004299cde11057d87f76b
|
[
"Apache-2.0"
] |
permissive
|
orange-eng/internship
|
9a2f746b3d50673038481392100d375f6eec82d3
|
c8c566df453d3a4bdf692338f74916ae15792fa1
|
refs/heads/main
| 2023-07-18T11:46:36.659858
| 2021-08-31T09:39:10
| 2021-08-31T09:39:10
| 358,230,295
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,719
|
py
|
import networkx as nx
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from dgl.nn.pytorch import GraphConv #内置的GCNlayer
import dgl
import matplotlib.pyplot as plt
import random
import time
import tqdm
import sys
import os
def construct_graph():
    """Build a DGL heterograph of user->item edges from the JData action log.

    Returns:
        (hg, user_index_id_map, item_index_id_map) where the maps translate
        contiguous node indices back to the original user/item ids.
    """
    file_user = './data/user_features.csv'
    file_item = './data/item_features.csv'
    file_edge = './data/JData_Action_201602.csv'
    f_user = pd.read_csv(file_user)
    f_item = pd.read_csv(file_item)
    f_edge = pd.read_csv(file_edge)
    # Subsample the edge log to keep the demo graph small.
    f_edge = f_edge.sample(10000)
    users = set()
    items = set()
    for index, row in f_edge.iterrows():
        users.add(row['user_id'])
        items.add(row['sku_id'])
    user_ids_index_map = {x: i for i, x in enumerate(users)} # user id -> node index
    item_ids_index_map = {x: i for i, x in enumerate(items)} # item id -> node index
    user_index_id_map = {i: x for i, x in enumerate(users)} # node index -> user id
    item_index_id_map = {i: x for i, x in enumerate(items)} # node index -> item id
    user_item_src = []
    user_item_dst = []
    for index, row in f_edge.iterrows():
        user_item_src.append(user_ids_index_map.get(row['user_id'])) # source: user node index
        user_item_dst.append(item_ids_index_map.get(row['sku_id'])) # destination: item node index
    # Build the heterograph from the edge index lists (legacy dgl.bipartite
    # construction kept below for reference).
    '''
    ui = dgl.bipartite((user_item_src, user_item_dst), 'user', 'ui', 'item') # 构建异构图; bipartite
    iu = dgl.bipartite((user_item_dst, user_item_src), 'item', 'iu', 'user')
    hg = dgl.hetero_from_relations([ui, iu])
    '''
    data_dict = {('user', 'item', 'user'): (torch.tensor(user_item_src), torch.tensor(user_item_dst))}
    hg = dgl.heterograph(data_dict)
    return hg, user_index_id_map, item_index_id_map
def parse_trace(trace, user_index_id_map, item_index_id_map):
    """Render a user->item->user... walk as a comma-separated id string.

    Even positions are user node indices, odd positions item node indices.
    """
    parts = []
    for idx in range(trace.size):
        mapping = user_index_id_map if idx % 2 == 0 else item_index_id_map
        parts.append(mapping[trace[idx]])
    return ','.join(parts)
def main():
    """Generate one metapath-guided random walk per user node and write each
    walk (as original user/item ids) on its own line of the output file."""
    hg, user_index_id_map, item_index_id_map = construct_graph()
    # Alternate user->item ('ui') and item->user ('iu') hops.
    meta_path = ['ui','iu','ui','iu','ui','iu']
    num_walks_per_node = 1
    # `with` guarantees the file is closed even if a walk fails mid-run.
    with open("./output/output_path.txt", "w") as f:
        for user_idx in tqdm.trange(hg.number_of_nodes('user')): # walks start at user nodes
            traces = dgl.contrib.sampling.metapath_random_walk(
                hg=hg, etypes=meta_path, seeds=[user_idx,], num_traces=num_walks_per_node)
            # Fix: removed the stray no-op statement `dgl.sampling.random_walk`
            # (a bare attribute access that did nothing and could raise
            # AttributeError on dgl versions without that module).
            tr = traces[0][0].numpy()
            tr = np.insert(tr, 0, user_idx)  # prepend the seed user node itself
            res = parse_trace(tr, user_index_id_map, item_index_id_map)
            f.write(res + '\n')
# Script entry point.
if __name__=='__main__':
    main()
|
[
"972353371@qq.com"
] |
972353371@qq.com
|
410778eda359ba00d8f98afb5deb6ac84ae624c1
|
86319aad3690906f614ac1af28b8843529e9e0da
|
/thwackbin/__init__.py
|
ab95f9cc0ab6c8b46a7c0f643cb504f8c070fdcc
|
[] |
no_license
|
sohgoh/thwackbin
|
b5828783a6179e96784bed0bdb894b179e3bea07
|
ba9fedc4bcec598f367aa6d4f2567d1840c65c51
|
refs/heads/master
| 2021-01-21T03:14:08.261732
| 2014-04-16T03:53:51
| 2014-04-16T04:02:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
"""
thwackbin
~~~~~~~~~
Thwackbin is an HTTP request/response test service which exposes the AppThwack REST API.
This service should be used to test/validate clients which wish to consume the actual API endpoint.
"""
__name__ = 'thwackbin'
__version__ = '0.0.1'
__author__ = 'Andrew Hawker <andrew@appthwack.com>'
import flask
def create_app():
    """
    Create and configure the thwackbin WSGI application.

    Returns a :class:`flask.Flask` app with mock data initialized, the
    appthwack blueprint registered, and exception handlers patched to emit
    JSON. Imports are deliberately local: the imported modules run setup code
    and must execute in this order.
    """
    app = flask.Flask(__name__)
    # Initialize mock data before anything that serves it.
    from thwackbin import data
    data.init()
    # Register the AppThwack API blueprint.
    from thwackbin import appthwack
    app.register_blueprint(appthwack.api)
    # Patch exception handlers so every error response is JSON.
    from thwackbin import patch
    app = patch.patch_exception_handlers(app)
    app.config['DOWNLOAD_FOLDER'] = data.ROOT
    return app
|
[
"andrew.r.hawker@gmail.com"
] |
andrew.r.hawker@gmail.com
|
a6f1df8c8c3dd73bd2c937dd3e0186367e7ecc93
|
19980ea46bb169873f01aaa1e89fc0d8ba488030
|
/samples/sampleopenflow/demos/demo11.py
|
b7138557fd71f15abbc7ceeaa7af146675c781a3
|
[
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
gaberger/pybvc
|
b0e4c7fd280d87330fe15e18eecca94f089bf1a4
|
bf546c4595a1a6282fca084865c5a0e69194030f
|
refs/heads/master
| 2023-01-13T21:19:01.625744
| 2015-12-01T16:01:00
| 2015-12-01T16:01:00
| 42,198,126
| 0
| 0
|
BSD-3-Clause
| 2022-12-26T20:18:11
| 2015-09-09T18:53:12
|
Python
|
UTF-8
|
Python
| false
| false
| 8,091
|
py
|
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Instruction,
OutputAction,
Match)
from pybvc.common.status import STATUS
from pybvc.common.utils import load_dict_from_file
from pybvc.common.constants import (ETH_TYPE_IPv4,
IP_PROTO_ICMP,
IP_DSCP_CS2,
IP_ECN_CE)
def of_demo_11():
    """Demo 11: add an ICMPv4-matching flow to an OpenFlow switch via the
    controller, read it back, then delete it. Python 2 script; controller
    connection details come from cfg.yml."""
    f = "cfg.yml"
    d = {}
    if(load_dict_from_file(f, d) is False):
        print("Config file '%s' read error: " % f)
        exit()
    try:
        ctrlIpAddr = d['ctrlIpAddr']
        ctrlPortNum = d['ctrlPortNum']
        ctrlUname = d['ctrlUname']
        ctrlPswd = d['ctrlPswd']
        nodeName = d['nodeName']
        rundelay = d['rundelay']
    except:
        print ("Failed to get Controller device attributes")
        exit(0)
    print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    print ("<<< Demo 11 Start")
    print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
    ofswitch = OFSwitch(ctrl, nodeName)
    # --- Flow Match: Ethernet Source Address
    #                 Ethernet Destination Address
    #                 IPv4 Source Address
    #                 IPv4 Destination Address
    #                 ICMPv4 Type
    #                 ICMPv4 Code
    #                 IP DSCP
    #                 IP ECN
    #                 Input Port
    # NOTE: Ethernet type MUST be 2048 (0x800) -> IPv4 protocol
    #       IP Protocol Type MUST be 1 -> ICMP
    eth_type = ETH_TYPE_IPv4
    eth_src = "00:00:00:11:23:ae"
    eth_dst = "00:ff:20:01:1a:3d"
    ipv4_src = "17.1.2.3/8"
    ipv4_dst = "172.168.5.6/18"
    ip_proto = IP_PROTO_ICMP
    ip_dscp = IP_DSCP_CS2 # 'Class Selector' = 'Immediate'
    ip_ecn = IP_ECN_CE # Congestion Encountered
    icmpv4_type = 6 # Alternate Host Address
    icmpv4_code = 3 # Alternate Address for Host
    input_port = 10
    print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'"
           % (ctrlIpAddr, nodeName))
    print "\n"
    print ("<<< Set OpenFlow flow on the Controller")
    print (" Match: Ethernet Type (%s)\n"
           " Ethernet Source Address (%s)\n"
           " Ethernet Destination Address (%s)\n"
           " IPv4 Source Address (%s)\n"
           " IPv4 Destination Address (%s)\n"
           " IP Protocol Number (%s)\n"
           " IP DSCP (%s)\n"
           " IP ECN (%s)\n"
           " ICMPv4 Type (%s)\n"
           " ICMPv4 Code (%s)\n"
           " Input Port (%s)"
           % (hex(eth_type), eth_src,
              eth_dst, ipv4_src, ipv4_dst,
              ip_proto, ip_dscp, ip_ecn,
              icmpv4_type, icmpv4_code,
              input_port))
    print (" Action: Output (NORMAL)")
    time.sleep(rundelay)
    # Build the flow entry: table 0, fixed id 18, never expires.
    flow_entry = FlowEntry()
    table_id = 0
    flow_entry.set_flow_table_id(table_id)
    flow_id = 18
    flow_entry.set_flow_id(flow_id)
    flow_entry.set_flow_hard_timeout(0)
    flow_entry.set_flow_idle_timeout(0)
    flow_entry.set_flow_priority(1009)
    # --- Instruction: 'Apply-actions'
    #     Action:      'Output' NORMAL
    instruction = Instruction(instruction_order=0)
    action = OutputAction(order=0, port="NORMAL")
    instruction.add_apply_action(action)
    flow_entry.add_instruction(instruction)
    # --- Match Fields: Ethernet Type
    #                   Ethernet Source Address
    #                   Ethernet Destination Address
    #                   IPv4 Source Address
    #                   IPv4 Destination Address
    #                   IP Protocol Number
    #                   IP DSCP
    #                   IP ECN
    #                   ICMPv4 Type
    #                   ICMPv4 Code
    #                   Input Port
    match = Match()
    match.set_eth_type(eth_type)
    match.set_eth_src(eth_src)
    match.set_eth_dst(eth_dst)
    match.set_ipv4_src(ipv4_src)
    match.set_ipv4_dst(ipv4_dst)
    match.set_ip_proto(ip_proto)
    match.set_ip_dscp(ip_dscp)
    match.set_ip_ecn(ip_ecn)
    match.set_icmpv4_type(icmpv4_type)
    match.set_icmpv4_code(icmpv4_code)
    match.set_in_port(input_port)
    flow_entry.add_match(match)
    print ("\n")
    print ("<<< Flow to send:")
    print flow_entry.get_payload()
    time.sleep(rundelay)
    # Push the flow to the controller.
    result = ofswitch.add_modify_flow(flow_entry)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("<<< Flow successfully added to the Controller")
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    print ("\n")
    print ("<<< Get configured flow from the Controller")
    time.sleep(rundelay)
    # Read the flow back to verify it was stored.
    result = ofswitch.get_configured_flow(table_id, flow_id)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("<<< Flow successfully read from the Controller")
        print ("Flow info:")
        flow = result.get_data()
        print json.dumps(flow, indent=4)
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    print ("\n")
    print ("<<< Delete flow with id of '%s' from the Controller's cache "
           "and from the table '%s' on the '%s' node"
           % (flow_id, table_id, nodeName))
    time.sleep(rundelay)
    # Clean up: remove the demo flow again.
    result = ofswitch.delete_flow(flow_entry.get_flow_table_id(),
                                  flow_entry.get_flow_id())
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("<<< Flow successfully removed from the Controller")
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.brief().lower())
        exit(0)
    print ("\n")
    print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
    print (">>> Demo End")
    print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
of_demo_11()
|
[
"jeb@elbrys.com"
] |
jeb@elbrys.com
|
e31a7060d75486ec7fd9ef972bacfc4b74111180
|
b4f66ebb5084efa6839771b62a1034a82094df6e
|
/setup.py
|
d1770422892a97a4591b7399521fb5a79403887d
|
[] |
no_license
|
mhfowler/howdoispeak
|
06f49dab64f62dea727a429557887742d1509265
|
110287dba64ae308943f431f628e528d7c941748
|
refs/heads/master
| 2016-09-05T14:04:48.605245
| 2015-01-17T23:27:15
| 2015-01-17T23:27:15
| 18,955,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
APP = ['munging/iphone_backup_upload.py']
DATA_FILES = ["munging/secrets.json"]
OPTIONS = {
'argv_emulation': True,
'iconfile':'green_circles.icns',
}
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
py_modules=["munging.common"]
)
|
[
"max_fowler@brown.edu"
] |
max_fowler@brown.edu
|
41ddd091df6ea055f01a6a9169e98ab77a7ceedd
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res/scripts/client/gui/app_loader/decorators.py
|
49d57664e5a33eceff808d2385710e8a082e19e6
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814
| 2015-11-11T00:08:04
| 2015-11-11T00:08:04
| 45,803,240
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,256
|
py
|
# 2015.11.10 21:25:12 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/app_loader/decorators.py
from gui.app_loader.loader import g_appLoader
from gui.app_loader.settings import APP_NAME_SPACE as _SPACE
class app_getter(property):
    """Descriptor that resolves an application from the global loader by namespace.

    Subclasses the builtin `property` but ignores fget on access: every read
    delegates to g_appLoader.getApp(self._space).
    """
    def __init__(self, fget = None, doc = None, space = None):
        super(app_getter, self).__init__(fget=fget, doc=doc)
        self._space = space

    def __get__(self, obj, objType = None):
        return g_appLoader.getApp(self._space)
class def_lobby(property):
    """Descriptor returning the default lobby application from the loader."""
    def __get__(self, obj, objType = None):
        return g_appLoader.getDefLobbyApp()
class def_battle(property):
    """Descriptor returning the default battle application from the loader."""
    def __get__(self, obj, objType = None):
        return g_appLoader.getDefBattleApp()
class sf_lobby(app_getter):
    """app_getter bound to the Scaleform lobby namespace."""
    def __init__(self, fget = None, doc = None):
        super(sf_lobby, self).__init__(fget, doc, _SPACE.SF_LOBBY)
class sf_battle(app_getter):
    """app_getter bound to the Scaleform battle namespace."""
    def __init__(self, fget = None, doc = None):
        super(sf_battle, self).__init__(fget, doc, _SPACE.SF_BATTLE)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\app_loader\decorators.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:25:12 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
7e0c9c66789b5d70e91d999e13647ddd4b2098ae
|
e6132244015942c5ec75c8eff4f90cd0e9302470
|
/src/wshop/apps/customer/notifications/services.py
|
46bab44bef79471157b1207adfc9a79e677340e1
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
vituocgia/wshop-core
|
d3173f603861685b523f6b66af502b9e94b7b0c2
|
5f6d1ec9e9158f13aab136c5bd901c41e69a1dba
|
refs/heads/master
| 2020-03-18T08:25:14.669538
| 2018-05-23T05:55:56
| 2018-05-23T05:55:56
| 134,508,443
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
from wshop.core.loading import get_model

# Resolve the Notification model lazily through the app registry so app
# overrides are honoured.
Notification = get_model('customer', 'Notification')
def notify_user(user, subject, **kwargs):
    """Create a single Notification addressed to *user* with the given subject.

    Extra keyword arguments are forwarded to Notification.objects.create.
    """
    Notification.objects.create(subject=subject, recipient=user, **kwargs)
def notify_users(users, subject, **kwargs):
    """Create one Notification per user in the *users* iterable."""
    for recipient in users:
        notify_user(recipient, subject, **kwargs)
|
[
"dotiendiep@gmail.com"
] |
dotiendiep@gmail.com
|
c36e62063a94a409390144111aa8b1febb637d79
|
1c594498900dd6f25e0a598b4c89b3e33cec5840
|
/iqps/search/views.py
|
c6c5dfb564a3088854e3a4badd988789e7fb6d3b
|
[
"MIT"
] |
permissive
|
thealphadollar/iqps
|
cef42ed8c86e4134e724a5f4967e96a83d672fcd
|
187f6b134d82e2dce951b356cb0c7151994ca3ab
|
refs/heads/master
| 2023-07-14T04:41:13.190595
| 2020-06-25T14:51:17
| 2020-06-25T14:51:17
| 277,360,692
| 0
| 0
|
MIT
| 2020-07-05T18:29:17
| 2020-07-05T18:29:16
| null |
UTF-8
|
Python
| false
| false
| 3,320
|
py
|
from django.shortcuts import render
from django.db import connection
from django.http import JsonResponse
from iqps.settings import DATABASES
#from .processors import SearchCursor
#Use this with sqlite
#db_name = DATABASES['default']['NAME']
def sqlite_search(subject, year=0, department="", paper_type=""):
    """Fuzzy-search papers in the (disabled) sqlite backend by subject similarity.

    NOTE(review): dead code path -- SearchCursor and db_name are defined only in
    the commented-out sqlite setup at the top of this module, so calling this
    raises NameError. Remove it or restore the sqlite wiring.

    SECURITY: every filter and the subject are interpolated straight into the
    SQL string -- vulnerable to SQL injection if ever re-enabled; switch to
    parameterized queries first.
    """
    year_filter = "AND p.year = {}".format(year) if year > 0 else ""
    dep_filter = "AND d.code = '{}'".format(department) if department != "" else ""
    type_filter = "AND p.paper_type = '{}'".format(paper_type) if paper_type != "" else ""
    if subject == "":
        return []
    query =\
    """SELECT p.subject, p.year, p.department_id, d.id, d.code, p.paper_type, p.link, SIMILARITYSCORE(p.subject, '{}') AS s
    FROM papers p JOIN departments d ON p.department_id = d.id
    WHERE s > 70 {} {} {} ORDER BY s DESC;""".format(subject, year_filter, dep_filter, type_filter)
    results = []
    with SearchCursor(db_name) as c:
        c.execute(query)
        for row in c.fetchall():
            results.append(row)
    return results
def _search(subject, year=0, department="", paper_type="", keywords=""):
    """Search papers by phonetic subject match with optional filters.

    Args:
        subject: subject prefix to match via MySQL SOUNDEX; "" returns [].
        year: restrict to an exact year when > 0.
        department: restrict to a department code when non-empty.
        paper_type: restrict to a paper type when non-empty.
        keywords: pre-formatted SQL tuple literal, e.g. "('k1','k2')"; when
            non-empty, rows are further filtered by keyword membership.

    Returns:
        Up to 30 result tuples, newest year first.

    Fix: subject/year/department/paper_type are now bound as query parameters
    instead of being interpolated into the SQL string (SQL injection).
    """
    if subject == "":
        return []
    # Optional filters with bound parameters, appended after the WHERE clause.
    conditions = []
    condition_params = []
    if year > 0:
        conditions.append("AND p.year = %s")
        condition_params.append(year)
    if department != "":
        conditions.append("AND d.code = %s")
        condition_params.append(department)
    if paper_type != "":
        conditions.append("AND p.paper_type = %s")
        condition_params.append(paper_type)
    # SECURITY: `keywords` is still interpolated verbatim because callers pass a
    # pre-formatted SQL tuple literal -- a bound-parameter IN clause would change
    # the caller contract. Validate/parameterize upstream before exposing this.
    keyword_filter = "AND kt.text IN {}".format(keywords) if keywords != "" else ""
    filter_sql = " ".join(conditions)
    if keyword_filter == "":
        query = (
            "SELECT p.subject, p.year, d.code, p.paper_type, p.link, p.id "
            "FROM papers p JOIN departments d ON p.department_id = d.id "
            "WHERE SOUNDEX(SUBSTRING(p.subject, 1, LENGTH(%s))) = SOUNDEX(%s) "
            + filter_sql + " ORDER BY year DESC LIMIT 30;"
        )
    else:
        query = (
            "SELECT p.subject, p.year, d.code, p.paper_type, p.link, p.id, "
            "GROUP_CONCAT(kt.text) AS keywords "
            "FROM papers AS p JOIN departments AS d ON p.department_id = d.id "
            "LEFT OUTER JOIN ("
            " SELECT pk.paper_id, k.text FROM papers_keywords AS pk "
            " JOIN keywords AS k ON pk.keyword_id = k.id"
            ") AS kt ON p.id = kt.paper_id "
            "WHERE SOUNDEX(SUBSTRING(p.subject, 1, LENGTH(%s))) = SOUNDEX(%s) "
            + filter_sql + " " + keyword_filter
            + " ORDER BY p.year DESC LIMIT 30;"
        )
    # Placeholder order: the two subject placeholders first, then the filters.
    params = [subject, subject] + condition_params
    with connection.cursor() as c:
        c.execute(query, params)
        results = list(c.fetchall())
    return results
def hitSearch(request):
    """
    Meant to be an independent API.
    Request args:
        q -> subject name
        year -> year filter
        dep -> department filter
        typ -> paper_type filter
        keys -> keyword filter (SQL tuple literal, see _search)
    Returns a JSON response {"papers": [...]} with a permissive CORS header.
    """
    q = request.GET.get('q', "")
    year = request.GET.get('year', 0)
    dep = request.GET.get('dep', "")
    typ = request.GET.get('typ', "")
    keywords = request.GET.get('keys', "")
    try:
        year = int(year)
    # Fix: narrowed the bare `except:` -- only a non-numeric/None year should
    # fall back to "no year filter"; other errors must propagate.
    except (TypeError, ValueError):
        year = 0
    results = _search(q, year=year, department=dep, paper_type=typ, keywords=keywords)
    response = JsonResponse({"papers": results})
    response["Access-Control-Allow-Origin"] = "*"  # For CORS
    return response
|
[
"smishra99.iitkgp@gmail.com"
] |
smishra99.iitkgp@gmail.com
|
e077f429daff201e907044fe1dafc3a66af86952
|
26fc334777ce27d241c67d97adc1761e9d23bdba
|
/tests/django_tests/tests/middleware_exceptions/tests.py
|
0c39f09f9156cf2b9787fa67ac627a5c7dd4a653
|
[
"BSD-3-Clause"
] |
permissive
|
alihoseiny/djongo
|
1434c9e78c77025d7e0b3330c3a40e9ea0029877
|
e2edf099e398573faa90e5b28a32c3d7f1c5f1e9
|
refs/heads/master
| 2020-03-27T23:27:02.530397
| 2018-08-30T14:44:37
| 2018-08-30T14:44:37
| 147,317,771
| 2
| 1
|
BSD-3-Clause
| 2018-09-04T09:00:53
| 2018-09-04T09:00:53
| null |
UTF-8
|
Python
| false
| false
| 6,887
|
py
|
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import patch_logger
from . import middleware as mw
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class MiddlewareTests(SimpleTestCase):
    """Exercise the middleware hooks process_view(), process_template_response()
    and process_exception() through the full request/response cycle."""

    def tearDown(self):
        # mw.log is module-level shared state written by the test middlewares;
        # reset it so tests stay independent.
        mw.log = []

    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessViewNoneMiddleware'])
    def test_process_view_return_none(self):
        # process_view() returning None lets the view execute normally.
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(mw.log, ['processed view normal_view'])
        self.assertEqual(response.content, b'OK')

    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessViewMiddleware'])
    def test_process_view_return_response(self):
        # process_view() returning a response short-circuits the view.
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(response.content, b'Processed view normal_view')

    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware',
        'middleware_exceptions.middleware.LogMiddleware',
    ])
    def test_templateresponse_from_process_view_rendered(self):
        """
        TemplateResponses returned from process_view() must be rendered before
        being passed to any middleware that tries to access response.content,
        such as middleware_exceptions.middleware.LogMiddleware.
        """
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(response.content, b'Processed view normal_view\nProcessViewTemplateResponseMiddleware')

    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware',
        'middleware_exceptions.middleware.TemplateResponseMiddleware',
    ])
    def test_templateresponse_from_process_view_passed_to_process_template_response(self):
        """
        TemplateResponses returned from process_view() should be passed to any
        template response middleware.
        """
        response = self.client.get('/middleware_exceptions/view/')
        expected_lines = [
            b'Processed view normal_view',
            b'ProcessViewTemplateResponseMiddleware',
            b'TemplateResponseMiddleware',
        ]
        self.assertEqual(response.content, b'\n'.join(expected_lines))

    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.TemplateResponseMiddleware'])
    def test_process_template_response(self):
        # process_template_response() may modify the response before render.
        response = self.client.get('/middleware_exceptions/template_response/')
        self.assertEqual(response.content, b'template_response OK\nTemplateResponseMiddleware')

    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.LogMiddleware'])
    def test_view_exception_converted_before_middleware(self):
        # PermissionDenied raised in the view is converted to a 403 response
        # before earlier middleware sees it.
        response = self.client.get('/middleware_exceptions/permission_denied/')
        self.assertEqual(mw.log, [(response.status_code, response.content)])
        self.assertEqual(response.status_code, 403)

    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessExceptionMiddleware'])
    def test_view_exception_handled_by_process_exception(self):
        # process_exception() may replace an exception with a response.
        response = self.client.get('/middleware_exceptions/error/')
        self.assertEqual(response.content, b'Exception caught')

    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.ProcessExceptionLogMiddleware',
        'middleware_exceptions.middleware.ProcessExceptionMiddleware',
    ])
    def test_response_from_process_exception_short_circuits_remainder(self):
        # Once one process_exception() returns a response, earlier ones
        # in the chain are skipped (mw.log stays empty).
        response = self.client.get('/middleware_exceptions/error/')
        self.assertEqual(mw.log, [])
        self.assertEqual(response.content, b'Exception caught')

    @override_settings(MIDDLEWARE=[
        'middleware_exceptions.middleware.LogMiddleware',
        'middleware_exceptions.middleware.NotFoundMiddleware',
    ])
    def test_exception_in_middleware_converted_before_prior_middleware(self):
        # Http404 raised inside a middleware becomes a 404 response before
        # the middleware above it runs.
        response = self.client.get('/middleware_exceptions/view/')
        self.assertEqual(mw.log, [(404, response.content)])
        self.assertEqual(response.status_code, 404)

    @override_settings(MIDDLEWARE=['middleware_exceptions.middleware.ProcessExceptionMiddleware'])
    def test_exception_in_render_passed_to_process_exception(self):
        # Exceptions raised while rendering a TemplateResponse also reach
        # process_exception().
        response = self.client.get('/middleware_exceptions/exception_in_render/')
        self.assertEqual(response.content, b'Exception caught')
@override_settings(ROOT_URLCONF='middleware_exceptions.urls')
class RootUrlconfTests(SimpleTestCase):
    """Behavior of request handling when settings.ROOT_URLCONF is missing."""

    @override_settings(ROOT_URLCONF=None)
    def test_missing_root_urlconf(self):
        # Removing ROOT_URLCONF is safe, as override_settings will restore
        # the previously defined settings.
        del settings.ROOT_URLCONF
        with self.assertRaises(AttributeError):
            self.client.get("/middleware_exceptions/view/")
class MyMiddleware:
    """Middleware that always opts out by raising MiddlewareNotUsed."""

    def __init__(self, get_response=None):
        # Raising here tells Django to drop this middleware from the chain.
        raise MiddlewareNotUsed

    def process_request(self, request):
        # Never reached: __init__ always raises.
        pass
class MyMiddlewareWithExceptionMessage:
    """Like MyMiddleware, but attaches a message to MiddlewareNotUsed so the
    logged text can be asserted against."""

    def __init__(self, get_response=None):
        raise MiddlewareNotUsed('spam eggs')

    def process_request(self, request):
        # Never reached: __init__ always raises.
        pass
@override_settings(
    DEBUG=True,
    ROOT_URLCONF='middleware_exceptions.urls',
    MIDDLEWARE=['django.middleware.common.CommonMiddleware'],
)
class MiddlewareNotUsedTests(SimpleTestCase):
    """How MiddlewareNotUsed is logged (only when DEBUG is True)."""

    rf = RequestFactory()

    def test_raise_exception(self):
        # Calling the middleware directly still raises.
        request = self.rf.get('middleware_exceptions/view/')
        with self.assertRaises(MiddlewareNotUsed):
            MyMiddleware().process_request(request)

    @override_settings(MIDDLEWARE=['middleware_exceptions.tests.MyMiddleware'])
    def test_log(self):
        # With DEBUG=True, skipping a middleware is logged once at DEBUG level.
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 1)
        self.assertEqual(
            calls[0],
            "MiddlewareNotUsed: 'middleware_exceptions.tests.MyMiddleware'"
        )

    @override_settings(MIDDLEWARE=['middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'])
    def test_log_custom_message(self):
        # The message passed to MiddlewareNotUsed is appended to the log line.
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 1)
        self.assertEqual(
            calls[0],
            "MiddlewareNotUsed('middleware_exceptions.tests.MyMiddlewareWithExceptionMessage'): spam eggs"
        )

    @override_settings(DEBUG=False)
    def test_do_not_log_when_debug_is_false(self):
        # Nothing is logged in production mode.
        with patch_logger('django.request', 'debug') as calls:
            self.client.get('/middleware_exceptions/view/')
        self.assertEqual(len(calls), 0)
|
[
"nesdis@gmail.com"
] |
nesdis@gmail.com
|
788b1114cf8da3899edd4800a1fbc676bf8142ee
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/pyobjc/pyobjc/pyobjc-framework-Quartz-2.5.1/Examples/Programming with Quartz/BasicDrawing/MyAppController.py
|
7108ddb749d657bf205c4db6e76aba0164427919
|
[
"MIT"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
Python
| false
| false
| 4,062
|
py
|
from Cocoa import *
import objc
import PDFHandling
import BitmapContext
import Utilities
# Initial defaults
_dpi = 144
_useQT = False
def getURLToExport(suffix):
    """Show a save panel for "BasicDrawing.<suffix>" and return the chosen
    NSURL, or None if the user cancelled."""
    savePanel = NSSavePanel.savePanel()
    initialFileName = "BasicDrawing.%s"%(suffix,)
    # Modal run loop blocks until the user confirms or cancels.
    if savePanel.runModalForDirectory_file_(None, initialFileName) == NSFileHandlingPanelOKButton:
        return savePanel.URL()
    return None
class MyAppController (NSObject):
    """Controller for the BasicDrawing example app: keeps the DPI and
    export-style menus in sync with the module-level _dpi/_useQT settings
    and implements the export menu actions."""

    theView = objc.IBOutlet()
    currentDPIMenuItem = objc.IBOutlet()
    currentExportStyleMenuItem = objc.IBOutlet()

    @objc.IBAction
    def print_(self, sender):
        """Forward the Print command to the drawing view."""
        self.theView.print_(sender)

    def updateDPIMenu_(self, sender):
        """Move the DPI menu check mark to *sender*."""
        if self.currentDPIMenuItem is not sender:
            # Uncheck the previous item.
            if self.currentDPIMenuItem is not None:
                self.currentDPIMenuItem.setState_(NSOffState)
            # Update to the current item.
            self.currentDPIMenuItem = sender
            # Check new menu item.
            self.currentDPIMenuItem.setState_(NSOnState)

    def updateExportStyleMenu_(self, sender):
        """Move the export-style menu check mark to *sender*."""
        if self.currentExportStyleMenuItem is not sender:
            # Uncheck the previous item.
            if self.currentExportStyleMenuItem is not None:
                self.currentExportStyleMenuItem.setState_(NSOffState)
            # Update to the current item.
            self.currentExportStyleMenuItem = sender
            # Check new menu item.
            self.currentExportStyleMenuItem.setState_(NSOnState)

    @objc.IBAction
    def setExportResolution_(self, sender):
        """Set the export DPI from the menu item's tag (e.g. 144, 300)."""
        global _dpi
        _dpi = sender.tag()
        self.updateDPIMenu_(sender)

    @objc.IBAction
    def setUseQT_(self, sender):
        """Select QuickTime as the export backend."""
        global _useQT
        _useQT = True
        self.updateExportStyleMenu_(sender)

    @objc.IBAction
    def setUseCGImageSource_(self, sender):
        """Select CGImageSource as the export backend."""
        global _useQT
        _useQT = False
        self.updateExportStyleMenu_(sender)

    def setupExportInfo_(self, exportInfoP):
        """Fill an ExportInfo record with the current export settings."""
        # Use the printable version of the current command. This produces
        # the best results for exporting.
        exportInfoP.command = self.theView.currentPrintableCommand()
        exportInfoP.fileType = ' '  # unused
        exportInfoP.useQTForExport = _useQT
        exportInfoP.dpi = _dpi

    @objc.IBAction
    def exportAsPDF_(self, sender):
        """Export the drawing as a PDF file chosen via the save panel."""
        url = getURLToExport("pdf")
        if url is not None:
            exportInfo = Utilities.ExportInfo()
            self.setupExportInfo_(exportInfo)
            PDFHandling.MakePDFDocument(url, exportInfo)

    @objc.IBAction
    def exportAsPNG_(self, sender):
        """Export the drawing as a PNG file chosen via the save panel."""
        url = getURLToExport("png")
        if url is not None:
            exportInfo = Utilities.ExportInfo()
            self.setupExportInfo_(exportInfo)
            BitmapContext.MakePNGDocument(url, exportInfo)

    @objc.IBAction
    def exportAsTIFF_(self, sender):
        """Export the drawing as a TIFF file chosen via the save panel."""
        url = getURLToExport("tif")
        if url is not None:
            exportInfo = Utilities.ExportInfo()
            self.setupExportInfo_(exportInfo)
            BitmapContext.MakeTIFFDocument(url, exportInfo)

    @objc.IBAction
    def exportAsJPEG_(self, sender):
        """Export the drawing as a JPEG file chosen via the save panel."""
        url = getURLToExport("jpg")
        if url is not None:
            exportInfo = Utilities.ExportInfo()
            self.setupExportInfo_(exportInfo)
            BitmapContext.MakeJPEGDocument(url, exportInfo)

    def validateMenuItem_(self, menuItem):
        """Keep menu check marks consistent with _dpi/_useQT.

        Fixes over the original sample code:
        - menuItem.tag is a method in PyObjC and must be called; comparing
          the bound method object to the int _dpi was always False.
        - assignments went to a local variable instead of the instance
          attribute, so the remembered item was never actually updated.
        - the CGImageSource branch tested `_useQT` instead of `not _useQT`,
          so the wrong export-style item was checked.
        """
        if menuItem.tag() == _dpi:
            self.currentDPIMenuItem = menuItem
            menuItem.setState_(True)
        elif menuItem.action() == 'setUseQT:':
            if _useQT:
                self.currentExportStyleMenuItem = menuItem
                menuItem.setState_(True)
            else:
                menuItem.setState_(False)
        elif menuItem.action() == 'setUseCGImageSource:':
            if not _useQT:
                self.currentExportStyleMenuItem = menuItem
                menuItem.setState_(True)
            else:
                menuItem.setState_(False)
        return True
|
[
"opensource@apple.com"
] |
opensource@apple.com
|
01685b4a849a3156658fa0dbdaad10650ff9d148
|
b14802e3892a661fa62d9d0772f72becc0abd612
|
/evaluation/get_top_socored.py
|
0bd0d8919ad1d0eed44022b6a57cbb69617117bb
|
[] |
no_license
|
gombru/HateSpeech
|
e4c4b7993354ce2cb49334b814f929364fdcb446
|
7891c7e2835f17ed2a9985abd285e19788685c66
|
refs/heads/master
| 2022-02-23T08:57:34.909778
| 2022-02-10T12:54:41
| 2022-02-10T12:54:41
| 138,057,409
| 6
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,326
|
py
|
import numpy as np
import operator
import shutil
import os
# NOTE(review): this is Python 2 code (bare `print r[1]` below); it will not
# run under Python 3 without converting the print statement.
model_name = 'MMHS_classification_CNNinit_SCM_ALL_epoch_10_ValAcc_62'
out_folder_name = 'top_MMHS_classification_CNNinit_SCM_ALL_epoch_10_ValAcc_62'
out_file = open('../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name + '.txt','w')
if not os.path.exists('../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name):
    os.makedirs('../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name)

# Map tweet id -> softmax hate probability, parsed from the results file.
results = {}
with open('../../../datasets/HateSPic/MMHS/results/' + model_name + '/test.txt') as f:
    for line in f:
        data = line.split(',')
        id = int(data[0])
        label = int(data[1])  # read but unused below
        hate_score = float(data[3])
        notHate_score = float(data[2])
        # Two-class softmax over the raw (hate, notHate) logits.
        softmax_hate_score = np.exp(hate_score) / (np.exp(hate_score) + np.exp(notHate_score))
        results[id] = softmax_hate_score

# Sort ascending by score, then reverse -> highest-scored ids first.
results = sorted(results.items(), key=operator.itemgetter(1))
results = list(reversed(results))

# Copy the images of the top-50 scored ids and record their ids.
for i,r in enumerate(results):
    if i == 50: break
    print r[1]
    shutil.copyfile('../../../datasets/HateSPic/MMHS/img_resized/' + str(str(r[0])) + '.jpg', '../../../datasets/HateSPic/MMHS/top_scored/' + out_folder_name + '/' + str(i) + '-' + str(r[0]) + '.jpg')
    out_file.write(str(r[0]) + '\n')

out_file.close()
print("Done")
|
[
"raulgombru@gmail.com"
] |
raulgombru@gmail.com
|
429d42c8fd21f8aeed2ca8697dc6fab586d5a1dd
|
1fec393454ffe7f65fce3617c14a2fcedf1da663
|
/Searching/Searching I/matrix_median.py
|
9cab3f6da7a1a3e9e867bcedf81f9997880f980b
|
[] |
no_license
|
VarmaSANJAY/InterviewBit-Solution-Python
|
fbeb1d855a5244a89b40fbd2522640dc596c79b6
|
ea26394cc1b9d22a9ab474467621d2b61ef15a31
|
refs/heads/master
| 2022-11-27T22:46:34.966395
| 2020-08-09T14:10:58
| 2020-08-09T14:10:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 864
|
py
|
from bisect import *
class Solution:
    """Median of a matrix whose rows are individually sorted, found by
    binary-searching the value range instead of flattening the matrix."""

    def binary_search(self, A, min_el, max_el, cnt_before_mid):
        """Return the smallest value v in [min_el, max_el] such that strictly
        more than cnt_before_mid entries of A are <= v (i.e. the median)."""
        lo, hi = min_el, max_el
        while lo < hi:
            candidate = (lo + hi) // 2
            # Count entries <= candidate, one O(log n) bisect per sorted row.
            at_most = sum(bisect_right(row, candidate) for row in A)
            if at_most > cnt_before_mid:
                hi = candidate
            else:
                lo = candidate + 1
        return lo

    def Solve(self, A):
        """Return the median of the row-wise sorted matrix A (odd m*n)."""
        lo = min(row[0] for row in A)    # global minimum is in some first column
        hi = max(row[-1] for row in A)   # global maximum is in some last column
        half = (len(A) * len(A[0])) // 2
        return self.binary_search(A, lo, hi, half)
if __name__ == '__main__':
    # Demo: median of the 9 values {1,2,3,3,5,6,6,9,9} is 5.
    A = [[1, 3, 5],
         [2, 6, 9],
         [3, 6, 9]]
    B = Solution()
    print(B.Solve(A))
|
[
"srajsonu02@gmail.com"
] |
srajsonu02@gmail.com
|
4b324a9f9ea99b231e13b55494bd0092b1cf52ec
|
c3ca0bcea4d1b4013a0891f014928922fc81fe7a
|
/examples/multi_step_training.py
|
605e0ac42e4b43a5d9c9b7ba9d1573554d4f6c74
|
[
"MIT"
] |
permissive
|
takuseno/d3rlpy
|
47894b17fc21fab570eca39fe8e6925a7b5d7d6f
|
4ba297fc6cd62201f7cd4edb7759138182e4ce04
|
refs/heads/master
| 2023-08-23T12:27:45.305758
| 2023-08-14T12:07:03
| 2023-08-14T12:07:03
| 266,369,147
| 1,048
| 222
|
MIT
| 2023-09-02T08:12:48
| 2020-05-23T15:51:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,483
|
py
|
import argparse
import gym
import d3rlpy
GAMMA = 0.99
def main() -> None:
    """Train SAC online on a Gym environment using multi-step (n-step)
    transition sampling for the replay buffer."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", type=str, default="Pendulum-v1")
    parser.add_argument("--seed", type=int, default=1)
    parser.add_argument("--n-steps", type=int, default=1)  # n in n-step returns
    parser.add_argument("--gpu", action="store_true")
    args = parser.parse_args()

    env = gym.make(args.env)
    eval_env = gym.make(args.env)

    # fix seed
    d3rlpy.seed(args.seed)
    d3rlpy.envs.seed_env(env, args.seed)
    d3rlpy.envs.seed_env(eval_env, args.seed)

    # setup algorithm
    sac = d3rlpy.algos.SACConfig(
        batch_size=256,
        gamma=GAMMA,
        actor_learning_rate=3e-4,
        critic_learning_rate=3e-4,
        temp_learning_rate=3e-4,
        action_scaler=d3rlpy.preprocessing.MinMaxActionScaler(),
    ).create(device=args.gpu)

    # multi-step transition sampling: the same GAMMA must be used here and in
    # the algorithm so the discounting of the n-step return is consistent.
    transition_picker = d3rlpy.dataset.MultiStepTransitionPicker(
        n_steps=args.n_steps,
        gamma=GAMMA,
    )

    # replay buffer for experience replay
    buffer = d3rlpy.dataset.create_fifo_replay_buffer(
        limit=100000,
        env=env,
        transition_picker=transition_picker,
    )

    # start training
    sac.fit_online(
        env,
        buffer,
        eval_env=eval_env,
        n_steps=100000,
        n_steps_per_epoch=1000,
        update_interval=1,
        update_start_step=1000,
    )


if __name__ == "__main__":
    main()
|
[
"takuma.seno@gmail.com"
] |
takuma.seno@gmail.com
|
c3bbb5738b81da3295cb82f51894e74b8553f71b
|
7765c093fbfaebc3328f8500db2e462977ac42a5
|
/sqlite/sample.py
|
f4dc2f38f85c48f038a9b6f853da204c4bf0df63
|
[] |
no_license
|
iamkamleshrangi/datascience
|
e118e41591850f24438aa344100a07737490fd29
|
7add9501c3ac75323e94df5351e2baf6cadb73ae
|
refs/heads/master
| 2022-02-02T20:19:20.986813
| 2018-07-23T13:26:37
| 2018-07-23T13:26:37
| 128,158,552
| 0
| 0
| null | 2022-01-21T04:26:26
| 2018-04-05T04:22:15
|
Python
|
UTF-8
|
Python
| false
| false
| 358
|
py
|
# NOTE(review): this snippet assumes `from sqlalchemy import create_engine`
# and `import pandas as pd` were executed elsewhere — neither is imported here.

# Create engine: engine
engine = create_engine('sqlite:///Chinook.sqlite')

# Open engine in context manager: the connection is closed automatically
# when the `with` block exits.
with engine.connect() as con:
    rs = con.execute('select * from Employee order by BirthDate asc')
    df = pd.DataFrame(rs.fetchall())

    # Set the DataFrame's column names
    df.columns = rs.keys()

# Print head of DataFrame
print(df.head())
|
[
"iamkamleshrangi@gmail.com"
] |
iamkamleshrangi@gmail.com
|
28afd10dd4bf86cc9fc12239cac8891a7b46c5df
|
a9243f735f6bb113b18aa939898a97725c358a6d
|
/0.12/_downloads/plot_time_frequency_mixed_norm_inverse.py
|
65ac593e852afd7ae0cd4471a6c573000a16b131
|
[] |
permissive
|
massich/mne-tools.github.io
|
9eaf5edccb4c35831400b03278bb8c2321774ef2
|
95650593ba0eca4ff8257ebcbdf05731038d8d4e
|
refs/heads/master
| 2020-04-07T08:55:46.850530
| 2019-09-24T12:26:02
| 2019-09-24T12:26:02
| 158,233,630
| 0
| 0
|
BSD-3-Clause
| 2018-11-19T14:06:16
| 2018-11-19T14:06:16
| null |
UTF-8
|
Python
| false
| false
| 4,959
|
py
|
"""
=============================================
Compute MxNE with time-frequency sparse prior
=============================================
The TF-MxNE solver is a distributed inverse method (like dSPM or sLORETA)
that promotes focal (sparse) sources (such as dipole fitting techniques).
The benefit of this approach is that:
- it is spatio-temporal without assuming stationarity (sources properties
can vary over time)
- activations are localized in space, time and frequency in one step.
- with a built-in filtering process based on a short time Fourier
transform (STFT), data does not need to be low passed (just high pass
to make the signals zero mean).
- the solver solves a convex optimization problem, hence cannot be
trapped in local minima.
References:
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
non-stationary source activations
Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
DOI: 10.1016/j.neuroimage.2012.12.051.
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Functional Brain Imaging with M/EEG Using Structured Sparsity in
Time-Frequency Dictionaries
Proceedings Information Processing in Medical Imaging
Lecture Notes in Computer Science, 2011, Volume 6801/2011,
600-611, DOI: 10.1007/978-3-642-22092-0_49
https://doi.org/10.1007/978-3-642-22092-0_49
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.inverse_sparse import tf_mixed_norm
from mne.viz import plot_sparse_source_estimates
print(__doc__)

data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'

# Read noise covariance matrix
cov = mne.read_cov(cov_fname)

# Handling average file
condition = 'Left visual'
evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))
evoked = mne.pick_channels_evoked(evoked)
# We make the window slightly larger than what you'll eventually be interested
# in ([-0.05, 0.3]) to avoid edge effects.
evoked.crop(tmin=-0.1, tmax=0.4)

# Handling forward solution
forward = mne.read_forward_solution(fwd_fname, force_fixed=False,
                                    surf_ori=True)

###############################################################################
# Run solver

# alpha_space regularization parameter is between 0 and 100 (100 is high)
alpha_space = 50.  # spatial regularization parameter
# alpha_time parameter promotes temporal smoothness
# (0 means no temporal regularization)
alpha_time = 1.  # temporal regularization parameter

loose, depth = 0.2, 0.9  # loose orientation & depth weighting

# Compute dSPM solution to be used as weights in MxNE
inverse_operator = make_inverse_operator(evoked.info, forward, cov,
                                         loose=loose, depth=depth)
stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
                         method='dSPM')

# Compute TF-MxNE inverse solution (also returns the unexplained residual)
stc, residual = tf_mixed_norm(evoked, forward, cov, alpha_space, alpha_time,
                              loose=loose, depth=depth, maxit=200, tol=1e-4,
                              weights=stc_dspm, weights_min=8., debias=True,
                              wsize=16, tstep=4, window=0.05,
                              return_residual=True)

# Crop to remove edges (the wider window read above was only for padding)
stc.crop(tmin=-0.05, tmax=0.3)
evoked.crop(tmin=-0.05, tmax=0.3)
residual.crop(tmin=-0.05, tmax=0.3)

# Show the evoked response and the residual for gradiometers
ylim = dict(grad=[-120, 120])
evoked.pick_types(meg='grad', exclude='bads')
evoked.plot(titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim,
            proj=True)

residual.pick_types(meg='grad', exclude='bads')
residual.plot(titles=dict(grad='Residuals: Gradiometers'), ylim=ylim,
              proj=True)

###############################################################################
# View in 2D and 3D ("glass" brain like 3D plot)
plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
                             opacity=0.1, fig_name="TF-MxNE (cond %s)"
                             % condition, modes=['sphere'], scale_factors=[1.])

time_label = 'TF-MxNE time=%0.2f ms'
clim = dict(kind='value', lims=[10e-9, 15e-9, 20e-9])
brain = stc.plot('sample', 'inflated', 'rh', clim=clim, time_label=time_label,
                 smoothing_steps=5, subjects_dir=subjects_dir)
brain.show_view('medial')
brain.set_data_time_index(120)
brain.add_label("V1", color="yellow", scalar_thresh=.5, borders=True)
brain.add_label("V2", color="red", scalar_thresh=.5, borders=True)
|
[
"larson.eric.d@gmail.com"
] |
larson.eric.d@gmail.com
|
b548eedfdd00fe7c08f5ba00618fbe44e0cba7df
|
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
|
/src/main/output/pipeline/service_group/number_office/time/fact.py
|
e3cbfccb649de7dbf84162e340a4f0fe1510ddd6
|
[] |
no_license
|
matkosoric/GenericNameTesting
|
860a22af1098dda9ea9e24a1fc681bb728aa2d69
|
03f4a38229c28bc6d83258e5a84fce4b189d5f00
|
refs/heads/master
| 2021-01-08T22:35:20.022350
| 2020-02-21T11:28:21
| 2020-02-21T11:28:21
| 242,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,575
|
py
|
package textTranslator;
import java.io.*;
import java.net.*;
import java.util.*;
import com.google.gson.*;
import com.squareup.okhttp.*;
public class Translate {
String subscriptionKey = 'b58103fec253e2c21b0fdc1a24e16352';
String url = "https://api.cognitive.microsofttranslator.com/translate?api-version=3.0&to=";
public Translate(String subscriptionKey) {
this.subscriptionKey = subscriptionKey;
}
// Instantiates the OkHttpClient.
OkHttpClient client = new OkHttpClient();
// This function performs a POST request.
public String Post() throws IOException {
MediaType mediaType = MediaType.parse("application/json");
RequestBody body = RequestBody.create(mediaType,
"[{\n\t\"Text\": \"Welcome to Microsoft Translator. Guess how many languages I speak!\"\n}]");
Request request = new Request.Builder()
.url(url).post(body)
.addHeader("ec0c96a092ea0a3ba1041f4738a0b33a", subscriptionKey)
.addHeader("Content-type", "application/json").build();
Response response = client.newCall(request).execute();
return response.body().string();
}
public String Post(String bodyStr, String translateTo) throws IOException {
MediaType mediaType = MediaType.parse("application/json");
RequestBody body = RequestBody.create(mediaType,
"[{\n\t\"Text\": \"" + bodyStr + "\"\n}]");
Request request = new Request.Builder()
.url(url + translateTo).post(body)
.addHeader("f460aacf46d11f243d71d7221840dbe5", subscriptionKey)
.addHeader("Content-type", "application/json").build();
Response response = client.newCall(request).execute();
return response.body().string();
}
// This function prettifies the json response.
public static String prettify(String json_text) {
JsonParser parser = new JsonParser();
JsonElement json = parser.parse(json_text);
Gson gson = new GsonBuilder().setPrettyPrinting().create();
return gson.toJson(json);
}
public static String getTranslatedText(String jsonText) {
JsonParser parser = new JsonParser();
JsonArray json = parser.parse(jsonText).getAsJsonArray();
String translatedText = null;
for (int i = 0; i < json.size(); i++) {
if (translatedText != null)
break;
JsonObject jsonObj = json.get(i).getAsJsonObject();
JsonArray translations = jsonObj.getAsJsonArray("translations");
if (translations == null) return "";
for (int j = 0; j < translations.size(); j++) {
if (translatedText != null) break;
JsonObject translation = translations.get(j).getAsJsonObject();
JsonElement text = translation.get("text");
if (text == null) return "";
translatedText = text.getAsString();
}
}
return translatedText;
}
// public static void main(String[] args) {
// try {
// Translate translateRequest = new Translate(System.getenv("Translator"));
//// String response = translateRequest.Post();
//// System.out.println(prettify(response));
//
// String response = translateRequest.Post("Hello", "fr");
// System.out.println(Translate.prettify(response));
//
// System.out.println(getTranslatedText(response));
//
//
// } catch (Exception e) {
// System.out.println(e);
// }
// }
}
|
[
"soric.matko@gmail.com"
] |
soric.matko@gmail.com
|
ffe965efd83b48d88452e41df5c8274713eac169
|
ca565548206583a58fe8d646bfd9a6f1ba51c673
|
/problem2.py
|
fa5313404ef249962fe28fa2f3edd13684ba5711
|
[] |
no_license
|
GLAU-TND/python-programming-assignment2-kirtimansinghcs19
|
fbd772f38fa3546e579ffc2bdf99cc2b34e9937b
|
5dc16c8b24186a2e00c749e14eecaac426f51e90
|
refs/heads/master
| 2021-01-13T22:51:02.990390
| 2020-02-23T16:32:51
| 2020-02-23T16:32:51
| 242,519,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 282
|
py
|
from itertools import permutations
def largest(l):
    """Return, as a string, the largest number that can be formed by
    concatenating all elements of *l* in some order.

    Equivalent to taking max() over all permutations (the original
    implementation) but runs in O(n log n) instead of O(n!): sort with
    the classic pairwise rule "a before b iff a+b >= b+a".
    Returns '' for an empty list, matching the permutation version.
    """
    from functools import cmp_to_key
    digits = [str(x) for x in l]
    # cmp < 0 puts a first; a goes first exactly when a+b > b+a.
    digits.sort(key=cmp_to_key(lambda a, b: (a + b < b + a) - (a + b > b + a)))
    return ''.join(digits)
# Read n, then n integers, and print the largest concatenation of them.
ls=[]
n=int(input('Enter the no element'))
for i in range(0,n):
    ls.append(int(input()))
print(largest(ls))
|
[
"noreply@github.com"
] |
GLAU-TND.noreply@github.com
|
142f68111255fe08b0cfa29a4378494361ef2c57
|
8ee5dcbdbd407eb5f294d430813b16eca22f571c
|
/data/HW5/hw5_253.py
|
628a39851ed1f06194065eadcb2c20d9da276de9
|
[] |
no_license
|
MAPLE-Robot-Subgoaling/IPT
|
5e60e2ee4d0a5688bc8711ceed953e76cd2ad5d9
|
f512ea3324bfdceff8df63b4c7134b5fcbb0514e
|
refs/heads/master
| 2021-01-11T12:31:00.939051
| 2018-08-13T23:24:19
| 2018-08-13T23:24:19
| 79,373,489
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
def main():
    """Read box dimensions and two symbols, then print a width x height box
    drawn with the outline symbol on the border and the fill symbol inside."""
    width = int(input("please enter the width of the box "))
    height = int(input("please enter the height of thebox "))
    sym = input("please enter a symbol for the outline ")
    fill = input("please enter a fill symbol ")
    for h in range(height):
        for w in range(width):
            # Border cell when on the first/last row or first/last column.
            print(sym if h in(0,height-1) or w in(0,width-1) else fill, end = ' ')
        print()

main()
|
[
"mneary1@umbc.edu"
] |
mneary1@umbc.edu
|
f8597c8ce3dfbc755d8bf76575047963a0ec8beb
|
6c74c8babd2f94cbed185af75940774a2750f3e5
|
/src/georinex/base.py
|
ccfff852795a64c572afd92589a410550c92cf2e
|
[
"MIT"
] |
permissive
|
geospace-code/georinex
|
c28c8a17196bb1fa8093c818ce43bcb74ec52171
|
c689a5a6bc2ffb68bc055f150f1da1b6bab12812
|
refs/heads/main
| 2023-04-13T15:01:50.903458
| 2022-12-27T19:25:58
| 2022-12-27T19:26:15
| 34,296,204
| 106
| 40
|
MIT
| 2023-04-10T02:54:45
| 2015-04-21T01:19:29
|
Python
|
UTF-8
|
Python
| false
| false
| 7,148
|
py
|
from __future__ import annotations
import typing as T
from pathlib import Path
import xarray
from datetime import datetime, timedelta
import logging
from .rio import rinexinfo
from .obs2 import rinexobs2
from .obs3 import rinexobs3
from .nav2 import rinexnav2
from .nav3 import rinexnav3
from .sp3 import load_sp3
from .utils import _tlim
# for NetCDF compression. too high slows down with little space savings.
ENC = {"zlib": True, "complevel": 1, "fletcher32": True}
def load(
    rinexfn: T.TextIO | str | Path,
    out: Path = None,
    use: set[str] = None,
    tlim: tuple[datetime, datetime] = None,
    useindicators: bool = False,
    meas: list[str] = None,
    verbose: bool = False,
    *,
    overwrite: bool = False,
    fast: bool = True,
    interval: float | int | timedelta = None,
):
    """
    Reads OBS, NAV in RINEX 2.x and 3.x

    Files / StringIO input may be plain ASCII text or compressed (including Hatanaka)

    Dispatches on the detected file type: NAV -> rinexnav(), OBS -> rinexobs(),
    SP3 -> load_sp3(), and a previously converted .nc file is reopened directly.
    `out` may be a directory (output name derived from the input) or a .nc path.
    """
    if verbose:
        logging.basicConfig(level=logging.INFO)

    if isinstance(rinexfn, (str, Path)):
        rinexfn = Path(rinexfn).expanduser()

    # %% determine if/where to write NetCDF4/HDF5 output
    outfn = None
    if out:
        out = Path(out).expanduser()
        if out.is_dir():
            outfn = out / (
                rinexfn.name + ".nc"
            )  # not with_suffix to keep unique RINEX 2 filenames
        elif out.suffix == ".nc":
            outfn = out
        else:
            raise ValueError(f"not sure what output is wanted: {out}")

    # %% main program
    if tlim is not None:
        if len(tlim) != 2:
            raise ValueError("time bounds are specified as start stop")
        if tlim[1] < tlim[0]:
            raise ValueError("stop time must be after start time")

    info = rinexinfo(rinexfn)

    if info["rinextype"] == "nav":
        return rinexnav(rinexfn, outfn, use=use, tlim=tlim, overwrite=overwrite)
    elif info["rinextype"] == "obs":
        return rinexobs(
            rinexfn,
            outfn,
            use=use,
            tlim=tlim,
            useindicators=useindicators,
            meas=meas,
            verbose=verbose,
            overwrite=overwrite,
            fast=fast,
            interval=interval,
        )

    # the remaining branches require a real filesystem path, not StringIO
    assert isinstance(rinexfn, Path)

    if info["rinextype"] == "sp3":
        return load_sp3(rinexfn, outfn)
    elif rinexfn.suffix == ".nc":
        # outfn not used here, because we already have the converted file!
        # A converted file may hold NAV, OBS, or both groups.
        try:
            nav = rinexnav(rinexfn)
        except LookupError:
            nav = None
        try:
            obs = rinexobs(rinexfn)
        except LookupError:
            obs = None
        if nav is not None and obs is not None:
            return {"nav": nav, "obs": rinexobs(rinexfn)}
        elif nav is not None:
            return nav
        elif obs is not None:
            return obs
        else:
            raise ValueError(f"No data of known format found in {rinexfn}")
    else:
        raise ValueError(f"What kind of RINEX file is: {rinexfn}")
def batch_convert(
    path: Path,
    glob: str,
    out: Path,
    use: set[str] = None,
    tlim: tuple[datetime, datetime] = None,
    useindicators: bool = False,
    meas: list[str] = None,
    verbose: bool = False,
    *,
    fast: bool = True,
):
    """Convert every file under *path* matching *glob* via load(), writing
    NetCDF output to *out*. Per-file ValueErrors are logged, not raised, so
    one bad file does not abort the batch."""
    path = Path(path).expanduser()

    flist = (f for f in path.glob(glob) if f.is_file())

    for fn in flist:
        try:
            load(
                fn,
                out,
                use=use,
                tlim=tlim,
                useindicators=useindicators,
                meas=meas,
                verbose=verbose,
                fast=fast,
            )
        except ValueError as e:
            logging.error(f"{fn.name}: {e}")
def rinexnav(
    fn: T.TextIO | str | Path,
    outfn: Path = None,
    use: set[str] = None,
    group: str = "NAV",
    tlim: tuple[datetime, datetime] = None,
    *,
    overwrite: bool = False,
) -> xarray.Dataset:
    """Read RINEX 2 or 3 NAV files

    A .nc input is reopened from its *group* (raises LookupError when absent).
    When *outfn* is given, the parsed dataset is also written to NetCDF.
    """
    if isinstance(fn, (str, Path)):
        fn = Path(fn).expanduser()
        if fn.suffix == ".nc":
            try:
                return xarray.open_dataset(fn, group=group)
            except OSError as e:
                raise LookupError(f"Group {group} not found in {fn} {e}")

    tlim = _tlim(tlim)

    info = rinexinfo(fn)
    if int(info["version"]) == 2:
        nav = rinexnav2(fn, tlim=tlim)
    elif int(info["version"]) == 3:
        nav = rinexnav3(fn, use=use, tlim=tlim)
    else:
        raise LookupError(f"unknown RINEX  {info}  {fn}")

    # %% optional output write
    if outfn:
        outfn = Path(outfn).expanduser()
        wmode = _groupexists(outfn, group, overwrite)

        enc = {k: ENC for k in nav.data_vars}
        nav.to_netcdf(outfn, group=group, mode=wmode, encoding=enc)

    return nav
# %% Observation File
def rinexobs(
    fn: T.TextIO | Path,
    outfn: Path = None,
    use: set[str] = None,
    group: str = "OBS",
    tlim: tuple[datetime, datetime] = None,
    useindicators: bool = False,
    meas: list[str] = None,
    verbose: bool = False,
    *,
    overwrite: bool = False,
    fast: bool = True,
    interval: float | int | timedelta = None,
):
    """
    Read RINEX 2.x and 3.x OBS files in ASCII or GZIP (or Hatanaka)

    A .nc input is reopened from its *group* (raises LookupError when absent).
    When *outfn* is given, the parsed dataset is also written to NetCDF.
    """

    if isinstance(fn, (str, Path)):
        fn = Path(fn).expanduser()
        # %% NetCDF4
        if fn.suffix == ".nc":
            try:
                return xarray.open_dataset(fn, group=group)
            except OSError as e:
                raise LookupError(f"Group {group} not found in {fn}   {e}")

    tlim = _tlim(tlim)

    # %% version selection
    info = rinexinfo(fn)

    if int(info["version"]) in (1, 2):
        obs = rinexobs2(
            fn,
            use,
            tlim=tlim,
            useindicators=useindicators,
            meas=meas,
            verbose=verbose,
            fast=fast,
            interval=interval,
        )
    elif int(info["version"]) == 3:
        obs = rinexobs3(
            fn,
            use,
            tlim=tlim,
            useindicators=useindicators,
            meas=meas,
            verbose=verbose,
            fast=fast,
            interval=interval,
        )
    else:
        raise ValueError(f"unknown RINEX {info}  {fn}")

    # %% optional output write
    if outfn:
        outfn = Path(outfn).expanduser()
        wmode = _groupexists(outfn, group, overwrite)

        enc = {k: ENC for k in obs.data_vars}
        # Pandas >= 0.25.0 requires this, regardless of xarray version
        if obs.time.dtype != "datetime64[ns]":
            obs["time"] = obs.time.astype("datetime64[ns]")
        obs.to_netcdf(outfn, group=group, mode=wmode, encoding=enc)

    return obs
def _groupexists(fn: Path, group: str, overwrite: bool) -> str:
print(f"saving {group}:", fn)
if overwrite or not fn.is_file():
return "w"
# be sure there isn't already NAV in it
try:
xarray.open_dataset(fn, group=group)
raise ValueError(f"{group} already in {fn}")
except OSError:
pass
return "a"
|
[
"scivision@users.noreply.github.com"
] |
scivision@users.noreply.github.com
|
e0c09849f0aec5951bf94adaa9bc3656ac75f05f
|
abc72a2f2072ab7a5a338e41d81c354324943b09
|
/MC 102 (Exemplos de aula)/eliminar_repeticao.py
|
55c15d25c81d25f12a60900b67da3c9af6354681
|
[] |
no_license
|
gigennari/mc102
|
a3d39fd9a942c97ef477a9b59d7955f4269b202a
|
fce680d5188a8dfb0bc1832d6f430cbcaf68ef55
|
refs/heads/master
| 2023-04-05T01:40:58.839889
| 2020-07-27T20:33:56
| 2020-07-27T20:33:56
| 354,130,720
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
def eliminar_repeticao(lista1, lista2):
    """Remove duplicates from lista1, keeping first-occurrence order.

    lista2 holds the value paired with each element of lista1 (e.g. its
    frequency); the pair belonging to the FIRST occurrence is kept.

    Returns a tuple (values_without_repeats, paired_values).
    """
    lista_sem_rep = []
    freq_sem_rep = []
    # a set gives O(1) membership tests; the original scanned the output
    # list on every iteration (O(n^2) overall). Assumes hashable elements.
    vistos = set()
    for valor, freq in zip(lista1, lista2):
        if valor not in vistos:
            vistos.add(valor)
            lista_sem_rep.append(valor)
            freq_sem_rep.append(freq)
    return lista_sem_rep, freq_sem_rep
def main():
    """Demonstrate eliminar_repeticao on a fixed sample and print the result."""
    valores = [3, 3, 6, 5, 8, 8, 10]
    contagens = [2, 2, 1, 1, 2, 2, 1]
    sem_repeticao, _frequencias = eliminar_repeticao(valores, contagens)
    print(sem_repeticao)


main()
|
[
"g198010@dac.unicamp.br"
] |
g198010@dac.unicamp.br
|
f7ee63e6b92678782ec9da34b96b0addaf69997c
|
b9571590d8cc83a99293d777f57e5ebeea5bcc92
|
/spiders/DoctorSpider.py
|
1cc8539b8017fa62c7ea2ce5c7a731be27f7fec8
|
[] |
no_license
|
LiuQL2/Crawler_xywy_doctor_communication
|
585a0a3230f397640e5fc54506cd6585bfd04f57
|
3374f08ea34ae8ea7e96501188a4fec247c72b5d
|
refs/heads/master
| 2020-06-30T13:28:01.048195
| 2017-08-04T07:29:19
| 2017-08-04T07:29:19
| 74,369,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
用来获取病例和心得帖子内容的类,传入一个帖子的URL,调用不同的方法得到不同的数据。
"""
# Author: Liu Qianlong <LiuQL2@163.com>
# Date: 2016.12.08
import datetime
import json
import sys
import urllib2
from BaseSpider import BaseSpider
reload(sys)
sys.setdefaultencoding('utf-8')
class DoctorSpider(BaseSpider):
    # Python 2 spider for one xywy.com doctor page: given a doctor profile URL,
    # fetches the doctor's attention/fans/weibo counters via a JSON endpoint.
    def __init__(self,url, crawl_number, try_number = 20):
        # url: the doctor's profile page URL; its 5th path segment is the uid
        # crawl_number: sequence number of this crawl run (copied into results)
        # try_number: retry budget (stored; used by BaseSpider — TODO confirm)
        self.target_url = url
        # NOTE(review): `request` is built but never used here — presumably a leftover
        request = urllib2.Request(url=self.target_url, headers=self.get_header())
        self.status = True
        self.try_number = try_number
        self.crawl_number = crawl_number
        self.selector = None
        # JSON counters endpoint, keyed by the uid extracted from target_url
        self.number_url = 'http://club.xywy.com/doctorShare/index.php?type=share_operation&uid=' + self.target_url.split('/')[4] + '&stat=14'
    def get_number(self):
        # Fetch the counters endpoint and return a dict of counts plus crawl
        # metadata, or None when the request/parse failed.
        doc = self.process_url_request(self.number_url,xpath_type=False)
        if doc != None:
            doc = json.loads(doc)
            crawl_time = datetime.datetime.now().strftime('%Y-%m-%d')
            return {'attention_number':str(doc['attenNum']), 'fans_number':str(doc['fansNum']),'web_number':str(doc['wbNum']),'doctor_url':self.target_url, 'crawl_time':crawl_time, 'crawl_number':self.crawl_number}
        else:
            return None
if __name__ == '__main__':
    # Smoke test against a live doctor page (Python 2 print statement).
    doctor = DoctorSpider(url='http://club.xywy.com/doc_card/55316663/blog')
    print doctor.get_number()
|
[
"LiuQL2@sina.com"
] |
LiuQL2@sina.com
|
d71180f0bd321d3d7193738b32581743b75440f3
|
3257372291236aac1737b057c9ac6c61da9ccca0
|
/tutorials/W0D5_Statistics/solutions/W0D5_Tutorial2_Solution_281848de.py
|
65dc66a635bd53e9c76bb3d72f597aebb3c00512
|
[
"CC-BY-4.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
NeuromatchAcademy/precourse
|
230ead0d11ae7b0dba21c8df97695a1796e9797d
|
b7f2432c6a68a7984ca923ceed8e07d5cfdb77c3
|
refs/heads/main
| 2023-07-26T11:18:24.493966
| 2023-07-09T14:42:49
| 2023-07-09T14:42:49
| 256,327,558
| 639
| 174
|
MIT
| 2023-07-09T14:42:50
| 2020-04-16T20:54:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 621
|
py
|
""" You will learn more about "Bayesian brains" and the theory surrounding
these ideas once the course begins. Here is a brief explanation: it may
be ideal for human brains to implement Bayesian inference by integrating "prior"
information the brain has about the world (memories, prior knowledge, etc.) with
new evidence that updates its "beliefs"/prior. This process seems to parallel
the brain's method of learning about its environment, making it a compelling
theory for many neuroscience researchers. One of Bonus exercises below examines a possible
real world model for Bayesian inference: sound localization.
""";
|
[
"noreply@github.com"
] |
NeuromatchAcademy.noreply@github.com
|
f369d5667a7f0255f82296fbbee935075af34b7e
|
7b5ec17918cb2328d53bf2edd876c153af26b38d
|
/scripts/ingestors/rwis/process_idot_awos.py
|
c29e696ecbcafd40fb720a5612021a2b033ca115
|
[
"MIT"
] |
permissive
|
Xawwell/iem
|
78e62f749661f3ba292327f82acf4ef0f0c8d55b
|
88177cc096b9a66d1bd51633fea448585b5e6573
|
refs/heads/master
| 2020-09-06T09:03:54.174221
| 2019-11-08T03:23:44
| 2019-11-08T03:23:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
"""Process AWOS METAR file"""
from __future__ import print_function
import re
import sys
import os
import datetime
import ftplib
import subprocess
import tempfile
from io import StringIO
from pyiem import util
INCOMING = "/mesonet/data/incoming"
def fetch_files():
    """Download the Iowa AWOS METAR file via FTP.

    Returns:
        str: local path of the downloaded file.

    Exits the process on FTP connection timeout.
    """
    props = util.get_properties()
    fn = "%s/iaawos_metar.txt" % (INCOMING,)
    try:
        ftp = ftplib.FTP("165.206.203.34")
    except TimeoutError:
        print("process_idot_awos FTP server timeout error")
        sys.exit()
    ftp.login("rwis", props["rwis_ftp_password"])
    # context manager closes the local file; the old bare open() leaked the handle
    with open(fn, "wb") as fh:
        ftp.retrbinary("RETR METAR.txt", fh.write)
    ftp.close()
    return fn
def main():
    """Reformat the AWOS METAR file as a WMO text product and pqinsert it into LDM."""
    fn = fetch_files()
    utc = datetime.datetime.utcnow().strftime("%Y%m%d%H%M")
    data = {}
    # Sometimes, the file gets gobbled it seems
    # context manager closes the file; the old bare open() leaked the handle
    with open(fn, "rb") as fh:
        for line in fh:
            line = line.decode("utf-8", "ignore")
            match = re.match("METAR K(?P<id>[A-Z1-9]{3})", line)
            if not match:
                continue
            gd = match.groupdict()
            data[gd["id"]] = line
    # Assemble the WMO product: SOH header, one METAR per line, ETX trailer
    sio = StringIO()
    sio.write("\001\r\r\n")
    sio.write(
        ("SAUS00 KISU %s\r\r\n")
        % (datetime.datetime.utcnow().strftime("%d%H%M"),)
    )
    sio.write("METAR\r\r\n")
    for sid in data:
        sio.write("%s=\r\r\n" % (data[sid].strip().replace("METAR ", ""),))
    sio.write("\003")
    sio.seek(0)
    (tmpfd, tmpname) = tempfile.mkstemp()
    os.write(tmpfd, sio.getvalue().encode("utf-8"))
    os.close(tmpfd)
    proc = subprocess.Popen(
        (
            "/home/ldm/bin/pqinsert -i -p 'data c %s "
            "LOCDSMMETAR.dat LOCDSMMETAR.dat txt' %s"
        )
        % (utc, tmpname),
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    (stdout, stderr) = proc.communicate()
    os.remove(tmpname)
    # with stderr=STDOUT above, communicate() always returns stderr=None,
    # so this reports whenever pqinsert printed anything at all
    if stdout != b"" or stderr is not None:
        print("process_idot_awos\nstdout: %s\nstderr: %s" % (stdout, stderr))


if __name__ == "__main__":
    main()
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
567c5930ce4dce2b362ee9ebf34f4f2c604b528e
|
88df15f1c36960f3473caf54904cbaae5f3bab52
|
/function.py
|
b9deefeceb27b05ad098eda3a0941d75fe731747
|
[] |
no_license
|
pigpigman8686/listen
|
eef86b391b7399a96edfe1f8136dcd26d0ffd646
|
aa3b3f7d2e49ffb557739c19c3712b9e6d823e43
|
refs/heads/master
| 2020-06-19T17:46:29.283378
| 2019-07-14T07:34:39
| 2019-07-14T07:34:39
| 196,806,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 897
|
py
|
import random
import os,re
def getMessage(yes=True,snr=28,count=1):
    """Pick a random voice file for the given SNR band.

    count == 1 forces band 9; otherwise the band is derived from snr,
    shifted down (clamped at 0) when yes == True and up (clamped at 18)
    otherwise. Returns a relative path like "voice/<band>/<file>".
    """
    if count == 1:
        page = 9
    elif yes == True:
        page = max(int((snr - 10) / 2 - 1), 0)
    else:
        page = min(int((snr - 10) / 2 + 1), 18)
    folder = "static/voice/" + str(page)
    candidates = os.listdir(folder)
    chosen = candidates[random.randint(0, len(candidates) - 1)]
    return "voice/" + str(page) + "/" + str(chosen)
def average(result):
    """Average the second number of the "a-b" strings stored in
    result[0]['right1'] .. result[0]['right19']."""
    pattern = re.compile('(\d{1,})-(\d{1,})')
    total = 0
    for idx in range(1, 20):
        text = str(result[0]['right' + str(idx)])
        found = re.search(pattern, text)
        total += int(found.group(2))
    return total / 19.0
|
[
"952361195@qq.com"
] |
952361195@qq.com
|
06e60323dd57f2def66299b5acfc0b773762fb62
|
a3c4935537a42330758c7ac54553ae45daad069d
|
/.history/backend/src/api_20210807005753.py
|
d5a98b48745dbac2d9550540ef0d1b41a0699108
|
[] |
no_license
|
saraalmuraytib/Coffee-Shop
|
219292b4e2dd9b39621f9cd42bdcb049f5bb20c8
|
cec4b8df3a4c38600fb2964f8fa85d3c820ddb6c
|
refs/heads/main
| 2023-07-12T19:39:26.617313
| 2021-08-28T19:22:23
| 2021-08-28T19:22:23
| 392,365,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,532
|
py
|
import os
from flask import Flask, request, jsonify, abort
from sqlalchemy import exc
import json
from flask_cors import CORS
from .database.models import db_drop_and_create_all, setup_db, Drink
from .auth.auth import AuthError, requires_auth
# Application setup: create the Flask app, bind the database models, enable CORS.
app = Flask(__name__)
setup_db(app)
CORS(app)
'''
@TODO uncomment the following line to initialize the datbase
!! NOTE THIS WILL DROP ALL RECORDS AND START YOUR DB FROM SCRATCH
!! NOTE THIS MUST BE UNCOMMENTED ON FIRST RUN
!! Running this funciton will add one
'''
# db_drop_and_create_all()
# ROUTES
'''
@TODO implement endpoint
GET /drinks
it should be a public endpoint
it should contain only the drink.short() data representation
returns status code 200 and json {"success": True, "drinks": drinks} where drinks is the list of drinks
or appropriate status code indicating reason for failure
'''
@app.route('/drinks')
def drinks():
    """Public endpoint: list all drinks in short form.

    Returns 200 with {"success": True, "drinks": [...]}, or aborts 404
    when the query fails.
    """
    try:
        drinks = Drink.query.all()
        return jsonify({
            'success': True,
            'drinks': [drink.short() for drink in drinks]
        }), 200
    except Exception:  # was a bare except:, which also traps SystemExit/KeyboardInterrupt
        abort(404)
'''
@TODO implement endpoint
GET /drinks-detail
it should require the 'get:drinks-detail' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drinks} where drinks is the list of drinks
or appropriate status code indicating reason for failure
'''
@app.route("/drinks-detail")
@requires_auth('get:drinks-detail')
def get_drink_detail(payload):
try:
drinks = Drink.query.all()
return jsonify({
'success': True,
'drinks': [drink.long() for drink in drinks]
}),200
except:
abort(404)
'''
@TODO implement endpoint
POST /drinks
it should create a new row in the drinks table
it should require the 'post:drinks' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drink} where drink an array containing only the newly created drink
or appropriate status code indicating reason for failure
'''
@app.route("/drinks", methods=['POST'])
@requires_auth('post:drinks')
def add_drink(payload):
# Fetch the request body
body = request.get_json()
# Get title and recipe to create the drink
title = body.get('title')
recipe = body.get('recipe')
try:
# json.dumps ---> Serialize obj to a JSON formatted str.
new_drink = Drink(title=title, recipe=json.dumps(recipe))
new_drink.insert()
return jsonify({
'success': True,
'drinks': [new_drink.long()],
})
except:
abort(422)
'''
@TODO implement endpoint
PATCH /drinks/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should update the corresponding row for <id>
it should require the 'patch:drinks' permission
it should contain the drink.long() data representation
returns status code 200 and json {"success": True, "drinks": drink} where drink an array containing only the updated drink
or appropriate status code indicating reason for failure
'''
'''
@TODO implement endpoint
DELETE /drinks/<id>
where <id> is the existing model id
it should respond with a 404 error if <id> is not found
it should delete the corresponding row for <id>
it should require the 'delete:drinks' permission
returns status code 200 and json {"success": True, "delete": id} where id is the id of the deleted record
or appropriate status code indicating reason for failure
'''
# Error Handling
'''
Example error handling for unprocessable entity
'''
@app.errorhandler(422)
def unprocessable(error):
    """Standard JSON payload for 422 Unprocessable Entity."""
    payload = {
        "success": False,
        "error": 422,
        "message": "unprocessable",
    }
    return jsonify(payload), 422
'''
@TODO implement error handlers using the @app.errorhandler(error) decorator
each error handler should return (with approprate messages):
jsonify({
"success": False,
"error": 404,
"message": "resource not found"
}), 404
'''
'''
@TODO implement error handler for 404
error handler should conform to general task above
'''
'''
@TODO implement error handler for AuthError
error handler should conform to general task above
'''
|
[
"sara.almuraytib@gmail.com"
] |
sara.almuraytib@gmail.com
|
0bb96f30a42e50bc0408cf4a6f607b6796d6546d
|
edf91e2614f0bf0dbfea1c77d2f41add5a14fac1
|
/twitterino/twitterino/urls.py
|
5e36063f9d99be119b11bcd0d2d3164226497241
|
[] |
no_license
|
Razhelq/Twitterino
|
c5c4a9de47566a21240f1de316c9cb980b9fe01d
|
88269625aa2306f58c197477f3d682db270ca469
|
refs/heads/master
| 2020-03-26T18:23:15.149839
| 2018-08-27T19:59:57
| 2018-08-27T19:59:57
| 145,210,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,831
|
py
|
"""twitterino URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path
from t.views import BaseView, IndexView, AddTweetView, LoginView, LogoutView, UserTweetsView, TweetDetailsView
from t.views import UserMessagesView, SendMessageView, MessageDetailsView, AddCommentView
# URL routing table. Numeric captures (?P<id>(\d)+) pass the object id to the view.
urlpatterns = [
    path('admin/', admin.site.urls),
    url(r'^$', BaseView.as_view(), name='base'),
    url(r'^index/$', IndexView.as_view(), name='index'),
    url(r'^add_tweet/$', AddTweetView.as_view(), name='add-tweet'),
    url(r'^login/$', LoginView.as_view(), name='login'),
    url(r'^logout/$', LogoutView.as_view(), name='logout'),
    url(r'^user_tweets/(?P<id>(\d)+)/$', UserTweetsView.as_view(), name='user-tweets'),
    url(r'^tweet_details/(?P<id>(\d)+)/$', TweetDetailsView.as_view(), name='tweet-details'),
    url(r'^user_messages/(?P<id>(\d)+)/$', UserMessagesView.as_view(), name='user-messages'),
    url(r'^send_message/(?P<id>(\d)+)/$', SendMessageView.as_view(), name='send-message'),
    url(r'^message_details/(?P<id>(\d)+)/$', MessageDetailsView.as_view(), name='message-details'),
    url(r'^add_comment/(?P<id>(\d)+)/$', AddCommentView.as_view(), name='add-comment')
]
|
[
"mateuszszpakowski@wp.pl"
] |
mateuszszpakowski@wp.pl
|
81b87030e4f49031523e25eeadd2033600229db8
|
ec00584ab288267a7cf46c5cd4f76bbec1c70a6b
|
/Django/webapp/webapp/urls.py
|
b992ee479948a6c3cd42bf76a7571f15eb468e76
|
[] |
no_license
|
rahuldbhadange/Python
|
b4cc806ff23953389c9507f43d817b3815260e19
|
7e162117f1acc12537c7eeb36d6983d804122ff3
|
refs/heads/master
| 2021-06-23T05:04:20.053777
| 2020-01-28T10:34:28
| 2020-01-28T10:34:28
| 217,307,612
| 0
| 0
| null | 2021-06-10T22:44:11
| 2019-10-24T13:35:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
"""webapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URL conf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# from django.contrib import admin
# from django.urls import path
# from django.conf.urls import include, url
# from login.views import
urlpatterns = [
# path('', include('login.urls')),
# path('admin/', admin.site.urls),
# url(r'^login/', include('login.urls')),
# url(r'^API1/', include('API1.urls')),
# path('', PersonListView.as_view(), name='person_list'),
# path('add/', PersonCreateView.as_view(), name='person_add'),
# path('<int:pk>/edit/', PersonUpdateView.as_view(), name='person_edit'),
]
'''
from django.urls import path
from login.views import PersonListView, PersonCreateView, PersonUpdateView
urlpatterns = [
path('', PersonListView.as_view(), name='person_list'),
path('add/', PersonCreateView.as_view(), name='person_add'),
path('<int:pk>/edit/', PersonUpdateView.as_view(), name='person_edit'),
]'''
|
[
"46024570+rahuldbhadange@users.noreply.github.com"
] |
46024570+rahuldbhadange@users.noreply.github.com
|
6e23c5de1d7411fa2a34e57a9a50d0e75aa00440
|
abc422f58ad053bcbb6653ba15b66e46d220a199
|
/serial_scripts/rsyslog/mylogging.py
|
d5ca62ae2331a8ee83e6c2ec8c2eda766a56adab
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
tungstenfabric/tf-test
|
d3efff59bca931b614d0008260b2c0881d1fc009
|
4b9eca7eb182e5530223131ecab09d3bdf366407
|
refs/heads/master
| 2023-02-26T19:14:34.345423
| 2023-01-11T08:45:18
| 2023-01-11T10:37:25
| 265,231,958
| 8
| 22
| null | 2023-02-08T00:53:29
| 2020-05-19T11:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,406
|
py
|
#!/usr/bin/python2.7
from __future__ import print_function
from builtins import str
from builtins import range
import syslog
import random
import time
import sys
def send_10_log_messages_with_delay():
    """Emit ten numbered LOG_EMERG test messages to the MAIL facility, one per second."""
    syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
    for seq in range(1, 11):
        syslog.syslog(syslog.LOG_EMERG, '%d. Test Syslog Messages being sent.' % seq)
        time.sleep(1)
    syslog.closelog()
# end send_10_log_messages_with_delay
def send_10_log_messages():
    """Emit ten numbered LOG_EMERG test messages to the MAIL facility back-to-back."""
    syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
    for seq in range(1, 11):
        syslog.syslog(syslog.LOG_EMERG, '%d. Test Syslog Messages being sent without delay.' % seq)
    syslog.closelog()
# end send_10_log_messages
def send_messages_grater_than_1024_bytes():
    """Send 100 long (>1024-byte) LOG_EMERG messages, one per second.

    The message template comes from the first line of ./message.txt;
    each copy is tagged with its index plus a 'mymark' marker.
    """
    syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
    with open("message.txt", "r") as myfile:
        msg = myfile.readlines()[0]
    # the `with` block closes the file; the old explicit close() inside it was redundant
    for ind in range(100):
        syslog.syslog(
            syslog.LOG_EMERG, (msg[:5] + str(ind) + 'mymark' + msg[6:]))
        time.sleep(1)
    syslog.closelog()
# end send_messages_grater_than_1024_bytes
def send_messages_of_all_facility_and_severity():
    """Send one test message for every (facility, severity) combination."""
    dict_of_facility = {
        'LOG_KERN': 0,
        'LOG_USER': 1,
        'LOG_MAIL': 2,
        'LOG_DAEMON': 3,
        'LOG_AUTH': 4,
        'LOG_NEWS': 7,
        'LOG_UUCP': 8,
        'LOG_LOCAL0': 16,
        'LOG_CRON': 15,
        'LOG_SYSLOG': 5,
        'LOG_LOCAL1': 17}
    list_of_severity = ['LOG_EMERG', 'LOG_ALERT', 'LOG_CRIT', 'LOG_ERR',
                        'LOG_WARNING', 'LOG_NOTICE', 'LOG_INFO', 'LOG_DEBUG']
    # iterate items() to get name and code together (was a dict lookup per key)
    for facility_name, log_facility in dict_of_facility.items():
        syslog.openlog(logoption=syslog.LOG_PID, facility=log_facility)
        # enumerate replaces the O(n) list.index() call per severity;
        # names are unique, so the index is identical
        for log_severity, severity_name in enumerate(list_of_severity):
            msg = 'Test Message from ' + facility_name + \
                ' with severity ' + severity_name + '.'
            syslog.syslog(log_severity, msg)
        syslog.closelog()
        time.sleep(1)
# end send_messages_of_all_facility_and_severity
def send_test_log_message():
    """Emit five numbered LOG_EMERG test messages to the KERN facility, one per second."""
    syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_KERN)
    for seq in range(1, 6):
        syslog.syslog(syslog.LOG_EMERG, '%d. Test Syslog Messages from different nodes.' % seq)
        time.sleep(1)
    syslog.closelog()
# end send_test_log_message
# CLI entry: usage text and a name -> function dispatch table.
help_string = '\nusage:\n\n./mylogging.py <function-name>\n\nwhere function names are:\
    \n1. send_10_log_messages\n2. send_10_log_messages_with_delay\
    \n3. send_messages_grater_than_1024_bytes\n4. send_messages_of_all_facility_and_severity\
    \n5. send_test_log_message\n\n'
FuncCallDict = {
    'send_10_log_messages': send_10_log_messages,
    'send_test_log_message': send_test_log_message,
    'send_10_log_messages_with_delay': send_10_log_messages_with_delay,
    'send_messages_grater_than_1024_bytes': send_messages_grater_than_1024_bytes,
    'send_messages_of_all_facility_and_severity': send_messages_of_all_facility_and_severity}
# Exactly one argument (the function name) is required.
NumberOfArgs = len(sys.argv)
if NumberOfArgs != 2:
    print(help_string)
    sys.exit(2)
# NOTE(review): an unknown name raises KeyError here rather than printing usage.
FunctionName = sys.argv[1]
FuncCallDict[FunctionName]()
|
[
"andrey-mp@yandex.ru"
] |
andrey-mp@yandex.ru
|
e9d169335a5f914f653a37844eac60d56eb8136e
|
b96ed10d6247e22d4fa1d28bc3314bc319d3109c
|
/LessonSample/chapter12/03_进程池.py
|
2ba823ca2fd1e127c6f7ef24b29591aaddf7cb6d
|
[] |
no_license
|
13555785106/PythonPPT-01
|
ac1b22b9b1851f2b3ea6e4ab0a100e5f6896ee8c
|
40e5883f248cb342f3a7fc7ad12ba02ebde4c619
|
refs/heads/master
| 2020-04-26T16:49:59.675964
| 2019-03-04T07:16:21
| 2019-03-04T07:16:21
| 157,095,747
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 941
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import random
import time
from multiprocessing import Pool
def run(name):
    """Worker task: announce start, sleep a random 5/7/9 seconds, report elapsed time."""
    print("子进程%d启动--%s" % (name, os.getpid()))
    began = time.time()
    time.sleep(random.choice([5, 7, 9]))
    elapsed = time.time() - began
    print("子进程%d结束--%s--耗时%.2f" % (name, os.getpid(), elapsed))
print("父进程启动")
# 创建多个进程
# 进程池
# 表示可以同时执行的进程数量
# Pool默认大小是CPU核心数
pp = Pool()
for i in range(10):
# 创建进程,放入进程池中统一管理
pp.apply_async(run, args=(i,))
# 在调用join之前必须先调用close,并且调用close之后就不能再继续添加新的进程了
pp.close() # close以后进程池pp将被关闭,不能再继续向pp中加入新的进程.
# 进程池对象调用join,会等待进程池中所有的子进程结束完毕再去执行父进程
pp.join()
print("父进程结束")
|
[
"312655424@qq.com"
] |
312655424@qq.com
|
0588499572b5db43202033b8b7e52943684e02b8
|
6811dc616a18898f565ee1e59f52a889da2b4d47
|
/clog/urls.py
|
7e24d23a5403e98aa3fd5f1454f3dabc71e0531f
|
[] |
no_license
|
rtreharne/clog
|
163e28b8e387e7f2fe926d88e28972c115294508
|
69320d929d96fd291e6da8fc11023d14da998655
|
refs/heads/master
| 2021-01-10T18:24:16.863481
| 2015-08-18T13:58:07
| 2015-08-18T13:58:07
| 40,085,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 563
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
# Legacy Django patterns() routing: home, admin, and three included apps.
urlpatterns = patterns('',
    url(r'^$', 'clog.views.home', name='home'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^user/', include('profiles.urls')),
    url(r'^upload/', include('upload.urls')),
    url(r'^invite/', include('invite.urls')),
)
# In development only, serve uploaded media straight from MEDIA_ROOT.
if settings.DEBUG:
    urlpatterns += patterns(
        'django.views.static',
        (r'^media/(?P<path>.*)',
         'serve',
         {'document_root': settings.MEDIA_ROOT}),
    )
|
[
"R.Treharne@liverpool.ac.uk"
] |
R.Treharne@liverpool.ac.uk
|
3d16576478fa4173a6b4ac13bd24082a5243908a
|
507e9728307439fa3b343de015237e2f179b0032
|
/hospital/api/AConfig.py
|
9fa547be309e9abd3052ecb9319676517a13cbcb
|
[] |
no_license
|
haobin12358/hospital
|
3075f31c4ed527190292f2655350ef0bbc9ac7f3
|
9705e03eaf9514eb47f1d44d2bbe18ccf5cd5b30
|
refs/heads/master
| 2022-12-14T01:48:56.759520
| 2021-03-02T04:00:47
| 2021-03-02T04:00:47
| 245,367,609
| 2
| 1
| null | 2022-12-08T03:45:40
| 2020-03-06T08:32:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
# -*- coding : utf-8 -*-
from hospital.extensions.base_resource import Resource
from hospital.control.CConfig import CConfig
class AConfig(Resource):
    """Config endpoint router: maps URL action names to CConfig handler methods."""

    def __init__(self):
        # one controller instance backs both dispatch tables
        self.cconfig = CConfig()

    def get(self, config):
        """Dispatch table for GET actions."""
        handler = self.cconfig
        return {
            "list_banner": handler.list_banner,
            "get_csd": handler.get_csd,
            "get_about_us": handler.get_about_us,
            "get_vip_price": handler.get_vip_price,
            "get_pointtask": handler.get_pointtask,
            "get_integral": handler.get_integral,
        }

    def post(self, config):
        """Dispatch table for POST actions."""
        handler = self.cconfig
        return {
            "set_banner": handler.set_banner,
            "set_csd": handler.set_csd,
            "set_about_us": handler.set_about_us,
            "set_characteristic_team": handler.set_characteristic_team,
            "set_honour": handler.set_honour,
            "set_vip_price": handler.set_vip_price,
            "update_pointtask": handler.update_pointtask,
            "get_point": handler.get_point,
        }
|
[
"1276121237@qq.com"
] |
1276121237@qq.com
|
bc2a86c16fdc46755d1f68ef3d15c959ac845b19
|
b2ed893d04f04eeaf7209187133de7431c476a96
|
/user_net/activity_info.py
|
94dc0c294de672dfcb333926cf41fa221e1c7235
|
[] |
no_license
|
liruikaiyao/workshop
|
4b5221259f59ad504d87d73c31f5fa0e58d4a1f0
|
6dbde74e35ef02f5e92c76dcdd1909f1d0afb89e
|
refs/heads/master
| 2021-01-17T16:09:13.248109
| 2015-08-05T09:43:21
| 2015-08-05T09:43:21
| 23,420,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,674
|
py
|
# coding=utf-8
__author__ = 'Carry lee'
from collections import Counter
import datetime
import urllib2
import json
from config.db import ICCv1, sh, utc
api_url = u'http://api.map.baidu.com/geocoder/v2/?ak='
parameter = u'&output=json&coordtype=wgs84ll&location='
ak = 'SIpMcORCSogM916QMOz5tx7S'
weixin = ICCv1['weixin']
detail = ICCv1['detail']
# Analysis window: one week in Asia/Shanghai local time, converted to UTC for queries.
begin = datetime.datetime(year=2015, month=5, day=12, hour=0, minute=0, second=0, tzinfo=sh)
end = datetime.datetime(year=2015, month=5, day=19, hour=0, minute=0, second=0, tzinfo=sh)
begin_utc = begin.astimezone(utc)
end_utc = end.astimezone(utc)
# Gender statistics (translated from: 性别统计)
gender = {}
male = 0
female = 0
for elem in detail.find({'__REMOVED__': False,
                         '__CREATE_TIME__': {'$gt': begin_utc, '$lt': end_utc}}):
    if 'sex' in elem:
        # presumably 1 = male, 2 = female (WeChat convention) — TODO confirm
        if elem['sex'] == 1:
            male += 1
        elif elem['sex'] == 2:
            female += 1
        else:
            pass
gender['male'] = male
gender['female'] = female
# Resolve a city name from LBS coordinates (translated from: 根据lbs数据获取城市信息的函数)
def getcity(one):
    # one: record with string 'Latitude'/'Longitude' fields.
    # Queries the Baidu geocoder API; returns the city name, or implicitly
    # None when the request fails or times out (Python 2 print statement below).
    location = one['Latitude'] + ',' + one['Longitude']
    try:
        res = urllib2.urlopen(api_url + ak + parameter + location, timeout=0.5)
        address = json.loads(res.read())['result']
    except Exception as e:
        print e
    else:
        city = address['addressComponent']['city']
        return city
# Collect the city for every LOCATION event in the window, then tally.
city_list = []
for elem in weixin.find({'Event': 'LOCATION',
                         '__REMOVED__': False,
                         '__CREATE_TIME__': {'$gt': begin_utc, '$lt': end_utc}}).add_option(16):
    city_name = getcity(elem)
    city_list.append(city_name)
city_info = Counter(city_list)
|
[
"liruikaiyao@gmail.com"
] |
liruikaiyao@gmail.com
|
7670f28ba7aabb549461ab2ef055a921d977f465
|
19236d9e966cf5bafbe5479d613a175211e1dd37
|
/cohesity_management_sdk/controllers/certificates.py
|
419684769ccf8289a0dc95ec3fbc113bd7eed60f
|
[
"MIT"
] |
permissive
|
hemanshu-cohesity/management-sdk-python
|
236c44fbd9604809027f8ddd0ae6c36e4e727615
|
07c5adee58810979780679065250d82b4b2cdaab
|
refs/heads/master
| 2020-04-29T23:22:08.909550
| 2019-04-10T02:42:16
| 2019-04-10T02:42:16
| 176,474,523
| 0
| 0
|
NOASSERTION
| 2019-03-19T09:27:14
| 2019-03-19T09:27:12
| null |
UTF-8
|
Python
| false
| false
| 6,978
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import logging
from cohesity_management_sdk.api_helper import APIHelper
from cohesity_management_sdk.configuration import Configuration
from cohesity_management_sdk.controllers.base_controller import BaseController
from cohesity_management_sdk.http.auth.auth_manager import AuthManager
from cohesity_management_sdk.models.ssl_certificate_configuration import SSLCertificateConfiguration
from cohesity_management_sdk.exceptions.error_error_exception import ErrorErrorException
class Certificates(BaseController):
    """A Controller to access Endpoints in the cohesity_management_sdk API."""
    def __init__(self, client=None, call_back=None):
        super(Certificates, self).__init__(client, call_back)
        self.logger = logging.getLogger(__name__)
    def delete_web_server_certificate(self):
        """Does a DELETE request to /public/certificates/webServer.
        Returns delete status upon completion.
        Returns:
            void: Response from the API. No Content
        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.
        """
        try:
            self.logger.info('delete_web_server_certificate called.')
            # Prepare query URL
            self.logger.info('Preparing query URL for delete_web_server_certificate.')
            _url_path = '/public/certificates/webServer'
            # Configuration supplies the scheme/host; the path is appended to it
            _query_builder = Configuration.get_base_uri()
            _query_builder += _url_path
            _query_url = APIHelper.clean_url(_query_builder)
            # Prepare and execute request
            self.logger.info('Preparing and executing request for delete_web_server_certificate.')
            _request = self.http_client.delete(_query_url)
            AuthManager.apply(_request)
            _context = self.execute_request(_request, name = 'delete_web_server_certificate')
            # Endpoint and global error handling using HTTP status codes.
            self.logger.info('Validating response for delete_web_server_certificate.')
            # NOTE(review): status_code == 0 appears to be the SDK's marker for a
            # transport-level failure (no HTTP response) — confirm against BaseController
            if _context.response.status_code == 0:
                raise ErrorErrorException('Error', _context)
            self.validate_response(_context)
        except Exception as e:
            # log with traceback, then let the caller handle the failure
            self.logger.error(e, exc_info = True)
            raise
    def get_web_server_certificate(self):
        """Does a GET request to /public/certificates/webServer.
        Returns the Server Certificate configured on the cluster.
        Returns:
            SSLCertificateConfiguration: Response from the API. Success
        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.
        """
        try:
            self.logger.info('get_web_server_certificate called.')
            # Prepare query URL
            self.logger.info('Preparing query URL for get_web_server_certificate.')
            _url_path = '/public/certificates/webServer'
            _query_builder = Configuration.get_base_uri()
            _query_builder += _url_path
            _query_url = APIHelper.clean_url(_query_builder)
            # Prepare headers
            self.logger.info('Preparing headers for get_web_server_certificate.')
            _headers = {
                'accept': 'application/json'
            }
            # Prepare and execute request
            self.logger.info('Preparing and executing request for get_web_server_certificate.')
            _request = self.http_client.get(_query_url, headers=_headers)
            AuthManager.apply(_request)
            _context = self.execute_request(_request, name = 'get_web_server_certificate')
            # Endpoint and global error handling using HTTP status codes.
            self.logger.info('Validating response for get_web_server_certificate.')
            if _context.response.status_code == 0:
                raise ErrorErrorException('Error', _context)
            self.validate_response(_context)
            # Return appropriate type
            return APIHelper.json_deserialize(_context.response.raw_body, SSLCertificateConfiguration.from_dictionary)
        except Exception as e:
            self.logger.error(e, exc_info = True)
            raise
    def update_web_server_certificate(self,
                                      body=None):
        """Does a PUT request to /public/certificates/webServer.
        Returns the updated Web Server Certificate on the cluster.
        Args:
            body (SSLCertificateConfiguration, optional): TODO: type
                description here. Example:
        Returns:
            SSLCertificateConfiguration: Response from the API. Success
        Raises:
            APIException: When an error occurs while fetching the data from
                the remote API. This exception includes the HTTP Response
                code, an error message, and the HTTP body that was received in
                the request.
        """
        try:
            self.logger.info('update_web_server_certificate called.')
            # Prepare query URL
            self.logger.info('Preparing query URL for update_web_server_certificate.')
            _url_path = '/public/certificates/webServer'
            _query_builder = Configuration.get_base_uri()
            _query_builder += _url_path
            _query_url = APIHelper.clean_url(_query_builder)
            # Prepare headers
            self.logger.info('Preparing headers for update_web_server_certificate.')
            _headers = {
                'accept': 'application/json',
                'content-type': 'application/json; charset=utf-8'
            }
            # Prepare and execute request
            self.logger.info('Preparing and executing request for update_web_server_certificate.')
            # the body is serialized to JSON before being sent
            _request = self.http_client.put(_query_url, headers=_headers, parameters=APIHelper.json_serialize(body))
            AuthManager.apply(_request)
            _context = self.execute_request(_request, name = 'update_web_server_certificate')
            # Endpoint and global error handling using HTTP status codes.
            self.logger.info('Validating response for update_web_server_certificate.')
            if _context.response.status_code == 0:
                raise ErrorErrorException('Error', _context)
            self.validate_response(_context)
            # Return appropriate type
            return APIHelper.json_deserialize(_context.response.raw_body, SSLCertificateConfiguration.from_dictionary)
        except Exception as e:
            self.logger.error(e, exc_info = True)
            raise
|
[
"ashish@cohesity.com"
] |
ashish@cohesity.com
|
a907b10a10fa739cebb6a2d9d28ef3ddc01bc2f9
|
e770533cc7d8517134d6f9159f5f9e52747c7153
|
/python/04selenium/selenium014.py
|
88e3348deb7fa6521253c9474ea652722cafae13
|
[] |
no_license
|
code1990/bootPython
|
5d878f7fac8aaa09a2b9e4a6d50a3c0f86c6dea5
|
e5debd59b07a2c713f3e692aa4f44a9d2e5baeae
|
refs/heads/master
| 2022-07-27T04:31:00.292692
| 2020-08-07T07:07:15
| 2020-08-07T07:07:23
| 206,805,170
| 0
| 0
| null | 2020-10-13T15:51:34
| 2019-09-06T13:56:39
|
Python
|
UTF-8
|
Python
| false
| false
| 634
|
py
|
# (14) Selecting from a drop-down list.
from selenium import webdriver
from selenium.webdriver.support.select import Select
from time import sleep
driver = webdriver.Chrome(executable_path='C:\driver\chromedriver.exe')
driver.implicitly_wait(10)
driver.get('http://www.baidu.com')
# Hover over / click the "Settings" (设置) link.
driver.find_element_by_link_text('设置').click()
sleep(1)
# Open "Search Settings" (搜索设置).
driver.find_element_by_link_text("搜索设置").click()
sleep(2)
# Number of search results to show per page.
sel = driver.find_element_by_xpath("//select[@id='nr']")
Select(sel).select_by_value('50')  # show 50 results
# ...
driver.quit()
|
[
"s1332177151@sina.com"
] |
s1332177151@sina.com
|
a9f4fef1e4376bfc10ef1a75e7fe509d20b30fac
|
1e0e610166b36e5c73e7ff82c4c0b8b1288990bf
|
/mail/mail02.py
|
dbc58ae4fa61f846897bc4421ee3a39ac60cfbbc
|
[] |
no_license
|
PythonOpen/PyhonProjects
|
4ef1e70a971b9ebd0eb6a09e63e22581ad302534
|
ede93314009564c31aa586d2f89ed8b1e4751c1b
|
refs/heads/master
| 2022-05-20T23:21:03.536846
| 2020-04-27T00:59:32
| 2020-04-27T00:59:32
| 250,142,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,665
|
py
|
from email.mime.text import MIMEText
import smtplib
# MIMEText三个主要参数
# 1.邮件内容
# 2.MIME子类型,在此案例我们用plain表示text类型
# 3.邮件编码格式
main_content="""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
</head>
<body>
<h1>这是一封HTML格式邮件</h1>
</body>
</html>
"""
msg=MIMEText(main_content,"html","utf-8")
# 发送email地址,此处地址直接使用个人QQ,密码一般需要临时输入,此处偷懒
from_addr="1083138609@qq.com"
# 此处密码是经过申请设置后的授权码,不是不是不是你的qq邮箱密码
from_pwd="ofgxddnrkxkqbaaf"
# 收件人信息
# 此处使用qq邮箱,我给我自己发送
to_addr="1083138609@qq.com"
# 输入SMTP服务器地址
# 此处根据不同的邮件服务商有不同的值
# 现在基本任何一家邮件服务商,如果采用第三方收发邮件,都需要开启授权选项
# 腾讯qq邮箱的smtp地址是 smtp.qq.com
smtp_srv="smtp.qq.com"
try:
# 两个参数
# 第一个是服务器地址,但一定是bytes格式,所以需要编码
# 第二个参数是服务器的接受访问端
# SMTP协议默认端口
srv= smtplib.SMTP_SSL(smtp_srv.encode(), 25)
# 登录邮箱发送
srv.login(from_addr, from_pwd)
# 发送邮件
# 三个参数
# 1.发送地址
# 2. 接收地址,必须是list形式
# 3.发送内容,作为字符串发送
srv.sendmail(from_addr,[to_addr],msg.as_string())
srv.quit()
except Exception as e:
print(e)
|
[
"1083138609@qq.com"
] |
1083138609@qq.com
|
5704a6e8200a1842c1da7c558ef26fbc91662ce3
|
c071eb46184635818e8349ce9c2a78d6c6e460fc
|
/system/python_stubs/-745935208/_ast/Eq.py
|
65f93d36a7d2abe45c41f6fe9cdfc0b757574bc7
|
[] |
no_license
|
sidbmw/PyCharm-Settings
|
a71bc594c83829a1522e215155686381b8ac5c6e
|
083f9fe945ee5358346e5d86b17130d521d1b954
|
refs/heads/master
| 2020-04-05T14:24:03.216082
| 2018-12-28T02:29:29
| 2018-12-28T02:29:29
| 156,927,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
# encoding: utf-8
# module _ast
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\numpy\core\_dummy.cp37-win_amd64.pyd
# by generator 1.146
# no doc
# no imports
from .cmpop import cmpop
class Eq(cmpop):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
_fields = ()
|
[
"siddharthnatamai@gmail.com"
] |
siddharthnatamai@gmail.com
|
95513d0222466b05775626ce21dbbcff1a0f2158
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p4VQE/R1/benchmark/startQiskit_noisy68.py
|
2abe4bba4276c4ad5fc8fe827f963e48f6c2e57f
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,423
|
py
|
# qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.cx(input_qubit[1],input_qubit[0]) # number=5
prog.cx(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[3],input_qubit[0]) # number=7
prog.swap(input_qubit[3],input_qubit[0]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5200
writefile = open("../data/startQiskit_noisy68.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
8d76957311696f5d8031a0f6af07845ca6839a63
|
6223dc2e5de7921696cb34fb62142fd4a4efe361
|
/.metadata/.plugins/org.eclipse.core.resources/.history/0/10c54487a564001418adf2b9b78fa3c6
|
9c5974137244fcb2386c19b2e81426b669f02921
|
[] |
no_license
|
Mushirahmed/python_workspace
|
5ef477b2688e8c25b1372f546752501ee53d93e5
|
46e2ed783b17450aba29e4e2df7b656522b2b03b
|
refs/heads/master
| 2021-03-12T19:24:50.598982
| 2015-05-25T10:23:54
| 2015-05-25T10:23:54
| 24,671,376
| 0
| 1
| null | 2015-02-06T09:27:40
| 2014-10-01T08:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,411
|
#!/usr/bin/env python
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy as np
from gnuradio import gr
import gras
class ztransform(gras.Block):
"""
docstring for block ztransform
"""
def __init__(self):
gras.Block.__init__(self,
name="ztransform",
in_sig=[np.float32],
out_sig=[np.float32])
def set_parameters(self,num,den,window):
self.num = list(map(float,num.split(" ")))
self.den = list(map(float,den.split(" ")))
print("self.num")
print("self.den")
self.n = window
self.num = np.poly1d(self.num)
self.den = np.poly1d(self.den)
self.den_coeff = self.den.c
nm_coeff = self.num.c
#print self.den_coeff
self.den_ord = self.den.order
self.num_ord = self.num.order
for i in range(0,self.den_ord-self.num_ord):
nm_coeff = np.insert(nm_coeff,0,0)
self.num_coeff = nm_coeff
#print self.num_coeff
self.in_q = [0]*(self.den_ord + 1)
self.out_q = [0]*(self.den_ord + 1)
self.final_q = []
def work(self, input_items, output_items):
in0 = input_items[0]
out = output_items[0]
#print "i am in work function"
# <+signal processing here+>
ans1 = 0
ans2 = 0
for i in range(1,self.den_ord + 1):
ans1 += self.den_coeff[i]*self.out_q[len(self.out_q)-i]
self.in_q.append(float(in0[0]))
#print self.in_q
for i in range(0,self.den_ord + 1):
ans2 += self.num_coeff[i]*self.in_q[len(self.in_q)-i-1]
#print ans2
ans = ans2 - ans1
ans = ans/self.den_coeff[0]
self.out_q.append(ans)
self.out_q.pop(0)
self.in_q.pop(0)
out[0] = ans
print "OUTPUT:",out[0]
#self.final_q.append(ans)
self.consume(0,1)
self.produce(0,1)
|
[
"imushir@gmail.com"
] |
imushir@gmail.com
|
|
c79f5defc4669af504b01120e06bf5dba6eb51f4
|
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
|
/src/main/output/community/way/lot_world/month/car_level/president_program.py
|
09d22c760a6b771bc4aee758f82f7a48327f5762
|
[] |
no_license
|
matkosoric/GenericNameTesting
|
860a22af1098dda9ea9e24a1fc681bb728aa2d69
|
03f4a38229c28bc6d83258e5a84fce4b189d5f00
|
refs/heads/master
| 2021-01-08T22:35:20.022350
| 2020-02-21T11:28:21
| 2020-02-21T11:28:21
| 242,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,135
|
py
|
## Get supported languages for translation
The following code gets a list of language codes representing languages supported for translation, using the GetLanguagesForTranslate method.
1. Create a new Node.JS project in your favorite IDE.
2. Add the code provided below.
3. Replace the `key` value with an access key valid for your subscription.
4. Run the program.
```nodejs
'use strict';
let https = require ('https');
// **********************************************
// *** Update or verify the following values. ***
// **********************************************
// Replace the subscriptionKey string value with your valid subscription key.
let subscriptionKey = '970028e5525647c21ea1b2b2ab045fbf';
let host = 'api.microsofttranslator.com';
let path = '/V2/Http.svc/GetLanguagesForTranslate';
let params = '';
let response_handler = function (response) {
let body = '';
response.on ('data', function (d) {
body += d;
});
response.on ('end', function () {
console.log (body);
});
response.on ('error', function (e) {
console.log ('Error: ' + e.message);
});
};
let GetLanguagesForTranslate = function () {
let request_params = {
method : 'GET',
hostname : host,
path : path + params,
headers : {
'ed56e4d6e94b67527cced64771b62d28' : subscriptionKey,
}
};
let req = https.request (request_params, response_handler);
req.end ();
}
GetLanguagesForTranslate ();
```
**Get supported languages for translation response**
A successful response is returned in XML, as shown in the following example:
```xml
<ArrayOfstring xmlns="http://schemas.microsoft.com/2003/10/Serialization/Arrays" xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
<string>af</string>
<string>ar</string>
<string>bn</string>
<string>bs-Latn</string>
<string>bg</string>
<string>ca</string>
<string>zh-CHS</string>
<string>zh-CHT</string>
<string>yue</string>
<string>hr</string>
<string>cs</string>
<string>da</string>
<string>nl</string>
<string>en</string>
<string>et</string>
<string>fj</string>
<string>fil</string>
<string>fi</string>
<string>fr</string>
<string>de</string>
<string>el</string>
<string>ht</string>
<string>he</string>
<string>hi</string>
<string>mww</string>
<string>hu</string>
<string>id</string>
<string>it</string>
<string>ja</string>
<string>sw</string>
<string>tlh</string>
<string>tlh-Qaak</string>
<string>ko</string>
<string>lv</string>
<string>lt</string>
<string>mg</string>
<string>ms</string>
<string>mt</string>
<string>yua</string>
<string>no</string>
<string>otq</string>
<string>fa</string>
<string>pl</string>
<string>pt</string>
<string>ro</string>
<string>ru</string>
<string>sm</string>
<string>sr-Cyrl</string>
<string>sr-Latn</string>
<string>sk</string>
<string>sl</string>
<string>es</string>
<string>sv</string>
<string>ty</string>
<string>ta</string>
<string>th</string>
<string>to</string>
<string>tr</string>
<string>uk</string>
<string>ur</string>
<string>vi</string>
<string>cy</string>
</ArrayOfstring>
```
|
[
"soric.matko@gmail.com"
] |
soric.matko@gmail.com
|
8e81c83eca27fafe4374430fd062b2cda78d9877
|
ca045f22bc2660a24d44ecacdb41f7a646df3d19
|
/src/exemplos/2_Operadores/9_exponenciacao.py
|
4888c9e422a96a463e3625ae6a742f61a4d519ba
|
[] |
no_license
|
gabriel1997castro/CIC-APC
|
db7e4114bfa0925e976b64638cac9e94845d8376
|
217f66ab04b0529886d2ef22ce1de15103440ba3
|
refs/heads/master
| 2020-12-25T11:32:06.276710
| 2016-01-27T02:57:24
| 2016-01-27T02:57:24
| 50,466,976
| 0
| 0
| null | 2016-01-26T23:32:49
| 2016-01-26T23:32:49
| null |
UTF-8
|
Python
| false
| false
| 2,414
|
py
|
# -*- coding: utf-8 -*-
# @file: 9_exponenciacao.py
# @author: Guilherme N. Ramos (gnramos@unb.br)
# @disciplina: Algoritmos e Programação de Computadores
#
# Exemplos de utilização do operador de exponenciação.
print 'Exponenciação:'
base = 0
expoente = 0
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 1
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 2
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 3
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
base = 1
expoente = 0
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 1
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 2
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 3
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
base = 2
expoente = 0
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 1
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 2
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 3
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
base = 3
expoente = 0
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 1
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 2
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
expoente = 3
resultado = base ** expoente
print ' (%d)**(%d) = %d' % (base, expoente, resultado)
base = 2
expoente = -2
resultado = base ** expoente
print ' (%d)**(%d) = %f' % (base, expoente, resultado)
expoente = -1
resultado = base ** expoente
print ' (%d)**(%d) = %f' % (base, expoente, resultado)
expoente = 0.5
resultado = base ** expoente
print ' (%d)**(%f) = %f' % (base, expoente, resultado)
base = 4
expoente = 0.5
resultado = base ** expoente
print ' (%d)**(%f) = %f' % (base, expoente, resultado)
base = 8
expoente = 1.0/3.0
resultado = base ** expoente
print ' (%d)**(%f) = %f' % (base, expoente, resultado)
|
[
"ramos@gnramos.com"
] |
ramos@gnramos.com
|
dc55ed13f4103e66e4e5edcf55079267753cb476
|
ee4db47ccecd23559b3b6f3fce1822c9e5982a56
|
/Build Chatbots/Tokenization.py
|
db3b63f6326833b215d84cf8c42b27248d31c56d
|
[] |
no_license
|
meoclark/Data-Science-DropBox
|
d51e5da75569626affc89fdcca1975bed15422fd
|
5f365cedc8d0a780abeb4e595cd0d90113a75d9d
|
refs/heads/master
| 2022-10-30T08:43:22.502408
| 2020-06-16T19:45:05
| 2020-06-16T19:45:05
| 265,558,242
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
from nltk.tokenize import word_tokenize,sent_tokenize
ecg_text = 'An electrocardiogram is used to record the electrical conduction through a person\'s heart. The readings can be used to diagnose cardiac arrhythmias.'
tokenized_by_word = word_tokenize(ecg_text)
tokenized_by_sentence = sent_tokenize(ecg_text)
try:
print('Word Tokenization:')
print(tokenized_by_word)
except:
print('Expected a variable called `tokenized_by_word`')
try:
print('Sentence Tokenization:')
print(tokenized_by_sentence)
except:
print('Expected a variable called `tokenized_by_sentence`')
|
[
"oluchukwuegbo@gmail.com"
] |
oluchukwuegbo@gmail.com
|
39ccfa1bfe82238f935dda6943bcfeabd47426bd
|
f200651e624d5e5cd2f2262359a5932216d2d443
|
/demo-effects-html5-canvas/fire_controls/conv.py
|
2431b8a237fa8e6630f04a1d1e18c70d1a4332e7
|
[] |
no_license
|
lwerdna/lwerdna.github.io
|
fbea38c62029884930ebfac70c9d455979c43fde
|
f80c7cb173359e13b2894d64fb735c0396278b7e
|
refs/heads/master
| 2023-07-19T17:07:20.169897
| 2023-07-07T18:39:02
| 2023-07-07T18:39:02
| 38,472,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,112
|
py
|
palette = [
[0,0,0], [0,1,1], [0,4,5], [0,7,9], [0,8,11], [0,9,12], [15,6,8], [25,4,4],
[33,3,3], [40,2,2], [48,2,2], [55,1,1], [63,0,0], [63,0,0], [63,3,0], [63,7,0],
[63,10,0], [63,13,0], [63,16,0], [63,20,0], [63,23,0], [63,26,0], [63,29,0],
[63,33,0], [63,36,0], [63,39,0], [63,39,0], [63,40,0], [63,40,0], [63,41,0],
[63,42,0], [63,42,0], [63,43,0], [63,44,0], [63,44,0], [63,45,0], [63,45,0],
[63,46,0], [63,47,0], [63,47,0], [63,48,0], [63,49,0], [63,49,0], [63,50,0],
[63,51,0], [63,51,0], [63,52,0], [63,53,0], [63,53,0], [63,54,0], [63,55,0],
[63,55,0], [63,56,0], [63,57,0], [63,57,0], [63,58,0], [63,58,0], [63,59,0],
[63,60,0], [63,60,0], [63,61,0], [63,62,0], [63,62,0], [63,63,0],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63]
]
for p in palette:
print '[%d,%d,%d],' % (p[0]*4, p[1]*4, p[2]*4)
|
[
"andrew@vector35.com"
] |
andrew@vector35.com
|
142c92391f03f9036f5df23bd5d855af23e4e0ac
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/YXjx9G5uQ4CdYPuB4_20.py
|
f0a10dccbee452d6af543673b232e8f6d5b7d4b0
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
"""
**Mubashir** needs your help to compare two lists.
First list `lst1` contains some numbers and second list `lst2` contains
**squared values of numbers given in the first list**.
Create a function which takes these two lists and returns `True` if all square
values are available, `False` otherwise.
lst1 = [121, 144, 19, 161, 19, 144, 19, 11]
lst2 = [121, 14641, 20736, 361, 25921, 361, 20736, 361]
Returns `True` because **121 is square of 11, 14641 is square of 121, 20736 is
square of 144, 361 is square of 19, 25921 the square of 161, and so on...**
lst1 = [121, 144, 19, 161, 19, 144, 19, 11]
lst2 = [11*11, 121*121, 144*144, 19*19, 161*161, 19*19, 144*144, 19*19]
### Examples
simple_comp([121, 144, 19, 161, 19, 144, 19, 11], [121, 14641, 20736, 361, 25921, 361, 20736, 361]) ➞ True
simple_comp([4, 4], [1, 31]) ➞ False
simple_comp([2, 2, 3], [4, 4, 9]) ➞ True
### Notes
Numbers can be in any order.
"""
def simple_comp(lst1, lst2):
if lst1==None or lst2==None:
return lst1==lst2
return sorted(x**2 for x in lst1)==sorted(lst2)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
37d5fa2a6f6b3325e6960b512dbb88914fa86b99
|
b2ba670818623f8ab18162382f7394baed97b7cb
|
/test-data/AndroidSlicer/Mitzuli/DD/10.py
|
d2f993240a11bbf9562d22402b0c95323cee8d8d
|
[
"MIT"
] |
permissive
|
hsumyatwin/ESDroid-artifact
|
012c26c40537a79b255da033e7b36d78086b743a
|
bff082c4daeeed62ceda3d715c07643203a0b44b
|
refs/heads/main
| 2023-04-11T19:17:33.711133
| 2022-09-30T13:40:23
| 2022-09-30T13:40:23
| 303,378,286
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,184
|
py
|
#start monkey test seedNo 0
import os;
from subprocess import Popen
from subprocess import PIPE
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
from com.android.monkeyrunner.MonkeyDevice import takeSnapshot
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
from com.android.monkeyrunner import MonkeyView
import random
import sys
import subprocess
from sys import exit
from random import randint
device = MonkeyRunner.waitForConnection()
package = 'com.mitzuli'
activity ='com.mitzuli.MainActivity'
runComponent = package+'/'+activity
device.startActivity(component=runComponent)
MonkeyRunner.sleep(0.5)
MonkeyRunner.sleep(0.5)
device.touch(1300,113, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(1020,121, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(1001,127, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(863,125, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(355,1601, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(247,1839, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(80,154, 'DOWN_AND_UP')
|
[
"hsumyatwin@gmail.com"
] |
hsumyatwin@gmail.com
|
8f0dd18ff0e2846a87a5f2ca82b2163c648938b6
|
2479345dafbf0ac1118f34fbd3471871a3ac5c11
|
/demo/libdemo/list_countries.py
|
9292611d6422dfbe06ee3e2c9b7058f6e10a215d
|
[] |
no_license
|
srikanthpragada/PYTHON_06_MAY_2021
|
e2fc4d32a38f085658f87d35f31df65ee837a440
|
f30a3c4541e0fc15d157446721b514f791602919
|
refs/heads/master
| 2023-06-02T23:13:53.786444
| 2021-06-16T03:00:38
| 2021-06-16T03:00:38
| 365,402,518
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
import requests
resp = requests.get("https://restcountries.eu/rest/v2/all")
if resp.status_code != 200:
print('Sorry! Could not get details!')
exit(1)
countries = resp.json()
for c in countries:
print(f"{c['name']:50} - {c['capital']}")
|
[
"srikanthpragada@gmail.com"
] |
srikanthpragada@gmail.com
|
bb363c5ddd3739e93a04900c1353f55c9f17c3ab
|
923f9270a12be35fdd297d8f27e522c601e94eab
|
/src/decay/test/test_dc_nose.py
|
00a9741044a433b8333c1da2f59dfc64f2536274
|
[] |
no_license
|
t-bltg/INF5620
|
a06b6e06b6aba3bc35e933abd19c58cd78584c1f
|
d3e000462302839b49693cfe06a2f2df924c5027
|
refs/heads/master
| 2021-05-31T00:41:41.624838
| 2016-03-22T09:29:00
| 2016-03-22T09:29:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,328
|
py
|
import nose.tools as nt
import sys, os
sys.path.insert(0, os.pardir)
import dc_mod_unittest as dc_mod
import numpy as np
def exact_discrete_solution(n, I, a, theta, dt):
"""Return exact discrete solution of the theta scheme."""
dt = float(dt) # avoid integer division
factor = (1 - (1-theta)*a*dt)/(1 + theta*dt*a)
return I*factor**n
def test_against_discrete_solution():
"""
Compare result from solver against
formula for the discrete solution.
"""
theta = 0.8; a = 2; I = 0.1; dt = 0.8
N = int(8/dt) # no of steps
u, t = dc_mod.solver(I=I, a=a, T=N*dt, dt=dt, theta=theta)
u_de = np.array([exact_discrete_solution(n, I, a, theta, dt)
for n in range(N+1)])
diff = np.abs(u_de - u).max()
nt.assert_almost_equal(diff, 0, delta=1E-14)
def test_solver():
"""
Compare result from solver against
precomputed arrays for theta=0, 0.5, 1.
"""
I=0.8; a=1.2; T=4; dt=0.5 # fixed parameters
precomputed = {
't': np.array([ 0. , 0.5, 1. , 1.5, 2. , 2.5,
3. , 3.5, 4. ]),
0.5: np.array(
[ 0.8 , 0.43076923, 0.23195266, 0.12489759,
0.06725255, 0.03621291, 0.01949926, 0.0104996 ,
0.00565363]),
0: np.array(
[ 8.00000000e-01, 3.20000000e-01,
1.28000000e-01, 5.12000000e-02,
2.04800000e-02, 8.19200000e-03,
3.27680000e-03, 1.31072000e-03,
5.24288000e-04]),
1: np.array(
[ 0.8 , 0.5 , 0.3125 , 0.1953125 ,
0.12207031, 0.07629395, 0.04768372, 0.02980232,
0.01862645]),
}
for theta in 0, 0.5, 1:
u, t = dc_mod.solver(I, a, T, dt, theta=theta)
diff = np.abs(u - precomputed[theta]).max()
# Precomputed numbers are known to 8 decimal places
nt.assert_almost_equal(diff, 0, places=8,
msg='theta=%s' % theta)
def test_potential_integer_division():
"""Choose variables that can trigger integer division."""
theta = 1; a = 1; I = 1; dt = 2
N = 4
u, t = dc_mod.solver(I=I, a=a, T=N*dt, dt=dt, theta=theta)
u_de = np.array([exact_discrete_solution(n, I, a, theta, dt)
for n in range(N+1)])
diff = np.abs(u_de - u).max()
nt.assert_almost_equal(diff, 0, delta=1E-14)
def test_convergence_rates():
"""Compare empirical convergence rates to exact ones."""
# Set command-line arguments directly in sys.argv
sys.argv[1:] = '--I 0.8 --a 2.1 --T 5 '\
'--dt 0.4 0.2 0.1 0.05 0.025'.split()
# Suppress output from dc_mod.main()
stdout = sys.stdout # save standard output for later use
scratchfile = open('.tmp', 'w') # fake standard output
sys.stdout = scratchfile
r = dc_mod.main()
for theta in r:
nt.assert_true(r[theta]) # check for non-empty list
scratchfile.close()
sys.stdout = stdout # restore standard output
expected_rates = {0: 1, 1: 1, 0.5: 2}
for theta in r:
r_final = r[theta][-1]
# Compare to 1 decimal place
nt.assert_almost_equal(expected_rates[theta], r_final,
places=1, msg='theta=%s' % theta)
# no need for any main
|
[
"hpl@simula.no"
] |
hpl@simula.no
|
34e0d339fa61eb2fba8a107ea109b6b0c56efc1e
|
743d4545702532c967efee2c12015d91853b6b80
|
/orders/migrations/0001_initial.py
|
50adf5b21efe66d7cf544e46d52e15ce62c1faa2
|
[] |
no_license
|
SOAD-Group-36/server
|
81a7ced2149174fe4d9c1644ee2afd78054d7d29
|
5a5a1e2cd4a361cff8fff008600d65d6dc8edaab
|
refs/heads/main
| 2023-02-03T06:44:36.041311
| 2020-12-12T10:45:21
| 2020-12-12T10:45:21
| 305,055,627
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
# Generated by Django 3.1.2 on 2020-11-11 15:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1)),
('price', models.DecimalField(decimal_places=2, max_digits=7)),
('placed_on', models.DateTimeField(auto_now_add=True)),
('status', models.CharField(choices=[('Pl', 'Placed'), ('Pr', 'Processed'), ('Pk', 'Packed'), ('Sh', 'Shipped'), ('Dl', 'Delivered'), ('Rj', 'Rejected'), ('Rt', 'Returned'), ('Rc', 'Received')], default='Pl', max_length=2)),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='products.product')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"masterashu@live.in"
] |
masterashu@live.in
|
8b57c9efa4983527dbd55908cbb5b5acbd4edbeb
|
20e3ee6642d20578e48756963798acfe307ac6b5
|
/Miscellaneous/Python XML Parser/Example.py
|
ef7e6dc6952d02a5cb41a0c433b4bb1594c14bce
|
[] |
no_license
|
sirinenisaikiran/Python
|
538f64276767435de3233b720f547aac0bf4d511
|
bdfef0d1c04c7f3b9fc91a164b5fd1789828176c
|
refs/heads/master
| 2023-01-31T00:53:01.650916
| 2021-06-06T10:39:20
| 2021-06-06T10:39:20
| 237,744,104
| 0
| 0
| null | 2023-01-26T03:38:47
| 2020-02-02T08:58:49
|
Python
|
UTF-8
|
Python
| false
| false
| 455
|
py
|
import xml.etree.ElementTree as ET
mytree = ET.parse('Sample.xml')
myroot = mytree.getroot()
# print(myroot)
# print(myroot.tag)
# print(myroot[0].tag)
# print(myroot[0].attrib)
#
# for x in myroot[0]:
# print(x.tag, x.attrib)
# for x in myroot[0]:
# print(x.text)
# for x in myroot[0]:
# print(x.tag, x.attrib, x.text)
for x in myroot.findall('food'):
item = x.find('item').text
price = x.find('price').text
print(item,price)
|
[
"saikiran.sirneni@gmail.com"
] |
saikiran.sirneni@gmail.com
|
a199a85117918b1c8fe6769bfdcbff3be408262e
|
5186cc912502f9f32948c3810b5adc2cd0f015d8
|
/soybean/reactor.py
|
b9e91523fe64d36b907749d9656b9625adbdbb63
|
[
"Apache-2.0"
] |
permissive
|
lcgong/soybean
|
c0ef4f1a88191a653bfd1f70881a2f1e470943fd
|
43fd891113b05c79419d7c0850145c8284e51206
|
refs/heads/main
| 2023-02-27T08:47:47.198713
| 2021-02-03T04:00:52
| 2021-02-03T04:00:52
| 334,369,214
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,575
|
py
|
import inspect
import asyncio
import logging
from rocketmq.client import PushConsumer, ConsumeStatus
from .utils import make_group_id, json_loads
from .event import OccupiedEvent
from .typing import HandlerType
from .exceptions import UnkownArgumentError
logger = logging.getLogger("soybean.reactor")
class Reactor:
def __init__(self, channel, topic: str, expression: str,
handler: HandlerType, depth: int):
self._channel = channel
self._topic = topic
self._expression = expression
self._handler = handler
self._reactor_id = make_group_id(channel.name, handler, depth)
self._consumer = None
argvals_getter = build_argvals_getter(handler)
self._handler_argvals_getter = argvals_getter
self._busy_event = None
@property
def reactor_id(self):
return self._reactor_id
async def start(self):
import threading
print(
f"reacter-start thread: {threading.get_ident()}, loop: {id(asyncio.get_event_loop())}")
consumer = PushConsumer(group_id=self._reactor_id)
consumer.set_thread_count(1)
consumer.set_name_server_address(self._channel.namesrv_addr)
self._busy_event = OccupiedEvent()
loop = asyncio.get_running_loop()
def run_coroutine(coroutine):
# 在其它线程以线程安全的方式执行协程,并阻塞等待执行结果
future = asyncio.run_coroutine_threadsafe(coroutine, loop)
return future.result
def _callback(msg):
run_coroutine(self._busy_event.acquire())
try:
arg_values = self._handler_argvals_getter(msg)
run_coroutine( self._handler(*arg_values))
return ConsumeStatus.CONSUME_SUCCESS
except Exception as exc:
logger.error((f"caught an error in reactor "
f"'{self._reactor_id}': {exc}"),
exc_info=exc)
return ConsumeStatus.RECONSUME_LATER
finally:
run_coroutine(self._busy_event.release())
consumer.subscribe(self._topic, _callback, expression=self._expression)
consumer.start()
self._consumer = consumer
async def stop(self):
await self._busy_event.wait_idle()
# 问题:当前rocket-client-cpp实现在shutdown之前并不能保证工作线程正常结束
# 这会导致工作线程和asyncio死锁,所以得到callback线程里任务结束后,再多等待
# 一会儿,等待rocket-client-cpp处理完consumer工作线程,再关闭consumer
await asyncio.sleep(0.5)
if self._consumer:
self._consumer.shutdown()
self._consumer = None
def build_argvals_getter(handler):
arguments = inspect.signature(handler).parameters
getters = []
unknowns = []
for arg_name, arg_spec in arguments.items():
getter_factory = _getter_factories.get(arg_name)
if getter_factory is not None:
getters.append(getter_factory(arg_spec))
continue
unknowns.append((arg_name, arg_spec))
if unknowns:
mod = handler.__module__
func = handler.__qualname__
args = ", ".join([f"'{name}'" for name, spec in unknowns])
errmsg = f"Unknown arguments: {args} of '{func}' in '{mod}'"
raise UnkownArgumentError(errmsg)
def _getter(msgobj):
return (arg_getter(msgobj) for arg_getter in getters)
return _getter
def getter_message(arg_spec):
if arg_spec.annotation == str:
return lambda msgobj: msgobj.body.decode("utf-8")
elif arg_spec.annotation == bytes:
return lambda msgobj: msgobj.body
else:
return lambda msgobj: json_loads(msgobj.body.decode("utf-8"))
def getter_msg_id(arg_spec):
return lambda msgobj: getattr(msgobj, "id")
def getter_msg_topic(arg_spec):
return lambda msgobj: getattr(msgobj, "tpoic").decode("utf-8")
def getter_msg_keys(arg_spec):
return lambda msgobj: getattr(msgobj, "keys").decode("utf-8")
def getter_msg_tags(arg_spec):
return lambda msgobj: getattr(msgobj, "tags").decode("utf-8")
_getter_factories = {
"message": getter_message,
"message_id": getter_msg_id,
"message_topic": getter_msg_topic,
"message_keys": getter_msg_keys,
"message_tags": getter_msg_tags,
"msg_id": getter_msg_id,
"msg_topic": getter_msg_topic,
"msg_keys": getter_msg_keys,
"msg_tags": getter_msg_tags,
}
|
[
"lcgong@gmail.com"
] |
lcgong@gmail.com
|
6c3f8ad91c11294558986e5612928dcb59119e90
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/281/81893/submittedfiles/testes.py
|
9d5ad8d30fc63ed816896c55f3d77b98a8e9722a
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
# -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
x=int(input('Digite um número:'))
while x>0 and x<=13:
print('Ok')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
067a7abea5aa8ea89d7339cdb1ac2cad200418bb
|
5fbf2adec8d7647b9aeefa51695aa3f13ee57810
|
/server/load_backup_locally.py
|
076c18cbae05647fcf9c789b079ff13e403dc7b7
|
[] |
no_license
|
angelacantfly/dancedeets-monorepo
|
8bb6579f6f5d30e88c8d4c0e239c6c8fed678094
|
6b7a48d91d0737010acd9e08a89d99c2c982205a
|
refs/heads/master
| 2021-01-20T09:14:22.613044
| 2017-08-26T21:48:14
| 2017-08-26T21:48:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,381
|
py
|
#!/usr/bin/python
"""
# App Engine import data from Datastore Backup to localhost
You can use this script to import large(ish) App Engine Datastore backups to your localohst dev server.
## Getting backup files
Follow instructions from Greg Bayer's awesome article to fetch the App Engine backups:
http://gbayer.com/big-data/app-engine-datastore-how-to-efficiently-export-your-data/
Basically, download and configure gsutil and run:
```
gsutil -m cp -R gs://your_bucket_name/your_path /local_target
```
## Reading data to your local (dev_appserver) application
Copy-paste this gist to your Interactive Console, set correct paths and press `Execute`.
(default: http://localhost:8000/console)
"""
import sys
sys.path.insert(0, '/usr/local/google_appengine')
print sys.path
from google.appengine.api.files import records
from google.appengine.datastore import entity_pb
from google.net.proto.ProtocolBuffer import ProtocolBufferDecodeError
from google.appengine.ext import ndb
from os.path import isfile
from os.path import join
from os import listdir
from events.eventdata import DBEvent
def run():
# Set your downloaded folder's path here (must be readable by dev_appserver)
mypath = '/Users/lambert/Dropbox/dancedeets/data/datastore_backup_datastore_backup_2016_11_19_DBEvent/15700286559371541387849311E815D'
# Se the class of the objects here
cls = DBEvent
# Set your app's name here
appname = "dev~None"
# Do the harlem shake
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
for file in onlyfiles:
i = 0
try:
raw = open(mypath + "/" + file, 'r')
reader = records.RecordsReader(raw)
to_put = list()
for record in reader:
entity_proto = entity_pb.EntityProto(contents=record)
entity_proto.key_.app_ = appname
obj = cls._from_pb(entity_proto)
to_put.append(obj)
i += 1
if i % 100 == 0:
print "Saved %d %ss" % (i, '') #entity.kind())
ndb.put_multi(to_put) # use_memcache=False)
to_put = list()
ndb.put_multi(to_put) # use_memcache=False)
to_put = list()
print "Saved %d" % i
except ProtocolBufferDecodeError:
""" All good """
run()
|
[
"mlambert@gmail.com"
] |
mlambert@gmail.com
|
3a6f927241b180e157f7756d4833dee91440dfa9
|
7c8bd2e26fdabf1555e0150272ecf035f6c21bbd
|
/삼성기출/새로운 게임2.py
|
3f7cacad987e8780f64a22bcecc01d30ec281fc1
|
[] |
no_license
|
hyeokjinson/algorithm
|
44090c2895763a0c53d48ff4084a96bdfc77f953
|
46c04e0f583d4c6ec4f51a24f19a373b173b3d5c
|
refs/heads/master
| 2021-07-21T10:18:43.918149
| 2021-03-27T12:27:56
| 2021-03-27T12:27:56
| 245,392,582
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,461
|
py
|
from _collections import deque
#체스판 말 갯수:k(1번~k번)
#이동방향:위,아래,왼쪽,오른쪽
#흰색인 경우 그 칸으로 이동,이동하는 칸에 말이 있으면 그곳에 스택 쌓기
#빨간색인 경우 이동하고 순서 reverse
#파란색인 경우 말의 이동방향을 역방향 한칸 이동 ,이동칸이 파란색인 경우 이동x
dx=[0,0,-1,1]
dy=[1,-1,0,0]
rev_direction={0:1,1:0,2:3,3:2}
def check():
for i in range(n):
for j in range(n):
if len(start[i][j])>=4:
return True
return False
def solve():
turn=0
p=0
while True:
turn+=1
if turn>1000:
return -1
for number in range(1,k+1):
x,y,d=horse[number]
nx,ny=x+dx[d],y+dy[d]
if nx<0 or nx>=n or ny<0 or ny>=n or arr[nx][ny]==2:
nd=rev_direction[d]
nx,ny=x+dx[nd],y+dy[nd]
if nx<0 or nx>=n or ny<0 or ny>=n or arr[nx][ny]==2:
horse[number][2]=nd
continue
p=1
if arr[nx][ny]==0:
left=start[x][y][:start[x][y].index(number)]
right=start[x][y][start[x][y].index(number):]
start[x][y]=left
start[nx][ny].extend(right)
if len(start[nx][ny])>=4:
return turn
for i in right:
horse[i][0],horse[i][1]=nx,ny
if p==1:
horse[number][2]=nd
p=0
elif arr[nx][ny]==1:
left = start[x][y][:start[x][y].index(number)]
right = start[x][y][start[x][y].index(number):]
start[x][y] = left
right.reverse()
start[nx][ny].extend(right)
if len(start[nx][ny]) >= 4:
return turn
for i in right:
horse[i][0], horse[i][1] = nx, ny
if p == 1:
horse[number][2] = nd
p = 0
if __name__ == '__main__':
n,k=map(int,input().split())
#0:흰색,1:빨간색,2:파란색
arr=[list(map(int,input().split()))for _ in range(n)]
start=[[[]*n for _ in range(n)] for _ in range(n)]
horse=dict()
for i in range(1,k+1):
x,y,v=map(int,input().split())
start[x-1][y-1].append(i)
horse[i]=[x-1,y-1,v-1]
print(solve())
|
[
"hjson817@gmail.com"
] |
hjson817@gmail.com
|
845db2f47f763ae4e09097e253320bf541736141
|
53eee7eb899cb518983008532257037fb89def13
|
/343.integer-break.py
|
e226facec72a5754c30be689c04e5eec6a509a9c
|
[] |
no_license
|
chenxu0602/LeetCode
|
0deb3041a66cb15e12ed4585bbe0fefce5dc6b26
|
3dc5af2bc870fcc8f2142130fcd2b7cab8733151
|
refs/heads/master
| 2023-07-05T19:26:21.608123
| 2023-07-02T08:35:35
| 2023-07-02T08:35:35
| 233,351,978
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
#
# @lc app=leetcode id=343 lang=python3
#
# [343] Integer Break
#
# https://leetcode.com/problems/integer-break/description/
#
# algorithms
# Medium (50.19%)
# Likes: 1086
# Dislikes: 227
# Total Accepted: 110.4K
# Total Submissions: 219.2K
# Testcase Example: '2'
#
# Given a positive integer n, break it into the sum of at least two positive
# integers and maximize the product of those integers. Return the maximum
# product you can get.
#
# Example 1:
#
#
#
# Input: 2
# Output: 1
# Explanation: 2 = 1 + 1, 1 × 1 = 1.
#
#
# Example 2:
#
#
# Input: 10
# Output: 36
# Explanation: 10 = 3 + 3 + 4, 3 × 3 × 4 = 36.
#
# Note: You may assume that n is not less than 2 and not larger than 58.
#
#
#
# @lc code=start
import math
class Solution:
def integerBreak(self, n: int) -> int:
# if n == 2:
# return 1
# if n == 3:
# return 2
# dp = [0] * (n + 1)
# dp[2] = 2
# dp[3] = 3
# for i in range(4, n + 1):
# dp[i] = max(dp[i-2] * 2, dp[i-3] * 3)
# return dp[n]
# O(logN)
if n == 2:
return 1
elif n == 3:
return 2
elif n % 3 == 0:
return int(math.pow(3, n // 3))
elif n % 3 == 1:
return 2 * 2 * int(math.pow(3, (n - 4) // 3))
else:
return 2 * int(math.pow(3, n // 3))
# @lc code=end
|
[
"chenxu@Chens-iMac.local"
] |
chenxu@Chens-iMac.local
|
2c190be799017c52cc5a83639396080f5ef20ae9
|
82c54cab8e0c5b73e1fdb9615296613cc43929a0
|
/authentication/forms.py
|
d3f7b622935250beef47f85ac1ec6f9ee9435405
|
[] |
no_license
|
creechcorbin/twitter_clone
|
e4146657bd13043544f846c48b34fe83e90e91da
|
bd075bd53fd9e5558cda85ade86ed9995f72118c
|
refs/heads/master
| 2022-12-10T09:23:37.036180
| 2020-09-05T03:23:32
| 2020-09-05T03:23:32
| 292,993,852
| 0
| 0
| null | 2020-09-09T01:08:27
| 2020-09-05T03:22:43
|
Python
|
UTF-8
|
Python
| false
| false
| 345
|
py
|
from django import forms
class LoginForm(forms.Form):
username = forms.CharField(max_length=80)
password = forms.CharField(widget=forms.PasswordInput)
class SignupForm(forms.Form):
username = forms.CharField(max_length=80)
displayname = forms.CharField(max_length=80)
password = forms.CharField(widget=forms.PasswordInput)
|
[
"creechcorbin@gmail.com"
] |
creechcorbin@gmail.com
|
855c082aa1c28384a3ca3f6688c7cd52583b2287
|
47e93b916a6b55871997bfa95bb2f69676416b00
|
/landerdb.py
|
0486a4742f580c46200c8342d154cb857fb29434
|
[] |
no_license
|
Inqre/Melody
|
dcc88acb83b23a3c0786ab5b9529b1dcd71f6ece
|
84f298e5446f53c5f3fededd9f2920552db74c87
|
refs/heads/master
| 2020-05-15T22:32:28.959905
| 2013-11-08T02:45:06
| 2013-11-08T02:45:06
| 14,127,017
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,807
|
py
|
import json
import os
__version__ = "1.0.0"
class Connect:
def __init__(self, db_file):
self.db = db_file
self.json_data = {}
# allows find to be called multiple times, without
# re-reading from disk unless a change has occured
self.stale = True
if not os.path.exists(self.db):
self._save()
def _load(self):
if self.stale:
with open(self.db, 'rb') as fp:
try:
self.json_data = json.load(fp)
except:
with open(self.db, 'wb') as file:
file.write(json.dumps(self.json_data))
self._load()
def _save(self):
with open(self.db, 'wb') as fp:
json.dump(self.json_data, fp)
self.stale = True
def insert(self, collection, data):
self._load()
if collection not in self.json_data:
self.json_data[collection] = []
self.json_data[collection].append(data)
self._save()
def remove(self, collection, data):
self._load()
if collection not in self.json_data:
return False
self.json_data[collection].remove(data) #Will only delete one entry
self._save()
def find(self, collection, data):
self._load()
if collection not in self.json_data:
return False
output = []
for x in self.json_data[collection]:
if data != "all":
for y in data:
try:
if data[y] == x[y]:
output.append(x)
except KeyError:
continue
else:
output.append(x)
return output
|
[
"max00355@gmail.com"
] |
max00355@gmail.com
|
8074d9f48b99a19a25b95da45d02787fb65ed44d
|
771247a4498d50745c5fbff09e7446ea9213ab19
|
/Py8/export_openweather.py
|
a80a7c5c48213f7a13b051fcbfb593a6a75dd25e
|
[] |
no_license
|
ostrowsky/Parcer
|
42697f9a98f42c8220675d540e8dc2a95855783e
|
f953b7cbb6b948df894950ee7ed804fcd6b8e811
|
refs/heads/master
| 2021-01-21T06:39:46.184872
| 2017-06-23T16:07:15
| 2017-06-23T16:07:15
| 91,581,143
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,845
|
py
|
""" OpenWeatherMap (экспорт)
Сделать скрипт, экспортирующий данные из базы данных погоды,
созданной скриптом openweather.py. Экспорт происходит в формате CSV или JSON.
Скрипт запускается из командной строки и получает на входе:
export_openweather.py --csv filename [<город>]
export_openweather.py --json filename [<город>]
export_openweather.py --html filename [<город>]
При выгрузке в html можно по коду погоды (weather.id) подтянуть
соответствующие картинки отсюда: http://openweathermap.org/weather-conditions
Экспорт происходит в файл filename.
Опционально можно задать в командной строке город. В этом случае
экспортируются только данные по указанному городу. Если города нет в базе -
выводится соответствующее сообщение.
"""
import sys
import sqlite3
db_filename = 'db_weather.sqlite'
#sys.argv = ['export_openweather.py', 'weather.html', 'MX']
try:
filename = sys.argv[1]
country = sys.argv[2]
except IndexError:
print("Задан неверный параметр. Файл должен быть запущен с указанием параметров: export_openweather.py filename [<город>]")
print(sys.argv)
html_string = '''
<!DOCTYPE html>
<html>
<head>
<title>Weather</title>
</head>
<body>
<h1>Погода на момент актуализации базы данных</h1>
<table border = "1">
<tbody>
<tr>
<th align="center" width="auto">id_города</th>
<th align="center" width="auto">Город</th>
<th align="center" width="auto">Страна</th>
<th align="center" width="auto">Дата</th>
<th align="center" width="auto">Температура</th>
<th align="center" width="auto">id_погоды</th>
<th align="center" width="auto">Значок</th>
</tr>
'''
if len(sys.argv) == 3:
with sqlite3.connect(db_filename) as conn:
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('''
select distinct id_города, Город, Страна, Дата, Температура, id_погоды, Значок
from weather
where Страна = ?''', (country,))
db_rows = cur.fetchall()
cities = list(db_rows)
for city in cities:
#print(list(city))
if city:
#print(city)
#print(list(city))
html_string += '\t<tr>\n'
for k in list(city):
if k == list(city)[-1]:
path = "http://openweathermap.org/img/w/" + str(k) + ".png"
html_string += '\t\t<td align="center" width="auto"><img src=' + path + '></td>\n'
else:
html_string += '\t\t<td align="center" width="auto">' + str(k) + '</td>\n'
html_string += '\t</tr>\n'
else:
print("Города указанной страны отсутствуют в базе")
html_string += '''
</tbody>
</table>
</body>
</html>'''
elif len(sys.argv) == 4:
city = sys.argv[3]
with sqlite3.connect(db_filename) as conn:
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute('''
select distinct id_города, Город, Страна, Дата, Температура, id_погоды, Значок
from weather
where Город = ? and Страна = ?''', (city, country,))
db_rows = cur.fetchall()
cities = list(db_rows)
for city in cities:
# print(list(city))
if city:
# print(city)
# print(list(city))
html_string += '\t<tr>\n'
for k in list(city):
if k == list(city)[-1]:
path = "http://openweathermap.org/img/w/" + str(k) + ".png"
html_string += '\t\t<td align="center" width="auto"><img src=' + path + '></td>\n'
else:
html_string += '\t\t<td align="center" width="auto">' + str(k) + '</td>\n'
html_string += '\t</tr>\n'
else:
print("Город отсутствует в базе")
html_string += '''
</tbody>
</table>
</body>
</html>'''
encoded_str = html_string.encode(encoding='UTF-8')
with open(filename, 'w', encoding='UTF-8') as f:
f.write(html_string)
|
[
"ostrowskyi@gmail.com"
] |
ostrowskyi@gmail.com
|
7f12cf4f8c2a9dbbd0be88734b98d0c8b28eca87
|
e9bc070d1d9257c4a213bc1f33ca6269bbc37b43
|
/tests/roots/test-ext-autosummary/conf.py
|
f4d696cc912bb3108db71ca0fb841c3d904f7427
|
[
"BSD-3-Clause",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011",
"MIT",
"BSD-2-Clause"
] |
permissive
|
GoodRx/sphinx
|
99b33454afa06cf6a66d080c3c4019cc7ddde2f0
|
c310c73baffa4892cf35fd74918193824c86309a
|
refs/heads/1.6.x-py-type-xref
| 2021-01-01T06:02:33.415993
| 2017-07-16T03:12:58
| 2017-07-16T03:12:58
| 97,339,105
| 1
| 1
| null | 2017-07-16T03:12:58
| 2017-07-15T19:57:45
|
Python
|
UTF-8
|
Python
| false
| false
| 184
|
py
|
import sys, os
sys.path.insert(0, os.path.abspath('.'))
extensions = ['sphinx.ext.autosummary']
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
|
[
"i.tkomiya@gmail.com"
] |
i.tkomiya@gmail.com
|
c1b3876aae1a898188d4da189bd9db75e5afc8c6
|
41249d7d4ca9950b9c6fee89bf7e2c1929629767
|
/results/lz_optimizations_20200507/script_lz_crab4freq_powell_bound10_constantFreqAndInitAmps_tf0-1.py
|
d14345a8c9437a041da7e650381b2b1114829de0
|
[
"MIT"
] |
permissive
|
lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381
|
f739b3baad1d2aadda576303bb0bbe9d48ec204a
|
29f80dcf914096555cee9bc2e18249a2c95d6a50
|
refs/heads/master
| 2022-11-22T00:44:09.998199
| 2020-07-21T08:35:28
| 2020-07-21T08:35:28
| 281,237,037
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
import os
import sys
import numpy as np
import pandas as pd
import logging
if '../../' not in sys.path:
sys.path.append('../../')
import src.optimization as optimization
import src.protocol_ansatz as protocol_ansatz
from src.utils import autonumber_filename, basic_logger_configuration
output_file_name = os.path.basename(__file__)[7:-3] + '.csv'
output_file_name = autonumber_filename(output_file_name)
basic_logger_configuration(filename=output_file_name[:-3] + 'log')
logging.info('Output file name will be "{}"'.format(output_file_name))
# ------ start optimization
num_frequencies = 4
protocol = protocol_ansatz.CRABProtocolAnsatz(num_frequencies=num_frequencies)
protocol.generate_rnd_frequencies_each_tf = False
for idx in range(num_frequencies):
protocol.hyperpars['nuk' + str(idx + 1)] = 0
protocol.fill_hyperpar_value(y0=-5, y1=0)
results = optimization.find_best_protocol(
problem_specification=dict(
model='lz',
model_parameters=dict(omega_0=1),
task=dict(initial_intensity=-5, final_intensity=0)
),
optimization_specs=dict(
protocol=protocol,
protocol_options=dict(num_frequencies=num_frequencies),
optimization_method='powell',
parameters_constraints=[-10, 10],
initial_parameters=[0] * (2 * num_frequencies)
),
other_options=dict(
scan_times=np.linspace(0.01, 1, 200)
)
)
# ------ save results to file
results.to_csv(output_file_name)
|
[
"lukeinnocenti@gmail.com"
] |
lukeinnocenti@gmail.com
|
ced0baa0e9192cab080e7e0c0c749c9c7e56e9a1
|
1da91735d1a4d19e62b2d19826d9a1e85d88d690
|
/dxpy/dxpy/task/model/tests/test_task.py
|
32e1f9139b28e9e0836aef2a1a5c31a6253ebbf0
|
[] |
no_license
|
Hong-Xiang/dxl
|
94229e4c20f0c97dfe21f8563889c991330df9c3
|
29aed778d1c699cc57d09666a20b4ca60196392f
|
refs/heads/master
| 2021-01-02T22:49:20.298893
| 2018-05-22T13:42:20
| 2018-05-22T13:42:20
| 99,401,725
| 1
| 1
| null | 2018-05-22T13:42:21
| 2017-08-05T05:34:35
|
Python
|
UTF-8
|
Python
| false
| false
| 3,063
|
py
|
import json
import unittest
from dxpy.task.model import task
from dxpy.time.timestamps import TaskStamp
from dxpy.time.utils import strp
class TestTask(unittest.TestCase):
def test_to_json(self):
t = task.Task(tid=10, desc='test', workdir='/tmp/test',
worker=task.Worker.MultiThreading,
ttype=task.Type.Regular,
dependency=[1, 2, 3],
time_stamp=TaskStamp(create=strp(
"2017-09-22 12:57:44.036185")),
data={'sample': 42},
is_root=True)
s = t.to_json()
dct = json.loads(s)
self.assertEqual(dct['id'], 10)
self.assertEqual(dct['desc'], 'test')
self.assertEqual(dct['dependency'], [1, 2, 3])
self.assertEqual(dct['data'], {'sample': 42})
self.assertEqual(dct['type'], 'Regular')
self.assertEqual(dct['workdir'], '/tmp/test')
self.assertEqual(dct['worker'], 'MultiThreading')
self.assertEqual(dct['is_root'], True)
self.assertEqual(dct['time_stamp'], {
'create': "2017-09-22 12:57:44.036185", 'start': None, 'end': None})
self.assertEqual(dct['state'], 'BeforeSubmit')
def test_from_json(self):
dct = {
'__task__': True,
'id': 10,
'desc': 'test',
'workdir': '/tmp/test',
'worker': 'Slurm',
'type': 'Script',
'dependency': [1, 2, 3],
'data': {'sample': 42},
'is_root': True,
'time_stamp': {
'create': "2017-09-22 12:57:44.036185",
'start': None,
'end': None
},
'state': 'BeforeSubmit'
}
t = task.Task.from_json(json.dumps(dct))
self.assertEqual(t.id, 10)
self.assertEqual(t.desc, 'test')
self.assertEqual(t.workdir, '/tmp/test')
self.assertEqual(t.worker, task.Worker.Slurm)
self.assertEqual(t.type, task.Type.Script)
self.assertEqual(t.dependency, [1, 2, 3])
self.assertEqual(t.data, {'sample': 42})
self.assertEqual(t.is_root, True)
self.assertEqual(t.time_stamp.create, strp(
"2017-09-22 12:57:44.036185"))
self.assertEqual(t.state, task.State.BeforeSubmit)
def test_submit(self):
t = task.Task(10, 'test', state=task.State.BeforeSubmit)
self.assertEqual(t.state, task.State.BeforeSubmit)
t = task.submit(t)
self.assertEqual(t.state, task.State.Pending)
def test_start(self):
t = task.Task(10, 'test', state=task.State.BeforeSubmit)
self.assertEqual(t.state, task.State.BeforeSubmit)
t = task.start(t)
self.assertEqual(t.state, task.State.Runing)
def test_complete(self):
t = task.Task(10, 'test', state=task.State.BeforeSubmit)
self.assertEqual(t.state, task.State.BeforeSubmit)
t = task.complete(t)
self.assertEqual(t.state, task.State.Complete)
|
[
"hx.hongxiang@gmail.com"
] |
hx.hongxiang@gmail.com
|
aa6af7048c44cea9653dd669212be652afc07c82
|
960b3a17a4011264a001304e64bfb76d669b8ac5
|
/mstrio/api/authentication.py
|
ee18f3ec2d1622d62b49c9697d82696d49d54468
|
[
"Apache-2.0"
] |
permissive
|
MicroStrategy/mstrio-py
|
012d55df782a56dab3a32e0217b9cbfd0b59b8dd
|
c6cea33b15bcd876ded4de25138b3f5e5165cd6d
|
refs/heads/master
| 2023-08-08T17:12:07.714614
| 2023-08-03T12:30:11
| 2023-08-03T12:30:11
| 138,627,591
| 84
| 60
|
Apache-2.0
| 2023-07-31T06:43:33
| 2018-06-25T17:23:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,218
|
py
|
from mstrio.utils.error_handlers import ErrorHandler
@ErrorHandler(
err_msg='Authentication error. Check user credentials or REST API URL and try again'
)
def login(connection):
"""Authenticate a user and create an HTTP session on the web server where
the user's MicroStrategy sessions are stored.
This request returns an authorization token (X-MSTR-AuthToken) which will be
submitted with subsequent requests. The body of the request contains
the information needed to create the session. The loginMode parameter in
the body specifies the authentication mode to use. You can authenticate with
one of the following authentication modes: Standard (1), Anonymous (8),
or LDAP (16). Authentication modes can be enabled through the System
Administration REST APIs, if they are supported by the deployment.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.post(
skip_expiration_check=True,
url=f'{connection.base_url}/api/auth/login',
data={
'username': connection.username,
'password': connection._Connection__password,
'loginMode': connection.login_mode,
'applicationType': 35,
},
)
@ErrorHandler(err_msg="Failed to logout.")
def logout(connection, error_msg=None, whitelist=None):
"""Close all existing sessions for the authenticated user.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.post(
skip_expiration_check=True,
url=f'{connection.base_url}/api/auth/logout',
headers={'X-MSTR-ProjectID': None},
)
def session_renew(connection):
"""Extends the HTTP and Intelligence Server sessions by resetting the
timeouts.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.put(
skip_expiration_check=True,
url=f'{connection.base_url}/api/sessions',
headers={'X-MSTR-ProjectID': None},
timeout=2.0,
)
def session_status(connection):
"""Checks Intelligence Server session status.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.get(
skip_expiration_check=True,
url=f'{connection.base_url}/api/sessions',
headers={'X-MSTR-ProjectID': None},
)
@ErrorHandler(err_msg='Could not get identity token.')
def identity_token(connection):
"""Create a new identity token.
An identity token is used to share an existing session with another
project, based on the authorization token for the existing
session.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.post(
url=f'{connection.base_url}/api/auth/identityToken',
)
def validate_identity_token(connection, identity_token):
"""Validate an identity token.
Args:
connection: MicroStrategy REST API connection object
identity_token: Identity token
Returns:
Complete HTTP response object.
"""
return connection.get(
url=f'{connection.base_url}/api/auth/identityToken',
headers={'X-MSTR-IdentityToken': identity_token},
)
@ErrorHandler(
err_msg='Error creating a new Web server session that shares an existing IServer '
'session.'
)
def delegate(connection, identity_token, whitelist=None):
"""Returns authentication token and cookies from given X-MSTR-
IdentityToken.
Args:
connection: MicroStrategy REST API connection object
identity_token: Identity token
whitelist: list of errors for which we skip printing error messages
Returns:
Complete HTTP response object.
"""
return connection.post(
skip_expiration_check=True,
url=f'{connection.base_url}/api/auth/delegate',
json={'loginMode': "-1", 'identityToken': identity_token},
)
@ErrorHandler(err_msg='Error getting privileges list.')
def user_privileges(connection):
"""Get the list of privileges for the authenticated user.
The response includes the name, ID, and description of each
privilege and specifies which projects the privileges are valid for.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.get(url=f"{connection.base_url}/api/sessions/privileges")
@ErrorHandler(err_msg='Error getting info for authenticated user.')
def get_info_for_authenticated_user(connection, error_msg=None):
"""Get information for the authenticated user.
Args:
connection: MicroStrategy REST API connection object
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
Complete HTTP response object.
"""
url = f'{connection.base_url}/api/sessions/userInfo'
return connection.get(url=url)
|
[
"noreply@github.com"
] |
MicroStrategy.noreply@github.com
|
ec9c0cd180f50fb23acae69744788f81a9bfa036
|
8ccf7e6a93256fd83fed2bb7bd4f8bbe13dc1f40
|
/Assignment 3. Paxos/Simulation/Agents/Proposer.py
|
c35f8b2ea5e2ba44032b554a298ca176490310d9
|
[
"MIT"
] |
permissive
|
WailAbou/Distributed-Processing
|
5e2b84edc86b6d709c2599d82434731c6fd64dd6
|
46a36f1fd51d6f8b35cc639eb8002d81d7e09f2b
|
refs/heads/main
| 2023-05-28T05:52:39.790190
| 2021-06-14T00:57:08
| 2021-06-14T00:57:08
| 367,988,336
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
from Simulation.Agents import Agent
from Simulation.Message import Message, MessageTypes
class Proposer(Agent):
max_id = 0
def __init__(self, name, agent_id, value=None):
super().__init__(name, agent_id, value)
self.votes = 0
self.majority = False
self.suggested_value = None
self.consensus = False
Proposer.max_id = max(Proposer.max_id, agent_id + 1)
def recieve_promise(self, message, majority):
if message.source.value:
self.value = max(self.value, message.source.value)
self.votes += 1
if self.votes >= majority and not self.majority:
self.majority = True
return lambda acceptor: Message(message.destination, acceptor, MessageTypes.ACCEPT)
def recieve_accepted(self, message):
self.consensus = True
def init_value(self, value):
self.value = value
self.suggested_value = value
def reset(self):
self.votes = 0
self.majority = False
self.agent_id = Proposer.max_id
|
[
"abou.w@hotmail.com"
] |
abou.w@hotmail.com
|
b12c14f2d187174e8f714e4790ec36839780011f
|
ac5d55e43eb2f1fb8c47d5d2a68336eda181d222
|
/Reservoir Sampling/382. Linked List Random Node.py
|
535508fa3eecbcc13bfe833e95712b6200c347d5
|
[] |
no_license
|
tinkle1129/Leetcode_Solution
|
7a68b86faa37a3a8019626e947d86582549374b3
|
1520e1e9bb0c428797a3e5234e5b328110472c20
|
refs/heads/master
| 2021-01-11T22:06:45.260616
| 2018-05-28T03:10:50
| 2018-05-28T03:10:50
| 78,925,011
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,831
|
py
|
# - * - coding:utf8 - * - -
###########################################
# Author: Tinkle
# E-mail: shutingnjupt@gmail.com
# Name: Linked List Random Node.py
# Creation Time: 2017/9/24
###########################################
'''
Given a singly linked list, return a random node's value from the linked list. Each node must have the same probability of being chosen.
Follow up:
What if the linked list is extremely large and its length is unknown to you? Could you solve this efficiently without using extra space?
Example:
// Init a singly linked list [1,2,3].
ListNode head = new ListNode(1);
head.next = new ListNode(2);
head.next.next = new ListNode(3);
Solution solution = new Solution(head);
// getRandom() should return either 1, 2, or 3 randomly. Each element should have equal probability of returning.
solution.getRandom();
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
import random
class Solution(object):
def __init__(self, head):
"""
@param head The linked list's head.
Note that the head is guaranteed to be not null, so it contains at least one node.
:type head: ListNode
"""
self.head = head
self.length = 0
ans = head
while (ans):
self.length += 1
ans = ans.next
def getRandom(self):
"""
Returns a random node's value.
:rtype: int
"""
index = random.randint(1, self.length) - 1
idx = 0
ans = self.head
while (idx < index):
ans = ans.next
idx += 1
return ans.val
# Your Solution object will be instantiated and called as such:
# obj = Solution(head)
# param_1 = obj.getRandom()
|
[
"496047829@qq.com"
] |
496047829@qq.com
|
68e09501a51d712d45387f738b12c0239a752984
|
b4777bf27a6d10d0e5b1c51351f9ad14a049b5e7
|
/results_discrete_paradigm_acc.py
|
1f08f50c522ed31784d9ff4e831821666ace9b7e
|
[] |
no_license
|
bioelectric-interfaces/cfir
|
1216ba1b62935f99f8821ccce2577be9cf71c6b8
|
6034b5216352e5d933405bccbe9a67b9e89c4735
|
refs/heads/master
| 2022-07-12T10:45:17.758669
| 2020-03-10T13:34:10
| 2020-03-10T13:34:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,356
|
py
|
"""
Figure 5: Discrete paradigm accuracy for one subject with median SNR
"""
import pandas as pd
import pylab as plt
import numpy as np
import seaborn as sns
from filters import CFIRBandEnvelopeDetector, RectEnvDetector
from utils import magnitude_spectrum
from constants import FS, DELAY_RANGE
from sklearn.metrics import roc_auc_score, average_precision_score, balanced_accuracy_score
def get_classes(y, alpha, n_states=3):
y_pred = np.zeros(len(y))
if n_states == 3:
y_pred[y > np.percentile(y, alpha)] = 1
y_pred[y > np.percentile(y, 100 - alpha)] = 2
if n_states == 2:
y_pred[y > np.percentile(y, 100 - alpha)] = 1
return y_pred
# --- Load the single subject used for the figure ---------------------------
dataset = 8
eeg_df = pd.read_pickle('data/train_test_data.pkl').query('subj_id=={}'.format(dataset))
envelope = eeg_df['an_signal'].abs().values
band = eeg_df[['band_left', 'band_right']].values[0]
magnitude_spectrum_train = {}
_, weights = magnitude_spectrum(eeg_df['eeg'].values, FS)
stats_df = pd.read_pickle('results/stats.pkl').query('subj_id=={}'.format(dataset))
# One fixed color per envelope-detection method, reused across panels.
flatui = {'cfir':'#0099d8', 'acfir': '#84BCDA', 'wcfir':'#FE4A49', 'rect':'#A2A79E'}
alpha=5
#DELAY_RANGE = np.linspace(-50, 100, 51, dtype=int)
acc = np.zeros(len(DELAY_RANGE))
acc_rand = np.zeros(len(DELAY_RANGE))
fig, axes = plt.subplots(2, 2, sharey='col', figsize=(6,6))
plt.subplots_adjust(hspace=0.4, wspace=0.4)
# Right column: balanced accuracy vs. delay, for the 2-state (top row)
# and 3-state (bottom row) discretizations of the envelope.
for j_n_states, n_states in enumerate([2, 3]):
    y_true = get_classes(envelope, alpha, n_states)
    for method_name, method_class in zip(
            ['cfir', 'rect', 'wcfir'],
            [CFIRBandEnvelopeDetector, RectEnvDetector, CFIRBandEnvelopeDetector]):
        acc = np.zeros(len(DELAY_RANGE))*np.nan
        for d, DELAY in enumerate(DELAY_RANGE):
            # 'rect' is only evaluated at non-negative delays (negative
            # delays are skipped here and masked again below).
            if method_name == 'rect' and DELAY <0: continue
            # Look up the hyper-parameters previously optimized (by the
            # 'corr' metric) for this method/delay; delays in stats.pkl are
            # stored as DELAY*2 — presumably ms at FS = 500 Hz (TODO confirm).
            params = stats_df.query('method=="{}" & metric=="corr" & delay=="{}"'.format(method_name, DELAY*2))['params'].values[0]
            # 'wcfir' is CFIRBandEnvelopeDetector with spectral weights.
            params['weights'] = weights if method_name == 'wcfir' else None
            env_det = method_class(band=band, fs=FS, delay=DELAY, **params)
            envelope_pred = np.abs(env_det.apply(eeg_df['eeg'].values))
            # params = stats_df.query('method=="rect" & metric=="corr"')['params'].values[0]
            # env_det = WHilbertFilter(band=band, fs=FS, delay=DELAY, **params)
            # envelope_pred = np.abs(env_det.apply(eeg_df['eeg'].values))
            #
            # params = stats_df.query('method=="whilbert" & metric=="corr"')['params'].values[0]
            # env_det = WHilbertFilter(band=band, fs=FS, **params)
            # envelope_pred = np.abs(env_det.apply(eeg_df['eeg'].values))
            #
            # params = stats_df.query('method=="ffiltar" & metric=="corr"')['params'].values[0]
            # env_det = RectEnvDetector(band, FS, params['n_taps'], DELAY)
            # env_det = WHilbertFilter(band=band, fs=FS, **params)
            y_pred = get_classes(envelope_pred, alpha, n_states)
            acc[d] = balanced_accuracy_score(y_true, y_pred) if (method_name in ['cfir', 'wcfir'] or DELAY>=0) else np.nan
        axes[j_n_states, 1].plot(DELAY_RANGE*2, acc*100, '.-', label=method_name, color=flatui[method_name])
    # Constant "all-high" baseline: predict a single class everywhere.
    axes[j_n_states, 1].plot(DELAY_RANGE*2, DELAY_RANGE*0 + balanced_accuracy_score(y_true, y_true*0)*100, '.-', color='k', label='all-high')
# [ax.set_xlabel('Delay, ms') for ax in axes[:, 1]]
axes[1, 1].set_xlabel('Delay, ms')
axes[1, 1].legend()
axes[0, 1].set_ylabel('Balanced accuracy score, %')
axes[1, 1].set_ylabel('Balanced accuracy score, %')
axes[0, 0].set_title('A. High/Other\n', x = 0)
axes[1, 0].set_title('B. High/Middle/Low\n', ha='right')
# Mark zero delay on both accuracy panels.
[ax.axvline(0, color='k', linestyle='--', alpha=0.5, zorder=-1000) for ax in axes[:, 1]]
# plt.plot(envelope0ms)
# plt.plot(envelope)
#
# sns.kdeplot(envelope, envelope0ms)
# plt.savefig('results/viz/res-classification.png', dpi=500)
ax = axes
# fig, ax = plt.subplots(2, figsize=(6, 6))
# Left column: the envelope trace (in uV) with the percentile thresholds
# that define the discrete states of each paradigm.
up = np.percentile(envelope*1e6, 100-alpha)
low = np.percentile(envelope*1e6, alpha)
t = np.arange(len(envelope))/500
ax[0, 0].plot(t-58, envelope*1e6, color='k')
ax[0, 0].axhline(np.percentile(envelope*1e6, 100-alpha), color='k', linestyle='--')
ax[0, 0].text(8.5, up+4, 'High', ha='center')
ax[0, 0].text(8.5, up-3, 'Other', ha='center')
# plt.axhspan(np.percentile(envelope*1e6, alpha), np.percentile(envelope*1e6, 100-alpha), color=flatui['cfir'], alpha=0.5)
# plt.axhspan(np.percentile(envelope*1e6, alpha), -1000, color=flatui['wcfir'], alpha=0.5)
ax[0, 0].set_ylim(-7, 20)
ax[0, 0].set_xlim(0, 10)
ax[0, 0].set_ylabel('Envelope, $uV$')
ax[1, 0].plot(t-58, envelope*1e6, color='k')
ax[1, 0].axhline(np.percentile(envelope*1e6, 100-alpha), color='k', linestyle='--')
ax[1, 0].axhline(np.percentile(envelope*1e6, alpha), color='k', linestyle='--')
ax[1, 0].text(8.5, up+4, 'High', ha='center')
ax[1, 0].text(8.5, up-3, 'Middle', ha='center')
ax[1, 0].text(8.5, low-5, 'Low', ha='center')
# plt.axhspan(np.percentile(envelope*1e6, alpha), np.percentile(envelope*1e6, 100-alpha), color=flatui['cfir'], alpha=0.5)
# plt.axhspan(np.percentile(envelope*1e6, alpha), -1000, color=flatui['wcfir'], alpha=0.5)
ax[1, 0].set_ylim(-7, 20)
ax[1, 0].set_xlim(0, 10)
ax[1, 0].set_ylabel('Envelope, $uV$')
ax[1, 0].set_xlabel('Time, s')
# plt.savefig('results/viz/res-classification-explained.png', dpi=500)
|
[
"n.m.smetanin@gmail.com"
] |
n.m.smetanin@gmail.com
|
7aa41765cd6860e2540b6f799c4551cd82d47f48
|
34148545a20f0b9fe07860d1107e6aab2ec1f75d
|
/info_spider/Scrapy_History_Hanchao_V1_01/build/lib/Scrapy_History_Hanchao_V1_01/spiders/Zhuixue_01.py
|
139bef56439c9928931b6c7045a6f1948b1c9a0b
|
[] |
no_license
|
tangzhutao/chf
|
9bb9fa9b6ad75f1b587364e1005922c5bdddb4ca
|
4b249aee9689d3669306bbf020ad7fbb7e6b92bc
|
refs/heads/master
| 2022-12-03T03:55:17.308231
| 2020-08-21T09:57:47
| 2020-08-21T09:57:47
| 288,969,437
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,641
|
py
|
# -*- coding: utf-8 -*-
import scrapy, time, re
from scrapy.utils import request
from Scrapy_History_Hanchao_V1_01.items import InfoItem
import requests
from urllib3 import encode_multipart_formdata
from Scrapy_History_Hanchao_V1_01.ApolloConfig import IMAGES_STORE, SPIDER_NAME, UPLOADURL
class Zhuixue01Spider(scrapy.Spider):
    """Spider for the Han-dynasty history section of lishi.zhuixue.net.

    Crawls three listing pages, follows every article link, fills an
    ``InfoItem`` per article and re-uploads inline images to the
    project's upload service (``UPLOADURL``).
    """
    name = 'Zhuixue_01'
    base_url = 'http://lishi.zhuixue.net'
    url_name = '追学网'  # site name ("Zhuixue" — the source website)
    def start_requests(self):
        """Yield requests for listing pages 1-3 of category list_43."""
        for i in range(3):
            url = f'http://lishi.zhuixue.net/hanchao/list_43_{i + 1}.html'
            req = scrapy.Request(url=url, callback=self.parse, dont_filter=True)
            yield req
    def parse(self, response):
        """Follow every article link found on a listing page.

        The request fingerprint is stashed in ``meta['news_id']`` so the
        detail parser can use it as a stable article identifier.
        """
        get_info = response.xpath('//div[@class="list1"]/li/a/@href').extract()
        for info in get_info:
            url = self.base_url + info
            req = scrapy.Request(url=url, callback=self.detail_parse, dont_filter=True)
            news_id = request.request_fingerprint(req)
            req.meta.update({'news_id': news_id})
            yield req
    def detail_parse(self, response):
        """Extract one article page into an ``InfoItem``.

        Images referenced in the article body are downloaded with the
        same request headers and re-uploaded; only their new URLs are
        stored.  The item is yielded only when the article has content.
        """
        # Rebuild a plain-dict copy of the request headers (scrapy stores
        # them as bytes) so `requests` can reuse them for image downloads.
        headers = {}
        for k, v in response.request.headers.items():
            headers[k.decode()] = v[0].decode()
        title = response.xpath('//ul[@class="lisbt"]/li[1]/span/h1/text()').extract_first()
        try:
            # First "YYYY-M-D H:M" match anywhere in the page; keep the date part.
            issue_time = re.findall(r'\d+-\d+-\d+ \d+:\d+', response.text)[0].split(' ')[0]
        except IndexError:
            issue_time = None
        content = response.xpath('//ul[@class="lisnr"]').extract_first()
        images_url = response.xpath('//ul[@class="lisnr"]//img/@src').extract()
        item = InfoItem()
        images = []
        if images_url:
            for image_url in images_url:
                # Relative image paths are resolved against the site root.
                if 'http' in image_url:
                    link = image_url
                else:
                    link = self.base_url + image_url
                res = self.download_img(link, headers)
                if res['success']:
                    self.logger.info({'图片下载完成': link})  # "image download finished"
                    images.append(res['data']['url'])
                else:
                    self.logger.info({'图片下载失败': link})  # "image download failed"
        item['images'] = ','.join(images) if images else None
        item['category'] = '汉朝'
        item['content_url'] = response.url
        item['title'] = title
        item['issue_time'] = issue_time if issue_time else None
        item['information_source'] = '历史追学网'
        item['sign'] = '19'
        item['news_id'] = response.meta['news_id']
        item['content'] = content
        item['author'] = None
        item['title_image'] = None
        item['attachments'] = None
        item['area'] = None
        item['address'] = None
        item['tags'] = None
        item['update_time'] = str(int(time.time() * 1000))
        item['source'] = None
        if content:
            yield item
        self.logger.info({'title': title, 'issue_time': issue_time})
    def download_img(self, url, headers):
        """Download *url* and re-upload it as multipart form data.

        Returns the upload service's JSON response (expected keys:
        ``success`` and ``data.url`` — presumably; verify against the
        upload service's contract).
        """
        resp = requests.get(url, headers=headers)
        file_name = url.split('/')[-1]
        file = {
            'file': (file_name, resp.content)
        }
        send_url = UPLOADURL + SPIDER_NAME
        encode_data = encode_multipart_formdata(file)
        file_data = encode_data[0]
        headers_from_data = {
            "Content-Type": encode_data[1]
        }
        response = requests.post(url=send_url, headers=headers_from_data, data=file_data).json()
        return response
if __name__ == '__main__':
    # Convenience entry point: run this spider via the Scrapy CLI.
    from scrapy import cmdline
    cmdline.execute(['scrapy', 'crawl', 'Zhuixue_01'])
|
[
"18819492919@163.com"
] |
18819492919@163.com
|
9f38297ffcb415afd27671f80d18b3c3ccc487db
|
cb57a9ea4622b94207d12ea90eab9dd5b13e9e29
|
/lc/python/1768_merge_strings_alternately.py
|
32222174bc34d1567b034641491b8b2e157d8c7a
|
[] |
no_license
|
boknowswiki/mytraning
|
b59585e1e255a7a47c2b28bf2e591aef4af2f09a
|
5e2f6ceacf5dec8260ce87e9a5f4e28e86ceba7a
|
refs/heads/master
| 2023-08-16T03:28:51.881848
| 2023-08-10T04:28:54
| 2023-08-10T04:28:54
| 124,834,433
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,459
|
py
|
# string and two pointers
# time O(max(m,n))
# space O(1)
class Solution:
    def mergeAlternately(self, word1: str, word2: str) -> str:
        """Interleave *word1* and *word2* one character at a time.

        Characters are taken alternately starting with *word1*; once the
        shorter word is exhausted, the remainder of the longer word is
        appended unchanged.  O(m + n) time and space.
        """
        pieces = []
        # zip stops at the shorter word, pairing characters in order.
        for a, b in zip(word1, word2):
            pieces.append(a)
            pieces.append(b)
        # At most one of these tails is non-empty.
        shorter = min(len(word1), len(word2))
        pieces.append(word1[shorter:])
        pieces.append(word2[shorter:])
        return "".join(pieces)
class Solution(object):
    def mergeAlternately(self, word1, word2):
        """Merge two strings by alternating characters, word1 first.

        Leftover characters of the longer string follow once the shorter
        one runs out.
        """
        chars = []
        # One pass over the longer length; each side contributes its
        # character while it still has one at this position.
        for k in range(max(len(word1), len(word2))):
            if k < len(word1):
                chars.append(word1[k])
            if k < len(word2):
                chars.append(word2[k])
        return "".join(chars)
class Solution(object):
    def mergeAlternately(self, word1, word2):
        """Return word1 and word2 interleaved, starting with word1."""
        # Pair up the overlapping prefixes, flatten the pairs, then
        # append whichever tail survives (at most one is non-empty).
        overlap = min(len(word1), len(word2))
        interleaved = [c for pair in zip(word1, word2) for c in pair]
        return "".join(interleaved) + word1[overlap:] + word2[overlap:]
|
[
"noreply@github.com"
] |
boknowswiki.noreply@github.com
|
ca674d56b645b5721ff9210287a3026a3c86b84d
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2801/58758/256072.py
|
829cc7621c561a24efea43b99bb9b2ba608d94f2
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
# Decide whether any three of the given stick lengths can form a
# non-degenerate triangle.
n = int(input())  # declared count; unused — len(nums) is used below
nums = [int(x) for x in input().split()]
nums.sort()
flag = False
# After sorting it suffices to test adjacent triples: for sorted values,
# if no adjacent triple satisfies a + b > c then no triple anywhere does.
for i in range(0, len(nums)-2):
    if nums[i] + nums[i+1] > nums[i+2]:
        flag = True
        break
if flag:
    print('YES')
else:
    print('NO')
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
754f3df17792c7911d0f110efed7a7832bb5de48
|
f4b2d9a0de1f7a26a8fd5afe25446e62dfa0fdb5
|
/Python/base_algorithm/base_sum.py
|
b3db43265b69011967ccd5ef53c5613268a1b43e
|
[] |
no_license
|
Alexanderklau/LeetCode
|
e675425cca0b4e2e6f94d8c1ce6df92bbec32ac7
|
6090fa602ab29aef40d41661e473058eaaec490d
|
refs/heads/master
| 2021-06-23T17:41:53.309882
| 2020-12-01T14:36:00
| 2020-12-01T14:36:00
| 148,267,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
# coding: utf-8
__author__ = "lau.wenbo"

"""
Gauss's closed-form summation.
"""


def sum_of_n(n):
    """Return the sum 1 + 2 + ... + n.

    The module advertises the Gauss method ("高斯解法") but the original
    body was an O(n) loop; this uses the actual closed form
    n * (n + 1) / 2, computed in O(1).  For n < 1 the result is 0,
    matching the original loop's behavior on empty ranges.
    """
    if n < 1:
        return 0
    return n * (n + 1) // 2


print(sum_of_n(100))
|
[
"429095816@qq.com"
] |
429095816@qq.com
|
234615d0dfa6ec1b4bb50bbc470a76d507001e80
|
58be8fc8996b98b624fb9784527b2dc588d4587c
|
/pybamm/models/submodels/active_material/stress_driven_active_material.py
|
61fbe41ec0392883bec8138e4988b5b026f60706
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
gwhite09/PyBaMM
|
b9f7b6b06bb37b6819e306356f5b8e90df8affff
|
033ad6384582a3e5d29ad48eeaa7fe92b98e2a29
|
refs/heads/main
| 2023-08-22T19:49:26.112089
| 2021-09-17T17:02:34
| 2021-09-17T17:02:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,401
|
py
|
#
# Class for varying active material volume fraction, driven by stress
#
import pybamm
from .base_active_material import BaseModel
class StressDriven(BaseModel):
    """Submodel for varying active material volume fraction, driven by stress, from
    [1]_ and [2]_.
    Parameters
    ----------
    param : parameter class
        The parameters to use for this submodel
    domain : str
        The domain of the model either 'Negative' or 'Positive'
    options : dict
        Additional options to pass to the model
    x_average : bool
        Whether to use x-averaged variables (SPM, SPMe, etc) or full variables (DFN)
    **Extends:** :class:`pybamm.active_material.BaseModel`
    References
    ----------
    .. [1] Ai, W., Kraft, L., Sturm, J., Jossen, A., & Wu, B. (2019). Electrochemical
           Thermal-Mechanical Modelling of Stress Inhomogeneity in Lithium-Ion Pouch
           Cells. Journal of The Electrochemical Society, 167(1), 013512.
    .. [2] Reniers, J. M., Mulder, G., & Howey, D. A. (2019). Review and performance
           comparison of mechanical-chemical degradation models for lithium-ion
           batteries. Journal of The Electrochemical Society, 166(14), A3189.
    """
    def __init__(self, param, domain, options, x_average):
        """Store the x-averaging flag and register the Reniers2019 citation."""
        super().__init__(param, domain, options=options)
        pybamm.citations.register("Reniers2019")
        self.x_average = x_average
    def get_fundamental_variables(self):
        """Declare the active material volume fraction as a state variable.

        For x-averaged models a single 'current collector' variable is
        created and broadcast over the electrode; otherwise the fraction
        varies through the electrode thickness.
        """
        domain = self.domain.lower() + " electrode"
        if self.x_average is True:
            eps_solid_xav = pybamm.Variable(
                "X-averaged " + domain + " active material volume fraction",
                domain="current collector",
            )
            eps_solid = pybamm.PrimaryBroadcast(eps_solid_xav, domain)
        else:
            eps_solid = pybamm.Variable(
                self.domain + " electrode active material volume fraction",
                domain=domain,
                auxiliary_domains={"secondary": "current collector"},
            )
        variables = self._get_standard_active_material_variables(eps_solid)
        return variables
    def get_coupled_variables(self, variables):
        """Compute the stress-driven rate of loss of active material (LAM).

        Reads the particle surface radial/tangential stresses (x-averaged
        or full, depending on ``self.x_average``), forms the hydrostatic
        surface stress and converts it into a volume-fraction change rate.
        """
        # obtain the rate of loss of active materials (LAM) by stress
        # This is loss of active material model by mechanical effects
        if self.x_average is True:
            stress_t_surf = variables[
                "X-averaged "
                + self.domain.lower()
                + " particle surface tangential stress"
            ]
            stress_r_surf = variables[
                "X-averaged " + self.domain.lower() + " particle surface radial stress"
            ]
        else:
            stress_t_surf = variables[
                self.domain + " particle surface tangential stress"
            ]
            stress_r_surf = variables[self.domain + " particle surface radial stress"]
        # Electrode-specific LAM parameters (proportionality, critical
        # stress and exponent).
        if self.domain == "Negative":
            beta_LAM = self.param.beta_LAM_n
            stress_critical = self.param.stress_critical_n
            m_LAM = self.param.m_LAM_n
        else:
            beta_LAM = self.param.beta_LAM_p
            stress_critical = self.param.stress_critical_p
            m_LAM = self.param.m_LAM_p
        # Hydrostatic stress at the particle surface.
        stress_h_surf = (stress_r_surf + 2 * stress_t_surf) / 3
        # compressive stress make no contribution
        stress_h_surf *= stress_h_surf > 0
        # assuming the minimum hydrostatic stress is zero for full cycles
        stress_h_surf_min = stress_h_surf * 0
        # Negative rate: active material is only ever lost, never gained.
        j_stress_LAM = (
            -(beta_LAM / self.param.t0_cr)
            * ((stress_h_surf - stress_h_surf_min) / stress_critical) ** m_LAM
        )
        deps_solid_dt = j_stress_LAM
        variables.update(
            self._get_standard_active_material_change_variables(deps_solid_dt)
        )
        return variables
    def set_rhs(self, variables):
        """Evolve the volume fraction: d(eps_solid)/dt = LAM rate."""
        Domain = self.domain + " electrode"
        if self.x_average is True:
            eps_solid = variables[
                "X-averaged " + Domain.lower() + " active material volume fraction"
            ]
            deps_solid_dt = variables[
                "X-averaged "
                + Domain.lower()
                + " active material volume fraction change"
            ]
        else:
            eps_solid = variables[Domain + " active material volume fraction"]
            deps_solid_dt = variables[
                Domain + " active material volume fraction change"
            ]
        self.rhs = {eps_solid: deps_solid_dt}
    def set_initial_conditions(self, variables):
        """Initialise the volume fraction from the electrode's epsilon_s profile,
        x-averaging it when the submodel is x-averaged."""
        if self.domain == "Negative":
            x_n = pybamm.standard_spatial_vars.x_n
            eps_solid_init = self.param.epsilon_s_n(x_n)
        elif self.domain == "Positive":
            x_p = pybamm.standard_spatial_vars.x_p
            eps_solid_init = self.param.epsilon_s_p(x_p)
        if self.x_average is True:
            eps_solid_xav = variables[
                "X-averaged "
                + self.domain.lower()
                + " electrode active material volume fraction"
            ]
            self.initial_conditions = {eps_solid_xav: pybamm.x_average(eps_solid_init)}
        else:
            eps_solid = variables[
                self.domain + " electrode active material volume fraction"
            ]
            self.initial_conditions = {eps_solid: eps_solid_init}
|
[
"valentinsulzer@hotmail.com"
] |
valentinsulzer@hotmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.