| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3–288) | content_id (string, len 40) | detected_licenses (list, len 0–112) | license_type (string, 2 classes) | repo_name (string, len 5–115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k–681M, ⌀ = null) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, ⌀ = null) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, ⌀ = null) | gha_language (string, 147 classes) | src_encoding (string, 25 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128–12.7k) | extension (string, 142 classes) | content (string, len 128–8.19k) | authors (list, len 1) | author_id (string, len 1–132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
de94c56c7667c7a7c99683be9a0bf04d00ecca56
|
a5688a923c488414ecffcb92e3405d3876f1889d
|
/examples/computer_vision/mmdetection_pytorch/configs/guided_anchoring/ga_faster_x101_64x4d_fpn_1x_coco.py
|
28f4fd4b13801f9cb47ceb3d715244d40c281c70
|
[
"Apache-2.0"
] |
permissive
|
armandmcqueen/determined
|
ae6e7a4d5d8c3fb6a404ed35519643cf33bd08e4
|
251e7093b60a92633b684586ac7a566379442f15
|
refs/heads/master
| 2023-05-28T17:52:18.915710
| 2021-06-09T23:55:59
| 2021-06-09T23:55:59
| 259,449,481
| 0
| 0
|
Apache-2.0
| 2021-04-09T12:13:11
| 2020-04-27T20:47:23
|
Go
|
UTF-8
|
Python
| false
| false
| 376
|
py
|
_base_ = "./ga_faster_r50_fpn_1x_coco.py"
model = dict(
pretrained="open-mmlab://resnext101_64x4d",
backbone=dict(
type="ResNeXt",
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type="BN", requires_grad=True),
style="pytorch",
),
)
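
A minimal inspection sketch (not part of the repo): mmcv merges the `_base_` file into this config at load time, so the ResNeXt backbone above overrides the base R-50 one. The working directory and bare filename below are assumptions.

```python
# Hypothetical check, assuming mmcv is installed and run from the config's directory:
# Config.fromfile resolves "_base_" and returns the merged config.
from mmcv import Config

cfg = Config.fromfile("ga_faster_x101_64x4d_fpn_1x_coco.py")
print(cfg.model.backbone.type)   # expected: "ResNeXt"
print(cfg.model.backbone.depth)  # expected: 101
```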
|
[
"noreply@github.com"
] |
armandmcqueen.noreply@github.com
|
3af8710d6b01d30dfe73cc770a65abe5f3cdfb70
|
99da8a6d2392472cb66e5b12c03142c90640186a
|
/BOJ/DFS&BFS/2573.py
|
784c81d48ea577cc9b507ab56b9611ea6bc47a60
|
[] |
no_license
|
chorwonkim/__Algorithms__
|
cf6cf4ae5cf091d856397369b6db1bb41f925377
|
0c1e58410ae90b72c0d7e44a6179b8fedc786131
|
refs/heads/master
| 2022-09-28T16:59:20.841482
| 2022-09-25T09:57:58
| 2022-09-25T09:57:58
| 130,082,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,723
|
py
|
# import sys
# sys.setrecursionlimit(100000)
#
# dx = [-1, 0, 1, 0]
# dy = [0, -1, 0, 1]
#
# N, M = map(int, sys.stdin.readline().rstrip().split())
# ice_map = []
# ice_map_checker = []
# result = 0
#
#
# def func_2573(p, q):
# ice_map_checker[p][q] = True
#
# for k in range(4):
# ice_p = p + dx[k]
# ice_q = q + dy[k]
#
# if (not ice_map_checker[ice_p][ice_q]) and ice_map[ice_p][ice_q]:
# func_2573(ice_p, ice_q)
#
#
# for i in range(N):
# ice_map.append(list(map(int, sys.stdin.readline().rstrip().split())))
#
# while True:
# block = 0
# ice_map_checker.clear()
#
# for i in range(N):
# ice_map_checker.append([False] * M)
#
# for i in range(1, N-1):
# for j in range(1, M-1):
# if ice_map[i][j] and (not ice_map_checker[i][j]):
# block += 1
# func_2573(i, j)
#
# if block >= 2:
# print(result)
# break
# elif block == 0:
# print(0)
# break
#
# result += 1
#
# for i in range(N):
# for j in range(M):
# if (not ice_map_checker[i][j]) and (not ice_map[i][j]):
# for k in range(4):
# ice_x = i + dx[k]
# ice_y = j + dy[k]
#
# if 0 <= ice_x < N and 0 <= ice_y < M and ice_map[ice_x][ice_y]:
# ice_map[ice_x][ice_y] -= 1
import sys
sys.setrecursionlimit(100000)
Read = sys.stdin.readline
N, M = map(int, Read().split())
ice_map = [list(map(int, Read().split())) for _ in range(N)]
ice_map_checker = [[False for _ in range(M)] for _ in range(N)]
result = 0
def func_2573(p, q):
ice_map_checker[p][q] = True
for i, j in zip([-1,0,1,0], [0,-1,0,1]):
ice_p = p + i
ice_q = q + j
if (not ice_map_checker[ice_p][ice_q]) and ice_map[ice_p][ice_q]:
func_2573(ice_p, ice_q)
while True:
block = 0
ice_map_checker = [[False for _ in range(M)] for _ in range(N)]  # fresh visited grid for this melt cycle
for i in range(1, N-1):
for j in range(1, M-1):
if ice_map[i][j] and (not ice_map_checker[i][j]):
block += 1
func_2573(i, j)
if block >= 2:
print(result)
break
elif block == 0:
print(0)
break
result += 1
for i in range(N):
for j in range(M):
if (not ice_map_checker[i][j]) and (not ice_map[i][j]):
for p, q in zip([-1,0,1,0], [0,-1,0,1]):
ice_x = i + p
ice_y = j + q
if 0 <= ice_x < N and 0 <= ice_y < M and ice_map[ice_x][ice_y]:
ice_map[ice_x][ice_y] -= 1
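
For larger grids the recursive `func_2573` can still hit Python's recursion limit even after raising it; below is a sketch of the same flood fill with an explicit stack. The function name is new; the globals it touches mirror the code above.

```python
# Hypothetical iterative variant of func_2573: identical traversal, no recursion.
def func_2573_iterative(p, q):
    stack = [(p, q)]
    ice_map_checker[p][q] = True
    while stack:
        x, y = stack.pop()
        for i, j in zip([-1, 0, 1, 0], [0, -1, 0, 1]):
            nx, ny = x + i, y + j
            if (not ice_map_checker[nx][ny]) and ice_map[nx][ny]:
                ice_map_checker[nx][ny] = True
                stack.append((nx, ny))
```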
|
[
"upheaval212@gmail.com"
] |
upheaval212@gmail.com
|
6fe6df1f0c3cf8fbc8b71b768fcb421f521466e2
|
2d4127f5fa1bca8ba41b9da48d9180c64680b327
|
/openid_connect_op/utils/jwt.py
|
7c7f306b1959045ffe75b8aaff0c8c8189a54732
|
[
"MIT"
] |
permissive
|
WilliBobadilla/django-openid-op
|
ce02b7fe9db4fa8aca0cec4df003c905927dfbe3
|
732812cab7610080289ae70b8ea791ba9f0105ad
|
refs/heads/master
| 2023-04-19T08:49:19.962249
| 2021-05-03T18:21:26
| 2021-05-03T18:21:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,642
|
py
|
import datetime
from functools import lru_cache
import python_jwt as jwt
import jwcrypto.jwk as jwk
from django.conf import settings
from os import urandom
from jwcrypto.jws import JWS
from jwcrypto.common import base64url_encode, json_encode, json_decode
from calendar import timegm
# need to add "kid" header which the original python_jwt can not do
from openid_connect_op.models import OpenIDClient
def generate_jwt_patched(claims, priv_key=None,
algorithm='PS512', lifetime=None, expires=None,
not_before=None,
jti_size=16, extra_headers={}):
"""
Generate a JSON Web Token.
:param claims: The claims you want included in the signature.
:type claims: dict
:param priv_key: The private key to be used to sign the token. Note: if you pass ``None`` then the token will be returned with an empty cryptographic signature and :obj:`algorithm` will be forced to the value ``none``.
:type priv_key: `jwcrypto.jwk.JWK <https://jwcrypto.readthedocs.io/en/latest/jwk.html>`_
:param algorithm: The algorithm to use for generating the signature. ``RS256``, ``RS384``, ``RS512``, ``PS256``, ``PS384``, ``PS512``, ``ES256``, ``ES384``, ``ES512``, ``HS256``, ``HS384``, ``HS512`` and ``none`` are supported.
:type algorithm: str
:param lifetime: How long the token is valid for.
:type lifetime: datetime.timedelta
:param expires: When the token expires (if :obj:`lifetime` isn't specified)
:type expires: datetime.datetime
:param not_before: When the token is valid from. Defaults to current time (if ``None`` is passed).
:type not_before: datetime.datetime
:param jti_size: Size in bytes of the unique token ID to put into the token (can be used to detect replay attacks). Defaults to 16 (128 bits). Specify 0 or ``None`` to omit the JTI from the token.
:type jti_size: int
:rtype: unicode
:returns: The JSON Web Token. Note this includes a header, the claims and a cryptographic signature. The following extra claims are added, per the `JWT spec <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html>`_:
- **exp** (*IntDate*) -- The UTC expiry date and time of the token, in number of seconds from 1970-01-01T0:0:0Z UTC.
- **iat** (*IntDate*) -- The UTC date and time at which the token was generated.
- **nbf** (*IntDate*) -- The UTC valid-from date and time of the token.
- **jti** (*str*) -- A unique identifier for the token.
"""
header = {
'typ': 'JWT',
'alg': algorithm if priv_key else 'none'
}
header.update(extra_headers)
claims = dict(claims)
now = datetime.datetime.utcnow()
if jti_size:
claims['jti'] = base64url_encode(urandom(jti_size))
claims['nbf'] = timegm((not_before or now).utctimetuple())
claims['iat'] = timegm(now.utctimetuple())
if lifetime:
claims['exp'] = timegm((now + lifetime).utctimetuple())
elif expires:
claims['exp'] = timegm(expires.utctimetuple())
if header['alg'] == 'none':
signature = ''
else:
token = JWS(json_encode(claims))
token.add_signature(priv_key, protected=header)
signature = json_decode(token.serialize())['signature']
return u'%s.%s.%s' % (
base64url_encode(json_encode(header)),
base64url_encode(json_encode(claims)),
signature
)
class JWTTools:
@staticmethod
def generate_jwt(payload, for_client=None, ttl=None, from_client=None):
if for_client is None:
sign_alg = 'RS256'
elif for_client.client_auth_type == OpenIDClient.CLIENT_AUTH_TYPE_SECRET_JWT:
sign_alg = 'HS256'
else:
sign_alg = for_client.client_registration_data.get('id_token_signed_response_alg', 'RS256')
return JWTTools.generate_jwt_with_sign_alg(payload, sign_alg, ttl=ttl, client=from_client)
@staticmethod
def generate_jwt_with_sign_alg(payload, sign_alg, ttl=None, client=None):
from openid_connect_op.models import OpenIDClient
if not client:
client = OpenIDClient.self_instance()
if client.client_auth_type == client.CLIENT_AUTH_TYPE_SECRET_JWT:
sign_key = jwk.JWK(kty="oct", use="sig", alg="HS256", k=base64url_encode(client.client_hashed_secret))
extra_headers = {}
alg = 'HS256'
else:
sign_key = client.get_key(sign_alg)
extra_headers = {
'kid': sign_key.key_id
}
alg = sign_key._params['alg']
return generate_jwt_patched(payload,
sign_key,
alg,
extra_headers=extra_headers,
lifetime=ttl)
@staticmethod
def validate_jwt(token, client=None):
from openid_connect_op.models import OpenIDClient
if client is None:
client = OpenIDClient.self_instance()
if client.client_auth_type == OpenIDClient.CLIENT_AUTH_TYPE_SECRET_JWT:
key = jwk.JWK(kty="oct", use="sig", alg="HS256", k=base64url_encode(client.client_hashed_secret))
return jwt.verify_jwt(token, key, ['HS256'], checks_optional=True)
header, __ = jwt.process_jwt(token)
key = client.get_key(alg=header.get('alg', 'RS256'), kid=header.get('kid', None))
return jwt.verify_jwt(token, key, [key._params.get('alg', 'RS256')], checks_optional=True)
@staticmethod
def unverified_jwt_payload(token):
return jwt.process_jwt(token)[1]
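
A hypothetical usage sketch of `generate_jwt_patched` with a throwaway RSA key; the key generation, claim names, and `kid` value here are illustrative, not part of the module.

```python
# Hypothetical: sign a short-lived token and read its payload without verification.
import datetime
import jwcrypto.jwk as jwk

demo_key = jwk.JWK.generate(kty='RSA', size=2048, kid='demo-key')  # assumed demo key
token = generate_jwt_patched({'sub': 'user-1'}, demo_key, algorithm='RS256',
                             lifetime=datetime.timedelta(minutes=10),
                             extra_headers={'kid': 'demo-key'})
print(JWTTools.unverified_jwt_payload(token)['sub'])  # 'user-1'
```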
|
[
"miroslav.simek@vscht.cz"
] |
miroslav.simek@vscht.cz
|
24e1af42b6cf8518e8c366fe186394139c53ee9a
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/2f9vjBiynkBtF3TBi_2.py
|
78016bb10def75086de7b057fd4c4c5f57730f40
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
"""
In this challenge, you must verify the equality of two different values given
the parameters `a` and `b`.
Both the _value_ and _type_ of the parameters need to be equal. The possible
types of the given parameters are:
* Numbers
* Strings
* Booleans (`False` or `True`)
* Special values: `None`
What have you learned so far that will permit you to do two different checks
(value **and** type) with a single statement?
Implement a function that returns `True` if the parameters are equal, and
`False` if they are not.
### Examples
check_equality(1, True) ➞ False
# A number and a boolean: the value and type are different.
check_equality(0, "0") ➞ False
# A number and a string: the type is different.
check_equality(1, 1) ➞ True
# A number and a number: the type and value are equal.
### Notes
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
def check_equality(a, b):
return type(a) is type(b) and a == b  # both value and type must match; `a is b` only checks identity
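
A quick check against the examples documented in the prompt above (plain asserts, assuming the function is in scope):

```python
# Sanity checks mirroring the documented examples.
assert check_equality(1, True) is False  # int vs bool: types differ
assert check_equality(0, "0") is False   # int vs str: types differ
assert check_equality(1, 1) is True      # same type and value
```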
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
32f75d1ab64a5f215393d6f5c1848e0138edc409
|
eb5fa3bdbad17674e95b360694e6d794387a557c
|
/menuhin/middleware.py
|
2f5bf781bf9e2cac82946707eb46785c3e434752
|
[
"BSD-2-Clause-Views",
"BSD-2-Clause"
] |
permissive
|
kezabelle/django-menuhin
|
0648bc923fe159612846282a526f2c60d0f535a9
|
b9c4111eed04e241c29ca8ec95c8a3ffeaac48da
|
refs/heads/master
| 2021-05-02T02:00:44.896065
| 2014-12-19T10:00:50
| 2014-12-19T10:00:50
| 14,557,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,201
|
py
|
import logging
from django.utils.functional import SimpleLazyObject
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
from .models import MenuItem
from .utils import (LengthLazyObject, get_menuitem_or_none,
get_relations_for_request)
logger = logging.getLogger(__name__)
class RequestTreeMiddleware(object):
def get_ignorables(self):
if hasattr(settings, 'STATIC_URL') and settings.STATIC_URL:
yield settings.STATIC_URL
if hasattr(settings, 'MEDIA_URL') and settings.MEDIA_URL:
yield settings.MEDIA_URL
try:
yield reverse('admin:index')
except NoReverseMatch: # pragma: no cover
logger.debug("Admin is not mounted")
def process_request(self, request):
ignored_prefixes = tuple(self.get_ignorables())
if request.path.startswith(ignored_prefixes):
logger.debug("Skipping this request")
return None
def lazy_menuitem():
return get_menuitem_or_none(MenuItem, request.path)
def lazy_ancestors_func():
return get_relations_for_request(
model=MenuItem, request=request,
relation='get_ancestors').relations
def lazy_descendants_func():
return get_relations_for_request(
model=MenuItem, request=request,
relation='get_descendants').relations
def lazy_siblings_func():
return get_relations_for_request(
model=MenuItem, request=request,
relation='get_siblings').relations
def lazy_children_func():
return get_relations_for_request(
model=MenuItem, request=request,
relation='get_children').relations
request.menuitem = SimpleLazyObject(lazy_menuitem)
request.ancestors = LengthLazyObject(lazy_ancestors_func)
request.descendants = LengthLazyObject(lazy_descendants_func)
request.siblings = LengthLazyObject(lazy_siblings_func)
request.children = LengthLazyObject(lazy_children_func)
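
A hypothetical settings entry for enabling this middleware: only `process_request` is defined, so the class fits the legacy (pre-Django-1.10) middleware protocol, and the dotted path is inferred from `/menuhin/middleware.py`.

```python
# Assumed settings.py snippet; the real project settings are not shown in this row.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'menuhin.middleware.RequestTreeMiddleware',
)
```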
|
[
"keryn@kerynknight.com"
] |
keryn@kerynknight.com
|
da11b4cca48ae0c88eb3b8c66a041828ea0b021e
|
e38db85f6a13b32c60bf66d78838d6ed348f1798
|
/healthplans/tests.py
|
549fcf58df9886eecfa78cb4e957f908105e148f
|
[
"BSD-2-Clause"
] |
permissive
|
westurner/health-marketplace
|
ab72d60d9469c9f3622bc64c391b222018c7c7a1
|
15f5379cc213e2e2b2150e967b56092ea8468db2
|
refs/heads/master
| 2020-12-28T21:28:09.415775
| 2013-11-09T17:55:08
| 2013-11-09T17:55:08
| 14,261,431
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,880
|
py
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from healthplans.models import Provider
from healthplans.models import Plan
class HealthplansModelsTest(TestCase):
def test_011_provider_create(self):
provider = Provider.objects.create(
name="Test Provider")
self.assertIsNotNone(provider)
self.assertIsNotNone(provider.created_time)
self.assertIsNotNone(provider.updated_time)
def test_012_plan_create(self):
provider = Provider.objects.create(
name="Test Provider")
COST = 1.05
plan = Plan.objects.create(
provider=provider,
name="Test Plan",
category=Plan.PLATINUM,
base_rate=COST)
self.assertIsNotNone(plan)
self.assertIsNotNone(plan.created_time)
self.assertIsNotNone(plan.updated_time)
self.assertEqual(plan.base_rate, COST)
def test_021_provider_slug(self):
provider = Provider.objects.create(
name="Test Provider")
self.assertEqual(provider.slug, "Test-Provider")
def test_022_plan_slug(self):
provider = Provider.objects.create(
name="Test Provider")
COST = 1.05
plan = Plan.objects.create(
provider=provider,
name="Test Plan",
category=Plan.PLATINUM,
base_rate=COST)
self.assertIsNotNone(plan)
self.assertEqual(plan.slug, "Test-Plan")
from django.test import Client
class HealthplansViewsTest(TestCase):
def setUp(self):
self.client = Client()
def test_homepage_links(self):
response = self.client.get('/')
self.assertContains(response, 'href="/providers/"')
self.assertContains(response, 'href="/plans/"')
def test_provider_list(self):
provider = Provider.objects.create(
name="Test Provider") # TODO: fixtures
response = self.client.get('/providers/')
self.assertContains(response, provider.name)
self.assertContains(response, 'href="/providers/%s"' % provider.slug)
def test_provider_detail(self):
provider = Provider.objects.create(
name="Test Provider") # TODO: fixtures
response = self.client.get('/providers/%s' % provider.slug)
self.assertContains(response, provider.name)
self.assertContains(response, 'href="/providers/%s">' % provider.slug)
def test_plan_list(self):
provider = Provider.objects.create(
name="Test Provider") # TODO: fixtures
plan = Plan.objects.create(
provider=provider,
name="Test Plan",
category=Plan.PLATINUM,
base_rate=1.05)
response = self.client.get('/plans/')
self.assertContains(response, plan.name)
self.assertContains(response, 'href="/plans/%s"' % plan.slug)
def test_plan_detail(self):
provider = Provider.objects.create(
name="Test Provider") # TODO: fixtures
plan = Plan.objects.create(
provider=provider,
name="Test Plan",
category=Plan.PLATINUM,
base_rate=1.05)
response = self.client.get('/plans/%s' % plan.slug)
self.assertContains(response, plan.name)
self.assertContains(response, 'href="/plans/%s"' % plan.slug)
class HealthplansAdminTest(TestCase):
def setUp(self):
self.client = Client()
self._superuser_login()
def _superuser_login(self):
USERNAME = 'test'
EMAIL = 'test@example.org'
PASSWORD = 'TODOTODOTODO'
from django.db import DEFAULT_DB_ALIAS as db
from django.contrib.auth.models import User
User.objects.db_manager(db).create_superuser(
USERNAME, EMAIL, PASSWORD)
logged_in = self.client.login(
username=USERNAME,
password=PASSWORD)
self.assertEqual(logged_in, True)
def test_provider_admin(self):
provider = Provider.objects.create(
name="Test Provider") # TODO: fixtures
response = self.client.get('/admin/healthplans/provider/')
self.assertContains(response, provider.name)
def test_plan_admin(self):
provider = Provider.objects.create(
name="Test Provider") # TODO: fixtures
plan = Plan.objects.create(
provider=provider,
name="Test Plan",
category=Plan.PLATINUM,
base_rate=1.05)
response = self.client.get('/admin/healthplans/plan/')
self.assertContains(response, plan.name)
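
The `healthplans/models.py` module these tests exercise is not shown in this row; below is a minimal sketch consistent with the assertions. Field names and the capitalisation-preserving slug are inferred from the tests, so treat it as a guess.

```python
# Hypothetical model matching the tests above; plain slugify() would lowercase
# "Test Provider", but the tests expect "Test-Provider", hence the replace().
from django.db import models


class Provider(models.Model):
    name = models.CharField(max_length=128)
    slug = models.SlugField(blank=True)
    created_time = models.DateTimeField(auto_now_add=True)
    updated_time = models.DateTimeField(auto_now=True)

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = self.name.replace(' ', '-')
        super(Provider, self).save(*args, **kwargs)
```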
|
[
"wes@wrd.nu"
] |
wes@wrd.nu
|
098c796afacb67484884c0be523186710e54f517
|
32e6e405bebc7c63ca0a1721512a9abe4c2ed7cb
|
/food_reference_listing/database/archive_loader/initialize_database.py
|
86532879fe4e3fdf107fdcc2b05257eba61c3ebd
|
[
"MIT"
] |
permissive
|
bfssi-forest-dussault/food_reference_listing
|
d034d853a966967a94b6a1e82beee442786f1e7f
|
85372a81a9201dda02797ab0c11b1bd710f9b70d
|
refs/heads/master
| 2023-01-03T22:44:13.388149
| 2020-10-29T18:40:18
| 2020-10-29T18:40:18
| 241,152,908
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,294
|
py
|
import sys
import django
from pathlib import Path
# Need to do this in order to access the database models
sys.path.append("/app")
django.setup()
# For some bizarre reason I need to import all functions, rather than have them be defined in this file vOv
from food_reference_listing.database.archive_loader.helpers import *
"""
Script to load the archived data into the new database.
Call it using this command:
docker-compose -f local.yml run --rm django python manage.py shell < food_reference_listing/database/archive_loader/initialize_database.py
"""
# Script starts here
data_dir = Path('/app/food_reference_listing/database/archive_data')
assert data_dir.is_dir()
print(f'Confirmed data is available in {data_dir}')
# Keys correspond 1:1 with database.Model names
table_dict = {
'Acronym': data_dir / 'Acronyms.csv',
'AcronymType': data_dir / 'AcronymTypes.csv',
'Category': data_dir / 'Categories.csv',
'Country': data_dir / 'Countries.csv',
'ProvinceState': data_dir / 'ProvinceStates.csv',
'City': data_dir / 'Cities.csv',
'Company': data_dir / 'Companies.csv',
'Subcategory': data_dir / 'SubCategories.csv',
'Product': data_dir / 'Products_Combined.csv', # Combination of Products.csv and Final Web Update.csv
'Language': data_dir / 'Languages.csv',
}
print(f'Confirming expected source data files exist')
# Make sure our data files exist
for model, src in table_dict.items():
assert src.exists()
print(f'Deleting all entries in existing database')
# Cleanup while we debug
delete_all_rows_in_all_tables()
# Start populating tables
print(f'Recreating database with data files from {data_dir}')
populate_language_table(data=read_csv(table_dict['Language']))
populate_acronym_type_table(data=read_csv(table_dict['AcronymType']))
populate_acronym_table(data=read_csv(table_dict['Acronym']))
populate_category_table(data=read_csv(table_dict['Category']))
populate_country_table(data=read_csv(table_dict['Country']))
populate_provincestate_table(data=read_csv(table_dict['ProvinceState']))
populate_city_table(data=read_csv(table_dict['City']))
populate_company_table(data=read_csv(table_dict['Company']))
populate_subcategory_table(data=read_csv(table_dict['Subcategory']))
populate_product_table(data=read_csv(table_dict['Product']))
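
The helpers are wildcard-imported above, so `read_csv` is never shown; a plausible sketch of what the populate calls assume (the real implementation in `helpers.py` may differ):

```python
# Hypothetical helper: return each CSV row as a dict, as the populate_* calls expect.
import csv
from pathlib import Path

def read_csv(src: Path) -> list:
    with src.open(newline='', encoding='utf-8') as f:
        return list(csv.DictReader(f))
```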
|
[
"forest.dussault@canada.ca"
] |
forest.dussault@canada.ca
|
0e73a9a57c08e990f0dd90913c0a96dfe81fc02a
|
6f7032e954334d102a9e1eff8f420f0a8b7ee70a
|
/pytestpackage/test_conftest_demo1.py
|
2820a6af2e706f8e8ff16ee55e6d55b40b53b7c1
|
[] |
no_license
|
suchismitarout/selenium_practice
|
186596e14ed66550ef184703a04aa323faad9f45
|
2281e6a3a526f2ff5c1c566517dc85b4ae23b85b
|
refs/heads/master
| 2022-12-26T12:16:17.297471
| 2020-10-06T06:21:04
| 2020-10-06T06:21:04
| 301,632,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
import pytest
def test_demo_conf1(onetimesetUp):
print("This is conftest demo")
def test_demo_conf2(onetimesetUp):
print("This is second conftest demo")
|
[
"suchismitarout47@gmail.com"
] |
suchismitarout47@gmail.com
|
d16c9b91a5c897f61fb9bd659fd74a37ceeb379c
|
5995b039f039accf17538283a51668be929aeaea
|
/red_mind/red_mind/wsgi.py
|
6eb39eb6bbcbd2a0289055e40fd2dc5560db6e82
|
[] |
no_license
|
harshitdixit69/red_mind_heart
|
0c9bed294a8e89b05ecdd93590da25f79139ad6c
|
8693ddef44e949219eab6670a3b1b862646f3dd8
|
refs/heads/main
| 2022-12-29T23:17:51.061114
| 2020-10-14T21:12:21
| 2020-10-14T21:12:21
| 304,288,555
| 0
| 0
| null | 2020-10-15T10:21:03
| 2020-10-15T10:21:02
| null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for red_mind project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'red_mind.settings')
application = get_wsgi_application()
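
A hypothetical way to exercise the `application` callable locally with the stdlib WSGI server; production setups would use a real server instead, e.g. `gunicorn red_mind.wsgi:application`.

```python
# Assumed local smoke test; not part of the project.
from wsgiref.simple_server import make_server
from red_mind.wsgi import application

make_server('127.0.0.1', 8000, application).serve_forever()
```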
|
[
"satyam1998.1998@gmail.com"
] |
satyam1998.1998@gmail.com
|
d271a9cf8126ae56eb7272193959b38f7de60c87
|
7463a66dfa00572f4e4d8ef4349309531f0105ae
|
/ServerDL/cfgs/configfiles.py
|
3ba6e38d4d33dfb1119a53e1b5ed363987540186
|
[] |
no_license
|
fx19940824/DetectionModel
|
f2e380fd21f4b31a17fd175a6dea1067b8f0d5cc
|
edc0d2f9eea481d2bc6f3abb2f222b59fdc25538
|
refs/heads/master
| 2022-12-20T19:58:32.224829
| 2019-05-30T01:16:05
| 2019-05-30T01:16:05
| 188,800,679
| 2
| 0
| null | 2022-11-22T02:39:23
| 2019-05-27T08:13:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
from ServerDL.apis.transformers import *
from ServerDL.apis.postprocesses import *
from Algorithm.classifications.utils.model_factory import classification
from Algorithm.segmentation.deeplabv3plus.modeling.deeplab import DeepLab
model_register_table = dict()
model_register_table["maskrcnn_box"] = {
"model_weight_path": "/media/cobot/30b0f4a0-3376-4f8f-b458-9c6857504361/Projects/bag/resnext101/model_0660000.pth", # 选填,会覆盖model_cfg_string中WEIGHT的路径
"model_cfg_file":"/media/cobot/30b0f4a0-3376-4f8f-b458-9c6857504361/Projects/bag/resnext101/n_rcnn.yaml",
"model_type": "RCNN",
"model_transformer": build_transform_maskrcnn,
"model_handle_function": build_postprocess_plgdetection,
"model_network": "",
"confidence_threshold": 0.9
}
model_register_table["maskrcnn_bag"] = {
"model_weight_path": "/media/cobot/30b0f4a0-3376-4f8f-b458-9c6857504361/Projects/bag/resnext101/model_0660000.pth", # 选填,会覆盖model_cfg_string中WEIGHT的路径
"model_cfg_file":"/media/cobot/30b0f4a0-3376-4f8f-b458-9c6857504361/Projects/bag/resnext101/n_rcnn.yaml",
"model_type": "RCNN",
"model_transformer": build_transform_maskrcnn,
"model_handle_function": build_postprocess_plgdetection,
"model_network": "",
"confidence_threshold": 0.7
}
model_register_table["maskrcnn_tube"] = {
"model_weight_path": "/home/cobot/tube_model/model_0505000.pth",
"model_cfg_file": "/home/cobot/tube_model/n_rcnn.yaml",
"model_type": "RCNN",
"model_transformer": build_transform_maskrcnn,
"model_handle_function": build_postprocess_plgdetection,
"model_network": "",
"confidence_threshold": 0.8
}
model_register_table["CLS"] = {
"model_cfg_string":
'''
modelname = resnet18
classes = 2
img_size = 224
lr = 0.001
batchsize = 32
epochs = 10
freeze_layers = 0
is_Train = True
transformer = default
half = True
''',
"model_weight_path": "/home/cobot/caid2.0/python/Main/ServerDL/weights/test.pt",
"model_type": "CLS",
"model_transformer": build_transform_cls,
"model_handle_function": build_postprocess_cls,
"model_network": classification
}
model_register_table["DeepLabv3+"] = {
"model_cfg_string":
'''
num_classes = 2
backbone = mobilenet
output_stride = 16
sync_bn = False
freeze_bn = False
img_size = 257
''',
"model_weight_path": "/home/cobot/model_best.pth.tar",
"model_type": "SEG",
"model_transformer": build_transform_seg,
"model_handle_function": build_postprocess_seg,
"model_network": DeepLab
}
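
A hypothetical dispatch sketch showing how a server loop might consume one entry of `model_register_table`; the surrounding ServerDL plumbing is not shown in this row, so the access pattern is an assumption.

```python
# Assumed lookup: each entry bundles weights, config, pre/post processing and network.
entry = model_register_table["maskrcnn_tube"]
transform = entry["model_transformer"]        # builder for the input transform
postprocess = entry["model_handle_function"]  # builder for the output handler
threshold = entry.get("confidence_threshold") # 0.8 for this entry
```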
|
[
"you@example.com"
] |
you@example.com
|
8196a02855f2ae340915ff324e2199e99211f8a6
|
3eadf176c95f0c924af53770441afc0797d7c6ff
|
/memberships/admin.py
|
ccbd60dec107d29cc05318f498a7790d51b957e8
|
[
"MIT"
] |
permissive
|
Maneesh9063/video-membership
|
d144d559fe721e4a077659d610e6c0e8c35062d6
|
c241310a6a4e3b7ee831b1d65ae8afaef7d3076a
|
refs/heads/master
| 2022-12-08T23:58:37.176632
| 2020-09-04T15:41:40
| 2020-09-04T15:41:40
| 292,874,425
| 0
| 0
|
MIT
| 2020-09-04T14:52:54
| 2020-09-04T14:52:53
| null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
from django.contrib import admin
# Register your models here.
from .models import Membership, UserMembership, Subscription
admin.site.register(Membership)
admin.site.register(UserMembership)
admin.site.register(Subscription)
|
[
"freire1996@gmail.com"
] |
freire1996@gmail.com
|
2b1638999ca31a03dac9b7ae400bf86815b43735
|
0e8dd5901b1f98934c44a85b133eb7ca6f44b4b9
|
/osr2mp4/Parser/osrparser.py
|
398325f8b59fc702a5af1c87a8170c270acdc087
|
[] |
no_license
|
Hazuki-san/osr2mp4-core
|
dbd2f4d44a3d0e90974214c97b434dcbb2eedd18
|
83dc5c47bc73dcb0b4d4b6a5ae1924771c13c623
|
refs/heads/master
| 2022-11-24T13:41:15.703261
| 2020-07-03T14:00:54
| 2020-07-03T14:00:54
| 279,099,127
| 1
| 0
| null | 2020-07-12T16:02:35
| 2020-07-12T16:02:34
| null |
UTF-8
|
Python
| false
| false
| 2,041
|
py
|
import osrparse
# index for replay_event
from ..CheckSystem.Judgement import DiffCalculator
# noinspection PyTypeChecker
from ..EEnum.EReplay import Replays
def setupReplay(osrfile, beatmap):
replay_info = osrparse.parse_replay_file(osrfile)
replay_data = [None] * len(replay_info.play_data)
start_time = beatmap.start_time
total_time = 0
start_index = 1
start_osr = start_time - 3000
for index in range(len(replay_data)):
times = replay_info.play_data[index].time_since_previous_action
total_time += times
# if total_time >= end_osr:
# break
# end_index += 1
if total_time < start_osr:
start_index += 1 # to crop later, everything before we can ignore
continue
replay_data[index] = [None, None, None, None]
replay_data[index][Replays.CURSOR_X] = replay_info.play_data[index].x
replay_data[index][Replays.CURSOR_Y] = replay_info.play_data[index].y
replay_data[index][Replays.KEYS_PRESSED] = replay_info.play_data[index].keys_pressed
replay_data[index][Replays.TIMES] = total_time
replay_data = replay_data[start_index:-1]
replay_data.sort(key=lambda x: x[Replays.TIMES]) # sort replay data based on time
start_time = replay_data[0][Replays.TIMES]
for x in range(10):
replay_data.append([replay_data[-1][Replays.CURSOR_X], replay_data[-1][Replays.CURSOR_Y], 0, max(replay_data[-1][Replays.TIMES], int(beatmap.end_time + 1000) + 17 * x)])
diffcalculator = DiffCalculator(beatmap.diff)
timepreempt = diffcalculator.ar()
if replay_data[0][Replays.TIMES] > beatmap.hitobjects[0]["time"] - timepreempt - 2000:
startdata = replay_data[0].copy()
startdata[Replays.TIMES] = beatmap.hitobjects[0]["time"] - timepreempt - 2000
replay_data.insert(0, startdata)
replay_data.append([0, 0, 0, replay_data[-1][3] * 5])
replay_data.append([0, 0, 0, replay_data[-1][3] * 5])
start_time = replay_data[0][Replays.TIMES]
beatmap.breakperiods.append({"Start": int(beatmap.end_time + 200), "End": replay_data[-1][Replays.TIMES] + 100, "Arrow": False})
return replay_data, start_time
|
[
"snkraishin87@gmail.com"
] |
snkraishin87@gmail.com
|
3ec57b2531ab6a937124feee52ac1b6da03020ff
|
b5ef3b9da130f604f111bd469128b73e78d6ba9d
|
/bt5/erp5_accounting/SkinTemplateItem/portal_skins/erp5_accounting/AccountingTransactionLine_getProjectItemList.py
|
43b31ee9f20c5d1e877c6974693c15afe6a456ef
|
[] |
no_license
|
soediro/erp5
|
154bb2057c4cd12c14018c1ab2a09a78b2d2386a
|
3d1a8811007a363b7a43df4b295b5e0965c2d125
|
refs/heads/master
| 2021-01-11T00:31:05.445267
| 2016-10-05T09:28:05
| 2016-10-07T02:59:00
| 70,526,968
| 1
| 0
| null | 2016-10-10T20:40:41
| 2016-10-10T20:40:40
| null |
UTF-8
|
Python
| false
| false
| 1,341
|
py
|
"""Returns all validated projects.
This script is intended to be used on custom listfields for accounting lines, and on reports.
If this script returns an empty list, it means that reports by project are disabled.
"""
from Products.ERP5Type.Message import translateString
portal = context.getPortalObject()
# case 1: script is used for reports, we display all validated projects.
if context.getPortalType() == 'Accounting Transaction Module':
project_list = []
for project in portal.portal_catalog(
portal_type='Project',
select_list=['relative_url', 'title', 'reference'],
validation_state=('validated',),
sort_on=(('title', 'ASC'),)):
if project.reference:
project_list.append(('%s - %s' % (project.reference, project.title), project.relative_url,))
else:
project_list.append((project.title, project.relative_url,))
if not project_list:
return [] # returning an empty list, not to add project column on reports
return [('', ''), (translateString('No Project'), 'None')] + project_list
# case 2: script is used on custom listfields.
# for now the script has to be customized in such case.
# [(x.getTitle(), x.getRelativeUrl()) for x in context.project_module.searchFolder()]
return [('', '')]
|
[
"georgios.dagkakis@nexedi.com"
] |
georgios.dagkakis@nexedi.com
|
2309594889f7eaf0567ecb3881e41ae564d04ca9
|
22fa0db584e41a9f06d829a5dd06b32bcdeb3646
|
/registration/mobile_backend.py
|
dfb3c3b608616dec696f7b31dceed277b810a84c
|
[] |
no_license
|
muthuraj-python/muthuinterview
|
186ecd4efd173fa0fe7ce25e721ebadbeabc94f7
|
d51b03f90fea9abd6aa763cf666dd39ead7a4753
|
refs/heads/master
| 2021-02-22T07:24:10.319876
| 2020-03-15T13:37:16
| 2020-03-15T13:37:16
| 245,371,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
class MobileBackend(ModelBackend):
def authenticate(self, request, **kwargs):
UserModel = get_user_model()
try:
user = UserModel.objects.get(mobile_number=kwargs.get('username'))
except UserModel.DoesNotExist:
return None
else:
if user.check_password(kwargs.get('password')):
return user
return None
def get_user(self, user_id):
UserModel = get_user_model()
try:
return UserModel.objects.get(pk=user_id)
except UserModel.DoesNotExist:
return None
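
Django only consults a custom backend if it is registered; a hypothetical settings entry for this one (the dotted path is inferred from `/registration/mobile_backend.py`):

```python
# Assumed settings.py snippet: try mobile-number auth first, then the default backend.
AUTHENTICATION_BACKENDS = [
    'registration.mobile_backend.MobileBackend',
    'django.contrib.auth.backends.ModelBackend',
]
```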
|
[
"you@example.com"
] |
you@example.com
|
64cd0072445b206c8f730510ee8bb8e3f9fedfcd
|
7765acf96f3c334a073f647ead1e6c862046fd41
|
/tex/figures/photometry_nullspace.py
|
6cb74974a958ef681cf8425500f031c951b3c321
|
[
"MIT"
] |
permissive
|
rodluger/fishy
|
da752a2e7aab159b10d24eb0433251b8485d6983
|
94bb393b01ade283e416589ea013d604166749e3
|
refs/heads/master
| 2020-05-06T15:15:44.725979
| 2019-07-08T13:19:04
| 2019-07-08T13:19:04
| 180,179,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
# -*- coding: utf-8 -*-
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import starry
ydeg = 10
theta = np.linspace(-180, 180, 1000)
s = np.zeros(((ydeg + 1) ** 2, len(theta)))
map = starry.Map(ydeg, lazy=False)
n = 0
for l in range(ydeg + 1):
for m in range(-l, l + 1):
map.reset()
if l > 0:
map[l, m] = 1.0
s[n] = map.flux(theta=theta)
n += 1
# Set up the plot
fig, ax = plt.subplots(ydeg + 1, 2 * ydeg + 1, figsize=(16, 10),
sharex=True, sharey=True)
fig.subplots_adjust(hspace=0)
for axis in ax.flatten():
axis.spines['top'].set_visible(False)
axis.spines['right'].set_visible(False)
axis.spines['bottom'].set_visible(False)
axis.spines['left'].set_visible(False)
axis.set_xticks([])
axis.set_yticks([])
# Loop over the orders and degrees
n = 0
for i, l in enumerate(range(ydeg + 1)):
for j, m in enumerate(range(-l, l + 1)):
j += ydeg - l
ax[i, j].plot(s[n])
n += 1
# Labels
for j, m in enumerate(range(-ydeg, ydeg + 1)):
ax[-1, j].set_xlabel("%d" % m, fontsize=14, fontweight="bold", alpha=0.5)
for i, l in enumerate(range(ydeg + 1)):
ax[i, ydeg - l].set_ylabel("%d" % l, fontsize=14, fontweight="bold",
rotation=45, labelpad=20, alpha=0.5)
# Save
fig.savefig("photometry_nullspace.pdf", bbox_inches="tight")
|
[
"rodluger@gmail.com"
] |
rodluger@gmail.com
|
3bd32d35fa515727b1d2fdf19b6da268ce008399
|
5a42ce780721294d113335712d45c62a88725109
|
/project/graphdata/module/yiyiyuan/model/yi_favorite_contacts.py
|
03a9050f0826ae4d25e32f4863e189e8bd71ffc5
|
[] |
no_license
|
P79N6A/project_code
|
d2a933d53deb0b4e0bcba97834de009e7bb78ad0
|
1b0e863ff3977471f5a94ef7d990796a9e9669c4
|
refs/heads/master
| 2020-04-16T02:06:57.317540
| 2019-01-11T07:02:05
| 2019-01-11T07:02:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,048
|
py
|
# -*- coding: utf-8 -*-
# sqlacodegen mysql://root:123!@#@127.0.0.1/xhh_test --outfile yyy.py --flask
from lib.application import db
from .base_model import BaseModel
from .yi_user import YiUser
class YiFavoriteContact(db.Model, BaseModel):
__bind_key__ = 'xhh_yiyiyuan'
__tablename__ = 'yi_favorite_contacts'
id = db.Column(db.BigInteger, primary_key=True)
user_id = db.Column(db.BigInteger, nullable=False)
contacts_name = db.Column(db.String(20), nullable=False)
mobile = db.Column(db.String(20), nullable=False)
relatives_name = db.Column(db.String(20), nullable=False)
phone = db.Column(db.String(20), nullable=False)
last_modify_time = db.Column(db.DateTime, nullable=False)
create_time = db.Column(db.DateTime)
def getByUserId(self, user_id):
return self.query.filter_by(user_id=user_id).limit(1).first()
def contactDue(self, dbContact):
# check whether the relative/common contacts are overdue
if dbContact is None:
return {}
mobiles = []
# relative contact
if 'phone' in dbContact.keys():
mobiles.append(str(dbContact['phone']))
# frequently-used contact
if 'mobile' in dbContact.keys():
mobiles.append(str(dbContact['mobile']))
if len(mobiles) == 0:
return {}
oUser = YiUser()
overdue_users = oUser.isOverdueMobile(mobiles)
overdue_mobiles = [user[0] for user in overdue_users]
# determine overdue status
contract_due_data = {}
if 'phone' in dbContact.keys() and dbContact['phone'] is not None:
if dbContact['phone'] in overdue_mobiles:
contract_due_data['com_r_overdue'] = 1
else:
contract_due_data['com_r_overdue'] = 0
if 'mobile' in dbContact.keys() and dbContact['mobile'] is not None:
if dbContact['mobile'] in overdue_mobiles:
contract_due_data['com_c_overdue'] = 1
else:
contract_due_data['com_c_overdue'] = 0
return contract_due_data
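
A hypothetical call sketch for `contactDue`: the dict keys are inferred from the `.keys()` checks above, and the phone numbers are made up.

```python
# Assumed usage: pass the contact numbers as a dict, read back the overdue flags.
contact = YiFavoriteContact()
flags = contact.contactDue({'phone': '13800000000', 'mobile': '13900000000'})
# e.g. {'com_r_overdue': 0, 'com_c_overdue': 1}; empty dict if no numbers are given
```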
|
[
"wangyongqiang@ihsmf.com"
] |
wangyongqiang@ihsmf.com
|
1603f794730d82775b9c08df1c1c6289ae8cf270
|
f68cd225b050d11616ad9542dda60288f6eeccff
|
/testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_5GHzIsRadioAutoBlockAckEnabled.py
|
342c1a28d36b0e908ebc40dcbfc9e62a2bb7a193
|
[
"Apache-2.0"
] |
permissive
|
cablelabs/tools-tdkb
|
18fb98fadcd169fa9000db8865285fbf6ff8dc9d
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
refs/heads/master
| 2020-03-28T03:06:50.595160
| 2018-09-04T11:11:00
| 2018-09-05T00:24:38
| 147,621,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,337
|
py
|
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2017 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>3</version>
<name>TS_WIFIHAL_5GHzIsRadioAutoBlockAckEnabled</name>
<primitive_test_id/>
<primitive_test_name>WIFIHAL_GetOrSetParamBoolValue</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>Check AutoBlock-ACK enable status by using wifi_getRadioAutoBlockAckEnable HAL API</synopsis>
<groups_id>4</groups_id>
<execution_time>10</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_WIFIHAL_94</test_case_id>
<test_objective>Check AutoBlock-ACK enable status by using wifi_getRadioAutoBlockAckEnable HAL API</test_objective>
<test_type>Positive</test_type>
<test_setup>XB3. XB6</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>wifi_getRadioAutoBlockAckEnable()</api_or_interface_used>
<input_parameters>methodName : getRadioAutoBlockAckEnable
methodName : setRadioAutoBlockAckEnable
radioIndex : 1</input_parameters>
<automation_approch>1.Configure the Function info in Test Manager GUI which needs to be tested
(WIFIHAL_GetOrSetParamBoolValue - func name - "If not exists already"
WIFIHAL - module name
Necessary I/P args as Mentioned in Input)
2.Python Script will be generated/overrided automatically by Test Manager with provided arguments in configure page (TS_WIFIHAL_5GHzIsRadioAutoBlockAckEnabled.py)
3.Execute the generated Script(TS_WIFIHAL_5GHzIsRadioAutoBlockAckEnabled.py) using execution page of Test Manager GUI
4.wifihalstub which is a part of TDK Agent process, will be in listening mode to execute TDK Component function named WIFIHAL_GetOrSetParamBoolValue through registered TDK wifihalstub function along with necessary arguments
5.WIFIHAL_GetOrSetParamBoolValue function will call Ccsp Base Function named "ssp_WIFIHALGetOrSetParamBoolValue", that inturn will call WIFIHAL Library Functions
wifi_getRadioAutoBlockAckEnable() and wifi_setRadioAutoBlockAckEnable()
6.Response(s)(printf) from TDK Component,Ccsp Library function and wifihalstub would be logged in Agent Console log based on the debug info redirected to agent console
7.wifihalstub will validate the available result (from agent console log and Pointer to instance as updated) with expected result
8.Test Manager will publish the result in GUI as SUCCESS/FAILURE based on the response from wifihalstub</automation_approch>
<except_output>CheckPoint
1:wifi_getRadioAutoBlockAckEnable log from DUT should be available in Agent Console LogCheckPoint
2:TDK agent Test Function will log the test case result as PASS based on API response CheckPoint
3:Test Manager GUI will publish the result as SUCCESS in Execution page"""</except_output>
<priority>High</priority>
<test_stub_interface>WIFIHAL</test_stub_interface>
<test_script>TS_WIFIHAL_5GHzIsRadioAutoBlockAckEnabled</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from wifiUtility import *;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("wifihal","1");
#IP and Port of box; no need to change.
#These will be replaced with the corresponding box IP and port while executing the script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WIFIHAL_5GHzIsRadioAutoBlockAckEnabled');
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus
if "SUCCESS" in loadmodulestatus.upper():
obj.setLoadModuleStatus("SUCCESS");
expectedresult="SUCCESS";
radioIndex = 1
getMethod = "getRadioAutoBlockAckEnable"
primitive = 'WIFIHAL_GetOrSetParamBoolValue'
#Getting the default enable mode
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, radioIndex, 0, getMethod)
if expectedresult in actualresult :
tdkTestObj.setResultStatus("SUCCESS");
enable = details.split(":")[1].strip()
if "Enabled" in enable:
print "Auto Block-Ack is Enabled for Radio 5GHz"
oldEnable = 1
newEnable = 0
else:
print "Auto Block-Ack is Disabled for Radio 5GHz "
oldEnable = 0
newEnable = 1
setMethod = "setRadioAutoBlockAckEnable"
#Toggle the enable status using set
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, radioIndex, newEnable, setMethod)
if expectedresult in actualresult :
print "Enable state toggled using set"
# Get the New enable status
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, radioIndex, 0, getMethod)
if expectedresult in actualresult and enable not in details.split(":")[1].strip():
print "getRadioAutoBlockAckEnable Success, verified Along with setRadioAutoBlockAckEnable() api"
#Revert back to original Enable status
tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, radioIndex, oldEnable, setMethod)
if expectedresult in actualresult :
print "Enable status reverted back";
else:
print "Couldn't revert enable status"
tdkTestObj.setResultStatus("FAILURE");
else:
print "getRadioAutoBlockAckEnable() failed after set function"
tdkTestObj.setResultStatus("FAILURE");
else:
print "setRadioAutoBlockAckEnable() failed"
tdkTestObj.setResultStatus("FAILURE");
else:
print "getRadioAutoBlockAckEnable() failed"
tdkTestObj.setResultStatus("FAILURE");
obj.unloadModule("wifihal");
else:
print "Failed to load wifi module";
obj.setLoadModuleStatus("FAILURE");
|
[
"jim.lawton@accenture.com"
] |
jim.lawton@accenture.com
|
84e7d9a493e30b3985e39ae5126bbaeeca00239c
|
82c73b70c2002f647bdc254125f0bdb18f0b79d2
|
/openstack_dashboard/dashboards/admin/license/forms.py
|
099ccaf8c180726a41000b7ec3cdcf950aee7eaa
|
[
"Apache-2.0"
] |
permissive
|
xuweiliang/Codelibrary
|
cfb5755ced54c65cacdb3e35ab2b98385f8d5f8e
|
54e45b2daa205132c05b0ff5a2c3db7fca2853a7
|
refs/heads/master
| 2021-05-04T00:31:42.025238
| 2018-03-20T07:05:20
| 2018-03-20T07:05:20
| 71,852,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,302
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pytz
import random, string
import re
import sys
import operator
from django.core.urlresolvers import reverse
from openstack_dashboard import api
from django import shortcuts
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import forms
from horizon import messages
from datetime import datetime as time
from horizon import exceptions
from horizon.utils import validators
from django.utils import encoding
from subprocess import PIPE,Popen
from django.http import HttpResponseRedirect
from openstack_dashboard import record_action
LOG = logging.getLogger(__name__)
class LicenseRegisterForm(forms.SelfHandlingForm):
licence_help=_("Please enter the serial number")
cryptogram_help=_("If you need a new certificate,\
please send your service provider the Cryptogram")
encrypted_license = forms.CharField(widget=forms.widgets.Textarea,
label=_("Input licence"),
help_text = licence_help,
required=True)
system_uuid = forms.CharField(label=_("Cryptogram"),
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
help_text = cryptogram_help)
def __init__(self, request, *args, **kwargs):
super(LicenseRegisterForm, self).__init__(request, *args, **kwargs)
def handle(self, request, data):
try:
licence = data.get('encrypted_license', None).strip()
systemUUID = data.get('system_uuid')
UUID, key = systemUUID.split(":")
decoded_string = eval(api.authcode.AuthCode.decode(licence, UUID.strip()))
during = decoded_string.get('during', None)
num = decoded_string.get('num', None)
authcode = decoded_string.get('uuid')
uuid, pwd = authcode.split(":")
licenceTime = decoded_string.get('time', None)
if uuid != UUID:
messages.error(request,
encoding.force_unicode(_("Serial number can only activate\
the specified server")))
api.nova.systemlogs_create(request, '-',\
record_action.REGISTERLICENSE,
result=False, detail=_("Licence Register Fail"))
return HttpResponseRedirect('/dashboard/admin/license')
date = time.strptime(licenceTime, '%Y-%m-%dT%H:%M:%S.%f')
starttime = time.strftime(date, '%Y-%m-%d %H:%M:%S')
apartDays = (time.now() - date).days
if during > 0 and apartDays < 3 and num > 0:
kwargs={'licence':{'starttime':starttime,
'system_uuid':authcode,
'encrypted_license':licence,
'disabled':False}}
try:
api.nova.update_licence(request, **kwargs)
msg = _("Serial number authentication success")
messages.success(request,
encoding.force_unicode(msg))
api.nova.systemlogs_create(request, '-',\
record_action.REGISTERLICENSE,
result=True, detail=msg)
return True
except Exception as e:
exceptions.handle(request,
encoding.force_unicode(_("%s", e)))
api.nova.systemlogs_create(request, '-',\
record_action.REGISTERLICENSE,
result=False, detail=_("Licence Register Fail"))
return False
else:
messages.error(request,
encoding.force_unicode(_("Serial number expired or invalid")))
api.nova.systemlogs_create(request, '-',\
record_action.REGISTERLICENSE,
result=False, detail=_("Licence invalid"))
return HttpResponseRedirect('/dashboard/admin/license')
except Exception as e:
exceptions.handle(request,
encoding.force_unicode(_("Invalid serial number \
or registration error %s" % e)))
api.nova.systemlogs_create(request, '-',\
record_action.REGISTERLICENSE,
result=False, detail=_("Licence Register Fail"))
return False
|
[
"root@newton.com"
] |
root@newton.com
|
b5b156cdd0e5a59512ec09150e8dfd07ed2350af
|
3ba8bf9fb1a3d54233e927893bc2d3865692c896
|
/ophyd/controls/ophydobj.py
|
c94c640ca165b825b3e5f70be5d377a4f711108f
|
[] |
no_license
|
NSLS-II-CSX/ophyd
|
68fee01393d819350270b143457f76a4a5ccf703
|
aadf6197f7a3b1ba907e48a73d5af8b07b7c57ad
|
refs/heads/master
| 2021-01-17T21:19:16.867892
| 2014-12-13T23:39:40
| 2014-12-13T23:39:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,022
|
py
|
# vi: ts=4 sw=4
'''
:mod:`ophyd.control.ophydobj` - Base object type
================================================
.. module:: ophyd.control.ophydobj
:synopsis:
'''
from __future__ import print_function
from ..session import register_object
class OphydObject(object):
_default_sub = None
def __init__(self, name, alias, register=True):
'''
Subscription/callback mechanism for registered objects in ophyd sessions.
'''
self._name = name
self._alias = alias
self._subs = dict((getattr(self, sub), []) for sub in dir(self)
if sub.startswith('SUB_'))
self._sub_cache = {}
self._ses_logger = None
if register:
self._register()
def _run_sub(self, cb, *args, **kwargs):
'''
Run a single subscription callback
:param cb: The callback
'''
try:
cb(*args, **kwargs)
except Exception as ex:
sub_type = kwargs['sub_type']
self._ses_logger.error('Subscription %s callback exception (%s)' %
(sub_type, self), exc_info=ex)
def _run_cached_sub(self, sub_type, cb):
'''
Run a single subscription callback using the most recent
cached arguments
:param sub_type: The subscription type
:param cb: The callback
'''
try:
args, kwargs = self._sub_cache[sub_type]
except KeyError:
pass
else:
# Cached kwargs includes sub_type
self._run_sub(cb, *args, **kwargs)
def _run_subs(self, *args, **kwargs):
'''
Run a set of subscription callbacks
Only the kwarg :param:`sub_type` is required, indicating
the type of callback to perform. All other positional arguments
and kwargs are passed directly to the callback function.
No exceptions are raised when the callback functions fail;
they are merely logged with the session logger.
'''
sub_type = kwargs['sub_type']
# Shallow-copy the callback arguments for replaying the
# callback at a later time (e.g., when a new subscription is made)
self._sub_cache[sub_type] = (tuple(args), dict(kwargs))
for cb in self._subs[sub_type]:
self._run_sub(cb, *args, **kwargs)
def subscribe(self, cb, event_type=None, run=True):
'''
Subscribe to events this signal group emits
See also :func:`clear_sub`
:param callable cb: A callable function (that takes kwargs)
to be run when the event is generated
:param event_type: The name of the event to subscribe to (if None,
defaults to SignalGroup._default_sub)
:type event_type: str or None
:param bool run: Run the callback now
'''
if event_type is None:
event_type = self._default_sub
self._subs[event_type].append(cb)
if run:
self._run_cached_sub(event_type, cb)
def clear_sub(self, cb, event_type=None):
'''
Remove a subscription, given the original callback function
See also :func:`subscribe`
:param callable callback: The callback
:param event_type: The event to unsubscribe from (if None, removes it
from all event types)
:type event_type: str or None
'''
if event_type is None:
for event_type, cbs in self._subs.items():
try:
cbs.remove(cb)
except ValueError:
pass
else:
self._subs[event_type].remove(cb)
def _register(self):
'''
Register this object with the session
'''
register_object(self)
@property
def name(self):
return self._name
@property
def alias(self):
'''
An alternative name for the signal
'''
return self._alias
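
A hypothetical subscription round-trip: `_subs` is built from `SUB_*` class attributes, so a subclass declares them. The `DemoSignal` subclass and callback below are illustrative; `register=False` skips the session registration.

```python
# Assumed demo of the subscription mechanism above.
class DemoSignal(OphydObject):
    SUB_VALUE = 'value'
    _default_sub = SUB_VALUE

def on_value(**kwargs):
    print('value event:', kwargs.get('value'))

sig = DemoSignal('demo', 'demo_alias', register=False)
sig.subscribe(on_value)                                 # run=True replays the (empty) cache
sig._run_subs(sub_type=DemoSignal.SUB_VALUE, value=42)  # prints: value event: 42
```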
|
[
"klauer@bnl.gov"
] |
klauer@bnl.gov
|
a2c709a82b9f707add011e641eedf079ded0b67e
|
95f9c734c4bf5de8e5d0adff9ac2cf0228df75ac
|
/Django/mysite-bak/mysite/polls/views.py
|
da0e946bc8b3ef8aea7fb16256eecbdb21caf692
|
[] |
no_license
|
holen/Python
|
7a996b13ff2224084397223879c380169d47ff8c
|
506fff291d6e9c6f80c30a51cc3b77e9dd048468
|
refs/heads/master
| 2022-12-12T22:12:51.561716
| 2019-10-16T03:08:00
| 2019-10-16T03:08:00
| 14,278,665
| 1
| 0
| null | 2022-12-08T00:51:26
| 2013-11-10T15:29:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,090
|
py
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from polls.models import Question, Choice
from django.template import RequestContext, loader
from django.http import Http404
from django.shortcuts import get_object_or_404, render
from django.core.urlresolvers import reverse
# Create your views here.
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {'latest_question_list': latest_question_list}
return render(request, 'polls/index.html', context)
# template = loader.get_template('polls/index.html')
#context = RequestContext(request, {
# 'latest_question_list': latest_question_list,
# })
#return HttpResponse(template.render(context))
#output = ','.join([p.question_text for p in latest_question_list])
#return HttpResponse(output)
#return HttpResponse("Hello, world. You're at the polls index.")
def detail(request, question_id):
#try:
# question = Question.objects.get(pk=question_id)
# except Question.DoesNotExist:
# raise Http404
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/detail.html', {'question':question})
#return HttpResponse("You're looking at question %s." % question_id)
def results(request, question_id):
#response = "You're looking at the results of question %s."
#return HttpResponse(response % question_id)
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/results.html', {'question': question})
def vote(request, question_id):
p = get_object_or_404(Question, pk=question_id)
try:
selected_choice = p.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls/detail.html', {
'question': p,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
#return HttpResponse("You're voting on question %s." % question_id)
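
The `reverse('polls:results', ...)` call implies a namespaced URLconf; a hypothetical `polls/urls.py` consistent with these views, written against the same old Django API as the imports above:

```python
# Assumed polls/urls.py; include it with namespace='polls' in the project URLconf.
from django.conf.urls import url
from polls import views

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^(?P<question_id>\d+)/$', views.detail, name='detail'),
    url(r'^(?P<question_id>\d+)/results/$', views.results, name='results'),
    url(r'^(?P<question_id>\d+)/vote/$', views.vote, name='vote'),
]
```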
|
[
"yh_zhl@sina.com"
] |
yh_zhl@sina.com
|
7fa86d8b542bd91ae527f605694388c5e6e58d35
|
5608a9cd3bec8cab1c3f9d7f42896107b78593cc
|
/tests/unit/mock_docker/fake_stat.py
|
b85bc3b8007c8c020d9650a655a3cbcd34fdafe7
|
[
"Apache-2.0"
] |
permissive
|
troyready/runway
|
cdee6d94f42173c8aa0bd414620b68be36a510aa
|
4fd299961a4b73df39e14f4f19a7236f7be17dd8
|
refs/heads/master
| 2021-06-18T16:05:30.712211
| 2021-01-14T01:44:32
| 2021-01-14T01:44:32
| 151,314,626
| 0
| 0
|
Apache-2.0
| 2018-10-02T19:55:09
| 2018-10-02T19:55:08
| null |
UTF-8
|
Python
| false
| false
| 2,990
|
py
|
"""Stats for fake Docker API."""
OBJ = {
"read": "2015-02-11T19:20:46.667237763+02:00",
"network": {
"rx_bytes": 567224,
"rx_packets": 3773,
"rx_errors": 0,
"rx_dropped": 0,
"tx_bytes": 1176,
"tx_packets": 13,
"tx_errors": 0,
"tx_dropped": 0,
},
"cpu_stats": {
"cpu_usage": {
"total_usage": 157260874053,
"percpu_usage": [52196306950, 24118413549, 53292684398, 27653469156],
"usage_in_kernelmode": 37140000000,
"usage_in_usermode": 62140000000,
},
"system_cpu_usage": 3.0881377e14,
"throttling_data": {"periods": 0, "throttled_periods": 0, "throttled_time": 0},
},
"memory_stats": {
"usage": 179314688,
"max_usage": 258166784,
"stats": {
"active_anon": 90804224,
"active_file": 2195456,
"cache": 3096576,
"hierarchical_memory_limit": 1.844674407371e19,
"inactive_anon": 85516288,
"inactive_file": 798720,
"mapped_file": 2646016,
"pgfault": 101034,
"pgmajfault": 1207,
"pgpgin": 115814,
"pgpgout": 75613,
"rss": 176218112,
"rss_huge": 12582912,
"total_active_anon": 90804224,
"total_active_file": 2195456,
"total_cache": 3096576,
"total_inactive_anon": 85516288,
"total_inactive_file": 798720,
"total_mapped_file": 2646016,
"total_pgfault": 101034,
"total_pgmajfault": 1207,
"total_pgpgin": 115814,
"total_pgpgout": 75613,
"total_rss": 176218112,
"total_rss_huge": 12582912,
"total_unevictable": 0,
"total_writeback": 0,
"unevictable": 0,
"writeback": 0,
},
"failcnt": 0,
"limit": 8039038976,
},
"blkio_stats": {
"io_service_bytes_recursive": [
{"major": 8, "minor": 0, "op": "Read", "value": 72843264},
{"major": 8, "minor": 0, "op": "Write", "value": 4096},
{"major": 8, "minor": 0, "op": "Sync", "value": 4096},
{"major": 8, "minor": 0, "op": "Async", "value": 72843264},
{"major": 8, "minor": 0, "op": "Total", "value": 72847360},
],
"io_serviced_recursive": [
{"major": 8, "minor": 0, "op": "Read", "value": 10581},
{"major": 8, "minor": 0, "op": "Write", "value": 1},
{"major": 8, "minor": 0, "op": "Sync", "value": 1},
{"major": 8, "minor": 0, "op": "Async", "value": 10581},
{"major": 8, "minor": 0, "op": "Total", "value": 10582},
],
"io_queue_recursive": [],
"io_service_time_recursive": [],
"io_wait_time_recursive": [],
"io_merged_recursive": [],
"io_time_recursive": [],
"sectors_recursive": [],
},
}
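
A hypothetical consumer of these fake stats, computing the usual Docker CPU percentage. Real clients diff two successive samples; a single snapshot is used here only to show the field paths.

```python
# Assumed single-sample calculation; field names match the OBJ dict above.
cpu_total = OBJ["cpu_stats"]["cpu_usage"]["total_usage"]
system_total = OBJ["cpu_stats"]["system_cpu_usage"]
ncpus = len(OBJ["cpu_stats"]["cpu_usage"]["percpu_usage"])
cpu_percent = cpu_total / system_total * ncpus * 100.0
print(round(cpu_percent, 2))
```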
|
[
"noreply@github.com"
] |
troyready.noreply@github.com
|
b4e60d4747db4b3823750c69fecee8522989f43d
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/ControlRegions/SS/2016HIPM_v9/variables.py
|
b38df10fccc279186d7b3cf5a9b34a05e48c2e2e
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943
| 2023-08-18T09:23:34
| 2023-08-18T09:23:34
| 39,819,875
| 10
| 63
| null | 2023-08-10T14:08:04
| 2015-07-28T07:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 3,120
|
py
|
# variables
# 0 = not fold (default), 1 = fold underflowbin, 2 = fold overflow bin, 3 = fold underflow and overflow
#variables = {}   # left commented out: the plotting tools define `variables` before executing this file
#variables['nvtx'] = { 'name': 'PV_npvsGood',
# 'range' : (20,0,100),
# 'xaxis' : 'nvtx',
# 'fold' : 3
# }
variables['mllpeak'] = {
'name': 'mll',
'range' : (20,80,100),
'xaxis' : 'm_{ll} [GeV]',
'fold' : 0
}
variables['ptll'] = {
'name': 'ptll',
'range' : (20, 0,200),
'xaxis' : 'p_{T}^{ll} [GeV]',
'fold' : 0
}
variables['ptll_more'] = {
'name': 'ptll',
'range' : (50, 0,100),
'xaxis' : 'p_{T}^{ll} [GeV]',
'fold' : 0
}
variables['pt1'] = {
'name': 'Lepton_pt[0]',
'range' : (20,0,200),
'xaxis' : 'p_{T} 1st lep',
'fold' : 3
}
variables['pt1_v7'] = {
'name': 'Lepton_pt[0]',
'range' : (20,20,100),
'xaxis' : 'p_{T} 1st lep',
'fold' : 3
}
variables['pt2'] = {
'name': 'Lepton_pt[1]',
'range' : (20,0,100),
'xaxis' : 'p_{T} 2nd lep',
'fold' : 3
}
variables['eta1'] = {
'name': 'Lepton_eta[0]',
'range' : (20,-3,3),
'xaxis' : '#eta 1st lep',
'fold' : 3
}
variables['eta2'] = {
'name': 'Lepton_eta[1]',
'range' : (20,-3,3),
'xaxis' : '#eta 2nd lep',
'fold' : 3
}
variables['phi1'] = {
'name': 'Lepton_phi[0]',
'range' : (20,-3.2,3.2),
'xaxis' : '#phi 1st lep',
'fold' : 3
}
variables['phi2'] = {
'name': 'Lepton_phi[1]',
'range' : (20,-3.2,3.2),
'xaxis' : '#phi 2nd lep',
'fold' : 3
}
variables['puppimet'] = {
'name': 'PuppiMET_pt',
'range' : (20,0,100),
'xaxis' : 'puppimet [GeV]',
'fold' : 3
}
variables['njet'] = {
'name': 'Sum$(CleanJet_pt>30)',
'range' : (5,0,5),
'xaxis' : 'Number of jets',
'fold' : 2
}
variables['jetpt1'] = {
'name': '(Sum$(CleanJet_pt>30)>0)*(Alt$(CleanJet_pt[0], 0)) - (Sum$(CleanJet_pt>30)==0)*99',
'range' : (20,0,200),
'xaxis' : 'p_{T} 1st jet',
'fold' : 0
}
variables['jetpt2'] = {
'name': '(Sum$(CleanJet_pt>30)>1)*(Alt$(CleanJet_pt[1], 0)) - (Sum$(CleanJet_pt>30)<=1)*99',
'range' : (20,0,200),
'xaxis' : 'p_{T} 2nd jet',
'fold' : 0
}
variables['jeteta1'] = {
'name': '(Sum$(CleanJet_pt>30)>0)*(Alt$(CleanJet_eta[0], 0)) - (Sum$(CleanJet_pt>30)==0)*99',
'range' : (20,-5.0,5.0),
'xaxis' : '#eta 1st jet',
'fold' : 0
}
variables['jeteta2'] = {
'name': '(Sum$(CleanJet_pt>30)>1)*(Alt$(CleanJet_eta[1], 0)) - (Sum$(CleanJet_pt>30)<=1)*99',
'range' : (20,-5.0,5.0),
'xaxis' : '#eta 2nd jet',
'fold' : 0
}
variables['trkMet'] = {
'name': 'TkMET_pt',
'range' : (20,0,200),
'xaxis' : 'trk met [GeV]',
'fold' : 3
}
variables['mpmet'] = {
'name': 'mpmet',
'range' : (20,0,200),
'xaxis' : 'min proj met [GeV]',
'fold' : 3
}
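# Binning convention used throughout: 'range': (N, lo, hi) books N uniform
# bins between lo and hi (e.g. 'mllpeak' above is 20 bins across 80-100 GeV),
# and 'fold' follows the codes in the header comment (e.g. 'njet' folds its
# overflow into the last bin with fold=2).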
|
[
"nicolo.trevisani@cern.ch"
] |
nicolo.trevisani@cern.ch
|
ff9d10c84448a1f47b8ab9374247b004431a8c67
|
766da3ffcbd26e7f58d711f5b0e7312cb365e9fb
|
/layers/transformer/__init__.py
|
9950eabee3288f70b34f0e30bc81b80b808266ca
|
[
"MIT"
] |
permissive
|
RobertCsordas/ndr
|
1277b353eb61267e023b671072730bdc7e779ca5
|
da20530dfb4336deddfbe5e79d62e72d1dc2580e
|
refs/heads/master
| 2023-09-02T22:38:57.601098
| 2021-11-19T09:52:23
| 2021-11-19T09:52:23
| 414,588,414
| 20
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
from .transformer import Transformer, AttentionMask
from .relative_transformer import RelativeTransformer
from .universal_transformer import UniversalTransformer
from .universal_relative_transformer import UniversalRelativeTransformer
from .transformer import TransformerEncoderWithLayer
from .relative_transformer import RelativeTransformerEncoderLayer
from .universal_transformer import UniversalTransformerEncoderWithLayer
from .universal_transformer import UniversalTransformerEncoder
|
[
"xdever@gmail.com"
] |
xdever@gmail.com
|
476654bc49e7272f77917b445426eaf80a1cee93
|
35efa105f00073303284103699fcaec54045a8a4
|
/invoke_servers.py
|
d3bd5d43949a4ac06d60f0cf6a4d3fb25c6e0e22
|
[] |
no_license
|
Quantumke/mpesaapi
|
b12c7e663fc89d6b98170a229b7876fdb5c1541f
|
e0941927b194d361f443aa8cf665fc3cfce71bca
|
refs/heads/master
| 2021-01-19T00:25:47.958078
| 2016-05-27T09:19:27
| 2016-05-27T09:19:27
| 59,822,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 770
|
py
|
import urllib.parse
import urllib.request
import ssl
def invoke_server():
url="https://safaricom.co.ke/mpesa_online/lnmo_checkout_server.php?wsdl"
    values = {
        "MERCHANT_TRANSACTION_ID": "1",
        "PASSWORD": "1",
        "REFERENCE_ID": "1",
        "TIMESTAMP": "1461164700",
    }
    # The legacy endpoint only negotiates TLSv1, hence the custom context.
    find_a_better_way = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    # urllib.urlencode does not exist in Python 3: build the POST body with
    # urllib.parse.urlencode and encode it to bytes before sending.
    data = urllib.parse.urlencode(values).encode('utf-8')
    req = urllib.request.Request(url, data, headers={'User-Agent': 'Mozilla/5.0'})
resp = urllib.request.urlopen(req, context=find_a_better_way)
respData = resp.read()
print(respData)
if __name__ == "__main__":
invoke_server()
|
[
"nguruben@gmail.com"
] |
nguruben@gmail.com
|
e1da3255668999c3b77aa8c9332b197a9203478e
|
42104a0ebfc03caf9c7648211ca6ac69de984cbb
|
/memedata/errors.py
|
1a923ec91aa865271c3a85ad8fe34bf5b1120fc9
|
[
"MIT"
] |
permissive
|
bomdiabot/memedata-service
|
61fbdf00d32fb9ef55f352aa400c8830588b8cfe
|
52c772c1440901fd23bc8d5254d1f16d11d6c629
|
refs/heads/dev
| 2022-12-09T13:46:29.588250
| 2018-10-29T23:31:30
| 2018-10-29T23:31:30
| 147,910,641
| 2
| 0
|
MIT
| 2022-12-08T02:53:13
| 2018-09-08T07:09:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,818
|
py
|
from marshmallow import ValidationError
from werkzeug.exceptions import HTTPException
from flask_jwt_extended.exceptions import JWTExtendedException
from memedata.util import mk_errors
from memedata import config
def jwt_error_handler(error):
code = 401
messages = list(getattr(error, 'args', []))
return mk_errors(code, messages)
def http_error_handler(error):
resp = error.response
if resp is None:
code = error.code
messages = [error.description]
else:
code = getattr(resp, 'status_code', 500)
json = resp.get_json()
if 'errors' in json and json['errors']:
messages = [e['message'] for e in json['errors'] if 'message' in e]
else:
messages = [str(resp.status)]
return mk_errors(code, messages)
def validation_error_handler(error):
code = getattr(error, 'status_code', 500)
messages = getattr(error, 'messages', [])
return mk_errors(code, messages)
def generic_error_handler(error):
code = getattr(error, 'status_code', 500)
if config.debug:
messages = [str(error)]
else:
messages = ['something went wrong!']
return mk_errors(code, messages)
def error_handler(error):
try:
if isinstance(error, JWTExtendedException):
return jwt_error_handler(error)
elif isinstance(error, HTTPException):
return http_error_handler(error)
elif isinstance(error, ValidationError):
return validation_error_handler(error)
else:
return generic_error_handler(error)
    except Exception:
return mk_errors(500, 'something went wrong!')
def register_handlers(app):
app.errorhandler(Exception)(error_handler)
app.errorhandler(HTTPException)(error_handler)
app.handle_user_exception = error_handler
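# Minimal wiring sketch (illustrative; assumes a standard Flask app object):
#   from flask import Flask
#   from memedata.errors import register_handlers
#
#   app = Flask(__name__)
#   register_handlers(app)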
|
[
"erik.perillo@gmail.com"
] |
erik.perillo@gmail.com
|
31abd5ef24459b6c62af258b25f1201638b650b3
|
8e1d05d2e130e30585a08b54ce8d613ee78d6b8c
|
/Evaluation/modularity_analysis.py
|
93c04dc25880df6da1ce71526c29416018e92e39
|
[] |
no_license
|
mturja-vf-ic-bd/AD-Longitudinal-Smoothing
|
11793fafcfb039629a5a5265d98789a1e1576de2
|
10fafdd57c63cc7b5a1c0400abdbfaa4353254f3
|
refs/heads/master
| 2020-04-16T17:09:14.790401
| 2019-08-17T17:05:54
| 2019-08-17T17:05:54
| 165,764,720
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,380
|
py
|
# Modularity Analysis of the raw and smooth connectomes
# The goal is to show that our method produces connectomes with consistent modularity over time
from bct import community_louvain
import numpy as np
from utils.readFile import readSubjectFiles
from plot_functions import plot_matrix_all, plot_community_structure_variation
from math import log
from utils.helper import get_subject_names
from collections import Counter
def match_list(l1, l2):
return len(set(l1).intersection(set(l2)))
def compare_community_structure(c1, c2):
matching = {}
if len(c1) < len(c2):
c1, c2 = c2, c1
for i in range(len(c1)):
max = -1
max_idx = -1
for j in range(len(c2)):
temp = match_list(c1[i], c2[j])
if temp > max:
max = temp
max_idx = j
        matching[i] = max_idx
    return matching
def build_longitudinal_community_structure(c_i_list):
for i, c_s in enumerate(c_i_list):
for j in range(i + 1, len(c_i_list)):
c_d = c_i_list[j]
def build_community_structure(c_i):
"""
    Returns a list of lists, each representing the indices of one community
    :param c_i: community labels of the nodes of the graph
    :return: idx_ordered: nested list where each element holds the indices of one community
"""
community_dict = {}
label_set = set(c_i)
for label in label_set:
idx_c = np.nonzero(c_i == label)[0]
key = min(idx_c)
community_dict[key] = idx_c
idx_ordered = []
for k in sorted(community_dict.keys()):
idx_ordered.append(list(community_dict[k]))
return idx_ordered
def sort_connectomes_by_modularity(mat, c_i=None):
"""
Sort a matrix by community structure
:param mat: adjacancy matrix (N*N)
:return: sorted adjacancy matrix (N*N)
"""
c_i, q = community_louvain(np.asarray(mat), gamma=1, ci=c_i)
com_struct = build_community_structure(c_i)
idx_ordered = []
for idx in com_struct:
idx_ordered = idx_ordered + idx
mat = mat[idx_ordered, :]
return mat[:, idx_ordered], c_i
def variation_of_information(X, Y):
n = float(sum([len(x) for x in X]))
sigma = 0.0
for x in X:
p = len(x) / n
for y in Y:
q = len(y) / n
r = len(set(x) & set(y)) / n
if r > 0.0:
sigma += r * (log(r / p, 2) + log(r / q, 2))
return abs(sigma)
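# Hand-worked example (illustrative, not from the original script): for the
# partitions X = [[0, 1], [2, 3]] and Y = [[0, 1, 2, 3]],
# variation_of_information(X, Y) == 1.0, i.e. merging the two halves into a
# single community loses exactly one bit.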
def voi_between_community_structure(mat1, mat2, gamma=1):
"""
    Measures the normalized variation of information between two community structures
    :param mat1: N*N adjacency matrix of graph one.
    :param mat2: N*N adjacency matrix of graph two.
    :return: nvoi: normalized variation of information between the two community structures
"""
c_1, q = community_louvain(np.asarray(mat1), gamma=gamma)
c_2, q = community_louvain(np.asarray(mat2), gamma=gamma)
N = len(c_1)
X = build_community_structure(c_1)
Y = build_community_structure(c_2)
return variation_of_information(X, Y) / log(N, 2)
def mean_std_voi(sub_names):
voi_rw = []
voi_sm = []
for sub in sub_names:
connectome_list, smoothed_connectomes = readSubjectFiles(sub, method="row")
voi_rw = voi_rw + [voi_between_community_structure(v1, v2) for
v1 in connectome_list for v2 in connectome_list if v1 is not v2]
voi_sm = voi_sm + [voi_between_community_structure(v1, v2) for
v1 in smoothed_connectomes for v2 in smoothed_connectomes if v1 is not v2]
voi_rw_mean = np.mean(voi_rw)
voi_rw_std = np.std(voi_rw)
voi_sm_mean = np.mean(voi_sm)
voi_sm_std = np.std(voi_sm)
return voi_rw_mean, voi_rw_std, voi_sm_mean, voi_sm_std
if __name__ == '__main__':
sub_names = get_subject_names(3)
#sub_names = ["027_S_5110"]
print(mean_std_voi(sub_names))
'''
for sub in sub_names:
connectome_list, smoothed_connectomes = readSubjectFiles(sub, method="row")
connectome_list = [sort_connectomes_by_modularity(connectome) for connectome in connectome_list]
smoothed_connectomes = [sort_connectomes_by_modularity(connectome) for connectome in smoothed_connectomes]
plot_matrix_all(connectome_list, fname="raw_mod", savefig=True)
plot_matrix_all(smoothed_connectomes, fname="sm_mod", savefig=True)
'''
|
[
"mturja.vf.ic.bd@gmail.com"
] |
mturja.vf.ic.bd@gmail.com
|
7e59f965301071339cf8b02f1232ee076d1d8bc1
|
d697c1d45e96bd440be9c17ab14243a5882b1f52
|
/qianfeng/常用模块/Tkinter/Text控件.py
|
720a8d02a87bfe968a9f57bcdad6841620b2607e
|
[] |
no_license
|
ithjl521/python
|
9eeda2e60dda97ee36e8764c06400eb12818689f
|
f4fe50799501c483cb64445fd05ee0f30f56576c
|
refs/heads/master
| 2020-07-12T23:10:53.608276
| 2019-11-08T08:59:35
| 2019-11-08T08:59:35
| 204,931,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
import tkinter
# Create the main window
win = tkinter.Tk()
# Set the window title
win.title('title-hjl')
# Set the window size and position
win.geometry("400x400+200+50")
'''
Text widget: used to display multiple lines of text
'''
text = tkinter.Text(win, width=30, height=4)
text.pack()
demo_text = '''
【推荐】超50万C++/C#源码: 大型实时仿真组态图形源码
【活动】阿里云910会员节多款云产品满减活动火热进行中
【推荐】新手上天翼云,数十款云产品、新一代主机0元体验
【推荐】零基础轻松玩转华为云产品,获壕礼加返百元大礼
【推荐】华为云文字识别资源包重磅上市,1元万次限时抢购'''
text.insert(tkinter.INSERT, demo_text)  # demo_text avoids shadowing the built-in str
# Enter the main event loop
win.mainloop()
|
[
"it_hjl@163.com"
] |
it_hjl@163.com
|
9e374836569d4aa1541f6d838fc0cf6528594d4e
|
70c69365a7a5b86af601fbf071f221c85abef9fc
|
/tensorflow/python/tpu/api.py
|
540f425494a6a23291c5ec68fa93deda8c08911a
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] |
permissive
|
dyna999/tensorflow
|
3ed8f243b8cc8bb30a8c96dbd30e4b27be226f83
|
163c946f6827ce9e3ffa49e56fa65ce520bf6ea5
|
refs/heads/master
| 2022-03-28T03:00:28.650532
| 2022-03-25T22:47:49
| 2022-03-25T22:51:26
| 97,944,926
| 0
| 0
|
Apache-2.0
| 2022-01-07T22:43:29
| 2017-07-21T12:22:03
|
C++
|
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Modules that need to be exported to the API.
List TPU modules that aren't included elsewhere here so that they can be scanned
for tf_export decorations.
"""
# pylint: disable=unused-import
from tensorflow.python.tpu import bfloat16
from tensorflow.python.tpu import feature_column_v2
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding
from tensorflow.python.tpu import tpu_embedding_for_serving
from tensorflow.python.tpu import tpu_embedding_v2
from tensorflow.python.tpu import tpu_embedding_v2_utils
from tensorflow.python.tpu import tpu_hardware_feature
from tensorflow.python.tpu import tpu_optimizer
# pylint: enable=unused-import
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
2ee0088396c8b6299c327c0d2faefc2d53d9a962
|
f0cec246e2f30f6b4ee5656f1cb6406dd0f7879a
|
/thingsboard_client/models/rule_node_id.py
|
cad448df1ff67cb7c21896a0d4480fd703c9bb39
|
[] |
no_license
|
ascentio-tech/thingsboard-swagger-client
|
4e2f7c943e243ec8505c32dab0aa3d6cf1559105
|
1e8bf7664c281c29612fd5b44261f049ca7c44fd
|
refs/heads/master
| 2021-07-20T07:18:12.969459
| 2020-06-17T02:35:54
| 2020-06-17T02:35:54
| 184,322,192
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,040
|
py
|
# coding: utf-8
"""
Thingsboard REST API
For instructions how to authorize requests please visit <a href='http://thingsboard.io/docs/reference/rest-api/'>REST API documentation page</a>. # noqa: E501
OpenAPI spec version: 2.0
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleNodeId(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str'
}
attribute_map = {
'id': 'id'
}
def __init__(self, id=None): # noqa: E501
"""RuleNodeId - a model defined in Swagger""" # noqa: E501
self._id = None
self.discriminator = None
if id is not None:
self.id = id
@property
def id(self):
"""Gets the id of this RuleNodeId. # noqa: E501
:return: The id of this RuleNodeId. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this RuleNodeId.
:param id: The id of this RuleNodeId. # noqa: E501
:type: str
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RuleNodeId, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleNodeId):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"gmatheu@ascentio.com.ar"
] |
gmatheu@ascentio.com.ar
|
af566ecfbb44440a1ec164059caab18a664f4f21
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/fdlm_chordrec/chordrec-master/experiments/mlsp2016/create_crf_init_params.py
|
989cab4c857b5f28322c54b30be3b0271045634f
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,285
|
py
|
import numpy as np
import pickle
import os
from docopt import docopt
from glob import glob
from os.path import join, exists
USAGE = """
create_crf_init_params.py - creates initial crf parameters from a learned
gap convnet.
Usage:
create_crf_init_params.py <src_dir> <dst_dir>
Arguments:
<src_dir> directory containing the CNN parameter files for each fold
<dst_dir> directory where to store the initial CRF parameters
"""
args = docopt(USAGE)
param_files = glob(join(args['<src_dir>'], 'params*.pkl'))
if not exists(args['<dst_dir>']):
os.makedirs(args['<dst_dir>'])
for fold, pfile in enumerate(param_files):
    params = pickle.load(open(pfile, 'rb'))  # binary mode for pickle
    conv, beta, gamma, mean, inv_std = params[-5:]
    # Fold the batch-norm affine transform into the conv layer:
    # bias c = beta - mean*gamma*inv_std, weights W = conv*gamma*inv_std.
    c = (beta - mean * gamma * inv_std)
    W = (conv.reshape(conv.shape[:2]) * gamma[:, np.newaxis] *
         inv_std[:, np.newaxis]).T
pi = np.zeros_like(c)
tau = np.zeros_like(c)
A = np.zeros((len(beta), len(beta)))
dst_file = join(args['<dst_dir>'], 'crf_init_params_{}.pkl'.format(fold))
    pickle.dump([pi.astype(np.float32),
                 tau.astype(np.float32),
                 c.astype(np.float32),
                 A.astype(np.float32),
                 W.astype(np.float32)], open(dst_file, 'wb'))
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
9f531e0eaf6f4b7c54ba7928cb1cfa50714bfe90
|
49dbac90edc329416525825e842382470345641d
|
/tests/medium/test_num_sum.py
|
13f451161779c5dc0a928281932484afcf2ce90a
|
[] |
no_license
|
AleksTor/belhard_6_tasks
|
4a9ed9367978a2086531a9e31f2d1c71a17446b6
|
f5f77a622bbb020a01d251423cbb5f25cc6239f3
|
refs/heads/master
| 2023-07-26T19:10:22.957008
| 2021-09-13T02:19:28
| 2021-09-13T02:19:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
import pytest
from tasks.medium.num_sum import sum_of_numbers
@pytest.mark.parametrize(
"n, expected", (
(123, 6),
(1111111, 7),
(4, 4),
(935145, 27),
)
)
def test_sum_of_numbers(n, expected):
assert sum_of_numbers(n) == expected
|
[
"rineisky@gmail.com"
] |
rineisky@gmail.com
|
ef65d624ada369ecd60fd78c94bf7905807cefb2
|
2af5f89257e268b63d66a29287a6290c40125372
|
/Array/Leetcode_JZO_74_medium_二维数组的寻找.py
|
1fca5bff8d82796caede9ab8f694f070255a3fa1
|
[] |
no_license
|
lilitom/Leetcode-problems
|
7dea24a1d07b3ee49e2f90764330f3e695f4f64d
|
82901a31c558433478dd23026efda63cf4dae8e5
|
refs/heads/master
| 2020-12-02T22:39:34.812479
| 2018-10-21T11:38:31
| 2018-10-21T11:38:31
| 96,162,066
| 2
| 2
| null | 2017-07-05T13:06:18
| 2017-07-04T01:07:41
|
Python
|
GB18030
|
Python
| false
| false
| 1,164
|
py
|
'''
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted from left to right.
The first integer of each row is greater than the last integer of the previous row.
For example,
Consider the following matrix:
[
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
Given target = 3, return true.
'''
#South China University of Technology
#Author:Guohao
#coding=utf-8
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
"""
        if not matrix or not matrix[0]:
            return False
        rows, cols = len(matrix), len(matrix[0])
        low, high = 0, rows * cols - 1
        while low <= high:  # note: the equality case is required here
            mid = (low + high) // 2  # floor division works on both Python 2 and 3
            num = matrix[mid // cols][mid % cols]
            if num == target:
                return True
            if num < target:
                low = mid + 1
            else:
                high = mid - 1
        return False
# Reference:
# https://leetcode.com/problems/search-a-2d-matrix/description/
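# Worked check against the docstring example (illustrative):
#   Solution().searchMatrix([[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]], 3)
#   returns True; the same call with target 4 returns False.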
|
[
"ahguohao11@163.com"
] |
ahguohao11@163.com
|
ffc51a551202748e2b844689ae88726dc0f22a93
|
b310ea0f7a60786f7e17ac80154fcd5f2c3ffd29
|
/Entity/Entity.py
|
756f25faf1a3f34fce0ad9b1028e4b5531edb829
|
[
"MIT"
] |
permissive
|
twtrubiks/Flask-Migrate-Tutorial
|
464e475e7ed8bd5127420c819b4672dc15f9ba61
|
a659171eee4c84fd0ea1a1e39914c53d44177ea8
|
refs/heads/master
| 2021-01-11T22:42:04.620084
| 2017-02-15T09:49:45
| 2017-02-15T09:49:45
| 79,018,103
| 7
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 635
|
py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
class PictureDate(db.Model):
__tablename__ = 'PictureDate'
Id = db.Column(db.Integer, primary_key=True)
Uuid = db.Column(db.String(64), unique=True)
Title = db.Column(db.String(64))
Description = db.Column(db.String(128))
if __name__ == '__main__':
manager.run()
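# Typical migration workflow through Flask-Script's MigrateCommand (a usage
# sketch, assuming this file is saved as Entity.py):
#   python Entity.py db init     # create the migrations/ directory
#   python Entity.py db migrate  # autogenerate a revision from model changes
#   python Entity.py db upgrade  # apply the revision to app.db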
|
[
"twtrubiks@gmail.com"
] |
twtrubiks@gmail.com
|
97210ea3fec9b92d035e5e3c826327e710361fdc
|
3d0ae7c8693463faa11bacad8e6ea9d0d70b9eb1
|
/pytools/utilities/python/tklib_image_utils.py
|
fcfd1fa577b7d1e1f3361b64d84d1a8f276abf3b
|
[] |
no_license
|
stefie10/slu_hri
|
a76f79094bd1740676fec5d889411ba3b1d9dc26
|
50753379953e1ff822162eeab094cffe4a30f3e1
|
refs/heads/master
| 2022-12-14T01:07:51.522258
| 2020-08-31T00:50:12
| 2020-08-31T00:50:12
| 291,386,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,553
|
py
|
from copy import deepcopy
from scipy import *
def median_filter(myimage, filter_size):
new_map = deepcopy(myimage)
#for i in range(filter_size, len(myimage)-filter_size):
# for j in range(filter_size, len(myimage[0])-filter_size):
for i in range(len(myimage)):
for j in range(len(myimage[0])):
if(i <= filter_size or j < filter_size
or i >= len(myimage)-filter_size or j > len(myimage[0])-filter_size):
new_map[i,j]=0.0
continue
neigh = myimage[i-filter_size:i+filter_size+1,j-filter_size:j+filter_size+1]
v = median(neigh.flatten())
new_map[i,j] = v
return new_map
def skeletonize(b_map, max_num_iterations):
    # Alternate the two sub-passes of a Zhang-Suen-style thinning scheme
    # until a full iteration deletes no pixels.
    for i in range(max_num_iterations):
        print("iteration:", i)
if(i%2 == 0):
b_map, num_deleted = skeletonize_iter(b_map, True)
else:
b_map, num_deleted = skeletonize_iter(b_map, False)
if(num_deleted == 0):
return b_map
return b_map
def skeletonize_iter(binary_map, is_step1):
new_map = deepcopy(binary_map)
num_deleted = 0
for i in range(len(binary_map)):
for j in range(len(binary_map[0])):
#print len(binary_map), len(binary_map[0])
en = binary_map[i-1:i+2,j-1:j+2]
if(i == 0 or i == len(binary_map) - 1
or j == 0 or j == len(binary_map[0]) - 1):
new_map[i,j] = 0.0
elif(binary_map[i][j] == 0.0):
continue
elif(skeletonize_is_deleted(en, is_step1)):
num_deleted +=1
new_map[i,j] = 0.0
return new_map, num_deleted
def skeletonize_is_deleted(en, is_step1=True):
if(not len(en) == 3 or not len(en[0]) == 3):
print "not an eight neighbor"
exit(1)
else:
s = sum(en)
n = skeletonize_num_transitions(en)
if(is_step1):
p1 = en[0,1]*en[1,2]*en[2,1]
p2 = en[1,2]*en[2,1]*en[1,0]
else:
p1 = en[1,0]*en[0,1]*en[1,2]
p2 = en[0,1]*en[2,1]*en[1,0]
if(s <= 7 and s >= 3 and n == 1 and p1 == 0 and p2 == 0):
return True
return False
def skeletonize_num_transitions(en):
flat_en = concatenate((en[0,:], en[1:,2], [en[2,1], en[2,0], en[1,0]]))
zero_one_count = 0
for i in range(len(flat_en)):
if(flat_en[i-1] == 0 and flat_en[i] == 1):
zero_one_count += 1
return zero_one_count
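# Usage sketch (illustrative; assumes `image` is a 2-D numpy array of floats):
#   denoised = median_filter(image, filter_size=1)              # 3x3 window
#   skeleton = skeletonize((image > 0.5) * 1.0, max_num_iterations=50)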
|
[
"stefie10@alum.mit.edu"
] |
stefie10@alum.mit.edu
|
c442edea8bffc494b18b504b67a661651392627c
|
9adc810b07f7172a7d0341f0b38088b4f5829cf4
|
/experiments/ashvin/icml2020/hand/sparse/mc1.py
|
91b9a0c61f14b220fae8f8cf80a48e1e0efe16a4
|
[
"MIT"
] |
permissive
|
Asap7772/railrl_evalsawyer
|
7ee9358b5277b9ddf2468f0c6d28beb92a5a0879
|
baba8ce634d32a48c7dfe4dc03b123e18e96e0a3
|
refs/heads/main
| 2023-05-29T10:00:50.126508
| 2021-06-18T03:08:12
| 2021-06-18T03:08:12
| 375,810,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,473
|
py
|
"""
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
if __name__ == "__main__":
variant = dict(
num_epochs=1001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, ],
max_log_std=0,
min_log_std=-6,
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=True,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=True,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
),
num_exps_per_instance=1,
region='us-west-2',
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
'env': ["pen-sparse-v0", "relocate-sparse-v0", "hammer-sparse-v0", "door-sparse-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.1, 1, 10, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [0.0, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
trainer_kwargs = variant["trainer_kwargs"]
if not (trainer_kwargs["reparam_weight"] == 0 and trainer_kwargs["awr_weight"] == 0 and trainer_kwargs["bc_weight"] == 0):
variants.append(variant)
run_variants(experiment, variants, run_id=0)
|
[
"alexanderkhazatsky@gmail.com"
] |
alexanderkhazatsky@gmail.com
|
93dc4b460365f00007184adbfdfea6b17f6f08bb
|
066ee4df594a5dc90335d271b9d5a1b1e2a4d34c
|
/y/google-cloud-sdk/platform/gcutil/lib/google_api_python_client/oauth2client/django_orm.py
|
3607bb9442651745171963eb5d3b141bae8d4add
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] |
permissive
|
ychen820/microblog
|
a2d82447525325ec58285c2e5db58b79cceaca1b
|
d379afa2db3582d5c3be652165f0e9e2e0c154c6
|
refs/heads/master
| 2021-01-20T05:58:48.424357
| 2015-04-28T22:03:09
| 2015-04-28T22:03:09
| 32,948,331
| 0
| 2
|
BSD-3-Clause
| 2020-07-25T05:04:35
| 2015-03-26T19:45:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,782
|
py
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
if 'null' not in kwargs:
kwargs['null'] = True
super(CredentialsField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, oauth2client.client.Credentials):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
return base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
if 'null' not in kwargs:
kwargs['null'] = True
super(FlowField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, oauth2client.client.Flow):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value, connection, prepared=False):
if value is None:
return None
return base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from
the datastore.
This Storage helper presumes the Credentials
have been stored as a CredenialsField
on a db model class.
"""
def __init__(self, model_class, key_name, key_value, property_name):
"""Constructor for Storage.
Args:
model: db.Model, model class
key_name: string, key name for the entity that has the credentials
key_value: string, key value for the entity that has the credentials
property_name: string, name of the property that is an CredentialsProperty
"""
self.model_class = model_class
self.key_name = key_name
self.key_value = key_value
self.property_name = property_name
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
credential = None
query = {self.key_name: self.key_value}
entities = self.model_class.objects.filter(**query)
if len(entities) > 0:
credential = getattr(entities[0], self.property_name)
if credential and hasattr(credential, 'set_store'):
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
args = {self.key_name: self.key_value}
entity = self.model_class(**args)
setattr(entity, self.property_name, credentials)
entity.save()
def locked_delete(self):
"""Delete Credentials from the datastore."""
query = {self.key_name: self.key_value}
        self.model_class.objects.filter(**query).delete()
|
[
"ychen207@binghamton.edu"
] |
ychen207@binghamton.edu
|
3e8fa85fb7afd03c45f83c311743c88e5e962eb2
|
fd7406dcf3f898bd5b82a0ee4306e02c1d1b4970
|
/backup/testcase/operation_testcase/testWebBasicDataOperation.py
|
d50339d1639b7162ead6e2eccf1c21f0aedb5275
|
[] |
no_license
|
18786262315/zidonghuaapi
|
e5f979fc599f8bca08e7e5cfbd58943fe36b75d3
|
28145f13231e9df76a33894b82c0e552afb485fc
|
refs/heads/master
| 2020-03-26T01:48:13.018859
| 2018-08-11T12:32:51
| 2018-08-11T12:32:51
| 144,383,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
# -*- coding: utf8 -*-
import unittest
from AthenaTest.common.loadDatas import *
import sys
import importlib
importlib.reload(sys)
class testWebBasicDataOperation(unittest.TestCase):
def setUp(self):
self.datafile = '../../datacase/operation_datacase/web_basic_data_operation.xls'
def test_merchantInfoOperation(self):
        '''Merchant data test'''
datafile = self.datafile
loadDatas(self, datafile, u'merchantInfoOperation')
def test_warehouseInfoOperation(self):
        '''Receiving warehouse data test'''
datafile = self.datafile
loadDatas(self, datafile, u'warehouseInfoOperation')
def test_trunkInfoOperation(self):
        '''Logistics data test'''
datafile = self.datafile
loadDatas(self, datafile, u'trunkInfoOperation')
def test_branchInfoOperation(self):
        '''Service outlet data test'''
datafile = self.datafile
loadDatas(self, datafile, u'branchInfoOperation')
if __name__ == "__main__":
unittest.main()
|
[
"843092012@qq.com"
] |
843092012@qq.com
|
63ef7efb72cdbd6ab053b45a1e1b67b28b822140
|
3bb57eb1f7c1c0aced487e7ce88f3cb84d979054
|
/reliability/scripts/selectors/user_study_sgss/Run_All_Grammaticality.py
|
e9e24f48f1582714451889b5453d7e2feb86cc90
|
[] |
no_license
|
ghpaetzold/phd-backup
|
e100cd0bbef82644dacc73a8d1c6b757b2203f71
|
6f5eee43e34baa796efb16db0bc8562243a049b6
|
refs/heads/master
| 2020-12-24T16:41:21.490426
| 2016-04-23T14:50:07
| 2016-04-23T14:50:07
| 37,981,094
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 826
|
py
|
import os
#Generators:
generators = os.listdir('../../../substitutions/')
#generators.remove('all')
#Parameters:
ks = ['all']
criterions = ['gini']
splitters = ['best']
maxes = ['sqrt']
depths = ['None']
for c in criterions:
for s in splitters:
for m in maxes:
for d in depths:
for k in ks:
for generator in generators:
trainset = '/export/data/ghpaetzold/user_study_sgss/datasets/grammaticality_victor_all_optimistic.txt'
testset = '../../../substitutions/'
testset += generator + '/substitutions_void.txt'
output = '../../../substitutions/' + generator + '/'
output += 'substitutions_GrammaticalityUS_'+c+'_'+s+'_'+m+'_'+d+'_'+k+'.txt'
comm = 'nohup python Run_Grammaticality.py '+trainset+' '+k+' '+c+' '+s+' '+m+' '+d+' '+testset+' '+output+' &'
os.system(comm)
|
[
"ghpaetzold@outlook.com"
] |
ghpaetzold@outlook.com
|
6e0ebec1fc78e0835c6ecbdb7219e01a09843027
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_disobeying.py
|
6227a9b226dae0e7d81f3e25281a295fcd1d7404
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
#class header
class _DISOBEYING():
def __init__(self,):
self.name = "DISOBEYING"
	self.definitions = 'disobey'  # quoted: the bare name was an undefined reference
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['disobey']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
6e591a5e888418ed9a9c93a7560a9da591c60be4
|
5537eec7f43098d216d2b550678c8d10b2a26f09
|
/venv/tower/lib/python2.7/site-packages/awx/main/migrations/0033_v303_v245_host_variable_fix.py
|
fad3545b65c1e39e6a6594561e71708a6219f49d
|
[] |
no_license
|
wipro-sdx/Automation
|
f0ae1512b8d9d491d7bacec94c8906d06d696407
|
a8c46217d0fbe51a71597b5db87cbe98ed19297a
|
refs/heads/master
| 2021-07-08T11:09:05.314435
| 2018-05-02T07:18:54
| 2018-05-02T07:18:54
| 131,812,982
| 0
| 1
| null | 2020-07-23T23:22:33
| 2018-05-02T07:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 661
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from awx.main.migrations import _migration_utils as migration_utils
def update_dashed_host_variables(apps, schema_editor):
Host = apps.get_model('main', 'Host')
for host in Host.objects.filter(variables='---'):
host.variables = ''
host.save()
class Migration(migrations.Migration):
dependencies = [
('main', '0032_v302_credential_permissions_update'),
]
operations = [
migrations.RunPython(migration_utils.set_current_apps_for_migrations),
migrations.RunPython(update_dashed_host_variables),
]
|
[
"admin@example.com"
] |
admin@example.com
|
7540c67ee87e4363953e1932e1e8090ac2b4bef0
|
444079944e66d19dd8a01fe9834b43d7aa5fc427
|
/DRRQueue.py
|
0083c37ff00f549cd00373f0ef062d82bde4f67b
|
[
"Apache-2.0"
] |
permissive
|
dound/vns
|
6ea2a6560a87fc86c6533a7e4532d61af908894d
|
675c581fa18b57b801782079ed33f7b095898c7f
|
refs/heads/master
| 2016-09-05T23:17:28.078179
| 2011-04-30T20:40:25
| 2011-04-30T20:40:25
| 288,103
| 20
| 9
| null | 2020-02-01T01:25:56
| 2009-08-25T21:54:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,958
|
py
|
"""The DDRQueue module implements a Queue subclass for use with Deficit Round
Robin (DRR) like algorithms."""
from Queue import Queue, Empty
class DRRQueue(Queue):
"""Implements a queue for use with a DRR-like algorithm. Each queue tracks
"quanta" available to it (some unit of work - for the original DRR, this was
the number of bytes which could be sent). start_service() is used to
initiate a new round of service on the queue. task_done() should be called
each time a "job" from the queue is finished so that the appropriate quanta
can be deducted. When task_done() returns None, then no more quanta are
available for jobs from this queue this round.
Like the original, leftover quanta are only maintained if the queue is
non-empty. Unlike the original, jobs are run until the quanta available is
less than or equal to zero.
put() or put_nowait() can be used to add jobs to the queue.
Note: This queue can be used with ordinary round robin scheme by making the
quantum 1 and always calling task_done() with quanta_used=1.
"""
def __init__(self, maxsize=0, quantum=1):
"""Creates a simple JobQueue. Use put_nowait() to add jobs to the
queue."""
Queue.__init__(self, maxsize)
self.deficit_counter = 0 # number of "quanta" which are available for use
self.quantum = quantum # how much "quanta" to add each round of service
def start_service(self, quantum=None):
"""Allocates a new quantum to this queue and returns the next job from
this queue if sufficient quanta are available to this queue. The quanta
added will be self.quantum unless quantum is specified. The next job to
run is returned (if any)."""
# a new quantum of service is now available to this queue
self.deficit_counter += (self.quantum if quantum is None else quantum)
return self.__next_task()
def task_done(self, quanta_used=1):
"""Informs the queue that a job has been completed. quanta_used will be
subtracted from the amount of quanta available for jobs on this queue.
Returns the next job from this queue if sufficient quanta are available.
If sufficient quanta are not available or the queue is empty, then None
is returned."""
Queue.task_done(self)
self.deficit_counter -= quanta_used
return self.__next_task()
def __next_task(self):
"""Returns the next job from this queue if sufficient quanta are available.
If sufficient quanta are not available or the queue is empty, then None
is returned."""
if self.deficit_counter > 0:
try:
return self.get_nowait()
except Empty:
# when the queue empties, any leftover quanta are lost
self.deficit_counter = 0
# no jobs OR insufficient quanta are left
return None
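# Usage sketch (illustrative job tuples; quanta are arbitrary work units):
#   q = DRRQueue(quantum=500)
#   q.put_nowait(("job-a", 300))
#   q.put_nowait(("job-b", 400))
#   job = q.start_service()                  # grants one quantum, returns job-a
#   while job is not None:
#       name, cost = job
#       job = q.task_done(quanta_used=cost)  # deducts cost, may return next job
#   # job-a and job-b both run this round (500 - 300 > 0); the deficit then
#   # goes negative and task_done() returns None.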
|
[
"dgu@cs.stanford.edu"
] |
dgu@cs.stanford.edu
|
642e1e9eb6213ea382a825d9cf12a1a9552fa277
|
ad8b30544480ba1e5f5b1cb2dec2aa77a644e8d2
|
/SWEA/D2_5102_노드의거리.py
|
ffc560ef41043f390c247473811e0b1d849de0e1
|
[] |
no_license
|
hyunwoojeong123/Algorithm
|
79abc82d944ca60342a7f8b6fc44fac20ac55123
|
0baaf3222fbbec699ffbec5d4cc680067cf293fb
|
refs/heads/master
| 2023-07-10T18:28:51.934005
| 2021-08-18T01:51:23
| 2021-08-18T01:51:23
| 284,403,698
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
T = int(input())
for tc in range(1,T+1):
V,E = list(map(int,input().split()))
linked = [[False for j in range(V+1)] for i in range(V+1)]
    #print('connections:')
for e in range(E):
s,g = list(map(int,input().split()))
#print(s,g)
linked[s][g] = True
linked[g][s] = True
    #print('end of connections')
S,G = list(map(int,input().split()))
dist = [-1 for v in range(V+1)]
dist[S] = 0
q = []
q.append(S)
ans = 0
while q:
pv = q.pop(0)
#print(pv,dist[pv])
if pv == G:
ans = dist[G]
break
for nv in range(1,V+1):
#print('linked[{}][{}]'.format(pv,nv),linked[nv][pv])
if not linked[nv][pv]:
continue
#print('dist[{}] < dist[{}]'.format(nv,pv),dist[nv],dist[pv])
if dist[nv] != -1:
continue
q.append(nv)
dist[nv] = dist[pv]+1
print('#{} {}'.format(tc,ans))
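# A deque-based variant of the queue above (a sketch; the original uses a
# plain list, where pop(0) costs O(n) per dequeue):
#   from collections import deque
#   q = deque([S])
#   pv = q.popleft()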
|
[
"hw2621@daum.net"
] |
hw2621@daum.net
|
993e1f05b6625bd9b8a9e969e2a2bc0c853cb581
|
4552bce7f09cffe1770162130896c30e481f1989
|
/Tensorflow components and Linear Regression/Variables.py
|
16ac2f75f07572ea4151a43503576e4e67a70924
|
[] |
no_license
|
Marius-Juston/Tensorflow_Tutorial
|
b953745a046e068af2eb5e803180b9fdc67b9c45
|
bc2217f86f31b4722426899152c6355e973de8f5
|
refs/heads/master
| 2021-07-17T19:11:23.685737
| 2017-10-26T03:51:46
| 2017-10-26T03:51:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
# coding=utf-8
# In machine learning we will typically want a model that can take arbitrary inputs, such as the one above. To make
# the model trainable, we need to be able to modify the graph to get new outputs with the same input. Variables allow
# us to add trainable parameters to a graph. They are constructed with a type and initial value:
import tensorflow as tf
from Helper_Methods import open_tensorboard
W = tf.Variable(.3, dtype=tf.float32, name="weight")
b = tf.Variable(-.3, dtype=tf.float32, name="bias")
x = tf.placeholder(tf.float32)
linear_model = W * x + b
sess = tf.Session()
# Constants are initialized when you call tf.constant, and their value can never change. By contrast, variables are
# not initialized when you call tf.Variable. To initialize all the variables in a TensorFlow program,
# you must explicitly call a special operation as follows:
init = tf.global_variables_initializer()
sess.run(init)
# Since x is a placeholder, we can evaluate linear_model for several values of x simultaneously as follows:
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))
# to produce the output
# [ 0. 0.30000001 0.60000002 0.90000004]
open_tensorboard(__file__, sess)
|
[
"Marius.juston@hotmail.fr"
] |
Marius.juston@hotmail.fr
|
60bccdd0b292adf076dd2409f1527cfdfa80ee7b
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/q5jCspdCvmSjKE9HZ_10.py
|
d11123755af6974e993ef6ec8e17acbdeed16337
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
def lcm(a, b):
    # Brute-force LCM: scan upward from the smaller operand to a*b and
    # return the first value divisible by both.
    if a < b:
        a, b = b, a
    for i in range(b, a * b + 1):
        if i % a == 0 and i % b == 0:
            break
    return i
def lcm_of_list(numbers):
m=lcm(numbers[0],numbers[1])
for i in range(2,len(numbers)):
m=lcm(m,numbers[i])
return m
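# Hand-checked example: lcm_of_list([4, 6, 8]) == 24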
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
6d8aecc8c41ff3f7b51eb4678cc55f2310d9c393
|
ed7e61c8eef7fb2213adeb67557d605470c17fb3
|
/ML/50-mlps/28-keras-cnn-big-filters/create_validation_curve.py
|
f39496852849e0b8a1383a08607ff10c06e7c771
|
[] |
no_license
|
MartinThoma/algorithms
|
535840224323822f2ea6b7dd6f82a0fdd22a0ff9
|
a251e9599b685dbf89c891f02d20fefd8538ead5
|
refs/heads/master
| 2023-02-23T17:58:10.913634
| 2023-02-21T05:58:59
| 2023-02-21T05:58:59
| 4,939,076
| 241
| 126
| null | 2023-02-16T05:16:23
| 2012-07-07T16:07:23
|
Python
|
UTF-8
|
Python
| false
| false
| 780
|
py
|
#!/usr/bin/env python
"""Visualize validation curve."""
import matplotlib.pyplot as plt
import pandas as pd
Y_SCALE_FACTOR = 100
# Prepare dataframe
df = pd.read_csv('log.csv', sep=';')
df = df[['epoch', 'acc', 'val_acc']]
df[['acc', 'val_acc']] = df[['acc', 'val_acc']] * Y_SCALE_FACTOR
df = df.set_index('epoch').rename(columns={'acc': 'Training Accuracy',
'val_acc': 'Validation Accuracy'})
print(df)
# Plot
fig, ax = plt.subplots()
df.plot.line(ylim=(0.75 * Y_SCALE_FACTOR, 1.00 * Y_SCALE_FACTOR),
title='Validation Curve',
ax=ax)
ax.minorticks_on() # required for minor grid
ax.grid()
ax.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.savefig('validation-curve.png', dpi=300)
|
[
"info@martin-thoma.de"
] |
info@martin-thoma.de
|
1cc80ab7f1cc14d254b7aa8d4c1063259b1c3fbb
|
45b180004c441663bd223219f8edef8c82481be1
|
/color.py
|
9c26e029d1b9faa98fc5d9d1338fca7c4d1aa9db
|
[] |
no_license
|
bradbann/mypy
|
a679e22fdd04525faf32a73934d813a45af1092f
|
8bf6234be438aaf3ce2b69e4c10b2ce84eaccb98
|
refs/heads/master
| 2023-02-19T14:21:59.362385
| 2020-12-31T09:35:11
| 2020-12-31T09:35:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from colorama import Fore, Back, Style

print(Fore.BLACK + 'some black text')
print(Back.BLUE + 'and with a blue background')
print(Style.DIM + 'and in dim text')
print(Style.RESET_ALL)
print('back to normal now')
|
[
"127575708@qq.com"
] |
127575708@qq.com
|
88e97776f2dcc40e428edb17c7c0b4271f3e1855
|
4d995c70ab763e0422ed52e76a3aef9a48208023
|
/tests/test_data_processors.py
|
e2b6a855b09d563ab4023b41385a7ba32cb9a542
|
[] |
no_license
|
QualiSystems/cloudshell-autodiscovery
|
b254008c3422b525cdb72d4c2716ef5771a38b8a
|
9ed9e8e74fcf12cb7f7c7fd945f8bc4fae6d5755
|
refs/heads/master
| 2021-06-03T17:45:36.975215
| 2019-05-30T13:39:30
| 2019-05-30T13:44:48
| 83,421,029
| 1
| 1
| null | 2021-03-25T22:03:43
| 2017-02-28T10:44:13
|
Rich Text Format
|
UTF-8
|
Python
| false
| false
| 7,409
|
py
|
import unittest
import mock
from autodiscovery.data_processors import JsonDataProcessor
class TestJsonDataProcessor(unittest.TestCase):
def setUp(self):
self.filename = "example.json"
self.logger = mock.MagicMock()
self.json_data_processor = JsonDataProcessor(logger=self.logger)
@mock.patch("autodiscovery.data_processors.config")
@mock.patch("autodiscovery.data_processors.utils")
def test_prepare_file_path(self, utils, config):
full_path = mock.MagicMock()
utils.get_full_path.return_value = full_path
# act
result = self.json_data_processor._prepare_file_path(filename=self.filename)
# verify
self.assertEqual(result, full_path)
utils.get_full_path.assert_called_once_with(config.DATA_FOLDER, self.filename)
@mock.patch("autodiscovery.data_processors.json")
@mock.patch("autodiscovery.data_processors.open")
def test_save(self, open, json):
data = mock.MagicMock()
file_path = mock.MagicMock()
self.json_data_processor._prepare_file_path = mock.MagicMock(return_value=file_path)
# act
self.json_data_processor._save(data=data, filename=self.filename)
# verify
self.json_data_processor._prepare_file_path.assert_called_once_with(self.filename)
open.assert_called_once_with(file_path, "w")
json.dump.assert_called_once_with(data,
open().__enter__(),
indent=4,
sort_keys=True)
@mock.patch("autodiscovery.data_processors.json")
@mock.patch("autodiscovery.data_processors.open")
def test_load(self, open, json):
file_path = mock.MagicMock()
data = mock.MagicMock()
json.load.return_value = data
self.json_data_processor._prepare_file_path = mock.MagicMock(return_value=file_path)
# act
result = self.json_data_processor._load(filename=self.filename)
# verify
self.assertEqual(result, data)
self.json_data_processor._prepare_file_path.assert_called_once_with(self.filename)
open.assert_called_once_with(file_path, "r")
json.load.assert_called_once_with(open().__enter__())
@mock.patch("autodiscovery.data_processors.config")
def test_save_vendor_enterprise_numbers(self, config):
data = mock.MagicMock()
self.json_data_processor._save = mock.MagicMock()
# act
self.json_data_processor.save_vendor_enterprise_numbers(data=data)
# verify
self.json_data_processor._save.assert_called_once_with(data=data,
filename=config.VENDOR_ENTERPRISE_NUMBERS_FILE)
@mock.patch("autodiscovery.data_processors.config")
def test_load_vendor_enterprise_numbers(self, config):
"""Check that method will properly merge initial vendors config with the additional one"""
data = mock.MagicMock()
self.json_data_processor._load = mock.MagicMock(return_value=data)
# act
result = self.json_data_processor.load_vendor_enterprise_numbers()
# verify
self.assertEqual(result, data)
self.json_data_processor._load.assert_called_once_with(filename=config.VENDOR_ENTERPRISE_NUMBERS_FILE)
def test_merge_vendors_data(self):
"""Check that method will properly merge initial vendors config with the additional one"""
conf_data = [
{
"name": "Cisco",
"default_os": "IOS",
"operation_systems": [
{
"name": "IOS",
"default_model": "switch",
},
{
"name": "IOSXR",
"default_model": "router",
}
]
},
{
"name": "Raritan",
"default_prompt": "#",
"family_name": "PDU",
"model_name": "Raritan PDU",
"driver_name": "Raritan PDU Driver"
}
]
additional_data = [
{
"name": "Cisco",
"default_os": "IOS-EXTENDED",
"operation_systems": [
{
"name": "IOS-EXTENDED",
"default_model": "switch",
},
{
"name": "IOSXR",
"default_model": "switch",
}
]
},
{
"name": "Huawei",
"default_os": "VPR",
"operation_systems": [
{
"name": "VRP",
"default_model": "switch",
}
]
},
]
expected_data = [
{
"name": "Cisco",
"default_os": "IOS-EXTENDED",
"operation_systems": [
{
"name": "IOS-EXTENDED",
"default_model": "switch",
},
{
"name": "IOSXR",
"default_model": "switch",
},
{
"name": "IOS",
"default_model": "switch",
}
]
},
{
"name": "Huawei",
"default_os": "VPR",
"operation_systems": [
{
"name": "VRP",
"default_model": "switch",
}
]
},
{
"name": "Raritan",
"default_prompt": "#",
"family_name": "PDU",
"model_name": "Raritan PDU",
"driver_name": "Raritan PDU Driver"
},
]
# act
result = self.json_data_processor._merge_vendors_data(conf_data, additional_data)
# verify
self.assertEqual(result, expected_data)
@mock.patch("autodiscovery.data_processors.config")
@mock.patch("autodiscovery.data_processors.models")
def test_load_vendor_config(self, models, config):
"""Check that method will return VendorDefinitionCollection model"""
vendors_collection = mock.MagicMock()
models.VendorDefinitionCollection.return_value = vendors_collection
additional_vendors_data = mock.MagicMock()
vendors_data = mock.MagicMock()
self.json_data_processor._merge_vendors_data = mock.MagicMock()
self.json_data_processor._load = mock.MagicMock(return_value=vendors_data)
# act
result = self.json_data_processor.load_vendor_config(additional_vendors_data=additional_vendors_data)
# verify
self.assertEqual(result, vendors_collection)
self.json_data_processor._load.assert_called_once_with(filename=config.VENDORS_CONFIG_FILE)
self.json_data_processor._merge_vendors_data.assert_called_once_with(conf_data=vendors_data,
additional_data=additional_vendors_data)
|
[
"anton.p@qualisystems.com"
] |
anton.p@qualisystems.com
|
5ca3a3da95dfa01675de20c2a2689d7b0c4568a5
|
b9faae037d6c2fb69406aa1f2e86f42d3a6225ce
|
/data/snippets/github.com/kkkmmu/useful_script/python/function.py
|
07ead533d318ceefbec699c872dfd32f30dc60cf
|
[] |
no_license
|
lovefcaaa/snk.dev-assistant
|
9eaedd448ee2c3f532aa7acdbee1ff1d91f9c07f
|
20ffcdddba19102348d75235f7d6d557c2386e69
|
refs/heads/master
| 2023-07-09T14:12:55.141283
| 2018-06-25T14:18:49
| 2018-06-25T14:18:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,842
|
py
|
print(abs(-110))
print(max(1, 5, -10, 10))
print(int("123"))
print(int(1.24))
print(float("1.2345"))
print(str(1.23))
print(str(1000))
print(bool(100))
print(bool(-1000))
print(bool(0))
print(bool(None))
a = abs
print(a(-1234))
print(hex(1234))
def mabs(x):
if x >= 0:
return x
else:
return -x
print(mabs(-11111))
def nop():
pass
print(nop())
def mabs2(x):
if not isinstance(x, (int, float)):
raise TypeError("Bad operand type")
if x >= 0:
return x
else:
return -x
print(mabs2(-12343))
import math
def move(x, y, step, angle=0):  # angle is expected in radians
nx = x + step * math.cos(angle)
ny = y - step * math.sin(angle)
return nx, ny
print(move(10, 10, 1, 30))
z = move(16, 16, 2, 45)
print(z)
def power(x, n=2):
s = 1
while n > 0:
n = n-1
s = s*x
return s
print(power(2))
print(power(2, 4))
def defaultp(a, b, c=1, d=2):
print(a, b, c, d)
defaultp(1, 2)
defaultp(1, 2, d=10)
def calc(*numbers):
sum = 0
for n in numbers:
sum += n*n
return sum
print(calc(1,2,3,4,5))
print(calc(1,2))
print(calc(1))
print(calc())
nums = [4, 5, 5, 6, 8]
print(calc(*nums))
def person(name, age, **kv):
print("name:", name, "age:", age, "other:", kv)
person("Liwei", 30)
person("Liwei", 30, city="Beijing")
person("Liwei", 30, city="Beijing", gender="M")
extra = {"City": "Beijing", "job": "polan"}
person("Jack", 100, **extra)
def person2(name, age, *, city, job):
print(name, age, city, job)
#person2("jack", 24)
#person2("jack", 24, "Beijing", "polan")
person2("jack", 24, city="Beijin", job="polan")
def fact(n):
if n == 1:
return 1
return n * fact(n -1)
print(fact(10))
def f (x):
return x * x
r = map(f, [1,2,3,4,5,6,7,8,9,10])
print(r)
print(list(r))
print(list(map(str, [1,2,3,4,5,6,7,8,9])))
|
[
"rosco_pecoltran@msn.com"
] |
rosco_pecoltran@msn.com
|
a892c3b8705ffa0ffda05544d3ee3374853c0d4a
|
65f548bbeb62d7f2476351bda7b723dab216ce08
|
/train_scripts/trainScriptNN.py
|
aae161d89eeaafeb65bc1a94037985b9c6919291
|
[] |
no_license
|
StefanPushkov/TestDocker
|
f6148db20e2f50694528f88a837e4c517e5a8afe
|
aecdd0b5207e9f93950faa51535a888d62bb67c1
|
refs/heads/master
| 2022-11-21T08:44:00.607204
| 2020-07-20T10:46:39
| 2020-07-20T10:46:39
| 280,253,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,911
|
py
|
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from torch.utils import data as data_utils
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
import torch.nn.functional as F
# Read data
df = pd.read_csv("./data/train.csv/train.csv")
df_test = pd.read_csv('./data/test.csv/test.csv')
print('-'*15, "Data loaded")
# Replace inf values with np.nan, then replace nan with 0
df.replace([np.inf, -np.inf], np.nan,inplace=True)
df = df.fillna(0)
df_test.replace([np.inf, -np.inf], np.nan,inplace=True)
df_test = df_test.fillna(0)
# Features
X = df.drop(['sample_id', 'y'], axis=1)
X_submission = df_test.drop(['sample_id'], axis=1)
# Labels
y = df['y']
# Features normalization
features_norm = StandardScaler()
X_std = features_norm.fit_transform(X)
X_submission_std = features_norm.fit_transform(X_submission)
# Split data in train/test
X_train, x_test, Y_train, y_test = train_test_split(X_std, y, test_size=0.2, random_state=42)
# To torch tensor: Train
X_train_tensor = torch.tensor(X_train, dtype=torch.float)
Y_train_tensor = torch.tensor(Y_train.values).flatten()
# Test
x_test_tensor = torch.tensor(x_test, dtype=torch.float)
y_test_tensor = torch.tensor(y_test.values).flatten()
# Create train dataloader
batch_size = 128
train_dataset = data_utils.TensorDataset(X_train_tensor, Y_train_tensor)
train_loader = data_utils.DataLoader(dataset = train_dataset, batch_size = batch_size, shuffle = True)
# Create eval dataloader
eval_dataset = data_utils.TensorDataset(x_test_tensor, y_test_tensor)
eval_loader = data_utils.DataLoader(dataset = eval_dataset, batch_size = batch_size, shuffle = True)
# Class must extend nn.Module
class MyClassifier(nn.Module):
def __init__(self):
super(MyClassifier,self).__init__()
# Our network consists of 3 layers. 1 input, 1 hidden and 1 output layer
self.fc1 = nn.Linear(1612,200)
self.fc2 = nn.Linear(200,100)
self.layer_out = nn.Linear(100,1)
self.dropout = nn.Dropout()
self.bn0 = nn.BatchNorm1d(1612)
self.bn1 = nn.BatchNorm1d(200)
self.bn_out = nn.BatchNorm1d(100)
def forward(self,x):
# Batch normalization
x = self.bn0(x)
# This applies Linear transformation to input data with non-linear activation
x = F.relu(self.fc1(x))
# Dropout
x = self.dropout(x)
x = self.bn1(x)
x = F.relu(self.fc2(x))
x = self.dropout(x)
x = self.bn_out(x)
#This applies linear transformation to produce output data
x = self.layer_out(x)
return x
# This function takes an input and predicts the class (0 or 1)
def predict(self, x):
with torch.no_grad():
y_pred = self(x)  # call self, not the global `model`
y_pred_tag = torch.round(torch.sigmoid(y_pred))
return y_pred_tag.double()  # avoid wrapping an existing tensor in torch.tensor()
def predict_proba(self, x):
with torch.no_grad():
y_pred = self(x)
prob = torch.sigmoid(y_pred)
return prob.double()
def train_model(model, optim, criterion, train_dl):
model.train()
total = 0
sum_loss = 0
for x, y in train_dl:
batch = y.shape[0]
output = model(x)
loss = criterion(output, y.unsqueeze(1))
optim.zero_grad()
loss.backward()
# Clip gradient
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
optim.step()
# Accumulate epoch loss
total += batch
sum_loss += batch*(loss.item())
# print("Batch loss: ", batch*(loss.item()))
return sum_loss/total
if __name__ == '__main__':
# Initialize the model
model = MyClassifier()
# Define loss criterion
criterion = nn.BCEWithLogitsLoss()
# Define the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
print(15*'-', 'Model started training')
#Number of epochs
epochs = 150
#List to store losses
train_losses = []
for i in range(epochs):
epoch_loss = train_model(model=model, optim=optimizer, criterion=criterion, train_dl=train_loader)
train_losses.append(epoch_loss)
if i % 10 == 0:
print("Epoch {0}, Loss {1}".format(i+1, epoch_loss))
auc_sc = roc_auc_score(y_test_tensor.long(), model.predict_proba(x_test_tensor))
print('-'*15, "AUC score Network = ", auc_sc)
prob_voting = model.predict_proba(x_test_tensor)
# Plotting ROC-AUC
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test_tensor.numpy(), prob_voting.numpy())
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.figure(figsize=(10,10))
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate,true_positive_rate, color='red',label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],linestyle='--')
plt.axis('tight')
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig('./aucNN.jpg')
# Convert numpy to torch tensor and make prediction
X_submission_tensor = torch.tensor(X_submission_std, dtype=torch.float)
a = model.predict_proba(X_submission_tensor).numpy()
# Create submission
submission = pd.DataFrame(df_test["sample_id"], index=None)
submission["y"] = a
submission.to_csv("./Network_sabmission.csv", sep=",", index=False)
print("Submission created (NN).")
|
[
"stefan.pushkov@icloud.com"
] |
stefan.pushkov@icloud.com
|
6634da207df73bf435953807b98cb2f9663aeea0
|
3d705ec48c94373817e5f61d3f839988910431e3
|
/misc/tool/player/create_m3u8.py
|
9a477d1647cb3a6a26eaa517614fc0aad8c674d8
|
[] |
no_license
|
namesuqi/zeus
|
937d3a6849523ae931162cd02c5a09b7e37ebdd8
|
3445b59b29854b70f25da2950016f135aa2a5204
|
refs/heads/master
| 2022-07-24T14:42:28.600288
| 2018-03-29T08:03:09
| 2018-03-29T08:03:09
| 127,256,973
| 0
| 0
| null | 2022-07-07T22:57:57
| 2018-03-29T07:53:16
|
Python
|
UTF-8
|
Python
| false
| false
| 475
|
py
|
# coding=utf-8
"""
Continuously append m3u8 sub-segments to a file;
a simulator used for testing the live-stream player.
__author__ = 'zengyuetian'
"""
from random import randint
import time
if __name__ == "__main__":
for i in range(124):
time.sleep(randint(1, 4))
with open("zeus.m3u8", "a") as f:
url = "http://buddiestv.qiniudn.com/sUWPWh5odxh9vtorJ2tsEue__hQ=/lsmeSvostHYW3MuybV2NyHNYoRqS/seg{0}\n".format(str(i))
f.write(url)
f.flush()
|
[
"suqi_name@163.com"
] |
suqi_name@163.com
|
86274f2d6be5ec0e61d7bdcfd74fd71805bf9bb9
|
0b3024565732948d36cb1ada431274db7fb2d6f1
|
/PublicReference/config.py
|
9559073ec5f12b9d486615af8f062953b627adf7
|
[] |
no_license
|
Luyaojun/DNFCalculating
|
adaa5b761e34985661d0c994ad52a63f092c6e62
|
02d701ec63ed083a8b7043229dfceb758e426cae
|
refs/heads/master
| 2022-11-17T11:04:00.245554
| 2020-07-18T07:07:32
| 2020-07-18T07:07:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
import configparser
conf = configparser.ConfigParser()
conf.read('./ResourceFiles/set.ini', encoding='utf-8')
# Window scaling
try:
窗口显示模式 = conf.getint('窗口显示', 'value')
except:
窗口显示模式 = 0
# Skip equipment stats in the calculation; weapon type defaults to the first option for the class
try:
调试开关 = conf.getint('调试开关', 'value')
except:
调试开关 = 0
# Export build combinations and damage data to CSV
try:
输出数据 = conf.getint('输出数据', 'value')
except:
输出数据 = 0
# Controls whether the '夜语黑瞳' weapon is shown on the first page
try:
普雷武器显示 = 1 - conf.getint('夜语黑瞳', 'value')
except:
普雷武器显示 = 1
# Each weapon appears only once in the ranking
try:
武器排名 = conf.getint('武器排名', 'value')
except:
武器排名 = 0
# Monster attributes
try:
防御输入 = conf.getint('怪物属性', '防御')
火抗输入 = conf.getint('怪物属性', '火抗')
冰抗输入 = conf.getint('怪物属性', '冰抗')
光抗输入 = conf.getint('怪物属性', '光抗')
暗抗输入 = conf.getint('怪物属性', '暗抗')
except:
防御输入 = 443243
火抗输入 = 0
冰抗输入 = 0
光抗输入 = 0
暗抗输入 = 0
# Weapon index
try:
武器序号 = conf.getint('武器序号', 'value')
except:
武器序号 = -1
# '天劫' effects
try:
天劫减防 = conf.getint('天劫', '减防生效')
天劫减抗 = conf.getint('天劫', '减抗生效')
except:
天劫减防 = 0
天劫减抗 = 0
# '战术之王的御敌' set
try:
战术白字 = conf.getint('战术之王的御敌', '套装附加') / 100
except:
战术白字 = 0.40
# '天御之灾' set
try:
天御套装 = conf.getint('天御之灾', '套装属性')
except:
天御套装 = 0
# '千蛛碎影' defense reduction
try:
千蛛减防 = conf.getint('千蛛碎影', '减防生效')
except:
千蛛减防 = 0
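# A minimal sketch: every block above repeats one pattern (read an int
# from set.ini, fall back to a default). A hypothetical helper that
# factors it out; 'conf' is the ConfigParser instance created at the top:
def read_int(section, option, default):
    try:
        return conf.getint(section, option)
    except Exception:
        return default

# e.g.: 武器排名 = read_int('武器排名', 'value', 0)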
|
[
"wxh_email@yeah.net"
] |
wxh_email@yeah.net
|
23cc18ee51c728fe6203cb595154805a17389974
|
f5d2a1459c81eb23a745bd63f41ef980c41ea0a4
|
/ZG-PhaseFour/code/website/pcauto/PcautoComments.py
|
3d29bfc7086cc8fd34cc48ac7d373bdd125bca66
|
[] |
no_license
|
ErBingBing/django-tonado-crawler
|
6800bb0269e99e2454fb0a9079175ffe9d4d0a0b
|
db31b4cdf7ecc509f1a87aa325621943df825e98
|
refs/heads/master
| 2021-08-22T11:30:08.419583
| 2017-11-30T04:04:40
| 2017-11-30T04:04:40
| 112,562,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,027
|
py
|
# encoding=utf-8
##############################################################################################
# @file:PcautoComments.py
# @author:Ninghz
# @date:2016/11/18
# @note:File that fetches comments from the Pacific Auto (pcauto) site
# @changelog
# @author:yongjicao
# @date:2017/9/12
# @note:Changed comment storage to MySQL
##############################################################################################
import json
import datetime
import traceback
import math
from utility.regexutil import RegexUtility
from website.common.comments import SiteComments
from storage.cmtstorage import CMTStorage
from storage.newsstorage import NewsStorage
from utility.gettimeutil import TimeUtility
from log.spiderlog import Logger
##############################################################################################
# @class:PcautoComments
# @author:Ninghz
# @date:2016/11/18
# @note:Class that fetches comments from the pcauto site; inherits from SiteComments
##############################################################################################
class PcautoComments(SiteComments):
COMMENTS_URL = 'http://cmt.pcauto.com.cn/action/comment/list_new_json.jsp?urlHandle=1&url=%s&pageNo=%d&pageSize=%d'
PAGE_SIZE = 50.0
STEP_1 = None
STEP_2 = 2
STEP_3 = 3
def __init__(self):
SiteComments.__init__(self)
self.r = RegexUtility()
##############################################################################################
# @functions:process
# @param:parameters passed in by the common module (object url, original url, current step, custom params)
# @return:Step1: url of the first page of comments
#         Step2: urls of all comment pages
#         Step3: extracted comments and the creation time of the newest comment
# @author:Ninghz
# @date:2016/11/16
# @note:Step1: build the url that returns the total comment count from the incoming url and pass it to the common module
#       Step2: read the total page count from the returned content, build every comment url and pass them to the common module
#       Step3: extract the comments and the newest comment's creation time from the returned content and pass them on
##############################################################################################
def process(self, params):
try:
if params.step is None:
# Build the url of the first page of comments
comments_url = PcautoComments.COMMENTS_URL % (params.originalurl, 1, PcautoComments.PAGE_SIZE)
# Tell the download platform to fetch the first page of comments for this url
self.storeurl(comments_url, params.originalurl, PcautoComments.STEP_2)
# Parse the first page of comments, then loop to build every comment url
elif params.step == PcautoComments.STEP_2:
# Parse the JSON payload of the comments
comments = json.loads(params.content)
# Total number of comments
comments_count = int(comments['total'])
NewsStorage.setcmtnum(params.originalurl, comments_count)
if comments_count == 0:
return
# Incremental check: skip what is already stored
cmtnum = CMTStorage.getcount(params.originalurl, True)
if cmtnum >= comments_count:
return
page_num = int(math.ceil(float(comments_count - cmtnum) / self.PAGE_SIZE))
if page_num >= self.maxpages:
page_num = self.maxpages
# Build each page's comment url and submit it to the download platform
for page in range(1, page_num + 1, 1):
commentUrl = PcautoComments.COMMENTS_URL % (params.originalurl, page, PcautoComments.PAGE_SIZE)
self.storeurl(commentUrl, params.originalurl, PcautoComments.STEP_3)
# Parse the comment data
elif params.step == PcautoComments.STEP_3:
commentsinfo = json.loads(params.content)
comments = []
for comment in commentsinfo['data']:
updatetime = comment['createTime']
content = comment['content']
curtime = TimeUtility.getuniformtime(updatetime)
try:
nick = comment['nickName']
except:
nick = 'anonymous'
if not CMTStorage.exist(params.originalurl, content, curtime, nick):
CMTStorage.storecmt(params.originalurl, content, curtime, nick)
# if URLStorage.storeupdatetime(params.originalurl, updatetime):
# cmti = CommentInfo()
# cmti.content = comment['content']
# comments.append(cmti)
# if len(comments) > 0:
# self.commentstorage.store(params.originalurl, comments)
except Exception, e:
traceback.print_exc()
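# A minimal sketch: the incremental paging in STEP_2 reduces to one
# formula. A standalone illustration (all numbers below are made up):
import math
total, stored, page_size, max_pages = 1234, 200, 50.0, 100
pages = min(int(math.ceil((total - stored) / page_size)), max_pages)
# -> 21: fetch pages 1..21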
|
[
"913702626@qq.com"
] |
913702626@qq.com
|
dc0cd93f35f2c1313c250efce57156a66fbf52bb
|
4bf3aaf77c309a489100b98a8c03532632df152c
|
/Python/동빈북/10주차2/트리의지름.py
|
5a204ed65235c2f7a796144aba287d2f73437648
|
[] |
no_license
|
murane/PS
|
7fbfc54d962231949efc67f1a35c4b0119de0780
|
e938c6c503aeac08bf65e1e66709172b0e5da6ef
|
refs/heads/master
| 2023-05-06T22:51:54.105811
| 2021-05-30T03:34:53
| 2021-05-30T03:34:53
| 293,699,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
import sys
sys.setrecursionlimit(300000)  # the recursive dfs below can exceed the default limit on deep trees
r=sys.stdin.readline
V=int(r())
tree=[dict() for _ in range(V+1)]
for _ in range(V):
line=list(map(int,r().split()))
target=line[0]
line=line[1:-1]
for i in range(len(line)//2):
tree[target][line[i*2]]=line[i*2+1]
tree[line[i*2]][target]=line[i*2+1]
leng=0
pole=0
visit=[False]*(V+1)
def dfs(V,Cur):
global leng
global pole
visit[V]=True
if leng<Cur:
leng=Cur
pole=V
for Node,W in tree[V].items():
if not visit[Node]:
visit[Node]=True
dfs(Node,Cur+W)
dfs(1,0)
leng=0
visit=[False]*(V+1)
dfs(pole,0)
print(leng)
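# Note: this is the classic two-pass diameter technique -- dfs(1, 0)
# finds the farthest node ('pole') from an arbitrary start, and
# dfs(pole, 0) measures the farthest distance from that endpoint, which
# is the diameter. Tiny check: for the weighted path 1-(4)-2-(5)-3,
#   tree = [{}, {2: 4}, {1: 4, 3: 5}, {2: 5}]  -> prints 9.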
|
[
"murane@naver.com"
] |
murane@naver.com
|
381983aa73140a1fb84cd80fcf777075c6e1922f
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-1/vse-naloge-brez-testov/DN7-M-175.py
|
9f07005e81b8ff32835a51038b44d55f2690253c
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036
| 2018-12-06T22:56:38
| 2018-12-06T22:56:38
| 126,011,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,198
|
py
|
# I kindly donate this function to everyone who will program in one line. :)
# How to use it is in the instructions. Whoever doesn't need it can ignore it.
import collections
def vsa_polja(s, v):
"""
Generiraj vse koordinate (x, y) za polje s podano širino in višino
Args:
s (int): širina
v (int): višina
Returns:
generator parov polj
"""
return ((x, y) for x in range(s) for y in range(v))
########################
# For grade 6
def sosedov(x, y, mine):
"""
Vrni število sosedov polja s koordinatami `(x, y)` na katerih je mina.
Polje samo ne šteje.
Args:
x (int): koordinata x
y (int): koordinata y
mine (set of tuple of int): koordinate min
Returns:
int: število sosedov
"""
def sosedov(x, y, mine):
k = 0
for x1, y1, in mine:
if abs(x - x1) <= 1 and abs(y - y1) <= 1 and (x, y) != (x1, y1):
k += 1
return k
def najvec_sosedov(mine, s, v):
"""
Vrni koordinati polja z največ sosednjih min
Args:
mine (set of (int, int)): koordinate min
s (int): širina polja
v (int): višina polja
Returns:
tuple of int: koordinati polja
"""
def najvec_sosedov(mine, s, v):
sl = collections.defaultdict(list)
for x, y in vsa_polja(s, v):
sl[sosedov(x, y, mine)].append((x, y))
return sl[max(sl)][0]
def brez_sosedov(mine, s, v):
"""
Vrni množico koordinat polj brez min na sosednjih poljih. Polje samo lahko
vsebuje mino.
Args:
mine (set of tuple of int): koordinate min
s (int): širina polja
v (int): višina polja
Returns:
set of tuple: polja brez min na sosednjih poljih
"""
def brez_sosedov(mine, s, v):
sl = collections.defaultdict(set)
for x, y in vsa_polja(s, v):
sl[sosedov(x, y, mine)].add((x, y))
return sl[0]
def po_sosedih(mine, s, v):
"""
Vrni slovar, katerega ključi so možna števila sosednjih polj z minami
(torej števila od 0 do 8), vrednosti pa množice koordinat polj s toliko
sosedami.
Args:
mine (set of tuple of int): koordinate min
s (int): širina polja
v (int): višina polja
Returns:
dict: (glej zgoraj)
"""
def po_sosedih(mine, s, v):
sl = {}
for i in range(9):
sl[i] = set()
for x, y in vsa_polja(s, v):
sl[sosedov(x, y, mine)].add((x, y))
return sl
########################
# For grade 7
def dolzina_poti(pot):
"""
Vrni dolžino podane poti, vključno z vmesnimi polji.
Args:
pot (list of tuple): seznam koordinat polj
Returns:
int: dolžina poti
"""
def dolzina_poti(pot):
k = 0
for (x0, y0), (x1, y1) in zip(pot, pot[1:]):
if x0 != x1:
k += abs(x1 - x0)
if y0 != y1:
k += abs(y1 - y0)
return k
def varen_premik(x0, y0, x1, y1, mine):
"""
Vrni `True`, če je pomik z (x0, y0) and (x1, y1) varen, `False`, če ni.
Args:
x0 (int): koordinata x začetnega polja
y0 (int): koordinata y začetnega polja
x1 (int): koordinata x končnega polja
y1 (int): koordinata y končnega polja
mine (set of tuple of int): koordinate min
Returns:
bool: `True`, če je premik varen, `False`, če ni.
"""
def varen_premik(x0, y0, x1, y1, mine):
if (x0, y0) in mine or (x1, y1) in mine:
return False
if x0 != x1:
if x1 < x0:
while x1 != x0:
x0 -= 1
if(x0, y0) in mine:
return False
else:
while x1 != x0:
x0 += 1
if(x0, y0) in mine:
return False
else:
if y1 < y0:
while y1 != y0:
y0 -= 1
if(x0, y0) in mine:
return False
else:
while y1 != y0:
y0 += 1
if(x0, y0) in mine:
return False
return True
def varna_pot(pot, mine):
"""
Vrni `True`, če je podana pot varna, `False`, če ni.
Args:
pot (list of tuple of int): koordinate točk na poti (brez vmesnih točk)
mine (set of tuple of int): koordinate min
Returns:
bool: `True`, če je pot varna, `False`, če ni.
"""
def varna_pot(pot, mine):
if len(pot) == 1 and pot[0] in mine:
return False
for pot1, pot2 in zip(pot, pot[1:]):
if not varen_premik(pot1[0], pot1[1], pot2[0], pot2[1], mine):
return False
return True
########################
# For grade 8
def polje_v_mine(polje):
"""
Vrni koordinate min v podanem polju.
Niz polje opisuje polje tako, da so vodoravne "vrstice" polja ločene s
presledki. Prosta polja so označena z znako `.`, mine z `X`.
Args:
polje (str): polje
Returns:
mine (set of tuple of int): koordinate min
s (int): širina polja
v (int): višina polja.
"""
def polje_v_mine(polje):
kor = set()
v = -1
for pole in polje.split():
v += 1
for s in range(len(pole)):
if pole[s] == "X":
kor.add((s, v))
return kor, s + 1, v + 1
########################
# For grade 9
#
# All functions for grades 6 and 7 must be written in one line.
def sosedov(x, y, mine):
return sum([1 for x1, y1 in mine if abs(x - x1) <= 1 and abs(y - y1) <= 1 and (x, y) != (x1, y1)])
def dolzina_poti(pot):
return sum([abs(x1 - x0) + abs(y1 - y0) for (x0, y0), (x1, y1) in zip(pot, pot[1:])])
def brez_sosedov(mine, s, v):
return {a for a, b in {(x, y):sosedov(x, y, mine) for x, y in vsa_polja(s, v)}.items() if b == 0}
def varna_pot(pot, mine):
return all([not (len(pot) == 1 and pot[0] in mine)] +
[varen_premik(pot1[0], pot1[1], pot2[0], pot2[1], mine)
for pot1, pot2 in zip(pot, pot[1:])])
def varen_premik(x0, y0, x1, y1, mine):
return all([not((x0, y0) in mine or (x1, y1) in mine)] + \
[not((x, y0) in mine) for x in range(x0, x1) if x0 < x1] + \
[not((x, y0) in mine) for x in range(x0, x1, -1) if x0 > x1] + \
[not ((x0, y) in mine) for y in range(y0, y1) if y0 < y1] + \
[not((x0, y) in mine) for y in range(y0, y1, -1) if y0 > y1])
def najvec_sosedov(mine, s, v):
return max({sosedov(x, y, mine):(x, y) for x, y in vsa_polja(s, v)}.items())[1]
def po_sosedih(mine, s, v):
return {a:{(x, y)for x, y in vsa_polja(s, v) if a == sosedov(x, y, mine)} for a, b in {i:{1, 2, 3} for i in range(9)}.items()}
########################
# For grade 10
def preberi_pot(ukazi):
"""
Za podani seznam ukazov (glej navodila naloge) vrni pot.
Args:
ukazi (str): ukazi, napisani po vrsticah
Returns:
list of tuple of int: pot
"""
def preberi_pot(ukazi):
x = y = nasoka = 0
pot = [(x, y)]
for ukaz in ukazi.split():
if ukaz.isalpha():
if ukaz == "DESNO":
nasoka += 1
else:
nasoka -= 1
if nasoka < -3 or nasoka > 3:
nasoka = 0
else:
if nasoka == 1 or nasoka == -3: #360
x += int(ukaz)
elif nasoka == 2 or nasoka == -2: #270
y += int(ukaz)
elif nasoka == 3 or nasoka == -1: #180
x -= int(ukaz)
else:
y -= int(ukaz)
pot.append((x, y))
return pot
def zapisi_pot(pot):
"""
Za podano pot vrni seznam ukazov (glej navodila naloge).
Args:
pot (list of tuple of int): pot
Returns:
str: ukazi, napisani po vrsticah
"""
def zapisi_pot(pot):
potmoj = []
ukazi = ""
for (x0, y0), (x1, y1) in zip(pot, pot[1:]):
if (x0, y0) not in potmoj:
potmoj.append((x0, y0))
if (x1, y1) not in potmoj:
potmoj.append((x1, y1))
k = abs(x1 - x0) + abs(y1 - y0)
while preberi_pot(ukazi + str(k)) != potmoj:
ukazi += " LEVO "
ukazi += str(k)
return ukazi
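# A minimal sketch: a quick self-check of the helpers above, using only
# names defined in this file:
mine, s, v = polje_v_mine("X.. ... ..X")
print(mine, s, v)           # {(0, 0), (2, 2)} 3 3
print(sosedov(1, 1, mine))  # 2 -- both mines neighbour the centre cell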
|
[
"lenart.motnikar@gmail.com"
] |
lenart.motnikar@gmail.com
|
14d337cc0ae57c9c93b5155c34ee19114e1317c7
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/openpyxl/openpyxl/drawing/connector.pyi
|
3b3e8a479f72006e2689a5b70ac025c01e2a488f
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 1,867
|
pyi
|
from typing import Any
from openpyxl.descriptors.serialisable import Serialisable
class Connection(Serialisable):
id: Any
idx: Any
def __init__(self, id: Any | None = ..., idx: Any | None = ...) -> None: ...
class ConnectorLocking(Serialisable):
extLst: Any
def __init__(self, extLst: Any | None = ...) -> None: ...
class NonVisualConnectorProperties(Serialisable):
cxnSpLocks: Any
stCxn: Any
endCxn: Any
extLst: Any
def __init__(
self, cxnSpLocks: Any | None = ..., stCxn: Any | None = ..., endCxn: Any | None = ..., extLst: Any | None = ...
) -> None: ...
class ConnectorNonVisual(Serialisable):
cNvPr: Any
cNvCxnSpPr: Any
__elements__: Any
def __init__(self, cNvPr: Any | None = ..., cNvCxnSpPr: Any | None = ...) -> None: ...
class ConnectorShape(Serialisable):
tagname: str
nvCxnSpPr: Any
spPr: Any
style: Any
macro: Any
fPublished: Any
def __init__(
self,
nvCxnSpPr: Any | None = ...,
spPr: Any | None = ...,
style: Any | None = ...,
macro: Any | None = ...,
fPublished: Any | None = ...,
) -> None: ...
class ShapeMeta(Serialisable):
tagname: str
cNvPr: Any
cNvSpPr: Any
def __init__(self, cNvPr: Any | None = ..., cNvSpPr: Any | None = ...) -> None: ...
class Shape(Serialisable):
macro: Any
textlink: Any
fPublished: Any
fLocksText: Any
nvSpPr: Any
meta: Any
spPr: Any
graphicalProperties: Any
style: Any
txBody: Any
def __init__(
self,
macro: Any | None = ...,
textlink: Any | None = ...,
fPublished: Any | None = ...,
fLocksText: Any | None = ...,
nvSpPr: Any | None = ...,
spPr: Any | None = ...,
style: Any | None = ...,
txBody: Any | None = ...,
) -> None: ...
|
[
"intellij-monorepo-bot-no-reply@jetbrains.com"
] |
intellij-monorepo-bot-no-reply@jetbrains.com
|
51773bb0b5cf61f1eb2f13888232b12c252e6fbe
|
f121695e2dff353607fa47fb42482470e03bbf8a
|
/capitulo_06-Dicionarios/user.py
|
27f9b3f93f445d4a391344208f4b9726d72af033
|
[] |
no_license
|
ranog/python_work
|
76cbcf784c86fae4482be5383223e4b0a34f4130
|
47c442a90dcf32d5aef70858693a772a3c76a7ac
|
refs/heads/master
| 2022-12-22T11:02:26.482059
| 2021-04-17T01:12:22
| 2021-04-17T01:12:22
| 233,634,221
| 2
| 1
| null | 2022-12-08T07:38:43
| 2020-01-13T15:58:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,642
|
py
|
#! /usr/bin/env python3
"""
NOME
user.py - Percorrendo todos os pares chave-valor com um laço
SINOPSES
chmod +x user.py
./user.py
Key: username
Value: efermi
Key: first
Value: enrico
Key: last
Value: fermi
- 6.12 - Extensões:
Username: efermi
Name: Erico Fermi
Username: aeinstein
Name: Albert Einstein
Username: mcurie
Name: Marie Curie
DESCRIÇÃO
Para escrever um laço for para um dicionário, devemos criar nomes
para as duas variáveis que armazenarão a chave e o valor de cada par
chave-valor. Você pode escolher qualquer nome que quiser para essas
duas variáveis. Esse código também funcionaria bem se usássemos
abreviaturas para os nomes das variáveis, assim: for k, v in
user_0.items(). A segunda metade da instrução for inclui o nome do
dicionário, seguido do método items(), que devolve uma lista de
pares chave-valor. O laço for então armazena cada um desses pares
nas duas variáveis especificadas. No exemplo anterior, usamos as
variáveis para exibir cada chave (key), seguido do valor associado
(value). O "\n" na primeira instrução print garante que uma linha em
branco seja inserida antes de cada par chave-valor na saída.
Python não se importa com a ordem em que os pares chave-valor são
armazenados; ele só registra as conexões entre cada chave individual
e seu valor.
6.12 – Extensões: Estamos trabalhando agora com exemplos complexos o
bastante para poderem ser estendidos de várias maneiras. Use um dos
programas de exemplo deste capítulo e estenda-o acrescentando novas
chaves e valores, alterando o contexto do programa ou melhorando a
formatação da saída.
----------------------------------------------------------------------
HISTÓRICO
20202410: João Paulo, outubro de 2020.
- Percorrendo todos os pares chave-valor com um laço
(Pag. 138-139).
20202810: João Paulo, outubro de 2020.
- FAÇA VOCÊ MESMO 6.12 - Extensões (pg 150).
"""
user_0 = {'username' : 'efermi', 'first' : 'enrico', 'last' : 'fermi',}
for key, value in user_0.items():
print("\nKey: " + key)
print("Value: " + value)
print("\n- 6.12 - Extensões:")
users = {
'efermi' : {'first' : 'erico', 'last' : 'fermi',},
'aeinstein' : {'first' : 'albert', 'last': 'einstein',},
'mcurie': {'first': 'marie', 'last': 'curie',},}
for key, value in users.items():
print("\nUsername: " + key)
full_name = value['first'] + " " + value['last']
print("Name: " + full_name.title())
|
[
"jprnogueira@yahoo.com.br"
] |
jprnogueira@yahoo.com.br
|
7a14e8464e65364030788781634321eba09f3d7f
|
80d50ea48e10674b1b7d3f583a1c4b7d0b01200f
|
/examples/v1/synthetics/CreateSyntheticsAPITest.py
|
84904e1d7992fcae134d9955d3c134e4da54febc
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] |
permissive
|
DataDog/datadog-api-client-python
|
3e01fa630278ad0b5c7005f08b7f61d07aa87345
|
392de360e7de659ee25e4a6753706820ca7c6a92
|
refs/heads/master
| 2023-09-01T20:32:37.718187
| 2023-09-01T14:42:04
| 2023-09-01T14:42:04
| 193,793,657
| 82
| 36
|
Apache-2.0
| 2023-09-14T18:22:39
| 2019-06-25T22:52:04
|
Python
|
UTF-8
|
Python
| false
| false
| 4,304
|
py
|
"""
Create an API test returns "OK - Returns the created test details." response
"""
from datadog_api_client import ApiClient, Configuration
from datadog_api_client.v1.api.synthetics_api import SyntheticsApi
from datadog_api_client.v1.model.synthetics_api_test import SyntheticsAPITest
from datadog_api_client.v1.model.synthetics_api_test_config import SyntheticsAPITestConfig
from datadog_api_client.v1.model.synthetics_api_test_type import SyntheticsAPITestType
from datadog_api_client.v1.model.synthetics_assertion_operator import SyntheticsAssertionOperator
from datadog_api_client.v1.model.synthetics_assertion_target import SyntheticsAssertionTarget
from datadog_api_client.v1.model.synthetics_assertion_type import SyntheticsAssertionType
from datadog_api_client.v1.model.synthetics_browser_test_rum_settings import SyntheticsBrowserTestRumSettings
from datadog_api_client.v1.model.synthetics_device_id import SyntheticsDeviceID
from datadog_api_client.v1.model.synthetics_restricted_roles import SyntheticsRestrictedRoles
from datadog_api_client.v1.model.synthetics_test_ci_options import SyntheticsTestCiOptions
from datadog_api_client.v1.model.synthetics_test_details_sub_type import SyntheticsTestDetailsSubType
from datadog_api_client.v1.model.synthetics_test_execution_rule import SyntheticsTestExecutionRule
from datadog_api_client.v1.model.synthetics_test_options import SyntheticsTestOptions
from datadog_api_client.v1.model.synthetics_test_options_http_version import SyntheticsTestOptionsHTTPVersion
from datadog_api_client.v1.model.synthetics_test_options_monitor_options import SyntheticsTestOptionsMonitorOptions
from datadog_api_client.v1.model.synthetics_test_options_retry import SyntheticsTestOptionsRetry
from datadog_api_client.v1.model.synthetics_test_options_scheduling import SyntheticsTestOptionsScheduling
from datadog_api_client.v1.model.synthetics_test_options_scheduling_timeframe import (
SyntheticsTestOptionsSchedulingTimeframe,
)
from datadog_api_client.v1.model.synthetics_test_pause_status import SyntheticsTestPauseStatus
from datadog_api_client.v1.model.synthetics_test_request import SyntheticsTestRequest
body = SyntheticsAPITest(
config=SyntheticsAPITestConfig(
assertions=[
SyntheticsAssertionTarget(
operator=SyntheticsAssertionOperator.LESS_THAN,
target=1000,
type=SyntheticsAssertionType.RESPONSE_TIME,
),
],
request=SyntheticsTestRequest(
method="GET",
url="https://example.com",
),
),
locations=[
"aws:eu-west-3",
],
message="Notification message",
name="Example test name",
options=SyntheticsTestOptions(
ci=SyntheticsTestCiOptions(
execution_rule=SyntheticsTestExecutionRule.BLOCKING,
),
device_ids=[
SyntheticsDeviceID.LAPTOP_LARGE,
],
http_version=SyntheticsTestOptionsHTTPVersion.HTTP1,
monitor_options=SyntheticsTestOptionsMonitorOptions(),
restricted_roles=SyntheticsRestrictedRoles(
[
"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
]
),
retry=SyntheticsTestOptionsRetry(),
rum_settings=SyntheticsBrowserTestRumSettings(
application_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
client_token_id=12345,
is_enabled=True,
),
scheduling=SyntheticsTestOptionsScheduling(
timeframes=[
SyntheticsTestOptionsSchedulingTimeframe(
day=1,
_from="07:00",
to="16:00",
),
SyntheticsTestOptionsSchedulingTimeframe(
day=3,
_from="07:00",
to="16:00",
),
],
timezone="America/New_York",
),
),
status=SyntheticsTestPauseStatus.LIVE,
subtype=SyntheticsTestDetailsSubType.HTTP,
tags=[
"env:production",
],
type=SyntheticsAPITestType.API,
)
configuration = Configuration()
with ApiClient(configuration) as api_client:
api_instance = SyntheticsApi(api_client)
response = api_instance.create_synthetics_api_test(body=body)
print(response)
|
[
"noreply@github.com"
] |
DataDog.noreply@github.com
|
50942d27099e308aa4147588ac09f780f7856048
|
f9308d5a8efe2dbb48e9cc87cd06405b60a9dc7b
|
/samples/python/apidocs/ee_featurecollection_getstring.py
|
8f9996c332c70c0c6ba24aec13ae4de989df6de8
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
google/earthengine-community
|
4e054b421f66f03507d58668084aee981062fc24
|
ce931040c518860f8788b4888c0acfdebd2952fc
|
refs/heads/master
| 2023-09-01T14:47:54.812703
| 2023-08-31T23:01:00
| 2023-08-31T23:01:39
| 200,732,820
| 428
| 552
|
Apache-2.0
| 2023-09-13T21:46:51
| 2019-08-05T21:42:11
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
# Copyright 2023 The Google Earth Engine Community Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START earthengine__apidocs__ee_featurecollection_getstring]
# A FeatureCollection with a string property value.
fc = ee.FeatureCollection([]).set('string_property', 'Abies magnifica')
# Fetch the string property value as an ee.String object.
print('String property value as ee.String:',
fc.getString('string_property').getInfo())
# [END earthengine__apidocs__ee_featurecollection_getstring]
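# Note: like other Earth Engine samples, this snippet assumes the client
# library is already imported, authenticated and initialized, roughly:
#   import ee
#   ee.Authenticate()  # once per environment
#   ee.Initialize()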
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
548439ff669f3f0d005ab4790ec8a3cb6a20f164
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_officiates.py
|
4b9a0f7403716650f0c91ed440000739919939b4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# class header
class _OFFICIATES():
def __init__(self,):
self.name = "OFFICIATES"
self.definitions = ['officiate']  # was a bare name (NameError); assumed to mirror self.basic below
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['officiate']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
8aed7256c3bb58a3dd4968ada6a007943f82ab89
|
3034cb06289f747066571c4ab54ca81996c22319
|
/module_utils/RubrikLib/rubrik_lib/models/download_file_job_config.py
|
54fb2f3618d7eb072adffba24a2445ad05a26500
|
[] |
no_license
|
tarunactivity/ansible-rubrik
|
b2f644805f13a553bd0635e6ddc230257d125ef7
|
5d978c23902fd32d92cc90c75e48e5fe2209f8e0
|
refs/heads/master
| 2023-04-29T04:25:26.834701
| 2023-04-20T21:58:47
| 2023-04-20T21:58:47
| 116,251,368
| 0
| 0
| null | 2018-01-04T11:18:38
| 2018-01-04T11:18:37
| null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DownloadFileJobConfig(Model):
"""DownloadFileJobConfig.
:param path: Absolute file path
:type path: str
"""
_validation = {
'path': {'required': True},
}
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
}
def __init__(self, path):
self.path = path
|
[
"tarun.activity+github@gmail.com"
] |
tarun.activity+github@gmail.com
|
715dc05ef8ff9902ec23f1039df3e9c7fa5bbe74
|
a9ca484b422c802f02684ad64694212a7374a180
|
/devel/lib/turtlebot3_example/turtlebot3_server
|
e8c5800d6414be4bda9b9bbc6d47d9b985fbad01
|
[] |
no_license
|
akash1306/mate_unity
|
791cd5fadc1fae10e01e6f577fef5c43098eb766
|
e947904f37cad6c814c9e5dfea0017a3d02a5b31
|
refs/heads/master
| 2022-12-10T11:22:59.709816
| 2020-08-30T10:29:34
| 2020-08-30T10:29:34
| 290,722,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/script.py.in
# creates a relay to a python script source file, acting as that file.
# The purpose is that of a symlink
python_script = '/home/kajal/mate_unity/src/turtlebot3/turtlebot3_example/nodes/turtlebot3_server'
with open(python_script, 'r') as fh:
context = {
'__builtins__': __builtins__,
'__doc__': None,
'__file__': python_script,
'__name__': __name__,
'__package__': None,
}
exec(compile(fh.read(), python_script, 'exec'), context)
|
[
"akash.chaudhary1306@gmail.com"
] |
akash.chaudhary1306@gmail.com
|
|
10051e7b6695576ce02e1731f8384c452f3e9597
|
08583f5a46dab2455ef707a91a342e6a30f62e8a
|
/advanced-web-scraping-2/14.py
|
d97c5ac81eb880454b2680bb8a8a6d3656c18d5e
|
[] |
no_license
|
mahdi-asadzadeh/python-webscraping-simple-projects
|
924cfefcf1e88698bd76e09e2824da28a9460cb0
|
24f629f48921e7d047f5b5c0803e4d9b3ec31c86
|
refs/heads/main
| 2023-06-20T08:58:46.730938
| 2021-07-22T11:59:56
| 2021-07-22T11:59:56
| 388,445,419
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 275
|
py
|
import requests
url = 'http://www.webscrapingfordatascience.com/referercheck/secret.php'
my_headers = {
'Referer': 'http://www.webscrapingfordatascience.com/referercheck/'
}
r = requests.get(url, headers=my_headers)
print(r.text)
print(r.headers)
print(r.request.headers)
|
[
"mahdi.asadzadeh.programing@gmail.com"
] |
mahdi.asadzadeh.programing@gmail.com
|
608b91ef49bdcf91ae244f0073ec6a7bb4b05a22
|
87163acf1614292be250754f28114f89013f73a3
|
/HackerRank/Problem Solving/Implementation/Easy/Cut the sticks.py
|
400aa182819db47b8f08baf51c32151e0dddc50c
|
[] |
no_license
|
khush-01/Python-codes
|
742a9d9966d2ceb3ad2e7c78e34ef88e55df955a
|
da3cae8df0aafe763399066eefc9b786538fdb35
|
refs/heads/main
| 2023-03-20T04:37:14.020134
| 2021-03-12T04:56:30
| 2021-03-12T04:56:30
| 346,941,048
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
n = int(input())
arr = list(map(int, input().split()))
while len(arr):
print(len(arr))
low = min(arr)
arr = [x for x in arr if x != low]
arr = [x - low for x in arr]  # actually shorten the remaining sticks; the original loop only rebound the loop variable
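# A minimal sketch: the same counts can be produced without mutating the
# list, by sorting once and walking groups of equal lengths (cut_counts
# is a hypothetical helper, not part of the original solution):
def cut_counts(sticks):
    sticks = sorted(sticks)
    out, i, n = [], 0, len(sticks)
    while i < n:
        out.append(n - i)   # sticks remaining before this cut
        cur = sticks[i]
        while i < n and sticks[i] == cur:
            i += 1          # skip every stick of the current length
    return out

# e.g. cut_counts([5, 4, 4, 2, 2, 8]) -> [6, 4, 2, 1]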
|
[
"noreply@github.com"
] |
khush-01.noreply@github.com
|
38bfd9428ec551d1e7378c0bed6889e115ea4cdb
|
a8499deff2fa4abde885891d655c3f53ab741d37
|
/rotkehlchen/typing.py
|
baf11f828d8959e799887993f4df71e0658772a7
|
[
"BSD-3-Clause"
] |
permissive
|
georgerobescu/rotkehlchen
|
4c6d2cadcf8b1a8a20f33bb7352b3924a492af54
|
817c880b771b8daf5635b02642861dd9949689e2
|
refs/heads/master
| 2020-06-25T00:22:20.442607
| 2019-07-26T11:42:13
| 2019-07-26T11:42:13
| 199,137,746
| 0
| 0
|
BSD-3-Clause
| 2019-07-27T08:25:06
| 2019-07-27T08:24:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,836
|
py
|
from enum import Enum
from typing import Dict, NamedTuple, NewType, Optional, Union
from rotkehlchen.fval import FVal
T_BinaryEthAddress = bytes
BinaryEthAddress = NewType('BinaryEthAddress', T_BinaryEthAddress)
T_Timestamp = int
Timestamp = NewType('Timestamp', T_Timestamp)
T_ApiKey = bytes
ApiKey = NewType('ApiKey', T_ApiKey)
T_ApiSecret = bytes
ApiSecret = NewType('ApiSecret', T_ApiSecret)
T_B64EncodedBytes = bytes
B64EncodedBytes = NewType('B64EncodedBytes', T_B64EncodedBytes)
T_B64EncodedString = str
B64EncodedString = NewType('B64EncodedString', T_B64EncodedString)
class ApiCredentials(NamedTuple):
"""Represents Credentials for various APIs. Exchanges, Premium e.t.c."""
api_key: ApiKey
api_secret: ApiSecret
@staticmethod
def serialize(api_key: str, api_secret: str) -> 'ApiCredentials':
return ApiCredentials(
api_key=ApiKey(str.encode(api_key)),
api_secret=ApiSecret(str.encode(api_secret)),
)
T_FilePath = str
FilePath = NewType('FilePath', T_FilePath)
T_TradePair = str
TradePair = NewType('TradePair', T_TradePair)
T_FiatAsset = str
FiatAsset = NewType('FiatAsset', T_FiatAsset)
T_EthAddres = str
EthAddress = NewType('EthAddress', T_EthAddres)
T_ChecksumEthAddress = str
ChecksumEthAddress = NewType('ChecksumEthAddress', T_ChecksumEthAddress)
T_BTCAddress = str
BTCAddress = NewType('BTCAddress', T_BTCAddress)
BlockchainAddress = Union[EthAddress, BTCAddress, ChecksumEthAddress]
class EthTokenInfo(NamedTuple):
address: ChecksumEthAddress
symbol: str
name: str
decimal: int
T_EmptyStr = str
EmptyStr = NewType('EmptyStr', T_EmptyStr)
T_Fee = FVal
Fee = NewType('Fee', T_Fee)
T_Price = FVal
Price = NewType('Price', T_Price)
class ResultCache(NamedTuple):
"""Represents a time-cached result of some API query"""
result: Dict
timestamp: Timestamp
T_EventType = str
EventType = NewType('EventType', T_EventType)
class EthereumTransaction(NamedTuple):
"""Represent an Ethereum transaction"""
timestamp: Timestamp
block_number: int
hash: bytes
from_address: EthAddress
to_address: EthAddress
value: FVal
gas: FVal
gas_price: FVal
gas_used: FVal
class SupportedBlockchain(Enum):
"""These are the blockchains for which account tracking is supported """
ETHEREUM = 'ETH'
BITCOIN = 'BTC'
class AssetType(Enum):
FIAT = 1
OWN_CHAIN = 2
ETH_TOKEN = 3
OMNI_TOKEN = 4
NEO_TOKEN = 5
XCP_TOKEN = 6
BTS_TOKEN = 7
ARDOR_TOKEN = 8
NXT_TOKEN = 9
UBIQ_TOKEN = 10
NUBITS_TOKEN = 11
BURST_TOKEN = 12
WAVES_TOKEN = 13
QTUM_TOKEN = 14
STELLAR_TOKEN = 15
TRON_TOKEN = 16
ONTOLOGY_TOKEN = 17
ETH_TOKEN_AND_MORE = 18
EXCHANGE_SPECIFIC = 19
VECHAIN_TOKEN = 20
BINANCE_TOKEN = 21
class AssetData(NamedTuple):
"""Data of an asset. Keep in sync with assets/asset.py"""
identifier: str
name: str
symbol: str
active: bool
asset_type: AssetType
# Every asset should have a started timestamp except for FIAT which are
# most of the times older than epoch
started: Optional[Timestamp]
ended: Optional[Timestamp]
forked: Optional[str]
swapped_for: Optional[str]
ethereum_address: Optional[ChecksumEthAddress]
decimals: Optional[int]
class TradeType(Enum):
BUY = 1
SELL = 2
SETTLEMENT_BUY = 3
SETTLEMENT_SELL = 4
def __str__(self) -> str:
if self == TradeType.BUY:
return 'buy'
elif self == TradeType.SELL:
return 'sell'
elif self == TradeType.SETTLEMENT_BUY:
return 'settlement_buy'
elif self == TradeType.SETTLEMENT_SELL:
return 'settlement_sell'
raise RuntimeError('Corrupt value for TradeType -- Should never happen')
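# Note: the NewType aliases above cost nothing at runtime but let a
# static checker catch mix-ups between plain strings and domain types.
# A minimal illustration, assuming only names defined in this module:
#
#   def track(address: EthAddress) -> None: ...
#   track(EthAddress('0xabc'))  # ok
#   track('0xabc')              # flagged by mypy: str is not EthAddress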
|
[
"lefteris@refu.co"
] |
lefteris@refu.co
|
09be8dca5ceacb21fe25c7dffea510e79c928207
|
2379e840d0a9e47331ac247f4d6164cdfd548cbd
|
/tools/docs/docload.py
|
6cdadb8d0f4de2a0db4459bf4ad0b6d8bf40ee44
|
[
"BSD-3-Clause",
"PostgreSQL"
] |
permissive
|
jberkus/pgweb
|
b9d04bc60da437f2f60c8e4cf844e0cfa601a160
|
fa07ed84b8708240264fe9f091b2087b7f872b8c
|
refs/heads/master
| 2020-04-03T10:12:04.734671
| 2017-05-11T13:00:07
| 2017-05-11T13:00:20
| 30,271,195
| 0
| 0
| null | 2015-02-03T23:49:04
| 2015-02-03T23:49:04
| null |
UTF-8
|
Python
| false
| false
| 3,730
|
py
|
#!/usr/bin/env python
# Script to load documentation from tarballs
import sys
import os
import tarfile
import re
import tidy
from optparse import OptionParser
from ConfigParser import ConfigParser
import psycopg2
pagecount = 0
quiet = False
re_titlematch = re.compile('<title\s*>([^<]+)</title\s*>', re.IGNORECASE)
## Load a single page
def load_doc_file(filename, f):
tidyopts = dict(drop_proprietary_attributes=1,
alt_text='',
hide_comments=1,
output_xhtml=1,
show_body_only=1,
clean=1,
char_encoding='utf8',
indent='auto',
)
# Postgres 10 started using the xml toolchain and now produces documentation in utf8. So we need
# to figure out which version it is.
rawcontents = f.read()
if rawcontents.startswith('<?xml version="1.0" encoding="UTF-8"'):
# Version 10, use utf8
encoding = 'utf-8'
# XML builds also don't need clean=1, and that one adds some interesting CSS properties
del tidyopts['clean']
else:
encoding = 'latin1'
contents = unicode(rawcontents, encoding)
tm = re_titlematch.search(contents)
if tm:
title = tm.group(1)
else:
title = ""
if not quiet: print "--- file: %s (%s) ---" % (filename, title)
s = tidy.parseString(contents.encode('utf-8'), **tidyopts)
curs.execute("INSERT INTO docs (file, version, title, content) VALUES (%(f)s, %(v)s, %(t)s, %(c)s)",{
'f': filename,
'v': ver,
't': title,
'c': str(s),
})
global pagecount
pagecount += 1
## Main execution
parser = OptionParser(usage="usage: %prog [options] <version> <tarfile>")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
help="Run quietly")
(options, args) = parser.parse_args()
if len(args) != 2:
parser.print_usage()
sys.exit(1)
quiet = options.quiet
ver = sys.argv[1]
tarfilename = sys.argv[2]
config = ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'docload.ini'))
if not os.path.isfile(tarfilename):
print "File %s not found" % tarfilename
sys.exit(1)
tf = tarfile.open(tarfilename)
connection = psycopg2.connect(config.get('db', 'dsn'))
curs = connection.cursor()
# Verify that the version exists, and what we're loading
curs.execute("SELECT current FROM core_version WHERE tree=%(v)s", {'v': ver})
r = curs.fetchall()
if len(r) != 1:
print "Version %s not found in the system, cannot load!" % ver
sys.exit(1)
iscurrent = r[0][0]
# Remove any old docs for this version (still protected by a transaction while
# we perform the load)
curs.execute("DELETE FROM docs WHERE version=%(v)s", {'v': ver})
re_htmlfile = re.compile('[^/]*/doc/src/sgml/html/.*')
re_tarfile = re.compile('[^/]*/doc/postgres.tar.gz$')
for member in tf:
if re_htmlfile.match(member.name):
load_doc_file(os.path.basename(member.name), tf.extractfile(member))
if re_tarfile.match(member.name):
f = tf.extractfile(member)
inner_tar = tarfile.open(fileobj=f)
for inner_member in inner_tar:
# Some old versions have index.html as a symlink - so let's
# just ignore all symlinks to be on the safe side.
if inner_member.issym(): continue
if inner_member.name.endswith('.html') or inner_member.name.endswith('.htm'):
load_doc_file(inner_member.name, inner_tar.extractfile(inner_member))
tf.close()
# Update the docs loaded timestamp
curs.execute("UPDATE core_version SET docsloaded=CURRENT_TIMESTAMP WHERE tree=%(v)s", {'v': ver})
# Issue varnish purge for all docs of this version
if ver == "0":
# Special handling of developer docs...
ver = "devel"
curs.execute("SELECT varnish_purge('^/docs/' || %(v)s || '/')", {'v': ver})
if iscurrent:
curs.execute("SELECT varnish_purge('^/docs/current/')")
connection.commit()
connection.close()
if not quiet: print "Done (%i pages)." % pagecount
|
[
"magnus@hagander.net"
] |
magnus@hagander.net
|
76dc68fb3fe760087f98d3966455e9f50c8e0e40
|
c5d6e21744f10c6e57d58b57bba2763b82a9726b
|
/Bimestre_03_Aula_03/exercicios/04_somando_forcas.py
|
ccae92fe26f595c8832a0e1be2f97b2d84922e72
|
[] |
no_license
|
valeriacavalcanti/ALP-2020-R
|
bf32af707d49db650deb6d122a1abdf58d94ae4f
|
62e0be861ad7439b99ae5d0b0e14d97c887424c7
|
refs/heads/main
| 2023-05-05T02:05:00.128872
| 2021-06-04T10:30:05
| 2021-06-04T10:30:05
| 316,784,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
# declare the matrices
matriz1 = []
for i in range(6):
matriz1.append([0] * 4)
matriz2 = []
for i in range(6):
matriz2.append([0] * 4)
matriz_soma = []
for i in range(6):
matriz_soma.append([0] * 4)
# read in the matrices' values
for i in range(6):
for j in range(4):
matriz1[i][j] = int(input("Matriz1 {} {}: ".format(i, j)))
for i in range(6):
for j in range(4):
matriz2[i][j] = int(input("Matriz2 {} {}: ".format(i, j)))
# compute the sum of matrices 1 and 2
for i in range(6):
for j in range(4):
matriz_soma[i][j] = matriz1[i][j] + matriz2[i][j]
print(matriz_soma)
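# A minimal sketch: the same element-wise sum written as a nested list
# comprehension over the two matrices read above:
soma_compacta = [[matriz1[i][j] + matriz2[i][j] for j in range(4)]
                 for i in range(6)]
# soma_compacta == matriz_soma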
|
[
"valeria.cavalcanti@ifpb.edu.br"
] |
valeria.cavalcanti@ifpb.edu.br
|
7b5cbe0847dd7ee161252a3f29692ab99f07de36
|
d7ccb4225f623139995a7039f0981e89bf6365a4
|
/.history/carts/models_20211010234636.py
|
41796b5b857bad7162f4bffa6392f2ca23d6f216
|
[] |
no_license
|
tonnymuchui/django-mall
|
64fd4abc3725c1bd0a3dcf20b93b490fe9307b37
|
55c083d8433be3c77adc61939cd197902de4ce76
|
refs/heads/master
| 2023-08-23T04:59:20.418732
| 2021-10-13T15:59:37
| 2021-10-13T15:59:37
| 415,668,388
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
from django.db import models
# NOTE: Product is referenced below but was never imported in this snapshot;
# the import path here is an assumption about the project layout.
from store.models import Product
# Create your models here.
class Cart(models.Model):
cart_id = models.CharField(max_length=255, blank=True)
date_added = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.cart_id
class CardItem(models.Model):
product = models.ForeignKey(Product, on_delete=models.CASCADE)  # original line was truncated ("models.Ca"); CASCADE is the natural completion
|
[
"tonykanyingah@gmail.com"
] |
tonykanyingah@gmail.com
|
9f29c6cd97e8f4ec92d968578340ad5c858fb023
|
be4459658d667c47eefeeb3cf689a678042edb94
|
/modules/ext/util/lua/packages/luaunit/luaunit/doit.py
|
b4e636abe621a2734eb4e4f3ce9b7ce2d3bef357
|
[
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kparr/RIFT.ware-1
|
7945174aa23ac1f7d74a7464b645db5824982fc3
|
6846108d70b80b95c5117fdccd44ff058ac605be
|
refs/heads/master
| 2021-01-13T08:36:03.751610
| 2016-07-24T21:36:15
| 2016-07-24T21:36:15
| 72,420,438
| 0
| 0
| null | 2016-10-31T09:11:27
| 2016-10-31T09:11:27
| null |
UTF-8
|
Python
| false
| false
| 2,796
|
py
|
import subprocess, sys, os, shutil, os.path, optparse
VERSION='3.0'
RELEASE_NAME='luaunit-%s' % VERSION
RELEASE_DIR='release/' + RELEASE_NAME + '/'
TARGET_ZIP=RELEASE_NAME + '.zip'
TARGET_TGZ=RELEASE_NAME + '.tgz'
REPO_PATH='d:/work/luaunit/luaunit-git/luaunit/'
LUA50='d:/program/lua/lua50/lua50.exe'
LUA51='d:/program/lua/lua51/lua51.exe'
LUA52='d:/program/lua/lua52/lua52.exe'
ALL_LUA = (
(LUA52, 'lua 5.2'),
(LUA51, 'lua 5.1'),
# (LUA50, 'lua 5.0'),
)
os.environ["nodosfilewarning"] = "1"
def report( s ):
print '[[[[[[[[[[[[[ %s ]]]]]]]]]]]]]' % s
def run_tests():
'''Run tests with all versions of lua'''
for lua, luaversion in ALL_LUA:
report( 'Running tests with %s' % luaversion )
retcode = subprocess.call( [lua, 'test_luaunit.lua'] )
if retcode != 0:
report( 'Invalid retcode when running tests: %d' % retcode )
sys.exit( retcode )
report( 'All tests succeed!' )
def run_example():
for lua, luaversion in ALL_LUA:
report( 'Running examples with %s' % luaversion )
retcode = subprocess.call( [lua, 'example_with_luaunit.lua'] )
if retcode != 12:
report( 'Invalid retcode when running examples: %d' % retcode )
sys.exit( retcode )
report( 'All examples ran!' )
def packageit():
shutil.rmtree('release', True)
try:
os.mkdir('release')
except OSError:
pass
subprocess.check_call(['d:/program/msysgit/msysgit/bin/git.exe', 'clone', '--no-hardlinks', REPO_PATH, RELEASE_DIR])
os.chdir( RELEASE_DIR )
# Release dir cleanup
shutil.rmtree('.git')
os.unlink('.gitignore')
run_tests()
run_example()
makedoc()
shutil.rmtree('doc/_build')
# Packaging
os.chdir('..')
report('Start packaging')
shutil.make_archive(RELEASE_NAME, 'zip', root_dir='.', base_dir=RELEASE_NAME )
shutil.make_archive(RELEASE_NAME, 'gztar', root_dir='.', base_dir=RELEASE_NAME )
report('Zip and tgz ready!')
def help():
print( 'Available actions:')
for opt in OptToFunc:
print '\t%s' % opt
def makedoc():
os.chdir('doc')
if os.path.exists('html'):
shutil.rmtree('html')
subprocess.check_call(['make.bat', 'html'])
shutil.copytree('_build/html', 'html')
os.chdir('..')
OptToFunc = {
'runtests' : run_tests,
'runexample' : run_example,
'packageit' : packageit,
'makedoc' : makedoc,
'help' : help,
}
if __name__ == '__main__':
doingNothing = True
for arg in sys.argv[1:]:
if OptToFunc.has_key(arg):
doingNothing = False
OptToFunc[arg]()
else:
print 'No such action :', arg
sys.exit(-1)
if doingNothing:
help()
|
[
"Jeremy.Mordkoff@riftio.com"
] |
Jeremy.Mordkoff@riftio.com
|
6bc2f2f74fa4f7f954d72f9f188aa154ec803c32
|
4f7aa44d21ae38093869e79e10f5cdc8842d48b7
|
/01-python-academy-intro-lab/exceptions_01.py
|
743b25bface618a473cf911dd91b8165a4e2d8b3
|
[
"Apache-2.0"
] |
permissive
|
iproduct/intro-python
|
31e802c2c21a4df3361656f12d267ec52c2d6564
|
7e08e144da2907fcf45dc734ab4e896631625d75
|
refs/heads/master
| 2023-02-19T11:42:37.522624
| 2023-02-13T15:54:03
| 2023-02-13T15:54:03
| 128,980,155
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
import sys
class MyException(Exception):
def __init__(self, *args):
# Exception.__init__(self, *args)
super().__init__(*args)
def erroneous():
try:
return 1 / 0
except:
print("Within except.")
raise
finally:
print("Within finally.")
if __name__ == "__main__":
try:
erroneous()
except Exception as ex:
# tb = sys.exc_info()[2]
# print(f"Type: {sys.exc_info()[0]}, Value: {sys.exc_info()[1]}\n")
raise MyException("Raised from main()") from ex
|
[
"office@iproduct.org"
] |
office@iproduct.org
|
6c6ca35bcb1cae919f48bc7f70104caabf953925
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/2329.py
|
bf535ce3f6a4ddf821921237facfb76044f407d5
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
ipt = open('B-large.in').read().splitlines()
cases = int(ipt[0])
fw = open('B-large.out','w')
for i in xrange(cases):
c_per_sec = 2.0
c,f,x = map(lambda x: float(x), ipt[i+1].split(' '))
just_wait = x / c_per_sec
sub_total = 0
while True:
#print just_wait
sub_total += c/c_per_sec
c_per_sec += f
total = sub_total + (x / c_per_sec)
if total >= just_wait:
fw.write("Case #{}: {}\n".format(i+1, round(just_wait, 7)))
break
else:
just_wait = total
fw.close()
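# Note: the greedy rule above -- keep buying farms while
# (cost/current_rate + X/new_rate) beats finishing at the current rate --
# works because each extra farm's payoff can be evaluated independently.
# One step of the arithmetic with C=500, F=4, X=2000 at rate 2.0:
#   wait now:       2000 / 2.0             = 1000.0 s
#   buy, then wait: 500 / 2.0 + 2000 / 6.0 =  583.33... s  -> buying wins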
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
2ece9826a7c51f922007dd4b37ea17ae54963ccc
|
c609730a43596a2d3303f072fc97d9cf681fac7b
|
/cagey/kache/kache/spiders/woniuhc.py
|
954c5f0dbca656d7d140c892215686432f16e2ad
|
[] |
no_license
|
sinnettluo/ChenProject
|
5403311c0c7b78c484145e16d692abff00d2a110
|
0e33ecf1683afb22f1deb4bd54294c41aed8a46b
|
refs/heads/master
| 2023-03-22T23:48:08.430178
| 2020-09-02T15:05:02
| 2020-09-02T15:05:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,182
|
py
|
# -*- coding: utf-8 -*-
import scrapy
import time
import json
from copy import deepcopy
from kache.items import woniuItem
class WoniuhcSpider(scrapy.Spider):
name = 'woniuhc'
allowed_domains = ['woniuhuoche.com']
# start_urls = ['http://woniuhuoche.com/']
@classmethod
def update_settings(cls, settings):
settings.setdict(getattr(cls, 'custom_debug_settings' if getattr(cls, 'is_debug', False) else 'custom_settings', None) or {}, priority='spider')
def __init__(self, **kwargs):
super(WoniuhcSpider, self).__init__(**kwargs)
self.counts = 0
is_debug = True
custom_debug_settings = {
'MYSQL_SERVER': '192.168.1.94',
'MYSQL_DB': 'truck',
'MYSQL_TABLE': 'woniuhc',
'MONGODB_SERVER': '192.168.1.94',
'MONGODB_DB': 'truck',
'MONGODB_COLLECTION': 'woniuhc',
'CONCURRENT_REQUESTS': 8,
'DOWNLOAD_DELAY': 0,
'LOG_LEVEL': 'DEBUG',
}
def start_requests(self):
url = "http://www.woniuhuoche.com/truck-auction-app/api/auction/v1/lotList?source=2&type=0"
yield scrapy.Request(
url=url,
dont_filter=True,
)
def parse(self, response):
item = woniuItem()
res = json.loads(response.text)
data_list = res["data"]["lotList"]
for data in data_list:
item["title"] = data["title"]
item["carid"] = data["lotId"]
item["truckId"] = data["truckId"]
item["registerdate"] = data["registDate"]
item["price"] = data["maxPrice"]
item["emission"] = data["emission"]
item["startTime"] = data["startTime"]
item["endTime"] = data["endTime"]
url = f"http://www.woniuhuoche.com/truck-auction-app/api/auction/v1/truckDetail?lotId={item['carid']}&truckId={item['truckId']}"
item["url"] = url
yield scrapy.Request(
url=url,
callback=self.parse_detail_url,
meta={"item": deepcopy(item)},
dont_filter=True,
)
def parse_detail_url(self, response):
item = response.meta["item"]
res = json.loads(response.text)
data = res["data"]
item["city"] = data["city"]
basicList = data["basicList"]
for basic in basicList:
if "数量" in basic["key"]:
item["num"] = basic["value"]
if "车辆类型" in basic["key"]:
item["car_type"] = basic["value"]
if "表显里程" in basic["key"]:
item["mileage"] = basic["value"]
if "发动机品牌" in basic["key"]:
item["engine"] = basic["value"]
if "燃料类型" in basic["key"]:
item["fuel"] = basic["value"]
if "排放标准" in basic["key"]:
item["let"] = basic["value"]
if "品牌" in basic["key"]:
item["brand"] = basic["value"]
if "车辆颜色" in basic["key"]:
item["color"] = basic["value"]
if "最大马力" in basic["key"]:
item["hoursepower"] = basic["value"]
if "箱体长度" in basic["key"]:
item["containerLong"] = basic["value"]
if "栏板高度" in basic["key"]:
item["containerHight"] = basic["value"]
if "驱动形式" in basic["key"]:
item["driveType"] = basic["value"]
if "罐体容积" in basic["key"]:
item["containerVolume"] = basic["value"]
if "看车地点" in basic["key"]:
item["carLocation"] = basic["value"]
proceduresList = data["proceduresList"]
for procedures in proceduresList:
if "是否可过户" in procedures["key"]:
item["isTransfer"] = procedures["value"]
if "年检到期日" in procedures["key"]:
item["inspectionDate1"] = procedures["value"]
if "购置税证" in procedures["key"]:
item["isPurchase"] = procedures["value"]
if "交强险" in procedures["key"]:
item["inspectionDate2"] = procedures["value"]
if "第三责任险" in procedures["key"]:
item["inspectionDate3"] = procedures["value"]
if "其他法定凭证、证书" in procedures["key"]:
item["isCertificate"] = procedures["value"]
if "是否有违章" in procedures["key"]:
item["isRules"] = procedures["value"]
if "是否抵押车" in procedures["key"]:
item["isMortgage"] = procedures["value"]
detect = data["detect"]
item["grade"] = detect["grade"]
item["surveyor"] = detect["surveyor"]
item["detectTime"] = detect["detectTime"]
item["detectItem"] = json.dumps(detect["detectItem"], ensure_ascii=False)
item["desc"] = data["descp"]
item["statusplus"] = item["url"]+'-'+item["price"]
item["grab_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
yield item
# print(item)
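# A minimal sketch: the if-chains in parse_detail_url map Chinese keys
# onto item fields one by one; a hypothetical data-driven table keeps
# that shorter. Only a few keys are shown -- the field names mirror the
# ones already used in this spider:
BASIC_FIELD_MAP = {
    "数量": "num",
    "车辆类型": "car_type",
    "表显里程": "mileage",
    "发动机品牌": "engine",
}

def map_fields(item, rows, field_map=BASIC_FIELD_MAP):
    # rows is a list of {"key": ..., "value": ...} dicts, as in basicList
    for row in rows:
        for key, field in field_map.items():
            if key in row["key"]:
                item[field] = row["value"]
    return item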
|
[
"1316446041@qq.com"
] |
1316446041@qq.com
|
b3305b39cd894d4e5bb295a10bedfc5a27e0d140
|
e358b0c801b0173e2b5fe31820af2a45d5fff8ae
|
/altair_saver/savers/tests/test_html.py
|
6899fec8d9fd514217be3d235d24a1e5c2dde027
|
[
"BSD-3-Clause"
] |
permissive
|
steevensmelo/altair_saver
|
a3270d2a4c2615c0b86b6d10f87e67f04c3bd40f
|
d8dfc9a8d40b3ad20d152e014b37e3b1e6d512ef
|
refs/heads/master
| 2022-06-11T11:53:12.329070
| 2020-05-13T14:50:11
| 2020-05-13T14:50:11
| 263,651,277
| 0
| 0
|
BSD-3-Clause
| 2020-05-13T14:19:45
| 2020-05-13T14:19:44
| null |
UTF-8
|
Python
| false
| false
| 5,557
|
py
|
import io
import json
import os
from typing import Any, Dict, IO, Iterator, Optional, Tuple
from altair_data_server import Provider
from PIL import Image
import pytest
import selenium.webdriver
from selenium.webdriver.remote.webdriver import WebDriver
from altair_saver import HTMLSaver
from altair_saver._utils import internet_connected
CDN_URL = "https://cdn.jsdelivr.net"
@pytest.fixture(scope="module")
def internet_ok() -> bool:
return internet_connected()
@pytest.fixture(scope="module")
def provider() -> Iterator[Provider]:
provider = Provider()
yield provider
provider.stop()
@pytest.fixture(scope="module")
def driver() -> Iterator[WebDriver]:
options = selenium.webdriver.chrome.options.Options()
options.add_argument("--headless")
if hasattr(os, "geteuid") and (os.geteuid() == 0):
options.add_argument("--no-sandbox")
driver = selenium.webdriver.Chrome(options=options)
yield driver
driver.quit()
def get_testcases() -> Iterator[Tuple[str, Dict[str, Any]]]:
directory = os.path.join(os.path.dirname(__file__), "testcases")
cases = set(f.split(".")[0] for f in os.listdir(directory))
f: IO
for case in sorted(cases):
with open(os.path.join(directory, f"{case}.vl.json")) as f:
vl = json.load(f)
with open(os.path.join(directory, f"{case}.png"), "rb") as f:
png = f.read()
yield case, {"vega-lite": vl, "png": png}
@pytest.mark.parametrize("inline", [True, False])
@pytest.mark.parametrize("embed_options", [None, {"theme": "dark"}])
@pytest.mark.parametrize("case, data", get_testcases())
def test_html_save(
case: str, data: Dict[str, Any], embed_options: Optional[dict], inline: bool
) -> None:
saver = HTMLSaver(data["vega-lite"], inline=inline, embed_options=embed_options)
fp = io.StringIO()
saver.save(fp, "html")
html = fp.getvalue()
assert isinstance(html, str)
assert html.strip().startswith("<!DOCTYPE html>")
assert json.dumps(data["vega-lite"]) in html
assert f"const embedOpt = {json.dumps(embed_options or {})}" in html
if inline:
assert CDN_URL not in html
else:
assert CDN_URL in html
@pytest.mark.parametrize("embed_options", [None, {"theme": "dark"}])
@pytest.mark.parametrize("case, data", get_testcases())
def test_html_mimebundle(
case: str, data: Dict[str, Any], embed_options: Optional[dict],
) -> None:
saver = HTMLSaver(data["vega-lite"], embed_options=embed_options)
bundle = saver.mimebundle("html")
assert bundle.keys() == {"text/html"}
html = bundle["text/html"]
assert isinstance(html, str)
assert html.strip().startswith("<div")
assert json.dumps(data["vega-lite"]) in html
assert json.dumps(embed_options or {}) in html
assert CDN_URL in html
def test_bad_format() -> None:
saver = HTMLSaver({})
with pytest.raises(ValueError):
saver.mimebundle("vega")
@pytest.mark.parametrize("case, data", get_testcases())
@pytest.mark.parametrize("inline", [True, False])
def test_html_save_rendering(
provider: Provider,
driver: WebDriver,
case: str,
data: Dict[str, Any],
inline: bool,
internet_ok: bool,
) -> None:
if not (inline or internet_ok):
pytest.xfail("Internet connection not available")
saver = HTMLSaver(data["vega-lite"], inline=inline)
fp = io.StringIO()
saver.save(fp, "html")
html = fp.getvalue()
resource = provider.create(content=html, extension="html")
driver.set_window_size(800, 600)
driver.get(resource.url)
element = driver.find_element_by_class_name("vega-visualization")
png = driver.get_screenshot_as_png()
im = Image.open(io.BytesIO(png))
left = element.location["x"]
top = element.location["y"]
right = element.location["x"] + element.size["width"]
bottom = element.location["y"] + element.size["height"]
im = im.crop((left, top, right, bottom))
im_expected = Image.open(io.BytesIO(data["png"]))
assert abs(im.size[0] - im_expected.size[0]) < 40
assert abs(im.size[1] - im_expected.size[1]) < 40
@pytest.mark.parametrize("requirejs", [True, False])
@pytest.mark.parametrize("case, data", get_testcases())
def test_html_mimebundle_rendering(
provider: Provider,
driver: WebDriver,
case: str,
data: Dict[str, Any],
requirejs: bool,
internet_ok: bool,
) -> None:
if not internet_ok:
pytest.xfail("Internet connection not available")
saver = HTMLSaver(data["vega-lite"])
bundle = saver.mimebundle("html")
html = bundle["text/html"]
assert isinstance(html, str)
if requirejs:
html = f"""<!DOCTYPE html>
<html>
<head><script src="{CDN_URL}/npm/requirejs@2.3.6"></script></head>
<body>{html}</body>
</html>
"""
else:
html = f"<html>{html}</html>"
resource = provider.create(content=html, extension="html")
driver.set_window_size(800, 600)
driver.get(resource.url)
element = driver.find_element_by_class_name("vega-visualization")
png = driver.get_screenshot_as_png()
im = Image.open(io.BytesIO(png))
left = element.location["x"]
top = element.location["y"]
right = element.location["x"] + element.size["width"]
bottom = element.location["y"] + element.size["height"]
im = im.crop((left, top, right, bottom))
im_expected = Image.open(io.BytesIO(data["png"]))
assert abs(im.size[0] - im_expected.size[0]) < 40
assert abs(im.size[1] - im_expected.size[1]) < 40
|
[
"jakevdp@google.com"
] |
jakevdp@google.com
|
360826c9d54c53412f463d98e4c247151a877b01
|
16d159d6d3fe69d513717caad3e2c21320f93224
|
/AtCoder/ABC/ABC101-150/abc141/abc141b.py
|
23b49e7105c52df065df47863ad2a802a189189d
|
[] |
no_license
|
tsushiy/competitive-programming-submissions
|
d4f068a5157c0de0f1822367e0ca66dd978e43f9
|
9011d855d9252134179cc9cc8f328f6e0ca32407
|
refs/heads/master
| 2023-04-11T08:34:01.015316
| 2021-04-11T15:16:17
| 2021-04-11T15:17:35
| 175,807,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
s = input()
flag = True
for i, e in enumerate(s):
if i%2==0 and e not in "RUD":
flag = False
elif i%2==1 and e not in "LUD":
flag = False
if flag:
print("Yes")
else:
print("No")
|
[
"28962399+tsushiy@users.noreply.github.com"
] |
28962399+tsushiy@users.noreply.github.com
|
6e6e1991a34c11354f69814d4470d27f07744b55
|
8e02228b5857d876de244d37b1d6ec3bbd21227e
|
/python_modules/dagit/dagit_tests/test_debug_cli.py
|
8131baa4613455e33bad678940c7646f893f743e
|
[
"Apache-2.0"
] |
permissive
|
ChocoletMousse/dagster
|
4af255d5ab99c25d1a2be379a5bbd83fa3221b64
|
a256cb43cde0ab5a800a87ee1f55de560587a4ab
|
refs/heads/master
| 2022-12-26T00:04:31.172150
| 2020-09-25T21:36:15
| 2020-09-26T02:15:35
| 297,765,844
| 1
| 0
|
Apache-2.0
| 2020-09-22T20:35:23
| 2020-09-22T20:35:22
| null |
UTF-8
|
Python
| false
| false
| 1,226
|
py
|
from os import path
from click.testing import CliRunner
from dagit.debug import dagit_debug_command
from gevent import pywsgi
from dagster import execute_pipeline, lambda_solid, pipeline
from dagster.cli.debug import export_command
from dagster.core.test_utils import instance_for_test
@lambda_solid
def emit_one():
return 1
@pipeline
def test_pipe():
emit_one()
emit_one()
def test_roundtrip(monkeypatch):
runner = CliRunner()
with instance_for_test() as instance:
run_result = execute_pipeline(test_pipe, instance=instance)
assert run_result.success
file_path = path.join(instance.root_directory, ".temp.dump")
export_result = runner.invoke(export_command, [run_result.run_id, file_path])
assert "Exporting run_id" in export_result.output
assert file_path in export_result.output
# make dagit stop after launch
monkeypatch.setattr(pywsgi.WSGIServer, "serve_forever", lambda _: None)
debug_result = runner.invoke(dagit_debug_command, [file_path])
assert file_path in debug_result.output
assert "run_id: {}".format(run_result.run_id) in debug_result.output
assert "Serving on" in debug_result.output
|
[
"alex@elementl.com"
] |
alex@elementl.com
|
61b8dbbbc2d89a236263828285aac5e64b2dec48
|
e8d719fe45dfbff9cbbc4ed872832cec6cabaca6
|
/128_Longest_Consecutive_Sequence.py
|
1ccccaf698c6be4b9c5f01f29d570fac2016dac6
|
[] |
no_license
|
nlfox/leetcode
|
64f4f48d7f4be6df0542e51cc7037df40bf184a3
|
d61363f99de3d591ebc8cd94f62544a31a026d55
|
refs/heads/master
| 2020-12-21T01:43:01.792899
| 2016-11-14T23:10:12
| 2016-11-14T23:10:12
| 56,680,839
| 2
| 0
| null | 2016-05-17T17:16:37
| 2016-04-20T11:19:58
|
Python
|
UTF-8
|
Python
| false
| false
| 500
|
py
|
class Solution(object):
def longestConsecutive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        if not nums:  # guard: an empty input has no consecutive run
            return 0
        nums = list(set(nums))
        nums.sort()
maxCnt = 1
cnt = 1
for i in xrange(1, len(nums)):
if nums[i] - 1 == nums[i - 1]:
cnt += 1
else:
cnt = 1
if cnt > maxCnt:
maxCnt = cnt
return maxCnt
print Solution().longestConsecutive([1,2,0,1])
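# A hedged alternative sketch (not the submitted solution above): the classic
# O(n) approach keeps a set and only starts counting at streak beginnings,
# i.e. at numbers whose predecessor is absent from the set.
def longest_consecutive_linear(nums):
    num_set = set(nums)
    best = 0
    for n in num_set:
        if n - 1 not in num_set:  # n is the start of a streak
            length = 1
            while n + length in num_set:
                length += 1
            best = max(best, length)
    return best
print longest_consecutive_linear([1, 2, 0, 1])  # 3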
|
[
"nlfox@msn.cn"
] |
nlfox@msn.cn
|
08a6fe350bd23f142bad00a4690285eba2230d71
|
a9dc497a723917d4256ef15b2e9c3cf88a3fae4f
|
/GPinv/mean_functions.py
|
e2f7bd94404647f0cc8161fe0941bcca89455f3f
|
[
"Apache-2.0"
] |
permissive
|
chaoshunh/GPinv
|
83b8773870f7ccc8a636fc88ac9a96acfa306c54
|
e46964991459cb43752cd344c18be0e197d439f8
|
refs/heads/master
| 2023-03-16T06:42:39.966449
| 2016-10-15T05:39:58
| 2016-10-15T05:39:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,458
|
py
|
import tensorflow as tf
import numpy as np
from GPflow import mean_functions
from GPflow.param import Param,ParamList
from GPflow._settings import settings
float_type = settings.dtypes.float_type
np_float_type = np.float32 if float_type is tf.float32 else np.float64
class MeanFunction(mean_functions.MeanFunction):
"""
A wrap of GPflow.mean_functions.MeanFunction.
The main difference of this wrap is __call__ method, that returns
nxR sized tensor, in contrast to GPflow.mean_functions.MeanFunction, which
returns nx1 sized tensor.
"""
def __init__(self, output_dim):
"""
:param integer output_dim: number of output dimension, R.
"""
mean_functions.MeanFunction.__init__(self)
self.output_dim = output_dim
def __call__(self, X):
"""
:param tf.tensor x: nxD tensor.
:return tf.tensor: nxR tensor.
"""
raise NotImplementedError("Implement the __call__\
method for this mean function")
class Zero(MeanFunction):
""" Zero mean """
def __call__(self, X):
return tf.zeros([tf.shape(X)[0],self.output_dim], float_type)
class Constant(MeanFunction):
""" Constant mean """
def __init__(self, output_dim, c=None):
MeanFunction.__init__(self, output_dim)
if c is None:
c = np.ones(output_dim,np_float_type)
self.c = Param(c)
def __call__(self, X):
return tf.tile(tf.expand_dims(self.c,0), [tf.shape(X)[0],1])
class Stack(MeanFunction):
"""
Mean function that returns multiple kinds of mean values, stacked
vertically.
Input for the initializer is a list of MeanFunctions, [m_1,m_2,...,m_M].
The function call returns [m_1(X),m_2(X),...,m_M(X)].
The size of the return is n x (sum_i m_i.output_dim).
"""
def __init__(self, list_of_means):
"""
:param list list_of_means: A list of MeanFunction object.
"""
output_dim = 0
for m in list_of_means:
output_dim += m.output_dim
MeanFunction.__init__(self, output_dim)
# MeanFunctions are stored as ParamList
self.mean_list = ParamList(list_of_means)
def __call__(self, X):
"""
Return a concatenated tensor of the multiple mean functions.
The size of the return is n x (sum_i m_i.output_dim).
"""
return tf.concat(1, [l(X) for l in self.mean_list])
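# Hedged usage sketch (assumes the legacy TensorFlow/GPflow versions this
# module targets; the shapes below are illustrative assumptions):
#
#     mean = Stack([Zero(output_dim=1), Constant(output_dim=2)])
#     X = tf.placeholder(float_type, shape=[None, 5])  # n x D inputs
#     m = mean(X)                                      # n x 3 tensor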
|
[
"fujiisoup@gmail.com"
] |
fujiisoup@gmail.com
|
8f1ad2f8765959e86316c7bf0ab8d91cc8ace7c1
|
8fc2ab3d29a30e603e19b30bb9517928de529167
|
/recursion_count7.py
|
da54e1e6515b053de42599e4a335a9a73680a757
|
[] |
no_license
|
rushilchugh/Practise
|
35a9861bec6786580dc0a440eb25d78e43cb7bc9
|
98fd593b95dad641bef1d519c6c6ed1daaae630f
|
refs/heads/master
| 2020-03-13T21:14:14.013604
| 2018-04-27T12:23:50
| 2018-04-27T12:23:50
| 131,291,684
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
__author__ = 'Rushil'
def count7(n):
if n == 0:
return 0
if n%10 == 7:
return 1 + count7(n//10)
else:
return count7(n//10)
print(count7(77177))
|
[
"noreply@github.com"
] |
rushilchugh.noreply@github.com
|
2d90be2e1a6b6bc6c2fe03c9b529449c1127a540
|
fd0604a74b72e273e194cae9145b4a299d2e1858
|
/cogs/helpers/context.py
|
e5289d13bd928bfaecd26cf9b74ac09ac4cc12da
|
[] |
no_license
|
DuckHunt-discord/DuckHunt-Community-Rewrite
|
4610f0f5a503ae072b6092ddd031264842c6919c
|
c53ed8c994527052bcf588824ce6519280974056
|
refs/heads/master
| 2020-03-23T05:14:02.442187
| 2018-11-03T00:37:17
| 2018-11-03T00:37:17
| 141,131,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
import logging
from discord.ext import commands
class CustomContext(commands.Context):
def __init__(self, **attrs):
super().__init__(**attrs)
@property
def logger(self):
# Copy that to log
if self.channel:
cname = self.channel.name
else:
cname = "PRIVATE_MESSAGE"
extra = {"channelname": f"#{cname}", "userid": f"{self.author.id}", "username": f"{self.author.name}#{self.author.discriminator}"}
logger = logging.LoggerAdapter(self.bot.base_logger, extra)
return logger
async def send_to(self, message, user=None, **kwargs):
if user is None:
user = self.author
message = f"{user.mention} > {message}"
await self.send(message, **kwargs)
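# Hedged usage sketch: discord.py substitutes this class for the default
# Context when a Bot overrides get_context (cls is a documented parameter of
# commands.Bot.get_context). The subclass name below is an illustrative
# assumption, not part of this repository.
#
# class DuckHuntBot(commands.Bot):
#     async def get_context(self, message, *, cls=CustomContext):
#         return await super().get_context(message, cls=cls)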
|
[
"jovart.arthur@gmail.com"
] |
jovart.arthur@gmail.com
|
a87152f3a008ec4832177a68b36f5aa27bf07c1b
|
cda43bf6a84f7e55fab26aa70cda934683a51fe5
|
/MainMajor/cifar_loader.py
|
fa597d04eb30e7dc0e3b560acb21758dfb94fd31
|
[] |
no_license
|
nikolaosdionelis/NeuralNetworksNNs
|
abb55622882e31c8d130a8986868b3d19ede186f
|
8a217490ad5bb3f7fccf4002c6b43a06c1e562fc
|
refs/heads/master
| 2022-11-13T00:50:23.578197
| 2020-07-12T18:52:20
| 2020-07-12T18:52:20
| 279,042,013
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,034
|
py
|
"""
Utilities for downloading and unpacking the CIFAR-10 dataset, originally published
by Krizhevsky et al. and hosted here: https://www.cs.toronto.edu/~kriz/cifar.html
"""
import os
import sys
import tarfile
from six.moves import urllib
import numpy as np
def maybe_download_and_extract(data_dir, url='http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'):
if not os.path.exists(os.path.join(data_dir, 'cifar-10-batches-py')):
if not os.path.exists(data_dir):
os.makedirs(data_dir)
filename = url.split('/')[-1]
filepath = os.path.join(data_dir, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\n>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(data_dir)
def unpickle(file):
fo = open(file, 'rb')
if (sys.version_info >= (3, 0)):
import pickle
d = pickle.load(fo, encoding='latin1')
else:
import cPickle
d = cPickle.load(fo)
fo.close()
return {'x': d['data'].reshape((10000,3,32,32)), 'y': np.array(d['labels']).astype(np.uint8)}
def load_cifar(data_dir="data/cifar_data/"):
if not os.path.exists(data_dir):
print('creating folder', data_dir)
os.makedirs(data_dir)
maybe_download_and_extract(data_dir)
train_data = [unpickle(os.path.join(data_dir,'cifar-10-batches-py','data_batch_' + str(i))) for i in range(1,6)]
skip_first_500 = [0 for x in range(10)]
trainx_list = []
trainy_list = []
valx_list = []
valy_list = []
for row in train_data:
for dx, dy in zip(row['x'], row['y']):
# print(d['y'])
if skip_first_500[dy] < 500:
valx_list.append(dx)
valy_list.append(dy)
skip_first_500[dy] += 1
continue
trainx_list.append(dx)
trainy_list.append(dy)
trainx = np.array(trainx_list)
trainy = np.array(trainy_list)
valx = np.array(valx_list)
valy = np.array(valy_list)
test_data = unpickle(os.path.join(data_dir,'cifar-10-batches-py','test_batch'))
testx = test_data['x']
testy = test_data['y']
trainx = trainx/255.0
valx = valx/255.0
testx = testx/255.0
print("max: " + str(np.amax(trainx)))
print("min: " + str(np.amin(trainx)))
print("max: " + str(np.amax(testx)))
print("min: " + str(np.amin(testx)))
print("max: " + str(np.amax(valx)))
print("min: " + str(np.amin(valx)))
# (N,3,32,32) -> (N,32,32,3)
return np.transpose(trainx, (0,2,3,1)), \
np.transpose(valx, (0,2,3,1)), \
np.transpose(testx, (0,2,3,1))
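# Hedged usage sketch: the loader returns channel-last float arrays scaled to
# [0, 1]; with the 500-per-class validation split above the shapes come out to
# (45000, 32, 32, 3), (5000, 32, 32, 3) and (10000, 32, 32, 3).
#
# if __name__ == '__main__':
#     trainx, valx, testx = load_cifar()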
|
[
"noreply@github.com"
] |
nikolaosdionelis.noreply@github.com
|
c377555c2f0b5965254d6b7ac73814194212cf28
|
2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5
|
/archive/298BinaryTreeLongestConsecutiveSequence.py
|
92d9aa4d93f3055c85599dd48eacc42d05e24958
|
[] |
no_license
|
doraemon1293/Leetcode
|
924b19f840085a80a9e8c0092d340b69aba7a764
|
48ba21799f63225c104f649c3871444a29ab978a
|
refs/heads/master
| 2022-10-01T16:20:07.588092
| 2022-09-08T02:44:56
| 2022-09-08T02:44:56
| 122,086,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
# coding=utf-8
'''
Created on 2017-06-16
@author: Administrator
'''
from data_structure.Tree import list_to_tree
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def longestConsecutive(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root == None: return 0
self.ans = 0
self.memo = {}
def foo(node):
res = 1
if node.left:
                left = self.memo[node.left] if node.left in self.memo else foo(node.left)
else:
left = 0
if node.right:
                right = self.memo[node.right] if node.right in self.memo else foo(node.right)
else:
right = 0
if left and node.left.val == node.val + 1:
res = max(res, left + 1)
if right and node.right.val == node.val + 1:
res = max(res, right + 1)
self.ans = max(self.ans, res)
self.memo[node] = res
return res
foo(root)
# for k, v in self.memo.items():
# print k.val, v
# print self.memo
return self.ans
root = list_to_tree([1, None, 3, 2, 4, None, None, None, 5])
print Solution().longestConsecutive(root)
|
[
"yanhuang1293@gmail.com"
] |
yanhuang1293@gmail.com
|
31179b3b337c588adca95e89088525bd446ce1e8
|
b01182728eb4a657f9bd8974ba8508010cc8173e
|
/manage.py
|
314bde693826fb496c2d2cd9215db691ca17844c
|
[
"BSD-3-Clause"
] |
permissive
|
pombredanne/django-buildbot
|
459a6d8a942c8c5f857626f4772010a3aaba2a6e
|
660ec35473aec081428a115eab6460f5a1cd2a0d
|
refs/heads/master
| 2021-01-17T19:57:01.296286
| 2014-07-29T06:25:03
| 2014-07-29T06:25:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djbuildbot.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"xordoquy@linovia.com"
] |
xordoquy@linovia.com
|
36c7630adaee0011f298f2b5daaa682935d10da2
|
76b983258793d294b81791ebe72591bfebf78625
|
/tools/pesearch.py
|
d6b57f8548ff6441b7acce963e476872846cc4ae
|
[
"BSD-2-Clause"
] |
permissive
|
lotusexpeditor/syringe
|
18ac9cb800a7fefb7d67e31936db6a84e47df9eb
|
34a8386b90f534f9a856d0a436bba04dbf5100bd
|
refs/heads/master
| 2023-02-08T10:08:20.295797
| 2020-12-27T00:06:09
| 2020-12-27T00:06:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,743
|
py
|
import os,sys
import pecoff
from ptypes import utils
def traverse_address(address):
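    """Return an edges callback for ptypes traversal that only descends into
    children whose range contains the given address."""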
def edges(self, address=address, **kwargs):
if not isinstance(self, ptypes.ptype.container):
return
for item in self.value:
if item.contains(address):
yield item
continue
return
return edges
def rightjustify(string, size, space=' '):
    # Equivalent to str.rjust(size, space) for a single fill character; kept
    # as an explicit helper for readability.
diff = size - len(string)
if diff > 0:
string = space*diff + string
return string
def processexecutable(filename, address):
# globals for interpretive use
global mz,pe,imagebase,sections,datadirectory
print('Query: %x\n'% address)
print('Module: %s'% os.path.basename(filename))
print('ImageBase: %x'% imagebase)
# try exe header first
mz.setoffset(imagebase, recurse=True)
if mz.contains(address):
result = mz
for item in mz.traverse(traverse_address(address)):
x = item.__name__
print(rightjustify('------- %s'%x, 70, '-'))
result = result[int(x) if isinstance(result, ptypes.parray.type) else x]
print(result)
# try sections
else:
mz.setoffset(0,recurse=True)
va = address - imagebase
s = pe['Sections'].getsectionbyaddress(va)
offset = va - s['VirtualAddress'].int()
data = s['PointerToRawData'].d.load().serialize()
left = offset - 8
left &= ~0xf
right = left+0x30
if left < 0: left = 0
if right > len(data): right = len(data)
sectionname = s['Name'].get()
print(rightjustify(' section %s'% sectionname, 76, '-'))
print(utils.hexdump(data[left:right], offset=s['VirtualAddress'].int()+offset+imagebase))
mz.setoffset(0, recurse=True)
return
from ptypes import ptype
def dumpcontainer(pc, indent=''):
if isinstance(pc.value, list):
for p in pc.value:
a = p.getoffset()
            rng = '%x-%x' % (a, a + p.size())  # renamed to avoid shadowing the builtin range
            sym = '%s -> %s' % (p.__name__, p.__class__.__name__)
            r = repr(p.serialize())
            if not isinstance(p.value, list):
                print(indent, rng, sym, ' | ', r)
                continue
            print(indent, rng, sym)
            dumpcontainer(p, indent+' ')
pass
return
def dumpexecutable(filename):
# globals for interpretive use
global mz,pe,imagebase,sections,datadirectory,imports
print('Module: %s'% os.path.basename(filename))
print('ImageBase: %x'% imagebase)
print('Imports: %s'% ', '.join([x['Name'].d.l.str() for x in imports.l[:-1]]))
mz.setoffset(imagebase,recurse=True)
print(pe)
for x in sections:
name = x['Name'].str()
address = x['VirtualAddress'].int() + imagebase
print(x['Name'].str(), hex(address), hex(address + x.getloadedsize()))
mz.setoffset(0,recurse=True)
return
def dumpversion(filename):
global mz,pe,imagebase,sections,datadirectory,imports
opt = pe['OptionalHeader']
print('OperatingSystem', float('%d.%d'%(opt['MajorOperatingSystemVersion'].int(), opt['MinorOperatingSystemVersion'].int())))
print('ImageVersion', float('%d.%d'%(opt['MajorImageVersion'].int(), opt['MinorImageVersion'].int())))
print('SubsystemVersion', float('%d.%d'%(opt['MajorSubsystemVersion'].int(), opt['MinorSubsystemVersion'].int())))
print(opt['Win32VersionValue'])
global root
rsrc = datadirectory[2]
root = rsrc['VirtualAddress'].d.l
global a,b
a,b = root['Ids']
if __name__ == '__main__':
import sys, ptypes
zerobase = False
if '-z' in sys.argv:
i = sys.argv.index('-z')
sys.argv.pop(i)
zerobase = True
try:
filename = sys.argv[1]
except:
print('Usage: %s [-z] filename [hexaddress]'% sys.argv[0])
sys.exit(0)
if not os.path.exists(filename):
raise OSError("The specified file ({:s}) does not exist.".format(filename))
source = ptypes.provider.file(filename)
mz = pecoff.Executable.File(source=source).l
pe = mz['Next']['Header']
sections = pe['Sections']
imagebase = pe['OptionalHeader']['ImageBase'].int()
datadirectory = pe['DataDirectory']
if zerobase:
imagebase = 0
imports = datadirectory[1]['Address'].d.l
if len(sys.argv) == 2:
dumpexecutable(filename)
elif len(sys.argv) == 3:
address = int(sys.argv[2], 16)
try:
processexecutable(filename, address)
except KeyError:
print('address %x not found in %s'% (address, filename))
pass
|
[
"arizvisa@gmail.com"
] |
arizvisa@gmail.com
|
e38298b3db6bf24895dd9139bc51769fbef8cd78
|
37f10a692d0e1a9a396f505af60b04f7db44d3e1
|
/01 Algorithms/01 Warmup/CompareTheTriplets.py
|
9b18b2dcebec59ded6a89ae7c2987fd725d10ec4
|
[] |
no_license
|
nbrahman/HackerRank
|
95842f5dbda2ab8aedc7808831c12b9f92a37e03
|
ec5d42d7e578f01efba87a099f42e82512704dca
|
refs/heads/master
| 2021-01-19T11:53:59.685444
| 2017-07-29T16:04:21
| 2017-07-29T16:04:21
| 88,003,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,616
|
py
|
'''
Alice and Bob each created one problem for HackerRank. A reviewer rates the two challenges, awarding points on a scale from 1 to 100 for three categories: problem clarity, originality, and difficulty.
We define the rating for Alice's challenge to be the triplet a = (a[0], a[1], a[2]), and the rating for Bob's challenge to be the triplet b = (b[0], b[1], b[2]).
Your task is to find their comparison scores by comparing a[0] with b[0], a[1] with b[1], and a[2] with b[2].
If a[i] > b[i], then Alice is awarded 1 point.
If a[i] < b[i], then Bob is awarded 1 point.
If a[i] == b[i], then neither person receives a point.
Given a and b, can you compare the two challenges and print their respective comparison points?
Input Format
The first line contains 3 space-separated integers, a[0], a[1], and a[2], describing the respective values in triplet a.
The second line contains 3 space-separated integers, b[0], b[1], and b[2], describing the respective values in triplet b.
Constraints
1 <= a[i], b[i] <= 100
Output Format
Print two space-separated integers denoting the respective comparison scores earned by Alice and Bob.
Sample Input
5 6 7
3 6 10
Sample Output
1 1
Explanation
In this example, a = (5, 6, 7) and b = (3, 6, 10). Comparing each individual score:
a[0] > b[0] (5 > 3), so Alice receives 1 point.
a[1] == b[1] (6 == 6), so nobody receives a point.
a[2] < b[2] (7 < 10), so Bob receives 1 point.
Alice's comparison score is 1, and Bob's comparison score is 1. Thus, we print 1 1 (Alice's comparison score followed by Bob's comparison score) on a single line.
'''
if __name__ == '__main__':
A = list(map(int, input().strip().split()))
B = list(map(int, input().strip().split()))
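    # zip pairs the corresponding category scores; each winning comparison
    # contributes exactly one point to that player's total.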
Ascore = sum([1 for a, b in zip(A, B) if a > b])
Bscore = sum([1 for a, b in zip(A, B) if b > a])
print(Ascore, Bscore)
|
[
"nikhil.brahmankar@gmail.com"
] |
nikhil.brahmankar@gmail.com
|
7f7e6202d1b1aa73181f18176a9a42fd131c5023
|
9cef4ef20efd0eec18846242e78be0b9be144c30
|
/homework/20200717/子进程中修改全局变量.py
|
9a7989b2eec30662def98571d1867a21fc6cbba9
|
[] |
no_license
|
Vaild/python-learn
|
4e6511a62a40b6104b081e0f8fe30f7d829901f5
|
5d602daf3b4b7e42349b7d9251df1f4dd62c299c
|
refs/heads/master
| 2022-11-19T00:47:48.808384
| 2020-07-20T14:27:49
| 2020-07-20T14:27:49
| 279,044,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
#!/usr/bin/python3
# coding = UTF-8
# code by va1id
# Define a global variable
import os
import time
from multiprocessing import Process
N= [1, 2]
def change_process():
    print('List before modification:', N)
    print('Process ID here:', os.getpid())
for i in range(10):
N.append(i)
    print('List after modification:', N)
    print('Process ID after modification:', os.getpid())
time.sleep(1)
def watch_list():
    print('The list is:', N)
time.sleep(1)
if __name__ == '__main__':
x = Process(target=change_process)
y = Process(target=watch_list)
x.start()
y.start()
x.join()
y.join()
    print('Final list:', N)
    print('Final process ID:', os.getpid())
# Trying the same with an integer: the IDs printed at the end show that the
# modification happened in a different process
N = 1
def change_process():
global N
    print('Value before modification:', N)
    print('Process ID here:', os.getpid())
for i in range(10):
N = i
    print('Value after modification:', N)
    print('Process ID after modification:', os.getpid())
time.sleep(1)
def watch_list():
    print('The value is:', N)
time.sleep(1)
if __name__ == '__main__':
x = Process(target=change_process)
y = Process(target=watch_list)
x.start()
y.start()
x.join()
y.join()
    print('Final value:', N)
    print('Final process ID:', os.getpid())
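# Note: each Process runs in its own address space, so the child's changes to
# N (list or integer alike) never reach the parent; the parent's final prints
# show the original value, while os.getpid() differs per process.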
|
[
"cubersongwenbo@gmail.com"
] |
cubersongwenbo@gmail.com
|
91bebb09bc6024dfa9e4392aaab0a55020f063b4
|
d7f33fd19ecc59aadebe4f50db605ad39b967dbd
|
/disambig_creation.py
|
36a957f12832a0936243132decf3d61f9165915d
|
[] |
no_license
|
juansaenz/leaguepedia_util
|
9687ab3634ccdf022c637323dceb0638ed5ece88
|
77e3dfe501b333b3a8dc2a04f207b8bf0858dbc3
|
refs/heads/master
| 2020-08-05T19:01:30.131055
| 2019-10-04T11:34:01
| 2019-10-04T11:34:01
| 212,667,697
| 0
| 0
| null | 2019-10-03T19:54:58
| 2019-10-03T19:54:58
| null |
UTF-8
|
Python
| false
| false
| 7,129
|
py
|
import threading, mwparserfromhell
from log_into_wiki import *
#################################################################################################
original_name = 'Starky'
irl_name = "Juan Carlos Cano"
new_name = '{} ({})'.format(original_name, irl_name.strip())
init_move = True
blank_edit = False
limit = -1
timeout_limit = 30
listplayer_templates = ["listplayer", "listplayer/Current"]
roster_templates = ["ExtendedRosterLine", "ExtendedRosterLine/MultipleRoles"]
scoreboard_templates = ["MatchRecapS8/Player","Scoreboard/Player"]
stat_templates = ["IPS", "CareerPlayerStats", "MatchHistoryPlayer"]
player_line_templates = ["LCKPlayerLine", "LCSPlayerLine"]
roster_change_templates = ["RosterChangeLine", "RosterRumorLine2",
"RosterRumorLineStay", "RosterRumorLineNot", "RosterRumorLine"]
summary = "Disambiguating {} to {}".format(original_name, new_name)
css_style = " {\n color:orange!important;\n font-weight:bold;\n}"
orig_name_lc = original_name[0].lower() + original_name[1:]
new_name_lc = new_name[0].lower() + new_name[1:]
blank_edit_these = []
#############################################################################################
def savepage(targetpage, savetext):
targetpage.save(savetext, summary=summary, tags="bot_disambig")
def blank_edit_page(page):
textname = str(page.name)
newpage = site.pages[textname]
text = newpage.text(cache=False)
page.save(text, summary="Blank Editing")
def move_page(from_page):
new_page_name = str(from_page.name).replace(original_name, new_name)
new_page = site.pages[new_page_name]
if new_page.exists:
print("{} already exists, cannot move!".format(from_page.name))
else:
print("Moving page {} to {}".format(from_page.name, new_page_name))
from_page.move(new_page_name, reason=summary, no_redirect=True)
blank_edit_these.append(new_page)
def edit_concept(concept):
text = concept.text()
wikitext = mwparserfromhell.parse(text)
for template in wikitext.filter_templates():
if template.name.matches("PlayerGamesConcept"):
i = 1
while template.has(i):
if template.get(i).strip() == original_name:
template.add(i, new_name)
elif template.get(i).strip() == orig_name_lc:
template.add(i, new_name_lc)
i = i + 1
newtext = str(wikitext)
if newtext != text:
concept.save(newtext, summary=summary, tags="bot_disambig")
def edit_subpage(subpage):
text = subpage.text()
wikitext = mwparserfromhell.parse(text)
for stemplate in wikitext.filter_templates():
if stemplate.has(1):
if stemplate.get(1).value.strip() == original_name:
stemplate.add(1, new_name)
newtext = str(wikitext)
if text != newtext:
print("Editing " + subpage.name + "...")
subpage.save(newtext, reason=summary)
def process_page(page):
print("Processing next page: " + page.name)
text = page.text()
origtext = text
# do links first because it's easier to just edit them as a string
if text.lower().startswith('#redirect') and page.name.lower() == original_name.lower():
pass
else:
text = text.replace("[[" + original_name + "]]", "[[" + new_name + "|" + original_name + "]]")
wikitext = mwparserfromhell.parse(text)
for template in wikitext.filter_templates():
process_template(template)
newtext = str(wikitext)
if origtext != newtext or blank_edit:
print("Saving...")
t = threading.Thread(target=savepage, kwargs={"targetpage": page, "savetext": newtext})
t.start()
t.join(timeout=timeout_limit)
else:
print("No changes, skipping")
def check_list(template, param, sep = ','):
if not template.has(param):
return
text_initial = template.get(param).value.strip()
tbl = text_initial.split(sep)
made_changes = False
for i, val in enumerate(tbl):
if val.strip() == original_name:
made_changes = True
tbl[i] = new_name
if made_changes:
template.add(param, sep.join(tbl))
def process_template(template):
def tl_matches(arr, field=None):
if field:
has_field = False
if template.has(field):
has_field = template.get(field).value.strip() == original_name
return [_ for _ in arr if template.name.matches(_)] and has_field
return [_ for _ in arr if template.name.matches(_)]
if tl_matches(['bl'], field=1) and not template.has(2):
template.add(1, new_name)
template.add(2, original_name)
elif tl_matches(listplayer_templates, field=1) and not template.has("link"):
template.add("link", new_name, before=1)
elif tl_matches(roster_templates, field='player') and not template.has('link'):
template.add("link", new_name, before="name")
elif tl_matches(scoreboard_templates, field='name'):
template.add("link", new_name, before="kills")
elif tl_matches(roster_change_templates, field='player'):
template.add("player", new_name + "{{!}}" + original_name)
elif tl_matches(['TeamRoster/Line', 'RosterLineOld'], field='player'):
template.add('link', new_name)
elif tl_matches(player_line_templates, field=1):
template.add(2, new_name)
elif tl_matches(['Player', 'RSRR/Player'], field=1):
template.add('link', new_name)
elif tl_matches(["MatchDetails/Series"], field='mvp'):
template.add("mvplink", new_name, before="mvp")
elif tl_matches(["PentakillLine"], field=6):
template.add("playerlink", new_name, before=6)
elif tl_matches(["MatchSchedule","MatchSchedule/Game"]):
if template.has("mvp"):
if template.get("mvp").value.strip() == original_name:
template.add("mvp", new_name)
check_list(template, 'with')
check_list(template, 'pbp')
check_list(template, 'color')
elif tl_matches(['ExternalContent/Line']):
check_list(template, 'players')
elif tl_matches(['SeasonAward']):
if template.has(1):
if template.get(1).value.strip() == original_name:
template.add('link', new_name)
check_links(template, 'eligibleplayers', 'eligiblelinks', ',', original_name, new_name)
elif tl_matches(['PlayerImageMetadata'], field="playerlink"):
template.add('playerlink', new_name)
elif tl_matches(["PortalCurrentRosters"]):
for pos in ['t', 'j', 'm', 'a', 's']:
for period in ['old', 'new']:
arg_name = pos + '_' + period
arg_link = arg_name + '_links'
check_links(template, arg_name, arg_link, ',', original_name, new_name)
def make_disambig_page():
text = "{{DisambigPage\n|player1=" + new_name + "\n|player2=\n}}"
page = site.pages[original_name]
old_text = page.text()
if 'disambigpage' not in old_text.lower():
page.save(text, summary=summary)
site = login('me','lol')
thispage = site.pages[original_name]
newpage = site.pages[new_name]
if init_move:
move_page(thispage)
subpages = site.allpages(prefix=original_name + "/")
for subpage in subpages:
edit_subpage(subpage)
move_page(subpage)
concept = site.pages["Concept:{}/Games".format(original_name)]
if concept.exists:
edit_concept(concept)
move_page(concept)
pages = thispage.backlinks()
i = 0
for page in pages:
if i == limit:
break
i = i + 1
process_page(page)
print("Blank editing...")
if init_move:
for page in blank_edit_these:
blank_edit_page(page)
make_disambig_page()
print("Done! If some pages stalled out you may still need to abort manually.")
|
[
"18037011+RheingoldRiver@users.noreply.github.com"
] |
18037011+RheingoldRiver@users.noreply.github.com
|
fe8bc6b1e305d6f955d5f51ccdbcbb03567c3c5f
|
33787153c4f85cb626cf16e0e4d40d5970df2871
|
/reservation_system/order/admin.py
|
873c45edd8890e68e2a1d428784197d437bc8309
|
[] |
no_license
|
nazaninsbr/E-Commerce-Website
|
93e14576d69618f749d7fd6f19b6b1b989fa47a8
|
46bed8c58f4adac37dc4ddd881f57e694961b9b2
|
refs/heads/master
| 2021-09-22T14:20:56.940950
| 2018-09-11T05:01:23
| 2018-09-11T05:01:23
| 125,370,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 508
|
py
|
from django.contrib import admin
from .models import Order, OrderItem
class OrderItemInline(admin.TabularInline):
model = OrderItem
raw_id_fields = ['product']
class OrderAdmin(admin.ModelAdmin):
list_display = ['id', 'first_name', 'last_name', 'email', 'address', 'postal_code', 'department', 'paid', 'created',
'updated']
list_filter = ['paid', 'created', 'updated', 'last_name', 'department']
inlines = [OrderItemInline]
admin.site.register(Order, OrderAdmin)
|
[
"nazanin.sabrii@gmail.com"
] |
nazanin.sabrii@gmail.com
|
99addf12f6477d42e6cc49078268d5e6afdf5f2d
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/devcenter/azure-developer-devcenter/samples/create_environment_sample.py
|
637a5e178d90414987a7ffcd3707ca295f73c41e
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,239
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import logging
import os
from azure.developer.devcenter import DevCenterClient
from azure.identity import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
def main():
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger()
# Set the values of the dev center endpoint, client ID, and client secret of the AAD application as environment variables:
# DEVCENTER_ENDPOINT, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET
try:
endpoint = os.environ["DEVCENTER_ENDPOINT"]
except KeyError:
LOG.error("Missing environment variable 'DEVCENTER_ENDPOINT' - please set it before running the example")
exit()
# Build a client through AAD
client = DevCenterClient(endpoint, credential=DefaultAzureCredential())
# Fetch control plane resource dependencies
target_project_name = list(client.dev_center.list_projects(top=1))[0]['name']
target_catalog_item_name = list(client.environments.list_catalog_items(target_project_name, top=1))[0]['name']
target_environment_type_name = list(client.environments.list_environment_types(target_project_name, top=1))[0]['name']
# Stand up a new environment
create_response = client.environments.begin_create_environment(target_project_name,
"Dev_Environment",
{"catalogItemName": target_catalog_item_name, "environmentType": target_environment_type_name})
environment_result = create_response.result()
LOG.info(f"Provisioned environment with status {environment_result['provisioningState']}.")
# Tear down the environment when finished
delete_response = client.environments.begin_delete_environment(target_project_name, "Dev_Environment")
delete_response.wait()
LOG.info("Completed deletion for the environment.")
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
e48457d278580c6a39e5c28f37e35ec482015823
|
5b58a332c6bea0688d196aabedfc8ccc49bdd134
|
/experiments/cnn_short_embeddings/get_embeddings.py
|
c2dfad97b2a1949c516d77ecfb2e88218654e2d3
|
[] |
no_license
|
ver228/classify_strains
|
5420c2b3ea8e93b6ba46900c385f52f664f1cbd7
|
dc61e7431410e25ab7c2da0acb6d090cc2ebaabb
|
refs/heads/master
| 2021-09-20T08:52:14.505868
| 2018-08-07T12:26:22
| 2018-08-07T12:26:22
| 108,448,619
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,628
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 14:57:25 2017
@author: ajaver
"""
import sys
import os
import pandas as pd
import torch
import tqdm
import numpy as np
import tables
#Be sure to use abspath linux does not give the path if one uses __file__
_BASEDIR = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.join(_BASEDIR, os.pardir, os.pardir, 'src')
sys.path.append(src_dir)
from classify.flow import SkeletonsFlowFull, get_valid_strains, get_datset_file
import classify.models.model_w_embedding as models
if __name__ == '__main__':
dname = '/Users/ajaver/OneDrive - Imperial College London/classify_strains/trained_models'
#model_file = 'resnet18_w_emb_R_L256_l2_0.1_20171126_010058_best.pth.tar'
#props = dict(
# model_name = 'resnet18_w_emb',
# is_residual = True,
# embedding_size = 256
# )
model_file = 'simple_w_emb_R_L256_l2_0.01_20171126_010327_best.pth.tar'
props = dict(
model_name = 'simple_w_emb',
is_residual = True,
embedding_size = 256
)
    model_path = os.path.join(dname, model_file)
    dataset = 'CeNDR'
    valid_strains = get_valid_strains(dataset, is_reduced=True)
    data_file = get_datset_file(dataset)
    # data_file must be defined before deriving the embeddings path from it
    embeddings_file = data_file.replace('_skel_smoothed.hdf5', '_embedings.hdf5')
gen = SkeletonsFlowFull(
n_batch = 32,
data_file = data_file,
sample_size_seconds = 10,
sample_frequency_s = 0.04,
valid_strains = valid_strains,
label_type = 'row_id',
is_return_snps = False,
transform_type = 'angles'
)
get_model_func = getattr(models, props['model_name'])
model = get_model_func(gen, props['embedding_size'])
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['state_dict'])
model.eval()
results = []
for ii, (input_v, row_ids) in enumerate(tqdm.tqdm(gen)):
video_embedding = model.video_model(input_v)
pred = model.classification(video_embedding).max(1)[1]
dat = [x.data.numpy() for x in (row_ids, video_embedding, pred)]
results.append(dat)
#%%
row_ids, embeddings, predictions = map(np.concatenate, zip(*results))
df_g = pd.DataFrame(row_ids.T, columns=['row_id']).groupby('row_id').groups
embedding_groups = []
for irow, row in gen.skeletons_ranges.iterrows():
if irow in df_g:
row_n = row[['experiment_id', 'worm_index', 'strain', 'strain_id']].copy()
row_n['ini'] = df_g[irow].min()
row_n['fin'] = df_g[irow].max()
row_n['skel_group_id'] = irow
embedding_groups.append(row_n)
embedding_groups = pd.DataFrame(embedding_groups)
#%%
snps_embeddings = np.full((gen.n_classes, props['embedding_size']), np.nan, dtype=np.float32)
for strain_id in gen.strain_ids:
strain = gen.strain_codes.loc[strain_id, 'strain']
snps = gen.snps_data[strain].values.T.astype(np.float32)
snps = torch.from_numpy(snps).float()
snps = torch.autograd.Variable(snps)
snps_embedding = model.snp_mapper(snps)
snps_embeddings[strain_id] = snps_embedding.data.numpy()
#%%
TABLE_FILTERS = tables.Filters(
complevel=5,
complib='zlib',
shuffle=True,
fletcher32=True)
fields2copy = ['experiments_data', 'snps_data', 'strains_codes']
with tables.File(data_file, 'r') as fid_old, \
tables.File(embeddings_file, "w") as fid_new:
for field in fields2copy:
tab = fid_old.get_node('/' +field)[:]
fid_new.create_table('/',
field,
obj=tab,
filters=TABLE_FILTERS)
gg = fid_new.create_group('/', 'index_groups')
for field in ['train', 'test', 'val']:
tab = fid_old.get_node('/index_groups/' +field)[:]
fid_new.create_array('/index_groups',
field,
obj=tab)
#%%
with tables.File(embeddings_file, "r+") as fid:
table_type = np.dtype([('experiment_id', np.int32),
('worm_index', np.int32),
('strain', 'S10'),
('ini', np.int32),
('fin', np.int32),
('skel_group_id', np.int32)
])
em = embedding_groups[list(table_type.names)].to_records(index=False).astype(table_type)
fid.create_table('/',
'embedding_groups',
obj=em,
filters=TABLE_FILTERS)
fid.create_carray('/',
'video_embeddings',
obj=embeddings,
filters=TABLE_FILTERS)
fid.create_carray('/',
'predicted_strain_id',
obj=predictions,
filters=TABLE_FILTERS)
fid.create_carray('/',
'snps_embeddings',
obj=snps_embeddings,
filters=TABLE_FILTERS)
|
[
"ajaver@MRC-8791.local"
] |
ajaver@MRC-8791.local
|
e563aed309c589208366c684765d85e75140080d
|
78a28bd6b95041bfe67d8aa6a3a3c111911afaab
|
/03.Complete Python Developer - Zero to Mastery - AN/01.Python Basics/13.1 Exercise Repl.py
|
1e15204ddbb52cbedf68211f4ad2a6edf0cfeb2c
|
[
"MIT"
] |
permissive
|
jailukanna/Python-Projects-Dojo
|
8200a60ab925bf796bd39cb1977e6f0e0a575c23
|
98c7234b84f0afea99a091c7198342d66bbdff5b
|
refs/heads/master
| 2023-03-15T06:54:38.141189
| 2021-03-11T08:17:02
| 2021-03-11T08:17:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
counter = 0
counter += 1
counter += 1
counter += 1
counter += 1
counter -= 1
counter *=2
#Before you click RUN, guess what the counter variable holds in memory!
print(counter)
#6
|
[
"ptyadana@users.noreply.github.com"
] |
ptyadana@users.noreply.github.com
|
26f999a48d12ee717570e8a1ae1e7e96e06c6f69
|
245b0329360b18c32510a6d13b2650fd6ca752cc
|
/ch03/trendline.py
|
14d037ac047d819a1524cf921cf4e087b23e6a60
|
[] |
no_license
|
TanUkkii007/numpy-begginers-guide
|
56c315d207f681bd4e6d70abeac82bfc0db2bad5
|
6d483bc8672947a06d4240c4379f00183da46d8b
|
refs/heads/master
| 2021-01-17T08:08:07.113571
| 2016-06-29T16:45:59
| 2016-06-29T16:45:59
| 61,984,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def fit_line(t,y):
'''Fits t to a line y = at + b'''
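    # Build the design matrix [t, 1]; lstsq returns the least-squares [a, b]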
A = np.vstack([t, np.ones_like(t)]).T
return np.linalg.lstsq(A, y)[0]
# Determine pivots
h,l,c = np.loadtxt('data.csv', delimiter=',', usecols=(4,5,6), unpack=True)
pivots = (h+l+c)/3
# Fit trend lines
t = np.arange(len(c))
sa, sb = fit_line(t, pivots - (h-l))
ra, rb = fit_line(t, pivots + (h-l))
support = sa*t + sb
resistance = ra*t + rb
condition = (c > support) & (c < resistance)
print("condition", condition)
between_bands = np.where(condition)
print("between_bands", between_bands)
print("support[between_bands]", support[between_bands])
print("c[between_bands]", c[between_bands])
print("resistance[between_bands]", resistance[between_bands])
n_between_bands = len(np.ravel(between_bands))
print("Number points between bands", between_bands)
print("Ratio between bands", float(n_between_bands)/len(c))
print("Tomorrows support", sa * (t[-1] + 1) + sb)
print("Tomorrows resistance", ra * (t[-1] + 1) + rb)
a1 = c[c > support]
a2 = c[c < resistance]
print("Number of points between bands 2nd approach" ,len(np.intersect1d(a1, a2)))
plt.plot(t, c, label='Data')
plt.plot(t, support, '--', lw=2.0, label='Support')
plt.plot(t, resistance, '-.', lw=3.0, label='Resistance')
plt.title('Trend Lines')
plt.xlabel('Days')
plt.ylabel('Price ($)')
plt.grid()
plt.legend()
plt.show()
|
[
"yusuke.007.yasud@gmail.com"
] |
yusuke.007.yasud@gmail.com
|
b45c9b02f05aa2a1d529bacf9ec69c1b70e7e35b
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/Data-Structures-Algos-Codebase/ALGO/__PYTHON/YT_DOWN.py
|
2e47b44e59c00793d16628f53a9120871d31a7a4
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
import pafy
url = "https://www.youtube.com/watch?v=OE7wUUpJw6I&list=PL2_aWCzGMAwLPEZrZIcNEq9ukGWPfLT4A"
video = pafy.new(url)
print(video.title)
streams = video.streams  # reuse the existing Pafy object instead of fetching the URL again
best = video.getbest()
for s in streams:
    print(s)
print(best.resolution,best.extension)
print(best.url)
best.download(quiet=False)
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
2e06b237f8de00ed2eda46de62f6d4a013feeb58
|
5989e503a733e8b29f4c502008446a75c2b43ff8
|
/src/geofr/api/serializers.py
|
e6b9048e6acbf53169edb9aa681077ec2224fb2d
|
[] |
no_license
|
samuelpath/aides-territoires
|
399a6a7b0607ef5a8d2b327247446b239f5b1a42
|
5793bd49d7157a34e08c29e56a46e1e3ead0651f
|
refs/heads/master
| 2022-12-20T14:35:18.671563
| 2020-08-21T08:00:33
| 2020-08-21T08:00:33
| 288,424,578
| 0
| 0
| null | 2020-08-18T10:27:17
| 2020-08-18T10:27:16
| null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
from rest_framework import serializers
from geofr.models import Perimeter
class PerimeterSerializer(serializers.ModelSerializer):
id = serializers.CharField(source='id_slug')
scale = serializers.CharField(source='get_scale_display')
text = serializers.CharField(source='__str__')
class Meta:
model = Perimeter
fields = ('id', 'name', 'scale', 'text')
|
[
"thibault@miximum.fr"
] |
thibault@miximum.fr
|
d9213d2c6b72b46b327ce6a9697cc507a3850b4e
|
c29cdc64f42eae3bc7d584a7b9b84961ce09da04
|
/bitcoinx/__init__.py
|
d8c08b3d58a13838d07a60b0f1dbabaa6c15fe86
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
msinkec/bitcoinX
|
d379e32e9dc0f43700710032ad0ce7ffca2e398d
|
26ec8a8a8c2a6423e0438dea9918c740c96f5eb2
|
refs/heads/master
| 2023-04-28T18:52:12.072225
| 2021-05-11T11:19:44
| 2021-05-11T11:19:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
from .address import *
from .base58 import *
from .bip32 import *
from .chain import *
from .consts import *
from .errors import *
from .hashes import *
from .interpreter import *
from .keys import *
from .misc import *
from .mnemonic import *
from .networks import *
from .packing import *
from .script import *
from .signature import *
from .tx import *
from .work import *
_version_str = '0.4.1'
_version = tuple(int(part) for part in _version_str.split('.'))
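# Each "from .mod import *" above also binds the submodule object itself
# (e.g. address) in this package's namespace, which is what makes the
# attribute accesses below resolve.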
__all__ = sum((
address.__all__,
base58.__all__,
bip32.__all__,
chain.__all__,
consts.__all__,
errors.__all__,
hashes.__all__,
interpreter.__all__,
keys.__all__,
misc.__all__,
mnemonic.__all__,
networks.__all__,
packing.__all__,
script.__all__,
signature.__all__,
tx.__all__,
work.__all__,
), ())
|
[
"kyuupichan@gmail.com"
] |
kyuupichan@gmail.com
|
ed146779588cd7189d51bb5d520952ed956f518a
|
7ff0077a55f6bf4a74704e430f354aeabaae3e0b
|
/tensorflow_probability/python/distributions/relaxed_onehot_categorical_test.py
|
a92246b95c13587324e028dd9841e11f232c49ad
|
[
"Apache-2.0"
] |
permissive
|
markaduol/probability
|
50a1d97810d11c747bd9546f977b2937c9e04d78
|
8af21dff96502a5bdc01b1be2c595043a3efc5d1
|
refs/heads/master
| 2020-03-29T20:50:26.001297
| 2018-09-25T21:51:10
| 2018-09-25T21:51:50
| 150,333,784
| 0
| 1
|
Apache-2.0
| 2018-09-25T21:54:49
| 2018-09-25T21:54:49
| null |
UTF-8
|
Python
| false
| false
| 6,579
|
py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Relaxed One-Hot Categorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy.special import gamma
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import test_util
tfd = tfp.distributions
def make_relaxed_categorical(batch_shape, num_classes, dtype=tf.float32):
logits = tf.random_uniform(
list(batch_shape) + [num_classes], -10, 10, dtype=dtype) - 50.
temperatures = tf.random_uniform(list(batch_shape), 0.1, 10, dtype=tf.float32)
return tfd.RelaxedOneHotCategorical(temperatures, logits)
@test_util.run_all_in_graph_and_eager_modes
class ExpRelaxedOneHotCategoricalTest(tf.test.TestCase):
def testP(self):
temperature = 1.0
logits = [2.0, 3.0, -4.0]
dist = tfd.ExpRelaxedOneHotCategorical(temperature, logits)
expected_p = np.exp(logits)/np.sum(np.exp(logits))
self.assertAllClose(expected_p, self.evaluate(dist.probs))
self.assertAllEqual([3], dist.probs.get_shape())
def testPdf(self):
temperature = .4
logits = [.3, .1, .4]
k = len(logits)
p = np.exp(logits)/np.sum(np.exp(logits))
dist = tfd.ExpRelaxedOneHotCategorical(temperature, logits)
x = self.evaluate(dist.sample())
# analytical ExpConcrete density presented in Maddison et al. 2016
prod_term = p * np.exp(-temperature * x)
expected_pdf = (
gamma(k) * np.power(temperature, k - 1) * np.prod(
prod_term / np.sum(prod_term)))
pdf = self.evaluate(dist.prob(x))
self.assertAllClose(expected_pdf, pdf)
@test_util.run_all_in_graph_and_eager_modes
class RelaxedOneHotCategoricalTest(tf.test.TestCase):
def testLogits(self):
temperature = 1.0
logits = [2.0, 3.0, -4.0]
dist = tfd.RelaxedOneHotCategorical(temperature, logits)
# check p for ExpRelaxed base distribution
self.assertAllClose(logits, self.evaluate(dist._distribution.logits))
self.assertAllEqual([3], dist._distribution.logits.get_shape())
def testSample(self):
temperature = 1.4
# single logit
logits = [.3, .1, .4]
dist = tfd.RelaxedOneHotCategorical(temperature, logits)
self.assertAllEqual([3], self.evaluate(dist.sample()).shape)
self.assertAllEqual([5, 3], self.evaluate(dist.sample(5)).shape)
# multiple distributions
logits = [[2.0, 3.0, -4.0], [.3, .1, .4]]
dist = tfd.RelaxedOneHotCategorical(temperature, logits)
self.assertAllEqual([2, 3], self.evaluate(dist.sample()).shape)
self.assertAllEqual([5, 2, 3], self.evaluate(dist.sample(5)).shape)
# multiple distributions
logits = np.random.uniform(size=(4, 1, 3)).astype(np.float32)
dist = tfd.RelaxedOneHotCategorical(temperature, logits)
self.assertAllEqual([4, 1, 3], self.evaluate(dist.sample()).shape)
self.assertAllEqual([5, 4, 1, 3], self.evaluate(dist.sample(5)).shape)
def testPdf(self):
def analytical_pdf(x, temperature, logits):
# analytical density of RelaxedOneHotCategorical
temperature = np.reshape(temperature, (-1, 1))
if len(x.shape) == 1:
x = np.expand_dims(x, 0)
k = logits.shape[1]
p = np.exp(logits)/np.sum(np.exp(logits), axis=1, keepdims=True)
term1 = gamma(k)*np.power(temperature, k-1)
term2 = np.sum(p/(np.power(x, temperature)), axis=1, keepdims=True)
term3 = np.prod(p/(np.power(x, temperature+1)), axis=1, keepdims=True)
expected_pdf = term1*np.power(term2, -k)*term3
return expected_pdf
temperature = .4
logits = np.array([[.3, .1, .4]]).astype(np.float32)
dist = tfd.RelaxedOneHotCategorical(temperature, logits)
x = self.evaluate(dist.sample())
pdf = self.evaluate(dist.prob(x))
expected_pdf = analytical_pdf(x, temperature, logits)
self.assertAllClose(expected_pdf.flatten(), pdf, rtol=1e-4)
# variable batch size
logits = np.array([[.3, .1, .4], [.6, -.1, 2.]]).astype(np.float32)
temperatures = np.array([0.4, 2.3]).astype(np.float32)
dist = tfd.RelaxedOneHotCategorical(temperatures, logits)
x = self.evaluate(dist.sample())
pdf = self.evaluate(dist.prob(x))
expected_pdf = analytical_pdf(x, temperatures, logits)
self.assertAllClose(expected_pdf.flatten(), pdf, rtol=1e-4)
def testShapes(self):
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_relaxed_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape, dist.batch_shape.as_list())
self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))
self.assertAllEqual([10], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([10], self.evaluate(dist.event_shape_tensor()))
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_relaxed_categorical(batch_shape,
tf.constant(10, dtype=tf.int32))
self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
self.assertAllEqual(batch_shape, self.evaluate(dist.batch_shape_tensor()))
self.assertAllEqual([10], self.evaluate(dist.event_shape_tensor()))
self.assertAllEqual([10], self.evaluate(dist.event_shape_tensor()))
def testUnknownShape(self):
logits_pl = tf.placeholder_with_default(input=[.3, .1, .4], shape=None)
temperature = 1.0
dist = tfd.ExpRelaxedOneHotCategorical(temperature, logits_pl)
self.assertAllEqual([3], self.evaluate(dist.sample()).shape)
self.assertAllEqual([5, 3], self.evaluate(dist.sample(5)).shape)
def testDTypes(self):
# check that sampling and log_prob work for a range of dtypes
for dtype in (tf.float16, tf.float32, tf.float64):
logits = tf.random_uniform(shape=[3, 3], dtype=dtype)
dist = tfd.RelaxedOneHotCategorical(temperature=0.5, logits=logits)
dist.log_prob(dist.sample())
if __name__ == "__main__":
tf.test.main()
|
[
"copybara-piper@google.com"
] |
copybara-piper@google.com
|
6741d8af6bd64f44ce8282efeb4594958db494e4
|
1fc9a12c86be4e440f4f0d95c8b871c79df07545
|
/ML Libariries/python_pandas/dataframe_row.py
|
b1f101d5632b2dcc45428ad701ace69103a14311
|
[] |
no_license
|
Rohit-Gupta-Web3/Articles
|
a56e7f1b36d6d5efd846eec2e1a4036716ac16eb
|
0f584916e065059d4dd1e95e7de874a7830fdff4
|
refs/heads/master
| 2023-05-14T02:50:17.817951
| 2020-07-26T06:44:10
| 2020-07-26T06:44:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
import pandas as pd
data = {'Name':['C','Sharp','Corner'], 'Age':[20,21,22], 'Address':['Delhi','Kanpur','Tamil Nadu']}
df = pd.DataFrame(data)
data1 = df.loc[0]
print(data1)
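# .loc[0] selects the row labelled 0 and returns it as a Series; with the
# default RangeIndex this coincides with positional access via df.iloc[0].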
|
[
"rohit.gupta@iic.ac.in"
] |
rohit.gupta@iic.ac.in
|
f2c4e6b1a789554712a287c4f0a34afbf2126b22
|
f9c0e305ca873cd9784f1a6f441fee0ca7fbc468
|
/recom/main/map.py
|
e13f55ae806969fe8b3404e4775d65e18b170a56
|
[] |
no_license
|
flybetter/recom_web
|
b3caac2873786f4a10fbedc8b34d2ea4888aeafe
|
ef06f64931df594f4c8c6b2f72a59e0ad4a5e637
|
refs/heads/master
| 2020-03-27T10:21:35.813149
| 2018-10-10T08:47:56
| 2018-10-10T08:47:56
| 146,412,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,044
|
py
|
from flask import render_template, Blueprint
from urllib import request
import json
from recom.main.recom_glorithm import startup, secondHouseRequestJson, get_history, get_associate_community
import logging
logging.basicConfig(level=logging.DEBUG)
bp = Blueprint('map', __name__, url_prefix='/map')
HISTORY_URL = 'http://tuijianapiv2.house365.com/analysis/getData?cityKey=nj&dataType=accesslog&pageNum=1&pageSize=20&' \
'userId='
@bp.route('/')
def hello_world():
return render_template('map/map.html')
@bp.route('/<string:phone>/history', methods=('GET', 'POST'))
def history(phone):
# response = request.urlopen(HISTORY_URL + phone)
return get_history(phone)
@bp.route('/<string:phone>/recom', methods=('GET', 'POST'))
def recom(phone):
datas = startup(phone)
if type(datas) != str:
datas = secondHouseRequestJson(datas)
return datas
@bp.route('/<string:phone>/associated_community', methods=('GET', 'POST'))
def associated_community(phone):
return get_associate_community(phone)
|
[
"flybetter@163.com"
] |
flybetter@163.com
|
bad111c370e04a62e3e9f19868fc1862a53e48f3
|
fd877cb919622d6a4efa305fb9eaec8a31e8dd37
|
/scripts/coop/extract_idhs.py
|
96006b4849a3fe65260d49d3758153010ba1974b
|
[
"MIT"
] |
permissive
|
NCiobo/iem
|
37df9bc466ffcbe4f6b1f9c29c6b5266559f200c
|
75da5e681b073c6047f5a2fb76721eaa0964c2ed
|
refs/heads/master
| 2021-01-23T09:39:33.090955
| 2017-09-05T16:34:12
| 2017-09-05T16:34:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,656
|
py
|
"""Extraction as requested by IA Public Health"""
import psycopg2
import sys
YEAR = int(sys.argv[1])
def main(argv):
"""Do Something"""
pgconn = psycopg2.connect(database='postgis', host='localhost',
port=5555, user='nobody')
cursor = pgconn.cursor()
cursor2 = pgconn.cursor()
pgconn2 = psycopg2.connect(database='coop', host='localhost',
port=5555, user='nobody')
cursor3 = pgconn2.cursor()
cursor.execute("""SELECT ugc, ST_X(centroid), ST_Y(centroid)
from ugcs where state = 'IA' and substr(ugc,3,1) = 'C' and end_ts is null
ORDER by ugc ASC""")
for row in cursor:
fips = row[0][3:]
# Get closest climodat site
cursor2.execute("""SELECT id, ST_Distance(geom,
ST_SetSRID(ST_GeomFromText('POINT(%s %s)'), 4326))
from stations where network = 'IACLIMATE' and id != 'IA0000'
and substr(id,3,1) != 'C' ORDER by st_distance ASC LIMIT 1
""", (row[1], row[2]))
sid = cursor2.fetchone()[0]
cursor3.execute("""SELECT year, month,
max(high), min(low), avg((high+low)/2.), sum(precip),
sum(case when high >= 95 then 1 else 0 end),
sum(case when low >= 70 then 1 else 0 end) from alldata_ia WHERE
station = %s and year = %s GROUP by year, month
""", (sid, YEAR))
for row in cursor3:
print ','.join([str(i) for i in [fips, row[0], row[1], row[2],
row[3], row[4], row[5], row[6],
row[7]]])
if __name__ == '__main__':
main(sys.argv)
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
2ebeb1af2486684d1a3e45df73b6fbb9fe41cad1
|
ba157236151a65e3e1fde2db78b0c7db81b5d3f6
|
/String/Shortest Distance to a Character.py
|
61944332f699e54e8ea2514cf4b61df0e868fee3
|
[] |
no_license
|
JaberKhanjk/LeetCode
|
152488ccf385b449d2a97d20b33728483029f85b
|
78368ea4c8dd8efc92e3db775b249a2f8758dd55
|
refs/heads/master
| 2023-02-08T20:03:34.704602
| 2020-12-26T06:24:33
| 2020-12-26T06:24:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,372
|
py
|
class Solution(object):
def produce_range(self,arr,s,e):
if s == e:
return
else:
length = e-s
c = 1
            for i in range(s, s + length // 2):  # floor division keeps range() valid on Python 3 too
arr[i] = c
c += 1
if (e-s) % 2 == 0:
c -= 1
            for i in range(s + length // 2, e):
arr[i] = c
c -= 1
return
def find_indexes(self,string,char):
indexes = []
for i,each in enumerate(string):
if each == char:
indexes.append(int(i))
return indexes
    def shortestToChar(self, S, C):
        """
        :type S: str
        :type C: str
        :rtype: List[int]
        """
arr = [0]*len(S)
indexes = self.find_indexes(S,C)
n = len(indexes)
# first
c = 0
for i in range(indexes[0],-1,-1):
arr[i] = c
c += 1
for i in range(0,n-1):
self.produce_range(arr,indexes[i]+1,indexes[i+1])
c = 1
# final
for i in range(indexes[-1]+1,len(S)):
arr[i] = c
c += 1
return arr
"""
:type S: str
:type C: str
:rtype: List[int]
"""
|
[
"spondoncsebuet@gmail.com"
] |
spondoncsebuet@gmail.com
|
26f7c3a515ee832fb3576f44619b37adfd2f0357
|
6f56e44e04a287a2545b4c43a09fa82d6f8551a6
|
/dcp100 - given infinite 2D grid and seq of points to cover in order, find min num steps.py
|
27412150dba3353bf13f3ff326d280e14bae840d
|
[] |
no_license
|
seanchen513/dcp
|
bb7ab2f9632700c5d23ccfa92b0e8a541c508d3b
|
c0b19833c51833d1d80016afe637734316194327
|
refs/heads/master
| 2020-04-10T21:21:23.308335
| 2020-01-30T15:51:49
| 2020-01-30T15:51:49
| 161,294,610
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,553
|
py
|
"""
dcp#100, 416
This problem was asked by Google.
You are in an infinite 2D grid where you can move in any of the 8 directions:

    (x, y) to (x+1, y), (x-1, y),
              (x, y+1), (x, y-1),
              (x-1, y-1), (x+1, y+1),
              (x-1, y+1), (x+1, y-1)

You are given a sequence of points and the order in which you need to cover
the points. Give the minimum number of steps in which you can achieve it.
You start from the first point.
Example:
Input: [(0, 0), (1, 1), (1, 2)]
Output: 2
It takes 1 step to move from (0, 0) to (1, 1). It takes one more step to move from (1, 1) to (1, 2).
"""
"""
Idea: To get minimum number of steps between two points, go diagonal
as much as possible, and then either vertically or horizontally as needed.
Example: (2, 3), (8, 13)
|dx| = |8 - 2| = 6
|dy| = |13 - 3| = 10
Go diagonally (+1, +1) by 6, then go vertically +4.
Go diagonally by (ignore sign/direction):
min(|dx|, |dy|)
Go horizontally/vertically by (ignore sign/direction):
max(|dx|, |dy|) - min(|dx|, |dy|) = | |dx| - |dy| |
Total number of steps is:
max(|dx|, |dy|)
"""
def min_steps(points):
    n = len(points)
    steps = 0
    for i in range(n - 1):
        dx = points[i + 1][0] - points[i][0]
        dy = points[i + 1][1] - points[i][1]
        steps += max(abs(dx), abs(dy))
    return steps

# Each assignment below overrides the previous one; only the last list
# (the example from the problem statement) is actually tested.
points = []
points = [(5, 13)]
points = [(2, 3), (8, 13)]
points = [(0, 0), (1, 1), (1, 2)]

n = min_steps(points)

print("\npoints = {}".format(points))
print("\nmin steps to cover points in order = {}".format(n))
|
[
"seanchen513@gmail.com"
] |
seanchen513@gmail.com
|
5e10d01be687ad36d906c3e893f05564f5edc79e
|
bf4f11bcac4c92975fdcb98c328a03a663d2d9ae
|
/dask_gdf/tests/test_sort.py
|
9c4988271286624f74c78fcbd7e0bede033a9e90
|
[
"Apache-2.0"
] |
permissive
|
Quansight/dask_gdf
|
88063f6ca4d3de5a5e7ed5d2f3e23fbe55cc1523
|
9f67410f4ff6c5cea18f831404d2f2fe7bf632ab
|
refs/heads/master
| 2020-03-28T14:46:50.533877
| 2018-11-06T19:26:56
| 2018-11-06T19:26:56
| 148,521,998
| 1
| 1
|
Apache-2.0
| 2018-09-19T14:43:19
| 2018-09-12T18:12:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,314
|
py
|
import pytest
import numpy as np
import pandas as pd
import pygdf
import dask

import dask_gdf as dgd


@pytest.mark.parametrize('by', ['a', 'b'])
@pytest.mark.parametrize('nelem', [10, 100, 1000])
@pytest.mark.parametrize('nparts', [1, 2, 5, 10])
def test_sort_values(nelem, nparts, by):
    df = pygdf.DataFrame()
    df['a'] = np.ascontiguousarray(np.arange(nelem)[::-1])
    df['b'] = np.arange(100, nelem + 100)
    ddf = dgd.from_pygdf(df, npartitions=nparts)

    got = ddf.sort_values(by=by).compute().to_pandas()
    expect = df.sort_values(by=by).to_pandas().reset_index(drop=True)
    pd.util.testing.assert_frame_equal(got, expect)


def test_sort_values_binned():
    np.random.seed(43)
    nelem = 100
    nparts = 5
    by = 'a'
    df = pygdf.DataFrame()
    df['a'] = np.random.randint(1, 5, nelem)
    ddf = dgd.from_pygdf(df, npartitions=nparts)

    parts = ddf.sort_values_binned(by=by).to_delayed()
    part_uniques = []
    for i, p in enumerate(parts):
        part = dask.compute(p)[0]
        part_uniques.append(set(part.a.unique()))

    # Partitions do not have intersecting keys
    for i in range(len(part_uniques)):
        for j in range(i + 1, len(part_uniques)):
            assert not (part_uniques[i] & part_uniques[j]), \
                "should have empty intersection"
|
[
"michael.lam.sk@gmail.com"
] |
michael.lam.sk@gmail.com
|