blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42db9ae5b41d878484401904355a985bab0c7f8d
|
f998a574343292d050777f616b408a74fde05738
|
/eshop_docker/eshop/extra_apps/social_core/tests/backends/open_id_connect.py
|
25e6b5db9900819e073facd14c2e6cdb9f222ebc
|
[] |
no_license
|
Boomshakal/Django
|
7987e0572fc902bd56360affea0b5087a4cb04a7
|
a149691c472eab3440028bf2460cd992acec0f8a
|
refs/heads/master
| 2023-01-11T06:16:29.283428
| 2022-12-23T08:00:05
| 2022-12-23T08:00:05
| 199,360,433
| 0
| 0
| null | 2020-06-06T09:37:02
| 2019-07-29T02:01:09
|
Python
|
UTF-8
|
Python
| false
| false
| 6,334
|
py
|
# -*- coding: utf-8 -*-
from calendar import timegm
import os
import sys
import json
import datetime
import unittest2
try:
from jwkest.jwk import RSAKey, KEYS
from jwkest.jws import JWS
from jwkest.jwt import b64encode_item
NO_JWKEST = False
except ImportError:
NO_JWKEST = True
from httpretty import HTTPretty
sys.path.insert(0, '..')
from ...exceptions import AuthTokenError
class OpenIdConnectTestMixin(object):
    """
    Mixin to test OpenID Connect consumers. Inheriting classes should also
    inherit OAuth2Test.
    """
    client_key = 'a-key'
    client_secret = 'a-secret-key'
    issuer = None  # id_token issuer
    openid_config_body = None  # JSON discovery document; set by subclasses
    key = None  # RSA signing key, loaded in setUp

    def setUp(self):
        """Register mocked OIDC discovery and JWKS endpoints and load the
        RSA key used to sign test id_tokens (testkey.pem next to the tests)."""
        super(OpenIdConnectTestMixin, self).setUp()
        test_root = os.path.dirname(os.path.dirname(__file__))
        self.key = RSAKey(kid='testkey').load(os.path.join(test_root, 'testkey.pem'))
        # Serve the provider's discovery document.
        HTTPretty.register_uri(HTTPretty.GET,
                               self.backend.OIDC_ENDPOINT + '/.well-known/openid-configuration',
                               status=200,
                               body=self.openid_config_body
        )
        oidc_config = json.loads(self.openid_config_body)

        def jwks(_request, _uri, headers):
            # Serve this test's public key as a JWKS document so the backend
            # can verify the id_token signatures we mint below.
            ks = KEYS()
            ks.add(self.key.serialize())
            return 200, headers, ks.dump_jwks()

        HTTPretty.register_uri(HTTPretty.GET,
                               oidc_config.get('jwks_uri'),
                               status=200,
                               body=jwks)

    def extra_settings(self):
        """Add the per-backend key/secret settings required by OIDC backends."""
        settings = super(OpenIdConnectTestMixin, self).extra_settings()
        settings.update({
            'SOCIAL_AUTH_{0}_KEY'.format(self.name): self.client_key,
            'SOCIAL_AUTH_{0}_SECRET'.format(self.name): self.client_secret,
            'SOCIAL_AUTH_{0}_ID_TOKEN_DECRYPTION_KEY'.format(self.name):
                self.client_secret
        })
        return settings

    def access_token_body(self, request, _url, headers):
        """
        Get the nonce from the request parameters, add it to the id_token, and
        return the complete response.
        """
        nonce = self.backend.data['nonce'].encode('utf-8')
        body = self.prepare_access_token_body(nonce=nonce)
        return 200, headers, body

    def get_id_token(self, client_key=None, expiration_datetime=None,
                     issue_datetime=None, nonce=None, issuer=None):
        """
        Return the id_token claims dict to be signed and added to the access
        token body.  `exp`/`iat` are epoch seconds; `sub` is a fixed test id.
        """
        return {
            'iss': issuer,
            'nonce': nonce,
            'aud': client_key,
            'azp': client_key,
            'exp': expiration_datetime,
            'iat': issue_datetime,
            'sub': '1234'
        }

    def prepare_access_token_body(self, client_key=None, tamper_message=False,
                                  expiration_datetime=None,
                                  issue_datetime=None, nonce=None,
                                  issuer=None):
        """
        Prepare a provider access-token response as a JSON string.

        client_key -- (str) OAuth key for the client that requested
                      authentication (defaults to self.client_key).
        tamper_message -- (bool) if True, re-encode the payload with a
                          different `sub` while keeping the original
                          signature, so verification must fail.
        expiration_datetime -- (datetime) moment after which the id_token is
                               invalid (defaults to now + 30s).
        issue_datetime -- (datetime) id_token `iat` (defaults to now).
        nonce -- (str) replay-protection nonce (defaults to 'a-nonce').
        issuer -- (str) id_token issuer (defaults to self.issuer).
        """
        body = {'access_token': 'foobar', 'token_type': 'bearer'}
        client_key = client_key or self.client_key
        now = datetime.datetime.utcnow()
        expiration_datetime = expiration_datetime or \
            (now + datetime.timedelta(seconds=30))
        issue_datetime = issue_datetime or now
        nonce = nonce or 'a-nonce'
        issuer = issuer or self.issuer
        id_token = self.get_id_token(
            client_key, timegm(expiration_datetime.utctimetuple()),
            timegm(issue_datetime.utctimetuple()), nonce, issuer)

        # Sign with the test RSA key; the mocked JWKS endpoint serves the
        # matching public key.
        body['id_token'] = JWS(id_token, jwk=self.key, alg='RS256').sign_compact()
        if tamper_message:
            # Swap the payload but keep header and signature intact, which
            # guarantees a signature verification failure downstream.
            header, msg, sig = body['id_token'].split('.')
            id_token['sub'] = '1235'
            msg = b64encode_item(id_token).decode('utf-8')
            body['id_token'] = '.'.join([header, msg, sig])
        return json.dumps(body)

    def authtoken_raised(self, expected_message, **access_token_kwargs):
        """Run a login with a crafted token response and assert that
        AuthTokenError is raised with `expected_message`."""
        self.access_token_body = self.prepare_access_token_body(
            **access_token_kwargs
        )
        # assertRaisesRegexp is the unittest2/py2 spelling (renamed
        # assertRaisesRegex in py3) -- kept for compatibility with unittest2.
        with self.assertRaisesRegexp(AuthTokenError, expected_message):
            self.do_login()

    @unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
    def test_invalid_signature(self):
        self.authtoken_raised(
            'Token error: Signature verification failed',
            tamper_message=True
        )

    @unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
    def test_expired_signature(self):
        expiration_datetime = datetime.datetime.utcnow() - \
            datetime.timedelta(seconds=30)
        self.authtoken_raised('Token error: Signature has expired',
                              expiration_datetime=expiration_datetime)

    @unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
    def test_invalid_issuer(self):
        self.authtoken_raised('Token error: Invalid issuer',
                              issuer='someone-else')

    @unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
    def test_invalid_audience(self):
        self.authtoken_raised('Token error: Invalid audience',
                              client_key='someone-else')

    @unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
    def test_invalid_issue_time(self):
        expiration_datetime = datetime.datetime.utcnow() - \
            datetime.timedelta(hours=1)
        self.authtoken_raised('Token error: Incorrect id_token: iat',
                              issue_datetime=expiration_datetime)

    @unittest2.skipIf(NO_JWKEST, 'No Jwkest installed')
    def test_invalid_nonce(self):
        self.authtoken_raised(
            'Token error: Incorrect id_token: nonce',
            nonce='something-wrong'
        )
|
[
"362169885@qq.com"
] |
362169885@qq.com
|
f82fa05a647d060994f1394b769def3f788dcc39
|
0eaf0d3f0e96a839f2ef37b92d4db5eddf4b5e02
|
/abc274/b.py
|
1f18cd915d905546bb897ac0c9a184c714fb9018
|
[] |
no_license
|
silphire/atcoder
|
b7b02798a87048757745d99e8564397d1ca20169
|
f214ef92f13bc5d6b290746d5a94e2faad20d8b0
|
refs/heads/master
| 2023-09-03T17:56:30.885166
| 2023-09-02T14:16:24
| 2023-09-02T14:16:24
| 245,110,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
# Read an h x w character grid from stdin and print, for each column,
# how many cells contain '#'.
h, w = map(int, input().split())
rows = [input().rstrip() for _ in range(h)]
col_counts = [sum(1 for row in rows if row[col] == '#') for col in range(w)]
print(*col_counts)
|
[
"silphire@gmail.com"
] |
silphire@gmail.com
|
325eb1dbf06a23dc59d7f10da7867ee273d97c26
|
0cbc02dd7d1efbe61de04dcf1c6eccb6496bf074
|
/month05/teacher/day02/demo03_dataframe.py
|
f25546461a98a1a27fa428c2ff04b2b73a91f7a7
|
[] |
no_license
|
fsym-fs/Python_AID
|
0b1755c15e20b214940041e81bedb2d5ec99e3f9
|
f806bb02cdb1670cfbea6e57846abddf3972b73b
|
refs/heads/master
| 2021-03-20T06:57:45.441245
| 2020-05-27T14:13:45
| 2020-05-27T14:13:45
| 247,187,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,871
|
py
|
"""
demo03_dataFrame.py
"""
import pandas as pd
import numpy as np
df = pd.DataFrame()
print(df)
# 通过列表创建DataFrame
data = ['王伟超', '王小超', '王大超', '王年轻超']
df = pd.DataFrame(data)
print(df)
data = [['Alex',10],['Bob',12],['Clarke',13]]
df = pd.DataFrame(data, index=['S01', 'S02', 'S03'], columns=['Name', 'Age'])
print(df)
data = [{'a': 1, 'b': 2},{'a': 5, 'b': 10, 'c': 20}]
df = pd.DataFrame(data)
print(df)
# 通过字典创建DataFrame
data = {'Name':['Tom', 'Jack', 'Steve', 'Ricky'],'Age':[28,34,29,42]}
df = pd.DataFrame(data)
print(df)
print(df.axes)
print(df.index)
print(df.columns)
print(df.values)
print(df.head(2))
print(df.tail(2))
# 列访问
print('-' * 50)
d = {'one' : pd.Series([1, 2, 3], index=['a', 'b', 'c']),
'two' : pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']),
'three' : pd.Series([1, 3, 4], index=['a', 'c', 'd'])}
df = pd.DataFrame(d)
print(df['one']) # 访问one这一列
print(df[['one', 'three']])
print(df[df.columns[:2]])
# 列添加
print('-' * 50)
# df['four'] = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
# df['four'] = pd.Series([1, 2, 3, 4])
# df['four'] = [1, 2, 3, 4]
# df['four'] = [1, 2, 3]
df['four'] = pd.Series([1, 2, 3], index=['b', 'c', 'd'])
print(df)
# 列删除
# print('-' * 50)
# del(df['four'])
# df.pop('one')
# df2 = df.drop(['one', 'three'], axis=1)
# print(df2)
# 行访问
print('-' * 50)
print(df.loc['a'])
print(df.loc[['a', 'b']])
print(df.loc['a':'c']) # 标签索引切片,结果中包含a b c
print(df.iloc[[0, 2]])
print(df.iloc[0:2]) # 数字索引切片,结果中包含a b
# 行添加
print('-' * 50)
print(df)
newline = pd.Series([2.2, 3.1, 4.5, 3.2], index=['one', 'two', 'three', 'four'], name='e')
df = df.append(newline)
print(df)
df = df.append(df)
print(df)
# 索引有重复的情况,希望重建索引
df.index = np.arange(10)
print(df)
# 行的删除
df = df.drop(np.arange(4, 10))
print(df)
# dataFrame元素的访问
print(df.loc[2]['four'])
print(df.loc[2, 'four'])
print(df['four'][2])
print(df.loc[2:2].loc[2, 'four'])
# 复合索引
# random.normal() 返回一组服从正态分布随机数,shape:(6,3), 期望85, 标准差为3
data = np.floor(np.random.normal(85, 3, (6,3)))
df = pd.DataFrame(data)
print('-' * 50)
print(df)
# 把行级索引改为复合索引
mindex = [('classA', 'F'), ('classA', 'M'),
('classB', 'F'), ('classB', 'M'),
('classC', 'F'), ('classC', 'M')]
df.index = pd.MultiIndex.from_tuples(mindex)
# 把列级索引改为复合索引
mindex = [('Age', '20+'), ('Age', '25+'), ('Age', '30+')]
df.columns = pd.MultiIndex.from_tuples(mindex)
print(df)
# 通过复合索引访问元素
print(df.loc['classA', 'F']['Age'])
print(df['Age', '30+'])
|
[
"1085414029@qq.com"
] |
1085414029@qq.com
|
c02b81dbf0c54de6a19bd9b9039bd4f20831c548
|
f435b177d625e50bb9beafb191e1df01e3cb30ee
|
/src/pyoiler/problems/euler015.py
|
c9d24f4b6a0d172559219d2b5607eca2cbc11049
|
[] |
no_license
|
bathcat/pyOiler
|
dcf948b0a18a9094314564d177c7827c875de78b
|
3ce4d96277e61346509c2975a0034fb5ba845f23
|
refs/heads/main
| 2023-03-12T10:42:48.837126
| 2021-02-10T19:18:14
| 2021-02-10T19:18:14
| 337,828,844
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
from typing import Iterable, Tuple
from ..shared.more_itertools import flat_map, count
from ..shared.solver import Solver
"""[summary]
2 thoughts on performance:
1. This *enumerates* paths, which isn't necessary.
All we need to do is count them, so just increment
a number when you get to 16,16, and forget about
holding on to the tail.
2. Adding threads should be trivial, especially
after changing the search to depth-first.
Returns:
[type]: [description]
Yields:
[type]: [description]
"""
Position = Tuple[int,int]
class Path():
    """Immutable linked list of lattice positions: `head` is the newest
    point, `tail` is the remainder of the walk back to the origin."""
    head: Position
    tail: 'Path'

    def __init__(self, end: Position, rest: 'Path' = None):
        self.head = end
        self.tail = rest

    def to_positions(self) -> Iterable[Position]:
        """Yield positions from the newest point back to the origin."""
        node = self
        while node:
            yield node.head
            node = node.tail

    def append(self, p: Position) -> 'Path':
        """Return a new Path extended by position *p* (non-destructive)."""
        return Path(p, self)

    def __str__(self):
        # Render origin-first, i.e. in walking order.
        return str(list(self.to_positions())[::-1])

    @classmethod
    def zero(cls) -> 'Path':
        """The one-point path sitting at the origin."""
        return Path((0, 0))
class Lattice():
    """A width x height lattice; generates all monotone (right/down-only)
    paths from the origin (0, 0) to the far corner (width, height)."""
    height:int
    width:int

    def __init__(self,width, height):
        self.width=width
        self.height=height

    def successor_paths(self, current:Path) -> Iterable[Path]:
        """Yield the (at most two) one-step extensions of *current* that stay
        inside the lattice: one step in x, one step in y."""
        if current.head[0] < self.width:
            yield current.append((current.head[0] + 1, current.head[1]))
        if current.head[1] < self.height:
            yield current.append((current.head[0], current.head[1] + 1))

    def paths(self) -> Iterable[Path]:
        """Return all complete corner-to-corner paths.

        Every monotone path takes exactly width + height steps, so expanding
        the frontier that many times leaves only complete paths.
        NOTE(review): flat_map is presumably lazy -- consume the result as a
        stream; materializing it would hold C(w+h, w) paths in memory.
        """
        partials = [Path.zero()]
        for _ in range(self.height + self.width):
            partials = flat_map(self.successor_paths, partials)
        return partials
def _solve(print = print):
    """Solve Project Euler 15 (lattice paths) for a 15x15 grid by brute-force
    enumeration.

    :param print: output sink, injectable so the harness can capture output.
    :returns: False -- the result is only printed, and this enumeration
        cannot reach the real 20x20 problem size.
    """
    side = 15
    l = Lattice(side,side)
    path_count = count(l.paths())
    print(f"Count of paths through a {side} lattice is: {path_count}")
    # Bug fix: 'doesn''t' was two adjacent string literals ("doesnt") --
    # Python does not use SQL-style apostrophe doubling.
    print("This approach doesn't scale.")
    return False
# Problem statement displayed by the CLI harness.
description = '''Starting in the top left corner of a 2×2 grid, and only being able to move to the right and down,
there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20×20 grid?
'''

# Registration of problem 15 with the shared Solver harness.
solver = Solver(15,
                'Lattice paths',
                description,
                _solve
                )
|
[
"you@example.com"
] |
you@example.com
|
0bf08e9a43aaf6b9a2ef34639ca2ac9cc2f35030
|
6478723d180a8ef39941ba04b80c1eca9f437323
|
/Premuim/1134. Armstrong Number.py
|
5e3cf31015b6a7071c89cb6029a027521886b866
|
[] |
no_license
|
NiuNiu-jupiter/Leetcode
|
2a49a365898ecca393cb1eb53a47f4501b25952d
|
e278ae6ded32f6a2d054ae11ad8fcc45e7bd0f86
|
refs/heads/master
| 2022-11-22T01:05:57.417538
| 2020-07-28T23:34:39
| 2020-07-28T23:34:39
| 182,104,119
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
"""
The k-digit number N is an Armstrong number if and only if the k-th power of each digit sums to N.
Given a positive integer N, return true if and only if it is an Armstrong number.
Example 1:
Input: 153
Output: true
Explanation:
153 is a 3-digit number, and 153 = 1^3 + 5^3 + 3^3.
Example 2:
Input: 123
Output: false
Explanation:
123 is a 3-digit number, and 123 != 1^3 + 2^3 + 3^3 = 36.
Note:
1 <= N <= 10^8
"""
class Solution:
    def isArmstrong(self, N: int) -> bool:
        """Return True iff N equals the sum of its digits each raised to the
        digit-count power (an Armstrong number).  Assumes N >= 1."""
        digits = str(N)
        power = len(digits)
        return sum(int(ch) ** power for ch in digits) == N
|
[
"cmyumo.zhang@gmail.com"
] |
cmyumo.zhang@gmail.com
|
0da148c12136dd8d13acd43683cee98ce7199904
|
c03d102d36ff21675ec3bb58e5d46af8c3f73ff6
|
/polyaxon/tracker/events/job.py
|
0e0cac80c31b9515316660fd5e0da98b98625c6c
|
[
"MIT"
] |
permissive
|
errorsandwarnings/polyaxon
|
40cc1ee5797fe8add0a3bfb693abcfcab1c2f9cb
|
5eec0bc4aa4ad5f2dce8d1c0ef653265bf4fe6be
|
refs/heads/master
| 2020-03-21T05:28:27.001571
| 2018-06-20T06:43:55
| 2018-06-20T07:40:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
import tracker
from event_manager.events import job

# Register every job lifecycle event with the analytics tracker, roughly in
# lifecycle order: create/update, start, stop, views, status changes, end
# states, delete, restart.
tracker.subscribe(job.JobCreatedEvent)
tracker.subscribe(job.JobUpdatedEvent)
tracker.subscribe(job.JobStartedEvent)
tracker.subscribe(job.JobStartedTriggeredEvent)
# NOTE(review): "Sopped" looks like a typo for "Stopped", but these names
# must match the classes declared in event_manager.events.job -- rename
# there first if it is ever fixed.
tracker.subscribe(job.JobSoppedEvent)
tracker.subscribe(job.JobSoppedTriggeredEvent)
tracker.subscribe(job.JobViewedEvent)
tracker.subscribe(job.JobNewStatusEvent)
tracker.subscribe(job.JobFailedEvent)
tracker.subscribe(job.JobSucceededEvent)
tracker.subscribe(job.JobDoneEvent)
tracker.subscribe(job.JobDeletedEvent)
tracker.subscribe(job.JobDeletedTriggeredEvent)
tracker.subscribe(job.JobLogsViewedEvent)
tracker.subscribe(job.JobRestartedEvent)
tracker.subscribe(job.JobRestartedTriggeredEvent)
tracker.subscribe(job.JobStatusesViewedEvent)
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
ca40804f3b11f5bf3ee4a29175aac94bdf3ecb7c
|
32c4b55b781b0b08860e1e56eb3cf226b1dc7644
|
/ask-sdk-model/ask_sdk_model/services/reminder_management/trigger.py
|
af37e6b7ee01251792653e4516fb55111ff43832
|
[
"Apache-2.0"
] |
permissive
|
vertolab/alexa-apis-for-python
|
8d2b4b5b44d0360bfa24508ca7d55e4f2c92e0dd
|
85274cff0818e78d87f7f389e7b0e4613ddaa170
|
refs/heads/master
| 2020-04-29T01:33:15.395179
| 2019-03-15T02:33:42
| 2019-03-15T02:33:42
| 175,734,525
| 0
| 0
|
Apache-2.0
| 2019-03-15T02:29:44
| 2019-03-15T02:29:44
| null |
UTF-8
|
Python
| false
| false
| 5,240
|
py
|
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.services.reminder_management.trigger_type import TriggerType
from ask_sdk_model.services.reminder_management.recurrence import Recurrence
class Trigger(object):
    """
    Trigger information for Reminder

    :param object_type: 
    :type object_type: (optional) ask_sdk_model.services.reminder_management.trigger_type.TriggerType
    :param scheduled_time: Valid ISO 8601 format - Intended trigger time
    :type scheduled_time: (optional) datetime
    :param offset_in_seconds: If reminder is set using relative time, use this field to specify the time after which reminder ll ring (in seconds)
    :type offset_in_seconds: (optional) int
    :param time_zone_id: Intended reminder's timezone
    :type time_zone_id: (optional) str
    :param recurrence: 
    :type recurrence: (optional) ask_sdk_model.services.reminder_management.recurrence.Recurrence
    """
    # Attribute name -> type string consumed by the SDK's (de)serializer.
    deserialized_types = {
        'object_type': 'ask_sdk_model.services.reminder_management.trigger_type.TriggerType',
        'scheduled_time': 'datetime',
        'offset_in_seconds': 'int',
        'time_zone_id': 'str',
        'recurrence': 'ask_sdk_model.services.reminder_management.recurrence.Recurrence'
    }
    # Python attribute name -> JSON wire-format key.
    attribute_map = {
        'object_type': 'type',
        'scheduled_time': 'scheduledTime',
        'offset_in_seconds': 'offsetInSeconds',
        'time_zone_id': 'timeZoneId',
        'recurrence': 'recurrence'
    }

    def __init__(self, object_type=None, scheduled_time=None, offset_in_seconds=None, time_zone_id=None, recurrence=None):
        # type: (Optional[TriggerType], Optional[datetime], Optional[int], Optional[str], Optional[Recurrence]) -> None
        """Trigger information for Reminder

        :param object_type: 
        :type object_type: (optional) ask_sdk_model.services.reminder_management.trigger_type.TriggerType
        :param scheduled_time: Valid ISO 8601 format - Intended trigger time
        :type scheduled_time: (optional) datetime
        :param offset_in_seconds: If reminder is set using relative time, use this field to specify the time after which reminder ll ring (in seconds)
        :type offset_in_seconds: (optional) int
        :param time_zone_id: Intended reminder's timezone
        :type time_zone_id: (optional) str
        :param recurrence: 
        :type recurrence: (optional) ask_sdk_model.services.reminder_management.recurrence.Recurrence
        """
        # None discriminator: this model is not a polymorphic subtype.
        # NOTE(review): presumably read by the SDK's generated serializer;
        # confirm against the SDK base utilities.
        self.__discriminator_value = None
        self.object_type = object_type
        self.scheduled_time = scheduled_time
        self.offset_in_seconds = offset_in_seconds
        self.time_zone_id = time_zone_id
        self.recurrence = recurrence

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements (models and Enums).
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize dict values (models and Enums).
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, Trigger):
            return False
        # Attribute-wise comparison via the instance dicts.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com"
] |
ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com
|
804fcab8ec23cea1ffebc6b018cc84cd0f542370
|
c66955c6fc178955c2024e0318ec7a91a8386c2d
|
/testframework/excise/runnerwithallure.py
|
2918eed15525373dfcc856b73f3e07c020e0037c
|
[] |
no_license
|
duheng18/python-study
|
a98642d6ee1b0043837c3e7c5b91bf1e28dfa588
|
13c0571ac5d1690bb9e615340482bdb2134ecf0e
|
refs/heads/master
| 2022-11-30T17:36:57.060130
| 2019-11-18T07:31:40
| 2019-11-18T07:31:40
| 147,268,053
| 1
| 0
| null | 2022-11-22T03:36:51
| 2018-09-04T00:49:42
|
Python
|
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""Run the UI test suite under pytest with Allure result collection, then
render the HTML report with the allure CLI."""
import sys
import os
import pytest
import subprocess
import logging
import allure
import shutil

# Make the directory containing this script importable so the 'testcases'
# packages resolve regardless of the current working directory.
sys.path.append(os.path.dirname(sys.modules[__name__].__file__))

# Route all log records (level 0 = everything) to a UTF-8 log file.
fileHandler = logging.FileHandler(filename="../log/uiauto.log",encoding="utf-8")
logging.getLogger().setLevel(0)
formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(module)s:%(lineno)d %(message)s')
fileHandler.setFormatter(formatter)
logging.getLogger().addHandler(fileHandler)

if __name__ == '__main__':
    # Clear the previous run's raw results.  Fix: ignore_errors=True so a
    # missing directory (e.g. the very first run) no longer crashes here.
    shutil.rmtree('../log/report/xml/', ignore_errors=True)
    # Alternative invocations kept for reference:
    #pytest.main(['-sq', '--alluredir', '../log/testreport', 'testcases/myselector/test_all_stocks.py'])
    #pytest.main(['-sq', '--alluredir', '../log/testreport/xml', 'testcases/login','testcases/myselector'])
    #pytest.main(['--alluredir', '../log/report/xml','--allure_severities=blocker', 'testcases/'])
    pytest.main(['--alluredir', '../log/report/xml', 'testcases/alluredemo/login/test_login.py::TestLogin::test_2474609'])
    #pytest.main(['--alluredir', '../log/report/xml','--allure-severities=blocker', 'testcases/alluredemo/'])
    #pytest.main(['--alluredir', '../log/report/xml','--allure-features=测试登录功能', 'testcases/alluredemo/'])
    print(subprocess.getstatusoutput('/usr/local/bin/allure generate --clean ../log/report/xml -o ../log/report/html'))
|
[
"emaildh@163.com"
] |
emaildh@163.com
|
73b7beaa9bea2b28e0fd1617ec699f27fe407e5a
|
107f9bbd587decbab2e6188c0085e9f67b5f3708
|
/Extractor/util/DoubleValExtractor.py
|
6bba748c3edadf2ace17f5fc93206e110ab92d6e
|
[
"Apache-2.0"
] |
permissive
|
FloatErrorAnalysis/LLVM
|
4dbcd52a60774847949cf190a71cdf374ca437ce
|
7ce723e1fe7fee227ab7c0ac8d49bca89459957a
|
refs/heads/master
| 2020-04-03T14:56:55.038691
| 2018-11-11T15:09:56
| 2018-11-11T15:09:56
| 155,343,259
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,116
|
py
|
# 一个专门用于提取ll文件double类型变量和相关函数以及double类型的函数的工具类
''' 全局标识符(函数,全局变量)以“@”字符开头。
本地标识符(注册名称,类型)以'%'字符开头 '''
class DoubleValExtractor:
    """Extracts double-related statements, functions and local registers from
    an LLVM IR (.ll) file.

    LLVM IR naming: global identifiers (functions, globals) start with '@';
    local identifiers (registers, types) start with '%'.
    """

    def __init__(self, source_file_path):
        # Bug fix: these collections used to be CLASS attributes, so every
        # instance appended into the same shared lists.  They are now
        # per-instance state.
        self.source_file_path = source_file_path
        self.ll_file_content_list = []  # every line of the file, stripped
        self.vm_module = []
        self.double_vars = []           # '%'-prefixed registers (see extract_double_vars)
        self.double_functions = []      # lines of double-returning functions
        self.double_statements = []     # raw lines mentioning 'double'
        with open(self.source_file_path, 'r') as ll_file:
            ll_file_content = ll_file.read()
            tmp_list = ll_file_content.split('\n')
            for line in tmp_list:
                self.ll_file_content_list.append(line.strip())
                if 'double' in line:
                    self.double_statements.append(line)

    def extract_double_functions(self):
        """Collect lines of functions returning double: bodies spanning
        'define double' ... '}', plus single-line 'declare double' lines."""
        flag = False
        for line in self.ll_file_content_list:
            if 'define double' in line:
                flag = True
            if flag:
                self.double_functions.append(line)
            if '}' in line:
                flag = False
            # Declarations have no body -- capture the single line.
            if 'declare double' in line:
                self.double_functions.append(line)
        return self.double_functions

    def extract_double_vars(self):
        """Return the distinct '%'-prefixed local registers appearing in
        double-related statements.

        Bug fix: the original sliced exactly one character after '%', so
        multi-character names were truncated (e.g. '%sum' -> '%s') and only
        the first '%' per statement was considered.
        """
        import re  # local import keeps this fix self-contained
        for statement in self.double_statements:
            self.double_vars.extend(re.findall(r'%[A-Za-z0-9._]+', statement))
        return list(set(self.double_vars))

    def extract_double_concerned_statements(self):
        """Union of all double statements and double-function lines."""
        return list(set(self.double_statements + self.extract_double_functions()))
# Ad-hoc driver: extract from a hard-coded .ll file and dump the result.
# NOTE(review): absolute local path -- this only runs on the author's machine.
extractor = DoubleValExtractor('/Users/py/GitHub/LLVM/functions/ll_file/sqrt_minus.ll')
with open('double_ll', 'w') as f:
    f.writelines(extractor.extract_double_concerned_statements())
|
[
"2529716798@qq.com"
] |
2529716798@qq.com
|
49440e2525655c4cccc5adb43fc2eaae167e8f7e
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/cache/_enums.py
|
687882b04641acfe4856e26c5ddfb16742be973e
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 3,490
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
# Public API of this generated module: one entry per enum class below.
__all__ = [
    'AofFrequency',
    'ClusteringPolicy',
    'DayOfWeek',
    'EvictionPolicy',
    'PrivateEndpointServiceConnectionStatus',
    'Protocol',
    'PublicNetworkAccess',
    'RdbFrequency',
    'ReplicationRole',
    'SkuFamily',
    'SkuName',
    'TlsVersion',
]
class AofFrequency(str, Enum):
    """
    Sets the frequency at which data is written to disk.
    """
    # Member values are the literal strings serialized to the service API.
    AOF_FREQUENCY_1S = "1s"
    ALWAYS = "always"
class ClusteringPolicy(str, Enum):
    """
    Clustering policy - default is OSSCluster. Specified at create time.
    """
    ENTERPRISE_CLUSTER = "EnterpriseCluster"
    OSS_CLUSTER = "OSSCluster"
class DayOfWeek(str, Enum):
    """
    Day of the week when a cache can be patched.
    """
    # Includes the aggregate windows Everyday / Weekend alongside single days.
    MONDAY = "Monday"
    TUESDAY = "Tuesday"
    WEDNESDAY = "Wednesday"
    THURSDAY = "Thursday"
    FRIDAY = "Friday"
    SATURDAY = "Saturday"
    SUNDAY = "Sunday"
    EVERYDAY = "Everyday"
    WEEKEND = "Weekend"
class EvictionPolicy(str, Enum):
    """
    Redis eviction policy - default is VolatileLRU
    """
    ALL_KEYS_LFU = "AllKeysLFU"
    ALL_KEYS_LRU = "AllKeysLRU"
    ALL_KEYS_RANDOM = "AllKeysRandom"
    VOLATILE_LRU = "VolatileLRU"
    VOLATILE_LFU = "VolatileLFU"
    VOLATILE_TTL = "VolatileTTL"
    VOLATILE_RANDOM = "VolatileRandom"
    NO_EVICTION = "NoEviction"
class PrivateEndpointServiceConnectionStatus(str, Enum):
    """
    Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
    """
    PENDING = "Pending"
    APPROVED = "Approved"
    REJECTED = "Rejected"
class Protocol(str, Enum):
    """
    Specifies whether redis clients can connect using TLS-encrypted or plaintext redis protocols. Default is TLS-encrypted.
    """
    ENCRYPTED = "Encrypted"
    PLAINTEXT = "Plaintext"
class PublicNetworkAccess(str, Enum):
    """
    Whether or not public endpoint access is allowed for this cache. Value is optional but if passed in, must be 'Enabled' or 'Disabled'. If 'Disabled', private endpoints are the exclusive access method. Default value is 'Enabled'
    """
    ENABLED = "Enabled"
    DISABLED = "Disabled"
class RdbFrequency(str, Enum):
    """
    Sets the frequency at which a snapshot of the database is created.
    """
    RDB_FREQUENCY_1H = "1h"
    RDB_FREQUENCY_6H = "6h"
    RDB_FREQUENCY_12H = "12h"
class ReplicationRole(str, Enum):
    """
    Role of the linked server.
    """
    PRIMARY = "Primary"
    SECONDARY = "Secondary"
class SkuFamily(str, Enum):
    """
    The SKU family to use. Valid values: (C, P). (C = Basic/Standard, P = Premium).
    """
    C = "C"
    P = "P"
class SkuName(str, Enum):
    """
    The type of RedisEnterprise cluster to deploy. Possible values: (Enterprise_E10, EnterpriseFlash_F300 etc.)
    """
    ENTERPRISE_E10 = "Enterprise_E10"
    ENTERPRISE_E20 = "Enterprise_E20"
    ENTERPRISE_E50 = "Enterprise_E50"
    ENTERPRISE_E100 = "Enterprise_E100"
    ENTERPRISE_FLASH_F300 = "EnterpriseFlash_F300"
    ENTERPRISE_FLASH_F700 = "EnterpriseFlash_F700"
    ENTERPRISE_FLASH_F1500 = "EnterpriseFlash_F1500"
class TlsVersion(str, Enum):
    """
    The minimum TLS version for the cluster to support, e.g. '1.2'
    """
    TLS_VERSION_1_0 = "1.0"
    TLS_VERSION_1_1 = "1.1"
    TLS_VERSION_1_2 = "1.2"
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
084d1e9466ab51b588f6b9e5ee5775e6b2032af3
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/exp-big-987.py
|
98a30d699cfa98b31b62bcdabb1a8d73c564b701
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,178
|
py
|
# Compute x**y
def exp(x: int, y: int) -> int:
    """Return x**y by repeated multiplication (1 when y <= 0)."""
    result: int = 1
    remaining: int = y
    while remaining > 0:
        result = result * x
        remaining = remaining - 1
    return result
def exp2(x: int, y: int, x2: int, y2: int) -> int:
    """Return x**y (1 when y <= 0); x2 and y2 are unused benchmark padding."""
    result: int = 1
    remaining: int = y
    while remaining > 0:
        result = result * x
        remaining = remaining - 1
    return result
def exp3(x: int, y: int, x2: int, y2: int, x3: int, y3: int) -> int:
    """Return x**y (1 when y <= 0); the extra parameters are unused padding."""
    result: int = 1
    remaining: int = y
    while remaining > 0:
        result = result * x
        remaining = remaining - 1
    return result
def exp4(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int) -> int:
    """Compute x**y (1 when y <= 0).

    The extra parameters and the a2..a5 locals are unused padding kept for
    signature/shape compatibility with the rest of this benchmark suite.
    """
    a: int = 0
    a2: int = 0
    a3: int = 0
    a4: int = 0
    a5: int = 0
    def f(i: int) -> int:
        nonlocal a
        nonlocal a2
        nonlocal a3
        nonlocal a4
        nonlocal a5
        def geta() -> int:
            return a
        if i <= 0:
            return geta()
        else:
            a = a * x
            a2 = a * x
            a3 = a * x
            a4 = a * x
            a5 = a * x
            # Bug fix: the original read "return f($Exp)" -- an unexpanded
            # template placeholder (a syntax error).  The recursion counts
            # down by one, exactly as in exp/exp2/exp3/exp5.
            return f(i-1)
    a = 1
    a2 = 1
    a3 = 1
    a4 = 1
    a5 = 1
    return f(y)
def exp5(x: int, y: int, x2: int, y2: int, x3: int, y3: int, x4: int, y4: int, x5: int, y5: int) -> int:
    """Return x**y (1 when y <= 0); the extra parameters are unused padding."""
    result: int = 1
    remaining: int = y
    while remaining > 0:
        result = result * x
        remaining = remaining - 1
    return result
# Input parameter (n2..n5 and i2..i5 are unused benchmark padding)
n:int = 42
n2:int = 42
n3:int = 42
n4:int = 42
n5:int = 42
# Run [0, n]
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch: print 2**(i mod 31) for i = 0..n
while i <= n:
    print(exp(2, i % 31))
    i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
a6754e24060dada2bc601f991212c3b62a574c61
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02927/s825032151.py
|
0c0da2fdaf320f22fbbb8c13954eeb5e830d6169
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 232
|
py
|
# Read the month count M and day count D from stdin.
M, D = map(int, input().split())
def f(m, d):
    """Return True when day *d* is a "product day" for month *m*:
    both digits of d are at least 2 and their product equals m."""
    ones = d % 10
    tens = d // 10
    return ones >= 2 and tens >= 2 and ones * tens == m
# Count all (month, day) pairs in [1, M] x [1, D] that are product days.
count = 0
for i in range(1, M+1):
    for j in range(1, D + 1):
        count += 1 if f(i, j) else 0
print(count)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f5321f3e48bf46ddb5a487c404d82dbee8b6acfd
|
0124528676ee3bbaec60df5d6950b408e6da37c8
|
/Projects/QTPy/adafruit-circuitpython-bundle-7.x-mpy-20220601/examples/irremote_transmit.py
|
9595f492cf2210c7860dca7d88ef446d35ffbe60
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
land-boards/lb-boards
|
8127658dc537dcfde0bb59a5018ab75c3f0087f6
|
eeb98cc2003dac1924845d949f6f5bd387376568
|
refs/heads/master
| 2023-06-07T15:44:46.110742
| 2023-06-02T22:53:24
| 2023-06-02T22:53:24
| 4,847,305
| 10
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 968
|
py
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""IR transmit example using Circuit Playground Express"""
# pylint: disable-msg=no-member
import time
import pulseio
import pwmio
import board
import digitalio
import adafruit_irremote

# Create a button object to trigger IR transmit
# (pulled DOWN, so button.value reads True while the button is pressed)
button = digitalio.DigitalInOut(board.D4)
button.direction = digitalio.Direction.INPUT
button.pull = digitalio.Pull.DOWN
# Create a 'pwmio' output, to send infrared signals on the IR transmitter @ 38KHz
# duty_cycle=2**15 is ~50% of the 16-bit (0..65535) range
pwm = pwmio.PWMOut(board.IR_TX, frequency=38000, duty_cycle=2**15)
pulseout = pulseio.PulseOut(pwm)
# Create an encoder that will take numbers and turn them into NEC IR pulses
encoder = adafruit_irremote.GenericTransmit(
    header=[9500, 4500], one=[550, 550], zero=[550, 1700], trail=0
)
# Poll forever; send a fixed 4-byte code on each press.
while True:
    if button.value:
        print("IR signal sent!")
        encoder.transmit(pulseout, [255, 2, 255, 0])
        time.sleep(0.2)  # crude debounce / rate limit between sends
|
[
"doug@douglasgilliland.com"
] |
doug@douglasgilliland.com
|
8b3821d319dca7c778f383c3838af711f2438bfd
|
56f998d88a4cdae9f2c99b6f2013a10b90f227a2
|
/network/admin.py
|
8010ed3923c1ded4a1326c43ba9b78eac1fbe675
|
[] |
no_license
|
lautarodapin/network-course-cs50
|
a45cfa675b7ff475ee1600276cbf47eb19fca7d9
|
2994c6b44eb46f4d303621a4e48604aa672017ea
|
refs/heads/main
| 2023-04-02T06:42:14.599721
| 2021-04-09T20:57:18
| 2021-04-09T20:57:18
| 355,749,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
from django.contrib import admin
from .models import Post, User, Comment
class CommentInline(admin.TabularInline):
model = Comment
extra = 0
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = [
"id",
"content",
"likes",
"user",
]
inlines = [CommentInline]
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
list_display = [
"id",
"username",
]
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = [
"id",
"comment",
"user",
"post",
]
|
[
"lautarodapin@gmail.com"
] |
lautarodapin@gmail.com
|
97afbf06615fccf029968bf34a0bc4e21e70c552
|
5c74f8526f185d90093aba3fb213a11de5ea18ba
|
/rx/operators/observable/observeon.py
|
8455b848d1b064d10076f186fa1593f02c7659e2
|
[
"Apache-2.0"
] |
permissive
|
yutiansut/RxPY
|
df02c64cb791bf2a7a97413a75f4d2903e1682b5
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
refs/heads/master
| 2020-03-10T11:07:27.642494
| 2018-12-29T07:02:02
| 2018-12-29T07:02:02
| 129,349,161
| 0
| 0
|
NOASSERTION
| 2018-12-29T07:02:03
| 2018-04-13T04:50:02
|
Python
|
UTF-8
|
Python
| false
| false
| 794
|
py
|
from rx.core import AnonymousObservable, ObservableBase
from rx.core.observeonobserver import ObserveOnObserver
def observe_on(source, scheduler) -> ObservableBase:
"""Wraps the source sequence in order to run its observer callbacks
on the specified scheduler.
Keyword arguments:
scheduler -- Scheduler to notify observers on.
Returns the source sequence whose observations happen on the
specified scheduler.
This only invokes observer callbacks on a scheduler. In case the
subscription and/or unsubscription actions have side-effects
that require to be run on a scheduler, use subscribe_on.
"""
def subscribe(observer, _=None):
return source.subscribe(ObserveOnObserver(scheduler, observer))
return AnonymousObservable(subscribe)
|
[
"dag@brattli.net"
] |
dag@brattli.net
|
44f00df1320decf7dfa7e83714f2c1c267a32738
|
9d461bb7e7db942654a819fd544dd6e59e671841
|
/gui_test.py
|
179acc4ff415a686fb2d97162ac3584cbf8d961d
|
[] |
no_license
|
vimcoper/qt_candle_chart
|
193644057a43ef068292f2bd61e713530485f8e9
|
205c493e3e374562142eaac9992f73be9e0b9d98
|
refs/heads/master
| 2022-07-19T09:03:31.396671
| 2020-05-25T08:19:57
| 2020-05-25T08:19:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,135
|
py
|
import sys
import time
import numpy as np
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
class ApplicationWindow(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self._main = QtWidgets.QWidget()
print(self._main)
self.setCentralWidget(self._main)
layout = QtWidgets.QVBoxLayout(self._main)
static_canvas = FigureCanvas(Figure(figsize=(5, 3)))
layout.addWidget(static_canvas)
# self.addToolBar(NavigationToolbar(static_canvas, self))
dynamic_canvas = FigureCanvas(Figure(figsize=(5, 3)))
layout.addWidget(dynamic_canvas)
# self.addToolBar(QtCore.Qt.BottomToolBarArea,
# NavigationToolbar(dynamic_canvas, self))
self._static_ax = static_canvas.figure.subplots()
t = np.linspace(0, 10, 501)
self._static_ax.plot(t, np.tan(t), ".")
self._dynamic_ax = dynamic_canvas.figure.subplots()
self._timer = dynamic_canvas.new_timer(
50, [(self._update_canvas, (), {})])
self._timer.start()
def _update_canvas(self):
self._dynamic_ax.clear()
t = np.linspace(0, 10, 101)
# Use fixed vertical limits to prevent autoscaling changing the scale
# of the axis.
self._dynamic_ax.set_ylim(-1.1, 1.1)
# Shift the sinusoid as a function of time.
self._dynamic_ax.plot(t, np.sin(t + time.time()))
self._dynamic_ax.figure.canvas.draw()
if __name__ == "__main__":
# Check whether there is already a running QApplication (e.g., if running
# from an IDE).
qapp = QtWidgets.QApplication.instance()
if not qapp:
qapp = QtWidgets.QApplication(sys.argv)
app = ApplicationWindow()
app.show()
app.activateWindow()
app.raise_()
qapp.exec_()
|
[
"pjt3591oo@gmail.com"
] |
pjt3591oo@gmail.com
|
1e257cc346957e4e15add00df2f9cfc675ebce1c
|
61a72b019346d10c502f7ed4d4894adbfe03c8cb
|
/legacy/structures.py
|
fd6f19c85c10d4ae631646b1bc26fe6ea01bdf4c
|
[
"BSD-2-Clause"
] |
permissive
|
numba/numba-examples
|
10617ced993e1f756595152711c1b6abe8d180a9
|
c423f5419a459f5ab8874fda6d39bb5ea05d04b2
|
refs/heads/master
| 2023-08-23T15:33:21.729427
| 2022-06-27T22:54:27
| 2022-06-27T22:54:27
| 96,823,247
| 168
| 62
|
BSD-2-Clause
| 2022-09-29T19:24:53
| 2017-07-10T21:32:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 547
|
py
|
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
import numpy as np
from numba import jit
record_type = np.dtype([('x', np.double), ('y', np.double)])
a = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=record_type)
@jit
def hypot(data):
result = np.empty_like(data, dtype=np.float64)
# notice access to structure elements 'x' and 'y' via attribute access
for i in range(data.shape[0]):
result[i] = np.sqrt(data[i].x * data[i].x + data[i].y * data[i].y)
return result
print(hypot(a))
|
[
"stuart.archibald@googlemail.com"
] |
stuart.archibald@googlemail.com
|
ea5cf27bff17dbb8236475b3e8ee6c32c4dfa01f
|
5c928e2b5024920d26c93f4b06e93c08a3e61294
|
/portal_rnaseq_galaxy/scripts/api/copy_history_dataset_to_history.py
|
ec02564972b8186fd5dc669fbf1280a839f997fa
|
[
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
BioinformaticsArchive/PathogenPortal
|
c390cf78299595d170b20a105afdd5200200ddaf
|
d4b1e525e7e91ce32ec29998a7bcb5a1415706a3
|
refs/heads/master
| 2021-01-22T12:49:48.599355
| 2013-08-13T16:16:13
| 2013-08-13T16:16:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
#!/usr/bin/env python
import os, sys
sys.path.insert( 0, os.path.dirname( __file__ ) )
from common import submit
try:
assert sys.argv[3]
data = {}
data['from_hda_id'] = sys.argv[3]
except IndexError:
print 'usage: %s key url hda_file_id' % os.path.basename( sys.argv[0] )
print ' library_file_id is from /api/libraries/<library_id>/contents/<library_file_id>'
sys.exit( 1 )
submit( sys.argv[1], sys.argv[2], data )
|
[
"anwarren@vbi.vt.edu"
] |
anwarren@vbi.vt.edu
|
4a2caa48c63041ee507c0375de604edc7effa7d2
|
cc6a674cab1dc959189b9edff975625f4815bc1c
|
/Transformers-from-scratch/examples/set_context.py
|
2317e03224a6ee1213b8d2ce86f2a61db92c7e66
|
[
"MIT"
] |
permissive
|
shreyansh26/DL-Code-Repository
|
15173042f566ea42f96eb65283347927a2fab4ff
|
f1974eedc1fef54b2d274703390a22721e46f502
|
refs/heads/master
| 2023-07-15T23:15:05.484609
| 2021-08-30T15:41:20
| 2021-08-30T15:41:20
| 382,834,342
| 0
| 0
| null | 2021-07-04T12:11:08
| 2021-07-04T11:27:13
|
Python
|
UTF-8
|
Python
| false
| false
| 203
|
py
|
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../tfb')))
import tfb
|
[
"shreyansh.pettswood@gmail.com"
] |
shreyansh.pettswood@gmail.com
|
ae0760e8766262053cf1d4a77e9a84e482e76efa
|
9f714608155e7f8b92cea3dd4bda78f3ac1f56a2
|
/Resume/put_item.py
|
a58e932cec941fad48372545f0eed3a34fb0491c
|
[] |
no_license
|
yenkuanlee/FoodResume
|
0258dbaf29ac3efc864b47b70fdc14b8acc37dac
|
2e7bff23235d569cf4caaba86f956b1bad749082
|
refs/heads/master
| 2020-03-20T23:58:25.545722
| 2018-07-09T04:12:37
| 2018-07-09T04:12:37
| 137,871,950
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
# -*- coding: utf-8 -*-
import json
from web3 import Web3, HTTPProvider, TestRPCProvider
from web3.contract import ConciseContract
import os
import sys
Cpath = os.path.dirname(os.path.realpath(__file__))
host = 'localhost'
#account = '0x42946c2bb22ad422e7366d68d3ca07fb1862ff36' ## supplier
account = '0xe6ab871f860d9f28764d5d2e0672396a7643710e' ## gmeal
passwd = '123'
# web3.py instance
w3 = Web3(HTTPProvider('http://'+host+':3000'))
w3.personal.unlockAccount(account,passwd)
f = open(Cpath+'/resume.json','r')
line = f.readline()
Jline = json.loads(line)
f.close()
abi = Jline['abi']
contract_address = Jline['contract_address']
# Contract instance in concise mode
contract_instance = w3.eth.contract(abi, contract_address, ContractFactoryClass=ConciseContract)
contract_instance.Record(sys.argv[1],transact={'from': account})
|
[
"yenkuanlee@gmail.com"
] |
yenkuanlee@gmail.com
|
1b73f4236d71f8b4f37038833d494e5d23ba0b35
|
87b904ebf11d416567a7e49b91b8e9934f67c6f3
|
/show_df_as_html_table.py
|
d8dd6bfadb056408f98c24475be1fe9ce25d2c20
|
[
"MIT"
] |
permissive
|
NathanKr/pandas-playground
|
a701f524aa48f22f6680e48c597206e10f8222e5
|
a5355c59cb61ca3a7dcce590ed42d56a6b943783
|
refs/heads/main
| 2023-06-05T11:07:52.061327
| 2021-07-02T02:35:15
| 2021-07-02T02:35:15
| 328,917,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
import pandas as pd
import os
url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
snp500_df = pd.read_html(url)[0]
file_path = os.path.join('temp','snp500_df_to_html.html')
print(f'write snp500_df to {file_path}')
snp500_df.to_html(file_path)
|
[
"natankrasney@gmail.com"
] |
natankrasney@gmail.com
|
a59830d94f066732fabbf3056710ff933a7aef39
|
dddbc7dea28cc36fb110f83acbc4b6290c9dea2d
|
/Final/playground.py
|
88ae3ba8f2832e52c36bc0d0c8f6f3411df0c682
|
[] |
no_license
|
rubcuadra/TC2025_PA
|
82551d6b10a214b99a4d7110492c7c0c01188eed
|
1893e1950709db009933d3f9ae9a84e9d8354241
|
refs/heads/master
| 2020-03-27T15:06:26.398245
| 2018-11-28T00:28:26
| 2018-11-28T00:28:26
| 146,698,373
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
from onitampy.board import OnitamaBoard
from onitampy.movements import OnitamaCards,DECK_INDEXES
if __name__ == '__main__':
board = OnitamaBoard()
# print(board)
board.cards[0].pop()
board.cards[0].pop()
board.cards[0].add('EEL')
board.cards[0].add('COBRA')
board.cards[1].pop()
board.cards[1].pop()
board.cards[1].add('GOOSE')
board.cards[1].add('CRAB')
print(board.canMove( board.BLUE, (4,2),"EEL",(3,1)))
|
[
"rubcuadra@gmail.com"
] |
rubcuadra@gmail.com
|
1c255e182e83fd5efcc23f3fcd88ce421d2cfc4b
|
ff58ba25d940ed34d9684efab04adef85d1e1c0f
|
/src/management/__init__.py
|
1cef4964de4e5953fc495c1d642a6ac0fde493ef
|
[] |
no_license
|
afsmith/Kneto-Sello
|
e9046a81ff83652531adc55aab3f90f77af5b5be
|
a1b12daf8a04ef485ddcaa2944b2d87878a8cdd0
|
refs/heads/master
| 2021-03-27T17:31:23.830989
| 2013-06-04T07:29:58
| 2013-06-04T07:29:58
| 6,720,999
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 BLStream Sp. z o.o. (http://blstream.com/)
#
# Authors:
# Marek Mackiewicz <marek.mackiewicz@blstream.com>
#
"""Application for user management.
This application handles management of users and their permissions.
"""
# vim: set et sw=4 ts=4 sts=4 tw=78:
|
[
"andrew.smith@kneto.fi"
] |
andrew.smith@kneto.fi
|
885b4a9b841ec2f7c0f895d76b18602485199964
|
846b11ccf549aba144c1824a24292a4850860ca7
|
/5-ExerciciosFuncoes/4.py
|
dcb657a9902f0c3fd3e9da6f637323357681ce71
|
[] |
no_license
|
paulocesarcsdev/ExerciciosPython
|
6d1feff293e7efc4cd3fbc62eee0add93f76db99
|
25bfaa6dc5cb294242e478a2b253a8ca5d9c7078
|
refs/heads/master
| 2023-05-15T00:53:22.151884
| 2021-06-10T03:04:04
| 2021-06-10T03:04:04
| 337,847,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
'''
Faça um programa, com uma função que necessite de um argumento.
A função retorna o valor de caractere ‘P’, se seu argumento for positivo, e ‘N’, se seu argumento for zero ou negativo.
'''
def peso(valor):
if valor % 2 == 0:
return " 'P' "
else:
return " 'N' "
numero = int(input('Entre o o valor: '))
print(peso(numero))
|
[
"paulocesarcs.dev@gmail.com"
] |
paulocesarcs.dev@gmail.com
|
148b5b1f7ca8a9c5547ea64c99330d158348a5a4
|
9ac6fda4872f67faf8ce3cb541f10cea692e72ee
|
/main.py
|
28467183158d45176c947599464a73fb8bbdd146
|
[] |
no_license
|
markbirds/OpenCV-Face-Recognition
|
4db776d286313d9d93d464a4bce131add1f0921a
|
f486e3e0e37c4cd6cb23818a17f09194bfd9582e
|
refs/heads/master
| 2023-02-08T01:01:22.442349
| 2021-01-04T13:06:45
| 2021-01-04T13:06:45
| 326,679,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,246
|
py
|
import numpy as np
import cv2
import json
# loading features to be used for face detection
face_cascade = cv2.CascadeClassifier('src/haar_cascades/haarcascade_frontalface_default.xml')
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.read("./src/recognizer/face_trained.yml")
with open('./src/recognizer/registered_faces.json') as f:
registered_faces = json.load(f)['registered_faces']
# capture live feed from webcam
cap = cv2.VideoCapture(0)
while(True):
# read frames and covert to grayscale
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# get coordinates of faces
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
# draw rectangle around face roi
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
face_roi_gray = gray[y:y+h, x:x+w]
id_, conf = face_recognizer.predict(face_roi_gray)
font = cv2.FONT_HERSHEY_SIMPLEX
name = registered_faces[id_]
color = (255, 255, 255)
stroke = 2
cv2.putText(frame, name, (x,y-20), font, 1, color, stroke, cv2.LINE_AA)
# display resulting frame
cv2.imshow('Face detection',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
[
"fowenpatrick@gmail.com"
] |
fowenpatrick@gmail.com
|
402d662f1e8116b3c4afdf8427dc4cddb4b05546
|
2d276785c3663d4798be462115291c4706dbd255
|
/Python从菜鸟到高手/chapter5/demo5.04.py
|
5cbb5b358fee5ddbaa83e6d82be4b05bab815d51
|
[] |
no_license
|
bupthl/Python
|
81c92433bd955663e6cda5fe7cab5ea3d067c3de
|
bdb33aeeb179a43100b9ef7129a925c63a133fd3
|
refs/heads/master
| 2022-02-21T11:02:40.195265
| 2019-08-16T05:49:18
| 2019-08-16T05:49:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
'''
--------《Python从菜鸟到高手》源代码------------
欧瑞科技版权所有
作者:李宁
如有任何技术问题,请加QQ技术讨论群:264268059
或关注“极客起源”订阅号或“欧瑞科技”服务号或扫码关注订阅号和服务号,二维码在源代码根目录
如果QQ群已满,请访问https://geekori.com,在右侧查看最新的QQ群,同时可以扫码关注公众号
“欧瑞学院”是欧瑞科技旗下在线IT教育学院,包含大量IT前沿视频课程,
请访问http://geekori.com/edu或关注前面提到的订阅号和服务号,进入移动版的欧瑞学院
“极客题库”是欧瑞科技旗下在线题库,请扫描源代码根目录中的小程序码安装“极客题库”小程序
关于更多信息,请访问下面的页面
https://geekori.com/help/videocourse/readme.html
'''
# 引用string模块中的Template类
from string import Template
template1 = Template("$s是我最喜欢的编程语言, $s非常容易学习,而且功能强大")
# 指定格式化参数s的值是Python
print(template1.substitute(s='Python'))
# 当格式化参数是一个字符串的一部分时,为了和字符串的其他部分区分开,
# 需要用一对大括号将格式化参数变量括起来
template2 = Template("${s}stitute")
print(template2.substitute(s='sub'))
template3 = Template("$dollar$$相当于多少$pounds")
# 替换两个格式化参数变量
print(template3.substitute(dollar=20,pounds='英磅'))
template4 = Template("$dollar$$相当于多少$pounds")
data = {}
data['dollar'] = 100
data['pounds'] = '英磅'
# 使用字典指定格式化参数值
print(template4.substitute(data))
|
[
"registercn@outlook.com"
] |
registercn@outlook.com
|
4649a07a27a686ce6bfe64cbce1f3e49493be5e0
|
23bfacd796850e9e2766bf3db3dcdfb640aa9cf4
|
/anamdesktop/entry.py
|
75ca190d87ad46083c09260be33ead5581aeadf0
|
[] |
no_license
|
yeleman/anam-desktop
|
f223e77099c4ca261414e19746ef8237dfcada32
|
eefac7f58c84964b7871addffe5cc3201a299ae0
|
refs/heads/master
| 2021-03-12T17:54:27.987963
| 2020-10-22T18:56:34
| 2020-10-22T18:56:34
| 91,448,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
import sys
from PyQt5 import QtWidgets, QtCore
from anamdesktop import setlocale, logger
from anamdesktop.ui.main import MainWindow
def destroy():
logger.info("Exiting Application")
QtCore.QCoreApplication.instance().quit
sys.exit(0)
def main():
logger.info("Starting Application")
app = QtWidgets.QApplication(sys.argv)
app.lastWindowClosed.connect(destroy)
setlocale()
window = MainWindow()
window.reset()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
[
"rgaudin@gmail.com"
] |
rgaudin@gmail.com
|
31f593769f4303c41f20eafa27f63464428e87b0
|
829b0a557d3cc43a108f9b76d748e923fba8d928
|
/lldb/packages/Python/lldbsuite/test/functionalities/breakpoint/source_regexp/TestSourceRegexBreakpoints.py
|
2258989806066f4b103f6d06ed3b3d0720c6e638
|
[
"NCSA",
"LLVM-exception",
"Apache-2.0"
] |
permissive
|
ljh740/llvm-project
|
31766f1f687939a679531d372d56755dbb5c415b
|
89295aa3f2aebcd930e5ee7272ca47349bb7767d
|
refs/heads/sbingner/master
| 2023-04-06T14:15:22.003403
| 2020-01-07T08:36:49
| 2020-01-07T08:36:49
| 255,562,403
| 0
| 0
|
Apache-2.0
| 2021-04-15T14:56:23
| 2020-04-14T09:12:17
| null |
UTF-8
|
Python
| false
| false
| 3,778
|
py
|
"""
Test lldb breakpoint setting by source regular expression.
This test just tests the source file & function restrictions.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestSourceRegexBreakpoints(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test_location(self):
self.build()
self.source_regex_locations()
def test_restrictions(self):
self.build()
self.source_regex_restrictions()
def source_regex_locations(self):
""" Test that restricting source expressions to files & to functions. """
# Create a target by the debugger.
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# First look just in main:
target_files = lldb.SBFileSpecList()
target_files.Append(lldb.SBFileSpec("a.c"))
func_names = lldb.SBStringList()
func_names.AppendString("a_func")
source_regex = "Set . breakpoint here"
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, func_names)
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 1,
"a.c in a_func should give one breakpoint, got %d." %
(num_locations))
loc = main_break.GetLocationAtIndex(0)
self.assertTrue(loc.IsValid(), "Got a valid location.")
address = loc.GetAddress()
self.assertTrue(
address.IsValid(),
"Got a valid address from the location.")
a_func_line = line_number("a.c", "Set A breakpoint here")
line_entry = address.GetLineEntry()
self.assertTrue(line_entry.IsValid(), "Got a valid line entry.")
self.assertTrue(line_entry.line == a_func_line,
"Our line number matches the one lldbtest found.")
def source_regex_restrictions(self):
""" Test that restricting source expressions to files & to functions. """
# Create a target by the debugger.
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# First look just in main:
target_files = lldb.SBFileSpecList()
target_files.Append(lldb.SBFileSpec("main.c"))
source_regex = "Set . breakpoint here"
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, lldb.SBStringList())
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 2,
"main.c should have 2 matches, got %d." %
(num_locations))
# Now look in both files:
target_files.Append(lldb.SBFileSpec("a.c"))
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, lldb.SBStringList())
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 4,
"main.c and a.c should have 4 matches, got %d." %
(num_locations))
# Now restrict it to functions:
func_names = lldb.SBStringList()
func_names.AppendString("main_func")
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, func_names)
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 2,
"main_func in main.c and a.c should have 2 matches, got %d." %
(num_locations))
|
[
"teemperor@gmail.com"
] |
teemperor@gmail.com
|
41e1aa9d1cb1c62fe89cbc3761521eaf73dce401
|
498cc670e199d8d3da497b8350b845c0717c505e
|
/readit/__init__.py
|
6723f5391c8b467acf39e926f77185ca47b08323
|
[] |
no_license
|
dave-shawley/readit
|
1a0b24fb859f00c6fc415647028ab5ee27453328
|
2a05f1de29ddc18ccae81b866d3da5b0a10d0236
|
refs/heads/master
| 2020-05-14T14:51:47.951498
| 2013-02-06T01:57:03
| 2013-02-06T01:57:03
| 6,038,460
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
# order is important here
from .helpers import LinkMap
from .reading import Reading
from .user import User
# flaskapp import required to be last since it depends on
# other readit exports
from .flaskapp import app, Application
class MoreThanOneResultError(Exception):
"""You will encounter me when a single instance is expected and more
than one is found or supplied."""
pass
__all__ = ['app', 'Application', 'LinkMap', 'MoreThanOneResultError',
'Reading', 'User']
|
[
"daveshawley@gmail.com"
] |
daveshawley@gmail.com
|
7aa4bba2c4b8fdf4ef4913eb41b7114c6a87829a
|
49dd2e801ae161b97abc2e7704debf1b19ef5f5d
|
/config.py
|
9ad5353015e986a27c9e8308ff1fbbbcc0b6cdba
|
[] |
no_license
|
Ggzzhh/Learn_Flask
|
7b207a45454e71def3bf332c4ee381a03c5f2082
|
3cd52b674e11fcfd883266e504fb11134ae23337
|
refs/heads/master
| 2021-01-20T09:23:39.089481
| 2017-09-21T01:52:52
| 2017-09-21T01:52:52
| 101,593,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,048
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import os
basedir = os.path.abspath(os.path.dirname(__file__))
# 基础设置
class Config:
# 设置密匙
SECRET_KEY = os.environ.get('SECRET_KEY') or 'GG0914ZH'
# 每次请求结束后自动提交数据库变动设置为true
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
# Flask-SQLAlchemy 将会追踪对象的修改并且发送信号
SQLALCHEMY_TRACK_MODIFICATIONS = True
# 邮箱主题前缀
FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky-test]'
# 寄件人
FLASKY_MAIL_SENDER = '某管理员 <gggzh@139.com>'
# 文章分页显示数
FLASKY_POSTS_PER_PAGE = 10
# 粉丝页分页显示数
FLASK_FOLLOWERS_PER_PAGE = 15
# 评论分页显示数
FLASKY_COMMENTS_PER_PAGE = 15
# 管理员邮箱
FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
# 查询速度慢于多少秒
FLASKY_SLOW_DB_QUERY_TIME = 0.5
# SQLAlchemy记录查询
SQLALCHEMY_RECORD_QUERIES = True
# SSL协议开关
SSL_DISABLE = True
MAIL_SERVER = 'smtp.139.com' # 服务器地址
MAIL_PORT = 465 # 服务器端口号
MAIL_USE_TLS = False # 默认为False
MAIL_USE_SSL = True # 打开邮箱SSL安全协议
MAIL_USERNAME = os.environ.get('MAIL_USERNAME') # 在环境变量中获取账号
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') # 在环境变量中获取密码
@staticmethod
def init_app(app):
pass
# 开发配置
class DevelopmentConfig(Config):
DEBUG = True # 调试开关
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
# 测试设置
class TestingConfig(Config):
TESTING = True # 测试开关
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
@classmethod
def init_app(cls, app):
Config.init_app(app)
# 发送错误到管理员邮箱
import logging
from logging.handlers import SMTPHandler
credentials = None
secure = None
if getattr(cls, 'MAIL_USERNAME', None) is not None:
credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
if getattr(cls, 'MAIL_USE_TLS', None):
secure = ()
mail_handler = SMTPHandler(
mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
fromaddr=cls.FLASKY_MAIL_SENDER,
toaddrs=[cls.FLASKY_ADMIN],
subject=cls.FLASKY_MAIL_SUBJECT_PREFIX + ' Application Error',
credentials=credentials,
secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
|
[
"471992509@qq.com"
] |
471992509@qq.com
|
aca38c5c4660e95c314a00a95abe93394e298433
|
0ba1743e9f865a023f72a14d3a5c16b99ee7f138
|
/problems/test_0037_bit_field.py
|
24161f2a4e3a6e98aa27f1d11d43512d52de7cfa
|
[
"Unlicense"
] |
permissive
|
chrisxue815/leetcode_python
|
d0a38a4168243b0628256825581a6df1b673855c
|
a33eb7b833f6998972e5340d383443f3a2ee64e3
|
refs/heads/main
| 2022-06-20T15:09:27.221807
| 2022-06-02T21:55:35
| 2022-06-02T21:55:35
| 94,590,264
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,778
|
py
|
import unittest
class Solution:
def solveSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: void Do not return anything, modify board in-place instead.
"""
rows = [-1] * 9
cols = [-1] * 9
blocks = [-1] * 9
for rownum, row in enumerate(board):
for colnum, num in enumerate(row):
if num != '.':
num = int(num)
mask = ~(1 << num)
rows[rownum] &= mask
cols[colnum] &= mask
blocks[rownum // 3 * 3 + colnum // 3] &= mask
def dfs(rownum, colnum):
while True:
if colnum < 8:
colnum += 1
elif rownum < 8:
colnum = 0
rownum += 1
else:
return True
if board[rownum][colnum] == '.':
break
blocknum = rownum // 3 * 3 + colnum // 3
for num in range(1, 10):
mask = 1 << num
if rows[rownum] & mask and cols[colnum] & mask and blocks[blocknum] & mask:
rows[rownum] &= ~mask
cols[colnum] &= ~mask
blocks[blocknum] &= ~mask
if dfs(rownum, colnum):
board[rownum][colnum] = str(num)
return True
rows[rownum] |= mask
cols[colnum] |= mask
blocks[blocknum] |= mask
return False
dfs(0, -1)
class Test(unittest.TestCase):
def test(self):
self._test([
[5, 3, 0, 0, 7, 0, 0, 0, 0],
[6, 0, 0, 1, 9, 5, 0, 0, 0],
[0, 9, 8, 0, 0, 0, 0, 6, 0],
[8, 0, 0, 0, 6, 0, 0, 0, 3],
[4, 0, 0, 8, 0, 3, 0, 0, 1],
[7, 0, 0, 0, 2, 0, 0, 0, 6],
[0, 6, 0, 0, 0, 0, 2, 8, 0],
[0, 0, 0, 4, 1, 9, 0, 0, 5],
[0, 0, 0, 0, 8, 0, 0, 7, 9],
], [
[5, 3, 4, 6, 7, 8, 9, 1, 2],
[6, 7, 2, 1, 9, 5, 3, 4, 8],
[1, 9, 8, 3, 4, 2, 5, 6, 7],
[8, 5, 9, 7, 6, 1, 4, 2, 3],
[4, 2, 6, 8, 5, 3, 7, 9, 1],
[7, 1, 3, 9, 2, 4, 8, 5, 6],
[9, 6, 1, 5, 3, 7, 2, 8, 4],
[2, 8, 7, 4, 1, 9, 6, 3, 5],
[3, 4, 5, 2, 8, 6, 1, 7, 9],
])
def _test(self, board, expected):
board = [[str(num) if num != 0 else '.' for num in row] for row in board]
Solution().solveSudoku(board)
board = [[int(num) for num in row] for row in board]
self.assertEqual(expected, board)
if __name__ == '__main__':
unittest.main()
|
[
"chrisxue815@gmail.com"
] |
chrisxue815@gmail.com
|
b095020e31a76e851a1c922f39050300a38635b1
|
942f0b081d2271978ffe20fbbfa8d687b57e5c02
|
/leetcode-june-challenge/largest_divisible_subset.py
|
78df0caffe4029792b53277570585a87054ba1a7
|
[] |
no_license
|
simtb/coding-puzzles
|
99762322606bb505d82924d4d5843db1c04aafbd
|
9e1d53e35b2117240eb357d7930cdb8cfd891c8e
|
refs/heads/master
| 2021-04-12T15:46:40.181048
| 2021-02-28T23:47:36
| 2021-02-28T23:47:36
| 249,089,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
"""
Given a set of distinct positive integers, find the largest subset such that every pair (Si, Sj) of elements in this subset satisfies:
Si % Sj = 0 or Sj % Si = 0.
If there are multiple solutions, return any subset is fine.
"""
class Solution:
def largestDivisibleSubset(self, nums) -> List[int]:
if not nums:
return []
nums.sort()
ans: List[List[int]] = [[num] for num in nums]
for i in range(len(nums)):
for j in range(i):
if nums[i] % nums[j] == 0 and len(ans[i]) < len(ans[j]) + 1:
ans[i] = ans[j] + [nums[i]]
return max(ans, key=len)
|
[
"simeon@Sims-MacBook-Pro.local"
] |
simeon@Sims-MacBook-Pro.local
|
8e81d9fa2913d6a02e05fbb400cf98948cbc3c60
|
6e68584f2819351abe628b659c01184f51fec976
|
/Centre_College/CSC_117/CSC_117_Python_Files/custom_library.py
|
b0fe3dea23a2323e7f981eba4de241d6c2dc5a3d
|
[] |
no_license
|
DanSGraham/code
|
0a16a2bfe51cebb62819cd510c7717ae24b12d1b
|
fc54b6d50360ae12f207385b5d25adf72bfa8121
|
refs/heads/master
| 2020-03-29T21:09:18.974467
| 2017-06-14T04:04:48
| 2017-06-14T04:04:48
| 36,774,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,531
|
py
|
###
### A custom library to make CSC 117 labs quicker
###By Daniel Graham
##
from graphics import *
#These save the buttons for use later
button_list = []
points_list = []
button_text_list = []
def button(win_name, button_name, x1,y1,x2,y2,buttontext, button_outline = None, button_fill=None, text_color=None, text_size = 12, text_style = 'normal', default = None):
"""Given 12 inputs window name, button name, points,
text to input, and other options, this function draws a button and saves its points/name to lists for checking
in button_checker. Order of input is:
win_name, button name, position, text, outline, fill, text color, text size, text style"""
#these lines establish the buttons_list and points_list as global variables
global button_list
global points_list
global button_text_list
#If the points are not given in the correct order this code rearranges them.
if x1 > x2:
x1,x2 = x2,x1
if y1 > y2:
y1, y2 = y2, y1
#This code draws the button
button_name = Rectangle(Point(x1, y1), Point(x2, y2))
button_text = Text(Point((x1 + (x2-x1)/2), (y1+ (y2-y1)/2)), str(buttontext))
button_name.draw(win_name)
button_text.draw(win_name)
#The next parts allow for colors!!!
if button_outline != default or button_outline != '':
button_name.setOutline(button_outline)
if button_fill != default or button_fill != '':
button_name.setFill(button_fill)
if text_color != default or text_color != '':
button_text.setTextColor(text_color)
if text_size != default or text_size != '':
button_text.setSize(text_size)
if text_style != default or text_style != '':
button_text.setStyle(text_style)
#These lines store the button name and points for use later in the checker
button_list.append(button_name)
button_text_list.append(button_text)
points_list.append(x1)
points_list.append(y1)
points_list.append(x2)
points_list.append(y2)
return button_name
def button_check(win_name):
"""This function takes button points and a window name and checks which button was clicked. Must have the lists named button_list and points_list"""
#establishes global variables
global button_list
global points_list
while True:
clicked_point = win_name.getMouse()
clicked_x = clicked_point.getX()
clicked_y = clicked_point.getY()
for i in range(len(button_list)):
if clicked_x > points_list[i*4] and clicked_x < points_list[(2)+(4*i)] and clicked_y > points_list[1+4*i] and clicked_y < points_list[3 + 4*i]:
return button_list[i]
def button_undraw(to_undraw):
"""This function undraws a list of buttons or single button from the window"""
global button_text_list
global button_list
if type(to_undraw) == list :
for button in to_undraw:
button.undraw()
index_of_text_undraw = to_undraw.index(button)
button_text_list[index_of_text_undraw].undraw()
elif type(to_undraw) != list :
button = to_undraw
button.undraw()
index_of_text_undraw = button_list.index(button)
button_text_list[index_of_text_undraw].undraw()
button_list.remove(button)
button_text_list.remove(button_text_list[index_of_text_undraw])
def test():
window = GraphWin('Test Window')
close_button = 0 #initialize the button variable
close_button = button(window, close_button, 1,1,150,150, "close")#Set each button variable equal to the button it refers to
no_close_button = 0
no_close_button = button(window, no_close_button, 160,160,180,180, "No close")
if button_check(window) == close_button: #check which button variable is called
print "Close!"
elif button_check(window) == no_close_button:
print "No close"
#close_button.undraw() The issue with this approach is the text does not get undrawn.
button_undraw(close_button)#undraw desired buttons
window.getMouse()
button_undraw(button_list)
window.getMouse
window.close()
#Running into errors still
#For some reason the remaining list is not undrawing and the no close button has to be clicked twice to return its value
# Run the interactive demo only when executed as a script (not on import).
if __name__ == '__main__':
    test()
|
[
"dan.s.graham@gmail.com"
] |
dan.s.graham@gmail.com
|
6c84cb245a34a99d80a0b5e9643a2cc14e435e3e
|
f4fa497cbd99e24e242a133e002c8142051a6902
|
/words/urls.py
|
796ee61ba06c25dd5414fda69636f201e9dbaf5d
|
[] |
no_license
|
austinbrovick/django-economics
|
3b8e79a21e2a17a52e57b2a0731f4e797ee1b8c2
|
9ce51bd7e134e84409c48ae541e01456d48af2cb
|
refs/heads/master
| 2021-01-17T12:37:47.587407
| 2016-06-20T03:44:05
| 2016-06-20T03:44:05
| 58,336,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
from django.conf.urls import url
from .views import word_list, WordCreate, word_detail, DefinitionCreate, DefinitionDownVote, DefinitionUpVote, words_micro, words_macro, words_both
# URL routes for the words app.  Pattern order matters: the regexes are
# tried top to bottom.
urlpatterns = [
    # Listing and creation.
    url(r'^$', word_list, name='words_list'),
    url(r'^create/$', WordCreate.as_view(), name='words_word_create'),
    # Word detail and per-word definition creation (pk = word id).
    url(r'^(?P<pk>\d+)/$', word_detail, name='words_word_detail'),
    url(r'^create/(?P<pk>\d+)/$', DefinitionCreate.as_view(), name='words_definition_create'),
    # Voting endpoints (pk = definition id).
    url(r'^upvote/(?P<pk>\d+)/$', DefinitionUpVote.as_view(), name='words_definition_upvote'),
    url(r'^downvote/(?P<pk>\d+)/$', DefinitionDownVote.as_view(), name='words_definition_downvote'),
    # Category listings.
    url(r'^microeconomics/$', words_micro, name='words_micro'),
    url(r'^macroeconomics/$', words_macro, name='words_macro'),
    url(r'^micro_and_macro/$', words_both, name='words_both'),
]
|
[
"austinbrovick@gmail.com"
] |
austinbrovick@gmail.com
|
924d29ab36ead397539c2dbdea111bceb73f20aa
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/bob/edbcf1a652c246b7a144d8311374fbc3.py
|
e715104cfa6369084ca1fb912c106baa188e1191
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
def hey(message):
    """Return Bob's reply to *message*.

    Bob answers 'Fine. Be that way!' to silence, 'Woah, chill out!' to
    shouting (all-caps), 'Sure.' to a question, and 'Whatever.' otherwise.
    Shouting takes precedence over questioning.
    """
    stripped = message.strip()
    if not stripped:
        return 'Fine. Be that way!'
    if stripped.isupper():
        return 'Woah, chill out!'
    return 'Sure.' if stripped.endswith('?') else 'Whatever.'
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
cb69a7b7282f4fb2539a466dee032cf0532748eb
|
78d5a6e0846cb6b03544e4f717651ca59dfc620c
|
/treasury-admin/interface/migrations/0005_auto_20171026_1641.py
|
47e6661136111d9a0fd9cdbb9048864ba031a0c8
|
[] |
no_license
|
bsca-bank/treasury-admin
|
8952788a9a6e25a1c59aae0a35bbee357d94e685
|
5167d6c4517028856701066dd5ed6ac9534a9151
|
refs/heads/master
| 2023-02-05T12:45:52.945279
| 2020-12-13T08:07:41
| 2020-12-13T08:07:41
| 320,323,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-26 15:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the ``date_cr`` and ``date_dr``
    columns from the ``apbkcom`` model."""

    dependencies = [
        ('interface', '0004_auto_20171026_0918'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='apbkcom',
            name='date_cr',
        ),
        migrations.RemoveField(
            model_name='apbkcom',
            name='date_dr',
        ),
    ]
|
[
"cn.makodo@gmail.com"
] |
cn.makodo@gmail.com
|
b674cb6e1d39ea0db9ce015053153a2bdaba5038
|
699cad5fee497cce94463decf1bf2b811e3fd244
|
/16이미지요리/watermark.py
|
783d8c4b885bcab80a88fe98c929e68daade41b9
|
[] |
no_license
|
Jeonghwan-Yoo/brain_python3
|
91974019a29013abe8c9f9ed132c48b404259e2f
|
a22e870515e760aaa497cbc99305977cf2f01a3d
|
refs/heads/master
| 2020-07-27T00:02:29.604848
| 2019-09-16T13:16:09
| 2019-09-16T13:16:09
| 208,802,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
import sys
from wand.image import Image
# Require both image paths on the command line; print usage otherwise.
if len(sys.argv) < 3:
    print('{0} <Image 1> <Image 2>'.format(sys.argv[0]))
    sys.exit()

base_path = sys.argv[1]
mark_path = sys.argv[2]

# Stamp the second image onto a clone of the first at (100, 100) with
# 0.7 transparency, then save under a combined file name.
with Image(filename=base_path) as base_img:
    with Image(filename=mark_path) as mark_img:
        with base_img.clone() as stamped:
            stamped.watermark(mark_img, 0.7, 100, 100)
            stamped.save(filename=base_path + '_' + mark_path)
|
[
"dwgbjdhks2@gmail.com"
] |
dwgbjdhks2@gmail.com
|
9ecbb22d8a49abc54ee231701a047f52f535810c
|
099da16d748e89106b6abea62e49641afe68d04b
|
/migen/build/platforms/upduino_v1.py
|
8f41eb95fde882e115a3239ed0c902de246e7fcf
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
chipmuenk/migen
|
a191db2e0bbe8f6e1dfc87e54acf2ded13ce69a0
|
a7eb394f46ac9d71f4598919294aa9efd1137bfe
|
refs/heads/master
| 2020-05-18T23:29:09.950066
| 2019-11-01T18:56:14
| 2019-11-01T18:56:14
| 184,712,987
| 3
| 0
|
NOASSERTION
| 2019-05-03T07:08:37
| 2019-05-03T07:08:36
| null |
UTF-8
|
Python
| false
| false
| 2,945
|
py
|
from migen import *
from migen.build.generic_platform import *
from migen.build.lattice import LatticePlatform
from migen.build.lattice.programmer import IceStormProgrammer
_io = [
("rgb_led", 0,
Subsignal("r", Pins("41")),
Subsignal("g", Pins("40")),
Subsignal("b", Pins("39")),
IOStandard("LVCMOS33")
),
]
spiflash = [
# Only usable in PROG FLASH mode - see JP2 header
("spiflash", 0,
Subsignal("cs_n", Pins("16"), IOStandard("LVCMOS33")),
Subsignal("clk", Pins("15"), IOStandard("LVCMOS33")),
Subsignal("mosi", Pins("14"), IOStandard("LVCMOS33")),
Subsignal("miso", Pins("17"), IOStandard("LVCMOS33")),
),
]
_connectors = [
# JP5's pinout is all Free, except 1 (3.3V) and 2 (GND).
# 3 4 5 6 7 8 9 10 11 12 13 14 15 16
("JP5", "23 25 26 27 32 35 31 37 34 43 36 42 38 28"),
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
("JP6", "12 21 13 19 18 11 9 6 44 4 3 48 45 47 46 2"),
]
class MachClock(Module):
    """Instantiates the iCE40 UP5K on-chip high-frequency oscillator
    (SB_HFOSC) and drives *out* with it.

    NOTE(review): *period* is accepted but never used -- the SB_HFOSC
    frequency is fixed by the silicon; confirm whether a divider should
    be derived from it.
    """
    def __init__(self, period, out):
        self.specials += Instance("SB_HFOSC",
            i_CLKHFPU=C(1),  # power up the oscillator
            i_CLKHFEN=C(1),  # enable the clock output
            o_CLKHF=out
        )
class HfoscRouting(Module):
    """Routes the single on-chip SB_HFOSC oscillator to at most one clock
    signal; a second request raises ConstraintError."""

    def __init__(self):
        self.hfosc_used = False  # only one default clock
        self.mach_clk_sig = Signal()

    def mk_clk(self, name, clk_period):
        """Bind the oscillator to a signal named *name* and return it."""
        if self.hfosc_used:
            raise ConstraintError
        self.mach_clk_sig.name_override = name
        self.submodules.mclk = MachClock(clk_period, self.mach_clk_sig)
        self.hfosc_used = True
        return self.mach_clk_sig
class Platform(LatticePlatform):
    """UPduino v1 platform definition (iCE40 UP5K, icestorm toolchain).

    The internal SB_HFOSC oscillator is exposed as a pseudo-resource named
    "sb_hfosc" via :meth:`request`.
    """
    default_clk_name = "sb_hfosc"
    default_clk_period = 48

    def __init__(self):
        self.sb_hfosc_routing = HfoscRouting()  # internal oscillator routing
        LatticePlatform.__init__(self, "ice40-up5k-sg48", _io, _connectors,
                                 toolchain="icestorm")

    def request(self, *args, **kwargs):
        """Request a platform resource; "sb_hfosc" is synthesized from the
        internal oscillator instead of a physical pin."""
        try:
            sig = GenericPlatform.request(self, *args, **kwargs)
        except ConstraintError:
            if args[0] == "sb_hfosc":
                # Do not add to self.constraint_manager.matched because we
                # don't want this signal to become part of the UCF.
                sig = self.sb_hfosc_routing.mk_clk("sb_hfosc", 48)
            else:
                # Previously fell through and crashed with an
                # UnboundLocalError on ``sig``; re-raise the real failure.
                raise
        return sig

    def do_finalize(self, f, *args, **kwargs):
        """Emit the oscillator fragment and finalize the platform."""
        f += self.sb_hfosc_routing.get_fragment()
        # Handle cases where hfosc is or is not the default clock.
        if self.default_clk_name != "sb_hfosc":
            GenericPlatform.do_finalize(self, f, *args, **kwargs)
        if self.default_clk_name == "sb_hfosc":
            self.default_clk_period = 48
|
[
"sb@m-labs.hk"
] |
sb@m-labs.hk
|
d84ff8f2c78aba9a9b9af85e7fa3a9b9e16bab5c
|
3ea9509a26e59fafc4f53d4c5cf82cf1c600c2dc
|
/nn/registry.py
|
087c86b1fec4023f905a9b9be17f8b60f5d60428
|
[
"Apache-2.0"
] |
permissive
|
mgilgamesh/grl
|
696230afe03d8332909452941c5d36cf23bd734c
|
7d42bb2e78bc3e7b7c3ebbcf356a4d1cf12abebf
|
refs/heads/master
| 2023-08-30T23:10:39.439264
| 2021-10-31T04:17:06
| 2021-10-31T04:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
import functools
from nn.utils import Dummy
class Registry:
    """A simple string -> callable registry with a decorator interface.

    ``None`` maps to the ``Dummy`` placeholder so ``get(None)`` is valid.
    """
    def __init__(self, name):
        # Previously *name* was accepted but silently dropped; keep it for
        # debugging/introspection (backward-compatible addition).
        self.name = name
        self._mapping = {None: Dummy}

    def register(self, name):
        """Decorator: register the decorated callable under *name*."""
        def _thunk(func):
            self._mapping[name] = func
            return func
        return _thunk

    def get(self, name):
        """Resolve *name*; non-string, non-None values pass through as-is
        (callers may already hold the callable)."""
        if isinstance(name, str) or name is None:
            return self._mapping[name]
        return name

    def contain(self, name):
        """Return True if *name* is registered."""
        return name in self._mapping

    def get_all(self):
        """Return the underlying mapping (including the None -> Dummy entry)."""
        return self._mapping
# Global registries, one per network-component category.
layer_registry = Registry(name='layer')
am_registry = Registry(name='am')   # convolutional attention modules
block_registry = Registry(name='block')
subsample_registry = Registry(name='subsample')
cnn_registry = Registry(name='cnn')
def register_all(registry, globs):
    """Register every ``functools.partial`` found in *globs* (typically a
    module's ``globals()``) under its variable name; skip everything else."""
    for name, obj in globs.items():
        if not isinstance(obj, functools.partial):
            continue
        registry.register(name)(obj)
|
[
"122134545@qq.com"
] |
122134545@qq.com
|
5b44eb7cc37bf636603b548625f23fcca036ddc4
|
3d705ec48c94373817e5f61d3f839988910431e3
|
/lib/interface/boss/bill_handler.py
|
5bcbc1c4db585f80edef42bb49152c62ff1591ea
|
[] |
no_license
|
namesuqi/zeus
|
937d3a6849523ae931162cd02c5a09b7e37ebdd8
|
3445b59b29854b70f25da2950016f135aa2a5204
|
refs/heads/master
| 2022-07-24T14:42:28.600288
| 2018-03-29T08:03:09
| 2018-03-29T08:03:09
| 127,256,973
| 0
| 0
| null | 2022-07-07T22:57:57
| 2018-03-29T07:53:16
|
Python
|
UTF-8
|
Python
| false
| false
| 6,514
|
py
|
# coding=utf-8
"""
boss系统-计费模块 自动化用例相关的脚本
__author__ = 'liwenxuan'
20170605
"""
import time
from random import choice
from lib.database.pykafka_handler import pykafka_producer, pykafka_consumer
from lib.interface.boss.time_handler import get_second_to_int
from lib.database.mysql_db_v2 import MysqlDB
from lib.interface.boss.environment_constant import BOSS_CRM_HOST
def send_billing_logs_to_kafka(kafka_hosts, schema_host, schema_port, topic, logs_list, consumer_group):
    """
    Write logs straight to Kafka and confirm that every written log can be
    consumed back.
    :param kafka_hosts: Kafka cluster hosts, e.g. "192.168.1.230:9092,192.168.1.232:9092,..."
    :param schema_host: schema registry host, e.g. "192.168.1.230"
    :param schema_port: schema registry port, e.g. 8081
    :param topic: topic for the logs, e.g. "test_b_download_flow"
    :param logs_list: logs to write, e.g. [{k1: v1, k2: v2, ...}, ...]
    :param consumer_group: consumer group for the logs, e.g. "boss_bill_daily_test"
    :return: True if the consumed count matches the written count, else False
    """
    # Drain the topic first so the later count reflects only our writes.
    pykafka_consumer(kafka_hosts, schema_host, schema_port, topic, consumer_group)
    time.sleep(1)
    pykafka_producer(kafka_hosts, schema_host, schema_port, logs_list, topic, write_time=-7)
    time.sleep(5)  # give Kafka time to commit before consuming back
    actual_logs_count = len(pykafka_consumer(kafka_hosts, schema_host, schema_port, topic, consumer_group))
    if actual_logs_count == len(logs_list):
        return True
    else:
        print "total", len(logs_list), "logs, receive", actual_logs_count, "logs"
        return False
def create_download_logs_list(block_count, logs_count, prefix, ts_second, domain_list):
    """Build a list of synthetic download logs for billing tests.

    :param block_count: number of log "blocks" (a test grouping, distinct
        from the BOSS block concept)
    :param logs_count: logs per block
    :param prefix: peer_id prefix for every generated log
    :param ts_second: base timestamp in seconds
    :param domain_list: candidate domains for each log's url
    :return: list of log dicts
    """
    logs = []
    for block_idx in range(block_count):
        flow = 1000000 + block_idx * 10240
        # First 8 chars of the 16-char log id encode the send date/time.
        id_prefix = get_second_to_int()
        for log_idx in range(logs_count):
            log_id = str(id_prefix) + str(log_idx).rjust(8, "F")
            peer_id = str(prefix).zfill(8) + "FFFFFFFF" + log_id
            url = "http://{0}/".format(choice(domain_list))
            # Jitter the timestamp by up to 5 minutes and sub-second noise.
            timestamp = (int(ts_second) - choice(range(0, 301))) * 1000 - choice(range(0, 1000))
            logs.append({"id": log_id, "timestamp": timestamp, "peer_id": peer_id, "url": url,
                         "play_type": "live", "vvid": "boss_daily_test", "duration": 60,
                         "app": flow, "cdn": flow * 3, "p2p": flow * 4,
                         "public_ip": "192.168.0.0", "sdk_agent_name": "boss_daily_test",
                         "sdk_agent_version": "3.11.0"})
    return logs
def create_upload_logs_list(block_count, logs_count, prefix, ts_second):
    """Build a list of synthetic upload logs for billing tests.

    :param block_count: number of log "blocks" (a test grouping, distinct
        from the BOSS block concept)
    :param logs_count: logs per block
    :param prefix: peer_id prefix for every generated log
    :param ts_second: base timestamp in seconds
    :return: list of log dicts
    """
    logs = []
    for block_idx in range(block_count):
        flow = 1000000 + block_idx * 10240
        # First 8 chars of the 16-char log id encode the send date/time.
        id_prefix = get_second_to_int()
        for log_idx in range(logs_count):
            log_id = str(id_prefix) + str(log_idx).rjust(8, "F")
            peer_id = str(prefix).zfill(8) + "FFFFFFFF" + log_id
            # Jitter the timestamp by up to 5 minutes and sub-second noise.
            timestamp = (int(ts_second) - choice(range(0, 301))) * 1000 - choice(range(0, 1000))
            logs.append({"id": log_id, "timestamp": timestamp, "peer_id": peer_id,
                         "play_type": "live", "duration": 60,
                         "upload": flow, "public_ip": "192.168.0.0"})
    return logs
def compare_results_for_billing(block_count, logs_count, prefix, ts_second, category, price, unit):
    """
    Check whether the expected billing result matches the actual result
    stored in the CRM database.
    :param block_count: number of log "blocks" (a test grouping, distinct
        from the BOSS block concept)
    :param logs_count: logs per block
    :param prefix: peer_id prefix of the generated logs
    :param ts_second: log timestamp in seconds
    :param category: billing category, "download"/"upload"
    :param price: unit price configured in CRM
    :param unit: pricing unit configured in CRM, "KB"/"MB"/"GB"
    :return: True if expected and actual agree within 1e-6, else False
    """
    assert unit in ("KB", "MB", "GB")
    # Recompute the per-block flow total with the same formula the log
    # generators use (1000000 + i * 10240 bytes per log).
    account = 0
    for i in range(logs_count):
        flow = 1000000 + i * 10240
        account += flow
    print "account one block:", account
    total_account = account * block_count
    print "account all block:", total_account
    total_money = total_account * price
    print "money (B):", total_money
    # Convert the byte totals into the CRM's configured unit.
    if unit == "KB":
        expect_account = float(total_account)/1024
        expect_money = float(total_money)/1024
    elif unit == "MB":
        expect_account = float(total_account)/1024/1024
        expect_money = float(total_money)/1024/1024
    else:
        expect_account = float(total_account)/1024/1024/1024
        expect_money = float(total_money)/1024/1024/1024
    # Query the last 10 minutes of billing rows for this prefix.
    timestamp_end = int(ts_second)
    timestamp_start = timestamp_end - 10 * 60
    sql = "select sum(account), sum(money) from {0}_billing where ts between {1} and {2} and prefix = '{3}'"\
        .format(category, timestamp_start, timestamp_end, prefix)
    mysql_db = MysqlDB(host=BOSS_CRM_HOST)
    actual_account, actual_money = mysql_db.execute(sql).one_by_one()
    del mysql_db
    # Compare with a small float tolerance.
    if abs(actual_account - expect_account) <= 0.000001 and abs(actual_money - expect_money) <= 0.000001:
        return True
    else:
        print "account - expect:", expect_account, "; actual:", actual_account
        print "money - expect:", expect_money, "; actual:", actual_money
        return False
def clear_logs(customer_id, category):
    """Purge logs older than five days so repeated automated runs don't
    fill up the BOSS test server's disk."""
    cutoff_ms = (int(time.time()) - 86400 * 5) * 1000
    statement = "delete from {0}_log_{1} where timestamp <= {2}".format(category, customer_id, cutoff_ms)
    db = MysqlDB(host=BOSS_CRM_HOST)
    db.execute(statement)
    time.sleep(1)
    del db
|
[
"suqi_name@163.com"
] |
suqi_name@163.com
|
57a53454f6247d419b233ade15faf301089c4935
|
c94a678a2b78907d79bfdbde2a0f19f345d5d68c
|
/code/week03/two_level_menu.py
|
e03ffd4315bd6fb842bf1a80b7ce673bf4c09a25
|
[] |
no_license
|
Heroes-Academy/IntroPython_Winter2017
|
7f26c009e2d2706128f2fc7ad906e95b4c7324c2
|
fdf467fa95b3d0708d40711dfcc9c734c9dd1226
|
refs/heads/master
| 2021-01-13T03:53:20.238013
| 2017-03-12T15:21:38
| 2017-03-12T15:21:38
| 78,315,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 587
|
py
|
# NOTE(review): this is a student exercise TEMPLATE and is intentionally
# incomplete -- the ``# get the selection`` / condition placeholders make
# it syntactically invalid until the student fills them in.
print("My firt menu")
print("1. Hear a joke")
menu_choice = # get the selection
if # put the condition in here#
    print("Do you want a rabbit joke or science joke?")
    next_menu_choice = # get the next selection
    if next_menu_choice == "rabbit":
        print("What do you call a happy rabbit?")
        print("A hop-timist!")
    elif ### put the condition for science here:
        print("What did the receiver say to the radio wave?")
        print("Ouch! That megahertz!")
    else:
        print("I don't have that joke")
else:
    print("I don't have that menu option!")
|
[
"brian.c.mcmahan@gmail.com"
] |
brian.c.mcmahan@gmail.com
|
6c3b088984d81299af5aed927b416186025fa04c
|
eda678c6158431430fa195fd5d51c424293fc724
|
/experiments/subvariant_transfer/Snakefile
|
de29e27ae35c8a0f3a52d11fb6d2e2853a241462
|
[] |
no_license
|
ohsu-comp-bio/dryads-research
|
8e75ecf812aa3c5139cffacf43116772d6a36376
|
c5c4b9e3c5e4ae5820b1dcfa669abf222e85d0db
|
refs/heads/master
| 2023-04-12T20:55:52.147569
| 2021-08-14T21:36:57
| 2021-08-14T21:36:57
| 139,887,441
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,196
|
# Snakemake workflow for the subvariant-transfer experiment: fit tasks in
# parallel on scratch storage, then consolidate into the data directory.
import os

# Experiment code lives inside the repository checkout.
LOCALDIR = os.path.join(os.environ['CODEDIR'],
                        'HetMan', 'experiments', 'subvariant_transfer')

# Scratch directory, keyed on cohort / sample cutoff / mutation levels /
# classifier + excluded mutation type.
TMPDIR = os.path.join(os.environ['TEMPDIR'],
                      'HetMan', 'subvariant_transfer',
                      "{}__samps-{}".format(config['cohorts'],
                                            config['samp_cutoff']),
                      config['mut_levels'],
                      "{}_{}".format(config['classif'], config['ex_mtype']))

# Final output location.
OUTDIR = os.path.join(os.environ['DATADIR'], 'HetMan', 'subvariant_transfer',
                      "{}__samps-{}".format(config['cohorts'],
                                            config['samp_cutoff']))

# These rules run on the submit host rather than the cluster.
localrules: target, consolidate

rule target:
    input:
        expand("{outdir}/out-data__{mut_levels}_{classif}_{ex_mtype}.p",
               outdir=OUTDIR, mut_levels=config['mut_levels'],
               classif=config['classif'], ex_mtype=config['ex_mtype'])

    threads: 1

rule transfer:
    output: "{TMPDIR}/output/out_task-{task_id}.p"

    threads: 12

    # Stagger task start times (sleep) to avoid hammering shared storage.
    shell: """
        set +u; source activate HetMan; set -u;
        export OMP_NUM_THREADS=1;
        sleep $(( ({wildcards.task_id} + 1) * $(shuf -i 1-13 -n 1) ));
        python {LOCALDIR}/fit_transfer.py \
                {config[classif]} {config[ex_mtype]} \
                --use_dir={TMPDIR} --task_count={config[task_count]} \
                --task_id={wildcards.task_id}
        """

rule consolidate:
    input:
        expand("{tmpdir}/output/out_task-{task_id}.p",
               tmpdir=TMPDIR, task_id=range(config['task_count']))

    output:
        expand("{outdir}/out-data__{mut_levels}_{classif}_{ex_mtype}.p",
               outdir=OUTDIR, mut_levels=config['mut_levels'],
               classif=config['classif'], ex_mtype=config['ex_mtype'])

    threads: 1

    # Merge the per-task pickles, then copy results from scratch to OUTDIR.
    shell: """
        set +u; source activate HetMan; set -u;
        python {LOCALDIR}/merge_transfer.py {TMPDIR}
        out_tag={config[mut_levels]}_{config[classif]}_{config[ex_mtype]}
        cp {TMPDIR}/out-data.p {OUTDIR}/out-data__${{out_tag}}.p
        cp {TMPDIR}/setup/cohort-data.p {OUTDIR}/cohort-data__${{out_tag}}.p
        """
|
[
"mgrzad@gmail.com"
] |
mgrzad@gmail.com
|
|
96ee7dbb079ed5aff687ac0b049615f1919675e7
|
3d8838dab84f880a9131994608c146c032eaaa6f
|
/uevents/views.py
|
ff56af7d1111b36e4ef3a3c1ec4a29826f35ed93
|
[] |
no_license
|
sergiosalonso/uevents
|
9f69c0d09a51216b3de67b37d5b2901557a32157
|
94d5adb36488194657c65817dc8ba45b16ce416a
|
refs/heads/master
| 2022-05-07T22:08:10.155075
| 2019-05-07T09:08:48
| 2019-05-07T09:08:48
| 185,363,746
| 0
| 0
| null | 2022-04-22T21:13:03
| 2019-05-07T09:02:57
|
Python
|
UTF-8
|
Python
| false
| false
| 253
|
py
|
from django.views.generic import TemplateView
class IndexView(TemplateView):
    # Landing page.
    template_name='index.html'


class LogOutView(TemplateView):
    # Shown after a successful logout.
    template_name='success_logout.html'


class LogInView(TemplateView):
    # Shown after a successful login.
    template_name='success_login.html'
|
[
"you@example.com"
] |
you@example.com
|
30c0310b3a8e60252424745a39a32a7ea679b905
|
2c9b77d91f1ba7ece443711c8c9c7280732b07fe
|
/time_trial_gui/lib/racer_driver/echo_trial_job.py
|
a4672c7d5f55994814ec380c55f7755607ad3925
|
[
"MIT"
] |
permissive
|
andresriancho/time_trial
|
d68c06dfc7fa2fc6c396b6e813d8df23ad068f76
|
d7a23dae0bc4e2ecb3eb1ea0f4a94e21861571cc
|
refs/heads/master
| 2021-01-18T02:34:46.045593
| 2015-10-30T16:46:38
| 2015-10-30T16:46:38
| 45,219,305
| 0
| 1
| null | 2015-10-30T00:33:01
| 2015-10-30T00:33:00
| null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
import subprocess
CPP_ECHO_TIMING_EXECUTABLE = "../racer/bin/run_timing_client"
def run_echo_trial_job(trial):
    """Run the C++ echo timing client for *trial* and return its stdout.

    :param trial: object carrying target_host, target_port, real_time,
        core_affinity, delay and reps attributes
    :return: raw bytes captured from the client's stdout
    :raises subprocess.CalledProcessError: if the client exits non-zero
    """
    print("Executing Echo Trial...")
    # TODO: get this from a config file
    # Build the argv in one literal instead of seven append calls.
    cmd = [
        CPP_ECHO_TIMING_EXECUTABLE,
        trial.target_host,
        str(trial.target_port),
        str(int(trial.real_time)),
        str(trial.core_affinity),
        str(trial.delay),
        str(trial.reps),
    ]
    print(cmd)
    return subprocess.check_output(cmd)
|
[
"andres.riancho@gmail.com"
] |
andres.riancho@gmail.com
|
9cfc1c903389414320a80047a53517d24b5020bd
|
283bbf2ce575ea72010e9823907285b08d20fce4
|
/breathecode/authenticate/migrations/0001_initial.py
|
401599c5ba55399253edb86f4edba14a5ef0a547
|
[] |
no_license
|
AnMora/apiv2
|
c084ffcb4ff5b7a0a01dac8fca26f4f4c37aad97
|
fa3b3f0ce4a069facdecd18e133c7b4222a0004a
|
refs/heads/master
| 2023-05-19T23:00:34.257230
| 2021-06-08T21:17:56
| 2021-06-08T21:17:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
# Generated by Django 3.0.7 on 2020-06-16 06:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the CredentialsGithub
    model, keyed on the GitHub account id and linked one-to-one to the
    Django user."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='CredentialsGithub',
            fields=[
                # GitHub's numeric account id doubles as the primary key.
                ('github_id', models.IntegerField(primary_key=True, serialize=False)),
                ('token', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=150, unique=True)),
                ('avatar_url', models.CharField(max_length=255)),
                ('name', models.CharField(max_length=150)),
                ('blog', models.CharField(max_length=150)),
                ('bio', models.CharField(max_length=255)),
                ('company', models.CharField(max_length=150)),
                ('twitter_username', models.CharField(blank=True, max_length=50, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('user', models.OneToOneField(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
[
"aalejo@gmail.com"
] |
aalejo@gmail.com
|
3c228e9863635cdf5f5389d7f9a128c741ce52bc
|
c934e7c27f0e72385218a14b4e2a7e94a747a360
|
/google-cloud-sdk/lib/googlecloudsdk/command_lib/util/crc32c.py
|
42382ea1e774df38f8d3f3912594d1f689df477a
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
PrateekKhatri/gcloud_cli
|
5f74b97494df4f61816026af9460b9c4d8e89431
|
849d09dd7863efecbdf4072a504e1554e119f6ae
|
refs/heads/master
| 2023-03-27T05:53:53.796695
| 2021-03-10T04:08:14
| 2021-03-10T04:08:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,121
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for calculating CRC32C checksums."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import base64
import six
# pylint: disable=g-import-not-at-top
try:
    # TODO(b/175725675) Make google_crc32c available with Cloud SDK.
    import google_crc32c
    IS_GOOGLE_CRC32C_AVAILABLE = True
except ImportError:
    # Fall back to the bundled pure-Python crcmod implementation.
    import gcloud_crcmod as crcmod
    IS_GOOGLE_CRC32C_AVAILABLE = False
    # Removed a stray debug ``print('using crcmod')`` -- a library module
    # must not write to stdout at import time.
def get_crc32c():
    """Returns an instance of Hashlib-like helper for CRC32C operations.

    Returns:
        The google_crc32c.Checksum instance
        if google-crc32c (https://github.com/googleapis/python-crc32c) is
        available. If not, returns the predefined.Crc instance from crcmod library.

    Usage:
        # Get the instance.
        crc = get_crc32c()
        # Update the instance with data. If your data is available in chunks,
        # you can update each chunk so that you don't have to keep everything in
        # memory.
        for chunk in chunks:
            crc.update(data)
        # Get the digest.
        crc_digest = crc.digest()
    """
    # Both backends expose the same update()/digest()/hexdigest() surface,
    # so callers never need to know which one is in use.
    if IS_GOOGLE_CRC32C_AVAILABLE:
        return google_crc32c.Checksum()
    return crcmod.predefined.Crc('crc-32c')
def get_crc32c_checksum(data):
    """Calculates the CRC32C checksum of the provided data.

    Args:
        data (bytes): The bytes over which the checksum should be calculated.

    Returns:
        An int representing the CRC32C checksum of the provided bytes.
    """
    checksum = get_crc32c()
    checksum.update(six.ensure_binary(data))
    # hexdigest() gives a hex string; parse it back into an integer.
    return int(checksum.hexdigest(), 16)
def get_crc32c_hash(data):
    """Calculates the base64-encoded CRC32C hash for the provided data.

    GCS objects store their CRC32C hash in this base64 form, so this is the
    convenient representation for comparing against object metadata.

    Args:
        data (bytes): Bytes over which the hash should be calculated.

    Returns:
        A string representing the base64 encoded CRC32C hash.
    """
    checksum = get_crc32c()
    checksum.update(six.ensure_binary(data))
    digest_b64 = base64.b64encode(checksum.digest())
    return digest_b64.decode('ascii')
def does_crc32c_checksum_match(data, data_crc32c_checksum):
    """Checks if the checksum of *data* matches the supplied checksum.

    Args:
        data (bytes): Bytes over which the checksum should be calculated.
        data_crc32c_checksum (int): Checksum against which data's checksum
            will be compared.

    Returns:
        True iff both checksums match.
    """
    computed = get_crc32c_checksum(data)
    return computed == data_crc32c_checksum
|
[
"actions@github.com"
] |
actions@github.com
|
eadc5422a39457611dd5e83a0283a5b1f65b9fe1
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/app_code_base_info.py
|
306617ad18fd84a8907b99a86bf0874ecf9207a2
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,356
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AppCodeBaseInfo:
    """Generated SDK model for an API Gateway AppCode record.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'app_code': 'str',
        'id': 'str',
        'app_id': 'str',
        'create_time': 'str'
    }

    attribute_map = {
        'app_code': 'app_code',
        'id': 'id',
        'app_id': 'app_id',
        'create_time': 'create_time'
    }

    def __init__(self, app_code=None, id=None, app_id=None, create_time=None):
        """AppCodeBaseInfo

        The model defined in huaweicloud sdk

        :param app_code: AppCode value. Letters and the characters
            +_!@#$%+/= are allowed; it must start with a letter, '+' or '/';
            64-180 characters.
        :type app_code: str
        :param id: Record ID.
        :type id: str
        :param app_id: Application ID.
        :type app_id: str
        :param create_time: Creation time.
        :type create_time: str
        """
        self._app_code = None
        self._id = None
        self._app_id = None
        self._create_time = None
        self.discriminator = None

        # app_code is required; the other fields are optional.
        self.app_code = app_code
        if id is not None:
            self.id = id
        if app_id is not None:
            self.app_id = app_id
        if create_time is not None:
            self.create_time = create_time

    @property
    def app_code(self):
        """Gets the app_code of this AppCodeBaseInfo.

        AppCode value. Letters and the characters +_!@#$%+/= are allowed;
        it must start with a letter, '+' or '/'; 64-180 characters.

        :return: The app_code of this AppCodeBaseInfo.
        :rtype: str
        """
        return self._app_code

    @app_code.setter
    def app_code(self, app_code):
        """Sets the app_code of this AppCodeBaseInfo.

        AppCode value. Letters and the characters +_!@#$%+/= are allowed;
        it must start with a letter, '+' or '/'; 64-180 characters.

        :param app_code: The app_code of this AppCodeBaseInfo.
        :type app_code: str
        """
        self._app_code = app_code

    @property
    def id(self):
        """Gets the id of this AppCodeBaseInfo.

        Record ID.

        :return: The id of this AppCodeBaseInfo.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this AppCodeBaseInfo.

        Record ID.

        :param id: The id of this AppCodeBaseInfo.
        :type id: str
        """
        self._id = id

    @property
    def app_id(self):
        """Gets the app_id of this AppCodeBaseInfo.

        Application ID.

        :return: The app_id of this AppCodeBaseInfo.
        :rtype: str
        """
        return self._app_id

    @app_id.setter
    def app_id(self, app_id):
        """Sets the app_id of this AppCodeBaseInfo.

        Application ID.

        :param app_id: The app_id of this AppCodeBaseInfo.
        :type app_id: str
        """
        self._app_id = app_id

    @property
    def create_time(self):
        """Gets the create_time of this AppCodeBaseInfo.

        Creation time.

        :return: The create_time of this AppCodeBaseInfo.
        :rtype: str
        """
        return self._create_time

    @create_time.setter
    def create_time(self, create_time):
        """Sets the create_time of this AppCodeBaseInfo.

        Creation time.

        :param create_time: The create_time of this AppCodeBaseInfo.
        :type create_time: str
        """
        self._create_time = create_time

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask fields declared sensitive; none currently for this model.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        # Python 2 legacy shim: force UTF-8 default encoding before dumping.
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AppCodeBaseInfo):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
d823a54db8bd861ca7aad7a392278d086fec9ee3
|
002f694e38c4b028e70b393510eaa98eb3b4d20f
|
/ga3c/EnvironmentHandler.py
|
8d60913691d177b0b2c7fa4f25f75fe3f2750de8
|
[] |
no_license
|
jmribeiro/HybridGA3C
|
993cb4579ba0253b3b10a2160982398a0ca07e09
|
9b452e877c5c6ca0e8482c9ba3d6c3d9df7acec1
|
refs/heads/master
| 2020-03-08T15:09:47.305742
| 2018-04-19T14:20:23
| 2018-04-19T14:20:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,777
|
py
|
from queue import Queue
import gym
import numpy as np
import scipy.misc as misc
import Config
class EnvironmentHandler:
    """Wraps a Gym Atari environment and serves stacked, preprocessed
    frames suitable for a convolutional policy network."""

    def __init__(self, render, render_lock):
        # render_lock serializes render() calls across worker threads.
        self.environment = gym.make(Config.ATARI_GAME)
        self.action_space = self.environment.action_space.n
        # Rolling window of the last STACKED_FRAMES preprocessed frames.
        self.lookback_memory = Queue(maxsize=Config.STACKED_FRAMES)
        self.should_render = render
        self.render_lock = render_lock

    def reset(self):
        """Reset the environment and step with a no-op action (0) until
        enough frames have accumulated to form a full stacked state."""
        self.lookback_memory.queue.clear()
        state = self._state(self.environment.reset())
        while state is None:
            state, _, _ = self.step(0)
        return state

    def step(self, action):
        """Apply *action*; return (next_state, reward, done).  next_state
        is None until the frame stack is full again."""
        if self.should_render:
            with self.render_lock:
                self.environment.render()
        observation, reward, done, _ = self.environment.step(action)
        next_state = self._state(observation)
        return next_state, reward, done

    def _state(self, observation):
        """Push *observation* into the frame stack and return the stacked
        state, or None while the stack is still filling."""
        # Already had full depth, remove the oldest
        if self.lookback_memory.full(): self.lookback_memory.get()
        # Add the new one
        self.lookback_memory.put(self._preprocess(observation))
        # Game hasn't stacked enough frames yet
        if not self.lookback_memory.full():
            return None
        else:
            # Stack state: (frames, H, W) -> (H, W, frames) channel-last.
            state = np.array(self.lookback_memory.queue)
            return np.transpose(state, [1, 2, 0])

    def _preprocess(self, observation):
        """Grayscale (ITU-R 601 luma weights), resize, and rescale the RGB
        frame into [-1, 1) float32."""
        grayscale_image = np.dot(observation[..., :3], [0.299, 0.587, 0.114])
        # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3;
        # this requires an old SciPy (or a port to PIL) -- confirm pin.
        resized_image = misc.imresize(grayscale_image, [Config.IMAGE_HEIGHT, Config.IMAGE_WIDTH], 'bilinear')
        processed_image = resized_image.astype(np.float32) / 128.0 - 1.0
        return processed_image
|
[
"jmribeiro77209@gmail.com"
] |
jmribeiro77209@gmail.com
|
23ff84bfd5f8cbbc3030ea2a7ba5a7f0bb1b580c
|
73eb0cd7364a35cbc44b9d51bc7ff63d9646d540
|
/Python Cheat sheets/Classname.py
|
4aa4f0aed6fc6ac21c461e297e34912fc9a5dbee
|
[] |
no_license
|
RMJ2/DI_bootcamp
|
09ac8f5b9915db641f0f29fd0f556d43907b3b21
|
632338d57fc4c838a4eb201056e6651b865740a2
|
refs/heads/main
| 2023-03-08T16:16:15.015672
| 2021-02-23T09:04:23
| 2021-02-23T09:04:23
| 329,610,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,037
|
py
|
# Step 1
# Create a new class called Animal.
# It should take 2 parameters, "species" and "habitat"
# (make the class defintion and the init method)
# Step2
# add another parameter to your class, which is the sound that the animal makes.
# write a method called talk, that prints out the animals sound.
# Step3
# create 2 instance of animals, and make each one talk.
class Animal:
    """An animal described by its species, its habitat and the sound it makes."""

    def __init__(self, species, habitat, sound):
        # Store the constructor arguments as instance attributes.
        self.species = species
        self.habitat = habitat
        self.sound = sound

    def talk(self):
        """Print this animal's sound."""
        print(self.sound)
''' Step 3 '''
# Instantiate two animals and make each one talk.
a1 = Animal('lion', 'mountains', 'meow')
a2 = Animal('cat', 'house', 'roar')
a1.talk()  # -> meow
a2.talk()  # -> roar
#---------------------------------------------------------------------------------------------
# Exercise 2 : Dogs
# Create a class Dog.
# In this class, create a method __init__, that takes two parameters : nameand height. This function instantiates two attributes, which values are the parameters.
# Create a method named bark that prints “ goes woof!”
# Create a method jump that prints the following “ jumps cm high!” where x is the height*2.
# Outside of the class, create an object davids_dog. His dog’s name is “Rex” and his height is 50cm.
# Print the details of his dog by calling the methods.
# Create an object sarahs_dog. Her dog’s name is “Teacup” and his height is 20cm.
# Print the details of her dog by calling the methods.
# Create an if statement outside of the class to check which dog is bigger. Print the name of the bigger dog.
# class Dog:
# def __init__(self, name, height):
# self.name = name
# self.height = height
# def bark(self):
# print('goes woof!')
# def jump(self):
# x = height*2
# print(f'jumps {x}cm high!')
# davids_dog = Dog('Rex', 50)
# print(f'{davids_dog.name} is {davids_dog.height}cm.')
# sarahs_dog = Dog('Teacup', 20)
# print(f'{sarahs_dog.name} is {sarahs_dog.height}cm.')
# list_of_dogs = [davids_dog, sarahs_dog]
# tallest_for_now = 0
# tallest_dog = None
# for dog in list_of_dogs:
# if dog.height > tallest_for_now:
# tallest_dog = dog
# tallest_for_now = dog.height
# print(f' The tallest dog is {tallest_dog.name}')
#---------------------------------------------------------------------------------------------
# Bank Account
class Bank():
    """A minimal bank account with a balance and a transaction history."""

    def __init__(self, account, pin):
        # Account identifiers supplied by the caller.
        self.account = account
        self.pin = pin
        self.balance = 0   # every account starts empty
        self.history = []  # chronological 'Deposit:'/'Withdraw:' entries

    def deposit(self, amount):
        """Add a positive amount to the balance and record it."""
        if amount <= 0:  # to ensure no negative deposit can be made
            print('You must deposit a positive amount')
        else:
            self.balance += amount  # new balance, made by amount deposited
            self.history.append(f'Deposit: {amount}')

    def withdraw(self, amount):
        """Remove ``amount`` from the balance if funds allow and return it.

        Returns None (after printing a message) when the amount is not
        positive or exceeds the balance.
        """
        if amount <= 0:
            # Bug fix: a negative "withdrawal" previously slipped past the
            # balance check and *increased* the balance (balance -= negative).
            print('You must withdraw a positive amount')
        elif amount > self.balance:  # no withdrawal beyond available funds
            print('You do not have enough funds')
        else:
            self.balance -= amount  # new balance, made by amount withdrawn
            self.history.append(f'Withdraw: {amount}')
            return amount

    def show_balance(self):
        """Print the current balance."""
        print(self.balance)

    def show_history(self):
        """Print each recorded transaction on its own line."""
        for thing in self.history:
            print(thing)
# Demo: open an account, make two deposits and one withdrawal.
b1 = Bank(12345, 54321)
for amount in (100, 100):
    b1.deposit(amount)
b1.withdraw(30)
#---------------------------------------------------------------------------------------------
# Calculator
class Calc:
    """Namespace of four arithmetic routines.

    Each routine prints its result and returns None. They are invoked on
    the class itself (e.g. ``Calc.add(5, 5)``), so none of them takes
    ``self``.
    """

    def add(x, y):
        # Sum of the two operands.
        print(x + y)

    def sub(x, y):
        # Difference x - y.
        print(x - y)

    def mult(x, y):
        # Product of the two operands.
        print(x * y)

    def div(x, y):
        # True division (always a float in Python 3).
        print(x / y)
# Exercise each operation on (5, 5). The Calc routines print the result and
# return None, so every line below also prints "None".
for operation in (Calc.add, Calc.sub, Calc.mult, Calc.div):
    print(operation(5, 5))
|
[
"myEmail@example.com"
] |
myEmail@example.com
|
5d8c946b3c08384e5b38e1628c8a3c8731002b8e
|
b049a961f100444dde14599bab06a0a4224d869b
|
/sdk/python/pulumi_azure_native/alertsmanagement/v20210401/__init__.py
|
b63d1d0b1d3561f095248d826033a4565d0bb685
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure-native
|
b390c88beef8381f9a71ab2bed5571e0dd848e65
|
4c499abe17ec6696ce28477dde1157372896364e
|
refs/heads/master
| 2023-08-30T08:19:41.564780
| 2023-08-28T19:29:04
| 2023-08-28T19:29:04
| 172,386,632
| 107
| 29
|
Apache-2.0
| 2023-09-14T13:17:00
| 2019-02-24T20:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 389
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_smart_detector_alert_rule import *
from .smart_detector_alert_rule import *
from ._inputs import *
from . import outputs
|
[
"github@mikhail.io"
] |
github@mikhail.io
|
9ebfefbdb3f40d1c7ba46b70b96d8d45174cab84
|
1a964c7860f9d95c31ca4b8fd4d36a74da1cbf2f
|
/ParsingWebsite/venv/bin/pip3.7
|
b012e092643cb6604adcbaf8463d46f14d877e20
|
[] |
no_license
|
azatnt/ParsingKolesa
|
8aebb2159ef6d2458604b4530809ca55c0fd5b33
|
c20cea97acb3a25f9ac6632de7afea38df59332f
|
refs/heads/main
| 2023-02-20T09:13:16.431579
| 2021-01-25T09:31:59
| 2021-01-25T09:31:59
| 332,691,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
7
|
#!/Users/sulpak/PycharmProjects/ParsingWebsite/venv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim for pip, generated inside a virtualenv.
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Strip a trailing "-script.pyw" / ".exe" from argv[0] so pip reports a
    # consistent program name across platforms, then hand control to pip.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"58590243+pr1nce07@users.noreply.github.com"
] |
58590243+pr1nce07@users.noreply.github.com
|
01d218f4d47a23dac5ba88de6147b92fbf6542a9
|
e073d58c135e4b27b861946a6e84aa5b2e0ae7f2
|
/datastructure/trie_tree/FindWords.py
|
0a243c9f396bfd1364a4582bc2eff965ce8faf41
|
[] |
no_license
|
yinhuax/leet_code
|
c4bdb69752d441af0a3bcc0745e1133423f60a7b
|
9acba92695c06406f12f997a720bfe1deb9464a8
|
refs/heads/master
| 2023-07-25T02:44:59.476954
| 2021-09-04T09:07:06
| 2021-09-04T09:07:06
| 386,097,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,529
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Mike
# @Contact : 597290963@qq.com
# @Time : 2021/2/12 16:42
# @File : FindWords.py
from typing import List
"""
给定一个 m x n 二维字符网格 board 和一个单词(字符串)列表 words,找出所有同时在二维网格和字典中出现的单词。
单词必须按照字母顺序,通过 相邻的单元格 内的字母构成,其中“相邻”单元格是那些水平相邻或垂直相邻的单元格。同一个单元格内的字母在一个单词中不允许被重复使用。
作者:力扣 (LeetCode)
链接:https://leetcode-cn.com/leetbook/read/trie/x7hd9g/
来源:力扣(LeetCode)
著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。
"""
from collections import defaultdict
class TrieTree(object):
    """Trie node.

    ``children`` maps a character to its child node, created lazily by the
    defaultdict; ``is_word`` holds the complete word at a terminal node and
    the empty string everywhere else.
    """

    def __init__(self):
        self.children = defaultdict(TrieTree)
        self.is_word = ''


class Solution:
    def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
        """Return every word in ``words`` that can be traced on ``board``.

        A word is traced through horizontally/vertically adjacent cells,
        using each cell at most once per word. Strategy: insert all the
        target words into a trie, then run a backtracking DFS from every
        cell, following only edges that exist in the trie (prefix pruning).
        """
        rows, cols = len(board), len(board[0])

        # Build the trie; a terminal node remembers the full word.
        root = TrieTree()
        for word in words:
            node = root
            for ch in word:
                node = node.children[ch]
            node.is_word = word

        found = []

        def search(r, c, node):
            ch = board[r][c]
            if ch not in node.children:
                return
            child = node.children[ch]
            if child.is_word:
                found.append(child.is_word)
                child.is_word = ''  # report each word only once
            board[r][c] = '#'       # mark this cell as used on the current path
            for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                nr, nc = r + dr, c + dc
                if 0 <= nr < rows and 0 <= nc < cols and board[nr][nc] in child.children:
                    search(nr, nc, child)
            board[r][c] = ch        # restore the cell for other paths

        for r in range(rows):
            for c in range(cols):
                search(r, c, root)
        return found
if __name__ == '__main__':
    # Smoke test: the reachable words on this grid are "oath" and "eat".
    demo_board = [["o", "a", "a", "n"], ["e", "t", "a", "e"], ["i", "h", "k", "r"], ["i", "f", "l", "v"]]
    demo_words = ["oath", "pea", "eat", "rain"]
    print(Solution().findWords(board=demo_board, words=demo_words))
|
[
"597290963@qq.com"
] |
597290963@qq.com
|
b8cb1da9859a8b7d3e73511d1f3d1e79c33b94ba
|
b987d02490ab85b51f95d04a18731c1718b740fd
|
/ctpn_new/network/anchorlayer/proposal_target_tf.py
|
5849421c836ecb9440008a3b4f7f6fcae493d893
|
[] |
no_license
|
lzd0825/text_detection_main
|
2948a4600ea9d1109ba7d1ddb163b634531f91a2
|
e2b5defd44fd31135be1bf8f7129d0e656d4a2ac
|
refs/heads/master
| 2020-03-27T20:22:09.179680
| 2018-04-03T12:10:20
| 2018-04-03T12:10:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,657
|
py
|
from .generate_anchors import generate_anchors
from lib import load_config
import numpy as np
from .anchor_nms_pf import anchor_nms
cfg = load_config()
def proposal_layer(rpn_cls_prob_reshape, rpn_bbox_pred, im_info, _feat_stride=(16,)):
    """
    Convert raw RPN outputs into scored text-line proposals.

    'rpn_cls_prob_reshape': post-softmax class probabilities, shape (1, H, W, Ax2)
    'rpn_bbox_pred': predicted regressions (y-centre and height), shape [1, H, W, 20]
    'im_info': 3-vector with image height, width and scale factor
    _feat_stride: feature-map stride relative to the input image, default (16,)
    Returns
    ----------
    rpn_rois : (1 x H x W x A, 5) e.g. [0, x1, y1, x2, y2]
    """
    _anchors = generate_anchors()  # generate the 10 base anchors
    _num_anchors = _anchors.shape[0]  # 10 anchors
    assert rpn_cls_prob_reshape.shape[0] == 1, \
        'Only single item batches are supported'
    nms_thresh = cfg.TEST.RPN_NMS_THRESH  # NMS overlap threshold (0.7)
    min_size = cfg.TEST.RPN_MIN_SIZE  # minimum proposal size; both height and width must exceed 16
    positive_thresh = cfg.TEST.LINE_MIN_SCORE  # scores above this threshold are treated as positives
    # TODO revisit this minimum size later -- change to 8?
    height, width = rpn_cls_prob_reshape.shape[1:3]  # feature-map height and width
    # Keep only the foreground scores; background scores are not needed.
    # Result shape (1, H, W, A): foreground probability per anchor.
    scores = np.reshape(np.reshape(rpn_cls_prob_reshape, [1, height, width, _num_anchors, 2])[:, :, :, :, 1],
                        [1, height, width, _num_anchors])
    # The model outputs relative regressions; they must be decoded into
    # real image coordinates further below.
    bbox_deltas = rpn_bbox_pred
    # Enumerate all shifts
    # As in anchor-target-layer-tf, build the per-cell shifts to tile the
    # base anchors over the whole image.
    shift_x = np.arange(0, width) * _feat_stride
    shift_y = np.arange(0, height) * _feat_stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                        shift_x.ravel(), shift_y.ravel())).transpose()
    A = _num_anchors
    K = shifts.shape[0]  # number of feature-map cells
    anchors = _anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    bbox_deltas = bbox_deltas.reshape((-1, 2))  # (HxWxA, 2) regression values from the model
    anchors = anchors.reshape((K * A, 4))  # all anchors over the whole image
    proposals = bbox_transform_inv(anchors, bbox_deltas)  # decode into real image coordinates
    proposals = proposals.reshape((K, 4*A))
    scores = scores.reshape((K, A))
    # Column-wise non-maximum suppression; returns surviving text fragments
    # and their scores as lists.
    proposals, scores = anchor_nms(height, width, proposals, scores, nms_thresh, positive_thresh)
    proposals = np.array(proposals).reshape((-1, 4))
    scores = np.array(scores).reshape((-1, 1))
    # Clip boxes so none extends outside the image frame.
    proposals = clip_boxes(proposals, im_info[:2])
    # Drop proposals smaller than the minimum size (scaled to the image).
    keep = _filter_boxes(proposals, min_size * im_info[2])
    proposals = proposals[keep, :]  # keep the surviving proposals
    scores = scores[keep]
    # Sort by score, highest first (argsort indices reversed).
    order = scores.ravel().argsort()[::-1]
    proposals = proposals[order, :]
    scores = scores[order]
    blob = np.hstack((scores.astype(np.float32, copy=False), proposals.astype(np.float32, copy=False)))
    # blob is an N x 5 matrix: first column the score, then the 4 box coords.
    # bbox_deltas is an N x 2 matrix, one regression pair per row.
    return blob
def bbox_transform_inv(boxes, deltas):
    """
    Apply predicted (dy, dh) regressions to anchor boxes.

    :param boxes: (N, 4) anchor coordinates, one (x1, y1, x2, y2) row each
    :param deltas: (N, 2) per-anchor regression: y-centre offset and log-height
    :return: (N, 4) adjusted boxes; x-coordinates pass through unchanged
    """
    # Encoding: dy = (gt_cy - anchor_cy) / anchor_h, dh = log(gt_h / anchor_h)
    boxes = boxes.astype(deltas.dtype, copy=False)

    anchor_heights = boxes[:, 3] - boxes[:, 1] + 1.0
    anchor_cy = boxes[:, 1] + 0.5 * anchor_heights

    pred_cy = deltas[:, 0] * anchor_heights + anchor_cy
    pred_heights = np.exp(deltas[:, 1]) * anchor_heights

    out = np.zeros(boxes.shape, dtype=deltas.dtype)
    out[:, 0] = boxes[:, 0]                   # x1 unchanged
    out[:, 1] = pred_cy - 0.5 * pred_heights  # new y1
    out[:, 2] = boxes[:, 2]                   # x2 unchanged
    out[:, 3] = pred_cy + 0.5 * pred_heights  # new y2
    return out
def clip_boxes(boxes, im_shape):
    """
    Clamp box coordinates so every box lies inside the image.

    :param boxes: (N, 4) array of x1, y1, x2, y2 (modified in place)
    :param im_shape: (height, width) of the image
    :return: the clipped boxes
    """
    max_x = im_shape[1] - 1
    max_y = im_shape[0] - 1
    # Columns 0/2 are x-coordinates, 1/3 are y-coordinates.
    for col, limit in ((0, max_x), (1, max_y), (2, max_x), (3, max_y)):
        boxes[:, col] = np.maximum(np.minimum(boxes[:, col], limit), 0)
    return boxes
def _filter_boxes(boxes, min_size):
"""Remove all boxes with any side smaller than min_size."""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
|
[
"1071221759@qq.com"
] |
1071221759@qq.com
|
3a2fc39ab03587a3455433a7490ceb28ba16c6a7
|
373035950bdc8956cc0b74675aea2d1857263129
|
/spar_python/report_generation/ta1/ta1_analysis_input.py
|
874d943a04b1e044f025d7ffc970b415bbae653f
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
limkokholefork/SPARTA
|
5d122cd2e920775d61a5404688aabbafa164f22e
|
6eeb28b2dd147088b6e851876b36eeba3e700f16
|
refs/heads/master
| 2021-11-11T21:09:38.366985
| 2017-06-02T16:21:48
| 2017-06-02T16:21:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,808
|
py
|
# *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: SY
# Description: A analysis input class
#
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 11 Sep 2013 SY Original version
# *****************************************************************
# SPAR imports:
import spar_python.report_generation.ta1.ta1_config as config
import spar_python.report_generation.ta1.ta1_schema as t1s
import spar_python.report_generation.ta1.ta1_test_database as t1tdb
class Input(dict):
    """A dictionary of report-generation inputs with TA1 helper accessors."""

    @property
    def test_db(self):
        """Build a TestDatabase from the stored record count and record size."""
        return t1tdb.TestDatabase(
            db_num_records=self.get(t1s.DBF_NUMRECORDS),
            db_record_size=self.get(t1s.DBF_RECORDSIZE),
            short_database_names=config.SHORT_DATABASE_NAMES)

    def get_constraint_list(self):
        """Return (table, field, value) triples for each populated field."""
        # Every (table, field) pair that may constrain a query.
        candidate_fields = [
            (t1s.DBF_TABLENAME, t1s.DBF_NUMRECORDS),
            (t1s.DBF_TABLENAME, t1s.DBF_RECORDSIZE),
            (t1s.DBF_TABLENAME, t1s.DBF_CAT),
            (t1s.DBF_TABLENAME, t1s.DBF_SUBCAT),
            (t1s.DBF_TABLENAME, t1s.DBF_SUBSUBCAT),
            (t1s.DBP_TABLENAME, t1s.DBP_SELECTIONCOLS),
            (t1s.DBA_TABLENAME, t1s.DBA_FIELD)]
        # Keep only the fields that actually carry a truthy value.
        return [(table, field, self.get(field))
                for (table, field) in candidate_fields
                if self.get(field)]
|
[
"mitchelljd@ll.mit.edu"
] |
mitchelljd@ll.mit.edu
|
05ac163c99119bb20a966bfb6c4d464daccb8fdf
|
f48f9798819b12669a8428f1dc0639e589fb1113
|
/office/misc/t1utils/actions.py
|
4646311793129d513d7af2b63c65c2693fad65e7
|
[] |
no_license
|
vdemir/PiSiPackages-pardus-2011-devel
|
781aac6caea2af4f9255770e5d9301e499299e28
|
7e1867a7f00ee9033c70cc92dc6700a50025430f
|
refs/heads/master
| 2020-12-30T18:58:18.590419
| 2012-03-12T03:16:34
| 2012-03-12T03:16:34
| 51,609,831
| 1
| 0
| null | 2016-02-12T19:05:41
| 2016-02-12T19:05:40
| null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006-2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
    # Run ./configure with the package's default flags.
    autotools.configure()
def build():
    # Compile with make.
    autotools.make()
def install():
    # Stage the install into the PiSi build root, then ship the docs.
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("NEWS", "README")
|
[
"kaptan@pisipackages.org"
] |
kaptan@pisipackages.org
|
3252542c883a849ab435d4838cce3bbf887c8247
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_library_operations.py
|
990d2d64e53b57c638fc900f4358193a5994d38a
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,218
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._library_operations import build_get_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operations class -- regenerate rather than hand-edit.
class LibraryOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.synapse.aio.SynapseManagementClient`'s
        :attr:`library` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # The client injects its pipeline client, config and (de)serializers
        # either positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def get(
        self, resource_group_name: str, library_name: str, workspace_name: str, **kwargs: Any
    ) -> _models.LibraryResource:
        """Get library by name.
        Get library by name in a workspace.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param library_name: Library name. Required.
        :type library_name: str
        :param workspace_name: The name of the workspace. Required.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LibraryResource or the result of cls(response)
        :rtype: ~azure.mgmt.synapse.models.LibraryResource
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Translate HTTP status codes to azure-core exception types.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2021-06-01-preview"] = kwargs.pop(
            "api_version", _params.pop("api-version", "2021-06-01-preview")
        )
        cls: ClsType[_models.LibraryResource] = kwargs.pop("cls", None)
        # Build and send the GET request through the client pipeline.
        request = build_get_request(
            resource_group_name=resource_group_name,
            library_name=library_name,
            workspace_name=workspace_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("LibraryResource", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/libraries/{libraryName}"
    }
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
e000797abae0baf2ef7c3b2faedebcc2cf39dbd4
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_2/jblmuz001/question1.py
|
2a078b63ff7024e2ae2a73d45ec3628af59c4b52
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
#Question 1
import math
# Read a year from the user and report whether it is a Gregorian leap year.
# Bug fix: eval(input(...)) executed arbitrary expressions typed by the user;
# int(...) parses the year safely.
x = int(input("Enter a year:\n"))
# Leap years: divisible by 400, or divisible by 4 but not by 100.
if(x%400==0 or (x%4==0 and x%100!=0)):
    print(x, "is a leap year.")
else:
    print(x, "is not a leap year.")
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
c466266df26b42152c21f2fafa8ea9251a2c1683
|
cd014fae6791f51a9a382f34dbdcee6d61d84e30
|
/62_From_the_border_of_hell/62.py
|
6e0657bcb1f623ade641513504631b8ec63c4cef
|
[
"Apache-2.0"
] |
permissive
|
ckclark/Hackquest
|
1505f50fc2c735db059205d1c9bbba1832cc5059
|
65ed5fd32e79906c0e36175bbd280d976c6134bd
|
refs/heads/master
| 2021-01-16T19:32:29.434790
| 2015-09-29T13:39:04
| 2015-09-29T13:39:04
| 42,388,846
| 13
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
# Each line of the letter hides two characters of the message: its first and
# last character. Collect them in order to reveal it.
question = [
    '''Toni, I write you this letter tonight with''',
    '''ebony pen and ink, to show my deep love''',
    '''and to give you my advise. If you pack your bag''',
    '''later tonight, do not feel responsible for future''',
    '''happenings, as they aren't your fault. Even, if a''',
    '''small part inside of you may think like that. All''',
    '''alternatives are equally bad. Just make sure sin''',
    '''doesn't conquer your life, and, please''',
    '''don't have unprotected sex with Mary Ann in the car late at night!''',
]
ans = []
for s in question:
    ans.append(s[0])
    ans.append(s[-1])
# Bug fix: the bare "print" statement was Python-2-only; the parenthesised
# form runs unchanged under both Python 2 and Python 3.
print(''.join(ans))
# The eagle has landed!
|
[
"clark.ck@gmail.com"
] |
clark.ck@gmail.com
|
7ad1139df56926c27e7758e42935202a504b94cb
|
cb5093d193352c521dcc60da62dd8fc8a3564231
|
/devel/lib/python2.7/dist-packages/rqt_multiplot/__init__.py
|
bf5cb228f8bf0f85959b0386f71f9ff7b566ee4d
|
[] |
no_license
|
ElliWhite/proj515_ws
|
85555dbad029d7fd10c8ffbfb8352b9cd7b4db53
|
ce51e547f2f4761850cef9116a85a34b232160c6
|
refs/heads/master
| 2020-04-23T22:11:59.284827
| 2019-05-24T16:46:30
| 2019-05-24T16:46:30
| 171,493,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
# Source directories (semicolon-separated) to prepend to sys.path so the
# devel-space package shadows nothing in the source space.
__extended_path = "/home/elliottwhite/proj515_ws/src/rqt_multiplot_plugin/rqt_multiplot/src".split(";")
for p in reversed(__extended_path):
    sys_path.insert(0, p)
    del p
del sys_path
# Make this a namespace-style package spanning the extended paths.
__path__ = extend_path(__path__, __name__)
del extend_path
# Collect any source-space module or package __init__ with our name...
__execfiles = []
for p in __extended_path:
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
        del src_init_file
del p
del os_path
del __extended_path
# ...and execute it in this module's namespace so its symbols appear here.
for __execfile in __execfiles:
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
del __execfile
del __execfiles
|
[
"elliott.white@students.plymouth.ac.uk"
] |
elliott.white@students.plymouth.ac.uk
|
b8d68d8d894d6c17419300003a7d20d74344d72a
|
c9a809c5ef2a6b5e7e50da548c182510d203f430
|
/salt/runners/state.py
|
c518e3a0d78bbbd446286c27c563e844fd7152c4
|
[
"Apache-2.0"
] |
permissive
|
andyyumiao/saltx
|
676a44c075ce06d5ac62fc13de6dcd750b3d0d74
|
a05c22a60706b5c4389adbd77581b5cf985763b5
|
refs/heads/master
| 2022-02-24T00:51:42.420453
| 2022-02-09T06:46:40
| 2022-02-09T06:46:40
| 231,860,568
| 1
| 5
|
NOASSERTION
| 2022-02-09T06:46:40
| 2020-01-05T03:10:15
|
Python
|
UTF-8
|
Python
| false
| false
| 7,000
|
py
|
# -*- coding: utf-8 -*-
'''
Execute orchestration functions
'''
# Import pytohn libs
from __future__ import absolute_import, print_function
import logging
# Import salt libs
import salt.loader
import salt.utils
import salt.utils.event
from salt.exceptions import SaltInvocationError
LOGGER = logging.getLogger(__name__)
def orchestrate(mods,
                saltenv='base',
                test=None,
                exclude=None,
                pillar=None,
                pillarenv=None,
                pillar_enc=None,
                orchestration_jid=None):
    '''
    .. versionadded:: 0.17.0
    Execute a state run from the master, used as a powerful orchestration
    system.
    .. seealso:: More Orchestrate documentation
        * :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
        * :py:mod:`Docs for the master-side state module <salt.states.saltmod>`
    CLI Examples:
    .. code-block:: bash
        salt-run state.orchestrate webserver
        salt-run state.orchestrate webserver saltenv=dev test=True
        salt-run state.orchestrate webserver saltenv=dev pillarenv=aws
    .. versionchanged:: 2014.1.1
        Runner renamed from ``state.sls`` to ``state.orchestrate``
    .. versionchanged:: 2014.7.0
        Runner uses the pillar variable
    .. versionchanged:: develop
        Runner uses the pillar_enc variable that allows renderers to render the pillar.
        This is usable when supplying the contents of a file as pillar, and the file contains
        gpg-encrypted entries.
        .. seealso:: GPG renderer documentation
        CLI Examples:
        .. code-block:: bash
            salt-run state.orchestrate webserver pillar_enc=gpg pillar="$(cat somefile.json)"
    '''
    if pillar is not None and not isinstance(pillar, dict):
        raise SaltInvocationError(
            'Pillar data must be formatted as a dictionary'
        )
    # Run against the master's own files rather than fetching from a master.
    __opts__['file_client'] = 'local'
    minion = salt.minion.MasterMinion(__opts__)
    # Fall back to the master config for unspecified environments.
    if pillarenv is None and 'pillarenv' in __opts__:
        pillarenv = __opts__['pillarenv']
    if saltenv is None and 'saltenv' in __opts__:
        saltenv = __opts__['saltenv']
    running = minion.functions['state.sls'](
        mods,
        test,
        exclude,
        pillar=pillar,
        saltenv=saltenv,
        pillarenv=pillarenv,
        pillar_enc=pillar_enc,
        __pub_jid=orchestration_jid,
        orchestration_jid=orchestration_jid)
    ret = {'data': {minion.opts['id']: running}, 'outputter': 'highstate'}
    # retcode 0 on overall success, 1 if any state failed.
    res = salt.utils.check_state_result(ret['data'])
    if res:
        ret['retcode'] = 0
    else:
        ret['retcode'] = 1
    return ret
# Aliases for orchestrate runner
orch = salt.utils.alias_function(orchestrate, 'orch')
sls = salt.utils.alias_function(orchestrate, 'sls')
def orchestrate_single(fun, name, test=None, queue=False, pillar=None, **kwargs):
    '''
    Execute a single state orchestration routine
    .. versionadded:: 2015.5.0
    CLI Example:
    .. code-block:: bash
        salt-run state.orchestrate_single fun=salt.wheel name=key.list_all
    '''
    if pillar is not None and not isinstance(pillar, dict):
        raise SaltInvocationError(
            'Pillar data must be formatted as a dictionary'
        )
    # Run against the master's own files rather than fetching from a master.
    __opts__['file_client'] = 'local'
    minion = salt.minion.MasterMinion(__opts__)
    running = minion.functions['state.single'](
        fun,
        name,
        test=test,    # bug fix: was hard-coded to None, ignoring the caller's value
        queue=queue,  # bug fix: was hard-coded to False, ignoring the caller's value
        pillar=pillar,
        **kwargs)
    ret = {minion.opts['id']: running}
    __jid_event__.fire_event({'data': ret, 'outputter': 'highstate'}, 'progress')
    return ret
def orchestrate_high(data, test=None, queue=False, pillar=None, **kwargs):
    '''
    Execute a single state orchestration routine
    .. versionadded:: 2015.5.0
    CLI Example:
    .. code-block:: bash
        salt-run state.orchestrate_high '{
            stage_one:
                {salt.state: [{tgt: "db*"}, {sls: postgres_setup}]},
            stage_two:
                {salt.state: [{tgt: "web*"}, {sls: apache_setup}, {
                    require: [{salt: stage_one}],
                }]},
            }'
    '''
    if pillar is not None and not isinstance(pillar, dict):
        raise SaltInvocationError(
            'Pillar data must be formatted as a dictionary'
        )
    # Run against the master's own files rather than fetching from a master.
    __opts__['file_client'] = 'local'
    minion = salt.minion.MasterMinion(__opts__)
    running = minion.functions['state.high'](
        data,
        test=test,    # bug fix: was hard-coded to None, ignoring the caller's value
        queue=queue,  # bug fix: was hard-coded to False, ignoring the caller's value
        pillar=pillar,
        **kwargs)
    ret = {minion.opts['id']: running}
    __jid_event__.fire_event({'data': ret, 'outputter': 'highstate'}, 'progress')
    return ret
def event(tagmatch='*',
          count=-1,
          quiet=False,
          sock_dir=None,
          pretty=False,
          node='master'):
    r'''
    Watch Salt's event bus and block until the given tag is matched
    .. versionadded:: 2014.7.0
    This is useful for utilizing Salt's event bus from shell scripts or for
    taking simple actions directly from the CLI.
    Enable debug logging to see ignored events.
    :param tagmatch: the event is written to stdout for each tag that matches
        this pattern; uses the same matching semantics as Salt's Reactor.
    :param count: this number is decremented for each event that matches the
        ``tagmatch`` parameter; pass ``-1`` to listen forever.
    :param quiet: do not print to stdout; just block
    :param sock_dir: path to the Salt master's event socket file.
    :param pretty: Output the JSON all on a single line if ``False`` (useful
        for shell tools); pretty-print the JSON output if ``True``.
    :param node: Watch the minion-side or master-side event bus.
        .. versionadded:: 2016.3.0
    CLI Examples:
    .. code-block:: bash
        # Reboot a minion and run highstate when it comes back online
        salt 'jerry' system.reboot && \\
            salt-run state.event 'salt/minion/jerry/start' count=1 quiet=True && \\
            salt 'jerry' state.highstate
        # Reboot multiple minions and run highstate when all are back online
        salt -L 'kevin,stewart,dave' system.reboot && \\
            salt-run state.event 'salt/minion/*/start' count=3 quiet=True && \\
            salt -L 'kevin,stewart,dave' state.highstate
        # Watch the event bus forever in a shell while-loop.
        salt-run state.event | while read -r tag data; do
            echo $tag
            echo $data | jq --color-output .
        done
    .. seealso::
        See :blob:`tests/eventlisten.sh` for an example of usage within a shell
        script.
    '''
    # Delegate entirely to the raw state execution module's event listener.
    statemod = salt.loader.raw_mod(__opts__, 'state', None)
    return statemod['state.event'](
        tagmatch=tagmatch,
        count=count,
        quiet=quiet,
        sock_dir=sock_dir,
        pretty=pretty,
        node=node)
|
[
"yumiao3@jd.com"
] |
yumiao3@jd.com
|
8f1eca7502fed553159d86ab0a9a9fc3b4e6cc4e
|
830465731dfda87b4141546262f20d74c29297bf
|
/PWN/picoCTF2018/gps/sol.py
|
30b5d3d95371c78ea26ca454d6de1719db8ff1e5
|
[] |
no_license
|
jchen8tw-research/CTF
|
f559d7ca0e16a730335b11caeeae208c42e8bf17
|
f49615c24437a9cc6a2c20d6b30cb5abf7a32b71
|
refs/heads/master
| 2023-03-17T12:29:08.630613
| 2021-03-23T06:31:26
| 2021-03-23T06:31:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 591
|
py
|
#coding=utf-8
# Exploit for picoCTF 2018 "gps": the binary leaks a stack address, so we
# plant a NOP sled + shellcode and jump into the middle of the sled.
from pwn import *
import re
#context.log_level = 'debug'
#context(arch='amd64', os='linux', bits = '64')
context.binary = './gps'
debug = 0
if not debug:
    r = remote('2018shell.picoctf.com', 49351)
else:
    r = process('./gps')
nop = asm(shellcraft.nop())
shell = asm(shellcraft.sh())
#shell = asm(pwnlib.shellcraft.amd64.linux.sh())
# Fill the 0x1000-byte buffer: sled first, shellcode at the end.
# NOTE(review): the "- 3" presumably accounts for trailing input bytes; confirm
# against the binary's read size.
payload = nop * (0x1000 - len(shell) - 3) + shell
# Parse the leaked buffer address out of the banner, then aim 2000 bytes in
# so we land safely inside the NOP sled.
addr = int(r.recvuntil('> ').split('\n')[9].split(': ')[1][2:], 16)
addr += 2000
log.info('addr: {}'.format(addr))
r.sendline(payload)
r.sendlineafter('> ', hex(addr)[2:])
r.interactive()
|
[
"cpr1014@gmail.com"
] |
cpr1014@gmail.com
|
7d06158920b29367d17b448de2179236f193de27
|
b19c9fe62eaa309851dc11f6fd7a05bda463fb58
|
/bigfish/apps/reports/urls.py
|
5e6efab43043385bd460b9f0af28d1b83c72092f
|
[] |
no_license
|
hyu9999/bigfish
|
3ff3b025982e71bd6dd80f60ad6c70e735e98936
|
4189fdcacc20795a4778b53c9d47d6fdd3e71811
|
refs/heads/master
| 2022-07-08T13:55:12.908583
| 2019-03-22T09:36:12
| 2019-03-22T09:36:12
| 177,055,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
from django.conf.urls import url
from rest_framework import routers
router = routers.SimpleRouter()
# router.register(r'studies/enters', EnterStudyViewSet)
# router.register(r'studies/conversations', ConversationViewSet)
# router.register(r'studies/savedatainfo', SaveDataInfoViews)
# router.register(r'studies/savedatadetails', SaveDataDetailsViews)
# router.register(r'studies/examinationreport', ExaminationReportViewSet)
# router.register(r'studies/ratingreport', RatingReportViewSet)
# router.register(r'studies/practical_course_record', PracticalCourseRecordViewSet)
# router.register(r'studies/request_exception', RequestExceptionViewSet)
urlpatterns = router.urls
|
[
"757147959@qq.com"
] |
757147959@qq.com
|
ab77d94e70a98628a260f53902bdd8a90be36265
|
ab1d0fcd4900e0a88d49999cbbde4b06cc441e5d
|
/Labs/Lab 3/Lab3/Boids.py
|
fd61d89efc38e7991605f09d6257f5c325460d9d
|
[] |
no_license
|
ThomasMGilman/ETGG1803_ConceptsOf3DGraphicsAndMath
|
bf261b7ce16bb686e42b1a2600aa97b4f8984b65
|
fdf4e216b117769246154cd360b2c321f4581354
|
refs/heads/master
| 2020-03-29T23:14:05.715926
| 2018-09-26T17:18:25
| 2018-09-26T17:18:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,695
|
py
|
import flock
import pygame
import math3d
import random
# Pygame startup
pygame.init()
win_width = 800
win_height = 600
screen = pygame.display.set_mode((win_width, win_height))
fontObj = pygame.font.SysFont("Courier New", 12)
clock = pygame.time.Clock()
done = False
paused = False
# This is a list of circular "obstacles" (pos_vector, rad)
obstacles = []
for i in range(3):
x = random.randint(0, win_width)
y = random.randint(0, win_height)
obstacles.append([math3d.VectorN(x, y), random.randint(50, 150)])
# Create the flock. Flock-members shouldn't spawn on obstacles (if doing the bonus)
F = flock.Flock((0,0,win_width,win_height), 20, obstacles)
# The mouse position (or None if the user isn't clicking)
mpos = None
# Game Loop
while not done:
# Update
deltaTime = clock.tick() / 1000.0
if paused:
deltaTime = 0.0 # Everything remains functional, but we don't move anything...
F.update(deltaTime, mpos)
# Input
event = pygame.event.poll()
if event.type == pygame.KEYDOWN and event.key == pygame.K_p:
paused = not paused
keys = pygame.key.get_pressed()
mx, my = pygame.mouse.get_pos()
if keys[pygame.K_ESCAPE]:
done = True
if pygame.mouse.get_pressed()[0]:
mouseClicked = True
mpos = math3d.VectorN(mx, my)
else:
mouseClicked = False
mpos = None
# Draw
screen.fill((0,0,0))
for o in obstacles:
pygame.draw.circle(screen, (0,128,0), o[0].int(), o[1])
F.render(screen)
if mouseClicked:
screen.blit(fontObj.render("--Mouse Button Down--", False, (255,255,255)), (0,0))
pygame.display.flip()
# Shutdown
pygame.quit()
|
[
"Thomas.Gilman@ymail.com"
] |
Thomas.Gilman@ymail.com
|
52e69b7d9ab9be96457650a33223304431a7087b
|
583d03a6337df9f1e28f4ef6208491cf5fb18136
|
/dev4qx/madeira/handlers/data/zhixin.py
|
575cabc375e33c3480809e11ab34c9e4197cd44f
|
[] |
no_license
|
lescpsn/lescpsn
|
ece4362a328f009931c9e4980f150d93c4916b32
|
ef83523ea1618b7e543553edd480389741e54bc4
|
refs/heads/master
| 2020-04-03T14:02:06.590299
| 2018-11-01T03:00:17
| 2018-11-01T03:00:17
| 155,309,223
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,763
|
py
|
# 智信接口
import logging
import json
import time
import tornado.gen
from tornado.httpclient import AsyncHTTPClient, HTTPError
from utils.encryption_decryption import to_md5
request_log = logging.getLogger("madeira.request")
RESULT_MAP = {
'2': 0, # 成功
'602': 9, # 服务器数据接收异常
'603': 9, # 请求数据参数格式错误
'606': 9, # 数据签名错误
'621': 9, # 商户余额不足
'622': 9, # 商户不存在
'623': 9, # 商品配置不正确
'624': 9, # 商品未配置
'615': 9, # 号码归属地信息未配置
'625': 9, # 重复订单号
'751': 9, # IP地址未绑定
'626': 9, # 订单号不存在
}
@tornado.gen.coroutine
def up_zhixin(handler, partner):
handler.up_req_time = time.localtime()
time_now = time.localtime()
secret_key = partner["secret_key"]
mrch_no = partner["mrch_no"]
site_num = ""
request_time = time.strftime("%Y%m%d%H%M%S", time_now)
client_order_no = handler.order_id
product_type = 4
phone_no = handler.mobile
cp = ""
city_code = ""
recharge_type = 0
recharge_desc = ""
notify_url = partner["notify_url"]
recharge_amount = None
k = 'private:zhixin:{carrier}:{price}'.format(carrier=handler.carrier, price=handler.price)
recharge_amount = handler.slave.get(k)
if recharge_amount is None:
handler.up_result = 5003
return handler.up_result
sign = to_md5(
"city_code" + city_code + "client_order_no" + client_order_no + "cp" + cp + "mrch_no" + mrch_no + "notify_url" + notify_url + "phone_no" + phone_no + "product_type" + str(
product_type) + "recharge_amount" + str(
recharge_amount) + "recharge_desc" + recharge_desc + "recharge_type" + str(
recharge_type) + "request_time" + request_time + "site_num" + site_num + secret_key)
body = {
"mrch_no": mrch_no,
"site_num": site_num,
"request_time": request_time,
"client_order_no": client_order_no,
"product_type": product_type,
"phone_no": phone_no,
"cp": cp,
"city_code": city_code,
"recharge_amount": recharge_amount,
"recharge_type": recharge_type,
"recharge_desc": recharge_desc,
"notify_url": notify_url,
"sign": sign,
}
body = json.dumps(body)
url = partner["url_busi"]
h = {'Content-Type': 'application/json; charset=utf-8'}
result = 9999
up_result = None
http_client = AsyncHTTPClient()
try:
request_log.info("REQU %s", body, extra={'orderid': handler.order_id})
response = yield http_client.fetch(url, method='POST', body=body, headers=h, request_timeout=120)
except HTTPError as http_error:
request_log.error('CALL UPSTREAM FAIL %s', http_error, extra={'orderid': handler.order_id})
result = 60000 + http_error.code
response = None
except Exception as e:
request_log.error('CALL UPSTREAM FAIL %s', e, extra={'orderid': handler.order_id})
response = None
finally:
http_client.close()
handler.up_resp_time = time.localtime()
if response and response.code == 200:
response_body = response.body.decode('utf8')
request_log.info("RESP %s", response_body, extra={'orderid': handler.order_id})
try:
response_body = json.loads(response_body)
up_result = response_body["code"]
result = RESULT_MAP.get(up_result, 9)
handler.up_result = up_result
except Exception as e:
result = 9999
handler.up_result = result
request_log.error('PARSE UPSTREAM %s', e, extra={'orderid': handler.order_id})
return result
|
[
"lescpsn@aliyun.com"
] |
lescpsn@aliyun.com
|
b39e713e9b9d37a4a0137e5f0283d1dbfadfd28d
|
3986a89bb2c7fbc679dae33b0e1c280caa032885
|
/marketing/models.py
|
7f955fcc2f80924dcb266fe5005f66793ebf8076
|
[] |
no_license
|
sajalmia381/ecommerce
|
9d46d9e00b5c58b294bc6d96019d389a24f57952
|
9e09da97c714b42bb415ff3cce87ff91cd69f925
|
refs/heads/master
| 2022-12-11T15:00:37.643467
| 2019-08-12T14:20:45
| 2019-08-12T14:20:45
| 123,375,046
| 0
| 0
| null | 2022-12-08T02:10:36
| 2018-03-01T03:09:26
|
CSS
|
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
from django.db import models
from django.conf import settings
from django.db.models.signals import post_save
from .utils import MailChimp
# Create your models here.
class MarketingPreference(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
subscribe = models.BooleanField(default=True)
mailchimp_mes = models.TextField(null=True, blank=True)
timestremp = models.DateTimeField(auto_now_add=True)
update_on = models.DateTimeField(auto_now=True)
def __str__(self):
return self.user.email
def marking_pre_update_reciver(sender, instance, created, *args, **kwargs):
if created:
status_code, response_data = MailChimp().subscribe(instance.user.email)
print(status_code, response_data)
post_save.connect(marking_pre_update_reciver, sender=MarketingPreference)
def make_marketing_pre_reciver(sender, instance, created, *args, **kwargs):
if created:
MarketingPreference.objects.get_or_create(user=instance)
post_save.connect(make_marketing_pre_reciver, sender=settings.AUTH_USER_MODEL)
|
[
"sajal_mia@ymail.com"
] |
sajal_mia@ymail.com
|
b4e15b33e55d23e30e8e2f6b8a321eafb8f54723
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/309/usersdata/293/73227/submittedfiles/atm.py
|
2c93c82af2e85472309450b343bbc2b08ab565f8
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
money=int(input("Digite um valor: "))
r1=money%20
r2=r1%10
r3=r2%5
r4=r3%2
d1=money//20
d2=r1//10
d3=r2//5
d4=r3//2
d5=r4//1
if money<0:
print("Valor inválido")
else:
print(str(d1)+str(d2))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
604bca4b8f0b2686a9201ccb1c91a2fd818f3ee0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02836/s667985812.py
|
1658fec98933a591e5a14d44dd868cbef7443c97
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
S = input()
leng = len(S)
count = 0
if leng % 2 == 0:
S1 = S[:int(leng/2)]
S2 = S[int(leng/2):]
for i in range(int(leng/2)):
if S1[i] == S2[int(leng/2)-1-i]:
count += 1
print(int(leng/2)-count)
else:
S1 = S[:int(leng/2)]
S2 = S[int(leng/2)+1:]
for i in range(int(leng/2)):
if S1[i] == S2[int(leng/2)-1-i]:
count += 1
print(int(leng/2)-count)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
af2f0512e91ea49f71983aa68b1076a656ccefd2
|
090d43fb627fd1d91bb3d745f008485f15f8f189
|
/wotw_highlighter/block_header.py
|
be74c109bcd8bee32e87936500abd1df1f77b06e
|
[
"ISC"
] |
permissive
|
wizardsoftheweb/wotw-highlighter
|
6153ebc50d574d963b060393591c76c44c13ba4c
|
f9c2c91f5ebc506192e81573942b4989c80ae2bb
|
refs/heads/master
| 2021-04-25T11:24:12.816987
| 2018-02-24T23:21:16
| 2018-02-24T23:21:16
| 111,818,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,623
|
py
|
"""This file provides a class to attach a header to a block"""
from wotw_highlighter.block_options import BlockOptions
class BlockHeader(BlockOptions):
"""This class compiles and renders a block's header (if any)"""
RENDER_AN_OPTION_NOT_INCLUDED = ''
ERROR_NEED_blob_path_OR_TITLE = ValueError('''\
blob_path and alternate_title cannot both be empty when generating a header\
''')
def validate(self):
"""Overrides super validate"""
if self.blob_path is None and self.title is None:
raise self.ERROR_NEED_blob_path_OR_TITLE
@staticmethod
def construct_code_tab(contents, active=False):
"""
This convenience method wraps contents in the proper markup
Parameters:
contents: The contents of the tab
active: Whether or not the tab should be marked as active
"""
return (
'<div class="code-tab%s">'
'%s'
'</div>'
% (
(
' active'
if active
else ''
),
contents
)
)
def render_git_ref_name_tab(self):
"""Renders the VCS branch tab"""
if self.git_ref_name and 'HEAD' != self.git_ref_name:
return self.construct_code_tab(self.git_ref_name)
return self.RENDER_AN_OPTION_NOT_INCLUDED
def render_title_tab(self):
"""Renders the blob title"""
title = (
self.title
if self.title
else self.blob_path
)
return self.construct_code_tab(title, True)
def render_external_source_link_tab(self):
"""Renders the VCS link tab"""
if self.external_source_link:
tab_body = (
'<a target="_blank" href="%s">'
'view source <i class="fa fa-external-link"></i>'
'</a>'
% (self.external_source_link)
)
return self.construct_code_tab(tab_body)
return self.RENDER_AN_OPTION_NOT_INCLUDED
def render_full_header(self):
"""Renders the entire header row"""
return (
'<tr class="code-header">'
'<td></td>'
'<td class="code-header">'
'%s'
'%s'
'%s'
'</td>'
'</tr>'
% (
self.render_title_tab(),
self.render_git_ref_name_tab(),
self.render_external_source_link_tab()
)
)
def __str__(self):
return self.render_full_header()
|
[
"cj@wizardsoftheweb.pro"
] |
cj@wizardsoftheweb.pro
|
d415c73efb114941108de5cc70bf361106a5cb61
|
9655434fa24cff892af8a6a54fc448ef7075926a
|
/scrapy框架/day07/redisSpiderPro/redisSpiderPro/spiders/redisSpiderTest.py
|
db9f1980547be6a56ec5fd15b5ca791643ba9bd8
|
[] |
no_license
|
chenrun666/Spider
|
acaa6849726417e0df56d4e43b52fd1de22ac1d8
|
2ec2e5621d0eaa15d2a2bcc2fa11642a9441888c
|
refs/heads/master
| 2020-04-09T04:42:05.168983
| 2019-01-15T13:21:40
| 2019-01-15T13:21:40
| 160,032,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy_redis.spiders import RedisSpider
from redisSpiderPro.items import RedisspiderproItem
class RedisspidertestSpider(RedisSpider):
name = 'redisSpiderTest'
#allowed_domains = ['www.xxx,com']
#start_urls = ['http://www.xxx,com/']
redis_key = 'data' #调度器队列的名称
url = 'http://db.pharmcube.com/database/cfda/detail/cfda_cn_instrument/'
pageNum = 1
def parse(self, response):
num = response.xpath('/html/body/div/table/tbody/tr[1]/td[2]/text()').extract_first()
name = response.xpath('/html/body/div/table/tbody/tr[2]/td[2]/text()').extract_first()
item = RedisspiderproItem()
item['num'] = num
item['name'] = name
yield item
if self.pageNum <= 10000:
self.pageNum += 1
new_url = self.url + str(self.pageNum)
yield scrapy.Request(url=new_url,callback=self.parse)
|
[
"17610780919@163.com"
] |
17610780919@163.com
|
b08549cdc9930326c9806f5c1e261d6761327e2b
|
4af5c720758bd4ef36ccf94934fa79ddfc6d29ab
|
/pelicanconf.py
|
a7b514784cb9a8bf6cbbdf25f15b355abd50c4a4
|
[] |
no_license
|
juhhcarmona/grupyrp.github.io
|
5151fff8463821d8976ddf175281755b21a54675
|
9c1c68185ae95bd419bbb939493c3940fd5b319b
|
refs/heads/pelican
| 2021-01-11T03:56:10.644068
| 2016-10-18T20:10:58
| 2016-10-18T20:10:58
| 71,271,500
| 0
| 0
| null | 2016-10-18T17:03:34
| 2016-10-18T17:03:31
|
Python
|
UTF-8
|
Python
| false
| false
| 3,042
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Grupy-RP'
SITENAME = u'Grupy-RP'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Sao_Paulo'
THEME = 'themes/malt'
SITE_LOGO = 'images/logo/logo.png'
SITE_BACKGROUND_IMAGE = 'images/banners/aerea.jpg'
STATIC_PATHS = ['images', ]
WELCOME_TITLE = 'Grupy-RP'
DEFAULT_LANG = u'pt'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
PAGE_URL = '{slug}'
PAGE_SAVE_AS = '{slug}.html'
INDEX_SAVE_AS = "blog/index.html"
PLUGIN_PATHS = ['./plugins']
PLUGINS = [
'members'
]
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
SOCIAL_LINKS = (
{
"href": "https://github.com/grupyrp",
"icon": "fa-github",
"text": "GitHub",
},
{
"href": "https://www.facebook.com/grupyrp",
"icon": "fa-facebook",
"text": "Facebook",
},
{
"href": "https://groups.google.com/forum/#!forum/grupy-rp",
"icon": "fa-envelope",
"text": "Lista de emails",
},
)
MALT_HOME = [
{
"color": "blue-grey lighten-5",
"title": "O que Fazemos?",
"items": [
{
"title": "Comunidade",
"icon": "fa-comments",
"text": (
"Somos uma comunidade de desenvolvedores e entusiastas da "
"linguagem de programação Python, aqui alguns lugares onde "
"nos encontrar"),
"buttons": [
{
"text": "Saiba Mais",
"href": "comunidade",
},
],
},
{
"title": "Membros",
"icon": "fa-users",
"text": (
"Nosso grupo é formado pelos mais diversos tipos de "
"pessoas, com histórias e personalidades diferentes, veja"
"quem somos"),
"buttons": [
{
"text": "Conheça",
"href": "membros",
},
],
},
{
"title": "Projetos",
"icon": "fa-briefcase",
"text": "",
"buttons": [
{
"text": "Mais detalhes",
"href": "",
},
],
},
]
},
]
# Uncomment following line if you want document-relative URLs when developing
# RELATIVE_URLS = True
|
[
"daniloshiga@gmail.com"
] |
daniloshiga@gmail.com
|
4beef22bf29700d3794b948c98dbaa4b55e1f8e0
|
7db6c1865cf9102808824ff06cda747b6e572a21
|
/Python/Test/Time/time_count.py
|
9d6d2bf361c05494395587a1c48789d455799998
|
[] |
no_license
|
hyteer/testing
|
1f6cabc1d2b67faa4533e6ad7eb5be8c13d542c9
|
1d8b47b3bbb2daf00e4f15b5d18e86111ea4e113
|
refs/heads/master
| 2020-05-21T16:19:08.243676
| 2017-01-03T01:25:17
| 2017-01-03T01:25:17
| 60,914,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
import time
from datetime import datetime
def compute(i):
for i in range(1,i):
i = i+1
return i
start_time = datetime.now()
print "start:%s" % str(start_time)
n = compute(100000)
end_time = datetime.now()
print "end:%s" % str(end_time)
#elapsed_time = end_time - start_time
#print "elapsed_time:%s" % str(elapsed_time)
#print "start:%r, End:%r" % (start_time, end_time)
#rint datetime.now()
|
[
"hyteer@qq.com"
] |
hyteer@qq.com
|
88f98b361bb900da84472e106fdc314378c2e695
|
facbdbdadacd23f6c83d266116dc14744741070f
|
/Core_Python/Day-22/Dict/13.py
|
b12af90d65221bfd295325d609af46cfb2c20761
|
[] |
no_license
|
Yogesh-Singh-Gadwal/YSG_Python
|
51b6b53fe34567bf066b6e487c00da766b47ac6b
|
f0d6841e1f92d1d2b27d8ecdd332d40b49a5ca69
|
refs/heads/master
| 2023-06-06T04:40:12.004713
| 2021-07-06T19:59:26
| 2021-07-06T19:59:26
| 292,482,586
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
# dict
import time
d1 = {
"a":"micky",
"b":"akira",
"c":"rahul"
}
print(d1)
print(type(d1))
print()
time.sleep(3)
d1['d'] = 'amit'
print(d1)
|
[
"noreply@github.com"
] |
Yogesh-Singh-Gadwal.noreply@github.com
|
fc99177cb2c2d349aa7b8d333935662131b2b0d2
|
ba86ef56fb2ff1a8bf9be3058b58b9e48e7b50ce
|
/apps/rrhh/urls/activoUrls.py
|
f9ebbd3822e6e27218afabebd53c16babc23aa94
|
[] |
no_license
|
robertowest/lubre_homepage
|
277f8fc81512b482fbea539234f30ef3eb801480
|
9de02443ba2ee3cd48afd2b7d580a09081fe84f2
|
refs/heads/master
| 2023-07-14T04:39:38.640155
| 2021-08-30T17:43:56
| 2021-08-30T17:43:56
| 223,473,409
| 0
| 0
| null | 2020-05-07T13:50:46
| 2019-11-22T19:34:22
|
Python
|
UTF-8
|
Python
| false
| false
| 563
|
py
|
from django.urls import path
from apps.rrhh.views import activoViews as views
app_name = "activo"
urlpatterns = [
path('', views.ActivoTemplateView.as_view(), name='index'),
path('listado/', views.ActivosListView.as_view(), name='list'),
path('<int:fk>/crear/', views.ActivoCreateView.as_view(), name='create'),
path('<int:pk>/', views.ActivoDetailView.as_view(), name='detail'),
path('<int:pk>/modificar/', views.ActivoUpdateView.as_view(), name='update'),
path('<int:pk>/eliminar/', views.ActivoDeleteView.as_view(), name='delete'),
]
|
[
"roberto.west@gmail.com"
] |
roberto.west@gmail.com
|
d1ca7482114376f98e1dcdf854e9233fdc546a85
|
71f7d58c9a33fc8fdfdd85d5f432565010856c5a
|
/ciscripts/check/project/__init__.py
|
69052c33b396fab013dd28c9d6aedeb5ed0e50c7
|
[
"MIT"
] |
permissive
|
polysquare/polysquare-ci-scripts
|
32a3bbcab62d77c1dfcbbf0ad78a23306e67d8c6
|
9978f0600ea964a9f2dffd9f4eb01a10d08d6788
|
refs/heads/master
| 2022-10-27T23:37:25.192253
| 2018-02-22T02:03:11
| 2018-02-22T02:03:20
| 28,320,857
| 2
| 2
|
MIT
| 2022-10-23T07:10:40
| 2014-12-22T01:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 193
|
py
|
# /ciscripts/check/project/__init__.py
#
# Module loader file for /ciscripts/check/project.
#
# See /LICENCE.md for Copyright information
"""Module loader file for /ciscripts/check/project."""
|
[
"smspillaz@gmail.com"
] |
smspillaz@gmail.com
|
49cf7d40a41f08cf4b9942fb0992993977cdd6cb
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/cv/classification/Gluon_ResNet50_v1b_for_PyTorch/timm/models/layers/halo_attn.py
|
5cb9d54dd40bdc666fb9eb7c60ee2eaa1c43e199
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"CC-BY-NC-4.0",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 7,400
|
py
|
# Copyright 2019 Ross Wightman
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Halo Self Attention
Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
- https://arxiv.org/abs/2103.12731
@misc{2103.12731,
Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and
Jonathon Shlens},
Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones},
Year = {2021},
}
Status:
This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me.
Trying to match the 'H1' variant in the paper, my parameter counts are 2M less and the model
is extremely slow. Something isn't right. However, the models do appear to train and experimental
variants with attn in C4 and/or C5 stages are tolerable speed.
Hacked together by / Copyright 2021 Ross Wightman
"""
from typing import Tuple, List
import torch
from torch import nn
import torch.nn.functional as F
from .weight_init import trunc_normal_
def rel_logits_1d(q, rel_k, permute_mask: List[int]):
""" Compute relative logits along one dimension
As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
Args:
q: (batch, height, width, dim)
rel_k: (2 * window - 1, dim)
permute_mask: permute output dim according to this
"""
B, H, W, dim = q.shape
rel_size = rel_k.shape[0]
win_size = (rel_size + 1) // 2
x = (q @ rel_k.transpose(-1, -2))
x = x.reshape(-1, W, rel_size)
# pad to shift from relative to absolute indexing
x_pad = F.pad(x, [0, 1]).flatten(1)
x_pad = F.pad(x_pad, [0, rel_size - W])
# reshape and slice out the padded elements
x_pad = x_pad.reshape(-1, W + 1, rel_size)
x = x_pad[:, :W, win_size - 1:]
# reshape and tile
x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1)
return x.permute(permute_mask)
class PosEmbedRel(nn.Module):
""" Relative Position Embedding
As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
"""
def __init__(self, block_size, win_size, dim_head, scale):
"""
Args:
block_size (int): block size
win_size (int): neighbourhood window size
dim_head (int): attention head dim
scale (float): scale factor (for init)
"""
super().__init__()
self.block_size = block_size
self.dim_head = dim_head
self.scale = scale
self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * self.scale)
self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * self.scale)
def forward(self, q):
B, BB, HW, _ = q.shape
# relative logits in width dimension.
q = q.reshape(-1, self.block_size, self.block_size, self.dim_head)
rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))
# relative logits in height dimension.
q = q.transpose(1, 2)
rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))
rel_logits = rel_logits_h + rel_logits_w
rel_logits = rel_logits.reshape(B, BB, HW, -1)
return rel_logits
class HaloAttn(nn.Module):
""" Halo Attention
Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
- https://arxiv.org/abs/2103.12731
"""
def __init__(
self, dim, dim_out=None, stride=1, num_heads=8, dim_head=16, block_size=8, halo_size=3, qkv_bias=False):
super().__init__()
dim_out = dim_out or dim
assert dim_out % num_heads == 0
self.stride = stride
self.num_heads = num_heads
self.dim_head = dim_head
self.dim_qk = num_heads * dim_head
self.dim_v = dim_out
self.block_size = block_size
self.halo_size = halo_size
self.win_size = block_size + halo_size * 2 # neighbourhood window size
self.scale = self.dim_head ** -0.5
# FIXME not clear if this stride behaviour is what the paper intended
# Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving
# data in unfolded block form. I haven't wrapped my head around how that'd look.
self.q = nn.Conv2d(dim, self.dim_qk, 1, stride=self.stride, bias=qkv_bias)
self.kv = nn.Conv2d(dim, self.dim_qk + self.dim_v, 1, bias=qkv_bias)
self.pos_embed = PosEmbedRel(
block_size=block_size // self.stride, win_size=self.win_size, dim_head=self.dim_head, scale=self.scale)
def reset_parameters(self):
std = self.q.weight.shape[1] ** -0.5 # fan-in
trunc_normal_(self.q.weight, std=std)
trunc_normal_(self.kv.weight, std=std)
trunc_normal_(self.pos_embed.height_rel, std=self.scale)
trunc_normal_(self.pos_embed.width_rel, std=self.scale)
def forward(self, x):
B, C, H, W = x.shape
assert H % self.block_size == 0 and W % self.block_size == 0
num_h_blocks = H // self.block_size
num_w_blocks = W // self.block_size
num_blocks = num_h_blocks * num_w_blocks
q = self.q(x)
q = F.unfold(q, kernel_size=self.block_size // self.stride, stride=self.block_size // self.stride)
# B, num_heads * dim_head * block_size ** 2, num_blocks
q = q.reshape(B * self.num_heads, self.dim_head, -1, num_blocks).transpose(1, 3)
# B * num_heads, num_blocks, block_size ** 2, dim_head
kv = self.kv(x)
# FIXME I 'think' this unfold does what I want it to, but I should investigate
kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size)
kv = kv.reshape(
B * self.num_heads, self.dim_head + (self.dim_v // self.num_heads), -1, num_blocks).transpose(1, 3)
k, v = torch.split(kv, [self.dim_head, self.dim_v // self.num_heads], dim=-1)
attn_logits = (q @ k.transpose(-1, -2)) * self.scale # FIXME should usual attn scale be applied?
attn_logits = attn_logits + self.pos_embed(q) # B * num_heads, block_size ** 2, win_size ** 2
attn_out = attn_logits.softmax(dim=-1)
attn_out = (attn_out @ v).transpose(1, 3) # B * num_heads, dim_v // num_heads, block_size ** 2, num_blocks
attn_out = F.fold(
attn_out.reshape(B, -1, num_blocks),
(H // self.stride, W // self.stride),
kernel_size=self.block_size // self.stride, stride=self.block_size // self.stride)
# B, dim_out, H // stride, W // stride
return attn_out
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
9090de0981c2a4334712d26275bc1f06aeb6a383
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_lees.py
|
0c52ef364ea8500533dfc82c6c80a47232551d67
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
from xai.brain.wordbase.nouns._lee import _LEE
#calss header
class _LEES(_LEE, ):
def __init__(self,):
_LEE.__init__(self)
self.name = "LEES"
self.specie = 'nouns'
self.basic = "lee"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
e86906546d5709bb143c540a3d02b9fb77e10673
|
27b86f422246a78704e0e84983b2630533a47db6
|
/tests/test_05_tools/test_534_dwg_info.py
|
3b9e92c3d0eced9c637c8286aceef1c82a2dbdfa
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 988
|
py
|
# Copyright (c) 2022, Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.dwginfo import dwg_info
R12 = "41 43 31 30 30 39"
R2000 = "41 43 31 30 31 35"
R2018 = "41 43 31 30 33 32"
R20XX = "41 43 31 30 33 33"
unknown1 = "32 32 31 30 33 32"
unknown2 = ""
def data(s) -> bytes:
return bytes(int(x, 16) for x in s.split())
@pytest.mark.parametrize(
"s,ver,rel",
[
(R12, "AC1009", "R12"),
(R2000, "AC1015", "R2000"),
(R2018, "AC1032", "R2018"),
(R20XX, "AC1033", "unknown"),
],
ids=["R12", "R2000", "R2018", "unknown"],
)
def test_detect(s, ver, rel):
info = dwg_info(data(s))
assert info.version == ver
assert info.release == rel
@pytest.mark.parametrize(
"s", [unknown1, unknown2],
ids=["invalid", "empty"],
)
def test_detect_invalid(s):
info = dwg_info(data(s))
assert info.version == "invalid"
assert info.release == "invalid"
if __name__ == "__main__":
pytest.main([__file__])
|
[
"me@mozman.at"
] |
me@mozman.at
|
f60fa889f48e5d98c9ed095639ff9bbcdbced23b
|
364085d006bb0e31f915091a07125501ab455277
|
/amplify/agent/util/http.py
|
2a6ec08a67743e03fcc23d237fde672d3aa78fd6
|
[
"BSD-2-Clause"
] |
permissive
|
digideskio/digidesk-amplified
|
6f64768c28b7ecc32088259f07498df6956341ae
|
547f899d6fd47dc726df28ee90bf3511f02bd6cf
|
refs/heads/master
| 2020-12-30T23:21:08.300692
| 2016-04-14T12:20:01
| 2016-04-14T12:20:01
| 56,352,865
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,597
|
py
|
# -*- coding: utf-8 -*-
import ujson
import time
import requests
import logging
import zlib
from amplify.agent import Singleton
from amplify.agent.context import context
requests.packages.urllib3.disable_warnings()
"""
WHY DO YOU DISABLE THIS WARNING?
We don't want to show you redundant messages.
IS IT A REAL PROBLEM?
No. It is not a real problem.
It's just a notification that urllib3 uses standard Python SSL library.
GIVE ME MORE DETAILS!
By default, urllib3 uses the standard library’s ssl module.
Unfortunately, there are several limitations which are addressed by PyOpenSSL.
In order to work with Python OpenSSL bindings urllib3 needs
requests[security] to be installed, which contains cryptography,
pyopenssl and other modules.
The problem is we CAN'T ship Amplify with built-in OpenSSL & cryptography.
You can install those libs manually and enable warnings back.
More details: https://urllib3.readthedocs.org/en/latest/security.html#pyopenssl
"""
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev", "Grant Hulegaard"]
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
class HTTPClient(Singleton):
    """HTTP transport to the Amplify cloud API.

    Sends JSON (optionally zlib-compressed) through a persistent
    requests.Session and logs per-request timing.
    """

    def __init__(self):
        config = context.app_config
        self.timeout = float(config['cloud']['api_timeout'])
        self.verify_ssl_cert = config['cloud']['verify_ssl_cert']
        # Falsy disables compression; the value is also passed to zlib as the level.
        self.gzip = config['cloud']['gzip']
        self.session = None
        self.url = None
        self.proxies = config.get('proxies') # Support old configs which don't have 'proxies' section
        if self.proxies and self.proxies.get('https', '') == '':
            self.proxies = None # Pass None to trigger requests default scraping of environment variables
        self.update_cloud_url()
        logging.getLogger("requests").setLevel(logging.WARNING)

    def update_cloud_url(self):
        """Rebuild the base URL and a fresh session from the current config."""
        config = context.app_config
        content_type = 'binary/octet-stream' if self.gzip else 'application/json'
        self.url = '%s/%s' % (config['cloud']['api_url'], config['credentials']['api_key'])
        self.session = requests.Session()
        self.session.headers.update({
            'Content-Type': content_type,
            'User-Agent': 'nginx-amplify-agent/%s' % context.version
        })

    def make_request(self, location, method, data=None, timeout=None, json=True, log=True):
        """Send a request to the cloud and return the decoded response.

        :param location: absolute URL, or a path appended to the base URL
        :param method: 'get' for GET; anything else sends a POST
        :param data: object JSON-encoded (and compressed if enabled) as the body
        :param timeout: per-request override of the configured timeout
        :param json: decode the response as JSON when True, else return text
        :param log: log failures/timing at error/info level (debug when False)
        :raises: re-raises any requests exception after logging it
        """
        url = location if location.startswith('http') else '%s/%s' % (self.url, location)
        timeout = timeout if timeout is not None else self.timeout
        payload = ujson.encode(data) if data else '{}'
        payload = zlib.compress(payload, self.gzip) if self.gzip else payload

        start_time = time.time()
        result, http_code = '', 500
        try:
            if method == 'get':
                r = self.session.get(
                    url,
                    timeout=timeout,
                    verify=self.verify_ssl_cert,
                    proxies=self.proxies
                )
            else:
                r = self.session.post(
                    url,
                    data=payload,
                    timeout=timeout,
                    verify=self.verify_ssl_cert,
                    proxies=self.proxies
                )
            http_code = r.status_code
            r.raise_for_status()
            result = r.json() if json else r.text
            return result
        except Exception as e:
            if log:
                # NOTE(review): e.message exists only on Python 2 exceptions; on
                # Python 3 this line itself raises AttributeError — confirm runtime.
                context.log.error('failed %s "%s", exception: "%s"' % (method.upper(), url, e.message))
                context.log.debug('', exc_info=True)
            raise e
        finally:
            # Always record "method url status request-bytes response-bytes seconds".
            end_time = time.time()
            log_method = context.log.info if log else context.log.debug
            context.log.debug(result)
            log_method(
                "%s %s %s %s %s %.3f" % (method, url, http_code, len(payload), len(result), end_time - start_time)
            )

    def post(self, url, data=None, timeout=None, json=True):
        """Convenience wrapper: POST *data* to *url*."""
        return self.make_request(url, 'post', data=data, timeout=timeout, json=json)

    def get(self, url, timeout=None, json=True, log=True):
        """Convenience wrapper: GET *url*."""
        return self.make_request(url, 'get', timeout=timeout, json=json, log=log)
def resolve_uri(uri):
    """
    Return *uri* unchanged when it is already absolute; otherwise
    prefix it with the loopback host.

    :param uri: str uri
    :return: str url
    """
    if uri.startswith(('http://', 'https://')):
        return uri
    return '127.0.0.1%s' % uri
|
[
"dedm@nginx.com"
] |
dedm@nginx.com
|
e0bfd11f7270a4b660b186cb8e2368ef570c68ff
|
aa0bf4e774ff82065927dbddf34be19c09b64c9c
|
/examples/ex1.py
|
64ac038d0826dd17f9a516ada94d2873b01d30ab
|
[
"BSD-3-Clause"
] |
permissive
|
grst/ipymd
|
510ea6feb2726fadfe24ebbcbf3981c104fad8d8
|
4a57c4212b8e71848d51826859c2a3e478037e28
|
refs/heads/grst
| 2023-04-02T14:38:34.154687
| 2020-12-02T11:37:38
| 2020-12-02T11:37:38
| 87,005,381
| 38
| 6
|
BSD-3-Clause
| 2018-08-28T11:33:46
| 2017-04-02T18:08:34
|
HTML
|
UTF-8
|
Python
| false
| false
| 511
|
py
|
# List of ipymd cells expected for this example.
# Each dict mimics a notebook cell: markdown cells carry 'source',
# code cells carry 'input' and 'output'.
output = [
    {'cell_type': 'markdown',
     'source': '# Header'},

    {'cell_type': 'markdown',
     'source': 'A paragraph.'},

    {'cell_type': 'markdown',
     'source': 'Python code:'},

    {'cell_type': 'code',
     'input': 'print("Hello world!")',
     'output': 'Hello world!'},

    {'cell_type': 'markdown',
     'source': 'JavaScript code:'},

    {'cell_type': 'markdown',
     'source': '```javascript\nconsole.log("Hello world!");\n```'}
]
|
[
"cyrille.rossant@gmail.com"
] |
cyrille.rossant@gmail.com
|
48218e1f9f444bb01ae4752bef0b91bea2ed4dcb
|
c6a101547c2b7f36fe83a725974a8a7f02cf176d
|
/data_structures/bst/bt_to_bst.py
|
a3943ce19459c0432a31bde8205bf3bcf1beb69f
|
[
"MIT"
] |
permissive
|
prabhupant/python-ds
|
737cc35574de5c2ece0f0813cf00775324a8dbe7
|
f7d6d78fedaf84b7527965bb1798b7a8da989474
|
refs/heads/master
| 2023-08-22T05:04:22.937675
| 2022-10-04T01:29:39
| 2022-10-04T01:29:39
| 199,366,418
| 2,325
| 704
|
MIT
| 2022-10-10T13:01:10
| 2019-07-29T02:48:57
|
Python
|
UTF-8
|
Python
| false
| false
| 767
|
py
|
class Node:
    """Binary-tree node: a payload value plus left/right child links."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
def store_inorder(root, inorder):
    """Append the values of the tree rooted at *root* to *inorder*, in-order.

    Fix: nodes are created with a ``val`` attribute (see ``Node``), but this
    function read ``root.data``, raising AttributeError; it now reads ``val``.

    :param root: tree root (or None for an empty tree)
    :param inorder: list mutated in place to receive the values
    """
    if root is None:
        return
    store_inorder(root.left, inorder)
    inorder.append(root.val)
    store_inorder(root.right, inorder)
def count_nodes(root):
    """Return the number of nodes in the tree rooted at *root*."""
    if root is None:
        return 0
    return 1 + count_nodes(root.left) + count_nodes(root.right)
def array_to_bst(arr, root):
    """Write the values from *arr* into the tree via an in-order walk.

    Fix: nodes carry a ``val`` attribute (see ``Node``), but this function
    wrote ``root.data``; it now writes ``val``. *arr* is consumed from the
    front as each node is filled (the caller's list is mutated).

    :param arr: list of values, one per node, consumed left-to-right
    :param root: tree root whose shape is preserved
    """
    if root is None:
        return
    array_to_bst(arr, root.left)
    root.val = arr.pop(0)  # next smallest value goes into the next in-order slot
    array_to_bst(arr, root.right)
def bt_to_bst(root):
    """Convert a binary tree into a BST in place, preserving its shape.

    Collects the values in-order, sorts them, and writes them back in-order.
    Fix: dropped the unused ``n = count_nodes(root)`` computation (its result
    was never read).

    :param root: tree root; None is a no-op
    """
    if root is None:
        return
    arr = []
    store_inorder(root, arr)
    arr.sort()
    array_to_bst(arr, root)
|
[
"prabhupant09@gmail.com"
] |
prabhupant09@gmail.com
|
0fa67e76425e468c985c3025e54b6202be4272fd
|
f9e265f39cdfa568e67acb50840f9655fc4d65f7
|
/builtinfunctionstypes.py
|
b39cd0b6875e905da2d2d93d4b3b82210ca30fcf
|
[] |
no_license
|
raymondmar61/pythonwilliamfiset
|
5a4fc7faba6880f3df6b3ded98cc6d17925e7895
|
aae7c533f48efbe91a4e7c2d640f2032cd97e1f3
|
refs/heads/master
| 2021-01-25T04:42:19.546278
| 2017-09-28T22:15:30
| 2017-09-28T22:15:30
| 93,469,377
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
#williamfiset 30 Builtins 5 of 6 Types
# tuple, list, str, bool, int, float
# Demo script: each line's expected output is recorded in its trailing comment.
from math import pi as PIE
print(tuple("My_Python")) #print ('M', 'y', '_', 'P', 'y', 't', 'h', 'o', 'n')
print(tuple((1,2,3))) #print (1, 2, 3)
print(tuple( ['G','N','U'] )) #print ('G', 'N', 'U'). List becomes a tuple
print(list(range(10))) #print [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(list("23456")) #print ['2', '3', '4', '5', '6']. Separates each string character into a list with elements
print(list((1,2,3,4))) #print [1, 2, 3, 4]. Tuple becomes a list.
print(str(True)) #print True
print(str("1234567")) #print 1234567
print(str(PIE)) #print 3.141592653589793
print(bool(1>3)) #print False boolean returns True or False
print(bool('a' < 'v')) #print True boolean returns True or False
print(bool(1==1)) #print True boolean returns True or False
print(int(456)) #print 456
print(int("453")) #print 453 converts string to integer
#print(int( [567] )) #error message because can't convert a list to an integer
print(float(PIE)) #print 3.141592653589793
print(float("1.474")) #print 1.474
print(float(508)) #print 508.0
#set an unordered list of unique elements, final result is a list with no duplicates
list_ = [1,1,1,2,3,4,4,4]
print(set(list_)) #print {1, 2, 3, 4}
print("\n")
my_set = set()
my_set.add(5)
my_set.add(1)
my_set.add(2)
print(my_set) #print {1, 2, 5}
my_set.update([11,1,6,8])
print(my_set) #print {1, 2, 5, 6, 8, 11}
# Note: set display order is an implementation detail; small ints happen to sort here.
print(list(my_set)) #print [1, 2, 5, 6, 8, 11] as a list
|
[
"raym61@hotmail.com"
] |
raym61@hotmail.com
|
57145efae0a73a250ab079b71515694b7e3fa35e
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2007.2/desktop/freedesktop/hal/actions.py
|
8548d7eb4c9dc4cbaff444aa99ef55843bd1fd65
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,796
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2007 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import libtools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
# Directory inside the upstream tarball that the build actions operate in.
WorkDir = "hal-0.5.9.1"

def setup():
    """Run HAL's ./configure with the distribution's feature selection
    (PolicyKit, vendor ACPI quirks, D-Bus paths, dedicated hal user/group)."""
    autotools.configure("--enable-policy-kit \
                         --enable-acpi-ibm \
                         --enable-acpi-toshiba \
                         --with-dell-backlight \
                         --enable-umount-helper \
                         --enable-sonypic \
                         --enable-doxygen-docs \
                         --with-usb-csr \
                         --with-macbook \
                         --with-macbookpro \
                         --with-cpufreq \
                         --with-hal-user=hal \
                         --with-hal-group=hal \
                         --with-dbus-sys=/etc/dbus-1/system.d \
                         --disable-docbook-docs \
                         --disable-gtk-doc \
                         --disable-static \
                         --with-pid-file=/var/run/hald.pid")
def build():
    """Compile HAL using the Makefiles produced by setup()."""
    autotools.make()
def install():
    """Install into the package root, then prune files shipped elsewhere."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())

    # We install this in a seperate package to avoid gnome-python dep
    pisitools.remove("/usr/bin/hal-device-manager")
    pisitools.removeDir("/usr/share/hal/device-manager/")

    # See ya...
    pisitools.removeDir("/etc/hotplug.d/")

    pisitools.dodoc("AUTHORS", "COPYING", "ChangeLog", "NEWS", "README")

    # Needed for hal's new cache infrastructure
    pisitools.dodir("/var/lib/cache/hald/")
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
971cbd5365ebabe295b53bc89246a7dab9884348
|
18239524612cf572bfeaa3e001a3f5d1b872690c
|
/clients/oathkeeper/python/ory_oathkeeper_client/models/swagger_create_rule_parameters.py
|
fa695454e4e5de58f8d17064a76a47b6c668c873
|
[
"Apache-2.0"
] |
permissive
|
simoneromano96/sdk
|
2d7af9425dabc30df830a09b26841fb2e8781bf8
|
a6113d0daefbbb803790297e4b242d4c7cbbcb22
|
refs/heads/master
| 2023-05-09T13:50:45.485951
| 2021-05-28T12:18:27
| 2021-05-28T12:18:27
| 371,689,133
| 0
| 0
|
Apache-2.0
| 2021-05-28T12:11:41
| 2021-05-28T12:11:40
| null |
UTF-8
|
Python
| false
| false
| 3,509
|
py
|
# coding: utf-8
"""
ORY Oathkeeper
ORY Oathkeeper is a reverse proxy that checks the HTTP Authorization for validity against a set of rules. This service uses Hydra to validate access tokens and policies. # noqa: E501
The version of the OpenAPI document: v0.0.0-alpha.37
Contact: hi@ory.am
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ory_oathkeeper_client.configuration import Configuration
class SwaggerCreateRuleParameters(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'body': 'SwaggerRule'
    }

    attribute_map = {
        'body': 'Body'
    }

    def __init__(self, body=None, local_vars_configuration=None):  # noqa: E501
        """SwaggerCreateRuleParameters - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._body = None
        self.discriminator = None

        if body is not None:
            self.body = body

    @property
    def body(self):
        """Gets the body of this SwaggerCreateRuleParameters.  # noqa: E501

        :return: The body of this SwaggerCreateRuleParameters.  # noqa: E501
        :rtype: SwaggerRule
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this SwaggerCreateRuleParameters.

        :param body: The body of this SwaggerCreateRuleParameters.  # noqa: E501
        :type: SwaggerRule
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize: nested models (anything with to_dict),
        # lists of models, and dicts whose values are models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SwaggerCreateRuleParameters):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, SwaggerCreateRuleParameters):
            return True

        return self.to_dict() != other.to_dict()
|
[
"noreply@github.com"
] |
simoneromano96.noreply@github.com
|
4415b7bf0416b19028465628c5d14a17d2e84962
|
932a6797f1e97c7f8c96af83647fc27d2324765e
|
/python/1138. Alphabet Board Path.py
|
6b59f12b589ba7b471f1b8f5ad023ddb023c67ed
|
[] |
no_license
|
rhzx3519/leetcode
|
1245e0a19dbcb4b853eb0ac369601f31171b55c5
|
2fe336e0de336f6d5f67b058ddb5cf50c9f00d4e
|
refs/heads/master
| 2023-08-17T01:27:49.674440
| 2023-08-16T23:08:59
| 2023-08-16T23:08:59
| 85,682,362
| 3
| 1
| null | 2021-05-08T12:10:56
| 2017-03-21T09:23:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,864
|
py
|
class Solution(object):
    def alphabetBoardPath(self, target):
        """
        Return a move string over {'U','D','L','R','!'} that spells *target*
        on the alphabet board, starting at 'a' (row 0, col 0). '!' selects the
        current cell.

        Cleanup: removed the unused ``dirt`` local and the unused ``enumerate``
        index in the BFS neighbor loop.

        :type target: str
        :rtype: str
        """
        board = ["abcde", "fghij", "klmno", "pqrst", "uvwxy", "z"]
        m, n = len(board), len(board[0])
        # vis doubles as a predecessor map: vis[x][y] is the cell we came from
        # (-1 means unvisited); it is re-initialized before each BFS below.
        vis = [[-1] * n for _ in range(m)]
        d = {(-1, 0): 'U', (1, 0): 'D', (0, -1): 'L', (0, 1): 'R'}

        def bfs(r, c, t):
            # Breadth-first search from (r, c) to the cell holding letter t.
            que = [(r, c)]
            while que:
                x, y = que.pop(0)
                if board[x][y] == t:
                    return (x, y)
                for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                    nx = x + dx
                    ny = y + dy
                    # The 'z' row is shorter: bound ny by the actual row length.
                    if nx < 0 or nx >= m or ny < 0 or ny >= len(board[nx]) or vis[nx][ny] != -1:
                        continue
                    vis[nx][ny] = (x, y)
                    que.append((nx, ny))
            return (-1, -1)

        def find(start, end):
            # Walk the predecessor chain back to start, then emit moves forward.
            prev = [end]
            while end != start:
                end = vis[end[0]][end[1]]
                prev.append(end)
            cmd = ['!']
            for i in range(1, len(prev)):
                k = (prev[i - 1][0] - prev[i][0], prev[i - 1][1] - prev[i][1])
                cmd.append(d[k])
            return ''.join(cmd[::-1])

        ans = []
        r = c = 0
        for t in target:
            vis = [[-1] * n for _ in range(m)]
            end = bfs(r, c, t)
            path = find((r, c), end)
            r, c = end
            ans.append(path)
        return ''.join(ans)
if __name__ == '__main__':
    # Smoke test: compute the move sequence for a sample target word.
    target = 'leet'
    su = Solution()
    su.alphabetBoardPath(target)
|
[
"louzhenghao@itiger.com"
] |
louzhenghao@itiger.com
|
a344d7024b0846d0428ce64b15b6e3afecb52464
|
1ee910d6602123eb1328f56419b04e31b3761b6b
|
/bin/pilfile.py
|
9958e72c9cff0d5f0c078db012a9bcb82355fa89
|
[
"MIT"
] |
permissive
|
mraza007/Pizza-or-Not-a-Pizza
|
7fc89e0905c86fbd3c77a9cc834a4b6098912aeb
|
6ad59d046adbd6be812c7403d9cb8ffbdbd6b0b8
|
refs/heads/master
| 2022-12-15T15:47:34.779838
| 2018-07-04T02:28:56
| 2018-07-04T02:28:56
| 127,992,302
| 30
| 4
|
MIT
| 2022-11-22T00:43:51
| 2018-04-04T01:56:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,604
|
py
|
#!/home/muhammad/image-recognition/bin/python3
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import getopt
import glob
import sys
from PIL import Image
# With no arguments: print usage and exit.
if len(sys.argv) == 1:
    print("PIL File 0.4/2003-09-30 -- identify image files")
    print("Usage: pilfile [option] files...")
    print("Options:")
    print(" -f list supported file formats")
    print(" -i show associated info and tile data")
    print(" -v verify file headers")
    print(" -q quiet, don't warn for unidentified/missing/broken files")
    sys.exit(1)

try:
    opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
    print(v)
    sys.exit(1)

# Mode flags set by the options below.
verbose = quiet = verify = 0

for o, a in opt:
    if o == "-f":
        # List registered formats and exit immediately (no files processed).
        Image.init()
        id = sorted(Image.ID)
        print("Supported formats:")
        for i in id:
            print(i, end=' ')
        sys.exit(1)
    elif o == "-i":
        verbose = 1
    elif o == "-q":
        quiet = 1
    elif o == "-v":
        verify = 1
    elif o == "-D":
        Image.DEBUG += 1
def globfix(files):
    """Expand wildcard patterns on Windows, where the shell does not do it.

    On other platforms the argument list is returned untouched.
    """
    if sys.platform != "win32":
        return files
    expanded = []
    for name in files:
        if glob.has_magic(name):
            expanded.extend(glob.glob(name))
        else:
            expanded.append(name)
    return expanded
# Identify each file: print format, size and mode; optionally verify headers.
for file in globfix(args):
    try:
        im = Image.open(file)
        print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
        if verbose:
            print(im.info, im.tile, end=' ')
        print()
        if verify:
            try:
                im.verify()
            except:
                if not quiet:
                    print("failed to verify image", end=' ')
                    print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
    except IOError as v:
        if not quiet:
            print(file, "failed:", v)
    except:
        # Lazy import: traceback is only needed on unexpected failures.
        import traceback
        if not quiet:
            print(file, "failed:", "unexpected error")
            traceback.print_exc(file=sys.stdout)
|
[
"muhammadraza0047@gmail.com"
] |
muhammadraza0047@gmail.com
|
bee9bac253802fcac7d2c1eb66160555ca7defa8
|
3345eebefad6f4348f29cdec2f2d59a89c843861
|
/mac/shop/migrations/0003_contact.py
|
ef5562ab9bfba669d4149c8dd469d07e2bea2681
|
[] |
no_license
|
AyushiiJain/My-Ecommerce-Website
|
fc8e9ccc2a106f2341e1fcb5b718679e2fd7b3bd
|
9bdfc9bbd1c7d4573db2d9138b9996abe2f5c1ad
|
refs/heads/master
| 2023-05-17T09:24:23.932158
| 2021-06-09T08:54:36
| 2021-06-09T08:54:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
# Generated by Django 3.2.2 on 2021-05-11 07:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.2.2): adds the Contact model for
    storing messages submitted via the shop's contact form."""

    dependencies = [
        ('shop', '0002_auto_20210508_0633'),
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('msg_id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50)),
                ('email', models.CharField(default='', max_length=70)),
                ('phone', models.CharField(default='', max_length=70)),
                ('desc', models.CharField(default='', max_length=500)),
            ],
        ),
    ]
|
[
"msinghal3102@gmail.com"
] |
msinghal3102@gmail.com
|
2e67cfa924d6e66b5a606f3dd3c8371825938f80
|
d12c4bb550c71bb3eaad5dbea099fa79d8001316
|
/CodingNinjasOOPS/MethodOverrriding2.py
|
ba5f63e34530b739d0a728f0ae2216e44eaac359
|
[] |
no_license
|
nilesh7808/Coding-Ninjas-DSA-Programs
|
8fdd980424e8b21026825ff5d444df7d70545629
|
bfdc2bd9b317721d30b8109e6fb37c61bfc577e8
|
refs/heads/master
| 2023-04-29T02:54:01.247715
| 2021-05-22T03:34:34
| 2021-05-22T03:34:34
| 369,706,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
class vehicle:
    """A vehicle that remembers its color and can report it."""

    def __init__(self, color):
        self.color = color

    def print(self):
        """Write the color report to stdout."""
        label = "The color of Car is:"
        print(label, self.color)
class Car(vehicle):
    """Overrides print() but still delegates to the parent (overriding demo)."""

    def print(self):
        super().print() # Now it will look to the parent class
        print("This is Pretty Good ")
# Demo: the overridden method runs and delegates upward via super().
c = Car("Black")
c.print()
|
[
"rajnilesh088@gmail.com"
] |
rajnilesh088@gmail.com
|
bebb93feff90d5c700d2dac9c625f44c6b54a245
|
6732dce33ccc8d3912c7dd9bb5a029988586a649
|
/samples/apps/search_organize_users.py
|
0e3db637d8cf7cb4f2b9ef4ce7081cabd4862e0d
|
[
"Apache-2.0"
] |
permissive
|
hamada2029/gdata-python3
|
8a0d3cb53b707b7ad2f826a486df254c813e7463
|
c1028f6567b480908b90848523bebaf78e6b49f7
|
refs/heads/master
| 2021-01-22T12:53:28.196826
| 2014-11-30T07:05:30
| 2014-11-30T07:05:30
| 46,613,040
| 1
| 0
| null | 2015-11-21T11:44:20
| 2015-11-21T11:44:19
| null |
UTF-8
|
Python
| false
| false
| 5,004
|
py
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Search users with a given pattern and move to a new organization.
Sample to move users to a new organization based on a pattern using
the User Provisioning and Organization Provisioning APIs.
Usage:
$ python search_organize_users.py
"""
__author__ = 'Shraddha Gupta <shraddhag@google.com>'
from optparse import OptionParser
import re
from gdata.apps.client import AppsClient
from gdata.apps.organization.client import OrganizationUnitProvisioningClient
import gdata.gauth
# Maximum number of users moved per Organization API call.
BATCH_SIZE = 25
# OAuth scopes for the User Provisioning and Organization Provisioning APIs.
SCOPES = ('https://apps-apis.google.com/a/feeds/user/ '
          'https://apps-apis.google.com/a/feeds/policies/')
USER_AGENT = 'SearchAndOrganizeUsers'
class SearchAndOrganizeUsers(object):
    """Search users with a pattern and move them to organization."""

    def __init__(self, client_id, client_secret, domain):
        """Create a new SearchAndOrganizeUsers object configured for a domain.

        Args:
          client_id: [string] The clientId of the developer.
          client_secret: [string] The clientSecret of the developer.
          domain: [string] The domain on which the functions are to be performed.
        """
        self.client_id = client_id
        self.client_secret = client_secret
        self.domain = domain

    def AuthorizeClient(self):
        """Authorize the clients for making API requests."""
        # Manual (console) OAuth2 flow: the operator visits the URL and
        # pastes the verification code back in.
        self.token = gdata.gauth.OAuth2Token(
            client_id=self.client_id, client_secret=self.client_secret,
            scope=SCOPES, user_agent=USER_AGENT)
        uri = self.token.generate_authorize_url()
        print('Please visit this URL to authorize the application:')
        print(uri)
        # Get the verification code from the standard input.
        code = input('What is the verification code? ').strip()
        self.token.get_access_token(code)
        self.user_client = AppsClient(domain=self.domain, auth_token=self.token)
        self.org_client = OrganizationUnitProvisioningClient(
            domain=self.domain, auth_token=self.token)

    def OrganizeUsers(self, customer_id, org_unit_path, pattern):
        """Find users with given pattern and move to an organization in batches.

        Args:
          customer_id: [string] customer_id to make calls to Organization API.
          org_unit_path: [string] path of organization unit where users are moved
          pattern: [regex object] regex to match with users
        """
        users = self.user_client.RetrieveAllUsers()
        matched_users = []
        # Search the users that match given pattern
        # (matched against username, given name, or family name).
        for user in users.entry:
            if (pattern.search(user.login.user_name) or
                    pattern.search(user.name.given_name) or
                    pattern.search(user.name.family_name)):
                user_email = '%s@%s' % (user.login.user_name, self.domain)
                matched_users.append(user_email)
        # Maximum BATCH_SIZE users can be moved at one time
        # Split users into batches of BATCH_SIZE and move in batches
        for i in range(0, len(matched_users), BATCH_SIZE):
            batch_to_move = matched_users[i: i + BATCH_SIZE]
            self.org_client.MoveUserToOrgUnit(customer_id,
                                              org_unit_path, batch_to_move)
        print(('Number of users moved = %d' % len(matched_users)))

    def Run(self, org_unit_path, regex):
        """Authorize, resolve the customer id, and organize matching users."""
        self.AuthorizeClient()
        customer_id_entry = self.org_client.RetrieveCustomerId()
        customer_id = customer_id_entry.customer_id
        pattern = re.compile(regex)
        print(('Moving Users with the pattern %s' % regex))
        self.OrganizeUsers(customer_id, org_unit_path, pattern)
def main():
    """Parse the required command-line flags and run the sample."""
    usage = 'Usage: %prog [options]'
    parser = OptionParser(usage=usage)
    parser.add_option('--DOMAIN',
                      help='Google Apps Domain, e.g. "domain.com".')
    parser.add_option('--CLIENT_ID',
                      help='Registered CLIENT_ID of Domain.')
    parser.add_option('--CLIENT_SECRET',
                      help='Registered CLIENT_SECRET of Domain.')
    parser.add_option('--ORG_UNIT_PATH',
                      help='Orgunit path of organization where to move users.')
    parser.add_option('--PATTERN',
                      help='Pattern to search in users')
    (options, args) = parser.parse_args()

    # All five flags are required; print help and bail if any is missing.
    if not (options.DOMAIN and options.CLIENT_ID and options.CLIENT_SECRET
            and options.ORG_UNIT_PATH and options.PATTERN):
        parser.print_help()
        return

    sample = SearchAndOrganizeUsers(options.CLIENT_ID, options.CLIENT_SECRET,
                                    options.DOMAIN)
    sample.Run(options.ORG_UNIT_PATH, options.PATTERN)


if __name__ == '__main__':
    main()
|
[
"jvarshney20@gmail.com"
] |
jvarshney20@gmail.com
|
4343b66f7f4aa4b421a9916100526490275a5e63
|
e4de060c295fba0d0386d0a7678e744ced18b920
|
/build/move_base_flex/mbf_costmap_core/catkin_generated/pkg.installspace.context.pc.py
|
d99db631ffe34a45fa7eeeca31b56d21fdb231f6
|
[] |
no_license
|
jbenzhhn/carla_hhn
|
af9497d01ce1f34ee0016ca660a0cc5af5f71be8
|
abd803bcdd506641c8152ec994468518ea809f1b
|
refs/heads/master
| 2023-04-05T10:50:28.934452
| 2021-04-07T14:31:41
| 2021-04-07T14:31:41
| 355,151,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Values below are baked in at CMake configure time for the mbf_costmap_core package.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "std_msgs;geometry_msgs;mbf_abstract_core;mbf_utility;tf;costmap_2d;nav_core".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "mbf_costmap_core"
PROJECT_SPACE_DIR = "/home/automotive/catkin_ws/install"
PROJECT_VERSION = "0.3.4"
|
[
"johannes.benz@hs-heilbronn.de"
] |
johannes.benz@hs-heilbronn.de
|
011004d9088a8eae9ee9471922e947f0df13e0e9
|
1d7ae7f6e7a0df98d92f9ec5f277752d14924a94
|
/fake-very-small-test/tmp/Environment_jq.py
|
5e6d4b61381271a2d593cd1c8cc162dbd1a0feb7
|
[] |
no_license
|
lindsaymorgan/Mobike-Bike-Sharing-System-Dispatch-Optimization-Using-Reinforcement-Learning
|
1e6b1aa3c64d2ff2e31b5d9dcc4abdc11e10679c
|
6c8a329fae5c2ac8db45a3d8c55b308aae8ad804
|
refs/heads/master
| 2023-05-02T07:39:49.089459
| 2021-05-23T02:26:14
| 2021-05-23T02:26:14
| 279,467,461
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,717
|
py
|
import numpy as np
from copy import deepcopy
import scipy.stats as stats
class State:
    """Snapshot of the bike-sharing system at one time step.

    ``state`` is a flat array laid out as:
    [bikes per region | region index of each car | bikes loaded on each car].

    Bug fix: ``step()`` previously omitted ``capacity_each_step`` when
    constructing the successor State, shifting reward/t/reward_sum/R into the
    wrong positional parameters of ``__init__``.
    """

    def __init__(self, state, region_count, car_num, out_nums, in_nums, capacity_each_step, reward=0, t=0, reward_sum=0, R=None):
        self.region_count = region_count
        self.car_num = car_num
        self.state = state
        self.out_nums = out_nums    # out_nums[t, r]: demand leaving region r at step t
        self.in_nums = in_nums      # in_nums[t, r]: bikes arriving at region r at step t
        self.capacity_each_step = capacity_each_step
        self.reward = reward
        self.reward_sum = reward_sum
        self._R = R                 # cached reward; cleared when region_state changes
        self.t = t
        self.__hash = None          # lazily computed hash of the state tuple
        self.feasible_actions = np.zeros((self.region_count, 2 * self.capacity_each_step + 1))

    def get_hash(self):
        """Lazily compute (and cache) a hash of the full state vector."""
        if not self.__hash:
            self.__hash = tuple(self.state).__hash__()
        return self.__hash

    def __repr__(self):
        return str(tuple(self.state))

    @property
    def region_state(self):
        """View of the per-region bike counts (first region_count entries)."""
        return self.state[:self.region_count]

    @region_state.setter
    def region_state(self, value):
        # Region stock changed: invalidate the cached reward and hash.
        self._R = None
        self.__hash = None
        self.state[:self.region_count] = value

    @property
    def car_pos(self):
        """View of each car's current region index."""
        return self.state[self.region_count:self.region_count + self.car_num]

    @car_pos.setter
    def car_pos(self, value):
        # NOTE(review): unlike region_state, this does not invalidate the
        # cached hash even though get_hash() covers the whole vector.
        self.state[self.region_count:self.region_count + self.car_num] = value

    @property
    def bike_on_car(self):
        """View of the number of bikes loaded on each car (last car_num entries)."""
        return self.state[self.region_count + self.car_num:]

    @bike_on_car.setter
    def bike_on_car(self, value):
        self.state[self.region_count + self.car_num:] = value

    @property
    def R(self):
        """Reward: mean Poisson probability that each region's stock (after
        this step's arrivals) covers next step's demand.

        :return: float in [0, 1]
        """
        # Temporarily apply arrivals, score against out_nums[t+1], then
        # restore region_state so the snapshot is unchanged.
        self.region_state += self.in_nums[self.t, ]
        raw_R = np.mean(
            [stats.poisson.cdf(i, j) for i, j in zip(self.region_state, self.out_nums[self.t + 1, ])])
        self.region_state -= self.in_nums[self.t]
        self._R = raw_R
        return raw_R

    def out_stage(self):
        """Before dispatch moves: customers ride bikes out; stock floors at zero."""
        self.region_state -= self.out_nums[self.t, ]
        self.region_state[self.region_state < 0] = 0
        return self.region_state

    def in_stage(self):
        """After dispatch moves: customers return bikes; advance the clock."""
        self.region_state += self.in_nums[self.t, ]
        self.t += 1

    def check_feasible(self, current_region, current_car, move) -> bool:
        """Return True for a feasible action, False otherwise.

        :param current_region: index of region
        :param current_car: index of car
        :param move: bikes to unload (>0, into the region) or load (<0, onto
            the car); expected within -capacity_each_step..capacity_each_step
        """
        # Feasible iff the region's stock stays non-negative and the car
        # actually carries enough bikes to unload.
        if move + self.region_state[current_region] >= 0 and move <= self.bike_on_car[current_car]:
            return True
        else:
            return False

    def update_feasible_action(self, current_car):
        """Fill self.feasible_actions for *current_car* over all (region, move) pairs.

        Negative moves index the table from the end (numpy wrap-around), which
        maps -capacity..capacity onto distinct columns of width 2*capacity+1.
        """
        for region in range(self.region_count):
            for move in range(-self.capacity_each_step, self.capacity_each_step + 1):
                self.feasible_actions[region, move] = self.check_feasible(region, current_car, move)

    def step(self, current_region, current_car, move, prev_state_R=None):
        """Apply a load/unload action and return the successor State.

        Infeasible moves leave the stock/car state unchanged (only the reward
        bookkeeping is refreshed).

        :param current_region: region the car acts on
        :param current_car: acting car index
        :param move: bikes to unload (>0) or load (<0)
        :param prev_state_R: if given, report the reward as the delta from it
        :return: a new State; self is not mutated
        """
        # Fix: pass self.capacity_each_step so reward/t/reward_sum/R land in
        # their intended keyword slots instead of being shifted by one.
        new_state = State(deepcopy(self.state), self.region_count,
                          self.car_num, self.out_nums, self.in_nums,
                          self.capacity_each_step,
                          self.reward, self.t, self.reward_sum, self.R)
        if move + new_state.region_state[current_region] >= 0 and move <= new_state.bike_on_car[current_car]:
            new_state.region_state[current_region] += move
            # Update the truck's load and position.
            new_state.bike_on_car[current_car] -= move
            new_state.car_pos[current_car] = current_region
        new_state.reward = new_state.R
        if prev_state_R:
            new_state.reward -= prev_state_R
        new_state.reward_sum += new_state.reward
        return new_state
class Env(object):
    """Bike-sharing dispatch environment built from a customer-demand table."""

    def __init__(self, initial_region_state, capacity_each_step, max_episode, car_count, need):
        """
        :param initial_region_state: List, number of bikes in each region, e.g. [15, 15, 15, 15]
        :param capacity_each_step: maximum number of load/unload bikes each step (only one of load/unload per step)
        :param max_episode: max time
        :param car_count: number of cars
        :param need: external change driven by customers
        """
        self.initial_region_state = initial_region_state
        self.region_count = len(initial_region_state)
        self.capacity_each_step = capacity_each_step
        self.car_num = car_count
        # length of one-hot action vector: for each region, each car can load/unload maximum transport_capacity of bike
        self.action_dim = self.region_count * (2 * self.capacity_each_step + 1)
        # length of state: number of bike at each region + location of each car + number of bike on each car
        self.obs_dim = self.region_count + 2 * self.car_num
        # NOTE(review): `need` looks like a pandas DataFrame with per-step demand
        # columns named "0", "1", ... — confirm against the caller.
        self.start_region = need.groupby('start_region')
        self.end_region = need.groupby('end_region')
        self.t_index = {i: str(i) for i in range(max_episode + 1)}
        # out_nums[t, r] / in_nums[t, r]: total departures/arrivals per region per step.
        self.out_nums = np.array([need.groupby('start_region')[str(i)].agg(np.sum) for i in range(max_episode + 1)])
        self.in_nums = np.array([need.groupby('end_region')[str(i)].agg(np.sum) for i in range(max_episode + 1)])
        # current episode
        self.t = 0

    def new_state(self):
        """
        Initialize state: configured stock, all cars at region 0 with no bikes loaded.
        :return: State
        """
        # NOTE(review): relies on list concatenation — initial_region_state must
        # be a plain list, not an ndarray.
        state = State(np.asarray(self.initial_region_state + [0] * self.car_num * 2), self.region_count,
                      self.car_num, self.out_nums, self.in_nums, self.capacity_each_step)
        return state
|
[
"lindsaymarymorgan@gmail.com"
] |
lindsaymarymorgan@gmail.com
|
3f9e557501df5b989853196d4359ca663f35ee37
|
266b3911034ffe37f6c1c88ae4061f5792676c8b
|
/scripts/irods/logging_infrastructure.py
|
d368b224ecfbbe73002eb15b15cd538eb3162081
|
[
"BSD-3-Clause"
] |
permissive
|
trel/irods
|
cca485264f4189cb9fc9ce63f204faf5ff9f1ff5
|
dc462b0e90f3d715546329570f5950dd425dc489
|
refs/heads/master
| 2022-05-20T16:51:46.864969
| 2021-10-04T17:55:26
| 2021-10-04T17:59:34
| 73,592,300
| 1
| 0
|
NOASSERTION
| 2021-10-04T17:59:35
| 2016-11-13T03:03:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,536
|
py
|
#! /usr/bin/python
from __future__ import print_function
import os
import sys
import platform
import subprocess
import shutil
import logging
from .log import register_tty_handler
def rsyslog_config_path():
    """Path of the iRODS rsyslog drop-in configuration file."""
    return '/etc/rsyslog.d/00-irods.conf'
def logrotate_config_path():
    """Path of the iRODS logrotate configuration file."""
    return '/etc/logrotate.d/irods'
def setup_rsyslog_and_logrotate(register_tty=True):
    """Install and activate the iRODS rsyslog and logrotate configurations.

    Copies the packaged config files into /etc when missing and restarts the
    rsyslog daemon so the new configuration is loaded. Requires privileges to
    write under /etc and restart services.

    Fix: platform.linux_distribution() was removed in Python 3.8; detection
    now falls back to /etc/os-release via _linux_distribution_name().

    :param register_tty: also echo INFO/WARNING log records to stdout
    """
    l = logging.getLogger(__name__)
    l.setLevel(logging.INFO)
    if register_tty:
        register_tty_handler(sys.stdout, logging.INFO, logging.WARNING)

    # Copy rsyslog configuration file into place if it does not exist
    # and restart the rsyslog daemon so that the configuration is loaded.
    dst = rsyslog_config_path()
    if not os.path.isfile(dst):
        l.info('Configuring rsyslog ...')
        shutil.copyfile('/var/lib/irods/packaging/irods.rsyslog', dst)
        l.info('done.')
        l.info('Restarting rsyslog ...')
        # Ubuntu historically used the SysV 'service' wrapper; everything else systemctl.
        if 'Ubuntu' == _linux_distribution_name():
            subprocess.call(['service', 'rsyslog', 'restart'])
        else:
            subprocess.call(['systemctl', 'restart', 'rsyslog'])
        l.info('done.')
    else:
        l.info('rsyslog already configured.')

    # Copy logrotate configuration file into place if it does not exist.
    dst = logrotate_config_path()
    if not os.path.isfile(dst):
        l.info('Configuring logrotate ...')
        shutil.copyfile('/var/lib/irods/packaging/irods.logrotate', dst)
        l.info('done.')
    else:
        l.info('logrotate already configured.')


def _linux_distribution_name():
    """Best-effort Linux distribution name.

    Uses platform.linux_distribution() where it still exists (Python < 3.8),
    otherwise parses NAME= from /etc/os-release. Returns '' when unknown.
    """
    try:
        return platform.linux_distribution()[0]
    except AttributeError:
        try:
            with open('/etc/os-release') as f:
                for line in f:
                    if line.startswith('NAME='):
                        return line.split('=', 1)[1].strip().strip('"')
        except IOError:
            pass
        return ''
|
[
"terrellrussell@gmail.com"
] |
terrellrussell@gmail.com
|
356f23dcc0f34092b262caed148b54b7583618e5
|
ace7e98719c756cff4e4baf7c92e546cbc0b92ca
|
/LeetCode/firstMissingPositive.py
|
37817e06877b8d07f503696fc1fe9d2f340a9bb4
|
[] |
no_license
|
armsky/OnlineJudge
|
f4159326c92a794695cca8a162280fef32f95a2a
|
c658b78c920aa94c25b3d932cd7e46c0df82b19a
|
refs/heads/master
| 2020-04-15T01:21:18.158217
| 2015-12-11T03:05:28
| 2015-12-11T03:05:28
| 21,989,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 867
|
py
|
"""
Given an unsorted integer array, find the first missing positive integer.
For example,
Given [1,2,0] return 3,
and [3,4,-1,1] return 2.
Your algorithm should run in O(n) time and uses constant space.
"""
class Solution:
# @param A, a list of integers
# @return an integer
def firstMissingPositive(self, A):
for i in range(len(A)):
while A[i] != i+1:
if A[i] <= 0 or A[i] > len(A) or A[i] == A[A[i]-1]:
break
else:
temp = A[A[i]-1]
A[A[i]-1] = A[i]
A[i] = temp
print A
for i in range(len(A)):
if A[i] != i+1:
return i+1
return len(A)+1
solution = Solution()
print solution.firstMissingPositive([1,2,0])
print solution.firstMissingPositive([3,4,0,2])
|
[
"armsky1988@gmail.com"
] |
armsky1988@gmail.com
|
ca9dd99a19a954466f615b47a1b8dbff97ea320d
|
bc7ae358f8932d2bc5358c70f6c1edd92905aeb9
|
/transparent test.py
|
2cb32e0157490b5231ac64cbc8bc1f60fe954ddd
|
[] |
no_license
|
Aaron-Hsiao1/Python_Sylvan
|
415b383bebff619062ee6ef70339ec6a1fe43965
|
b293e36caf215be7721cf869b494d0fd4860e5b2
|
refs/heads/master
| 2023-07-19T19:04:55.211094
| 2021-08-21T19:34:03
| 2021-08-21T19:34:03
| 323,982,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
import pygame
pygame.init()
X = 570
Y = 900
screen_width = 1000
screen_height = 1000
Width = 30
Height = 30
Speed = 8
looping = True
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption("blank")
while looping:
pygame.time.delay(5)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
# def blit_alpha(target, source, opacity):
# x = 100
# y = 100
# temp = pygame.Surface((source.get_width(), source.get_height())).convert()
# temp.blit(target, (-x, -y))
# temp.blit(source, (100, 100))
# temp.set_alpha(50)
# target.blit(temp, (l00,100))
keys = pygame.key.get_pressed()
if keys[pygame.K_UP]:
Y -= Speed
if keys[pygame.K_DOWN]:
Y += Speed
if keys[pygame.K_LEFT]:
X -= Speed
if keys[pygame.K_RIGHT]:
X += Speed
screen.fill((0, 0, 0))
pygame.draw.rect(screen, (0,255,0), (X, Y, Width, Height))
s = pygame.Surface((500,500)) # the size of your rect
s.set_alpha(150) # alpha level
s.fill((255,255,255)) # this fills the entire surface
screen.blit(s, (250,250)) # (0,0) are the top-left coordinates
pygame.display.update()
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
8ffaa6d47074616ebc145529b8fae389dbe8f338
|
fb55adfc901176c1bae6914b51c0eedc7eab44a3
|
/tasks.py
|
79acd1db41870a008b6336a3f9a68773cdc68847
|
[
"MIT"
] |
permissive
|
yijiangh/coop_assembly
|
b82ab7b17b956ff33beafe329a48c083cfb7f940
|
71108b0639323cf3d996d63b0f702d45f4d60d67
|
refs/heads/master
| 2023-04-03T07:36:36.444159
| 2020-02-05T16:40:08
| 2020-02-05T16:40:08
| 228,839,363
| 8
| 0
|
MIT
| 2019-12-18T12:51:08
| 2019-12-18T12:51:06
| null |
UTF-8
|
Python
| false
| false
| 6,917
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import codecs
import contextlib
import glob
import os
import sys
from shutil import rmtree
from xml.dom.minidom import parse
from invoke import Collection, Exit, task
# For automatic doc deployment
# from paramiko import SSHClient
# from paramiko.client import AutoAddPolicy
# from scp import SCPClient
try:
input = raw_input
except NameError:
pass
BASE_FOLDER = os.path.dirname(__file__)
PACKAGE_NAME = 'coop_assembly'
class Log(object):
def __init__(self, out=sys.stdout, err=sys.stderr):
self.out = out
self.err = err
def flush(self):
self.out.flush()
self.err.flush()
def write(self, message):
self.flush()
self.out.write(message + '\n')
self.out.flush()
def info(self, message):
self.write('[INFO] %s' % message)
def warn(self, message):
self.write('[WARN] %s' % message)
log = Log()
def confirm(question):
while True:
response = input(question).lower().strip()
if not response or response in ('n', 'no'):
return False
if response in ('y', 'yes'):
return True
print('Focus! It is either (y)es or (n)o', file=sys.stderr)
@task(default=True)
def help(ctx):
"""Lists available tasks and usage."""
ctx.run('invoke --list')
log.write('Use "invoke -h <taskname>" to get detailed help for a task.')
@task(help={
'docs': 'True to generate documentation, otherwise False',
'bytecode': 'True to clean up compiled python files, otherwise False.',
'builds': 'True to clean up build/packaging artifacts, otherwise False.'})
def clean(ctx, docs=True, bytecode=True, builds=True):
"""Cleans the local copy from compiled artifacts."""
with chdir(BASE_FOLDER):
if builds:
ctx.run('python setup.py clean')
if bytecode:
for root, dirs, files in os.walk(BASE_FOLDER):
for f in files:
if f.endswith('.pyc'):
os.remove(os.path.join(root, f))
if '.git' in dirs:
dirs.remove('.git')
folders = []
if docs:
folders.append('docs/_build/')
folders.append('dist/')
if bytecode:
folders.append('src/{}/__pycache__'.format(PACKAGE_NAME))
if builds:
folders.append('build/')
folders.append('src/{}.egg-info/'.format(PACKAGE_NAME))
for folder in folders:
rmtree(os.path.join(BASE_FOLDER, folder), ignore_errors=True)
@task(help={
'rebuild': 'True to clean all previously built docs before starting, otherwise False.',
'doctest': 'True to run doctest snippets, otherwise False.',
# 'check_links': 'True to check all web links in docs for validity, otherwise False.'
})
def docs(ctx, rebuild=False, doctest=False): #, check_links=False):
"""Builds package's HTML documentation."""
with chdir(BASE_FOLDER):
if rebuild:
clean(ctx)
if doctest:
ctx.run('sphinx-build -b doctest docs dist/docs/{}'.format(PACKAGE_NAME))
ctx.run('sphinx-build -b html docs dist/docs/{}'.format(PACKAGE_NAME))
# if check_links:
# ctx.run('sphinx-build -b linkcheck -c docs . dist/docs/{}'.format(PACKAGE_NAME))
# @task()
# def deploy_docs(ctx, scp_server='darch.ethz.ch'):
# """Deploy docs to the documentation server.
#
# Published to: xxx address"""
#
# DOCS_PATH = os.path.join(BASE_FOLDER, 'dist', 'docs', PACKAGE_NAME)
# with chdir(DOCS_PATH):
# scp_username = os.environ.get('SCP_USERNAME')
# scp_password = os.environ.get('SCP_PASSWORD')
#
# print('Connecting to {} as {}...'.format(scp_server, scp_username))
#
# with SSHClient() as ssh:
# ssh.set_missing_host_key_policy(AutoAddPolicy)
# ssh.connect(scp_server, username=scp_username, password=scp_password)
#
# scp = SCPClient(ssh.get_transport())
# scp.put(DOCS_PATH, recursive=True, remote_path='htdocs')
#
# print('Done')
@task()
def check(ctx):
"""Check the consistency of documentation, coding style and a few other things."""
with chdir(BASE_FOLDER):
log.write('Checking ReStructuredText formatting...')
ctx.run('python setup.py check --strict --metadata --restructuredtext')
# log.write('Running flake8 python linter...')
# ctx.run('flake8 src setup.py')
# log.write('Checking python imports...')
# ctx.run('isort --check-only --diff --recursive src tests setup.py')
# log.write('Checking MANIFEST.in...')
# ctx.run('check-manifest')
@task(help={
'checks': 'True to run all checks before testing, otherwise False.',
'build': 'test build, default to false',
})
def test(ctx, checks=True, build=False):
"""Run all tests."""
with chdir(BASE_FOLDER):
if checks:
check(ctx)
if build:
log.write('Checking build')
ctx.run('python setup.py clean --all sdist bdist_wheel') #bdist_wheel
if sys.platform == 'win32':
ctx.run('powershell -Command "& pip install --verbose $(ls dist/*.tar.gz | % {$_.FullName})"')
else:
ctx.run('pip install --verbose dist/*.tar.gz')
log.write('Running pytest')
ctx.run('pytest --doctest-modules --cov={} tests --cov-report term-missing'.format(PACKAGE_NAME))
@task(help={
'release_type': 'Type of release follows semver rules. Must be one of: major, minor, patch.',
'bump_version': 'Bumpversion, true or false, default to false'})
def release(ctx, release_type, bump_version=False):
"""Releases the project in one swift command!"""
if release_type not in ('patch', 'minor', 'major'):
raise Exit('The release type parameter is invalid.\nMust be one of: major, minor, patch')
with chdir(BASE_FOLDER):
if bump_version:
ctx.run('bumpversion %s --verbose' % release_type)
ctx.run('invoke docs test')
ctx.run('python setup.py clean --all sdist bdist_wheel')
if confirm('You are about to upload the release to pypi.org. Are you sure? [y/N]'):
files = ['dist/*.whl', 'dist/*.gz', 'dist/*.zip']
dist_files = ' '.join([pattern for f in files for pattern in glob.glob(f)])
if len(dist_files):
ctx.run('twine upload --skip-existing %s' % dist_files)
else:
raise Exit('No files found to release')
else:
raise Exit('Aborted release')
@contextlib.contextmanager
def chdir(dirname=None):
current_dir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(current_dir)
|
[
"yijiangh@mit.edu"
] |
yijiangh@mit.edu
|
fb3e5fb2d325fe624ac277cba857887429060102
|
325fde42058b2b82f8a4020048ff910cfdf737d7
|
/src/storage-blob-preview/azext_storage_blob_preview/vendored_sdks/azure_storage_blob/v2019_12_12/_shared/authentication.py
|
b11dc57578087edc183689b24d68b515df7a6a00
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
ebencarek/azure-cli-extensions
|
46b0d18fe536fe5884b00d7ffa30f54c7d6887d1
|
42491b284e38f8853712a5af01836f83b04a1aa8
|
refs/heads/master
| 2023-04-12T00:28:44.828652
| 2021-03-30T22:34:13
| 2021-03-30T22:34:13
| 261,621,934
| 2
| 5
|
MIT
| 2020-10-09T18:21:52
| 2020-05-06T01:25:58
|
Python
|
UTF-8
|
Python
| false
| false
| 5,199
|
py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import sys
try:
from urllib.parse import urlparse, unquote
except ImportError:
from urlparse import urlparse # type: ignore
from urllib2 import unquote # type: ignore
try:
from yarl import URL
except ImportError:
pass
try:
from azure.core.pipeline.transport import AioHttpTransport
except ImportError:
AioHttpTransport = None
from azure.core.exceptions import ClientAuthenticationError
from azure.core.pipeline.policies import SansIOHTTPPolicy
from . import sign_string
logger = logging.getLogger(__name__)
# wraps a given exception with the desired exception type
def _wrap_exception(ex, desired_type):
msg = ""
if ex.args:
msg = ex.args[0]
if sys.version_info >= (3,):
# Automatic chaining in Python 3 means we keep the trace
return desired_type(msg)
# There isn't a good solution in 2 for keeping the stack trace
# in general, or that will not result in an error in 3
# However, we can keep the previous error type and message
# TODO: In the future we will log the trace
return desired_type('{}: {}'.format(ex.__class__.__name__, msg))
class AzureSigningError(ClientAuthenticationError):
"""
Represents a fatal error when attempting to sign a request.
In general, the cause of this exception is user error. For example, the given account key is not valid.
Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info.
"""
# pylint: disable=no-self-use
class SharedKeyCredentialPolicy(SansIOHTTPPolicy):
def __init__(self, account_name, account_key):
self.account_name = account_name
self.account_key = account_key
super(SharedKeyCredentialPolicy, self).__init__()
@staticmethod
def _get_headers(request, headers_to_sign):
headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value)
if 'content-length' in headers and headers['content-length'] == '0':
del headers['content-length']
return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
@staticmethod
def _get_verb(request):
return request.http_request.method + '\n'
def _get_canonicalized_resource(self, request):
uri_path = urlparse(request.http_request.url).path
try:
if isinstance(request.context.transport, AioHttpTransport) or \
isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport):
uri_path = URL(uri_path)
return '/' + self.account_name + str(uri_path)
except TypeError:
pass
return '/' + self.account_name + uri_path
@staticmethod
def _get_canonicalized_headers(request):
string_to_sign = ''
x_ms_headers = []
for name, value in request.http_request.headers.items():
if name.startswith('x-ms-'):
x_ms_headers.append((name.lower(), value))
x_ms_headers.sort()
for name, value in x_ms_headers:
if value is not None:
string_to_sign += ''.join([name, ':', value, '\n'])
return string_to_sign
@staticmethod
def _get_canonicalized_resource_query(request):
sorted_queries = list(request.http_request.query.items())
sorted_queries.sort()
string_to_sign = ''
for name, value in sorted_queries:
if value is not None:
string_to_sign += '\n' + name.lower() + ':' + unquote(value)
return string_to_sign
def _add_authorization_header(self, request, string_to_sign):
try:
signature = sign_string(self.account_key, string_to_sign)
auth_string = 'SharedKey ' + self.account_name + ':' + signature
request.http_request.headers['Authorization'] = auth_string
except Exception as ex:
# Wrap any error that occurred as signing error
# Doing so will clarify/locate the source of problem
raise _wrap_exception(ex, AzureSigningError)
def on_request(self, request):
string_to_sign = \
self._get_verb(request) + \
self._get_headers(
request,
[
'content-encoding', 'content-language', 'content-length',
'content-md5', 'content-type', 'date', 'if-modified-since',
'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
]
) + \
self._get_canonicalized_headers(request) + \
self._get_canonicalized_resource(request) + \
self._get_canonicalized_resource_query(request)
self._add_authorization_header(request, string_to_sign)
#logger.debug("String_to_sign=%s", string_to_sign)
|
[
"noreply@github.com"
] |
ebencarek.noreply@github.com
|
02356b4c95f1fe3d5b2b19948af77d16597c26f6
|
dffb9a8855adecc4bd4d21b9168a4e9bdc75e508
|
/arrandmatrix/q9.py
|
e71297c66dbcfdb2e98b4e05b14779bcc6c7eb44
|
[
"MIT"
] |
permissive
|
ResolveWang/algorithm_qa
|
95278c4459e38c55225304210770efb61d934fcc
|
a0cb649acaf8cf9d808272bc15f1951f2c05c828
|
refs/heads/master
| 2021-07-17T09:32:01.845815
| 2018-08-13T13:54:42
| 2018-08-13T13:54:42
| 100,107,109
| 90
| 29
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,873
|
py
|
"""
问题描述:给定一个排序数组arr和整数k,不重复打印arr中所有相加和为k的不降序二元组。
例如,arr=[-8, -4, -3, 0, 1, 2, 4, 5, 8, 9], k=10,打印结果为:
1,9
2,8
补充题目:
给定排序数组arr和整数k,不重复打印arr中所有相加和为k的不降序三元组。
例如,arr=[-8, -4, -3, 0, 1, 2, 4, 5, 8, 9], k=10,打印结果为:
-4, 5, 9
-3, 4, 9
-3, 5, 8
0, 1, 9
0, 2, 8
1, 4, 5
"""
class KnumOfSum:
@classmethod
def get_two_tuple_of_sum(cls, arr, k, print_value=False):
if not arr or len(arr) == 1:
return
left = 0
right = len(arr) - 1
res = []
while left < right:
left_value = arr[left]
right_value = arr[right]
if left_value + right_value == k:
if left > 0 and arr[left-1] == arr[left]:
pass
else:
left += 1
right -= 1
if print_value:
print(left_value, right_value)
res.append((left_value, right_value))
elif left_value + right_value < k:
left += 1
else:
right -= 1
return res
@classmethod
def get_three_tuple_of_sum(cls, arr, k):
if not arr or len(arr) < 3:
return
for i in range(len(arr)):
new_k = k - arr[i]
if i > 0 and arr[i] == arr[i-1]:
continue
else:
res = cls.get_two_tuple_of_sum(arr[i+1:], new_k)
if res:
for x, y in res:
print(arr[i], x, y)
if __name__ == '__main__':
my_arr = [-8, -4, -3, 0, 1, 2, 2, 4, 5, 8, 9]
KnumOfSum.get_two_tuple_of_sum(my_arr, 10, True)
KnumOfSum.get_three_tuple_of_sum(my_arr, 10)
|
[
"1796246076@qq.com"
] |
1796246076@qq.com
|
c533a3df7e8cc711386467ffd3b763470cc473f5
|
41e3065d6f29449251f1cc79cb340fa273ac5c61
|
/0x07-python-test_driven_development/2-matrix_divided.py
|
863f0dc62cc8b16f472da3460c0908a01aaa3834
|
[] |
no_license
|
BD20171998/holbertonschool-higher_level_programming
|
856fa3a7fcfafd3e17ebd7dd4cf9d3e5a609fd1f
|
bfa78d25bd4527e06cf1bf54cbc00722449d9a30
|
refs/heads/master
| 2021-07-16T01:58:42.911959
| 2020-11-15T07:18:19
| 2020-11-15T07:18:19
| 226,976,859
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,359
|
py
|
#!/usr/bin/python3
"""
This is an example of the matrix_divided function.
>>> matrix = [[1, 2, 3], [4, 5, 6]]
>>> print(matrix_divided(matrix, 3))
[[0.33, 0.67, 1.0], [1.33, 1.67, 2.0]]
"""
def matrix_divided(matrix, div):
"""
This function divides a matrix by an integer or float and returns a new
matrix divided by that number
"""
if (matrix == [] or matrix[0] == []):
raise TypeError('matrix must be a matrix (list of lists) of '
'integers/floats')
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if (
type(matrix[i][j]) is not int and
type(matrix[i][j]) is not float
):
raise TypeError('matrix must be a matrix (list of lists) of '
'integers/floats')
x = len(matrix[0])
for i in range(1, len(matrix)):
if len(matrix[i]) != x:
raise TypeError('Each row of the matrix must have the same size')
if div == 0:
raise ZeroDivisionError('division by zero')
if type(div) is not int and type(div) is not float:
raise TypeError('div must be a number')
newmat = matrix[:]
newmat = [
[float(round(newmat[i][j]/div, 2)) for j in range(len(newmat[i]))]
for i in range(len(newmat))]
return newmat
|
[
"robert.deprizio@gmail.com"
] |
robert.deprizio@gmail.com
|
a42ee1435bedd3c4a0940ee63ffcaa7c0387407f
|
612325535126eaddebc230d8c27af095c8e5cc2f
|
/depot_tools/git_upstream_diff.py
|
cc0a2006e585c1a00a443d4763335359281640b6
|
[
"BSD-3-Clause"
] |
permissive
|
TrellixVulnTeam/proto-quic_1V94
|
1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673
|
feee14d96ee95313f236e0f0e3ff7719246c84f7
|
refs/heads/master
| 2023-04-01T14:36:53.888576
| 2019-10-17T02:23:04
| 2019-10-17T02:23:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import sys
import subprocess2
import git_common as git
def main(args):
default_args = git.config_list('depot-tools.upstream-diff.default-args')
args = default_args + args
parser = argparse.ArgumentParser()
parser.add_argument('--wordwise', action='store_true', default=False,
help=(
'Print a colorized wordwise diff '
'instead of line-wise diff'))
opts, extra_args = parser.parse_known_args(args)
cur = git.current_branch()
if not cur or cur == 'HEAD':
print 'fatal: Cannot perform git-upstream-diff while not on a branch'
return 1
par = git.upstream(cur)
if not par:
print 'fatal: No upstream configured for branch \'%s\'' % cur
return 1
cmd = [git.GIT_EXE, 'diff', '--patience', '-C', '-C']
if opts.wordwise:
cmd += ['--word-diff=color', r'--word-diff-regex=(\w+|[^[:space:]])']
cmd += [git.get_or_create_merge_base(cur, par)]
cmd += extra_args
return subprocess2.check_call(cmd)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
|
[
"2100639007@qq.com"
] |
2100639007@qq.com
|
2b55e707ba9ff684d0159528ae983b83dceb3b6d
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_22433.py
|
a406e0709451fe6478ee3ca985fe25f91ec65b95
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
# How to disable button until check box is checked in pyqt?
connect(checkbox, SIGNAL(stateChanged(int)), button, SLOT(buttonStateChanged(int)));
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
c5d99d7ef7f2e3f44b277992637c51bbbcdc00d1
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/EightTeV/TprimeTprime/TprimeTprimeToTHTHinc_M_625_TuneZ2star_8TeV-madgraph_cff.py
|
4d932f949768263c528bf82c33419cee1ca13447
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,115
|
py
|
import FWCore.ParameterSet.Config as cms
#from Configuration.Generator.PythiaUEZ2Settings_cfi import *
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6HadronizerFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring(
'PMAS(25,1)=125.00D0 !mass of Higgs',
'MSTP(1) = 4',
'MSEL=8 ! fourth generation (t4) fermions',
'MWID(8)=2',
'MSTJ(1)=1 ! Fragmentation/hadronization on or off',
'MSTP(61)=1 ! Parton showering on or off',
'PMAS(5,1)=4.8 ! b quark mass', #from Spring11 4000040
'PMAS(6,1)=172.5 ! t quark mass', #from Spring11 4000040
'PMAS(8,1) = 625.0D0 ! tprime quarks mass',
'PMAS(8,2) = 6.25D0',
'PMAS(8,3) = 62.5D0',
'VCKM(1,1) = 0.97414000D0',
'VCKM(1,2) = 0.22450000D0',
'VCKM(1,3) = 0.00420000D0',
'VCKM(1,4) = 0.02500000D0',
'VCKM(2,1) = 0.22560000D0',
'VCKM(2,2) = 0.97170000D0',
'VCKM(2,3) = 0.04109000D0',
'VCKM(2,4) = 0.05700000D0',
'VCKM(3,1) = 0.00100000D0',
'VCKM(3,2) = 0.06200000D0',
'VCKM(3,3) = 0.91000000D0',
'VCKM(3,4) = 0.41000000D0',
'VCKM(4,1) = 0.01300000D0',
'VCKM(4,2) = 0.04000000D0',
'VCKM(4,3) = 0.41000000D0',
'VCKM(4,4) = 0.91000000D0',
'MDME(66,1)=0 ! g t4',
'MDME(67,1)=0 ! gamma t4',
'MDME(68,1)=0 ! Z0 t (2 : on for particle, off for anti-particle) ',
'MDME(69,1)=0 ! W d',
'MDME(70,1)=0 ! W s',
'MDME(71,1)=0 ! W b (3 : off for particle, on for particle) ',
'MDME(72,1)=0 ! W b4',
'KFDP(73,2)=6 ! defines H0 t',
'MDME(73,1)=1 ! h0 t4',
'MDME(74,1)=-1 ! H+ b',
'MDME(75,1)=-1 ! H+ b4',
'BRAT(66) = 0.0D0',
'BRAT(67) = 0.0D0',
'BRAT(68) = 0.0D0',
'BRAT(69) = 0.0D0',
'BRAT(70) = 0.0D0',
'BRAT(71) = 0.0D0',
'BRAT(72) = 0.0D0',
'BRAT(73) = 1.0D0',
'BRAT(74) = 0.0D0',
'BRAT(75) = 0.0D0',
'MDME(174,1)=1 !Z decay into d dbar',
'MDME(175,1)=1 !Z decay into u ubar',
'MDME(176,1)=1 !Z decay into s sbar',
'MDME(177,1)=1 !Z decay into c cbar',
'MDME(178,1)=1 !Z decay into b bbar',
'MDME(179,1)=1 !Z decay into t tbar',
'MDME(180,1)=-1 !Z decay into b4 b4bar',
'MDME(181,1)=-1 !Z decay into t4 t4bar',
'MDME(182,1)=1 !Z decay into e- e+',
'MDME(183,1)=1 !Z decay into nu_e nu_ebar',
'MDME(184,1)=1 !Z decay into mu- mu+',
'MDME(185,1)=1 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=1 !Z decay into tau- tau+',
'MDME(187,1)=1 !Z decay into nu_tau nu_taubar',
'MDME(188,1)=-1 !Z decay into tau4 tau4bar',
'MDME(189,1)=-1 !Z decay into nu_tau4 nu_tau4bar',
'MDME(190,1)=1 !W decay into u dbar',
'MDME(191,1)=1 !W decay into c dbar',
'MDME(192,1)=1 !W decay into t dbar',
'MDME(193,1)=-1 !W decay into t4 dbar',
'MDME(194,1)=1 !W decay into u sbar',
'MDME(195,1)=1 !W decay into c sbar',
'MDME(196,1)=1 !W decay into t sbar',
'MDME(197,1)=-1 !W decay into t4 sbar',
'MDME(198,1)=1 !W decay into u bbar',
'MDME(199,1)=1 !W decay into c bbar',
'MDME(200,1)=1 !W decay into t bbar',
'MDME(201,1)=-1 !W decay into t4 bbar',
'MDME(202,1)=-1 !W decay into u b4bar',
'MDME(203,1)=-1 !W decay into c b4bar',
'MDME(204,1)=-1 !W decay into t b4bar',
'MDME(205,1)=-1 !W decay into t4 b4bar',
'MDME(206,1)=1 !W decay into e- nu_e',
'MDME(207,1)=1 !W decay into mu nu_mu',
'MDME(208,1)=1 !W decay into tau nu_tau',
'MDME(209,1)=-1 !W decay into tau4 nu_tau4'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
),
jetMatching = cms.untracked.PSet(
scheme = cms.string("Madgraph"),
mode = cms.string("auto"), # soup, or "inclusive" / "exclusive"
MEMAIN_etaclmax = cms.double(5.0),
MEMAIN_qcut = cms.double(-1),
MEMAIN_nqmatch = cms.int32(-1),
MEMAIN_minjets = cms.int32(-1),
MEMAIN_maxjets = cms.int32(-1),
MEMAIN_showerkt = cms.double(0),
MEMAIN_excres = cms.string(''),
outTree_flag = cms.int32(0)
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch"
] |
sha1-5c9a4926c1ea08b633689ec734e2440da58b8c56@cern.ch
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.