Dataset columns (type and observed range / classes; ⌀ = nullable):

| Column | Type | Observed range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M ⌀ |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 ⌀ |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
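The records below follow this schema, one source file per record. A minimal sketch of iterating such a dataset with the Hugging Face `datasets` library (`your/dataset` is a placeholder path, not the actual dataset name):

```python
# Minimal sketch, assuming the dump comes from a Hugging Face dataset;
# "your/dataset" is a placeholder, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("your/dataset", split="train", streaming=True)
for row in ds:
    # each row carries provenance metadata plus the file text in row["content"]
    print(row["repo_name"], row["path"], row["length_bytes"])
    break
```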
68add924debc9d40e2c9e6e8b0177bc5b786cd89
|
2adcbbbecf90e4fbb89755a8a68f86b8fe70910a
|
/pythinkutils/aio/jwt/tornado/handler/AuthHandler.py
|
cf9795dfe29d2da870a4610c81273d1f7a526deb
|
[] |
no_license
|
ThinkmanWang/ThinkEventTrack
|
53e3b205787c2fcefb20d24fef0f98465dcb925e
|
b65072a3236a183c1cc1ac835cd79f2f46fd10d7
|
refs/heads/master
| 2023-08-03T11:38:33.099014
| 2021-09-19T13:17:39
| 2021-09-19T13:17:39
| 406,753,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
# -*- coding: utf-8 -*-
from pythinkutils.aio.jwt.tornado.handler.BaseHandler import BaseHandler
from pythinkutils.common.StringUtils import *
class AuthHandler(BaseHandler):
async def create_token(self, szAppId, szSecret):
pass
async def token_valid(self):
pass
async def get_uid_name(self):
pass
async def get_userinfo(self):
pass
async def get_token(self):
pass
async def get_permission_list(self):
pass
|
[
"wangxf1985@gmail.com"
] |
wangxf1985@gmail.com
|
23bb6267bc9316d2c29589fa9f9f4bbc4070480d
|
9084751a90f977fc722f90892e62c6596d0a26c6
|
/staticpy/util/helper.py
|
c426b40cab25aefe97cd88114e4f10b9981420d3
|
[
"BSD-3-Clause"
] |
permissive
|
SnowWalkerJ/StaticPy
|
5d3b4723cd7b78283ab95ec3021bdcf0dfe67a6c
|
818b7f009af7a6040313791993f543779781dddf
|
refs/heads/master
| 2020-07-28T17:50:21.072169
| 2020-01-14T06:21:39
| 2020-01-14T06:21:39
| 209,484,058
| 19
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
class Cls:
    """
    This class is used to refer to the "current" class before that class has
    been completely defined.

    Example
    =======
    .. code-block:: python

        class A:
            def __init__(self, value):
                self.value: int = value

            def __add__(self, other: Cls) -> Cls:
                return A(self.value + other.value)

    The signature of `__add__` means it adds two objects of type `A` and
    returns an object of type `A`.
    """
|
[
"jike3212001@163.com"
] |
jike3212001@163.com
|
f4c2668459c92a992bfea23f219d566210944f98
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_imported.py
|
878ac0a3c2bf379c3bee6feb4d0d7b3a98dbeee7
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 240
|
py
|
from xai.brain.wordbase.verbs._import import _IMPORT
# class header
class _IMPORTED(_IMPORT):
    def __init__(self):
        _IMPORT.__init__(self)
self.name = "IMPORTED"
self.specie = 'verbs'
self.basic = "import"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
c3e66a8e8814e8e5285b5c62076236e5e92a2c5c
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_barmaids.py
|
ed81aeb5d23f31b1164a1e5b2abbd9d585783d17
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
# class header
class _BARMAIDS:
    def __init__(self):
        self.name = "BARMAIDS"
        self.definitions = "barmaid"
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['barmaid']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
41362c6f99bb8f283f580c623b5714413bdd9cef
|
b201a0a88022b4c567f9c74346d60ab17f46ef64
|
/supervised_learning/0x08-deep_cnns/0-inception_block.py
|
c25e5a099742a0fae8f1a181e73471d332ce9741
|
[] |
no_license
|
Diegokernel/holbertonschool-machine_learning
|
929d9b8ac0fcdecbf28b76c09799f86c4b48d38e
|
a51fbcb76dae9281ff34ace0fb762ef899b4c380
|
refs/heads/master
| 2020-12-21T18:34:03.579632
| 2020-10-15T20:51:32
| 2020-10-15T20:51:32
| 236,523,731
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,189
|
py
|
#!/usr/bin/env python3
"""inception"""
import tensorflow.keras as K
def inception_block(A_prev, filters):
    """Builds an inception block as described in Going Deeper with
    Convolutions (2014).

    Returns the concatenated output of the inception block.
    """
convly_1 = K.layers.Conv2D(filters=filters[0],
kernel_size=1,
padding='same',
kernel_initializer='he_normal',
activation='relu')(A_prev)
convly_2P = K.layers.Conv2D(filters=filters[1],
kernel_size=1,
padding='same',
kernel_initializer='he_normal',
activation='relu')(A_prev)
layer_pool = K.layers.MaxPooling2D(pool_size=(3, 3),
padding='same',
strides=(1, 1))(A_prev)
convly_3 = K.layers.Conv2D(filters=filters[2],
kernel_size=3,
padding='same',
kernel_initializer='he_normal',
activation='relu')(convly_2P)
convly_3P = K.layers.Conv2D(filters=filters[3],
kernel_size=1,
padding='same',
kernel_initializer='he_normal',
activation='relu')(A_prev)
convly_3s = K.layers.Conv2D(filters=filters[4],
kernel_size=5,
padding='same',
kernel_initializer='he_normal',
activation='relu')(convly_3P)
OFPP = K.layers.Conv2D(filters=filters[5],
kernel_size=1,
padding='same',
kernel_initializer='he_normal',
activation='relu')(layer_pool)
mid_layer = K.layers.Concatenate(axis=3)([convly_1,
convly_3, convly_3s, OFPP])
return mid_layer
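# A minimal usage sketch (not part of the original file); the filter sizes are
# assumed to follow GoogLeNet's inception-3a block: [F1, F3R, F3, F5R, F5, FPP].
if __name__ == "__main__":
    inputs = K.Input(shape=(224, 224, 3))
    outputs = inception_block(inputs, [64, 96, 128, 16, 32, 32])
    print(outputs.shape)  # (None, 224, 224, 256): 64 + 128 + 32 + 32 channels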
|
[
"777@holbertonschool.com"
] |
777@holbertonschool.com
|
7efd7bacbcbce83194fa14d887cdaec9746271a3
|
714e36b745a5b2b5fc4e9b267b3fa214a9fa3d9a
|
/scripts/matplotlib/32plot_multi_ax.py
|
2d24a31b7ad2f8682a42094864e96bfd9abaa085
|
[] |
no_license
|
j3ffyang/ai
|
e89b4618c96e2085f37047c88d95f89d0a5409c9
|
5da753d2a1c9793564a32ac80911c1d2e35e8605
|
refs/heads/master
| 2022-12-10T21:12:48.432682
| 2020-08-12T07:56:11
| 2020-08-12T07:56:11
| 141,972,057
| 2
| 1
| null | 2022-11-22T02:55:29
| 2018-07-23T06:37:15
|
Python
|
UTF-8
|
Python
| false
| false
| 251
|
py
|
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 5))
ax1 = fig.add_subplot(121)  # 121 = 1 row, 2 columns, subplot 1
ax2 = fig.add_subplot(122)  # subplot 2 of the same 1x2 grid
ax1.bar([1, 2, 3], [3, 4, 5])
ax2.barh([0.5, 1, 2.5], [0, 1, 2])  # horizontal bars
plt.show()
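# Equivalent setup using the subplots API (a common alternative):
# fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))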
|
[
"j3ffyang@gmail.com"
] |
j3ffyang@gmail.com
|
f1e2664fa2d0bd72aa21b35789a9c70c94b02c4b
|
29bd55d171733586f24f42151d44f4312b6a610e
|
/keras/keras09_R2_test_answer.py
|
290e451c6c941e5b4cc6a1795f1120eefece9faa
|
[] |
no_license
|
votus777/AI_study
|
66ab1da2b8e760d0c52b0ed2b2f74158e14f435b
|
f4e38d95690c8ee84d87c02dc20a1ea59c495f04
|
refs/heads/master
| 2022-12-04T15:52:14.855624
| 2020-08-20T06:12:52
| 2020-08-20T06:12:52
| 262,975,960
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,406
|
py
|
'''
* Answer *
Make the model overfit!
Run epochs until just before the computer gives out.
But if you train for too many epochs, you get negative values.
'''
# 1. Data
import numpy as np
x_train = np.array([1,2,3,4,5,6,7,8,9,10])
y_train = np.array([1,2,3,4,5,6,7,8,9,10])
x_test = np.array([11,12,13,14,15])
y_test = np.array([11,12,13,14,15])
x_pred = np.array([16, 17, 18])
# 2. Model definition
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(5, input_dim = 1))
model.add(Dense(5))
model.add(Dense(5))
model.add(Dense(5))
model.add(Dense(1))
# 3. Training
model.compile(loss='mse', optimizer='adam', metrics=['mse'])
model.fit(x_train, y_train, epochs=30000, batch_size = 1)
# 4. Evaluation and prediction
loss,mse = model.evaluate(x_test, y_test, batch_size = 1)
print("loss : ", loss)
print("mse : ", mse)
'''
y_pred = model.predict(x_pred)
print("y_pred : ", y_pred)
'''
y_predict = model.predict(x_test)
print(y_predict)
# ________ Compute RMSE ______________
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_pred):
    return np.sqrt(mean_squared_error(y_test, y_pred))
# y_test = actual values, y_pred = predicted values
print("RMSE : ", RMSE(y_test, y_predict))
# ________ Compute R2 ________________
from sklearn.metrics import r2_score
r2 = r2_score(y_test, y_predict)
print("R2 score : ", r2)
|
[
"votus777@users.noreply.github.com"
] |
votus777@users.noreply.github.com
|
637294feb6424a6229c798af7673ec45462eb36b
|
6f1034b17b49f373a41ecf3a5a8923fb4948992b
|
/pychron/entry/providers/geodeepdive.py
|
5b215e86d942b7a12705bd99e2a5055fe0130c7a
|
[
"Apache-2.0"
] |
permissive
|
NMGRL/pychron
|
a6ec1854488e74eb5d3ff53eee8537ecf98a6e2f
|
8cfc8085393ace2aee6b98d36bfd6fba0bcb41c6
|
refs/heads/main
| 2023-08-30T07:00:34.121528
| 2023-06-12T17:43:25
| 2023-06-12T17:43:25
| 14,438,041
| 38
| 28
|
Apache-2.0
| 2023-08-09T22:47:17
| 2013-11-15T23:46:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
# ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import requests
API_URL = "https://geodeepdive.org/api"
def get_snippet(term):
s = requests.Session()
url = "{}/snippets?term={}".format(API_URL, term)
r = s.get(url)
obj = r.json()
return obj["success"]["data"]
if __name__ == "__main__":
g = get_snippet("Fish Canyon")
for o in g:
print(o)
# ============= EOF =============================================
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
5bbd918ca6dd43ecbc614c38cc3504aae453f7ff
|
3d4247362747e3763b72cd97ba39164387cfc07b
|
/tests/conftest.py
|
f2e6030f9a4269a460b002190ed04975970e3692
|
[
"MIT"
] |
permissive
|
gitter-badger/bocadillo
|
d5f104ff47ef5b9ee61163b7a4f3eb21d3e7da8b
|
ec1122ec6d62e8c90060b3ab3eb8cf7fb7deb433
|
refs/heads/master
| 2020-04-12T23:34:39.904514
| 2018-12-22T12:17:13
| 2018-12-22T12:17:13
| 162,823,961
| 0
| 0
|
MIT
| 2018-12-22T15:56:06
| 2018-12-22T15:56:06
| null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
from typing import NamedTuple
import pytest
from click.testing import CliRunner
from bocadillo import API
from .utils import RouteBuilder
@pytest.fixture
def api():
return API()
@pytest.fixture
def builder(api: API):
return RouteBuilder(api)
class TemplateWrapper(NamedTuple):
name: str
context: dict
rendered: str
source_directory: str
def _create_template(api, tmpdir_factory, dirname):
templates_dir = tmpdir_factory.mktemp(dirname)
template_file = templates_dir.join("hello.html")
template_file.write("<h1>Hello, {{ name }}!</h1>")
api.templates_dir = str(templates_dir)
return TemplateWrapper(
name="hello.html",
context={"name": "Bocadillo"},
rendered="<h1>Hello, Bocadillo!</h1>",
source_directory=dirname,
)
@pytest.fixture
def template_file(api: API, tmpdir_factory):
return _create_template(api, tmpdir_factory, dirname="templates")
@pytest.fixture
def template_file_elsewhere(api: API, tmpdir_factory):
return _create_template(api, tmpdir_factory, dirname="templates_elsewhere")
@pytest.fixture
def runner():
return CliRunner()
|
[
"florimond.manca@gmail.com"
] |
florimond.manca@gmail.com
|
81e4750f6e1eafec47a415e18716602934030d5a
|
ef6229d281edecbea3faad37830cb1d452d03e5b
|
/ucsmsdk/mometa/ape/ApeControllerChassis.py
|
92e616892a55b50b06f2dfb1df372998f463b50d
|
[
"Apache-2.0"
] |
permissive
|
anoop1984/python_sdk
|
0809be78de32350acc40701d6207631322851010
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
refs/heads/master
| 2020-12-31T00:18:57.415950
| 2016-04-26T17:39:38
| 2016-04-26T17:39:38
| 57,148,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,276
|
py
|
"""This module contains the general information for ApeControllerChassis ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ApeControllerChassisConsts():
pass
class ApeControllerChassis(ManagedObject):
"""This is ApeControllerChassis class."""
consts = ApeControllerChassisConsts()
naming_props = set([u'index'])
mo_meta = MoMeta("ApeControllerChassis", "apeControllerChassis", "Chassis-[index]", VersionMeta.Version101e, "InputOutput", 0x3f, [], ["read-only"], [u'apeControllerManager'], [u'apeControllerEeprom'], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"index": MoPropertyMeta("index", "index", "uint", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"index": "index",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, index, **kwargs):
self._dirty_mask = 0
self.index = index
self.child_action = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "ApeControllerChassis", parent_mo_or_dn, **kwargs)
|
[
"test@cisco.com"
] |
test@cisco.com
|
8f304b7f865adf1b910f47d4f554e7f22e87c2c7
|
b6406cd1e6d951934e762ec2ac925cddf4716ae9
|
/verification_feed/app.py
|
f72a52e96e4339287768e573bf79e527800013db
|
[
"MIT"
] |
permissive
|
agiamas/activity-stream
|
e5da92f00c26a8d7d99c4b2c5d4469e3500315cc
|
2b6a23de082950736e71380932b89b0a0e984b89
|
refs/heads/master
| 2020-03-28T17:14:02.023352
| 2018-09-05T17:32:51
| 2018-09-05T17:32:51
| 148,770,350
| 0
| 0
| null | 2018-09-14T09:57:53
| 2018-09-14T09:57:53
| null |
UTF-8
|
Python
| false
| false
| 3,912
|
py
|
import asyncio
from datetime import (
datetime,
timedelta,
timezone,
)
import logging
import os
import sys
from aiohttp import web
LOGGER_NAME = 'activity-stream-verification-feed'
async def run_application():
app_logger = logging.getLogger(LOGGER_NAME)
app_logger.debug('Examining environment...')
port = os.environ['PORT']
app_logger.debug('Examining environment: done')
await create_incoming_application(port)
async def create_incoming_application(port):
app_logger = logging.getLogger(LOGGER_NAME)
async def handle(request):
timestamp = int(request.match_info['timestamp'])
def get_next_page_href(next_timestamp):
return str(request.url.with_scheme(request.headers.get(
'X-Forwarded-Proto', 'http')).with_path(f'/{next_timestamp}'))
return web.json_response(get_page(timestamp, get_next_page_href))
app_logger.debug('Creating listening web application...')
app = web.Application()
app.add_routes([web.get(r'/{timestamp:\d+}', handle)])
access_log_format = '%a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i" %{X-Forwarded-For}i'
runner = web.AppRunner(app, access_log_format=access_log_format)
await runner.setup()
site = web.TCPSite(runner, '0.0.0.0', port)
await site.start()
app_logger.debug('Creating listening web application: done')
def setup_logging():
stdout_handler = logging.StreamHandler(sys.stdout)
aiohttp_log = logging.getLogger('aiohttp.access')
aiohttp_log.setLevel(logging.DEBUG)
aiohttp_log.addHandler(stdout_handler)
app_logger = logging.getLogger(LOGGER_NAME)
app_logger.setLevel(logging.DEBUG)
app_logger.addHandler(stdout_handler)
def get_page(timestamp, get_next_page_href):
''' Creates dummy activities where one has been created every second for the past 24 hours'''
now = datetime.now(timezone.utc).replace(microsecond=0)
one_day_ago = now - timedelta(hours=24)
first_timestamp = int(one_day_ago.timestamp())
final_timestamp = int(now.timestamp())
max_per_page = 1000
first_timestamp_of_page = max(first_timestamp, timestamp)
final_timestamp_of_page = min(first_timestamp_of_page + max_per_page, final_timestamp)
timestamps = range(first_timestamp_of_page, final_timestamp_of_page)
return {
'@context': [
'https://www.w3.org/ns/activitystreams',
{
'dit': 'https://www.trade.gov.uk/ns/activitystreams/v1'
}
],
'orderedItems': [
{
'actor': {
'dit:activityStreamVerificationFeedOrganizationId': '1',
'type': [
'Organization',
'dit:activityStreamVerificationFeedOrganization'
]
},
'dit:application': 'activityStreamVerificationFeed',
'id': f'dit:activityStreamVerificationFeed:Verifier:{activity_id}:Create',
'object': {
'id': f'dit:activityStreamVerificationFeed:Verifier:{activity_id}',
'type': [
'Document',
'dit:activityStreamVerificationFeed:Verifier'
],
'url': f'https://activitystream.uktrade.io/activities/{activity_id}'
},
'published': datetime.utcfromtimestamp(timestamp).isoformat(),
'type': 'Create'
}
for timestamp in timestamps
for activity_id in [str(timestamp)]
],
'type': 'Collection',
**({'next': get_next_page_href(final_timestamp_of_page)} if timestamps else {}),
}
def main():
setup_logging()
loop = asyncio.get_event_loop()
loop.create_task(run_application())
loop.run_forever()
if __name__ == '__main__':
main()
|
[
"michal@charemza.name"
] |
michal@charemza.name
|
b6e9b3b07c2c919a348d0916afd90fca07c31ad4
|
a74cabbe1b11fc8ef575ea86f2543cd95db78ec9
|
/python_program/q1457_Pseudo_Palindromic_Paths_in_a_Binary_Tree.py
|
3cb032d2740ae837af114fe21693659a88007536
|
[] |
no_license
|
tszandy/leetcode
|
87e3ccf291b2879637d2d8238935a455b401a78a
|
f1f4361541dcffbb291285663c8820d7ffb37d2f
|
refs/heads/master
| 2023-04-06T15:34:04.847875
| 2023-03-26T12:22:42
| 2023-03-26T12:22:42
| 204,069,234
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
from typing import List, Optional
from collections import Counter,defaultdict,deque
from math import *
from functools import reduce,lru_cache,total_ordering
import numpy as np
from heapq import *
from bisect import bisect_left,bisect_right
from itertools import count,zip_longest
import queue
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def pseudoPalindromicPaths(self, root: Optional["TreeNode"]) -> int:
self.counter = Counter()
self.count_palin = 0
self.recursive(root)
return self.count_palin
def recursive(self,node):
if node == None:
return
self.counter[node.val]+=1
if node.left==node.right==None:
self.check_palindrome()
else:
self.recursive(node.left)
self.recursive(node.right)
        self.counter[node.val] -= 1  # backtrack: remove this node's value on the way up
def check_palindrome(self):
count_single = 0
for val in self.counter.values():
if val%2==1:
count_single+=1
if count_single>=2:
return
self.count_palin +=1
sol = Solution()
# Sample inputs in LeetCode's level-order list form (null = missing child):
#   [2,3,1,3,1,null,1]                        -> expected output 2
#   [2,1,1,1,3,null,null,null,null,null,1]    -> expected output 1
#   [9]                                       -> expected output 1
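# Hedged additions (not part of the original file): a concrete TreeNode matching
# the commented-out definition above, plus a level-order builder, so the sample
# inputs can actually be run.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def build_tree(values):
    """Build a tree from LeetCode's level-order list form (None = no child)."""
    if not values:
        return None
    nodes = [TreeNode(v) if v is not None else None for v in values]
    children = iter(nodes[1:])
    for node in nodes:
        if node is not None:
            node.left = next(children, None)
            node.right = next(children, None)
    return nodes[0]

root = build_tree([2, 3, 1, 3, 1, None, 1])
print(sol.pseudoPalindromicPaths(root))  # expected 2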
|
[
"444980834@qq.com"
] |
444980834@qq.com
|
ae65ef4938bb05f4be99939c1298eac1dfe34aed
|
76f1331d083d360fb3822312537e72d4ff9d50b5
|
/spider/strong_spider/spider/antispider/proxypool/proxy_crawler.py
|
47c2dca816b1f993382664123b02647d33042a3d
|
[] |
no_license
|
ZouJoshua/ml_project
|
2fe0efee49aa1454b04cd83c61455232601720a6
|
b1d8eb050182cd782bc6f3bb3ac1429fe22ab7b7
|
refs/heads/master
| 2021-07-22T10:37:56.452484
| 2020-05-09T09:54:39
| 2020-05-09T09:54:39
| 158,562,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,767
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Joshua
@Time : 2018/9/5 15:51
@File : proxy_crawler.py
@Desc : 代理下载调度
"""
from gevent import monkey
monkey.patch_all()
import sys
import time
import gevent
from gevent.pool import Pool
from multiprocessing import Queue, Process, Value
import setting
from spider.tools.utils import md5
from proxy_htmldownloader import HtmlDownloader
from proxy_htmlparser import HtmlParser
from proxy_validator import Validator
from proxy_sqlitedb import ProxySqliteDB
from proxy_pipeline import SqlitePipeline
def start_proxycrawl(proxy_queue, db_proxy_num, myip):
crawl = ProxyCrawl(proxy_queue, db_proxy_num, myip)
crawl.run()
class ProxyCrawl(object):
proxies = set()
def __init__(self, proxy_queue, db_proxy_num, myip):
self.crawl_pool = Pool(setting.THREADNUM)
self.queue = proxy_queue
self.db_proxy_num = db_proxy_num
self.myip = myip
def run(self):
while True:
self.proxies.clear()
str_ = 'Starting crawl proxy!'
sys.stdout.write(str_ + "\r\n")
sys.stdout.flush()
proxylist = ProxySqliteDB.get_all()
spawns = []
for proxy in proxylist:
spawns.append(gevent.spawn(Validator.detect_from_db, self.myip, proxy, self.proxies))
if len(spawns) >= setting.MAX_CHECK_CONCURRENT_PER_PROCESS:
gevent.joinall(spawns)
spawns= []
gevent.joinall(spawns)
self.db_proxy_num.value = len(self.proxies)
str_ = 'IPProxyPool----->>>>>>>>db exists ip:%d' % len(self.proxies)
if len(self.proxies) < setting.MINNUM:
str_ += '\r\nIPProxyPool----->>>>>>>>now ip num < MINNUM, start crawling...'
sys.stdout.write(str_ + "\r\n")
sys.stdout.flush()
spawns = []
for p in setting.parserList:
spawns.append(gevent.spawn(self.crawl, p))
if len(spawns) >= setting.MAX_DOWNLOAD_CONCURRENT:
gevent.joinall(spawns)
spawns= []
gevent.joinall(spawns)
else:
str_ += '\r\nIPProxyPool----->>>>>>>>now ip num meet the requirement,wait UPDATE_TIME...'
sys.stdout.write(str_ + "\r\n")
sys.stdout.flush()
time.sleep(setting.UPDATE_TIME)
def crawl(self, parser):
html_parser = HtmlParser()
for url in parser['urls']:
response = HtmlDownloader.download(url)
if response is not None:
proxylist = html_parser.parse(response, parser)
if proxylist is not None:
for proxy in proxylist:
proxy_str = '%s:%s' % (proxy['ip'], proxy['port'])
proxy['proxy_id'] = md5(proxy_str)
if proxy_str not in self.proxies:
self.proxies.add(proxy_str)
while True:
if self.queue.full():
time.sleep(0.1)
else:
self.queue.put(proxy)
break
if __name__ == "__main__":
DB_PROXY_NUM = Value('i', 0)
q1 = Queue()
q2 = Queue()
p0 = Process(target=start_api_server)
p1 = Process(target=start_proxycrawl, args=(q1, DB_PROXY_NUM))
p2 = Process(target=Validator.validator, args=(q1, q2))
p3 = Process(target=SqlitePipeline.save_data, args=(q2, DB_PROXY_NUM))
p0.start()
p1.start()
p2.start()
p3.start()
|
[
"joshua_zou@163.com"
] |
joshua_zou@163.com
|
5b7240a4bf1fca1148ffca29d3a7222e1e4a4373
|
b565143dbd490ad2721af7d8578483aa053383d0
|
/recipes/tsl-hopscotch-map/all/conanfile.py
|
bd7f158542108161ebd2d641b6d256fd96a8682a
|
[
"MIT"
] |
permissive
|
czoido/conan-center-index
|
da7fbe837c88e3a65f7f2d6ed24ada62eb601c69
|
7952190873e49e23996fc7192a76e5917c49ec8a
|
refs/heads/master
| 2023-07-20T02:13:08.706521
| 2022-12-13T17:01:47
| 2022-12-13T17:01:47
| 228,573,093
| 0
| 0
|
MIT
| 2022-12-13T17:01:49
| 2019-12-17T08:49:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,853
|
py
|
from conans import ConanFile, tools
import os
required_conan_version = ">=1.43.0"
class TslHopscotchMapConan(ConanFile):
name = "tsl-hopscotch-map"
license = "MIT"
description = "C++ implementation of a fast hash map and hash set using hopscotch hashing"
topics = ("structure", "hash map", "hash set")
homepage = "https://github.com/Tessil/hopscotch-map"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "arch", "compiler", "build_type"
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
def package_id(self):
self.info.header_only()
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("*.h", dst="include", src=os.path.join(self._source_subfolder, "include"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "tsl-hopscotch-map")
self.cpp_info.set_property("cmake_target_name", "tsl::hopscotch_map")
# TODO: to remove in conan v2 once cmake_find_package* generators removed
self.cpp_info.filenames["cmake_find_package"] = "tsl-hopscotch-map"
self.cpp_info.filenames["cmake_find_package_multi"] = "tsl-hopscotch-map"
self.cpp_info.names["cmake_find_package"] = "tsl"
self.cpp_info.names["cmake_find_package_multi"] = "tsl"
self.cpp_info.components["hopscotch_map"].names["cmake_find_package"] = "hopscotch_map"
self.cpp_info.components["hopscotch_map"].names["cmake_find_package_multi"] = "hopscotch_map"
self.cpp_info.components["hopscotch_map"].set_property("cmake_target_name", "tsl::hopscotch_map")
|
[
"noreply@github.com"
] |
czoido.noreply@github.com
|
694f4c1a1be63ef2db87c8057b17478b668dafac
|
7da87c6d4c4d8443f1a9930b5edc277ce2a6c358
|
/009_triangles.py
|
642b3ede7b4aa5ace5b0e7d418acbdaca8585824
|
[] |
no_license
|
kazamari/CodeAbbey
|
2e1f28a20d5f773fc08b5b20899d437d5ba420f2
|
4f5031585ddad8d8be71ee1f80872712b139051e
|
refs/heads/master
| 2021-04-15T14:49:10.839383
| 2018-03-23T12:18:17
| 2018-03-23T12:18:17
| 126,477,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
'''
Input data: First line will contain number of triplets.
Other lines will contain triplets themselves (each in separate line).
Answer: You should output 1 or 0 for each triplet (1 if triangle could be built and 0 otherwise).
Example:
data:
2
3 4 5
1 2 4
answer:
1 0
'''
from sys import stdin
def is_triangle(triplet):
a, b, c = sorted(triplet)
return int(c <= a + b)
print(*[is_triangle(map(int, line.rstrip().split())) for i, line in enumerate(stdin) if i > 0])
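# Quick self-check of the helper (hedged addition; the script itself reads stdin):
# [is_triangle(map(int, s.split())) for s in ["3 4 5", "1 2 4"]]  # -> [1, 0]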
|
[
"maha_on@yahoo.com"
] |
maha_on@yahoo.com
|
15a64d10e6323cff866a56ab5326f9f9abfe8c10
|
97aa1181a8305fab0cfc635954c92880460ba189
|
/torch/nn/intrinsic/modules/fused.py
|
47f26dbdc203a870d09cbb6c19b5a68b2b260f95
|
[
"BSD-2-Clause"
] |
permissive
|
zhujiang73/pytorch_mingw
|
64973a4ef29cc10b96e5d3f8d294ad2a721ccacb
|
b0134a0acc937f875b7c4b5f3cef6529711ad336
|
refs/heads/master
| 2022-11-05T12:10:59.045925
| 2020-08-22T12:10:32
| 2020-08-22T12:10:32
| 123,688,924
| 8
| 4
|
NOASSERTION
| 2022-10-17T12:30:52
| 2018-03-03T12:15:16
|
C++
|
UTF-8
|
Python
| false
| false
| 5,967
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from torch.nn import Conv1d, Conv2d, Conv3d, ReLU, Linear, BatchNorm1d, BatchNorm2d, BatchNorm3d
class ConvReLU1d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 1d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv1d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super(ConvReLU1d, self).__init__(conv, relu)
class ConvReLU2d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv2d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super(ConvReLU2d, self).__init__(conv, relu)
class ConvReLU3d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert type(conv) == Conv3d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super(ConvReLU3d, self).__init__(conv, relu)
class LinearReLU(torch.nn.Sequential):
r"""This is a sequential container which calls the Linear and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, linear, relu):
assert type(linear) == Linear and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(linear), type(relu))
super(LinearReLU, self).__init__(linear, relu)
class ConvBn1d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 1d and Batch Norm 1d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn):
assert type(conv) == Conv1d and type(bn) == BatchNorm1d, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(bn))
super(ConvBn1d, self).__init__(conv, bn)
class ConvBn2d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 2d and Batch Norm 2d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn):
assert type(conv) == Conv2d and type(bn) == BatchNorm2d, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(bn))
super(ConvBn2d, self).__init__(conv, bn)
class ConvBnReLU1d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 1d, Batch Norm 1d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert type(conv) == Conv1d and type(bn) == BatchNorm1d and \
type(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \
.format(type(conv), type(bn), type(relu))
super(ConvBnReLU1d, self).__init__(conv, bn, relu)
class ConvBnReLU2d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 2d, Batch Norm 2d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert type(conv) == Conv2d and type(bn) == BatchNorm2d and \
type(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \
.format(type(conv), type(bn), type(relu))
super(ConvBnReLU2d, self).__init__(conv, bn, relu)
class ConvBn3d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 3d and Batch Norm 3d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn):
assert type(conv) == Conv3d and type(bn) == BatchNorm3d, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(bn))
super(ConvBn3d, self).__init__(conv, bn)
class ConvBnReLU3d(torch.nn.Sequential):
r"""This is a sequential container which calls the Conv 3d, Batch Norm 3d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, bn, relu):
assert type(conv) == Conv3d and type(bn) == BatchNorm3d and \
type(relu) == ReLU, 'Incorrect types for input modules{}{}{}' \
.format(type(conv), type(bn), type(relu))
super(ConvBnReLU3d, self).__init__(conv, bn, relu)
class BNReLU2d(torch.nn.Sequential):
r"""This is a sequential container which calls the BatchNorm 2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, batch_norm, relu):
assert type(batch_norm) == BatchNorm2d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(batch_norm), type(relu))
super(BNReLU2d, self).__init__(batch_norm, relu)
class BNReLU3d(torch.nn.Sequential):
r"""This is a sequential container which calls the BatchNorm 3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, batch_norm, relu):
assert type(batch_norm) == BatchNorm3d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(batch_norm), type(relu))
super(BNReLU3d, self).__init__(batch_norm, relu)
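# Hedged usage sketch (not part of the original file):
# fused = ConvReLU2d(Conv2d(3, 16, 3), ReLU())  # behaves like nn.Sequential(conv, relu)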
|
[
"zhujiangmail@hotmail.com"
] |
zhujiangmail@hotmail.com
|
7ca2f0338361b838e4f68540221bd77ee9e62925
|
3c349aa9cd58b50d9179bbc9d5f5c2403c491543
|
/ex33_polygons.py
|
035896acae30a0b26de698d7161f57a52b167776
|
[] |
no_license
|
dryabokon/geometry
|
a2f85f8681d5e878a327235380668ebdb858e70c
|
9024e963f6a9f308101e6d477d89ce3323038117
|
refs/heads/master
| 2023-04-30T12:06:18.130607
| 2023-04-27T18:23:49
| 2023-04-27T18:23:49
| 156,690,211
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,872
|
py
|
import cv2
import numpy
import shapely.geometry as geom
from scipy.spatial import ConvexHull
# ----------------------------------------------------------------------------------------------------------------------
import tools_polygons_i
import tools_draw_numpy
# ----------------------------------------------------------------------------------------------------------------------
N = 50
image = numpy.full((600, 800, 3), 255, dtype=numpy.uint8)
folder_out = './images/output/'
# ----------------------------------------------------------------------------------------------------------------------
def get_shape(N,center,radius):
x = center[0] + radius*numpy.array([numpy.sin(a/(N-1)*2*numpy.pi) for a in range(N)])
y = center[1] + radius*numpy.array([numpy.cos(a/(N-1)*2*numpy.pi) for a in range(N)])
x+= 0.9*radius*numpy.random.random(N)
y+= 0.9*radius*numpy.random.random(N)
points = numpy.concatenate((x.reshape((-1,1)),y.reshape((-1,1))),axis=1)
hull = ConvexHull(numpy.array(points))
cntrs = numpy.array(points)[hull.vertices]
points = numpy.array([(point[0], point[1]) for point in cntrs])
return points
# ----------------------------------------------------------------------------------------------------------------------
def interpolate(points1,points2,color1,color2):
p1s = geom.Polygon(points1)
p2s = geom.Polygon(points2)
I = tools_polygons_i.PolygonInterpolator(p1=p1s, p2=p2s)
X, Y, C0, C1, C2 = [], [], [], [], []
for pair in I.tuple_pairs:
X.append(numpy.linspace(pair[0][0], pair[1][0], N))
Y.append(numpy.linspace(pair[0][1], pair[1][1], N))
C0 = numpy.linspace(color1[0], color2[0], N).reshape((-1, 1))
C1 = numpy.linspace(color1[1], color2[1], N).reshape((-1, 1))
C2 = numpy.linspace(color1[2], color2[2], N).reshape((-1, 1))
X = numpy.array(X).T
Y = numpy.array(Y).T
C = numpy.concatenate([C0, C1, C2], axis=1).astype(numpy.uint8)
return X, Y, C
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
N1 = 13
N2 = 15
c1 = (400,300)
c2 = (430,220)
r1 = 100
r2 = 150
color1 = (0, 10, 255)
color2 = (255,128,15)
points1 = get_shape(N1,c1,r1)
points2 = get_shape(N2,c2,r2)
# cv2.imwrite(folder_out+'start.png',tools_draw_numpy.draw_contours(image, points1, color=color1,transperency=0.9))
# cv2.imwrite(folder_out+'stop.png' ,tools_draw_numpy.draw_contours(image, points2, color=color2,transperency=0.9))
X, Y, C = interpolate(points1, points2, color1, color2)
for i in range(X.shape[0]):
p = numpy.concatenate((X[i].reshape((-1,1)),Y[i].reshape((-1,1))),axis=1)
cv2.imwrite(folder_out+'%03d.png'%i,tools_draw_numpy.draw_contours(image, p, color=C[i],transperency=0.9))
|
[
"d.ryabokon@gmail.com"
] |
d.ryabokon@gmail.com
|
dda6f3a8ef102c9ba39babca4de4c8d9b3f4dd59
|
25f61fb72a60f95e10aff5809e67e95526c5fff7
|
/leetcode-30day-challenge/May-2019/W1_1_first_bad_version.py
|
a7d316cfa6cd858db77346c1f616205b8c0c0f1e
|
[] |
no_license
|
pradeepsinngh/A-Problem-A-Day
|
19db6baccc68f216cd8206118dafb2cbec962671
|
f5d598bbb60786a99c00fb338145c564fa70cf84
|
refs/heads/master
| 2021-07-18T02:23:04.248782
| 2020-05-04T22:12:27
| 2020-05-04T22:12:27
| 150,642,029
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
'''
# Prob: First Bad Version
# You are a product manager and currently leading a team to develop a new product. Unfortunately, the latest version of your product fails the quality check. Since each version is developed based on the previous version, all the versions after a bad version are also bad.
# Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which causes all the following ones to be bad.
# You are given an API bool isBadVersion(version) which will return whether version is bad. Implement a function to find the first bad version. You should minimize the number of calls to the API.
'''
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution(object):
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
left = 1
right = n
while left < right:
            mid = (left + right) // 2  # integer division; '/' would make mid a float in Python 3
if not isBadVersion(mid):
left = mid+1
else:
right = mid
return left
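# Hedged local harness (not part of the original): a sample isBadVersion so the
# solution can be run outside LeetCode; here the first bad version is 4 of n=10.
FIRST_BAD = 4

def isBadVersion(version):
    return version >= FIRST_BAD

print(Solution().firstBadVersion(10))  # expected 4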
|
[
"noreply@github.com"
] |
pradeepsinngh.noreply@github.com
|
852c57d9837e0941432c42026f1c82d0451da187
|
3cdb4faf34d8375d6aee08bcc523adadcb0c46e2
|
/web/env/lib/python3.6/site-packages/django/contrib/auth/migrations/0008_alter_user_username_max_length.py
|
7c9dae09500de428c3e2cea1c22b0419c38beedd
|
[
"MIT",
"GPL-3.0-only"
] |
permissive
|
rizwansoaib/face-attendence
|
bc185d4de627ce5adab1cda7da466cb7a5fddcbe
|
59300441b52d32f3ecb5095085ef9d448aef63af
|
refs/heads/master
| 2020-04-25T23:47:47.303642
| 2019-09-12T14:26:17
| 2019-09-12T14:26:17
| 173,157,284
| 45
| 12
|
MIT
| 2020-02-11T23:47:55
| 2019-02-28T17:33:14
|
Python
|
UTF-8
|
Python
| false
| false
| 752
|
py
|
from django.contrib.auth import validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(
error_messages={'unique': 'A user with that username already exists.'},
help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.',
max_length=150,
unique=True,
validators=[validators.UnicodeUsernameValidator()],
verbose_name='username',
),
),
]
|
[
"rizwansoaib@gmail.com"
] |
rizwansoaib@gmail.com
|
3afca15c44b03004f1a13b16f2ce4c2a33cdf1b7
|
d1c53def818f9c7e1bd660e3303a754f297aff43
|
/code/ch7/4_13_b.py
|
ea53b0fad8491987b7b00a7fb0c995791a2c1aef
|
[] |
no_license
|
khimacademy/c104
|
dcdcae13499e5b68905f09ea009e1a2b9f552e1c
|
83443858d5b85c23c107fa09cd672d17549776ee
|
refs/heads/master
| 2020-03-26T10:57:52.536935
| 2018-08-25T06:17:04
| 2018-08-25T06:17:04
| 144,822,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
'''
4-13. Buffet
A buffet-style restaurant offers only five basic foods. Think of five simple foods and store them in a tuple.
- Use a for loop to print each of the restaurant's foods.
- Try modifying one of the items and confirm that Python rejects the change.
- The restaurant is changing its menu. Replace two of the items with different foods. Add a block of code that overwrites the tuple, then use a for loop to print each item of the revised menu.
Output:
You can choose from the following menu items:
- rockfish sandwich
- halibut nuggets
- smoked salmon chowder
- salmon burger
- crab cakes
Our menu has been updated.
You can now choose from the following items:
- rockfish sandwich
- halibut nuggets
- smoked salmon chowder
- black cod tips
- king crab legs
'''
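# A hedged sample solution (the original file contains only the prompt above);
# the menu items mirror the expected output.
foods = ('rockfish sandwich', 'halibut nuggets', 'smoked salmon chowder',
         'salmon burger', 'crab cakes')
print("You can choose from the following menu items:")
for food in foods:
    print("- " + food)

# foods[0] = 'tacos'  # TypeError: 'tuple' object does not support item assignment

foods = ('rockfish sandwich', 'halibut nuggets', 'smoked salmon chowder',
         'black cod tips', 'king crab legs')
print("\nOur menu has been updated.")
print("You can now choose from the following items:")
for food in foods:
    print("- " + food)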
|
[
"sarang.khim@gmail.com"
] |
sarang.khim@gmail.com
|
340cdd25e6ba61fafd87bd9b7fb641673e3470b8
|
ad0b7af5c3547be1081e77594d98fa9939576c69
|
/program/SGD.py
|
fecb93355de1de1e74268e73e1c5d474d0eba2db
|
[] |
no_license
|
UESTC-Liuxin/ML
|
29ce4a576215520f87049bf1024f659cbd7a0e64
|
f88ddaa6eb97d1bb31a64ba3a8448fa9f2bead32
|
refs/heads/master
| 2022-07-29T22:24:12.997064
| 2020-05-24T12:48:32
| 2020-05-24T12:48:32
| 261,960,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
# import d2lzh as d2l
import math
# from mxnet import nd
import numpy as np
import matplotlib.pyplot as plt
def train_2d(trainer):  # this function is saved in the d2lzh package for later use
    x1, x2, s1, s2 = -5, -2, 0, 0  # s1 and s2 are optimizer state, used in later sections of this chapter
results = [(x1, x2)]
for i in range(20):
x1, x2, s1, s2 = trainer(x1, x2, s1, s2)
results.append((x1, x2))
print('epoch %d, x1 %f, x2 %f' % (i + 1, x1, x2))
return results
def show_trace_2d(f, results):  # this function is saved in the d2lzh package for later use
plt.plot(*zip(*results), '-o', color='#ff7f0e')
x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.02), np.arange(-3.0, 1.0, 0.02))
print(x1,x2)
plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()
eta = 0.1
def f_2d(x1, x2):  # objective function
return x1 ** 2 + 2 * x2 ** 2
def gd_2d(x1, x2, s1, s2):
return (x1 - eta * 2 * x1, x2 - eta * 4 * x2, 0, 0)
def sgd_2d(x1, x2, s1, s2):
return (x1 - eta * (2 * x1 + np.random.normal(0.1)),
x2 - eta * (4 * x2 + np.random.normal(0.1)), s1, s2)
show_trace_2d(f_2d, train_2d(sgd_2d))
|
[
"625092890@qq.com"
] |
625092890@qq.com
|
8d2bf3bac2b602bbaeb7eb68a7b28172a7b6631f
|
d1d79d0c3889316b298852834b346d4246825e66
|
/blackbot/core/wss/ttp/art/art_T1055-1.py
|
ce733c638fed1dee79e232881503ae921bc201e5
|
[] |
no_license
|
ammasajan/Atomic-Red-Team-Intelligence-C2
|
78d1ed2de49af71d4c3c74db484e63c7e093809f
|
5919804f0bdeb15ea724cd32a48f377bce208277
|
refs/heads/master
| 2023-07-17T12:48:15.249921
| 2021-08-21T20:10:30
| 2021-08-21T20:10:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,874
|
py
|
from blackbot.core.utils import get_path_in_package
from blackbot.core.wss.atomic import Atomic
from terminaltables import SingleTable
import os
import json
class Atomic(Atomic):
def __init__(self):
self.name = 'Privesc/T1055-1'
self.controller_type = ''
self.external_id = 'T1055'
self.blackbot_id = 'T1055-1'
self.version = ''
self.language = 'boo'
self.description = self.get_description()
self.last_updated_by = 'Blackbot, Inc. All Rights reserved'
self.references = ["System.Management.Automation"]
self.options = {
'OutString': {
'Description' : 'Appends Out-String to the PowerShellCode',
'Required' : False,
'Value' : True,
},
'BypassLogging': {
'Description' : 'Bypasses ScriptBlock and Techniques logging',
'Required' : False,
'Value' : True,
},
'BypassAmsi': {
'Description' : 'Bypasses AMSI',
'Required' : False,
'Value' : True,
}
}
def payload(self):
with open(get_path_in_package('core/wss/ttp/art/src/powershell.boo'), 'r') as ttp_src:
src = ttp_src.read()
pwsh_script = get_path_in_package('core/wss/ttp/art/pwsh_ttp/privilegeEscalation/T1055-1')
with open(pwsh_script) as pwsh:
src = src.replace("POWERSHELL_SCRIPT", pwsh.read())
src = src.replace("OUT_STRING", str(self.options["OutString"]["Value"]).lower())
src = src.replace("BYPASS_LOGGING", str(self.options["BypassLogging"]["Value"]).lower())
src = src.replace("BYPASS_AMSI", str(self.options["BypassAmsi"]["Value"]).lower())
return src
def get_description(self):
path = get_path_in_package('core/wss/ttp/art/pwsh_ttp/privilegeEscalation/T1055-1')
with open(path) as text:
head = [next(text) for l in range(4)]
technique_name = head[0].replace('#TechniqueName: ', '').strip('\n')
atomic_name = head[1].replace('#AtomicTestName: ', '').strip('\n')
description = head[2].replace('#Description: ', '').strip('\n')
language = head[3].replace('#Language: ', '').strip('\n')
aux = ''
count = 1
for char in description:
if char == '&':
continue
aux += char
if count % 126 == 0:
aux += '\n'
count += 1
out = '{}: {}\n{}\n\n{}\n'.format(technique_name, language, atomic_name, aux)
return out
|
[
"root@uw2artic201.blackbot.net"
] |
root@uw2artic201.blackbot.net
|
55405e3d6382798737ab5eaecff2a1af521ff606
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/MuonSpectrometer/MuonCablings/MuonCablingServers/python/__init__.py
|
46ac7323b0e0050a72f16f3a5a65d27ae41069ce
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
# File: MuonCablingServersConfig/__init__.py
__version__ = '1.0.0'
__author__ = 'alessandro.dimattia@roma1.infn.it'
__all__ = [ 'MuonCablingServersConfig' ]
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
51df6f78e9135e31af7333b8fb60f766d0b4e202
|
389569a591284a2adcdc38046114e7b1038afd94
|
/polygon/polygon/main_test1.py
|
e577151b68aaee98d00b80a0414e0977b53d0787
|
[] |
no_license
|
xytysingle/AnnotationTool
|
b797daf2fd472f602341b16f24fb1ed9b702aef1
|
a217d4376ceee739e0d8c43515c403133982e86e
|
refs/heads/master
| 2020-04-11T18:16:10.438919
| 2019-07-31T10:21:18
| 2019-07-31T10:21:18
| 161,992,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,227
|
py
|
# import re
#
# s = '1中文中文:123456aa哈哈哈bbcc'
# print(re.match(u"[\u4e00-\u9fa5]+", s) )
# # None. Matches only at the start of the string: returns None if there is no match, otherwise a match object
#
# pat = '中文'
# print(re.search(pat, s).group())
# # match object. Searches the whole string: returns None if there is no match, otherwise a match object
#
# newpat = '这里是中文内容'
# news = re.sub(pat, newpat, s)  # regex substitution: replace every match of pat in s with newpat; newpat may also be a function
# print(news)
#
#
#
# def newpat_func(matched):
# return "这里是" + matched.group() + u"内容"
#
#
# print(re.sub(pat, newpat_func, s))
# for i,bbox in enumerate
#
# from tkinter import *
# import threading, time
# trace = 0
# class CanvasEventsDemo:
# def __init__(self, parent=None):
# canvas = Canvas(width=300, height=300, bg='beige')
# canvas.pack()
# canvas.bind('<ButtonPress-1>', self.onStart) # click
# canvas.bind('<B1-Motion>', self.onGrow) # and drag
# canvas.bind('<Double-1>', self.onClear) # delete all
# canvas.bind('<ButtonPress-3>', self.onMove) # move latest
# self.canvas = canvas
# self.drawn = None
# self.kinds = [canvas.create_oval, canvas.create_rectangle]
# def onStart(self, event):
# self.shape = self.kinds[0]
# self.kinds = self.kinds[1:] + self.kinds[:1] # start dragout
# self.start = event
# self.drawn = None
# def onGrow(self, event): # delete and redraw
# canvas = event.widget
# if self.drawn: canvas.delete(self.drawn)
# objectId = self.shape(self.start.x, self.start.y, event.x, event.y)
# if trace: print(objectId)
# self.drawn = objectId
# def onClear(self, event):
# event.widget.delete('all') # use tag all
# def onMove(self, event):
# if self.drawn: # move to click spot
# if trace: print(self.drawn)
# canvas = event.widget
# diffX, diffY = (event.x - self.start.x), (event.y - self.start.y)
# canvas.move(self.drawn, diffX, diffY)
# self.start = event
# class CanvasEventsDemoTags(CanvasEventsDemo):
# def __init__(self, parent=None):
# CanvasEventsDemo.__init__(self, parent)
# self.canvas.create_text(100, 8, text='Press o and r to move shapes')
# self.canvas.master.bind('<KeyPress-o>', self.onMoveOvals)
# self.canvas.master.bind('<KeyPress-r>', self.onMoveRectangles)
# self.kinds = self.create_oval_tagged, self.create_rectangle_tagged
# def create_oval_tagged(self, x1, y1, x2, y2):
# objectId = self.canvas.create_oval(x1, y1, x2, y2)
# self.canvas.itemconfig(objectId, tag='ovals', fill='blue')
# return objectId
# def create_rectangle_tagged(self, x1, y1, x2, y2):
# objectId = self.canvas.create_rectangle(x1, y1, x2, y2)
# self.canvas.itemconfig(objectId, tag='rectangles', fill='red')
# return objectId
# def onMoveOvals(self, event):
# print('moving ovals')
# self.moveInSquares(tag='ovals') # move all tagged ovals
# def onMoveRectangles(self, event):
# print('moving rectangles')
# self.moveInSquares(tag='rectangles')
# def moveInSquares(self, tag): # 5 reps of 4 times per sec
# for i in range(5):
# for (diffx, diffy) in [(+20, 0), (0, +20), (-20, 0), (0, -20)]:
# self.canvas.move(tag, diffx, diffy)
# self.canvas.update() # force screen redraw/update
# time.sleep(0.25) # pause, but don't block gui
# class CanvasEventsDemoThread(CanvasEventsDemoTags):
# def moveEm(self, tag):
# for i in range(5):
# for (diffx, diffy) in [(+20, 0), (0, +20), (-20, 0), (0, -20)]:
# self.canvas.move(tag, diffx, diffy)
# time.sleep(0.25) # pause this thread only
# def moveInSquares(self, tag):
# threading.Thread(self.moveEm, (tag,)).start()
# if __name__ == '__main__':
# CanvasEventsDemoThread()
# mainloop()
#python tkinter menu
from tkinter import *
# some vocabulary to keep from getting confused. This terminology
# is something I cooked up for this file, but follows the man pages
# pretty closely
#
#
#
# This is a MENUBUTTON
# V
# +-------------+
# | |
#
# +------------++------------++------------+
# | || || |
# | File || Edit || Options | <-------- the MENUBAR
# | || || |
# +------------++------------++------------+
# | New... |
# | Open... |
# | Print |
# | | <------ This is a MENU. The lines of text in the menu are
# | | MENU ENTRIES
# | +---------------+
# | Open Files > | file1 |
# | | file2 |
# | | another file | <------ this cascading part is also a MENU
# +----------------| |
# | |
# | |
# | |
# +---------------+
__author__ = {'name' : 'Hongten',
'Email' : 'hongtenzone@foxmail.com',
'Blog' : 'http://www.cnblogs.com/hongten',
'QQ' : '648719819',
'Created' : '2013-09-10'}
# _*_ coding:utf-8 _*_
# from tkinter import *
# tk = Tk()
# canvas = Canvas(width=500,height=500)
# canvas.pack()
#
#
# #canvas.create_polygon(0,0,250,250,fill = 'red')
#
# def echo_event(evt):
# # print keyboard events
# if evt.type == "2":
# print("Keyboard: %s" % evt.keysym)
# # print mouse events
# if evt.type == "4":
# print("Mouse: %s" % evt.num)
# #
# print(evt.type)
#
# # keyboard events
# # canvas.bind_all("<KeyPress>",echo_event)
# # To bind a specific key, either "<Key>" or "<KeyPress>" works; for a particular key, append an underscore and the key name, e.g. bind lowercase t or the Left key
# canvas.bind("<KeyPress-t>",echo_event)
# canvas.bind_all("<KeyPress-Left>",echo_event)
# # mouse events
# canvas.bind_all("<Double-Button-1>",echo_event)
# canvas.bind_all("<Button-1>",echo_event)
# canvas.bind_all("<Button-2>",echo_event)
# canvas.bind_all("<Button-3>",echo_event)
# if __name__ == '__main__':
# mainloop()
# from tkinter import *
#
#
# def call_back(event):
# print(event.keysym)
#
#
# def main():
# root_login = Tk()
#
# # Create a frame and handle events inside it
# frame = Frame(root_login,
# width=200, height=200,
# background='green')
#
# # This way you don't have to look up the keysym table for special keys.
# # Just try it and see.
# frame.bind("<KeyPress>", call_back)
# frame.pack()
#
# # Give the frame focus so keyboard events are delivered to this frame only
# frame.focus_set()
#
# mainloop()
#
#
# if __name__ == '__main__':
# main()
from tkinter import *
class make_list(Listbox):
def __init__(self,master, **kw):
self.canvas=Canvas(master,width=500,height=600,bg='green')
self.canvas.pack()
self.canvas.create_rectangle(0,50,100,100,dash=' ')
if __name__ == '__main__':
tk = Tk()
make_list(tk)
tk.mainloop()
|
[
"2463072824@qq.com"
] |
2463072824@qq.com
|
3d0f4e92f82aeca0ee2486764345fa694bfe6656
|
8fd28b248511f42ad8732ca1e574aada33908376
|
/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py
|
02c50db20d8fa02519d4fe1ad7364d0967dfce15
|
[
"Apache-2.0"
] |
permissive
|
vt-vl-lab/video-data-aug
|
28bd175535cab1444055502389c8f5d7d75e4bd2
|
01667cdbd1b952f2510af3422beeeb76e0d9e15a
|
refs/heads/main
| 2023-09-01T02:36:40.034893
| 2021-07-21T01:31:42
| 2021-07-21T01:31:42
| 352,920,339
| 29
| 6
|
Apache-2.0
| 2021-07-21T01:29:36
| 2021-03-30T08:06:54
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,720
|
py
|
model = dict(
type='Recognizer3D',
backbone=dict(
type='ResNet3dSlowOnly',
depth=50,
pretrained=None,
lateral=False,
conv1_kernel=(1, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1),
norm_eval=False),
cls_head=dict(
type='I3DHead',
in_channels=2048,
num_classes=400,
spatial_type='avg',
dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips=None)
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=8,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=8,
num_clips=10,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD', lr=0.1, momentum=0.9,
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'], topk=(1, 5))
log_config = dict(
interval=20,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowonly_r50_8x8x1_256e_kinetics400_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
|
[
"zouyuliang123@gmail.com"
] |
zouyuliang123@gmail.com
|
d07b9ec027e3387ad373a6fcb4dc243fa3964750
|
3f763cf893b09a3be562858613c928703ff349e4
|
/client/verta/verta/_swagger/_public/modeldb/metadata/model/MetadataGetLabelsRequestResponse.py
|
18ffa3ba5b5186c19c9a7ab2962468b46c703524
|
[
"Apache-2.0"
] |
permissive
|
VertaAI/modeldb
|
636e46fc025b01a514d599b10e228c8735503357
|
ec9ac7712500adb13fd815dfd476ce9f536c6921
|
refs/heads/main
| 2023-08-31T00:45:37.220628
| 2023-08-30T18:45:13
| 2023-08-30T18:45:13
| 71,305,435
| 844
| 142
|
Apache-2.0
| 2023-09-14T19:24:13
| 2016-10-19T01:07:26
|
Java
|
UTF-8
|
Python
| false
| false
| 566
|
py
|
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class MetadataGetLabelsRequestResponse(BaseType):
def __init__(self, labels=None):
required = {
"labels": False,
}
self.labels = labels
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
tmp = d.get('labels', None)
if tmp is not None:
d['labels'] = [tmp for tmp in tmp]
return MetadataGetLabelsRequestResponse(**d)
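# Hedged usage sketch (illustrative only; this file is auto-generated):
# resp = MetadataGetLabelsRequestResponse.from_json({'labels': ['a', 'b']})
# resp.labels  # -> ['a', 'b']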
|
[
"noreply@github.com"
] |
VertaAI.noreply@github.com
|
de67c007364dfb0b71dd50d487c78eea39e615d6
|
cfbbe1303ed4a2feaf7e0023e62aa910b7eee733
|
/doc/conf.py
|
a4b1bca880898a236fea88f11043c0d203a934b7
|
[
"BSD-3-Clause"
] |
permissive
|
717524640/fatiando
|
8fa4fef8920770735d1a0d655259e87bc9382001
|
bf09661c40423bec85e47f15a14b786f25b7e873
|
refs/heads/master
| 2021-01-20T21:29:26.742259
| 2015-03-18T19:15:45
| 2015-03-18T19:15:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,807
|
py
|
# -*- coding: utf-8 -*-
import sys
import os
import datetime
import sphinx_bootstrap_theme
# Sphinx needs to be able to import fatiando to use autodoc
sys.path.append(os.path.pardir)
# and the cookbook.py module to build the cookbook
sys.path.append(os.path.split(os.path.abspath(__file__))[0])
from fatiando import __version__, __commit__
import cookbook
# Build the cookbook recipes
cookbook.build(os.path.join(os.pardir, 'cookbook'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'matplotlib.sphinxext.plot_directive',
]
# Sphinx project configuration
templates_path = ['_templates']
exclude_patterns = ['_build']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
master_doc = 'index'
# General information about the project
year = datetime.date.today().year
project = u'Fatiando a Terra'
copyright = u'2010-{:d}, Leonardo Uieda'.format(year)
if len(__version__.split('-')) > 1:
version = '-'.join([__version__.split('-')[0], 'dev'])
else:
version = __version__
# I'll use the release to place the commit hash at the footer of the site
release = __commit__.split('-')[0] # Get rid of -dirty
doi = '10.6084/m9.figshare.1115194'
# These enable substitutions using |variable| in the rst files
rst_epilog = """
.. |doi| replace:: {doi}
.. |doilink| replace:: doi:`{doi} <http://dx.doi.org/{doi}>`__
.. |year| replace:: {year}
""".format(doi=doi, year=year)
html_last_updated_fmt = '%b %d, %Y'
html_title = 'Fatiando a Terra'
html_short_title = 'Fatiando a Terra'
html_logo = '_static/fatiando-logo-noborder.png'
html_favicon = u'favicon.ico'
html_static_path = ['_static']
html_extra_path = ['.nojekyll', 'CNAME']
html_use_smartypants = True
pygments_style = 'sphinx'
add_function_parentheses = False
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FatiandoATerraDoc'
# Theme config
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
'bootswatch_theme': "flatly",
'navbar_title': 'fatiando',
'navbar_site_name': "Site",
'navbar_links': [
("Cite us", "cite"),
("Install", "install"),
("Docs", "docs"),
('<i class="fa fa-github-square fa-lg" title="Source code on Github"></i>',
"https://github.com/fatiando/fatiando", True),
('<i class="fa fa-envelope fa-lg" title="Mailing list"></i>',
"https://groups.google.com/d/forum/fatiando", True),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 1,
# Include hidden TOCs in Site navbar?
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar navbar-default",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "false",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "footer",
'bootstrap_version': "3",
}
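# Build sketch (standard Sphinx invocation; output path assumed): from this doc/
# directory, run `sphinx-build -b html . _build/html` to render the site with the
# bootstrap theme configured above.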
|
[
"leouieda@gmail.com"
] |
leouieda@gmail.com
|
7d1953fb1da45033d63049d7b07cc49f0cbb273e
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/e93cdecef17fe3027247389bba5934607c63372e-<run>-fix.py
|
358973030efe43d7b9bb143ecbcf224ff7f0da99
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,366
|
py
|
def run(self, tmp=None, task_vars=None):
if (self._play_context.connection != 'local'):
return dict(failed=True, msg=('invalid connection specified, expected connection=local, got %s' % self._play_context.connection))
module = module_loader._load_module_source(self._task.action, module_loader.find_plugin(self._task.action))
if (not getattr(module, 'USE_PERSISTENT_CONNECTION', False)):
return super(ActionModule, self).run(tmp, task_vars)
provider = self.load_provider()
pc = copy.deepcopy(self._play_context)
pc.network_os = 'junos'
pc.remote_addr = (provider['host'] or self._play_context.remote_addr)
if (self._task.action == 'junos_netconf'):
pc.connection = 'network_cli'
pc.port = (provider['port'] or self._play_context.port or 22)
else:
pc.connection = 'netconf'
pc.port = (provider['port'] or self._play_context.port or 830)
pc.remote_user = (provider['username'] or self._play_context.connection_user)
pc.password = (provider['password'] or self._play_context.password)
pc.private_key_file = (provider['ssh_keyfile'] or self._play_context.private_key_file)
pc.timeout = (provider['timeout'] or self._play_context.timeout)
display.vvv(('using connection plugin %s' % pc.connection), pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = self._get_socket_path(pc)
display.vvvv(('socket_path: %s' % socket_path), pc.remote_addr)
if (not os.path.exists(socket_path)):
if (pc.connection == 'netconf'):
(rc, out, err) = connection.exec_command('open_session()')
else:
(rc, out, err) = connection.exec_command('open_shell()')
if (rc != 0):
return {
'failed': True,
'msg': 'unable to connect to control socket',
}
elif (pc.connection == 'network_cli'):
(rc, out, err) = connection.exec_command('prompt()')
while str(out).strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
connection.exec_command('exit')
(rc, out, err) = connection.exec_command('prompt()')
task_vars['ansible_socket'] = socket_path
return super(ActionModule, self).run(tmp, task_vars)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
408df962b97cfeeb22d6bd8877223f600cbcaf2e
|
3ee982b28adec7154777a9962dacae5c17fbebe0
|
/data3/hadoop-2/PageRank1/PageRank.py
|
86f0a067ab8c03d973c7f59125f0f7d6d44aa98d
|
[] |
no_license
|
nycdatasci/aetna
|
406a5194b0ffff6b78c2ce1d34c2b090c3d82840
|
095c476210ebe4fef0a702a6a0a56981fe91c8ff
|
refs/heads/master
| 2020-04-24T05:40:48.464871
| 2019-03-15T15:06:42
| 2019-03-15T15:06:42
| 171,737,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 875
|
py
|
#!/usr/bin/env python
import os
from sys import argv
script, input_file, iters = argv
iters = int(iters)
streaming = '''hadoop jar $HADOOP_HOME/share/hadoop/tools/lib/hadoop-streaming-*.jar \
-files PageRankMap.py,PageRankReduce.py \
-input %s \
-output PageRankOutput \
-mapper PageRankMap.py \
-reducer PageRankReduce.py
''' % input_file
get_pop = 'hadoop fs -get PageRankOutput/part-00000'
rm_output = 'hadoop fs -rm -R PageRankOutput'
update_pop_local = 'mv part-00000 pop_table'
rm_pop_table = 'hadoop fs -rm pop_table'
update_pop_hdfs = 'hadoop fs -put pop_table'
os.system("hadoop fs -put %s" % input_file)
for i in range(iters):
os.system(streaming)
os.system(get_pop)
os.system(rm_output)
os.system(update_pop_local)
os.system(rm_pop_table)
os.system(update_pop_hdfs)
print("%d th iteration:" % (i+1))
os.system("cat pop_table")
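# Usage sketch (hypothetical input file): `python PageRank.py links.txt 10` uploads
# links.txt to HDFS, runs 10 streaming map-reduce iterations, and prints pop_table
# after each one.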
|
[
"yanshu.usc@gmail.com"
] |
yanshu.usc@gmail.com
|
7c79dc9fc7a73da8dbaa46e617aa02e1400a73a7
|
ae8f61a8c0c4a569f00529c3f07c73dbfc884f71
|
/tiled/server/models.py
|
f46b8b543646104f2e61cef8595a3bcd5815ac21
|
[
"BSD-3-Clause"
] |
permissive
|
untzag/tiled
|
1ba705303193312711d8ac75b977a26d6d9e7571
|
43a8ba82660ce3be077f2b6b060bdd2a23cf956b
|
refs/heads/main
| 2023-04-18T18:34:13.545139
| 2021-04-28T21:27:59
| 2021-04-28T21:27:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,385
|
py
|
import enum
import pydantic
import pydantic.dataclasses
import pydantic.generics
from typing import Any, Dict, Generic, List, Optional, TypeVar
DataT = TypeVar("DataT")
class Error(pydantic.BaseModel):
code: int
message: str
class Response(pydantic.generics.GenericModel, Generic[DataT]):
data: Optional[DataT]
error: Optional[Error]
meta: Optional[dict]
links: Optional[dict]
@pydantic.validator("error", always=True)
def check_consistency(cls, v, values):
if v is not None and values["data"] is not None:
raise ValueError("must not provide both data and error")
if v is None and values.get("data") is None:
raise ValueError("must provide data or error")
return v
class EntryType(str, enum.Enum):
catalog = "catalog"
reader = "reader"
class EntryFields(str, enum.Enum):
metadata = "metadata"
structure_family = "structure_family"
microstructure = "structure.micro"
macrostructure = "structure.macro"
count = "count"
client_type_hint = "client_type_hint"
none = ""
class CatalogAttributes(pydantic.BaseModel):
metadata: Optional[dict] # free-form, user-specified dict
count: Optional[int]
client_type_hint: Optional[str]
class StructureFamilies(str, enum.Enum):
array = "array"
dataframe = "dataframe"
variable = "variable"
data_array = "data_array"
dataset = "dataset"
class ReaderAttributes(pydantic.BaseModel):
metadata: Optional[dict] # free-form, user-specified dict
structure_family: Optional[StructureFamilies]
structure: Optional[Any] # TODO Figure out how to deal with dataclasses in FastAPI
class Resource(pydantic.BaseModel):
"A JSON API Resource"
id: str
type: EntryType
meta: Optional[dict]
class CatalogResource(Resource):
"Representation of a Catalog as a JSON API Resource"
attributes: CatalogAttributes
class ReaderResource(Resource):
"Representation of a Reader as a JSON API Resource"
attributes: ReaderAttributes
class Token(pydantic.BaseModel):
access_token: str
token_type: str
class TokenData(pydantic.BaseModel):
username: Optional[str] = None
class About(pydantic.BaseModel):
api_version: int
library_version: str
formats: Dict[str, List[str]]
aliases: Dict[str, Dict[str, List[str]]]
queries: List[str]
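# Usage sketch (illustrative only; exercises the data/error consistency validator):
if __name__ == "__main__":
    ok = Response[dict](data={"x": 1})  # data without error passes validation
    print(ok.data)
    try:
        Response[dict](data={"x": 1}, error=Error(code=500, message="boom"))
    except pydantic.ValidationError as exc:  # supplying both fields is rejected
        print(exc.errors()[0]["msg"])  # "must not provide both data and error"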
|
[
"dallan@bnl.gov"
] |
dallan@bnl.gov
|
51cbf596ebc778a532ab71e8d4bc5c3334d54fc2
|
6fe477a55bd565c78b55e5ec79ae704186e2c1fc
|
/chatgui.py
|
aceede2560445b3cbe0b67e7c8c2db77c079ae00
|
[] |
no_license
|
jayz6/chatboty
|
4f7ef8ea0105394230b076261f0f5a4828abfcde
|
e419faac730c9644269b6093e35bc375e2f723b8
|
refs/heads/master
| 2023-06-15T14:45:36.542008
| 2021-07-11T12:27:29
| 2021-07-11T12:27:29
| 374,563,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,646
|
py
|
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np
from keras.models import load_model
model = load_model('chatbot_model.h5')
import json
import random
intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl','rb'))
classes = pickle.load(open('classes.pkl','rb'))
def clean_up_sentence(sentence):
# tokenize the pattern - split words into array
sentence_words = nltk.word_tokenize(sentence)
    # lemmatize each word - reduce it to its base form
sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
return sentence_words
# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=True):
# tokenize the pattern
sentence_words = clean_up_sentence(sentence)
# bag of words - matrix of N words, vocabulary matrix
bag = [0]*len(words)
for s in sentence_words:
for i,w in enumerate(words):
if w == s:
# assign 1 if current word is in the vocabulary position
bag[i] = 1
if show_details:
print ("found in bag: %s" % w)
return(np.array(bag))
def predict_class(sentence, model):
# filter out predictions below a threshold
p = bow(sentence, words,show_details=False)
res = model.predict(np.array([p]))[0]
ERROR_THRESHOLD = 0.25
results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]
# sort by strength of probability
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
return return_list
def getResponse(ints, intents_json):
tag = ints[0]['intent']
list_of_intents = intents_json['intents']
for i in list_of_intents:
if(i['tag']== tag):
result = random.choice(i['responses'])
break
return result
def chatbot_response(msg):
ints = predict_class(msg, model)
res = getResponse(ints, intents)
return res
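# Console usage sketch (bypasses the GUI below; assumes chatbot_model.h5 and the
# pickled vocabularies exist):
# print(chatbot_response("Hello"))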
#Creating GUI with tkinter
import tkinter
from tkinter import *
def send():
msg = EntryBox.get("1.0",'end-1c').strip()
    EntryBox.delete("1.0", END)
if msg != '':
ChatLog.config(state=NORMAL)
ChatLog.insert(END, "You: " + msg + '\n\n')
ChatLog.config(foreground="#442265", font=("Verdana", 12 ))
res = chatbot_response(msg)
ChatLog.insert(END, "Bot: " + res + '\n\n')
ChatLog.config(state=DISABLED)
ChatLog.yview(END)
base = Tk()
base.title("VISHNU Assistance-Bot")
base.geometry("400x500")
base.resizable(width=FALSE, height=FALSE)
#Create Chat window
ChatLog = Text(base, bd=0, bg="white", height="8", width="50", font="Arial",)
ChatLog.config(state=DISABLED)
#Bind scrollbar to Chat window
scrollbar = Scrollbar(base, command=ChatLog.yview, cursor="heart")
ChatLog['yscrollcommand'] = scrollbar.set
#Create Button to send message
SendButton = Button(base, font=("Verdana",12,'bold'), text="Send", width="12", height=5,
bd=0, bg="#32de97", activebackground="#3c9d9b",fg='#ffffff',
command= send )
#Create the box to enter message
EntryBox = Text(base, bd=0, bg="white",width="29", height="5", font="Arial")
# EntryBox.bind("<Return>", send)
#Place all components on the screen
scrollbar.place(x=376,y=6, height=386)
ChatLog.place(x=6,y=6, height=386, width=370)
EntryBox.place(x=128, y=401, height=90, width=265)
SendButton.place(x=6, y=401, height=90)
base.mainloop()
|
[
"you@example.com"
] |
you@example.com
|
5a238a1582affefe50b7405410ac9c64ff303309
|
edcc26728370aa5bfabfbf5615933c34b108ed21
|
/sketches/readLines/readLines.pyde
|
eb9caaf92a7754dd7c22440ad54ab00eda3f75ef
|
[
"MIT"
] |
permissive
|
kantel/processingpy
|
9e94f4116257e9cfcd59c1f71d7572559c703058
|
1eef60347d41563aef7a092ff35434bd47d931d2
|
refs/heads/master
| 2023-08-15T12:04:46.713124
| 2023-07-29T16:41:14
| 2023-07-29T16:41:14
| 73,006,897
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 599
|
pyde
|
# Font-Test auf UTF-8; aus der Dokumentation:
# Starting with Processing release 0134,
# all files loaded and saved by the Processing API
# use UTF-8 encoding.
font = None
def setup():
size(500, 500)
# fontList = PFont.list()
# printArray(fontList)
font = createFont("Palatino-Roman", 32)
textFont(font)
noLoop()
def draw():
background(30)
textSize(32)
u = 50
text("Seltsame Zeichen", 20, u)
u = 80
textSize(24)
lines = loadStrings("boxer.txt")
for line in lines:
print(line)
text(line, 20, u, 460, 500)
u += 80
|
[
"joerg@kantel.de"
] |
joerg@kantel.de
|
a0bb2dd4a72b7f004884bdda564b3762452634f9
|
c2d681e9a4c7b1be07e9d581ad3ac00a5c783604
|
/classes.py
|
f9cca27f147dc0814ed33409926ddb4d9e4d635e
|
[] |
no_license
|
DennisMufasa/PythoneCode
|
6291ddf2d08a8361fe82e81bc5747eb0123848f6
|
d9b74205de0c60fec2c088e1b6c2b0b7a91c1273
|
refs/heads/master
| 2021-04-05T23:45:50.256668
| 2019-04-07T16:17:20
| 2019-04-07T16:17:20
| 124,888,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
class Mufasa:
def __init__(self,name,age,profession,health):
self.name=name
self.age=age
self.profession=profession
self.health=health
def career(self):
if self.profession=='Developer':
print(self.name,' is a developer.')
else:
print(self.name,' is not qualified for this job.')
# wewe=Mufasa('Vicky',23,'IT','Insured')
# wewe.career()
# mimi=Mufasa('Dennis',23,'Developer','Insured')
# mimi.career()
# mimi=Mufasa('Dennis',23,'developer','insured')
# mimi.details('Dennis',23,'developer')
# mimi.job('Anthony','doctor')
# mimi.miaka('Vicky',23)
# mimi.afya('Walter','not insured')
|
[
"denny.muasa@gmail.com"
] |
denny.muasa@gmail.com
|
c936f1c418ea6c456cf0dd6c2b5cec291e39acf2
|
905f40a4ad8e17bb4871cf87b6ee184a76a77c2a
|
/BCM/management/commands/remove_templates.py
|
bd73841db58291945cf21bda90d4758966cf6519
|
[] |
no_license
|
sai9912/mypyton
|
5e1f7ca278051d5f588af1d9accae5fd1780020b
|
338fd6396dbdce971bc542718fbb9608bdcfc2a7
|
refs/heads/master
| 2022-12-16T05:04:34.590818
| 2019-04-18T09:18:06
| 2019-04-18T09:18:06
| 176,324,427
| 0
| 0
| null | 2022-12-08T02:31:10
| 2019-03-18T16:16:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,975
|
py
|
from django.core.management import BaseCommand
from member_organisations.models import MemberOrganisation, ProductTemplate, ProductAttribute, ProductPackaging
IMPORT_DEBUG = True
class Command(BaseCommand):
"""
python manage.py remove_templates
"""
def add_arguments(self, parser):
parser.add_argument('mo_slug', nargs='?', default='', type=str)
def handle(self, *args, **options):
mo_slug = options.get('mo_slug')
try:
mo = MemberOrganisation.objects.get(slug=mo_slug)
except MemberOrganisation.DoesNotExist:
pass
else:
product_templates = ProductTemplate.objects.filter(member_organisation_id=mo.pk)
product_attributes = set()
for template in product_templates:
product_attributes |= set(template.attributes.all().values_list('pk', flat=True))
# delete attributes
ProductAttribute.objects.filter(id__in=product_attributes).delete()
# delete templates
product_templates_count = product_templates.count()
product_templates.delete()
if IMPORT_DEBUG and product_templates_count:
print('{attr} ProductAttribute and {c} ProductTemplate related to {mo} are removed'
.format(attr=len(product_attributes), c=product_templates_count, mo=mo_slug))
# delete orphaned attributes
product_attributes = ProductAttribute.objects.filter(member_organisation_id=mo.pk)
product_attributes_count = product_attributes.count()
product_attributes.delete()
if IMPORT_DEBUG and product_attributes_count:
print('{attr} orphaned ProductAttribute related to {mo} are removed'
.format(attr=product_attributes_count, mo=mo_slug))
# delete prod packaging too
ProductPackaging.objects.filter(member_organisation=mo.pk).delete()
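# Usage sketch (slug value supplied by the caller):
#   python manage.py remove_templates <mo_slug>
# deletes the MO's templates, their attributes, orphaned attributes and packaging.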
|
[
"root@ip-172-31-29-10.ap-south-1.compute.internal"
] |
root@ip-172-31-29-10.ap-south-1.compute.internal
|
79ecffd0003b8f52b8e02699f96264b491844e07
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerearth/Algorithms/Palindromes/solution.py
|
2287b35b7d8a759993933e56e1c6c55558875c88
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 418
|
py
|
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
s = input().strip()
temp = s
if len(set(s)) > 1:
while s == temp[::-1]:
temp = temp[:-1]
print(len(temp))
else:
print(0)
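# Worked example (illustrative): for "abcba" the while loop trims the last character
# while the remaining prefix, reversed, still equals the input, so one character is
# dropped and 4 is printed; "aaa" has a single distinct character, so 0 is printed.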
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
a04ef908392a402d41fe90499306a7c8d326b53a
|
9307d42ca27c8f07115197851a4d2355a7492abc
|
/shared/views.py
|
ef3b27912011c7aa2b20c022011f458b65b3a524
|
[] |
no_license
|
dkutelov/djangoevents
|
2a1c05d187a32557d4e5195cbe349efb05611ce4
|
9b6c4c9db366d7e282542cb853123dcca6191f8e
|
refs/heads/master
| 2023-01-29T15:37:47.664373
| 2020-12-13T13:17:58
| 2020-12-13T13:17:58
| 314,523,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
from django.core.exceptions import PermissionDenied
class GroupRequiredMixin:
groups = None
def dispatch(self, request, *args, **kwargs):
user = request.user
if not user.is_authenticated:
raise PermissionDenied
groups_set = set(self.groups or [])
raw_groups = user.groups.all()
user_groups = set([group.name for group in raw_groups])
if not user_groups.intersection(groups_set) and \
not user.is_superuser:
raise PermissionDenied
return super().dispatch(request, *args, **kwargs)
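# Usage sketch (hypothetical view; the mixin must come before the view class):
# class EventListView(GroupRequiredMixin, ListView):
#     groups = ['organizers', 'moderators']
#     model = Event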
|
[
"dari.k@abv.bg"
] |
dari.k@abv.bg
|
e772c8886cb376be4c127bf28013a9fc58b59e69
|
2eff2b24d5b6f5dffc42c9cbde6102ec9317502f
|
/src/Bits.py
|
49ff824b30bb9cf591b4e5c06713a9fefbb9b567
|
[] |
no_license
|
JakobKallestad/Python-Kattis
|
599a14e71a8d5c52aae779b8db3d35f0e4d01e88
|
51656964e79cc861e53f574785aacb213ef10b46
|
refs/heads/master
| 2022-10-24T23:12:45.599813
| 2021-12-08T12:31:54
| 2021-12-08T12:31:54
| 156,881,692
| 2
| 1
| null | 2022-10-02T12:36:57
| 2018-11-09T15:34:09
|
Python
|
UTF-8
|
Python
| false
| false
| 231
|
py
|
n_test_cases = int(input())
for _ in range(n_test_cases):
line = input()
max_bits = 0
for i in range(1, len(line)+1):
num = bin(int(line[:i]))
max_bits = max(max_bits, num.count('1'))
print(max_bits)
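# Worked example: input "100" -> prefixes 1, 10, 100 -> bin() gives 0b1, 0b1010,
# 0b1100100 with 1, 2 and 3 set bits respectively, so 3 is printed.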
|
[
"Jakob.Kallestad@student.uib.no"
] |
Jakob.Kallestad@student.uib.no
|
644400cc50052b08c364c1f2f950b52d631c448a
|
cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc
|
/Python Books/Athena/training/exercises/exercises/software_craftsmanship/fancy_math/fancy_math_solution.py
|
f6812946aeb6e236e271e1b69504b96c23643970
|
[] |
no_license
|
theGreenJedi/Path
|
df24fca355590efef0c6cb5c52e7216c6b5d2464
|
b5ed2805dbb046480929e49e550bfd8af5bb4d6f
|
refs/heads/master
| 2023-07-27T14:23:37.694546
| 2021-07-16T01:38:55
| 2021-07-16T01:38:55
| 87,686,563
| 8
| 2
| null | 2023-07-11T22:49:03
| 2017-04-09T05:57:30
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,523
|
py
|
""" Test Driven Development Example
Write a function called `slope` that calculates the slope
between two points. A point is specified by a two element
sequence of (x,y) coordinates.
>>> pt1 = [0.0, 0.0]
>>> pt2 = [1.0, 2.0]
>>> slope(pt1, pt2)
2.0
Use Test Driven Development. Write your tests first in
a separate file called tests_fancy_math.py.
Run your tests using the "nosetests" shell command. You can
do this by changing to the "slope" directory where your
fancy_math.py is defined and running "nosetests". From IPython,
you can run it like this:
In [1]: cd <someplace>/exercises/slope
    In [2]: !nosetests
...
--------------------------------------------------
Ran 3 tests in 0.157s
If you would like to see more verbose output, use the "-v"
option:
In [3]: !nosetests -v
test_fancy_math.test_slope_xxx ... ok
test_fancy_math.test_slope_yyy ... ok
...
By default, nose captures all output and does not print it
to the screen. If you would like to see the output of print
statements, use the "-s" flag.
"""
from __future__ import division
from numpy import Inf
def slope(pt1, pt2):
dy = pt2[1] - pt1[1]
dx = pt2[0] - pt1[0]
try:
slope = dy/dx
except ZeroDivisionError:
if dy > 0:
slope = Inf
else:
slope = -Inf
return slope
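# A matching test sketch (would live in test_fancy_math.py, per the docstring):
# from fancy_math import slope
# from numpy import Inf
#
# def test_slope_basic():
#     assert slope([0.0, 0.0], [1.0, 2.0]) == 2.0
#
# def test_slope_vertical():
#     assert slope([1.0, 0.0], [1.0, 3.0]) == Inf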
|
[
"GreenJedi@protonmail.com"
] |
GreenJedi@protonmail.com
|
387e099c4bd771eae8b41a1b0168202680e46074
|
31747dd8c61085421d7bd4166f7bd4f9429cf914
|
/tests/test_visitors/test_tokenize/test_keywords/test_starts_with_dot.py
|
71c475e0052d8cbb4394f153df8ed6fd363f4019
|
[
"MIT"
] |
permissive
|
edytagarbarz/wemake-python-styleguide
|
0e9ed4080a13a6727b8e80785e113b8407409352
|
74b86156d73c2a4fe9c755138f6953fec41fab3b
|
refs/heads/master
| 2021-03-03T19:21:54.807089
| 2020-03-07T23:35:15
| 2020-03-07T23:35:15
| 245,981,718
| 1
| 1
|
MIT
| 2020-03-09T08:31:55
| 2020-03-09T08:31:54
| null |
UTF-8
|
Python
| false
| false
| 1,483
|
py
|
import pytest
from wemake_python_styleguide.violations.consistency import (
LineStartsWithDotViolation,
)
from wemake_python_styleguide.visitors.tokenize.syntax import (
WrongKeywordTokenVisitor,
)
# Correct:
correct_dot_attr = """
some_line = some.attr(
some.other,
)
"""
correct_elipsis = """
first[
1,
...,
]
"""
correct_string_dot = '".start!"'
# Wrong:
wrong_dot_start1 = """
some = (
MyModel.objects.filter(some=1)
.exclude(other=2)
)
"""
wrong_dot_start2 = """
some = (
MyModel.objects.filter(some=1)
.exclude(other=2)
)
"""
@pytest.mark.parametrize('code', [
wrong_dot_start1,
wrong_dot_start2,
])
def test_wrong_dot_start(
parse_tokens,
assert_errors,
default_options,
code,
):
"""Ensures that lines cannot be started with ``.`` char."""
file_tokens = parse_tokens(code)
visitor = WrongKeywordTokenVisitor(
default_options, file_tokens=file_tokens,
)
visitor.run()
assert_errors(visitor, [LineStartsWithDotViolation])
@pytest.mark.parametrize('code', [
correct_dot_attr,
correct_elipsis,
correct_string_dot,
])
def test_correct_dot_start(
parse_tokens,
assert_errors,
default_options,
code,
):
"""Ensures that lines can be started with other chars."""
file_tokens = parse_tokens(code)
visitor = WrongKeywordTokenVisitor(
default_options, file_tokens=file_tokens,
)
visitor.run()
assert_errors(visitor, [])
|
[
"mail@sobolevn.me"
] |
mail@sobolevn.me
|
14b359d92f3e1da24aa1a431953c9de91141cae3
|
449f6888bff99d7e4fd86fa6ffa6b3316084e34e
|
/Solutions/248.py
|
febd366352a6f0c6d1e9f9292f83bc6b4f1906f7
|
[
"MIT"
] |
permissive
|
All3yp/Daily-Coding-Problem-Solutions
|
e94679a5858b8a83ffe58d14b824fe80de21a694
|
199b9606474edb45bd14b20b511b691ada437586
|
refs/heads/master
| 2023-03-18T21:06:30.675503
| 2021-03-13T03:52:31
| 2021-03-13T03:52:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
"""
Problem:
Find the maximum of two numbers without using any if-else statements, branching, or
direct comparisons.
"""
def get_max(num1: int, num2: int) -> int:
return num1 ^ ((num1 ^ num2) & -(num1 < num2))
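# How the branchless select works (illustrative): (num1 < num2) is 0 or 1, so the
# mask -(num1 < num2) is either 0 (all bits clear) or -1 (all bits set); ANDing it
# with (num1 ^ num2) keeps or erases the difference, and the outer XOR yields the max:
#   get_max(1, 5): mask = -1 -> 1 ^ ((1 ^ 5) & -1) = 1 ^ 4 = 5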
if __name__ == "__main__":
print(get_max(1, 5))
print(get_max(4, 3))
print(get_max(-3, 6))
print(get_max(5, -4))
print(get_max(-4, -2))
print(get_max(-3, -6))
"""
SPECS:
TIME COMPLEXITY: O(1)
SPACE COMPLEXITY: O(1)
"""
|
[
"ruppysuppy@gmail.com"
] |
ruppysuppy@gmail.com
|
7af9d20303a6cb0534c7e8fa34538d9028d47d3a
|
e40381a0aa3320616e5a5b82533c2c5cfe0fa2ce
|
/Dark_Scripts/plot_ValidationScores-LoopSeeds.py
|
40dc9786c4c96c2cd21b3edd21e4a9bd9de57c82
|
[
"MIT"
] |
permissive
|
zmlabe/predictGMSTrate
|
7220b26f86839699635fe2f04e45348095183bc7
|
ac4238c7f1c33dc9d30382e4dbdc26a2f63352f5
|
refs/heads/main
| 2023-04-10T03:46:16.053123
| 2023-01-11T14:08:27
| 2023-01-11T14:08:27
| 396,942,451
| 4
| 3
|
MIT
| 2022-01-19T22:15:19
| 2021-08-16T19:30:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,612
|
py
|
"""
Create plots to show validation scores for different seeds
Author : Zachary M. Labe
Date : 27 September 2021
Version : 2 (mostly for testing)
"""
### Import packages
import sys
import matplotlib.pyplot as plt
import matplotlib.colors as c
import numpy as np
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
### Hyperparamters for files of the ANN model
rm_ensemble_mean = True
COUNTER = 100
directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/LoopSeeds/'
directoryfigure = '/Users/zlabe/Desktop/GmstTrendPrediction/ANN_v2/Scores/'
### Read in seeds
seeds = np.load(directorydata + 'LoopSeedsResultsfor_ANNv2_OHC100_hiatus_EnsembleMeanRemoved_SEEDS.npz')
random_segment_seedq = seeds['random_segment_seedall']
random_network_seedq = seeds['random_network_seedall']
accval = np.empty((COUNTER))
precval = np.empty((COUNTER))
recallval = np.empty((COUNTER))
f1val = np.empty((COUNTER))
for lo in range(COUNTER):
if rm_ensemble_mean == True:
vari_predict = ['OHC100']
fac = 0.7
random_segment_seed = random_segment_seedq[lo]
random_network_seed = random_network_seedq[lo]
hidden = [30,30]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.5
actFun = 'relu'
fractWeight = np.arange(0.1,1.2,0.1)
yearsall = np.arange(1990,2099+1,1)
else:
print(ValueError('SOMETHING IS WRONG WITH DATA PROCESSING!'))
sys.exit()
### Naming conventions for files
savename = 'LoopSeedsResultsfor_ANNv2_'+vari_predict[0]+'_hiatus_' + actFun + '_L2_'+ str(ridgePenalty)+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(n_epochs) + '_' + str(len(hidden)) + 'x' + str(hidden[0]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
scores = np.load(directorydata + savename + '_SCORES_%s.npz' % lo)
accval[lo] = scores['accval']
precval[lo] = scores['precval']
recallval[lo] = scores['recallval']
f1val[lo] = scores['f1_val']
### Gather data and place percent
alldata = np.asarray([accval,precval,recallval,f1val]) * 100
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Graph for scores
fig = plt.figure()
ax = plt.subplot(111)
plotdata = alldata.transpose()
adjust_spines(ax, ['left', 'bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_color('dimgrey')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_linewidth(2)
ax.tick_params('both',length=4,width=2,which='major',color='dimgrey')
ax.tick_params(axis="x",which="both",bottom = False,top=False,
labelbottom=False)
ax.yaxis.grid(zorder=1,color='darkgrey',alpha=0.7,clip_on=False,linewidth=0.5)
def set_box_color(bp, color):
plt.setp(bp['boxes'],color=color)
plt.setp(bp['whiskers'], color=color,linewidth=1.5)
plt.setp(bp['caps'], color='w',alpha=0)
plt.setp(bp['medians'], color='w',linewidth=1.5)
positionsq = np.array(range(alldata.shape[0]))
bpl = plt.boxplot(plotdata,positions=positionsq,widths=0.6,
patch_artist=True,sym='')
# Modify boxes
cp= 'maroon'
set_box_color(bpl,cp)
plt.plot([], c=cp, label=r'\textbf{VALIDATION}',clip_on=False)
leg = plt.legend(shadow=False,fontsize=11,loc='upper center',
bbox_to_anchor=(0.5,1.14),fancybox=True,ncol=4,frameon=False,
handlelength=5,handletextpad=1)
for line,text in zip(leg.get_lines(), leg.get_texts()):
text.set_color(line.get_color())
for i in range(plotdata.shape[1]):
y = plotdata[:,i]
x = np.random.normal(positionsq[i], 0.04, size=len(y))
plt.plot(x, y,color='teal', alpha=0.5,zorder=10,marker='.',linewidth=0,markersize=5,markeredgewidth=0,clip_on=False)
plt.yticks(np.arange(0,101,10),list(map(str,np.round(np.arange(0,101,10),2))),
fontsize=6)
plt.ylim([10,90])
plt.text(-0.3,3,r'\textbf{ACCURACY}',fontsize=10,color='dimgrey',
ha='left',va='center')
plt.text(1.,3,r'\textbf{PRECISION}',fontsize=10,color='dimgrey',
ha='center',va='center')
plt.text(2.2,3,r'\textbf{RECALL}',fontsize=10,color='dimgrey',
ha='right',va='center')
plt.text(3.27,3,r'\textbf{F1-SCORE}',fontsize=10,color='dimgrey',
ha='right',va='center')
plt.ylabel(r'\textbf{Score [\%]}',color='k',fontsize=10)
if rm_ensemble_mean == True:
plt.savefig(directoryfigure + 'ValidationScores-LoopSeeds_Hiatus_EDA-v2_rmENSEMBLEmean.png',dpi=300)
else:
plt.savefig(directoryfigure + 'ValidationScores-LoopSeeds_Hiatus_EDA-v2.png',dpi=300)
|
[
"zmlabe@rams.colostate.edu"
] |
zmlabe@rams.colostate.edu
|
d87d2fd7a1df348093b2c383f2a073227f39de42
|
4e691a59c67915d5e2cc5a367137dfb02894f4cc
|
/main/migrations/0005_auto_20191118_1131.py
|
f8539f0214cc916cee8f0cc19c14e6206fb9725e
|
[] |
no_license
|
antocaroca/clase-4
|
fd7395e25debfa807fde2c5823b968747c5d9222
|
b88b78b022102156ba01cd4804307fafd3c6966b
|
refs/heads/master
| 2022-04-30T14:48:03.361465
| 2019-11-23T13:22:33
| 2019-11-23T13:22:33
| 223,596,303
| 0
| 0
| null | 2022-04-22T22:53:32
| 2019-11-23T13:51:00
|
Python
|
UTF-8
|
Python
| false
| false
| 406
|
py
|
# Generated by Django 2.2.7 on 2019-11-18 14:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0004_activemanager'),
]
operations = [
migrations.AlterField(
model_name='productag',
name='products',
field=models.ManyToManyField(blank=True, to='main.Producto'),
),
]
|
[
"antonella.caroca@cci-entel.cl"
] |
antonella.caroca@cci-entel.cl
|
ee912e4cffffee0781c744461002eba138ced516
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj9-10_233719.19+062504.0/sdB_sdssj9-10_233719.19+062504.0_lc.py
|
0f09a4e3130e72011e6fee900f36b7ea6ab8c394
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[354.329958,6.417778], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj9-10_233719.19+062504.0/sdB_sdssj9-10_233719.19+062504.0_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
880d0dabd65d48c9ab7140cf7942a975a4dc87e3
|
f5e7882e9fa8dca9b49d74819918063963eaf515
|
/FILES/prob2.py
|
09b48eba87e8bfcd175f537d161d9ee3a423e5a3
|
[] |
no_license
|
marri88/python-base
|
66ede5e3da3bce92e2661fabf8a2119644dd5ab3
|
00002724412f4081ee6d1b91c22cb1ccb5ed92fe
|
refs/heads/master
| 2023-06-09T16:13:08.921614
| 2021-07-02T09:16:38
| 2021-07-02T09:16:38
| 382,293,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 592
|
py
|
# -week_2: Working with files-
#####################################################################################
# first picture: prob №2
# Create a file users.txt.
# Write a program that asks the user for their login and password and writes them to users.txt.
r = open('/home/aimira/python/python3/week2files/users.txt', 'w')
a = input("name: ")
b = input("password: ")
r.write(f"name: {a}, password: {b}")
r.close()
with open('/home/aimira/python/python3/week2files/users.txt', 'r') as c:
    print(c.read())
|
[
"aymira.narzhigitova@gmail.com"
] |
aymira.narzhigitova@gmail.com
|
86550fb12d249da7e19769464f8dd19eee7951c5
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_sharpening.py
|
511f6993ea47ed80907f9f44fb3fcbe24158ade6
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
# class header
class _SHARPENING():
def __init__(self,):
self.name = "SHARPENING"
        self.definitions = 'sharpen'
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['sharpen']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
73418a4efc33f77e2110c61b4fae9a8d028f2537
|
14a26103e97b1669ca6f1f44996c4ad65b9f0179
|
/bim1/week4/16.6.5.py
|
714f4bcd5a24a3f05a9b49443eec7b91589b1e9e
|
[] |
no_license
|
igorbragaia/CES-22
|
6ea0fc3da7ba2274080954071f0070ba78a7f1f4
|
7c09bdec315421e57f2cd44d50f919f3965144ac
|
refs/heads/master
| 2021-09-17T14:40:31.426602
| 2018-07-02T18:54:40
| 2018-07-02T18:54:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,916
|
py
|
import sys
def test(did_pass):
"""
Prints test result
:param did_pass: test result
:return:
"""
linenum = sys._getframe(1).f_lineno # Get the caller's line number.
if did_pass:
msg = "Test at line {0} ok.".format(linenum)
else:
msg = ("Test at line {0} FAILED.".format(linenum))
print(msg)
class Point:
""" Create a new Point, at coordinates x, y """
def __init__(self, x=0, y=0):
""" Create a new point at x, y """
self.x = x
self.y = y
class Rectangle:
""" A class to manufacture rectangle objects"""
def __init__(self, posn, w, h):
""" Initiate rectangle at posn Point, with width w, height h"""
self.corner = posn
self.width = w
self.height = h
def collide(self, rect2):
"""
Checks if current rect and rect2 collides
:param rect2: Rectangle object
:return: boolean
"""
return (self.pointInsideCheck(rect2.corner) or
self.pointInsideCheck(Point(rect2.corner.x + rect2.height, rect2.corner.y)) or
self.pointInsideCheck(Point(rect2.corner.x, rect2.corner.y + rect2.width)) or
self.pointInsideCheck(Point(rect2.corner.x + rect2.height, rect2.corner.y + rect2.width)))
def pointInsideCheck(self, point):
"""
checks if a point is inside current rect
:param point: Point object
:return: boolean
"""
return (point.y >= self.corner.y and point.y <= self.corner.y + self.width and
point.x >= self.corner.x and point.x <= self.corner.x + self.height)
def __str__(self):
return "({0}, {1}, {2})".format(self.corner, self.width, self.height)
print(Rectangle(Point(0, 0), 100, 200).collide(Rectangle(Point(100, 101), 5, 10)))
print(Rectangle(Point(0, 0), 100, 200).collide(Rectangle(Point(100, 99), 5, 10)))
|
[
"igor.bragaia@gmail.com"
] |
igor.bragaia@gmail.com
|
3d4a9d8593553e43461522b4f38e4009058c4b7f
|
46355bd117d38191f2ebd23e9250ab121bf839fc
|
/Airbnb/roundPrice.py
|
dfa2edbe6aa3ad782e2593c4a64dd5308c29feb7
|
[] |
no_license
|
Jason003/Interview_Code_Python
|
f1e1a59e87cfada78a931be6a27a51898442aca4
|
75dbc8d3906bd00c8129c8ed0b584794c8b41d6b
|
refs/heads/master
| 2020-08-12T20:11:56.454848
| 2019-11-12T01:36:52
| 2019-11-12T01:36:52
| 214,835,963
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
import math
def roundPrice(A):
    # attention: Python 3 rounds half to even (round(2.5) == 2), so define a half-up round
def round(x):
fac = x - math.floor(x)
return math.ceil(x) if fac >= 0.5 else math.floor(x)
if not A:
return A
roundSum = sum(map(round, A))
sumRound = round(sum(A))
print(roundSum)
print(sumRound)
res = [round(a) for a in A]
if roundSum == sumRound:
return res
elif roundSum > sumRound:
cnt = roundSum - sumRound # need to make cnt number to round(number) - 1
nums = sorted([(a - math.floor(a), a, i) for i, a in enumerate(A)])
for fac, a, i in nums:
if fac >= 0.5 and cnt > 0:
res[i] = math.floor(a)
cnt -= 1
else:
res[i] = round(a)
return res
else:
cnt = sumRound - roundSum # need to make cnt number to round(number) + 1
nums = sorted([(a - math.floor(a), a, i) for i, a in enumerate(A)])[::-1]
for fac, a, i in nums:
if fac < 0.5 and cnt > 0:
res[i] = math.ceil(a)
cnt -= 1
else:
res[i] = round(a)
return res
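# Worked example (illustrative): roundPrice([1.2, 2.3, 3.4]) -> per-item rounding
# gives [1, 2, 3] (sum 6) while round(sum(A)) = round(6.9) = 7, so the entry with
# the largest fractional part below 0.5 (3.4) is bumped to ceil: [1, 2, 4].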
print(roundPrice([1,2,3,4]))
|
[
"792576519@qq.com"
] |
792576519@qq.com
|
e38b81ecb5dcb797e6cabf948bf96fed37fc0bb9
|
22bf086e3e7d43b72f0d05aaa3359b766a688a79
|
/scripts/extract_clues_info.py
|
5b22dcb47717b771f546fc7cd1ccda68132dab69
|
[] |
no_license
|
kaspermunch/humanXsweeps
|
aa7a4e6a175be276713f17f79d7179a5dd644ff5
|
3a2c4aa496aaffa837eb15dd3d382f7712266f38
|
refs/heads/master
| 2023-04-07T13:36:11.619973
| 2023-03-18T08:05:18
| 2023-03-18T08:05:18
| 161,376,285
| 1
| 0
| null | 2023-01-11T14:12:39
| 2018-12-11T18:25:53
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 886
|
py
|
import sys
import re
import os
import pandas as pd
import h5py
_, output_file_name, steps_dir, *clues_file_names = sys.argv # pylint: disable=unbalanced-tuple-unpacking
# open output file:
output_file = open(output_file_name, 'w')
# loop over base names of
for clues_file_name in clues_file_names:
# 98000000_99500000_1.bed_98614821.h5
start, end, chain, pos = re.search(r'(\d+)_(\d+)_(\d+).bed_(\d+).h5', clues_file_name).groups()
h5_path = os.path.join(steps_dir, clues_file_name)
if os.path.getsize(h5_path) == 0:
log_likelihood_ratio = 'NA'
selection_coef = 'NA'
else:
h5 = h5py.File(h5_path, 'r')
log_likelihood_ratio = h5['logLikelihoodRatios'][h5.attrs['iHat'], h5.attrs['jHat']]
selection_coef = h5.attrs['sHat']
print(start, end, pos, chain, log_likelihood_ratio, selection_coef, sep='\t', file=output_file)
|
[
"kaspermunch@birc.au.dk"
] |
kaspermunch@birc.au.dk
|
71ba6aabb8bb089ad893dcdffa33d9eec54dcd76
|
65dc8b306c1a22dc3a8ebf53d399135c5b894b69
|
/guestbook/forms.py
|
7936a074493597f70ce0116429f3fece381a3b2e
|
[] |
no_license
|
tokibito/python-hackathon2-guestbook
|
49e7f144d2c4f9920abea639816645d0edbca292
|
dfa152cf6cb7ebea1779c63846e36f8dbb90d8c1
|
refs/heads/master
| 2020-07-25T08:20:37.459468
| 2009-11-13T04:53:40
| 2009-11-13T04:53:40
| 208,228,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
# coding: utf8
from django import forms
from guestbook.models import Greeting
class GreetingForm(forms.ModelForm):
"""
    Guestbook entry form.
    Generated from the model.
"""
class Meta:
model = Greeting
        # exclude the creation timestamp
exclude = ('create_at',)
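# Usage sketch (hypothetical view code):
# form = GreetingForm(request.POST)
# if form.is_valid():
#     form.save()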
|
[
"xxshss@yahoo.co.jp"
] |
xxshss@yahoo.co.jp
|
b006bdd2968ab6b20d0f4cebef10dca4504e7561
|
b8ea631aae5d132c7b0236684d5f7c12d3c222be
|
/ABC/ABC_114C_zentan.py
|
6204c22fc19b6d2aae27bac78f48b889f8173b21
|
[] |
no_license
|
Ryushi-tech/card3
|
68c429313142e58d4722a1cd5a4acc4ab39ca41f
|
883636b2f518e38343a12816c5c641b60a87c098
|
refs/heads/master
| 2021-07-05T22:46:33.089945
| 2020-12-12T15:31:00
| 2020-12-12T15:31:00
| 209,176,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
import itertools as it
n = int(input())
l = len(str(n))
res = []
for i in range(3, l + 1):
for gen in it.product("357", repeat=i):
gen_s = "".join(gen)
if int(gen_s) <= n:
res.append(gen_s)
cnt = 0
for r in res:
if all(r.count(c) for c in "357"):
cnt += 1
print(cnt)
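# Worked example: n=400 -> among the 3-digit strings over {3,5,7} that are <= 400,
# only 357 and 375 contain each of 3, 5 and 7, so 2 is printed.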
|
[
"mryrys@gmail.com"
] |
mryrys@gmail.com
|
93d9228177ee76f0b5d92291e7212790f6c0557e
|
6a0e51fc8d2ea8819711acb7948f4c21c277c771
|
/the_list.py
|
bf381ad611c0d6716c2dc2a1241e1b868e580752
|
[] |
no_license
|
sunzhiyan/Python
|
eb0bd60edbc85e670ad02af9009df53f5936bff3
|
3881d124b71f81b7e0bd1e70bcd336238fbcfa3f
|
refs/heads/master
| 2022-10-08T21:38:22.799102
| 2021-03-05T02:32:57
| 2021-03-05T02:32:57
| 244,139,633
| 1
| 0
| null | 2022-09-23T22:42:18
| 2020-03-01T11:50:40
|
Python
|
UTF-8
|
Python
| false
| false
| 321
|
py
|
# -*- encoding: utf-8 -*-
'''
@File : 列表.py
@Time : 2020/03/03 22:09:13
@Author : xdbcb8
@Version : 1.0
@Contact : xdbcb8@qq.com
@WebSite : www.xdbcb8.com
'''
# here put the import lib
list=[56,23,89,99,56,45,87,56,65,100]
print(max(list))
print(min(list))
print(sum(list))
print(sum(list)/len(list))
|
[
"noreply@github.com"
] |
sunzhiyan.noreply@github.com
|
38b3cfd6f2bac9bca72f1b1a81f348801111e789
|
00cb405170a6a9572bef0ec8f373813eada08c03
|
/Game Structure/geometry/version5/mypainter.py
|
ce5e1b632d327bbb6cd9614946dd573bfe5136dd
|
[] |
no_license
|
MarcPartensky/Python-Games
|
c0ad2857be5832d6029642bb0a96bc8e403a12e3
|
ebfcaaf4a028eddb36bbc99184eb3f7a86eb24ed
|
refs/heads/master
| 2022-09-03T00:04:16.402288
| 2022-08-12T17:10:22
| 2022-08-12T17:10:22
| 166,606,022
| 2
| 1
| null | 2021-03-07T16:20:15
| 2019-01-19T23:56:04
|
Python
|
UTF-8
|
Python
| false
| false
| 7,413
|
py
|
from mycase import Case
from myabstract import Point, Form
import mycolors
import numpy as np
import shelve
from pygame.locals import *
class Painter:
def __init__(self, *args, **kwargs):
"""Create a painter."""
self.paints = [Board(), Paint(*args, **kwargs)]
self.paint_brush = PaintBrush()
self.painting = 0
def __call__(self, surface):
"""Main loop of the painter."""
while surface.open:
surface.check()
surface.control()
surface.clear()
surface.show()
self.show(surface)
self.control(surface)
surface.flip()
def control(self, surface):
"""Control the painter."""
cursor = surface.point()
cursor = [round(c + 1 / 2) for c in cursor]
self.print(surface)
self.paint(surface)
def print(self, surface):
"""Print the state of the painter on the surface."""
if self.painting == None:
surface.print("Create a new painting.", [-10, 12])
def paint(self, surface):
"""Paint using the surface and the paint."""
keys = surface.press()
click = surface.click()
cursor = surface.point()
cursor = [round(c + 1 / 2) for c in cursor]
self.paint_brush.setPosition(cursor)
p = self.getPaint(cursor)
if p is not None:
c = self.paints[p].getCase(cursor)
if keys[K_r]:
self.paint_brush.setRandomColor()
if keys[K_a]:
self.paint_brush.lightenColor()
if keys[K_b]:
self.paint_brush.darkenColor()
if keys[K_f]:
self.refreshBoard()
if p is None:
if click:
self.createPaint(cursor)
return
if keys[K_s]:
self.save(self.paints[p])
if keys[K_l]:
self.load(p)
if c is None:
return
if keys[K_c]:
self.paint_brush.copyColor(self.paints[p].cases[c])
if not click:
return
self.paint_brush.paint(surface, self.paints[p], c)
def createPaint(self, position):
"""Create a paint."""
size = [20, 20]
self.paints.append(Paint(position, size))
def save(self, paint):
"""Save the paint."""
print("File saved")
with shelve.open('paints') as p:
p["test"] = paint
def load(self, p):
"""Load a paint."""
print("File loaded")
with shelve.open("paints") as paints:
paint = paints["test"]
self.paints[p] = paint
def refreshBoard(self):
"""Change the colors of the board."""
self.paints[0].generate()
def show(self, surface):
"""Show the paints of the painter."""
for paint in self.paints:
paint.show(surface)
self.paint_brush.show(surface)
def getPaint(self, position):
"""Return the case containing the position if there is one."""
for i in range(len(self.paints)):
if position in self.paints[i]:
return i
class PaintBrush:
def __init__(self, position=[0, 0], size=[1, 1], color=mycolors.GREEN):
"""Create a paint brush for the painter."""
self.position = position
self.size = size
self.color = color
def paint(self, surface, paint, c):
"""Color a case."""
paint.cases[c].color = self.color
def copyColor(self, case):
"""Copy the color of the case."""
self.color = case.color
def setRandomColor(self):
"""Set the color of the brush to a random color."""
self.color = mycolors.random()
    def lightenColor(self):
        """Lighten the brush color."""
        self.color = mycolors.lighten(self.color)

    def darkenColor(self):
        """Darken the brush color."""
        self.color = mycolors.darken(self.color)
def setPosition(self, position):
"""Set the position of the brush."""
self.position = position
def show(self, surface):
"""Show the paint brush on the surface."""
x, y = self.position
case = Case((x - 1, y - 1), size=self.size, color=self.color)
case.show(surface, fill=False, side_color=mycolors.RED)
class Paint:
"""Paint object reserves an area to draw objects in."""
@classmethod
def random(cls, position=[0, 0], size=[10, 10]):
"""Create a random paint."""
return cls(position, size)
def __init__(self, position=[0, 0], size=[10, 10]):
"""Create a board object."""
self.position = position
self.size = size
self.cases = []
self.generate()
def getCorners(self):
"""Return the corners of the paint."""
px, py = self.position
sx, sy = self.size
corners = (px, py, px + sx, py + sy)
return corners
def generate(self):
"""Generate random cases all over the paint."""
cases = []
xmin, ymin, xmax, ymax = self.getCorners()
for y in np.arange(ymin, ymax):
for x in np.arange(xmin, xmax):
case = Case([float(x), float(y)], color=mycolors.WHITE)
cases.append(case)
self.cases = cases
def __contains__(self, position):
"""Determine if the point is in the paint."""
x, y = position
xmin, ymin, xmax, ymax = self.getCorners()
        return (xmin <= x <= xmax) and (ymin <= y <= ymax)
def getCase(self, position):
"""Return the case containing the position if there is one."""
for i in range(len(self.cases)):
if position in self.cases[i]:
return i
def getForm(self):
"""Return the form corresponding to the area of the painting."""
xmin, ymin, xmax, ymax = self.getCorners()
ps = [Point(xmin, ymin), Point(xmax, ymin),
Point(xmax, ymax), Point(xmin, ymax)]
return Form(ps)
def show(self, surface):
"""Show the paint by showing all its cases."""
f = self.getForm()
for case in self.cases:
case.show(surface, side_color=mycolors.WHITE)
f.side_color = mycolors.WHITE
f.side_width = 3
f.show(surface)
def save(self):
"""Save the paint."""
with shelve.open('paints') as paints:
            paints["test"] = self
class Board(Paint):
def __init__(self):
"""Create an accesory for the painter."""
self.position = [-12, -10]
self.size = [1, 20]
self.generate()
def generate(self):
"""Generate random cases for the board."""
x, y = self.position
sx, sy = self.size
self.cases = [Case([x, y - sy // 2], color=mycolors.random())
for y in range(sy)]
def show(self, surface):
"""Show the paint by showing all its cases."""
f = self.getForm()
for case in self.cases:
case.show(surface, side_color=mycolors.BLACK)
f.side_color = mycolors.BLACK
f.side_width = 3
f.show(surface)
f[0].showText(surface, "Board")
if __name__ == "__main__":
from mycontext import Surface
from myzone import Zone
surface = Surface(name="Painter")
painter = Painter([0, 0], [8, 8])
#print([0,0] in painter.paints[0])
painter(surface)
|
[
"marc.partensky@gmail.com"
] |
marc.partensky@gmail.com
|
270742e36424951d8024a33594d64497bf5758e2
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/331/usersdata/293/94058/submittedfiles/funcoes1.py
|
cc9f91b72aa5433b90fafe6a6f90c9742a3cf68e
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
# -*- coding: utf-8 -*-
def crescente(n,lista_crescente):
    # write the code of the crescente function here
    cont_crescente=0
    for i in range(0,n-1,1):
        if lista_crescente[i]<lista_crescente[i+1]:
            cont_crescente = cont_crescente + 1
    if cont_crescente==len(lista_crescente)-1:
        return ("S")
    else:
        return ("N")
# write the remaining functions
# write the main program
print(crescente(6,[1,2,3,4,5,6]))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1fa310f9ae1a6793e62ea6ef82a206a3635c31df
|
d74daa1dfe1f4eac96ceb1d006c59ba19b55d37a
|
/CS401/GG-Project/GG-Project/productProcessers/ProductReader.py
|
e6a04f2b514a3650ae352f1cd5ba9b37d87b4018
|
[] |
no_license
|
muratcancicek/Assignment-Projects
|
7aac0cced54f392e26b39f6bc46af813faddd628
|
41c7df2b60f20eb840d409f3fedb4ec6feeafdcc
|
refs/heads/master
| 2021-06-06T17:27:11.616251
| 2017-09-06T12:17:40
| 2017-09-06T12:17:40
| 58,016,251
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,422
|
py
|
from MainSrc.PythonVersionHandler import *
from .BsonIO import *
from paths import *
def getCategories(filename):
    categoriesData = readBson(filename)
categories = categoriesData['data']['categories']
print_(len(categories))
deepestCategories = [c for c in categories if c['deepest'] == 'Y']
print_(len(deepestCategories))
sortedCategories = sorted(deepestCategories, key=lambda k: k['productCount'], reverse = True)
#print sortedCategories[0]['productCount']
return sortedCategories, [c['categoryCode'] for c in sortedCategories]
def getCrowdedCategories(filename = "categories.bson", n = 100):
    sortedCategories, _ = getCategories(filename)
finalCategories = sortedCategories[:n]
return finalCategories, [c['categoryCode'] for c in finalCategories]
def getCategoryCodes(codes):
line = '('
for code in codes[:-1]:
line += '\"' + code + '\", '
line += '\"' + codes[-1] + '\")'
print_(line)
def readCodeLines():
codesText = open('codes.txt', "rb")
codeLines = codesText.read().split('\n')
codeLists = [l[1:-1].replace(', ', ',').split(',') for l in codeLines]
for lis in codeLists:
print_(len(lis))
return codeLists
def getCategoriesFromProducts(filename):
products = readBson(filename)
print_('Product Count:', len(products))
codes = [p['category']['code'].encode("utf-8") for p in products]
uniqueCodes = set(codes)
return list(uniqueCodes)
def summarizeProducts(filename, countPerCategory = 10):
    products = readBson(filename)
    codes = [p['category']['code'].encode("utf-8") for p in products]
    uniqueCodes = set(codes)
    counts = {}
    for code in uniqueCodes:
        counts[code] = codes.count(code)
print_('Product Count per Category:', counts)
storedCodes = [k for k, v in counts.iteritems() if v == countPerCategory]
print_('Stored Product Count:', len(storedCodes))
return storedCodes, uniqueCodes, counts
def getremainingCodes(total, storedFile):
storedCodes, uniqueCodes, counts = summarizeProducts(storedFile)
    crowdedCategories, crowdedCodes = getCrowdedCategories(n = total + len(uniqueCodes))
unstoredCodes = [c for c in crowdedCodes if not c in storedCodes]
print_('Unstored Product Count:', len(unstoredCodes))
intersectionCodes = [c for c in crowdedCodes if c in storedCodes]
print_('Intersection Product Count:', intersectionCodes)
finalCodes = unstoredCodes[:total-len(storedCodes)]
print_('Final Product Count:', len(finalCodes))
intersectCodes = [c for c in finalCodes if c in storedCodes]
print_('Intersection Product Count:', len(intersectCodes))
return finalCodes
def getProducts(filename):
return readBson(filename)
def getProductsByCategoryCode(productList):
codes = [p['category']['code'] for p in productList]
uniqueCodes = set(codes)
categoryList = list(uniqueCodes)
productDict = {}
for category in categoryList:
productDict[category] = []
for product in productList:
productDict[product['category']['code']].append(product)
return productDict
def getExpandedProductsByCategoryCode(productList, code):
return [product for product in productList if product['category_code'] == code]
def mergeProducts():
product230 = evalBson('products230.bson')
product780 = evalBson('products780.bson')
product230Dict = getProductsByCategoryCode(product230)
product230Dict.pop('rc',0)
product780Dict = getProductsByCategoryCode(product780)
#productDict = product230Dict + product780Dict
productDict = {}
productDict = product230Dict.copy()
productDict.update(product780Dict)
return productDict
def fixQuotesOnProduct(product):
if '\"' in product['title']:
product['title'] = fixQuotes(product['title'])
if product['subTitle'] != None:
if '\"' in product['subTitle']:
product['subTitle'] = fixQuotes(product['subTitle'])
for spec in product['specs']:
if '\"' in spec['values'][0]:
spec['values'][0] = fixQuotes(spec['values'][0])
return product
def generateGroupedProductsList(readingFileName = 'products.bson', writingFileName = 'groupedProducts.bson'):
unorderedProductList = evalBson(readingFileName, decoding = 'unicode-escape')
categoryProductsMap = getProductsByCategoryCode(unorderedProductList)
orderedProductList = []
categoryCodes = []
for categoryCode in categoryProductsMap.keys():
categoryCodes.append(categoryCode)
categoryCodes.sort()
for categoryCode in categoryCodes:
orderedProductList.extend(categoryProductsMap[categoryCode])
writeToBson(orderedProductList, writingFileName, decoding = 'unicode-escape', printText = True)
print_('WARNING: Encoding Error')
def generateCategoryCodeNameMap():
categories = evalBson('categories.bson')
cd = getCategoriesFromProducts('products.bson')
map = {}
for c in categories['data']['categories']:
if c['categoryCode'] in cd:
map[c['categoryCode']] = c['categoryName']
writeToBson(map, commonFolder + 'categoryCodeNames.json')
def readProducts(products = None, fileName = commonFolder + 'products.json', decoding = 'utf-8'):
return evalBson(fileName) if products == None else products
def readExpandedProducts(products = None, fileName = commonFolder + 'expandedProducts.bson', decoding = 'utf-8'):
return readProducts(products, fileName, decoding)
|
[
"muratcancicek0@gmail.com"
] |
muratcancicek0@gmail.com
|
ac36af1e489a5c9b31c7590762b0a39c7afac82e
|
7243df7ee2090f76a3d82b898f8a0f2e82198071
|
/csv_to_kml.py
|
e98a31ab4bea4bcfb7c771d945aefad94930d2ad
|
[] |
no_license
|
TimSC/air-quality-analysis-2018
|
6e3b8dce1a4ab76b36a362229572e20f7014db09
|
64a163a1b406f708a66a6f510647d9bfd49b7e5c
|
refs/heads/master
| 2020-03-18T13:32:16.576221
| 2019-06-13T13:07:49
| 2019-06-13T13:07:49
| 134,790,376
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,320
|
py
|
#!/usr/bin/env python
'''Example of generating KML from data in a CSV file
References:
'''
from __future__ import print_function
import csv
from lxml import etree
from pykml.factory import KML_ElementMaker as KML
def makeExtendedDataElements(datadict):
'''Converts a dictionary to ExtendedData/Data elements'''
edata = KML.ExtendedData()
for key, value in datadict.iteritems():
edata.append(KML.Data(KML.value(value), name=key + "_"))
return edata
doc = KML.Document()
iconstyles = [
[1,'ff000000'],
[1.2,'ff00ff00'],#10s
[1.4,'ff00ff44'],#20s
[1.6,'ff00cc88'],#30s ffb400
[1.8,'ff00aaaa'],#40s
[2.0,'ff0000ff'],#50s
]
# create a series of Icon Styles
for i, (scale, color) in enumerate(iconstyles):
doc.append(
KML.Style(
KML.IconStyle(
KML.color(color),
KML.scale(scale),
KML.Icon(
KML.href("https://maps.google.com/mapfiles/kml/shapes/caution.png"),
),
KML.hotSpot(x="0.5",y="0",xunits="fraction",yunits="fraction"),
),
#balloonstyle,
id="pollution-style-{threshold}".format(threshold=i),
)
)
adverseStyles = [
['negligible',1,'ff888888'],
['slight',1.33,'ff0000aa'],
['moderate',1.66,'ff0000cc'],
['substantial',2.0,'ff0000ff'],
]
for band, scale, color in adverseStyles:
doc.append(
KML.Style(
KML.IconStyle(
KML.color(color),
KML.scale(scale),
KML.Icon(
KML.href("http://earth.google.com/images/kml-icons/track-directional/track-0.png"),
),
KML.hotSpot(x="0.5",y="0",xunits="fraction",yunits="fraction"),
),
#balloonstyle,
id="adverse-style-{threshold}".format(threshold=band),
)
)
beneficialStyles = [
['negligible',1,'ff888888'],
['slight',1.33,'ff00aa00'],
['moderate',1.66,'ff00cc00'],
['substantial',2.0,'ff00ff00'],
]
for band, scale, color in beneficialStyles:
doc.append(
KML.Style(
KML.IconStyle(
KML.color(color),
KML.scale(scale),
KML.Icon(
KML.href("http://earth.google.com/images/kml-icons/track-directional/track-8.png"),
),
KML.hotSpot(x="0.5",y="0",xunits="fraction",yunits="fraction"),
),
#balloonstyle,
id="beneficial-style-{threshold}".format(threshold=band),
)
)
doc.append(KML.Folder())
receptorPosDict = {}
for row in csv.reader(open("receptors.csv")):
receptorPosDict[int(row[0])] = float(row[1]), float(row[2])
# read in a csv file, and create a placemark for each record
for rowNum, row in enumerate(csv.reader(open("2026-Nitrogen Dioxide Results.csv"))):
if rowNum < 5: continue
receptor, baseline2015, without2026, with2026, pcAqal, change, pcRelChange, significant, direction = row
baseline2015 = float(baseline2015)
receptorNum = int(receptor[1:])
if receptorNum not in receptorPosDict:
print ("No position for receptor", receptorNum)
continue
pos = receptorPosDict[receptorNum]
#print (receptor, pos, baseline2015, significant, direction)
labelData = {}
labelData['Receptor'] = receptorNum
labelData['NO2 Baseline (2015)'] = baseline2015
labelData['NO2 Without scheme (2026)'] = without2026
labelData['NO2 With scheme (2026)'] = with2026
labelData['Impact'] = "{} {}".format(significant, direction)
if 0:
pm = KML.Placemark(
#KML.name("NO2={0}".format(baseline2015)),
KML.styleUrl(
"#pollution-style-{thresh}".format(
thresh=int(baseline2015/10.0)
)
),
makeExtendedDataElements(labelData),
KML.Point(
KML.coordinates("{0},{1}".format(pos[1], pos[0]))
)
)
doc.Folder.append(pm)
if 1:
if direction=="Adverse":
pm = KML.Placemark(
KML.styleUrl(
"#adverse-style-{thresh}".format(
thresh=significant.lower()
)
),
makeExtendedDataElements(labelData),
KML.Point(
KML.coordinates("{0},{1}".format(pos[1], pos[0]))
)
)
doc.Folder.append(pm)
if direction=="Beneficial":
pm = KML.Placemark(
KML.styleUrl(
"#beneficial-style-{thresh}".format(
thresh=significant.lower()
)
),
makeExtendedDataElements(labelData),
KML.Point(
KML.coordinates("{0},{1}".format(pos[1], pos[0]))
)
)
doc.Folder.append(pm)
# check if the schema is valid
from pykml.parser import Schema
schema_gx = Schema("kml22gx.xsd")
schema_gx.assertValid(doc)
fi = open("out.kml", "wt")
fi.write(etree.tostring(doc, pretty_print=True))
fi.close()
|
[
"tim2009@sheerman-chase.org.uk"
] |
tim2009@sheerman-chase.org.uk
|
14949c91a4d0ab5a8e7d226a1ccfb3b8e203319e
|
b74d9c0655593d488f1bbf3e6d97a6d587fae9e8
|
/printing/wsgi.py
|
d8b6927a26ae5b4d5ae3ab65cd51e6dc08659019
|
[] |
no_license
|
dbca-wa/printing
|
d3353dce75412cfb1a1cf4a1f3f88373b4d36194
|
e0c5359fecf84a5512c4b9ede71f56acd9058bf9
|
refs/heads/master
| 2022-12-12T23:02:43.993583
| 2016-04-28T07:48:28
| 2016-04-28T07:48:28
| 57,281,158
| 0
| 0
| null | 2020-09-09T09:07:44
| 2016-04-28T07:41:33
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for printing project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "printing.settings")
application = get_wsgi_application()
|
[
"rocky.chen@dpaw.wa.gov.au"
] |
rocky.chen@dpaw.wa.gov.au
|
829d8ceb31ec21a8324a4ee14faa7bf5ad47e755
|
20f951bd927e4e5cde8ef7781813fcf0d51cc3ea
|
/fossir/modules/events/papers/controllers/base.py
|
ae72c3fe163c825494ededfcc8484349b10fe67e
|
[] |
no_license
|
HodardCodeclub/SoftwareDevelopment
|
60a0fbab045cb1802925d4dd5012d5b030c272e0
|
6300f2fae830c0c2c73fe0afd9c684383bce63e5
|
refs/heads/master
| 2021-01-20T00:30:02.800383
| 2018-04-27T09:28:25
| 2018-04-27T09:28:25
| 101,277,325
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,637
|
py
|
from __future__ import unicode_literals
from flask import request, session
from werkzeug.exceptions import Forbidden, NotFound
from fossir.modules.events.contributions.models.contributions import Contribution
from fossir.modules.events.controllers.base import RHDisplayEventBase
from fossir.modules.events.management.controllers.base import ManageEventMixin
from fossir.modules.events.util import check_event_locked
class RHPapersBase(RHDisplayEventBase):
"""Base class for all paper-related RHs"""
EVENT_FEATURE = 'papers'
def _check_access(self):
RHDisplayEventBase._check_access(self)
# Only let managers access the management versions.
if self.management and not self.event.cfp.is_manager(session.user):
raise Forbidden
@property
def management(self):
"""Whether the RH is currently used in the management area"""
return request.view_args.get('management', False)
class RHManagePapersBase(ManageEventMixin, RHPapersBase):
"""
Base class for all paper-related RHs that require full event
management permissions
"""
ROLE = 'paper_manager'
DENY_FRAMES = True
@property
def management(self):
"""Whether the RH is currently used in the management area"""
return request.view_args.get('management', True)
class RHJudgingAreaBase(RHPapersBase):
"""Base class for all paper-related RHs only available to judges/managers"""
def _check_access(self):
RHPapersBase._check_access(self)
if not session.user or not self.event.cfp.can_access_judging_area(session.user):
raise Forbidden
check_event_locked(self, self.event)
class RHPaperBase(RHPapersBase):
PAPER_REQUIRED = True
normalize_url_spec = {
'locators': {
lambda self: self.contribution
}
}
def _process_args(self):
RHPapersBase._process_args(self)
self.contribution = Contribution.get_one(request.view_args['contrib_id'], is_deleted=False)
self.paper = self.contribution.paper
if self.paper is None and self.PAPER_REQUIRED:
raise NotFound
def _check_access(self):
RHPapersBase._check_access(self)
if not self._check_paper_protection():
raise Forbidden
check_event_locked(self, self.event)
def _check_paper_protection(self):
"""Perform a permission check on the current paper.
Override this in case you want to check for more specific
privileges than the generic "can access".
"""
return self.contribution.can_access(session.user)
|
[
"hodardhazwinayo@gmail.com"
] |
hodardhazwinayo@gmail.com
|
d89c6d607ae28029364a25d266750f8ce316d329
|
e718d3ccc181a72e7bfe0faad42285f2829a96e5
|
/GDP_projectOriginal/service/urls.py
|
a21fadcfe3bedc40bf12129e1924b069fbbf87aa
|
[] |
no_license
|
Jerrykim91/GDP_project
|
8a17e75276ee92c93ad621163bffa57a528c258f
|
cd8e626cc3f01c1051f13115ad3d6217dd99ddc6
|
refs/heads/master
| 2022-05-02T01:08:33.955292
| 2021-03-25T14:43:21
| 2021-03-25T14:43:21
| 234,493,053
| 0
| 0
| null | 2022-04-22T23:27:15
| 2020-01-17T07:16:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 651
|
py
|
# service\urls.py
# import
from django.urls import path
from . import views
urlpatterns = [
# GDP_graph
path('search_main', views.search_main, name="search_main"),
path('search_detail', views.search_detail, name="search_detail"),
path('search_show', views.search_show, name="search_show"),
path('sort_by_year', views.sort_by_year, name="sort_by_year"),
path('search_country', views.search_country, name="search_country"),
path('search_country_graph', views.search_country_graph, name="search_country_graph"),
path('search_country_graph_pop', views.search_country_graph_pop, name="search_country_graph_pop")
]
|
[
"sun4131@gmail.com"
] |
sun4131@gmail.com
|
84f719db77d5ee7f722e08c95dd6bff85761425f
|
25e989e986522cf91365a6cc51e3c68b3d29351b
|
/databases/migrations/2018_06_26_165322_add_is_active_to_users_table.py
|
e16a431117d57b3dbff7c92a8a01d0b377b0c016
|
[
"MIT"
] |
permissive
|
josephmancuso/gbaleague-masonite2
|
ff7a3865927705649deea07f68d89829b2132d31
|
b3dd5ec3f20c07eaabcc3129b0c50379a946a82b
|
refs/heads/master
| 2022-05-06T10:47:21.809432
| 2019-03-31T22:01:04
| 2019-03-31T22:01:04
| 136,680,885
| 0
| 1
|
MIT
| 2022-03-21T22:16:43
| 2018-06-09T01:33:01
|
Python
|
UTF-8
|
Python
| false
| false
| 398
|
py
|
from orator.migrations import Migration
class AddIsActiveToUsersTable(Migration):
def up(self):
"""
Run the migrations.
"""
with self.schema.table('users') as table:
table.integer('is_active').nullable()
    def down(self):
        """
        Revert the migrations.
        """
        with self.schema.table('users') as table:
            table.drop_column('is_active')
|
[
"idmann509@gmail.com"
] |
idmann509@gmail.com
|
6bfa7de91c5b3465d54ffa63e86ba56adc35cf78
|
ba0731b2dbc4c1529eaaa79811ec15754c19b4cd
|
/references/domain.py
|
9d612aaa40fa48d9746f598c63c465aedf4bba37
|
[
"MIT"
] |
permissive
|
arXiv/arxiv-references
|
35f87084cf91947c572faf1a86f119b308fada66
|
a755aeaa864ff807ff16ae2c3960f9fee54d8dd8
|
refs/heads/master
| 2022-12-21T02:34:57.166298
| 2018-05-04T20:30:48
| 2018-05-04T20:30:48
| 94,906,433
| 8
| 6
|
MIT
| 2022-12-08T02:06:20
| 2017-06-20T15:26:25
|
Python
|
UTF-8
|
Python
| false
| false
| 3,178
|
py
|
"""Core data structures in the references application."""
from typing import List, Optional
from datetime import datetime
from base64 import b64encode
from dataclasses import dataclass, field, asdict
from unidecode import unidecode
@dataclass
class Author:
"""A parsed author name in a bibliographic reference."""
surname: str = field(default_factory=str)
givennames: str = field(default_factory=str)
prefix: str = field(default_factory=str)
suffix: str = field(default_factory=str)
fullname: str = field(default_factory=str)
@dataclass
class Identifier:
"""A persistent identifier for a cited reference."""
identifer_type: str
"""E.g. ISBN, ISSN, URI."""
identifier: str
@dataclass
class Reference:
"""An instance of a parsed bibliographic reference."""
title: Optional[str] = field(default=None)
"""The title of the reference."""
raw: str = field(default_factory=str)
"""The un-parsed reference string."""
arxiv_id: Optional[str] = field(default=None)
"""arXiv paper ID."""
authors: List[Author] = field(default_factory=list)
reftype: str = field(default='article')
"""The type of work to which the reference refers."""
doi: Optional[str] = field(default=None)
volume: Optional[str] = field(default=None)
issue: Optional[str] = field(default=None)
pages: Optional[str] = field(default=None)
source: Optional[str] = field(default=None)
"""Journal, conference, etc."""
year: Optional[str] = field(default=None)
identifiers: List[Identifier] = field(default_factory=list)
identifier: str = field(default_factory=str)
"""Unique identifier for this extracted reference."""
score: float = field(default=0.)
def __post_init__(self) -> None:
"""Set the identifier based on reference content."""
hash_string = bytes(unidecode(self.raw), encoding='ascii')
self.identifier = str(b64encode(hash_string), encoding='utf-8')[:100]
def to_dict(self) -> dict:
"""Return a dict representation of this object."""
return {k: v for k, v in asdict(self).items() if v is not None}
@dataclass
class ReferenceSet:
"""A collection of :class:`.Reference`."""
document_id: str
"""arXiv paper ID (with version affix)."""
references: List[Reference]
version: str
"""Version of this application."""
score: float
"""In the range 0-1; relative quality of the set as a whole."""
created: datetime
updated: datetime
extractor: str = 'combined'
"""
Name of the extractor used.
Default is combined (for reconciled reference set). May also be 'author',
for the author-curated set.
"""
extractors: List[str] = field(default_factory=list)
"""Extractors used to generate this reference set."""
raw: bool = field(default=False)
"""If True, refs are from a single extractor before reconciliation."""
def to_dict(self) -> dict:
"""Generate a dict representation of this object."""
data: dict = asdict(self)
data['created'] = self.created.isoformat()
data['updated'] = self.updated.isoformat()
return data
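
# A minimal usage sketch (hypothetical values, exercising the dataclasses above):
#
#   ref = Reference(raw="A. Author. An example paper. 2017.",
#                   title="An example paper", year="2017")
#   ref.identifier   # derived from `raw` in __post_init__
#   ref.to_dict()    # drops the None-valued optional fields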
|
[
"brp53@cornell.edu"
] |
brp53@cornell.edu
|
7b66b925a73790e4aaf1a9068d9c96e34ae8985d
|
2d048e630f8d9c546860820ef27700c1020b44cd
|
/th.py
|
5ae01e2c7426d7eb7c21903856965982279d5aca
|
[] |
no_license
|
0h-n0/tch-mnist-simple-benchmak
|
60ce3b6500f161f9a768e965d82eadf50a1e051f
|
c2f07661b64c83de82ad760b4a7b20b601a7129b
|
refs/heads/master
| 2020-12-31T22:36:26.946751
| 2020-02-08T02:40:40
| 2020-02-08T02:40:40
| 239,057,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,243
|
py
|
from __future__ import print_function
import time
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
    # average over samples, not batches (test_loader here is a list of batches)
    num_samples = sum(target.size(0) for _, target in test_loader)
    test_loss /= num_samples
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
        test_loss, correct, num_samples,
        100. * correct / num_samples))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=50, metavar='N',
help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
help='learning rate (default: 1.0)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
train_loader = [(data.to(device), target.to(device)) for (data, target) in train_loader]
test_loader = [(data.to(device), target.to(device)) for (data, target) in test_loader]
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
#torch.backends.cudnn.benchmark = True
times = []
for epoch in range(1, args.epochs + 1):
s = time.time()
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
scheduler.step()
times.append((time.time() - s))
print(f"{(time.time() - s)}s")
print("ave=>", torch.FloatTensor(times).mean())
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
if __name__ == '__main__':
main()
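
# Example invocation (hypothetical flags shown for illustration; all are defined above):
#   python th.py --epochs 5 --batch-size 128 --save-model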
|
[
"kbu94982@gmail.com"
] |
kbu94982@gmail.com
|
ed34089d75838c5200f1175d0b9aece8648523e1
|
36b75aac4236e928e22552e8812abd45d32aecf1
|
/modules/dbnd-airflow/src/dbnd_airflow_contrib/utils/system_utils.py
|
88d9452cd26e2ec091d604cdb48fdd92751efb1d
|
[
"Apache-2.0"
] |
permissive
|
reloadbrain/dbnd
|
7793aa1864f678005de626068b0ac9361d637d65
|
ec0076f9a142b20e2f7afd886ed1a18683c553ec
|
refs/heads/master
| 2023-09-01T08:04:09.486666
| 2021-10-14T16:43:00
| 2021-10-14T16:43:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,451
|
py
|
import logging
import re
import subprocess
logger = logging.getLogger(__name__)
def print_stack_trace(stack_frame):
try:
import traceback
traceback.print_stack(stack_frame)
except Exception as e:
logger.info("Could not print stack trace! Exception: %s", e)
def print_cpu_memory_usage():
try:
import psutil
cpu_usage_percent = psutil.cpu_percent(interval=1)
virtual_memory = psutil.virtual_memory()
last_minute_load, last_5_minute_load, last_15_minute_load = [
x / psutil.cpu_count() * 100 for x in psutil.getloadavg()
]
logger.info(
"""
Cpu usage %%: %s"
"Virtual memory: %s"
"Last minute cpu load %%: %s"
"Last 5 minute cpu load %%: %s"
"Last 15 minute cpu load %%: %s"
"""
% (
cpu_usage_percent,
virtual_memory,
last_minute_load,
last_5_minute_load,
last_15_minute_load,
)
)
except Exception as e:
logger.info("Could not read cpu and memory usage! Exception: %s", e)
def print_dmesg():
try:
human_dmesg()
except Exception as e:
logger.info("Could not get dmesg data! Exception: %s", e)
_datetime_format = "%Y-%m-%d %H:%M:%S"
_dmesg_line_regex = re.compile(r"^\[(?P<time>\d+\.\d+)\](?P<line>.*)$")
def human_dmesg():
from datetime import datetime, timedelta
now = datetime.now()
uptime_diff = None
with open("/proc/uptime") as f:
uptime_diff = f.read().strip().split()[0]
uptime = now - timedelta(
seconds=int(uptime_diff.split(".")[0]),
microseconds=int(uptime_diff.split(".")[1]),
)
dmesg_data = subprocess.check_output(["dmesg"]).decode()
for line in dmesg_data.split("\n"):
if not line:
continue
match = _dmesg_line_regex.match(line)
if match:
seconds = int(match.groupdict().get("time", "").split(".")[0])
nanoseconds = int(match.groupdict().get("time", "").split(".")[1])
microseconds = int(round(nanoseconds * 0.001))
line = match.groupdict().get("line", "")
t = uptime + timedelta(seconds=seconds, microseconds=microseconds)
logger.info("[%s]%s" % (t.strftime(_datetime_format), line))
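
# Example of an emitted line (a sketch; actual content depends on the host's dmesg):
#   [2021-10-14 16:43:00] usb 1-1: new high-speed USB device number 2 using xhci_hcd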
|
[
"roman.slipchenko@databand.ai"
] |
roman.slipchenko@databand.ai
|
e5d148377f281451a04b29aa1ba61f66b2a2f021
|
150d9e4cee92be00251625b7f9ff231cc8306e9f
|
/LongestCommonSubsequenceSP.py
|
bcfcc3820f481d704dc5053bddf126c12d6f8f44
|
[] |
no_license
|
JerinPaulS/Python-Programs
|
0d3724ce277794be597104d9e8f8becb67282cb0
|
d0778178d89d39a93ddb9b95ca18706554eb7655
|
refs/heads/master
| 2022-05-12T02:18:12.599648
| 2022-04-20T18:02:15
| 2022-04-20T18:02:15
| 216,547,245
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,680
|
py
|
'''
1143. Longest Common Subsequence
Given two strings text1 and text2, return the length of their longest common subsequence. If there is no common subsequence, return 0.
A subsequence of a string is a new string generated from the original string with some characters (can be none) deleted without changing the relative order of the remaining characters.
For example, "ace" is a subsequence of "abcde".
A common subsequence of two strings is a subsequence that is common to both strings.
Example 1:
Input: text1 = "abcde", text2 = "ace"
Output: 3
Explanation: The longest common subsequence is "ace" and its length is 3.
Example 2:
Input: text1 = "abc", text2 = "abc"
Output: 3
Explanation: The longest common subsequence is "abc" and its length is 3.
Example 3:
Input: text1 = "abc", text2 = "def"
Output: 0
Explanation: There is no such common subsequence, so the result is 0.
Constraints:
1 <= text1.length, text2.length <= 1000
text1 and text2 consist of only lowercase English characters.
'''
class Solution(object):
def longestCommonSubsequence(self, text1, text2):
"""
:type text1: str
:type text2: str
:rtype: int
"""
len1 = len(text1)
len2 = len(text2)
dp = []
for row in range(len1 + 1):
temp = []
for col in range(len2 + 1):
temp.append(0)
dp.append(temp)
for row in range(len1 - 1, -1, -1):
for col in range(len2 - 1, -1, -1):
if text1[row] == text2[col]:
dp[row][col] = dp[row + 1][col + 1] + 1
else:
dp[row][col] = max(dp[row + 1][col], dp[row][col + 1])
return dp[0][0]
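
# A minimal driver (sketch) reproducing the examples from the problem statement:
if __name__ == '__main__':
    solver = Solution()
    print(solver.longestCommonSubsequence("abcde", "ace"))  # 3
    print(solver.longestCommonSubsequence("abc", "abc"))    # 3
    print(solver.longestCommonSubsequence("abc", "def"))    # 0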
|
[
"jerinsprograms@gmail.com"
] |
jerinsprograms@gmail.com
|
d971cd5adcf944f961e3a880ed247c1f0f3464c2
|
0ef98f8a60e4d30001c918dae6fa7ac6283abca9
|
/61.py
|
8fb34a02053700a90124fd61efbb6ce24271c8d1
|
[] |
no_license
|
samrithasudhagar/pro
|
a0169fc89c8c6d6189ac984ec3fab26e23269264
|
c90cb60fefb74174f12db5ee80812c2374e4e3ce
|
refs/heads/master
| 2020-04-22T11:00:47.588732
| 2019-06-20T09:43:28
| 2019-06-20T09:43:28
| 170,324,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
a=input()
c=input()
s=""
l=["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
for i in range(len(a)):
k=l.index(a[i])
p=l.index(c[i])
    s=s+l[(k+p+1)%26]  # keep the +1 shift but wrap around to avoid an IndexError at index 26
print(s)
|
[
"noreply@github.com"
] |
samrithasudhagar.noreply@github.com
|
db970fba12cef3fd3dc931f70a998a1bb9b80ed5
|
5b52feaf975c810693bbd9c67deb061824cdca32
|
/Darlington/phase-2/FILE 1/O/day 84 solution/qtn3.py
|
13efc55791274ad1db6d3c1951ab660afa1f5ddb
|
[
"MIT"
] |
permissive
|
darlcruz/python-challenge-solutions
|
1dd21796b86f8fdcfa9a1a15faa26ab3e8e0f7b1
|
3e03a420d01177b71750d4d1b84cb3cbbf8c6900
|
refs/heads/master
| 2022-12-20T23:50:08.641120
| 2020-09-28T21:34:14
| 2020-09-28T21:34:14
| 263,591,779
| 0
| 0
|
MIT
| 2020-05-13T10:00:21
| 2020-05-13T10:00:20
| null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
# program to create a file where all letters of English alphabet are listed by
# specified number of letters on each line.
import string
def letters_file_line(n):
with open("words1.txt", "w") as f:
alphabet = string.ascii_uppercase
letters = [alphabet[i:i + n] + "\n" for i in range(0, len(alphabet), n)]
f.writelines(letters)
letters_file_line(3)
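# With n=3, the generated words1.txt holds the alphabet in 3-letter rows:
#   ABC
#   DEF
#   ...
#   YZ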
|
[
"darlingtonchibuzor64@gmail.com"
] |
darlingtonchibuzor64@gmail.com
|
69729b36949e741a9c7edf3832821394f61312c9
|
44f216cc3bb4771c8186349013ff0ed1abc98ea6
|
/torch/distributed/_shard/sharded_tensor/_ops/math_ops.py
|
6d3ed59da38cc711b1b67bbf8fd16e507d64083c
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
eiphy/pytorch
|
a8fc21a3c0552b392ed8c3a1d69f7ed8660c56ac
|
104f0bf09ec7609d1c5626a7d7953ade4f8c9007
|
refs/heads/master
| 2022-05-23T02:10:13.158924
| 2022-05-07T21:26:00
| 2022-05-07T21:26:00
| 244,914,898
| 2
| 0
|
NOASSERTION
| 2020-03-04T14:00:53
| 2020-03-04T14:00:53
| null |
UTF-8
|
Python
| false
| false
| 3,242
|
py
|
import torch
from torch import Tensor
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
sharded_op_impl
)
from torch.distributed._shard.replicated_tensor import ReplicatedTensor
from torch.distributed._shard._utils import narrow_tensor
def binary_math_op_impl(op, types, args=(), kwargs=None, pg=None):
"""
Handles ``__torch_function__`` dispatch for the binary math ops
such as `torch.add`, `torch.mul`, `torch.div`, etc.
This method computes on ShardedTensor, or ShardedTensor op ReplicatedTensor
"""
if len(args) != 2:
raise ValueError("Only support binary math op on ShardedTensor for now!")
lhs = args[0]
rhs = args[1]
# Validate types
if isinstance(lhs, ReplicatedTensor):
assert isinstance(rhs, ShardedTensor)
st_size = rhs.size()
st_meta = rhs.local_shards()[0].metadata
if st_size != lhs.size():
# try to broadcast replicated tensor
lhs = lhs.expand(st_size)
replica_part = narrow_tensor(lhs, st_meta)
res = op(replica_part, rhs.local_tensor())
return ShardedTensor._init_from_local_tensor(
res,
rhs.sharding_spec(),
rhs.size(), # type: ignore[arg-type]
process_group=pg)
elif isinstance(rhs, ReplicatedTensor):
assert isinstance(lhs, ShardedTensor)
st_size = lhs.size()
st_meta = lhs.local_shards()[0].metadata
if st_size != rhs.size():
# try to broadcast replicated tensor
rhs = rhs.expand(st_size)
replica_part = narrow_tensor(rhs, st_meta)
res = op(lhs.local_tensor(), replica_part)
return ShardedTensor._init_from_local_tensor(
res,
lhs.sharding_spec(),
lhs.size(), # type: ignore[arg-type]
process_group=pg)
elif isinstance(lhs, (int, float)):
assert isinstance(rhs, ShardedTensor)
res = op(lhs, rhs.local_tensor())
return ShardedTensor._init_from_local_tensor(
res,
rhs.sharding_spec(),
rhs.size(), # type: ignore[arg-type]
process_group=pg)
elif isinstance(rhs, (int, float)):
assert isinstance(lhs, ShardedTensor)
res = op(lhs.local_tensor(), rhs)
return ShardedTensor._init_from_local_tensor(
res,
lhs.sharding_spec(),
lhs.size(), # type: ignore[arg-type]
process_group=pg)
else:
raise RuntimeError(
f"torch function '{op.__name__}', with args: {args} and "
f"kwargs: {kwargs} not supported yet for ShardedTensor!")
def register_math_op(op):
@sharded_op_impl(op)
def binary_math_op(types, args=(), kwargs=None, pg=None):
return binary_math_op_impl(op, types, args, kwargs, pg)
binary_ops = [
# add
torch.add,
Tensor.add,
Tensor.__add__,
Tensor.__radd__,
# sub
torch.sub,
Tensor.sub,
Tensor.__sub__,
Tensor.__rsub__,
# mul
torch.mul,
Tensor.mul,
Tensor.__mul__,
Tensor.__rmul__,
# div
torch.div,
Tensor.div,
Tensor.__div__,
Tensor.__rdiv__,
]
for op in binary_ops:
register_math_op(op)
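
# Usage sketch (assumes `st` is a ShardedTensor and `rt` a ReplicatedTensor of a
# compatible size on the same process group): once the ops above are registered,
# expressions such as `st + rt`, `2.0 * st`, or `torch.div(st, 3)` dispatch to
# binary_math_op_impl and return a new ShardedTensor with the same sharding spec.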
|
[
"pytorchmergebot@users.noreply.github.com"
] |
pytorchmergebot@users.noreply.github.com
|
c00894ef0c8a747b7a9d05f73efe554aa75785e6
|
5111b0c881c8d86705f2b237e14024396e34091a
|
/task_check_list/models/task_check_list.py
|
88812aad7bc1b21e05fae5f1a896ebb05a2373dd
|
[] |
no_license
|
odoomates/odooapps
|
a22fa15346694563733008c42549ebc0da7fc9f6
|
68061b6fa79818d17727ef620e28fff44b48df72
|
refs/heads/16.0
| 2023-08-11T15:25:28.508718
| 2023-08-10T17:58:45
| 2023-08-10T17:58:45
| 173,598,986
| 182
| 306
| null | 2023-08-10T17:58:46
| 2019-03-03T16:20:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
class ProjectTask(models.Model):
_inherit = 'project.task'
@api.depends('task_checklist')
def checklist_progress(self):
total_len = self.env['task.checklist'].search_count([])
for rec in self:
if total_len != 0:
check_list_len = len(rec.task_checklist)
rec.checklist_progress = (check_list_len * 100) / total_len
else:
rec.checklist_progress = 0
task_checklist = fields.Many2many('task.checklist', string='Check List')
checklist_progress = fields.Float(compute=checklist_progress, string='Progress', store=True,
default=0.0)
max_rate = fields.Integer(string='Maximum rate', default=100)
class TaskChecklist(models.Model):
_name = 'task.checklist'
_description = 'Checklist for the task'
name = fields.Char(string='Name', required=True)
description = fields.Char(string='Description')
|
[
"odoomates@gmail.com"
] |
odoomates@gmail.com
|
1c68bb6b8863c584a3f0728adcaa19a31159f831
|
754d26af3d5fa0900d1dbfc934f3b9f0970e2a47
|
/unchained/community/announcement/views.py
|
da9977b7d41c7f38d2fd2e479fe5ec88fbdaba8e
|
[] |
no_license
|
live-wire/community
|
8a7bfdb4e2d6562d12be334ba0e655ffe041bb5f
|
7b2efa7b78465134138ee08fc557f4fedf678394
|
refs/heads/master
| 2021-07-11T10:26:31.535653
| 2020-03-20T02:37:30
| 2020-03-20T02:37:30
| 140,683,308
| 3
| 2
| null | 2020-06-06T12:03:38
| 2018-07-12T08:17:50
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
from django.shortcuts import render
from rest_framework import generics
from rest_framework import mixins
from django.contrib.auth.models import User
from rest_framework import permissions
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import renderers
from rest_framework import viewsets
# Create your views here.
from rest_framework.decorators import action
from community.csrfsession import CsrfExemptSessionAuthentication
from .serializers import AnnouncementSerializer
from .models import Announcement
class AnnouncementViewSet(viewsets.ModelViewSet):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions.
Additionally we also provide an extra `highlight` action.
"""
queryset = Announcement.objects.all()
serializer_class = AnnouncementSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
authentication_classes = (CsrfExemptSessionAuthentication, )
|
[
"dhruv.life@hotmail.com"
] |
dhruv.life@hotmail.com
|
50a4aa9cfcccc1e4f762802aeab4c1d0c615195b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04019/s609202447.py
|
9cd2c625f2777b825900b773714a1f4e6fb7849b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
S=input()
n=0
s=0
w=0
e=0
for i in range(len(S)):
if S[i]=="N":
n+=1
if S[i]=="S":
s+=1
if S[i]=="W":
w+=1
if S[i]=="E":
e+=1
# movable iff N pairs with S and W pairs with E (and at least one pair exists)
if (n != 0 and s != 0 and w != 0 and e != 0) or (n == 0 and s == 0 and w != 0 and e != 0) or (n != 0 and s != 0 and w == 0 and e == 0):
print("Yes")
else:
print("No")
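# Examples (sketch): "SN" -> Yes (N and S pair up); "NW" -> No (neither pair is balanced)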
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
96df2462240b880242a521c7a6728ce366df98c0
|
553b34a101c54090e68f540d96369ac7d5774d95
|
/python/python_koans/python2/koans/about_list_assignments.py
|
aa05dc5b8a69e50728be9798b3455af462f5c023
|
[
"MIT"
] |
permissive
|
topliceanu/learn
|
fd124e1885b5c0bfea8587510b5eab79da629099
|
1c5b1433c3d6bfd834df35dee08607fcbdd9f4e3
|
refs/heads/master
| 2022-07-16T19:50:40.939933
| 2022-06-12T15:40:20
| 2022-06-12T15:40:20
| 21,684,180
| 26
| 12
|
MIT
| 2020-03-26T20:51:35
| 2014-07-10T07:22:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 947
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutArrayAssignments in the Ruby Koans
#
from runner.koan import *
class AboutListAssignments(Koan):
def test_non_parallel_assignment(self):
names = ["John", "Smith"]
self.assertEqual(['John', 'Smith'], names)
def test_parallel_assignments(self):
first_name, last_name = ["John", "Smith"]
self.assertEqual('John', first_name)
self.assertEqual('Smith', last_name)
def test_parallel_assignments_with_sublists(self):
first_name, last_name = [["Willie", "Rae"], "Johnson"]
self.assertEqual(['Willie', 'Rae'], first_name)
self.assertEqual('Johnson', last_name)
def test_swapping_with_parallel_assignment(self):
first_name = "Roy"
last_name = "Rob"
first_name, last_name = last_name, first_name
self.assertEqual('Rob', first_name)
self.assertEqual('Roy', last_name)
|
[
"alexandru.topliceanu@gmail.com"
] |
alexandru.topliceanu@gmail.com
|
b883d4613806a95ab753103d22f2fcd096a20b3f
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/contrib/cv/others/Pix2Pix/util/__init__.py
|
4de848e30b5a486d7214bec80276e35a3a4d0d04
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 667
|
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package includes a miscellaneous collection of useful helper functions."""
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
9c439dd90703b73edc146cfbc628fab4e7984a37
|
cfa08425d0a457e0c673543b6e16f0a02effe05f
|
/projects/admm_4bus/data/sixbus/plot.py
|
86a8e56d3602f498a28cb6b6409a2bb9500fe2cf
|
[] |
no_license
|
missinglpf/Distributed_optimization
|
5b3dfea8b2a29225761537531322e421be83d7a8
|
84040eebd3f04acf4c09e5e4ff2e59e752bf3fae
|
refs/heads/master
| 2020-08-01T03:42:36.455932
| 2018-06-25T15:59:44
| 2018-06-25T15:59:44
| 210,850,421
| 1
| 0
| null | 2019-09-25T13:20:11
| 2019-09-25T13:20:10
| null |
UTF-8
|
Python
| false
| false
| 4,376
|
py
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from datetime import datetime
# df0 = pd.read_csv('lamda300/data_A_0.csv')
# df1 = pd.read_csv('lamda300/data_A_0.csv')
# df0 = pd.read_csv('lamda200/data_A_0.csv')
# df3 = pd.read_csv('lamda100/data_A_0.csv')
df0 = pd.read_csv('lamda50/data_A_0.csv')
df1 = pd.read_csv('lamda50/data_A_1.csv')
df2 = pd.read_csv('lamda50/data_A_2.csv')
p_total = df0['Q'][2:502]+df1['Q'][2:502]+df2['Q'][2:502] - [2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label="lamda=50", linewidth=0.5)
df0 = pd.read_csv('lamda30/data_A_0.csv')
df1 = pd.read_csv('lamda30/data_A_1.csv')
df2 = pd.read_csv('lamda30/data_A_2.csv')
p_total = df0['Q'][2:502]+df1['Q'][2:502]+df2['Q'][2:502] - [2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label = "lamda=30", linewidth=0.5)
df0 = pd.read_csv('lamda25/data_A_0.csv')
df1 = pd.read_csv('lamda25/data_A_1.csv')
df2 = pd.read_csv('lamda25/data_A_2.csv')
p_total = df0['Q'][2:502]+df1['Q'][2:502]+df2['Q'][2:502] - [2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label = "lamda=25", linewidth=0.5)
df0 = pd.read_csv('lamda20/data_A_0.csv')
df1 = pd.read_csv('lamda20/data_A_1.csv')
df2 = pd.read_csv('lamda20/data_A_2.csv')
p_total = df0['Q'][2:502]+df1['Q'][2:502]+df2['Q'][2:502] - [2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label = "lamda=20", linewidth=0.5)
df0 = pd.read_csv('lamda15/data_A_0.csv')
df1 = pd.read_csv('lamda15/data_A_1.csv')
df2 = pd.read_csv('lamda15/data_A_2.csv')
p_total = df0['Q'][2:502]+df1['Q'][2:502]+df2['Q'][2:502] - [2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label = "lamda=15", linewidth=0.5)
df0 = pd.read_csv('lamda15/data_A_0.csv')
df1 = pd.read_csv('lamda15/data_A_1.csv')
df2 = pd.read_csv('lamda15/data_A_2.csv')
p_total = [df0['Q'][502]+df1['Q'][502]+df2['Q'][502] - 2.1]*500
plt.plot(df0['ADMM_IT'][1:501], p_total, label = "Optimal value", linewidth=2, linestyle='--')
# df0 = pd.read_csv('lamda10/data_A_0.csv')
# df1 = pd.read_csv('lamda10/data_A_1.csv')
# df2 = pd.read_csv('lamda10/data_A_2.csv')
#
# p_total = df0['P'][2:502]+df1['P'][2:502]+df2['P'][2:502] - [2.1]*500
# plt.plot(df0['ADMM_IT'][1:501], p_total, label = "lamda=10", linewidth=0.5)
# df5 = pd.read_csv('lamda25/data_A_2.csv')
# df6 = pd.read_csv('lamda15/data_A_2.csv')
# df7 = pd.read_csv('lamda20/data_A_2.csv')
# df8 = pd.read_csv('lamda30/data_A_2.csv')
# df2 = pd.read_csv('lamda300/data_A_2.csv')
# df3 = pd.read_csv('lamda300/data_A_3.csv')
# df4 = pd.read_csv('lamda300/data_A_4.csv')
# df5 = pd.read_csv('lamda300/data_A_5.csv')
# df['Time'] = df['Time'].map(lambda x: datetime.strptime(str(x), '%Y/%m/%d %H:%M:%S.%f'))
# plt.plot(df0['ADMM_IT'][1:6000], df0['P'][1:6000], label = "lamda=200")
# plt.plot(df0['ADMM_IT'][1:6000], df1['P'][1:6000], label = "lamda=300")
# plt.plot(df4['ADMM_IT'][1:3000], df3['Q'][1:3000], label = "lamda=100")
# central = [df5['P'][1000]]*1001
# plt.plot(df4['ADMM_IT'][1:500], df4['P'][1:500], label = "lamda=50", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df8['P'][1:500], label = "lamda=30", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df5['P'][1:500], label = "lamda=25", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df7['P'][1:500], label = "lamda=20", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df6['P'][1:500], label = "lamda=15", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], central[1:500], label = "Optimal value", linewidth=2, linestyle='--')
# central = [df5['Q'][1000]]*1001
# plt.plot(df4['ADMM_IT'][1:500], df4['Q'][1:500], label = "lamda=50", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df8['Q'][1:500], label = "lamda=30", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df5['Q'][1:500], label = "lamda=25", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df7['Q'][1:500], label = "lamda=20", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], df6['Q'][1:500], label = "lamda=15", linewidth=0.5)
# plt.plot(df6['ADMM_IT'][1:500], central[1:500], label = "Optimal value", linewidth=2, linestyle='--')
# plt.plot(df1['Time'][1:15000], df1['X_real'][1:15000])
# plt.plot(df2['Time'][1:15000], df2['X_real'][1:15000])
plt.legend()
plt.xlabel("Number of iterations")
plt.ylabel("Reactive power loss(pu)")
# plt.ylabel("Reactive power(pu)")
plt.show()
|
[
"tunglam87@gmail.com"
] |
tunglam87@gmail.com
|
afb02e97b0e4004e14d9c672ec1972d124005491
|
f1bff0e018463081513c30258a67f238f5d08396
|
/finalizing.py
|
b5105ab4545ed0f1ea81bf1665dca4f52c311693
|
[] |
no_license
|
bellyfat/speaker-Identification
|
cfcedd86ea634d5df19e560acea250c5b8dbc5d0
|
34c9ce12c6400f116e04a0d1be75e0e79228d599
|
refs/heads/master
| 2022-04-07T12:23:59.446618
| 2019-08-26T07:59:44
| 2019-08-26T07:59:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
from recoder import *
import os
import time
import wave
import pylab
def training():
get_model()
print("training complete....")
# getting the audio
def getAudio():
rec = Recorder()
print("Start recording")
rec.start()
time.sleep(11)
print("Stop recording")
rec.stop()
print("Saving")
rec.save("test.wav")
#getAudio()
def get_file(path):
#path = 'wav_file'
name = os.path.basename(path)
filename, file_extension = os.path.splitext(name)
return filename
def graph_spectrogram(wav_file):
sound_info, frame_rate = get_wav_info(wav_file)
pylab.figure(num=None, figsize=(19, 12))
pylab.subplot(111)
pylab.title('spectrogram of %r' % wav_file)
pylab.specgram(sound_info, Fs=frame_rate)
pylab.savefig(get_file(wav_file)+".png")
def get_wav_info(wav_file):
wav = wave.open(wav_file, 'r')
frames = wav.readframes(-1)
    sound_info = pylab.frombuffer(frames, 'int16')
frame_rate = wav.getframerate()
wav.close()
return sound_info, frame_rate
def create_img():
graph_spectrogram("test.wav")
print("img creeated")
def delete_wav(file):
if os.path.exists(file):
os.remove(file)
print("file deleted")
else:
print("The file does not exist")
def delt():
file_name = "test.wav"
delete_wav(file_name)
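
# Typical pipeline (sketch): record a clip, render its spectrogram, then clean up:
#   getAudio(); create_img(); delt()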
|
[
"noreply@github.com"
] |
bellyfat.noreply@github.com
|
0a6d677d3bfe4d9fc3186df61d42fd1449051a94
|
cb0e7d6493b23e870aa625eb362384a10f5ee657
|
/solutions/python3/0239.py
|
9306b2072110eb6570e723b0a84bc6ed856cb9dd
|
[] |
no_license
|
sweetpand/LeetCode-1
|
0acfa603af254a3350d457803449a91322f2d1a7
|
65f4ef26cb8b2db0b4bf8c42bfdc76421b479f94
|
refs/heads/master
| 2022-11-14T07:01:42.502172
| 2020-07-12T12:25:56
| 2020-07-12T12:25:56
| 279,088,171
| 1
| 0
| null | 2020-07-12T15:03:20
| 2020-07-12T15:03:19
| null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
from typing import List
import collections


class Solution:
def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
ans = []
decreasingQueue = collections.deque()
for i, num in enumerate(nums):
while decreasingQueue and num > decreasingQueue[-1]:
decreasingQueue.pop()
decreasingQueue.append(num)
if i >= k - 1:
ans.append(decreasingQueue[0])
if nums[i - k + 1] == decreasingQueue[0]:
decreasingQueue.popleft()
return ans
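
# Usage sketch: Solution().maxSlidingWindow([1, 3, -1, -3, 5, 3, 6, 7], 3)
# returns [3, 3, 5, 5, 6, 7] -- the maximum of each length-3 window.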
|
[
"walkccray@gmail.com"
] |
walkccray@gmail.com
|
8e725276edde728b56862510da106778c1da2780
|
7f57c12349eb4046c40c48acb35b0f0a51a344f6
|
/2017/002_AddTwoNumbers_v1.py
|
1ea044ab8f4b389954d82f3afbf3dffdd586c7d5
|
[] |
no_license
|
everbird/leetcode-py
|
0a1135952a93b93c02dcb9766a45e481337f1131
|
b093920748012cddb77258b1900c6c177579bff8
|
refs/heads/master
| 2022-12-13T07:53:31.895212
| 2022-12-10T00:48:39
| 2022-12-10T00:48:39
| 11,116,752
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,639
|
py
|
#!/usr/bin/env python
# encoding: utf-8
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# @param {ListNode} l1
# @param {ListNode} l2
# @return {ListNode}
def addTwoNumbers(self, l1, l2):
head = p = None
carry = 0
while l1 or l2:
l1_val = l1.val if l1 else 0
l2_val = l2.val if l2 else 0
r = l1_val + l2_val + carry
if r >= 10:
carry = r // 10
r = r % 10
else:
carry = 0
if not p:
head = p = ListNode(r)
else:
p.next = ListNode(r)
p = p.next
if l1:
l1 = l1.next
if l2:
l2 = l2.next
if carry:
p.next = ListNode(1)
return head
def print_list(list_head):
print_l(list_head)
print '\n'
def print_l(list_head):
if list_head:
print list_head.val,
print_l(list_head.next)
if __name__ == '__main__':
l1a = ListNode(2)
l1b = ListNode(4)
l1c = ListNode(3)
l1a.next = l1b
l1b.next = l1c
l1 = l1a
l2a = ListNode(5)
l2b = ListNode(6)
l2c = ListNode(4)
l2a.next = l2b
l2b.next = l2c
l2 = l2a
s = Solution()
lr = s.addTwoNumbers(l1, l2)
print_list(l1)
print_list(l2)
print_list(lr)
print '>>>>>>'
l1a = ListNode(5)
l1 = l1a
l2a = ListNode(5)
l2 = l2a
s = Solution()
lr = s.addTwoNumbers(l1, l2)
print_list(l1)
print_list(l2)
print_list(lr)
|
[
"stephen.zhuang@gmail.com"
] |
stephen.zhuang@gmail.com
|
47fc7609c7840f44a8a36732191723aaed6399c9
|
45c170fb0673deece06f3055979ece25c3210380
|
/toontown/coghq/BossbotCountryClubKartRoom_Battle00.py
|
6209ace14272197b5d427ec788369610888baf8c
|
[] |
no_license
|
MTTPAM/PublicRelease
|
5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f
|
825f562d5021c65d40115d64523bb850feff6a98
|
refs/heads/master
| 2021-07-24T09:48:32.607518
| 2018-11-13T03:17:53
| 2018-11-13T03:17:53
| 119,129,731
| 2
| 6
| null | 2018-11-07T22:10:10
| 2018-01-27T03:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,780
|
py
|
#Embedded file name: toontown.coghq.BossbotCountryClubKartRoom_Battle00
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_12/models/bossbotHQ/BossbotKartBoardingRm',
'wantDoors': 1},
1001: {'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
110400: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(4, 0, 0),
'hpr': Point3(270, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 0,
'radius': 10},
110000: {'type': 'elevatorMarker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(26.854, 0, 0),
'hpr': Vec3(90, 0, 0),
'scale': Vec3(1, 1, 1),
'modelPath': 0},
10002: {'type': 'nodepath',
'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
110401: {'type': 'nodepath',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(101.07, 0, 0),
'hpr': Point3(270, 0, 0),
'scale': Vec3(1, 1, 1)}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities,
'scenarios': [Scenario0]}
|
[
"linktlh@gmail.com"
] |
linktlh@gmail.com
|
d8194a910febb338161234dd2ca1b0ca28446a04
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03129/s416237729.py
|
26351727aeb63985e21352d846400af1977248dc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
n,k = input().split()
n = int(n)
k = int(k)
for l in range(k):
l = 2*l +1
c = l
if c <= n:
print('YES')
else:
print('NO')
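# Example (sketch): n=5, k=3 checks the odd numbers 1, 3, 5 -> prints YES three times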
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
373acc2bcb313e92d96b6a0047fef866c1c722f7
|
7bc54bae28eec4b735c05ac7bc40b1a8711bb381
|
/src/scratch/code2023/namespace.py
|
99715efdf2a7cd185fc763fdba1aab4033c39f8a
|
[] |
no_license
|
clover3/Chair
|
755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e
|
a2102ebf826a58efbc479181f1ebb5de21d1e49f
|
refs/heads/master
| 2023-07-20T17:29:42.414170
| 2023-07-18T21:12:46
| 2023-07-18T21:12:46
| 157,024,916
| 0
| 0
| null | 2023-02-16T05:20:37
| 2018-11-10T21:55:29
|
Python
|
UTF-8
|
Python
| false
| false
| 5,654
|
py
|
import logging
import os
from tensorflow.python.ops.summary_ops_v2 import create_file_writer
import trainer_v2.per_project.transparency.mmp.probe.probe_common
from cpath import output_path
from misc_lib import path_join
from trainer_v2.custom_loop.modeling_common.adam_decay import AdamWeightDecay
from trainer_v2.custom_loop.modeling_common.tf_helper import distribute_dataset
from trainer_v2.train_util.get_tpu_strategy import get_strategy2
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
from trainer_v2.chair_logging import c_log, IgnoreFilter, IgnoreFilterRE
import tensorflow as tf
from cpath import get_bert_config_path
from taskman_client.wrapper3 import report_run3
from trainer_v2.custom_loop.prediction_trainer import ModelV2IF, ModelV3IF
from trainer_v2.custom_loop.run_config2 import RunConfig2, get_run_config2
from trainer_v2.per_project.transparency.mmp.tt_model.model_conf_defs import InputShapeConfigTT, InputShapeConfigTT100_4
from trainer_v2.train_util.arg_flags import flags_parser
class LinearV3:
def __init__(self):
        # the generator-based dataset below yields float32, so declare the input as float32
        x = tf.keras.layers.Input(shape=(2,), dtype=tf.float32, name="x")
x_f = tf.cast(x, tf.float32)
y = tf.keras.layers.Dense(1)(x_f)
inputs = [x,]
output = {'pred': y}
self.model = tf.keras.models.Model(inputs=inputs, outputs=output)
    def get_metrics(self):
output_d = {}
metric = ProbeMAE("mae")
output_d["mae2"] = metric
return output_d
Metric = trainer_v2.per_project.transparency.mmp.probe.probe_common.Metric
class ProbeMAE(Metric):
def __init__(self, name, **kwargs):
super(ProbeMAE, self).__init__(name=name, **kwargs)
self.mae = self.add_weight(name='mae', initializer='zeros')
self.count = self.add_weight(name='count', initializer='zeros')
# self.metric_inner = tf.keras.metrics.MeanAbsoluteError()
def update_state(self, output_d, _sample_weight=None):
v = tf.reduce_sum(output_d['pred'])
self.mae.assign_add(v)
self.count.assign_add(1.0)
def result(self):
return self.mae / self.count
def reset_state(self):
self.mae.assign(0.0)
self.count.assign(0.0)
class LinearModel(ModelV3IF):
def __init__(self, input_shape: InputShapeConfigTT):
self.inner_model = None
self.model: tf.keras.models.Model = None
self.loss = None
self.input_shape: InputShapeConfigTT = input_shape
self.log_var = ["loss"]
def build_model(self):
self.inner_model = LinearV3()
def get_keras_model(self) -> tf.keras.models.Model:
return self.inner_model.model
def init_checkpoint(self, init_checkpoint):
pass
def get_train_metrics(self):
return {}
def get_train_metrics_for_summary(self):
return self.inner_model.get_metrics()
def get_loss_fn(self):
def get_loss(d):
return tf.reduce_sum(d['pred'])
return get_loss
@report_run3
def main(args):
c_log.info(__file__)
run_config: RunConfig2 = get_run_config2(args)
run_config.print_info()
input_shape = InputShapeConfigTT100_4()
model_v2 = LinearModel(input_shape)
optimizer = AdamWeightDecay(
learning_rate=1e-3,
exclude_from_weight_decay=[]
)
def build_dataset(input_files, is_for_training):
def generator():
for _ in range(100):
yield [0., 0.]
train_dataset = tf.data.Dataset.from_generator(
generator,
output_types=(tf.float32),
output_shapes=(tf.TensorShape([2])))
return train_dataset.batch(2)
strategy = get_strategy2(False, "")
train_dataset = build_dataset(run_config.dataset_config.train_files_path, True)
eval_dataset = build_dataset(run_config.dataset_config.eval_files_path, False)
dist_train_dataset = distribute_dataset(strategy, train_dataset)
eval_batches = distribute_dataset(strategy, eval_dataset)
train_log_dir = path_join(output_path, "train_log")
step_idx = 0
with strategy.scope():
model_v2.build_model()
train_summary_writer = create_file_writer(train_log_dir, name="train")
train_summary_writer.set_as_default()
train_metrics = model_v2.get_train_metrics_for_summary()
def train_step(item):
model = model_v2.get_keras_model()
with tf.GradientTape() as tape:
output_d = model(item, training=True)
step = optimizer.iterations
for name, metric in train_metrics.items():
metric.update_state(output_d)
sc = tf.summary.scalar(name, metric.result(), step=step)
print(sc)
return tf.constant(0.0)
@tf.function
def distributed_train_step(train_itr, steps_per_execution):
total_loss = 0.0
n_step = 0.
for _ in tf.range(steps_per_execution):
batch_item = next(train_itr)
per_replica_losses = strategy.run(train_step, args=(batch_item, ))
loss = strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
total_loss += loss
n_step += 1.
train_loss = total_loss / n_step
return train_loss
train_itr = iter(dist_train_dataset)
for m in train_metrics.values():
m.reset_state()
train_loss = distributed_train_step(train_itr, 1)
step_idx += 1
if __name__ == "__main__":
args = flags_parser.parse_args(sys.argv[1:])
main(args)
|
[
"lesterny@gmail.com"
] |
lesterny@gmail.com
|
6cedc11b21eb576d025e57da6ccc3febbc2bb6c4
|
d2c80cd70f3220165c7add7ed9a103c0ed1ab871
|
/python/HOMEWORK/5th_Session/Answers/Class/1/1.py
|
37d69bb2650fbd94bf752634ed7bc727c291f579
|
[] |
no_license
|
nervaishere/DashTeam
|
2a786af8a871200d7facfa3701a07f97230b706e
|
a57b34a601f74b06a7be59f2bfe503cbd2a6c15f
|
refs/heads/master
| 2023-08-24T12:24:18.081164
| 2021-10-09T21:10:54
| 2021-10-09T21:10:54
| 393,689,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
a=int(input("enter your first number:" ))
b=int(input("enter your second number:" ))
c=int(input("enter your third number:" ))
maximum=max(a,b,c)
minimum=min(a,b,c)
if a==maximum and b==minimum:
print(a, c, b)
elif a==maximum and c==minimum:
print(a, b, c)
elif b==maximum and a==minimum:
print(b, c, a)
elif b==maximum and c==minimum:
print(b, a, c)
elif c==maximum and a==minimum:
    print(c, b, a)
elif c==maximum and b==minimum:
    print(c, a, b)
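# A more compact equivalent (sketch) sorts the three values directly:
#   for v in sorted((a, b, c), reverse=True):
#       print(v, end=" ")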
|
[
"athenajafari34@gmail.com"
] |
athenajafari34@gmail.com
|
86afd457c842b29e419998349f8353c18483ab10
|
54ab0f79f5d68f4732ca7d205f72ecef99862303
|
/benchmarks/distributed/rpc/parameter_server/metrics/ProcessedMetricsPrinter.py
|
7ff8c3171a83336b367299649c37b08f416d7ca2
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
csarofeen/pytorch
|
a9dd0f8ffa0642d72df2d5e109a8b4d9c2389cbc
|
e8557ec5e064608577f81e51ccfe7c36c917cb0f
|
refs/heads/devel
| 2023-04-30T02:42:13.558738
| 2023-03-14T00:50:01
| 2023-03-14T00:50:01
| 88,071,101
| 35
| 10
|
NOASSERTION
| 2023-06-21T17:37:30
| 2017-04-12T16:02:31
|
C++
|
UTF-8
|
Python
| false
| false
| 3,206
|
py
|
import statistics
import pandas as pd
from tabulate import tabulate
class ProcessedMetricsPrinter:
def print_data_frame(self, name, processed_metrics):
print(f"metrics for {name}")
data_frame = self.get_data_frame(processed_metrics)
print(tabulate(data_frame, showindex=False, headers=data_frame.columns, tablefmt="grid"))
def combine_processed_metrics(self, processed_metrics_list):
r"""
A method that merges the value arrays of the keys in the dictionary
of processed metrics.
Args:
processed_metrics_list (list): a list containing dictionaries with
recorded metrics as keys, and the values are lists of elapsed times.
Returns::
A merged dictionary that is created from the list of dictionaries passed
into the method.
Examples::
>>> instance = ProcessedMetricsPrinter()
>>> dict_1 = trainer1.get_processed_metrics()
>>> dict_2 = trainer2.get_processed_metrics()
>>> print(dict_1)
{
"forward_metric_type,forward_pass" : [.0429, .0888]
}
>>> print(dict_2)
{
"forward_metric_type,forward_pass" : [.0111, .0222]
}
>>> processed_metrics_list = [dict_1, dict_2]
>>> result = instance.combine_processed_metrics(processed_metrics_list)
>>> print(result)
{
"forward_metric_type,forward_pass" : [.0429, .0888, .0111, .0222]
}
"""
processed_metric_totals = {}
for processed_metrics in processed_metrics_list:
for metric_name, values in processed_metrics.items():
if metric_name not in processed_metric_totals:
processed_metric_totals[metric_name] = []
processed_metric_totals[metric_name] += values
return processed_metric_totals
def get_data_frame(self, processed_metrics):
df = pd.DataFrame(
columns=['name', 'min', 'max', 'mean', 'variance', 'stdev']
)
for metric_name in sorted(processed_metrics.keys()):
values = processed_metrics[metric_name]
row = {
"name": metric_name,
"min": min(values),
"max": max(values),
"mean": statistics.mean(values),
"variance": statistics.variance(values),
"stdev": statistics.stdev(values)
}
df = df.append(row, ignore_index=True)
return df
def print_metrics(self, name, rank_metrics_list):
if rank_metrics_list:
metrics_list = []
for rank, metric in rank_metrics_list:
self.print_data_frame(f"{name}={rank}", metric)
metrics_list.append(metric)
combined_metrics = self.combine_processed_metrics(metrics_list)
self.print_data_frame(f"all {name}", combined_metrics)
def save_to_file(self, data_frame, file_name):
file_name = f"data_frames/{file_name}.csv"
data_frame.to_csv(file_name, encoding='utf-8', index=False)
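
# A minimal usage sketch (hypothetical data, mirroring the docstring example above):
#
#   printer = ProcessedMetricsPrinter()
#   metrics_a = {"forward_metric_type,forward_pass": [.0429, .0888]}
#   metrics_b = {"forward_metric_type,forward_pass": [.0111, .0222]}
#   printer.print_metrics("trainer", [(0, metrics_a), (1, metrics_b)])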
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
64a2f6689e74b94b8ed76e6cae0bed317078440b
|
bfc25f1ad7bfe061b57cfab82aba9d0af1453491
|
/data/external/repositories_2to3/204509/kaggle-liberty-mutual-group-master/code/correlation.py
|
b6f3c6e3fd83c3c6656f2ae5fe14cd5de6b86512
|
[
"MIT"
] |
permissive
|
Keesiu/meta-kaggle
|
77d134620ebce530d183467202cf45639d9c6ff2
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
refs/heads/master
| 2020-03-28T00:23:10.584151
| 2018-12-20T19:09:50
| 2018-12-20T19:09:50
| 147,406,338
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# -*- coding: utf-8 -*-
"""
simple code to calculate Pearson correlation between two results
"""
import pandas as pd
import numpy as np
print('Reading data...')
xgb1 = pd.read_csv("../output/xgboost_1.csv")
xgb2 = pd.read_csv("../output/xgboost_2.csv")
rf = pd.read_csv("../output/rf.csv")
gbm = pd.read_csv("../output/gbm.csv")
print('Pearson correlation =', np.corrcoef(gbm.Hazard, rf.Hazard)[0, 1])
|
[
"keesiu.wong@gmail.com"
] |
keesiu.wong@gmail.com
|
7461637d40a3096ec3e12766fc5d9198b8cb2fdb
|
3e4bb5b4036a66d25a72793c1deaa4f5572d37bf
|
/apps/pyvcal/tests/independent/revision.py
|
8c3bae6df8ddd6187220622f0a6e5e8f2e1e65fd
|
[
"MIT"
] |
permissive
|
hbussell/pinax-tracker
|
f7f7eb0676d01251d7d8832557be14665755844d
|
4f6538324b2e1f7a8b14c346104d2f1bd8e1556b
|
refs/heads/master
| 2021-01-20T12:06:29.630850
| 2010-02-03T00:39:05
| 2010-02-03T00:39:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,092
|
py
|
import modulespecific
import unittest
class TestRevision(modulespecific.ModuleSpecificTestCase):
"""Test the Revision interface."""
def setUp(self):
"""Create and connect to a repository."""
self.basic_repo = self.test_module.BasicRepository()
self.repo = self.basic_repo.repo()
"""Get the latest revision from that repository."""
self.revisions = self.repo.revisions
self.head = self.repo.branches[""].head
def tearDown(self):
"""Destroy the created repository."""
self.basic_repo.teardown()
class TestRevisionPredecessors(TestRevision):
"""Test Revision.predecessors"""
def runTest(self):
"""Test that the latest revision returns the expected predecessor i.e: Revision(rev_num - 1)."""
# PROBLEM: This test fails (at least on git) because there is only ONE
# revision in the test repo, therefore self.head.properties.time is equal
# to predecessors[0].properties.time
predecessors = self.head.predecessors
self.assertEquals(1, len(predecessors))
self.assert_(self.head.properties.time > predecessors[0].properties.time)
self.assertEquals(predecessors[0].properties.commit_message, "Rename README.txt to README")
class TestRevisionGetProperties(TestRevision):
"""Test Revision.properties"""
def runTest(self):
"""Test that the 'basic' test Revision.properties returns a non-null properties object."""
props = self.head.properties
self.assert_(props)
self.assert_(props.committer)
self.assert_(props.time)
self.assert_(props.commit_message)
class TestRevisionDiffWithParents(TestRevision):
"""Test Revision.diff_with_parents"""
def runTest(self):
"""Test the get diff with parents returns a valid RevisionDiff object."""
diff = self.head.diff_with_parent
diff_value = diff.value
self.assertEquals("", diff_value)
#TODO need a better test... base on branch_and_merge test repo
|
[
"harley@harley-desktop.(none)"
] |
harley@harley-desktop.(none)
|
5d074a0b8bca96ac9ec808db99922c922dfe31a1
|
bc441bb06b8948288f110af63feda4e798f30225
|
/monitor_sdk/model/flowable_service/process_instance_pb2.py
|
4138e633ea17940cb65e4ffd486693a240303547
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 6,546
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: process_instance.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='process_instance.proto',
package='flowable_service',
syntax='proto3',
serialized_options=_b('ZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_service'),
serialized_pb=_b('\n\x16process_instance.proto\x12\x10\x66lowable_service\"\xdc\x01\n\x0fProcessInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x1a\n\x12\x66lowableInstanceId\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x0f\n\x07\x63reator\x18\x04 \x01(\t\x12\r\n\x05\x63time\x18\x05 \x01(\t\x12\r\n\x05\x65time\x18\x06 \x01(\t\x12\x0e\n\x06status\x18\x07 \x01(\t\x12\x12\n\nstepIdList\x18\x08 \x03(\t\x12\x0e\n\x06stopAt\x18\t \x01(\t\x12\x13\n\x0bisSuspended\x18\n \x01(\x08\x12\x13\n\x0bisCancelled\x18\x0b \x01(\x08\x42LZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_serviceb\x06proto3')
)
_PROCESSINSTANCE = _descriptor.Descriptor(
name='ProcessInstance',
full_name='flowable_service.ProcessInstance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='flowable_service.ProcessInstance.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='flowableInstanceId', full_name='flowable_service.ProcessInstance.flowableInstanceId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flowable_service.ProcessInstance.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='flowable_service.ProcessInstance.creator', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='flowable_service.ProcessInstance.ctime', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='etime', full_name='flowable_service.ProcessInstance.etime', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='flowable_service.ProcessInstance.status', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stepIdList', full_name='flowable_service.ProcessInstance.stepIdList', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stopAt', full_name='flowable_service.ProcessInstance.stopAt', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isSuspended', full_name='flowable_service.ProcessInstance.isSuspended', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isCancelled', full_name='flowable_service.ProcessInstance.isCancelled', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=45,
serialized_end=265,
)
DESCRIPTOR.message_types_by_name['ProcessInstance'] = _PROCESSINSTANCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ProcessInstance = _reflection.GeneratedProtocolMessageType('ProcessInstance', (_message.Message,), {
'DESCRIPTOR' : _PROCESSINSTANCE,
'__module__' : 'process_instance_pb2'
# @@protoc_insertion_point(class_scope:flowable_service.ProcessInstance)
})
_sym_db.RegisterMessage(ProcessInstance)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
e1777a1bf88087f70635240d8c7855eca9233d1a
|
cb35fffaab9650a8b89019a0269ad7cdf772f757
|
/news/migrations/0001_initial.py
|
849f2c75fec1aa5d94778cd7ca1fc5ab0994583d
|
[] |
no_license
|
amazing22/my-second-blog
|
6c75dddae2650269805edfa9c0f2b89ba6f1db48
|
a5cc02663badcbb2efcaf6d291a634edbdb8a009
|
refs/heads/master
| 2021-08-08T03:19:58.404245
| 2017-11-09T12:32:22
| 2017-11-09T12:32:22
| 102,439,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-11 05:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pub_date', models.DateField()),
('headline', models.CharField(max_length=200)),
('content', models.TextField()),
],
),
migrations.CreateModel(
name='Reporter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=70)),
],
),
migrations.AddField(
model_name='article',
name='reporter',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='news.Reporter'),
),
]
|
[
"you@example.com"
] |
you@example.com
|
abb40f6104d91f9d09907f53c15d22b40b43d962
|
6f1e1c378997bf76942ce6e203e720035169ce27
|
/104-maximum-depth-of-binary-tree.py
|
7c47b2cce48c405645fdc77aba59813e9127047c
|
[
"MIT"
] |
permissive
|
yuenliou/leetcode
|
a489b0986b70b55f29d06c2fd7545294ba6e7ee5
|
e8a1c6cae6547cbcb6e8494be6df685f3e7c837c
|
refs/heads/main
| 2021-06-16T07:47:39.103445
| 2021-05-11T09:16:15
| 2021-05-11T09:16:15
| 306,536,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,927
|
py
|
#!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
from collections import deque
from datatype.tree_node import TreeNode
class Solution:
def maxDepth(self, root: TreeNode) -> int:
"""dfs"""
if not root:
return 0
else:
l = self.maxDepth(root.left)
r = self.maxDepth(root.right)
# return l + 1 if l > r else r + 1
return max(l, r) + 1
def maxDepth2(self, root: TreeNode) -> int:
"""bfs"""
if not root: return 0
cnt = 0
queue = deque()
queue.append(root)
while len(queue): # isEmpty()
temp = []
# cnt = len(queue); while cnt: ...; cnt -= 1
for _ in range(len(queue)):
root = queue.pop() # list.pop(0)
temp.append(root.val)
if root.left:
queue.appendleft(root.left)
if root.right:
queue.appendleft(root.right)
cnt += 1
return cnt
def main():
root = TreeNode(3)
n2 = TreeNode(9)
n3 = TreeNode(20)
n4 = TreeNode(15)
n5 = TreeNode(7)
root.setLeftNode(n2)
root.setRightNode(n3)
n3.setLeftNode(n4)
n3.setRightNode(n5)
solution = Solution()
ret = solution.maxDepth2(root)
print(ret)
'''104. Maximum Depth of Binary Tree
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root
node down to the farthest leaf node.
Note: a leaf is a node with no children.
Example:
Given the binary tree [3,9,20,null,null,15,7],
    3
   / \
  9  20
    /  \
   15   7
it returns a maximum depth of 3.
Source: LeetCode (力扣)
Link: https://leetcode-cn.com/problems/maximum-depth-of-binary-tree
Copyright belongs to LeetCode. Contact them for authorization before commercial
reuse; cite the source for non-commercial reuse.
'''
if __name__ == '__main__':
main()
|
[
"liuyuan@aplum.com.cn"
] |
liuyuan@aplum.com.cn
|
79800080c4d9f0483f339e63026999badf2cd752
|
4522fc52bc43654aadd30421a75bae00a09044f0
|
/alfa/diannad.py
|
f52594100e43207f34f304fe117f59a727bc3de3
|
[] |
no_license
|
qesoalpe/anelys
|
1edb8201aa80fedf0316db973da3a58b67070fca
|
cfccaa1bf5175827794da451a9408a26cd97599d
|
refs/heads/master
| 2020-04-07T22:39:35.344954
| 2018-11-25T05:23:21
| 2018-11-25T05:23:21
| 158,779,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
from sarah.acp_bson import Recipient
from base64 import b64decode
import json
with open('dianna_file', 'rb') as ff:
    config = json.loads(b64decode(ff.read()).decode())
def handle_request(msg):
if 'request_type' in msg:
if msg['request_type'] == 'get':
if msg['get'] == 'dianna/local_device':
return {'local_device': config['local_device']}
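# dispatch table: maps a message's "type_message" field to its handler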
dict_handle_msg = dict()
dict_handle_msg['request'] = handle_request
def read_msg(msg):
if 'type_message' in msg and msg['type_message'] in dict_handle_msg:
return dict_handle_msg[msg['type_message']](msg)
if __name__ == '__main__':
print("I'm citlali daemon.")
recipient = Recipient()
recipient.prepare('citlali', read_msg)
recipient.begin_receive_forever()
|
[
"qesoalpe@gmail.com"
] |
qesoalpe@gmail.com
|
5798d06030285a366239004b9efbbf2e57eedf93
|
5864e86954a221d52d4fa83a607c71bacf201c5a
|
/spacecomponents/server/components/itemtrader.py
|
d137971475932934dac6aa414f5db1d9685da1b9
|
[] |
no_license
|
connoryang/1v1dec
|
e9a2303a01e5a26bf14159112b112be81a6560fd
|
404f2cebf13b311e754d45206008918881496370
|
refs/heads/master
| 2021-05-04T02:34:59.627529
| 2016-10-19T08:56:26
| 2016-10-19T08:56:26
| 71,334,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,402
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\spacecomponents\server\components\itemtrader.py
from spacecomponents.common.components.component import Component
from spacecomponents.common.componentregistry import ExportCall
import evetypes
class ItemTrader(Component):
def __init__(self, itemTraderItemId, typeId, attributes, componentRegistry):
Component.__init__(self, itemTraderItemId, typeId, attributes, componentRegistry)
self.SubscribeToMessage('OnAddedToSpace', self.OnAddedToSpace)
def OnAddedToSpace(self, ballpark, spaceComponentDb):
self.tradeProcessor = TradeProcessor(ballpark, ballpark.inventory2, ballpark.inventoryMgr, self.itemID, self.attributes.inputItems, self.attributes.outputItems, self.attributes.interactionRange)
@ExportCall
def ProcessTrade(self, session):
return self.tradeProcessor.ProcessTrade(session.shipid, session.charid)
class TradeProcessor(object):
def __init__(self, ballpark, inventory2, inventoryMgr, itemTraderItemId, inputItems, outputItems, interactionRange):
self.ballpark = ballpark
self.inventory2 = inventory2
self.inventoryMgr = inventoryMgr
self.itemTraderItemId = itemTraderItemId
self.inputItems = inputItems
self.outputItems = outputItems
self.interactionRange = interactionRange
self.requiredCapacity = self.GetCapacityForItems(outputItems) - self.GetCapacityForItems(inputItems)
def ProcessTrade(self, shipId, ownerId):
with self.inventory2.LockedItemAndSubItems(shipId):
self.CheckDistance(shipId)
ship = self.inventoryMgr.GetInventoryFromIdEx(shipId, -1)
self.CheckCargoCapacity(ship)
cargoItems = ship.List(const.flagCargo)
itemsForRemoval = self._GetItemsForTrade(cargoItems)
if itemsForRemoval:
self._TakeItems(shipId, itemsForRemoval)
self._GiveItems(shipId, ownerId)
return True
return False
def _GetItemsForTrade(self, cargoItems):
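        # all-or-nothing: collect enough cargo stacks to cover every required input
        # type; if any requirement cannot be fully met, return {} so no trade happens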
itemsForTrade = {}
for requiredTypeId, requiredQuantity in self.inputItems.iteritems():
quantityLeft = self._GetItemsForTradeFromCargo(cargoItems, itemsForTrade, requiredTypeId, requiredQuantity)
if quantityLeft != 0:
return {}
return itemsForTrade
def _GetItemsForTradeFromCargo(self, cargoItems, itemsForTrade, requiredTypeId, requiredQuantity):
quantityLeft = requiredQuantity
for item in cargoItems:
if item.typeID == requiredTypeId:
quantity = min(quantityLeft, item.quantity)
itemsForTrade[item.itemID] = quantity
quantityLeft -= quantity
if quantityLeft == 0:
break
return quantityLeft
def _TakeItems(self, shipId, itemsForRemoval):
for itemId, quantityForRemoval in itemsForRemoval.iteritems():
self.inventory2.MoveItem(itemId, shipId, const.locationJunkyard, qty=quantityForRemoval)
def _GiveItems(self, shipId, ownerId):
for typeId, quantityForAdd in self.outputItems.iteritems():
self.inventory2.AddItem2(typeId, ownerId, shipId, qty=quantityForAdd, flag=const.flagCargo)
def GetCapacityForItems(self, items):
capacity = 0
for typeId, quantity in items.iteritems():
typeVolume = evetypes.GetVolume(typeId)
capacity += quantity * typeVolume
return capacity
def CheckCargoCapacity(self, ship):
shipCapacity = ship.GetCapacity(flag=const.flagCargo)
availableCapacity = shipCapacity.capacity - shipCapacity.used
if availableCapacity < self.requiredCapacity:
raise UserError('NotEnoughCargoSpace', {'available': shipCapacity.capacity - shipCapacity.used,
'volume': self.requiredCapacity})
def CheckDistance(self, shipId):
actualDistance = self.ballpark.GetSurfaceDist(self.itemTraderItemId, shipId)
if actualDistance > self.interactionRange:
typeName = evetypes.GetName(self.inventory2.GetItem(self.itemTraderItemId).typeID)
raise UserError('TargetNotWithinRange', {'targetGroupName': typeName,
'desiredRange': self.interactionRange,
'actualDistance': actualDistance})
|
[
"le02005@163.com"
] |
le02005@163.com
|
b018c86a7c6c80f8fa48f8ac5dcca77ac0fc80bc
|
d8c50195fe04a09bd98e12f0b18a84dbe4a3dfe2
|
/zeus/tasks/process_artifact.py
|
a16482e42a4078a95b72babb1012eb62ace1e806
|
[
"Apache-2.0"
] |
permissive
|
keegancsmith/zeus
|
44eeac0e9c99635f21bfa7ec744c84be7b40525e
|
e7bfe3db564ad1bbf449d8197f7d663fe41dd60a
|
refs/heads/master
| 2021-09-04T04:34:47.747175
| 2018-01-12T17:24:11
| 2018-01-12T21:55:31
| 117,591,379
| 0
| 0
| null | 2018-01-15T20:19:57
| 2018-01-15T20:19:57
| null |
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
from flask import current_app
from zeus import auth
from zeus.artifacts import manager as default_manager
from zeus.config import celery, db
from zeus.constants import Result
from zeus.models import Artifact, Job, Status
from zeus.utils import timezone
from .aggregate_job_stats import aggregate_build_stats_for_job
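# autoretry_for=(Exception,) with max_retries=None means the task is retried
# indefinitely on error; acks_late defers the queue ack until the task body finishes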
@celery.task(max_retries=None, autoretry_for=(Exception,), acks_late=True)
def process_artifact(artifact_id, manager=None, force=False, **kwargs):
artifact = Artifact.query.unrestricted_unsafe().get(artifact_id)
if artifact is None:
current_app.logger.error('Artifact %s not found', artifact_id)
return
if artifact.status == Status.finished and not force:
current_app.logger.info(
'Skipping artifact processing (%s) - already marked as finished', artifact_id)
return
artifact.status = Status.in_progress
artifact.date_started = timezone.now()
db.session.add(artifact)
db.session.flush()
auth.set_current_tenant(auth.Tenant(
repository_ids=[artifact.repository_id]))
job = Job.query.get(artifact.job_id)
if job.result == Result.aborted:
current_app.logger.info(
'Skipping artifact processing (%s) - Job aborted', artifact_id)
artifact.status = Status.finished
db.session.add(artifact)
db.session.commit()
return
if artifact.file:
if manager is None:
manager = default_manager
try:
with db.session.begin_nested():
manager.process(artifact)
except Exception:
current_app.logger.exception(
'Unrecoverable exception processing artifact %s: %s', artifact.job_id, artifact
)
else:
current_app.logger.info(
'Skipping artifact processing (%s) due to missing file', artifact_id)
artifact.status = Status.finished
artifact.date_finished = timezone.now()
db.session.add(artifact)
db.session.commit()
# we always aggregate results to avoid locking here
aggregate_build_stats_for_job.delay(job_id=job.id)
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
4e71953ea5d17920c540d29e944877d704f20cc5
|
4ee4c2cafad449dd60032630bdd249e63d70b5ac
|
/plugins/xevents/Xlib/xauth.py
|
4755b353f1bc01bc186ea50603860a9c1df9ddec
|
[
"MIT",
"GPL-2.0-only"
] |
permissive
|
rrojasPy/TurtleBots.activity
|
4c44ed90b1aadbd0788cdb091fc647deac28d8e8
|
c18e64cc817b2bd8d8cd80a538ff703f580bbe42
|
refs/heads/master
| 2022-10-20T20:46:27.304452
| 2020-06-17T15:57:11
| 2020-06-17T15:57:11
| 273,014,877
| 0
| 1
|
MIT
| 2020-06-17T15:57:12
| 2020-06-17T15:44:15
| null |
UTF-8
|
Python
| false
| false
| 4,168
|
py
|
# $Id: xauth.py,v 1.5 2007/06/10 14:11:58 mggrant Exp $
#
# Xlib.xauth -- ~/.Xauthority access
#
# Copyright (C) 2000 Peter Liljenberg <petli@ctrl-c.liu.se>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import struct
from Xlib import X, error
FamilyInternet = X.FamilyInternet
FamilyDECnet = X.FamilyDECnet
FamilyChaos = X.FamilyChaos
FamilyLocal = 256
class Xauthority:
def __init__(self, filename = None):
if filename is None:
filename = os.environ.get('XAUTHORITY')
if filename is None:
try:
filename = os.path.join(os.environ['HOME'], '.Xauthority')
except KeyError:
raise error.XauthError(
'$HOME not set, cannot find ~/.Xauthority')
try:
raw = open(filename, 'rb').read()
except IOError, err:
raise error.XauthError('~/.Xauthority: %s' % err)
self.entries = []
# entry format (all shorts in big-endian)
# short family;
# short addrlen;
# char addr[addrlen];
# short numlen;
# char num[numlen];
# short namelen;
# char name[namelen];
# short datalen;
# char data[datalen];
n = 0
try:
while n < len(raw):
family, = struct.unpack('>H', raw[n:n+2])
n = n + 2
length, = struct.unpack('>H', raw[n:n+2])
n = n + length + 2
addr = raw[n - length : n]
length, = struct.unpack('>H', raw[n:n+2])
n = n + length + 2
num = raw[n - length : n]
length, = struct.unpack('>H', raw[n:n+2])
n = n + length + 2
name = raw[n - length : n]
length, = struct.unpack('>H', raw[n:n+2])
n = n + length + 2
data = raw[n - length : n]
if len(data) != length:
break
self.entries.append((family, addr, num, name, data))
except struct.error, e:
print "Xlib.xauth: warning, failed to parse part of xauthority file (%s), aborting all further parsing" % filename
#pass
if len(self.entries) == 0:
print "Xlib.xauth: warning, no xauthority details available"
# raise an error? this should get partially caught by the XNoAuthError in get_best_auth..
def __len__(self):
return len(self.entries)
def __getitem__(self, i):
return self.entries[i]
def get_best_auth(self, family, address, dispno,
types = ( "MIT-MAGIC-COOKIE-1", )):
"""Find an authentication entry matching FAMILY, ADDRESS and
DISPNO.
The name of the auth scheme must match one of the names in
TYPES. If several entries match, the first scheme in TYPES
        will be chosen.
If an entry is found, the tuple (name, data) is returned,
otherwise XNoAuthError is raised.
"""
num = str(dispno)
matches = {}
for efam, eaddr, enum, ename, edata in self.entries:
if efam == family and eaddr == address and num == enum:
matches[ename] = edata
for t in types:
try:
return (t, matches[t])
except KeyError:
pass
raise error.XNoAuthError((family, address, dispno))
|
[
"rodneyrrs@hotmail.com"
] |
rodneyrrs@hotmail.com
|
0eee2867d2a4f3aca7a06bb5468124d94ef182fe
|
1e263d605d4eaf0fd20f90dd2aa4174574e3ebce
|
/components/ally-utilities/__setup__/ally_utilities/logging.py
|
b8a479b38eb8e2357cf75e837ffe1b544a61d068
|
[] |
no_license
|
galiminus/my_liveblog
|
698f67174753ff30f8c9590935d6562a79ad2cbf
|
550aa1d0a58fc30aa9faccbfd24c79a0ceb83352
|
refs/heads/master
| 2021-05-26T20:03:13.506295
| 2013-04-23T09:57:53
| 2013-04-23T09:57:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
'''
Created on Nov 7, 2012
@package: ally utilities
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
Provides the logging configurations to be used for the application.
'''
from ally.container import ioc
# --------------------------------------------------------------------
@ioc.config
def format():
'''
    The format to use for the logging messages; more details can be found at "http://docs.python.org/3/library/logging.html"
    in chapter "16.7.6. LogRecord attributes". Example:
"%(asctime)s %(levelname)s (%(threadName)s %(module)s.%(funcName)s %(lineno)d): %(message)s"
'''
return '%(module)s.%(funcName)s %(lineno)d: %(message)s'
@ioc.config
def debug_for():
'''
The list of packages or module patterns to provide debugging for, attention this is available only if the application
is not run with -O or -OO option
'''
return []
@ioc.config
def info_for():
'''The list of packages or module patterns to provide info for'''
return ['__deploy__', '__setup__']
@ioc.config
def warning_for():
'''The list of packages or module patterns to provide warnings for'''
return ['ally']
@ioc.config
def log_file():
''' The name of the log file '''
return 'app.log'
|
[
"etienne@spillemaeker.com"
] |
etienne@spillemaeker.com
|
c52e5a01d006afaa44d941558a3b4413e7d46507
|
2c97e11e13bfbabfdae8979385ba0957c7b11270
|
/ebl/tests/corpus/test_text.py
|
492a85c0eae4e191f7fa44e750326ebdfcf9d5eb
|
[
"MIT"
] |
permissive
|
ElectronicBabylonianLiterature/ebl-api
|
72a2a95291e502ec89a20ebe5c14447e63ac6d92
|
4910f6fbb57fa213fef55cbe9bc16215aebbaa27
|
refs/heads/master
| 2023-08-16T12:42:03.303042
| 2023-08-16T10:59:44
| 2023-08-16T10:59:44
| 135,266,736
| 11
| 3
|
MIT
| 2023-09-12T09:56:14
| 2018-05-29T08:39:58
|
Python
|
UTF-8
|
Python
| false
| false
| 679
|
py
|
import pytest
from ebl.transliteration.domain.stage import Stage
from ebl.tests.factories.corpus import ChapterListingFactory, TextFactory
@pytest.mark.parametrize(
"chapters,expected",
[
(tuple(), False),
(ChapterListingFactory.build_batch(2, stage=Stage.NEO_ASSYRIAN), False),
(
[
ChapterListingFactory.build(stage=Stage.NEO_ASSYRIAN),
ChapterListingFactory.build(stage=Stage.OLD_ASSYRIAN),
],
True,
),
],
)
def test_has_multiple_stages(chapters, expected) -> None:
text = TextFactory.build(chapters=chapters)
assert text.has_multiple_stages == expected
|
[
"noreply@github.com"
] |
ElectronicBabylonianLiterature.noreply@github.com
|
917fcebd166d847a92f7e606dacab4fd29e3999f
|
3f70e754981a941dbc3a24d15edb0a5abe3d4788
|
/yotta/test/test_ignores.py
|
a2b9a8a7100fa9cee34d536a142b11b4dc2cd2c5
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ARMmbed/yotta
|
66cfa634f03a25594311a569ea369a916cff70bf
|
82d854b43d391abb5a006b05e7beffe7d0d6ffbf
|
refs/heads/master
| 2023-03-16T11:57:12.852163
| 2021-01-15T13:49:47
| 2021-01-15T13:49:47
| 16,579,440
| 184
| 87
|
Apache-2.0
| 2021-01-15T13:46:43
| 2014-02-06T13:03:45
|
Python
|
UTF-8
|
Python
| false
| false
| 5,789
|
py
|
#!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
import os
# internal modules:
from yotta.lib.detect import systemDefaultTarget
from yotta.lib import component
from yotta.test.cli import cli
from yotta.test.cli import util
Test_Files = {
'.yotta_ignore': '''
#comment
/moo
b/c/d
b/c/*.txt
/a/b/test.txt
b/*.c
/source/a/b/test.txt
/test/foo
sometest/a
someothertest
ignoredbyfname.c
''',
'module.json': '''
{
"name": "test-testdep-f",
"version": "0.0.6",
"description": "Module to test test-dependencies and ignoring things",
"author": "autopulated",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"dependencies": {},
"testDependencies": {}
}
''',
'a/b/c/d/e/f/test.txt': '',
'a/b/c/d/e/test.c': '#error should be ignored',
'a/b/c/d/e/test.txt': '',
'a/b/c/d/test.c': '#error should be ignored',
'a/b/c/d/test.txt': '',
'a/b/c/d/z/test.c':'#error should be ignored',
'a/b/c/test.txt': '',
'a/b/test.txt':'',
'a/test.txt':'',
'comment':'# should not be ignored',
'f/f.h':'''
#ifndef __F_H__
#define __F_H__
int f();
#endif
''',
'source/moo/test.txt':'',
'source/a/b/c/d/e/f/test.txt': '',
'source/a/b/c/d/e/test.c': '#error should be ignored',
'source/a/b/c/d/e/test.txt': '',
'source/a/b/c/d/test.c': '#error should be ignored',
'source/a/b/c/d/test.txt': '',
'source/a/b/c/d/z/test.c':'#error should be ignored',
'source/a/b/c/test.txt': '',
'source/a/b/test.txt':'',
'source/a/test.txt':'',
'source/f.c':'''
int f(){
return 6;
}
''',
    'test/anothertest/ignoredbyfname.c':'#error should be ignored',
    # filename assumed: the entry point must live in a file that is not ignored
    # (the original dict repeated the ignoredbyfname.c key, shadowing the first entry)
    'test/anothertest/main.c':'''
#include <stdio.h>
#include "f/f.h"
int main(){
int result = f();
printf("%d\n", result);
return !(result == 6);
}
''',
'test/foo/ignored.c':'''
#error should be ignored
''',
'test/someothertest/alsoignored.c':'''
#error should be ignored
''',
'test/sometest/a/ignored.c':'''
#error should be ignored
'''
}
Default_Test_Files = {
'module.json': '''
{
"name": "test-testdep-f",
"version": "0.0.6",
"license": "Apache-2.0"
}'''
}
def isWindows():
    # native builds can't run on windows in this test suite yet
return os.name == 'nt'
class TestPackIgnores(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.test_dir = util.writeTestFiles(Test_Files)
@classmethod
def tearDownClass(cls):
util.rmRf(cls.test_dir)
def test_absolute_ignores(self):
c = component.Component(self.test_dir)
self.assertTrue(c.ignores('moo'))
self.assertTrue(c.ignores('test/foo/ignored.c'))
def test_glob_ignores(self):
c = component.Component(self.test_dir)
self.assertTrue(c.ignores('a/b/c/test.txt'))
self.assertTrue(c.ignores('a/b/test.txt'))
self.assertTrue(c.ignores('a/b/test.c'))
self.assertTrue(c.ignores('source/a/b/c/test.txt'))
self.assertTrue(c.ignores('source/a/b/test.txt'))
self.assertTrue(c.ignores('source/a/b/test.c'))
def test_relative_ignores(self):
c = component.Component(self.test_dir)
self.assertTrue(c.ignores('a/b/c/d/e/f/test.txt'))
self.assertTrue(c.ignores('a/b/test.txt'))
self.assertTrue(c.ignores('source/a/b/c/d/e/f/test.txt'))
self.assertTrue(c.ignores('source/a/b/test.txt'))
self.assertTrue(c.ignores('test/anothertest/ignoredbyfname.c'))
self.assertTrue(c.ignores('test/someothertest/alsoignored.c'))
def test_default_ignores(self):
default_test_dir = util.writeTestFiles(Default_Test_Files)
c = component.Component(default_test_dir)
self.assertTrue(c.ignores('.something.c.swp'))
self.assertTrue(c.ignores('.something.c~'))
self.assertTrue(c.ignores('path/to/.something.c.swm'))
self.assertTrue(c.ignores('path/to/.something.c~'))
self.assertTrue(c.ignores('.DS_Store'))
self.assertTrue(c.ignores('.git'))
self.assertTrue(c.ignores('.hg'))
self.assertTrue(c.ignores('.svn'))
self.assertTrue(c.ignores('yotta_modules'))
self.assertTrue(c.ignores('yotta_targets'))
self.assertTrue(c.ignores('build'))
self.assertTrue(c.ignores('.yotta.json'))
util.rmRf(default_test_dir)
def test_comments(self):
c = component.Component(self.test_dir)
self.assertFalse(c.ignores('comment'))
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_build(self):
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'clean'], self.test_dir)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], self.test_dir)
self.assertNotIn('ignoredbyfname', stdout)
self.assertNotIn('someothertest', stdout)
self.assertNotIn('sometest', stdout)
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_test(self):
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'clean'], self.test_dir)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], self.test_dir)
self.assertNotIn('ignoredbyfname', stdout)
self.assertNotIn('someothertest', stdout)
self.assertNotIn('sometest', stdout)
def runCheckCommand(self, args, test_dir):
        stdout, stderr, statuscode = cli.run(args, cwd=test_dir)
if statuscode != 0:
print('command failed with status %s' % statuscode)
print(stdout)
print(stderr)
self.assertEqual(statuscode, 0)
return stdout or stderr
if __name__ == '__main__':
unittest.main()
|
[
"James.Crosby@arm.com"
] |
James.Crosby@arm.com
|
21592df1ab03e4bc5631a96f3de6b93a787069ac
|
e6f0d9716288c7a8ac04aad852343177195fe8a4
|
/hydrus/client/db/ClientDBMappingsStorage.py
|
b563cc014146c8b9f8777769f993c37b28e09b00
|
[
"WTFPL"
] |
permissive
|
dot1991/hydrus
|
d5fb7960650c7b0cc999832be196deec073146a2
|
e95ddf7fb65e2a1fc82e091473c4c9e6cb09e69d
|
refs/heads/master
| 2023-06-15T19:29:59.477574
| 2021-07-14T20:42:19
| 2021-07-14T20:42:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,221
|
py
|
import sqlite3
import typing
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusDBModule
from hydrus.client.db import ClientDBServices
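# each real tag service gets its own quartet of mappings tables (current/deleted/
# pending/petitioned), suffixed with the service id and housed in external_mappings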
def GenerateMappingsTableNames( service_id: int ) -> typing.Tuple[ str, str, str, str ]:
suffix = str( service_id )
current_mappings_table_name = 'external_mappings.current_mappings_{}'.format( suffix )
deleted_mappings_table_name = 'external_mappings.deleted_mappings_{}'.format( suffix )
pending_mappings_table_name = 'external_mappings.pending_mappings_{}'.format( suffix )
petitioned_mappings_table_name = 'external_mappings.petitioned_mappings_{}'.format( suffix )
return ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name )
class ClientDBMappingsStorage( HydrusDBModule.HydrusDBModule ):
def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices ):
self.modules_services = modules_services
HydrusDBModule.HydrusDBModule.__init__( self, 'client mappings storage', cursor )
def _GetInitialIndexGenerationTuples( self ):
index_generation_tuples = []
return index_generation_tuples
def CreateInitialTables( self ):
pass
def GetExpectedTableNames( self ) -> typing.Collection[ str ]:
expected_table_names = []
return expected_table_names
def DropMappingsTables( self, service_id: int ):
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
self._c.execute( 'DROP TABLE IF EXISTS {};'.format( current_mappings_table_name ) )
self._c.execute( 'DROP TABLE IF EXISTS {};'.format( deleted_mappings_table_name ) )
self._c.execute( 'DROP TABLE IF EXISTS {};'.format( pending_mappings_table_name ) )
self._c.execute( 'DROP TABLE IF EXISTS {};'.format( petitioned_mappings_table_name ) )
def GenerateMappingsTables( self, service_id: int ):
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
self._c.execute( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;'.format( current_mappings_table_name ) )
self._CreateIndex( current_mappings_table_name, [ 'hash_id', 'tag_id' ], unique = True )
self._c.execute( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;'.format( deleted_mappings_table_name ) )
self._CreateIndex( deleted_mappings_table_name, [ 'hash_id', 'tag_id' ], unique = True )
self._c.execute( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;'.format( pending_mappings_table_name ) )
self._CreateIndex( pending_mappings_table_name, [ 'hash_id', 'tag_id' ], unique = True )
self._c.execute( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER, hash_id INTEGER, reason_id INTEGER, PRIMARY KEY ( tag_id, hash_id ) ) WITHOUT ROWID;'.format( petitioned_mappings_table_name ) )
self._CreateIndex( petitioned_mappings_table_name, [ 'hash_id', 'tag_id' ], unique = True )
def GetCurrentFilesCount( self, service_id: int ) -> int:
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
result = self._c.execute( 'SELECT COUNT( DISTINCT hash_id ) FROM {};'.format( current_mappings_table_name ) ).fetchone()
( count, ) = result
return count
def GetDeletedMappingsCount( self, service_id: int ) -> int:
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
result = self._c.execute( 'SELECT COUNT( * ) FROM {};'.format( deleted_mappings_table_name ) ).fetchone()
( count, ) = result
return count
def GetPendingMappingsCount( self, service_id: int ) -> int:
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
result = self._c.execute( 'SELECT COUNT( * ) FROM {};'.format( pending_mappings_table_name ) ).fetchone()
( count, ) = result
return count
def GetPetitionedMappingsCount( self, service_id: int ) -> int:
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
result = self._c.execute( 'SELECT COUNT( * ) FROM {};'.format( petitioned_mappings_table_name ) ).fetchone()
( count, ) = result
return count
def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
        if content_type == HC.CONTENT_TYPE_HASH:
tables_and_columns = []
for service_id in self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ):
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
tables_and_columns.extend( [
( current_mappings_table_name, 'hash_id' ),
( deleted_mappings_table_name, 'hash_id' ),
( pending_mappings_table_name, 'hash_id' ),
( petitioned_mappings_table_name, 'hash_id' )
] )
return tables_and_columns
        elif content_type == HC.CONTENT_TYPE_TAG:
tables_and_columns = []
for service_id in self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ):
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = GenerateMappingsTableNames( service_id )
tables_and_columns.extend( [
( current_mappings_table_name, 'tag_id' ),
( deleted_mappings_table_name, 'tag_id' ),
( pending_mappings_table_name, 'tag_id' ),
( petitioned_mappings_table_name, 'tag_id' )
] )
return tables_and_columns
return []
|
[
"hydrus.admin@gmail.com"
] |
hydrus.admin@gmail.com
|
f6e48ed453acfeacb2bc7ce5b0987480f39bf064
|
225a1d5fca742ae4f502bc346e26d804283e925e
|
/luna/shortcuts.py
|
eb802ebf4837f5a06844a562249863d1b76f2dc0
|
[
"MIT"
] |
permissive
|
y372465774/luna
|
8ad056fdd956c9fa5508d6d3a657b76785e9641d
|
ee3bb740f2cee67fa84b3e923979d29fd14015a7
|
refs/heads/master
| 2021-06-21T12:43:09.821262
| 2017-06-03T23:11:47
| 2017-06-03T23:11:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
from luna import util
from luna.compiler import Compiler
from luna.parser import Parser
def compile(filepath):
tree = parse(filepath)
compiler = Compiler()
return compiler.compile(tree)
def interpret(filepath):
frame = compile(filepath)
frame.run()
def parse(filepath):
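    # `filepath` may be a path to a source file or the source text itself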
import os
content = filepath
if os.path.isfile(filepath):
content = util.read(filepath)
parser = Parser()
return parser.parse_with_rule(None, content)
|
[
"numerodix@gmail.com"
] |
numerodix@gmail.com
|
d2e678f126fa9f4a2b06d9a9db003b86dc26c0cb
|
d668209e9951d249020765c011a836f193004c01
|
/tools/pnnx/tests/ncnn/test_F_pad.py
|
88590649883cbbc978a447b89328a1e6e6372751
|
[
"BSD-3-Clause",
"Zlib",
"BSD-2-Clause"
] |
permissive
|
Tencent/ncnn
|
d8371746c00439304c279041647362a723330a79
|
14b000d2b739bd0f169a9ccfeb042da06fa0a84a
|
refs/heads/master
| 2023-08-31T14:04:36.635201
| 2023-08-31T04:19:23
| 2023-08-31T04:19:23
| 95,879,426
| 18,818
| 4,491
|
NOASSERTION
| 2023-09-14T15:44:56
| 2017-06-30T10:55:37
|
C++
|
UTF-8
|
Python
| false
| false
| 2,062
|
py
|
# Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
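# F.pad pads the last dimensions first: a 2-tuple pads the last dim (left, right),
# a 4-tuple pads the last two dims (left, right, top, bottom)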
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x, y, z):
x = F.pad(x, (3,4), mode='constant', value=1.3)
x = F.pad(x, (2,2))
y = F.pad(y, (5,6), mode='reflect')
y = F.pad(y, (2,1), mode='replicate')
y = F.pad(y, (3,4), mode='constant', value=1.3)
y = F.pad(y, (1,1))
z = F.pad(z, (3,4,3,4), mode='reflect')
z = F.pad(z, (2,1,2,0), mode='replicate')
z = F.pad(z, (1,0,2,0), mode='constant', value=1.3)
z = F.pad(z, (3,3,3,3))
return x, y, z
def test():
net = Model()
net.eval()
torch.manual_seed(0)
x = torch.rand(1, 16)
y = torch.rand(1, 2, 16)
z = torch.rand(1, 3, 12, 16)
a = net(x, y, z)
# export torchscript
mod = torch.jit.trace(net, (x, y, z))
mod.save("test_F_pad.pt")
# torchscript to pnnx
import os
os.system("../../src/pnnx test_F_pad.pt inputshape=[1,16],[1,2,16],[1,3,12,16]")
# ncnn inference
import test_F_pad_ncnn
b = test_F_pad_ncnn.test_inference()
for a0, b0 in zip(a, b):
if not torch.allclose(a0, b0, 1e-4, 1e-4):
return False
return True
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
|
[
"noreply@github.com"
] |
Tencent.noreply@github.com
|
ed20f0ff5ef0931674df3d3cb706768578fb1f47
|
28225a61f2fc33e4f6d56a1941a99301489c0f1e
|
/GAN/CoGAN(WIP)/main.py
|
34036d03559776a7b713da6c23fac7aa13909638
|
[] |
no_license
|
sailfish009/pytorchTutorialRepo
|
97cb4368b6ac22bd678965d351196b47d52970e1
|
17d67e64555d2b219d7d53de6a7bfda4172b809b
|
refs/heads/master
| 2023-03-08T13:03:57.244087
| 2021-02-23T11:26:19
| 2021-02-23T11:26:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,696
|
py
|
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from tqdm import tqdm
from trainer import *
import torchvision.models as models
import mnistm
import os
os.environ["TORCH_HOME"] = "~/Desktop/Datasets/"
# Allowing arguments for direct execution from terminal
parser = argparse.ArgumentParser()
parser.add_argument('--data', help = "folder for custom training", default = "")
parser.add_argument('--arch', default = 'resnet18', help= '''Choose any model
from pytorch. Or input "my" for taking a model from
model.py ''')
parser.add_argument("--weight-decay", default = 1e-4, help = "weight decay coefficient")
parser.add_argument("--resume", default = False, help = "Resume training from a checkpoint")
parser.add_argument("--pretrained", default = False, help = "If part of the standard datasets, downloaded pretrained weights")
parser.add_argument('--batch-size', type = int, default = 128, help = 'input batch size')
parser.add_argument(
"--test-batch-size", type = int, default = 1000
)
parser.add_argument(
"--epochs", type = int, default = 20, help = "no of epochs to train for"
)
parser.add_argument(
"--lr", type = float, default = 0.01, help = "Base learning rate"
)
parser.add_argument(
"--max_lr", type = float, default = 0.1, help = "Max learning rate for OneCycleLR"
)
parser.add_argument(
"--dry-run", action = 'store_true', default = False, help = 'quickly check a single pass'
)
parser.add_argument(
"--seed", type = int, default = 100, help = "torch random seed"
)
parser.add_argument(
"--log_interval", type = int, default = 20, help = "interval to show results"
)
parser.add_argument(
"--save-model", action = 'store_true', default = True, help = "Choose if model to be saved or not"
)
parser.add_argument("--save_path", default = "models/model.pt", help = "Choose model saved filepath")
# GAN specific args
parser.add_argument("--nz", default = 100, help = "size of latent vector")
parser.add_argument("--ngf", default = 28, help = "gen size")
parser.add_argument("--ndf", default = 28, help= "Discriminator size")
parser.add_argument("--beta1", default = 0.5, help = "adam beta1 parameter")
parser.add_argument('--CRITIC_ITERS', default=1, type=int, help='D update iters before G update')
parser.add_argument("--nc", default =1, help = "number of image channels")
args = parser.parse_args()
# Setting params
nz = int(args.nz)
ngf = int(args.ngf)
ndf = int(args.ndf)
nc = int(args.nc)
torch.manual_seed(args.seed)
device = torch.device("cuda")
kwargs = {'batch_size':args.batch_size}
kwargs.update(
{'num_workers':8,
'pin_memory':True,
'shuffle': True
}
)
# Defining batch transforms
transform = transforms.Compose(
[transforms.Resize(28),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
)
# Loading dataset
train_data = datasets.MNIST("~/Desktop/Datasets/", transform =
transform)
train_loader = torch.utils.data.DataLoader(train_data, **kwargs)
train_loader2 = torch.utils.data.DataLoader(
mnistm.MNISTM(
"~/Desktop/Datasets/",
train=True,
download=True,
transform=transforms.Compose(
[
transforms.Resize(ndf),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
),
),
batch_size=args.batch_size,
shuffle=True,
)
# Initialize weights
def weight_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
torch.nn.init.normal_(m.weight, 1.0, 0.02)
torch.nn.init.zeros_(m.bias)
# Loading model
num_classes = 10 #This is new; change it based on dataset
if args.arch == "my":
from Nets import *
netG = Generator(num_classes, (nc, ndf, ndf), args).to(device)
netG.apply(weight_init)
netD = Discriminator(num_classes, (nc, ndf, ndf), args).to(device)
netD.apply(weight_init)
print("Using custom architecture")
else:
if args.pretrained:
print(f"Using pretrained {args.arch}")
model = models.__dict__[args.arch](pretrained = True)
else:
print(f"Not using pretrained {args.arch}")
model = models.__dict__[args.arch]()
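# note: the CoGAN training below relies on the custom Generator/Discriminator
# (netG/netD) built in the "my" branch; the torchvision branch above only
# constructs `model` and is not wired into the GAN loop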
print("Generator", netG)
print("Discriminator", netD)
start_epoch = 1
if args.resume:
loc = "cuda:0"
    checkpointD = torch.load(args.save_path + "disc.pt", map_location = loc)
    checkpointG = torch.load(args.save_path + "gen.pt", map_location = loc)
    netD.load_state_dict(checkpointD['state_dict'])
    netG.load_state_dict(checkpointG['state_dict'])
    # optimizer state is not restored here: the optimizers are only created below
    start_epoch = checkpointD['epoch']
    print(f"Done loading pretrained, Start epoch: {start_epoch}")
# Optimizers
optimizerD = optim.Adam(netD.parameters(), lr=args.lr,betas=(args.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=args.lr,betas=(args.beta1, 0.999))
# Loop
batches_done = 0
for epoch in tqdm(range(start_epoch, args.epochs+1)):
train(args, device, train_loader,train_loader2, epoch, netD, netG,nz , ndf, nc, optimizerD, optimizerG, batches_done, num_classes)
if args.save_model:
torch.save(netD.state_dict(), args.save_path+"disc.pt")
torch.save(netG.state_dict(), args.save_path+"gen.pt")
|
[
"msubhaditya@gmail.com"
] |
msubhaditya@gmail.com
|
9bcc59034c881a3c1b6c4c6e4be0b400fa0191b6
|
d8da64ecb3a88f8f3196937d3836c7bbafd5e26f
|
/backend/home/migrations/0002_load_initial_data.py
|
5ac5736cbfb33d2462efd03c2c2be03900aba58d
|
[] |
no_license
|
crowdbotics-apps/layouteditor151-1654
|
990b395d8054e05b0f8739345d5e55623cead88c
|
75e3614856ab5577da68e9ec4ad0157f462abd6d
|
refs/heads/master
| 2022-11-16T05:09:59.518254
| 2020-07-03T17:54:23
| 2020-07-03T17:54:23
| 276,958,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,313
|
py
|
from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "layouteditor151"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">layouteditor151</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "layouteditor151-1654.botics.co"
site_params = {
"name": "layouteditor151",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
119c672f5d02ac1dc65310e156c212718e05ac89
|
d838bed08a00114c92b73982a74d96c15166a49e
|
/docs/data/learn/Bioinformatics/output/ch8_code/src/metrics/PearsonSimilarity.py
|
aae4c9870689ccc0776b67c2e785b313c5e05669
|
[] |
no_license
|
offbynull/offbynull.github.io
|
4911f53d77f6c59e7a453ee271b1e04e613862bc
|
754a85f43159738b89dd2bde1ad6ba0d75f34b98
|
refs/heads/master
| 2023-07-04T00:39:50.013571
| 2023-06-17T20:27:05
| 2023-06-17T23:27:00
| 308,482,936
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,725
|
py
|
from math import sqrt
from statistics import mean
from sys import stdin
from typing import Sequence
import yaml
# MARKDOWN
def pearson_similarity(v: Sequence[float], w: Sequence[float], dims: int):
v_avg = mean(v)
w_avg = mean(w)
vec_avg_diffs_dp = sum((v[i] - v_avg) * (w[i] - w_avg) for i in range(dims))
dist_to_v_avg = sqrt(sum((v[i] - v_avg) ** 2 for i in range(dims)))
dist_to_w_avg = sqrt(sum((w[i] - w_avg) ** 2 for i in range(dims)))
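    # caveat: a constant vector has zero distance-to-mean, so the division below
    # would raise ZeroDivisionError; inputs are assumed to be non-constant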
return vec_avg_diffs_dp / (dist_to_v_avg * dist_to_w_avg)
def pearson_distance(v: Sequence[float], w: Sequence[float], dims: int):
# To turn pearson similarity into a distance metric, subtract 1.0 from it. By
# subtracting 1.0, you're changing the bounds from [1.0, -1.0] to [0.0, 2.0].
#
# Recall that any distance metric must return 0 when the items being compared
# are the same and increases the more different they get. By subtracting 1.0,
# you're matching that distance metric requirement: 0.0 when totally similar
# and 2.0 for totally dissimilar.
return 1.0 - pearson_similarity(v, w, dims)
# MARKDOWN
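# quick sanity checks (values hand-computed, equal up to float rounding):
#   pearson_distance([1, 2, 3], [2, 4, 6], 3) ≈ 0.0   (perfectly correlated)
#   pearson_distance([1, 2, 3], [3, 2, 1], 3) ≈ 2.0   (perfectly anti-correlated)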
def main():
print("<div style=\"border:1px solid black;\">", end="\n\n")
print("`{bm-disable-all}`", end="\n\n")
try:
data = yaml.safe_load(stdin)
v = data[0]
w = data[1]
dims = max(len(v), len(w))
print('Given the vectors ...')
print()
print(f' * {v}')
print(f' * {w}')
print()
d = pearson_similarity(v, w, dims)
print(f'Their pearson similarity is {d}')
print()
finally:
print("</div>", end="\n\n")
print("`{bm-enable-all}`", end="\n\n")
if __name__ == '__main__':
main()
|
[
"offbynull@gmail.com"
] |
offbynull@gmail.com
|
12715c528873546ae9649b4dbb52876cf6d0505b
|
f90522eee8d87c1486f32f3801a67141f7aee15f
|
/0129.Sum Root to Leaf Numbers/solution.py
|
a5789c653314b4d53aacfa3e2860b7adcd4c6bbb
|
[
"Apache-2.0"
] |
permissive
|
zhlinh/leetcode
|
15a30af8439e664d2a5f1aa328baf96f0f1791da
|
6dfa0a4df9ec07b2c746a13c8257780880ea04af
|
refs/heads/master
| 2021-01-15T15:49:25.525816
| 2016-09-20T03:24:10
| 2016-09-20T03:24:10
| 48,949,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,386
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-03-11
Last_modify: 2016-03-11
******************************************
'''
'''
Given a binary tree containing digits from 0-9 only,
each root-to-leaf path could represent a number.
An example is the root-to-leaf path 1->2->3
which represents the number 123.
Find the total sum of all root-to-leaf numbers.
For example,
1
/ \
2 3
The root-to-leaf path 1->2 represents the number 12.
The root-to-leaf path 1->3 represents the number 13.
Return the sum = 12 + 13 = 25.
'''
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
totalSum = 0
def sumNumbers(self, root):
"""
:type root: TreeNode
:rtype: int
"""
        self.totalSum = 0  # reset so repeated calls on one instance don't accumulate
        if not root:
return 0
self.helper(root, 0)
return self.totalSum
def helper(self, root, num):
num = num * 10 + root.val
if not root.left and not root.right:
self.totalSum += num
return
if root.left:
self.helper(root.left, num)
if root.right:
self.helper(root.right, num)
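if __name__ == '__main__':
    # minimal sanity check using the docstring example: 1 -> (2, 3) gives 12 + 13 = 25
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    print(Solution().sumNumbers(root))  # expected: 25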
|
[
"zhlinhng@gmail.com"
] |
zhlinhng@gmail.com
|
3b6849ee78281ae2e1737249fd55941971b110fc
|
7ab4cdf01de10faa0b5e6103cb98f4a1447b38e1
|
/ChromeController/__init__.py
|
29f415a8675ecb9d895139a843ac001249a70336
|
[
"BSD-3-Clause"
] |
permissive
|
acskurucz/ChromeController
|
619a0d46db60809bbe1188dc1d7230e3ef2dba47
|
4294c1ca7db9569a976710c7c219069eb07d28bf
|
refs/heads/master
| 2020-12-08T16:40:32.528694
| 2020-01-10T12:36:28
| 2020-01-10T12:36:28
| 233,036,106
| 0
| 0
|
BSD-3-Clause
| 2020-01-10T11:47:56
| 2020-01-10T11:47:55
| null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
from .tab_pool import TabPooledChromium
from .chrome_context import ChromeContext
from .transport import ChromeExecutionManager
from .manager import ChromeRemoteDebugInterface
from .Generator import gen
from .cr_exceptions import ChromeControllerException
from .cr_exceptions import ChromeStartupException
from .cr_exceptions import ReusedPortError
from .cr_exceptions import ChromeConnectFailure
from .cr_exceptions import ChromeCommunicationsError
from .cr_exceptions import ChromeTabNotFoundError
from .cr_exceptions import ChromeError
from .cr_exceptions import ChromeDiedError
from .cr_exceptions import ChromeNavigateTimedOut
from .cr_exceptions import ChromeResponseNotReceived
|
[
"something@fake-url.com"
] |
something@fake-url.com
|