blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9ed32859540a3be231e4c075717f88ae4e513b0d
|
c6cef87f0fc72df793b6151b3a60b60c026d9af0
|
/measurements/forms.py
|
41108293c5da115e1580f41279c5b5c442e8d104
|
[] |
no_license
|
rohitrajput-42/My_Map
|
50ff393bd256d3bc922b2601aaad845d1a5c1094
|
22605ba8cea5f709bc6dc9f686431bd0d89d541e
|
refs/heads/main
| 2023-06-05T15:15:02.206103
| 2021-07-01T04:26:48
| 2021-07-01T04:26:48
| 381,810,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
from django import forms
from django.forms.models import ModelForm
from .models import Measurement
class MeasurementModelForm(forms.ModelForm):
    """ModelForm exposing only the user-editable destination of a Measurement."""

    class Meta:
        model = Measurement
        # Only the destination is entered by the user; other fields are derived.
        fields = ('destination',)
        widgets = {
            'destination': forms.TextInput(
                attrs={'placeholder': 'Enter your destination'}
            ),
        }
|
[
"rohit1471997@gmail.com"
] |
rohit1471997@gmail.com
|
58526088e3fb0f233400ca6bb5eefe05cf3affce
|
aeeaf40350a652d96a392010071df8a486c6e79f
|
/archive/python/Python/binary_tree/236.lowest-common-ancestor-of-a-binary-tree.0.py
|
439011598294d17040086a1c52c0b366b1525b26
|
[
"MIT"
] |
permissive
|
linfengzhou/LeetCode
|
11e6c12ce43cf0053d86437b369a2337e6009be3
|
cb2ed3524431aea2b204fe66797f9850bbe506a9
|
refs/heads/master
| 2021-01-23T19:34:37.016755
| 2018-04-30T20:44:40
| 2018-04-30T20:44:40
| 53,916,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,095
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# what if p or q not in the tree
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """Return the lowest common ancestor of p and q, or None when either
        node is absent from the tree (handles the "what if p or q not in the
        tree" case noted above).

        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode
        """
        lca, is_p, is_q = self.helper(root, p, q)
        if is_p and is_q:
            return lca
        else:
            return None

    def helper(self, root, p, q):
        """Post-order search returning (lca_candidate, found_p, found_q).

        BUG FIX: the original never promoted any node to LCA candidate, so the
        candidate stayed None for every input and lowestCommonAncestor always
        returned None.  A node becomes the candidate exactly when its subtree
        is the deepest one containing both p and q.
        """
        if not root:
            return None, False, False
        left_lca, left_p, left_q = self.helper(root.left, p, q)
        if left_lca is not None:
            # Both targets were already found deeper in the left subtree.
            return left_lca, True, True
        right_lca, right_p, right_q = self.helper(root.right, p, q)
        if right_lca is not None:
            return right_lca, True, True
        is_p = left_p or right_p or root is p
        is_q = left_q or right_q or root is q
        if is_p and is_q:
            # First (deepest) node covering both targets: the LCA.
            return root, is_p, is_q
        return None, is_p, is_q
|
[
"luke.zlf@gmail.com"
] |
luke.zlf@gmail.com
|
bbbc8ede49f033fcaa8cfe5937eff44ec53222a0
|
f9462f3768fa058bd895a56b151da694664ce588
|
/examples/201_visualize_model_rhino.py
|
acacdc3daa62bc676a7299833e03bf0c92d3c5dd
|
[
"MIT"
] |
permissive
|
ryanpennings/workshop_swinburne_2021
|
16a9a7e2c7134832f8f714b7b430376f1b67dfb2
|
820ef4e36e73ac950f40e1846739087180af2e1c
|
refs/heads/main
| 2023-05-31T16:35:16.535310
| 2021-06-17T06:24:51
| 2021-06-17T06:24:51
| 377,373,107
| 0
| 0
|
MIT
| 2021-06-17T06:24:51
| 2021-06-16T04:45:02
| null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
# Visualize a URDF robot model inside Rhino on a dedicated layer.
from compas_rhino.artists import RobotModelArtist
from compas.robots import RobotModel
# Load the robot description (links, joints, visual geometry) from a local URDF file.
model = RobotModel.from_urdf_file('models/05_with_colors.urdf')
# Draw into the 'COMPAS::Robot Viz' Rhino layer, clearing any previous content first.
artist = RobotModelArtist(model, layer='COMPAS::Robot Viz')
artist.clear_layer()
artist.draw_visual()
|
[
"casas@arch.ethz.ch"
] |
casas@arch.ethz.ch
|
a1e209bed0477352863b8d389058b400cebac1b3
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/eve/client/script/ui/shared/info/panels/__init__.py
|
a211e9aae9803d9b8c33f8e2fe8f3a05f3e5139b
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760
| 2016-03-16T15:15:32
| 2016-03-16T15:15:32
| 56,389,750
| 1
| 0
| null | 2016-04-16T15:05:24
| 2016-04-16T15:05:24
| null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\shared\info\panels\__init__.py
# Package marker only: this __init__ intentionally defines nothing.
pass
|
[
"masaho.shiro@gmail.com"
] |
masaho.shiro@gmail.com
|
17092c071cf752cfa5953bed7125cf311ef11293
|
a9b6243dad4b86f75401a6ee0d6e8505fa57aa83
|
/test.py
|
f02e47c3ec23bca07523acac72bf183601844ae1
|
[] |
no_license
|
minji-o-j/system-for-visually-impaired
|
b4503078b9a74896a3786683f9f14485466bc7d5
|
748b9cdf97259d7f3d0cd5d15167ad5a629f6089
|
refs/heads/master
| 2023-02-06T21:14:37.840155
| 2020-12-30T11:03:49
| 2020-12-30T11:03:49
| 256,681,461
| 7
| 5
| null | 2020-12-06T08:27:27
| 2020-04-18T06:02:48
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,461
|
py
|
from __future__ import division
from models import *
from utils.utils import *
from utils.datasets import *
from utils.parse_config import *
import os
import sys
import time
import datetime
import argparse
import tqdm
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from torch.autograd import Variable
import torch.optim as optim
def evaluate(model, path, iou_thres, conf_thres, nms_thres, img_size, batch_size):
    """Run the detector over the validation image list and compute PR metrics.

    :param model: Darknet detection model, already moved to the target device.
    :param path: path to the validation image-list file.
    :param iou_thres: IoU required for a detection to count as a true positive.
    :param conf_thres: objectness confidence threshold applied before NMS.
    :param nms_thres: IoU threshold used by non-maximum suppression.
    :param img_size: square size (pixels) images are resized to.
    :param batch_size: images per forward pass.
    :returns: (precision, recall, AP, f1, ap_class) as produced by ap_per_class.

    NOTE(review): `np`, ListDataset, xywh2xyxy, non_max_suppression,
    get_batch_statistics and ap_per_class are assumed to come from the star
    imports at the top of the file — confirm.
    """
    model.eval()
    # Get dataloader
    dataset = ListDataset(path, img_size=img_size, augment=False, multiscale=False)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=dataset.collate_fn, pin_memory=True
    )
    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc="Detecting objects")):
        # Extract class labels (column 1 of each target row).
        labels += targets[:, 1].tolist()
        # Rescale targets: convert boxes to xyxy and scale to pixel coordinates.
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size
        imgs = Variable(imgs.type(Tensor), requires_grad=False)
        with torch.no_grad():
            outputs = model(imgs)
            outputs = non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres)
        sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=iou_thres)
    # Concatenate sample statistics across all batches.
    true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)
    return precision, recall, AP, f1, ap_class
if __name__ == "__main__":
    # Command-line entry point: evaluate mAP of a YOLOv4 model on a dataset.
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=8, help="size of each image batch")
    parser.add_argument("--model_def", type=str, default="config/yolov4.cfg", help="path to model definition file")
    parser.add_argument("--data_config", type=str, default="config/coco.data", help="path to data config file")
    parser.add_argument("--weights_path", type=str, default="weights/yolov4.weights", help="path to weights file")
    parser.add_argument("--class_path", type=str, default="data/coco.names", help="path to class label file")
    parser.add_argument("--iou_thres", type=float, default=0.5, help="iou threshold required to qualify as detected")
    parser.add_argument("--conf_thres", type=float, default=0.5, help="object confidence threshold")
    parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression")
    # NOTE(review): --n_cpu is parsed but evaluate() hardcodes num_workers=0.
    parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation")
    parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
    # NOTE(review): argparse type=bool treats any non-empty string as True
    # (e.g. "--use_custom False" still enables it) — consider store_true.
    parser.add_argument("--use_custom", type=bool, default=False, help="trained weight")
    opt = parser.parse_args()
    # Use custom weight: swap in the custom config/classes and the most
    # recently named checkpoint in ./weights/custom (lexicographic order).
    if opt.use_custom:
        opt.model_def = 'config/yolov4-custom.cfg'
        opt.class_path = 'data/custom/classes.names'
        opt.data_config = 'config/custom.data'
        ls = sorted(os.listdir('./weights/custom'))
        if len(ls) > 0:
            opt.weights_path = 'weights/custom/'+ls[-1]
    print(opt)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data_config = parse_data_config(opt.data_config)
    valid_path = data_config["valid"]
    class_names = load_classes(data_config["names"])
    # Initiate model
    model = Darknet(opt.model_def).to(device)
    if opt.weights_path.endswith(".weights"):
        # Load darknet weights
        model.load_darknet_weights(opt.weights_path)
    else:
        # Load checkpoint weights
        model.load_state_dict(torch.load(opt.weights_path))
    print("Compute mAP...")
    precision, recall, AP, f1, ap_class = evaluate(
        model,
        path=valid_path,
        iou_thres=opt.iou_thres,
        conf_thres=opt.conf_thres,
        nms_thres=opt.nms_thres,
        img_size=opt.img_size,
        batch_size=opt.batch_size,
    )
    # Report per-class AP and the overall mean average precision.
    print("Average Precisions:")
    for i, c in enumerate(ap_class):
        print(f"+ Class '{c}' ({class_names[c]}) - AP: {AP[i]}")
    print(f"mAP: {AP.mean()}")
|
[
"jminji98@gmail.com"
] |
jminji98@gmail.com
|
212e3c7f9eed689458556700d2b64f75b0d4b956
|
425aba1a7c134c78e8d5710890d426d7d6b0bd45
|
/tests/settings.py
|
f5b9b7ceb97006a73fcfd1b1add3b90442fa9338
|
[
"BSD-3-Clause"
] |
permissive
|
curiousTauseef/django-multiple-form-view
|
bd1f4558879382e9ae1b6c173ecbb2102350c12e
|
de13d124d913f12aa01aeeb6ea2f7b1768cd93cb
|
refs/heads/master
| 2021-08-19T22:28:12.921932
| 2017-11-27T15:58:16
| 2017-11-27T15:58:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
# Minimal Django settings used only by the test suite.
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'not-a-secret'  # test-only value; never use in production
DEBUG = True
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.staticfiles',
    'testapp',
]
MIDDLEWARE = []
ROOT_URLCONF = 'testapp.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'debug': True,
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
            ],
        },
    },
]
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        # Tests run against an in-memory sqlite database for speed.
        'TEST': {
            'NAME': ':memory:',
        },
    },
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
|
[
"rpkilby@ncsu.edu"
] |
rpkilby@ncsu.edu
|
8c3b83d33ccb9923cd38f392fa462b54697237c9
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/python/generated/test/test_com_day_cq_mailer_impl_cq_mailing_service_properties.py
|
224a540ecb8cd7059da3fee3c5ac2c652c422627
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
# coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.com_day_cq_mailer_impl_cq_mailing_service_properties import ComDayCqMailerImplCqMailingServiceProperties # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestComDayCqMailerImplCqMailingServiceProperties(unittest.TestCase):
    """ComDayCqMailerImplCqMailingServiceProperties unit test stubs"""

    def setUp(self):
        """No fixtures are needed yet."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testComDayCqMailerImplCqMailingServiceProperties(self):
        """Test ComDayCqMailerImplCqMailingServiceProperties"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swaggeraemosgi.models.com_day_cq_mailer_impl_cq_mailing_service_properties.ComDayCqMailerImplCqMailingServiceProperties()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test file directly with the unittest runner.
    unittest.main()
|
[
"michael.bloch@shinesolutions.com"
] |
michael.bloch@shinesolutions.com
|
218dc82bd8eed4a2d5859956054fc24b06364e5a
|
f8b5aafac15f408a48fabf853a918015c927e6fe
|
/backup/virtualenv/venv27/lib/python2.7/site-packages/heatclient/tests/test_events.py
|
e94b7eb9702afaf92195edca367bae7b1ce1b5b4
|
[] |
no_license
|
to30/tmp
|
bda1ac0ca3fc61e96c2a1c491367b698d7e97937
|
ec809683970af6787728c2c41f161f416155982a
|
refs/heads/master
| 2021-01-01T04:25:52.040770
| 2016-05-13T16:34:59
| 2016-05-13T16:34:59
| 58,756,087
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,855
|
py
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient.common import utils
from heatclient.v1 import events
import mock
from mox3 import mox
import testtools
class EventManagerTest(testtools.TestCase):
    """URL-construction tests for heatclient.v1.events.EventManager."""

    def setUp(self):
        super(EventManagerTest, self).setUp()
        # mox stubs are torn down automatically after each test.
        self.m = mox.Mox()
        self.addCleanup(self.m.UnsetStubs)
        self.addCleanup(self.m.ResetAll)

    def test_list_event(self):
        """list() should URL-encode the resolved stack id into the path."""
        # NOTE(review): the trailing comma makes stack_id a 1-tuple; the
        # stubbed _resolve_stack_id returns a fixed value so the tests pass —
        # confirm whether the real API also tolerates a tuple here.
        stack_id = 'teststack',
        resource_name = 'testresource'
        manager = events.EventManager(None)
        self.m.StubOutWithMock(manager, '_resolve_stack_id')
        manager._resolve_stack_id(stack_id).AndReturn('teststack/abcd1234')
        self.m.ReplayAll()
        manager._list = mock.MagicMock()
        manager.list(stack_id, resource_name)
        # Make sure url is correct.
        manager._list.assert_called_once_with('/stacks/teststack%2Fabcd1234/'
                                              'resources/testresource/events',
                                              "events")

    def test_list_event_with_unicode_resource_name(self):
        """Non-ASCII resource names must be percent-encoded in the URL."""
        stack_id = 'teststack',
        resource_name = u'\u5de5\u4f5c'
        manager = events.EventManager(None)
        self.m.StubOutWithMock(manager, '_resolve_stack_id')
        manager._resolve_stack_id(stack_id).AndReturn('teststack/abcd1234')
        self.m.ReplayAll()
        manager._list = mock.MagicMock()
        manager.list(stack_id, resource_name)
        # Make sure url is correct.
        manager._list.assert_called_once_with('/stacks/teststack%2Fabcd1234/'
                                              'resources/%E5%B7%A5%E4%BD%9C/'
                                              'events', "events")

    def test_list_event_with_none_resource_name(self):
        """Without a resource name the stack id is used verbatim."""
        stack_id = 'teststack',
        manager = events.EventManager(None)
        manager._list = mock.MagicMock()
        manager.list(stack_id)
        # Make sure url is correct.
        manager._list.assert_called_once_with('/stacks/teststack/'
                                              'events', "events")

    def test_list_event_with_kwargs(self):
        """limit/marker/filters must land in the query string."""
        stack_id = 'teststack',
        resource_name = 'testresource'
        kwargs = {'limit': 2,
                  'marker': '6d6935f4-0ae5',
                  'filters': {
                      'resource_action': 'CREATE',
                      'resource_status': 'COMPLETE'
                  }}
        manager = events.EventManager(None)
        self.m.StubOutWithMock(manager, '_resolve_stack_id')
        manager._resolve_stack_id(stack_id).AndReturn('teststack/abcd1234')
        self.m.ReplayAll()
        manager._list = mock.MagicMock()
        manager.list(stack_id, resource_name, **kwargs)
        # Make sure url is correct.
        self.assertEqual(1, manager._list.call_count)
        args = manager._list.call_args
        self.assertEqual(2, len(args[0]))
        url, param = args[0]
        self.assertEqual("events", param)
        # Query-string order is not guaranteed, so compare the parsed dict.
        base_url, query_params = utils.parse_query_url(url)
        expected_base_url = ('/stacks/teststack%2Fabcd1234/'
                             'resources/testresource/events')
        self.assertEqual(expected_base_url, base_url)
        expected_query_dict = {'marker': ['6d6935f4-0ae5'],
                               'limit': ['2'],
                               'resource_action': ['CREATE'],
                               'resource_status': ['COMPLETE']}
        self.assertEqual(expected_query_dict, query_params)

    def test_get_event(self):
        """get() issues a GET against the fully-resolved event URL."""
        fields = {'stack_id': 'teststack',
                  'resource_name': 'testresource',
                  'event_id': '1'}

        class FakeAPI(object):
            """Fake API and ensure request url is correct."""

            def json_request(self, *args, **kwargs):
                expect = ('GET',
                          '/stacks/teststack%2Fabcd1234/resources'
                          '/testresource/events/1')
                assert args == expect
                return {}, {'event': []}

        manager = events.EventManager(FakeAPI())
        with mock.patch('heatclient.v1.events.Event'):
            self.m.StubOutWithMock(manager, '_resolve_stack_id')
            manager._resolve_stack_id('teststack').AndReturn(
                'teststack/abcd1234')
            self.m.ReplayAll()
            manager.get(**fields)

    def test_get_event_with_unicode_resource_name(self):
        """get() percent-encodes non-ASCII resource names in the URL."""
        fields = {'stack_id': 'teststack',
                  'resource_name': u'\u5de5\u4f5c',
                  'event_id': '1'}

        class FakeAPI(object):
            """Fake API and ensure request url is correct."""

            def json_request(self, *args, **kwargs):
                expect = ('GET',
                          '/stacks/teststack%2Fabcd1234/resources'
                          '/%E5%B7%A5%E4%BD%9C/events/1')
                assert args == expect
                return {}, {'event': []}

        manager = events.EventManager(FakeAPI())
        with mock.patch('heatclient.v1.events.Event'):
            self.m.StubOutWithMock(manager, '_resolve_stack_id')
            manager._resolve_stack_id('teststack').AndReturn(
                'teststack/abcd1234')
            self.m.ReplayAll()
            manager.get(**fields)
|
[
"tomonaga@mx2.mesh.ne.jp"
] |
tomonaga@mx2.mesh.ne.jp
|
9b3fc0aadb7a94e0c3921ce7159d230d74acf942
|
c89e59b4d018e8a2d7dc0dbc3bb7a3768024f849
|
/before2021/python/190922/2_D4_1824_혁진이의프로그램검증.py
|
35739cf8ff47cfc4a6a46240d6cabeed99ea85a6
|
[] |
no_license
|
leeiopd/algorithm
|
ff32103a43e467a5a091257cc07cf35365ecbf91
|
e41647d3918c3099110d97f455c5ebf9a38d571e
|
refs/heads/master
| 2023-03-08T23:46:34.919991
| 2023-02-22T09:39:46
| 2023-02-22T09:39:46
| 166,131,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,328
|
py
|
'''
Samsung Collegiate Programming Cup은 Samsung이 매년마다 개최하는 대학생 프로그래밍 축제다.
이 축제의 우승자는 Samsung에 입사할 수 있지만, 나머지 사람들은 시공의 폭풍 속으로 빠지게 된다.
이 축제에서 참가자들은 자신이 선호하는 언어로 프로그램을 작성할 수 있다.
혁진이는 자신이 개발한 언어 혁셈블리어를 이용해 대회에 참가했다.
축제에서 꼭 우승하고 싶은 혁진이는 자신이 작성한 프로그램이 결국에는 멈출 수 있는지 확인하고 싶다.
혁셈블리어는 다음과 같이 동작한다:
- 프로그램이 수행해야 하는 명령은 문자로 주어지며, 문자들은 2차원 격자 모양으로 줄지어 있다. 다음은 혁셈블리어 프로그램의 예이다.
6>--v.
.^--_@
- 프로그램은 현재 위치에 있는 문자가 나타내는 명령을 처리하고, 이동 방향에 따라 다음 문자로 이동해야 한다.
가장 처음 위치는 제일 왼쪽 위에 있는 문자이고, 이동 방향은 오른쪽이다.
- 명령을 처리하다 보면 이동 방향이 상하좌우로 바뀔 수 있다.
만약 다음 이동이 2차원 격자의 바깥으로 이동하는 방향이면, 반대편에 있는 위치로 이동한다.
예를 들어, 첫 번째 줄의 가장 오른쪽 칸에서 오른쪽 방향으로 이동하면 첫 번째 줄의 가장 왼쪽 칸으로 이동한다.
혁셈블리어에서는 메모리가 단 하나 있으며, 0에서 15사이의 정수를 하나 저장할 수 있다. 가장 처음에는 0이 저장되어 있다.
사용 가능한 명령은 아래와 같다:
문자 수행 명령
< 이동 방향을 왼쪽으로 바꾼다.
> 이동 방향을 오른쪽으로 바꾼다.
^ 이동 방향을 위쪽으로 바꾼다.
v 이동 방향을 아래쪽으로 바꾼다.
_ 메모리에 0이 저장되어 있으면 이동 방향을 오른쪽으로 바꾸고, 아니면 왼쪽으로 바꾼다.
| 메모리에 0이 저장되어 있으면 이동 방향을 아래쪽으로 바꾸고, 아니면 위쪽으로 바꾼다.
? 이동 방향을 상하좌우 중 하나로 무작위로 바꾼다. 방향이 바뀔 확률은 네 방향 동일하다.
. 아무 것도 하지 않는다.
@ 프로그램의 실행을 정지한다.
0~9 메모리에 문자가 나타내는 값을 저장한다.
+ 메모리에 저장된 값에 1을 더한다. 만약 더하기 전 값이 15이라면 0으로 바꾼다.
- 메모리에 저장된 값에 1을 뺀다. 만약 빼기 전 값이 0이라면 15로 바꾼다.
[입력]
첫 번째 줄에 테스트 케이스의 수 T가 주어진다.
각 테스트 케이스의 첫 번째 줄에는 두 정수 R, C (2 ≤ R, C ≤ 20) 가 공백으로 구분되어 주어진다.
이는 프로그램이 R행 C열의 문자로 이루어짐을 나타낸다.
다음 R개의 줄의 각 줄에는 C개의 문자로 이루어진 문자열이 주어진다. 주어지는 문자는 위에서 주어진 문자들이다.
[출력]
각 테스트 케이스마다 ‘#x’(x는 테스트케이스 번호를 의미하며 1부터 시작한다)를 출력하고,
주어진 프로그램이 정지할 수 있으면 “YES”를 출력하고, 아니면 “NO”를 출력한다.
'''
import sys
sys.stdin = open('1824.txt')  # redirect stdin so input() reads the local test file
T = int(input())  # number of test cases
# Movement deltas indexed by direction: 0=right, 1=left, 2=up, 3=down.
dx = [1, -1, 0, 0]
dy = [0, 0, -1, 1]
for case in range(1, T+1):
    R, C = map(int, input().split())  # grid height and width
    maps = []
    for r in range(R):
        maps.append(input())
    # Quick rejection: without an '@' anywhere the program can never halt.
    flag = 0
    for y in range(R):
        if '@' in maps[y]:
            flag = 1
    if not flag:
        print('#{} NO'.format(case))
    else:
        # Depth-first exploration over machine states [x, y, memory, direction].
        # Each (cell, direction) pair is expanded at most 20 times and the
        # stack is capped at 1000 entries to bound the search heuristically.
        tmp = [[0, 0, 0, 0]]
        check = [[[0, 0, 0, 0] for c in range(C)] for r in range(R)]
        result = 'NO'
        while tmp:
            if len(tmp) >= 1000:
                break
            # map(int, ...) is redundant here (entries are already ints) but harmless.
            x, y, memory, arrow = map(int, tmp.pop())
            check[y][x][arrow] += 1
            if maps[y][x] == '<':
                # Turn left; the grid wraps around toroidally via the modulo.
                arrow = 1
                X = x + dx[arrow]
                X %= C
                if check[y][X][arrow] <= 20:
                    tmp.append([X, y, memory, arrow])
            elif maps[y][x] == '>':
                arrow = 0
                X = x + dx[arrow]
                X %= C
                if check[y][X][arrow] <= 20:
                    tmp.append([X, y, memory, arrow])
            elif maps[y][x] == '^':
                arrow = 2
                Y = y + dy[arrow]
                Y %= R
                if check[Y][x][arrow] <= 20:
                    tmp.append([x, Y, memory, arrow])
            elif maps[y][x] == 'v':
                arrow = 3
                Y = y + dy[arrow]
                Y %= R
                if check[Y][x][arrow] <= 20:
                    tmp.append([x, Y, memory, arrow])
            elif maps[y][x] == '_':
                # Go right when memory is 0, otherwise left.
                if memory == 0:
                    arrow = 0
                else:
                    arrow = 1
                X = x + dx[arrow]
                X %= C
                if check[y][X][arrow] <= 20:
                    tmp.append([X, y, memory, arrow])
            elif maps[y][x] == '|':
                # Go down when memory is 0, otherwise up.
                if memory == 0:
                    arrow = 3
                else:
                    arrow = 2
                Y = y + dy[arrow]
                Y %= R
                if check[Y][x][arrow] <= 20:
                    tmp.append([x, Y, memory, arrow])
            elif maps[y][x] == '?':
                # Random direction: branch into all four possibilities.
                # NOTE(review): the visit check uses the old 'arrow' rather than
                # the candidate direction i — looks unintended; confirm.
                for i in range(4):
                    X = x + dx[i]
                    Y = y + dy[i]
                    X %= C
                    Y %= R
                    if check[Y][X][arrow] <= 20:
                        tmp.append([X, Y, memory, i])
            elif maps[y][x] == '.':
                # No-op: keep moving in the current direction.
                X = x + dx[arrow]
                Y = y + dy[arrow]
                X %= C
                Y %= R
                if check[Y][X][arrow] <= 20:
                    tmp.append([X, Y, memory, arrow])
            elif maps[y][x] == '@':
                # Halt instruction reached: the program can terminate.
                result = 'YES'
                break
            elif maps[y][x] == '+':
                # Increment memory modulo 16.
                if memory == 15:
                    memory = 0
                else:
                    memory += 1
                X = x + dx[arrow]
                Y = y + dy[arrow]
                X %= C
                Y %= R
                if check[Y][X][arrow] <= 20:
                    tmp.append([X, Y, memory, arrow])
            elif maps[y][x] == '-':
                # Decrement memory modulo 16.
                if memory == 0:
                    memory = 15
                else:
                    memory -= 1
                X = x + dx[arrow]
                Y = y + dy[arrow]
                X %= C
                Y %= R
                if check[Y][X][arrow] <= 20:
                    tmp.append([X, Y, memory, arrow])
            else:
                # Digit 0-9: store the literal value into memory.
                memory = int(maps[y][x])
                X = x + dx[arrow]
                Y = y + dy[arrow]
                X %= C
                Y %= R
                if check[Y][X][arrow] <= 20:
                    tmp.append([X, Y, memory, arrow])
        print('#{} {}'.format(case, result))
|
[
"leeiopd@hanmail.net"
] |
leeiopd@hanmail.net
|
1112cad995c7f9cfcf9ea20d70efdbb239b37b36
|
2e26bf9c44f349ee308e63e067d93da654daf69d
|
/projecteuler/euler036.py
|
878cb071e730b422a0ee722e5900f8f5af658ac0
|
[
"MIT"
] |
permissive
|
RelativeTech/PYTHON_PRAC
|
034e44484d63d50a9c4295aa7e1dc63ef786fb37
|
7fa145dece99089706460466a89901e00eef9d28
|
refs/heads/master
| 2023-06-04T18:59:45.059403
| 2021-06-07T19:40:10
| 2021-06-07T19:40:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
#!/usr/bin/env python
"""
Solution to Project Euler Problem 36
http://projecteuler.net/
by Apalala <apalala@gmail.com>
(cc) Attribution-ShareAlike
http://creativecommons.org/licenses/by-sa/3.0/
The decimal number, 585 = 10010010012 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in
base 10 and base 2.
(Please note that the palindromic number, in either base, may not include
leading zeros.)
"""
from palindromes import is_palindrome
def to_binary(n):
    """Return the binary representation of *n* as a string (no '0b' prefix)."""
    return format(n, 'b')
def dec_and_bin_palindromes(m):
    """Yield every integer in [1, m) that is palindromic in base 10 and base 2."""
    for candidate in range(1, m):
        if is_palindrome(candidate) and is_palindrome(to_binary(candidate)):
            yield candidate
def sum_dec_and_bin_palindromes(m):
    """Sum all numbers below *m* palindromic in both base 10 and base 2."""
    # sum() consumes the generator directly; the identity generator
    # expression in the original added nothing.
    return sum(dec_and_bin_palindromes(m))
def test():
    # Sanity-check the predicate with the example from the problem statement (585).
    assert is_palindrome(585) and is_palindrome(to_binary(585))
def run():
    """Print the answer: sum of double-base palindromes below one million."""
    print(sum_dec_and_bin_palindromes(10 ** 6))
if __name__ == '__main__':
    test()
    run()
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
b811df8852a8bc944150eb81bb5b2b5cdb6b8914
|
5cc954e27fd924da0f6f44e7d58691d612a77f80
|
/coremltools/converters/mil/frontend/tensorflow/ssa_passes/__init__.py
|
a659bb2d3a72c417e763c9e2730b9e92c562bf9e
|
[
"BSD-3-Clause"
] |
permissive
|
1duo/coremltools
|
e25f1a8423ec368bf1e7dabfaa36e77952578e79
|
37e619d99bf603d2cb9ea0839fa3ebe649996b0a
|
refs/heads/master
| 2021-07-15T08:48:51.930217
| 2020-07-27T20:58:33
| 2020-07-27T20:58:33
| 203,466,876
| 2
| 0
|
BSD-3-Clause
| 2020-07-22T00:05:02
| 2019-08-20T22:59:50
|
Python
|
UTF-8
|
Python
| false
| false
| 732
|
py
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
# Import all passes in this dir
from os.path import dirname, basename, isfile, join
import glob
# Modules that are never treated as passes.
excluded_files = [
    "__init__.py",
    "tf_passes.py",
]
modules = glob.glob(join(dirname(__file__), "*.py"))
# A pass module is any sibling .py file that is not private ("_"-prefixed,
# per Python convention), not a test module, and not explicitly excluded.
pass_modules = [
    basename(f)[:-3]
    for f in modules
    if isfile(f)
    and not basename(f).startswith(("_", "test"))  # idiomatic tuple-prefix test
    and basename(f) not in excluded_files
]
__all__ = pass_modules
from . import *  # import everything in __all__
|
[
"noreply@github.com"
] |
1duo.noreply@github.com
|
8d1ab7912a785809077016e666d282153dd9da31
|
426aed70aa6925105f10c7fcb7b611b277bf8b84
|
/python/dgl/distributed/__init__.py
|
462e347f48493808c907b3d4968a92bfd18ca25f
|
[
"Apache-2.0"
] |
permissive
|
hengruizhang98/dgl
|
0ce7201ca7380482440f031cb8ced6ca0e8c8dc1
|
195f99362d883f8b6d131b70a7868a537e55b786
|
refs/heads/master
| 2023-06-10T22:21:45.835646
| 2021-04-13T12:29:43
| 2021-04-13T12:29:43
| 336,804,001
| 3
| 0
|
Apache-2.0
| 2021-02-07T14:16:20
| 2021-02-07T14:16:20
| null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
"""DGL distributed module contains classes and functions to support
distributed graph neural network training and inference in a cluster of
machines.
This includes a few submodules:
* distributed data structures including distributed graph, distributed tensor
and distributed embeddings.
* distributed sampling.
* distributed workload split at runtime.
* graph partition.
"""
import os
import sys
from .dist_graph import DistGraphServer, DistGraph, node_split, edge_split
from .dist_tensor import DistTensor
from .partition import partition_graph, load_partition, load_partition_book
from .graph_partition_book import GraphPartitionBook, PartitionPolicy
from .sparse_emb import SparseAdagrad, DistEmbedding
from .rpc import *
from .rpc_server import start_server
from .rpc_client import connect_to_server
from .dist_context import initialize, exit_client
from .kvstore import KVServer, KVClient
from .server_state import ServerState
from .dist_dataloader import DistDataLoader
from .graph_services import sample_neighbors, in_subgraph, find_edges
|
[
"noreply@github.com"
] |
hengruizhang98.noreply@github.com
|
da5ef632a7cf8fee5a2e5b4c2148620481985735
|
7a0625ef4c271ed9992a736de7bb93215b7013fd
|
/leetcode70.py
|
69d3a74dd79759a429e6c7c53286866920e7b5a1
|
[] |
no_license
|
yuchien302/LeetCode
|
6576b93c005ea2275646df7b9547c22683d3b45c
|
c9a53ef2fc1fd1fea7377c3633689fa87601dba6
|
refs/heads/master
| 2020-12-11T01:42:36.980414
| 2015-12-03T02:53:51
| 2015-12-03T02:53:51
| 36,424,494
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
import unittest
class Solution(object):
    def climbStairs(self, n):
        """
        :type n: int
        :rtype: int
        """
        # Fibonacci-style recurrence kept in two rolling variables instead of
        # materializing the whole table.
        prev, curr = 1, 1
        for _ in range(2, n + 1):
            prev, curr = curr, prev + curr
        return curr
class Test(unittest.TestCase):
    """Unit tests for Solution.climbStairs."""

    def setUp(self):
        self.solution = Solution()

    def test_0(self):
        # Three stairs can be climbed in exactly 3 distinct ways.
        self.assertEqual(self.solution.climbStairs(3), 3)
if __name__ == '__main__':
    # Run the tests above when executed as a script.
    unittest.main()
|
[
"yc2257@cornell.edu"
] |
yc2257@cornell.edu
|
dcd43c82ed320c62fa992c05e7d3c179dd40a3ce
|
5b4b1866571453f78db5b06a08ff0eda17b91b04
|
/test/vanilla/Expected/AcceptanceTests/Validation/validation/_configuration.py
|
058eb79f711e66e68abbc33d48dc1d80d856909a
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
koek67/autorest.azure-functions-python
|
ba345f1d194ca7431daab1210a0cd801d4946991
|
b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783
|
refs/heads/main
| 2022-12-20T13:27:56.405901
| 2020-09-30T08:23:11
| 2020-09-30T08:23:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,409
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
class AutoRestValidationTestConfiguration(Configuration):
    """Configuration for AutoRestValidationTest.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param subscription_id: Subscription ID.
    :type subscription_id: str
    """

    # NOTE: type comments (instead of annotations) are kept because this file
    # is generated by AutoRest for Python 2/3 compatibility.
    def __init__(
        self,
        subscription_id, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        super(AutoRestValidationTestConfiguration, self).__init__(**kwargs)
        self.subscription_id = subscription_id
        self.api_version = "1.0.0"
        # Only set the moniker if the caller did not already supply one.
        kwargs.setdefault('sdk_moniker', 'autorestvalidationtest/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> None
        # Each pipeline policy can be overridden by passing the keyword of the
        # same name; otherwise the azure-core default policy is used.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
        # No default authentication policy: None unless explicitly provided.
        self.authentication_policy = kwargs.get('authentication_policy')
|
[
"varad.meru@gmail.com"
] |
varad.meru@gmail.com
|
d1a77da2f46d84a5e90b64fd9a100a0ab730d597
|
44cf0bda12c8ca392a7000efa709adc2ac2aff42
|
/26_다음_이메일_크롤링.py
|
cb07d2336b10220b005cf68307343167a7d1fd17
|
[] |
no_license
|
swj8905/Shinhan_Basic_Course
|
a3d0ebe9aa12a70f3da56cf78e7eca39c5a92238
|
b59ad0eeaa4ef77bee5d41d504ecd76148dbf9f8
|
refs/heads/master
| 2023-06-03T07:30:24.585194
| 2021-06-29T09:32:02
| 2021-06-29T09:32:02
| 380,967,321
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,267
|
py
|
# Crawl the subject line of every email in a Daum mailbox via Selenium.
from selenium import webdriver
import time
import chromedriver_autoinstaller
chrome_path = chromedriver_autoinstaller.install()
browser = webdriver.Chrome(chrome_path)  # launch Chrome
browser.get("https://logins.daum.net/accounts/signinform.do?url=https%3A%2F%2Fwww.daum.net%2F")
# Enter the account id.
# SECURITY: credentials are hard-coded in source; move them to environment
# variables or a secrets store before sharing or committing this script.
login_id = browser.find_element_by_css_selector("input#id")  # renamed: 'id' shadowed the builtin
login_id.send_keys("talingpython")
# Enter the password.
pw = browser.find_element_by_css_selector("input#inputPwd")
pw.send_keys("q1w2e3!@#")
# Click the login button.
button = browser.find_element_by_css_selector("button#loginBtn")
button.click()
time.sleep(3)  # wait until the login completes
# Navigate to the mailbox.
browser.get("https://mail.daum.net/")
time.sleep(2)  # wait until the page finishes loading
# Crawl email subjects page by page.
page_num = 2
while True:
    title = browser.find_elements_by_css_selector("strong.tit_subject")
    for i in title:
        print(i.text)
    # Move to the next page; stop when the pager has no further link.
    try:
        next_button = browser.find_element_by_css_selector(f"span.paging_mail > a:nth-child({page_num+1})")
    except Exception:  # was a bare except: no longer swallows KeyboardInterrupt/SystemExit
        print("======== 크롤링 끝! ===========")
        break
    next_button.click()
    time.sleep(1)
    page_num += 1
browser.close()
|
[
"swj8905@naver.com"
] |
swj8905@naver.com
|
8361438a1cee72c2f72855173f80fe01740cc2d8
|
6ab9a3229719f457e4883f8b9c5f1d4c7b349362
|
/leetcode/00007_reverse_integer.py
|
9db0a0e2296e3e0688f687c37ad5b73c6f853dd8
|
[] |
no_license
|
ajmarin/coding
|
77c91ee760b3af34db7c45c64f90b23f6f5def16
|
8af901372ade9d3d913f69b1532df36fc9461603
|
refs/heads/master
| 2022-01-26T09:54:38.068385
| 2022-01-09T11:26:30
| 2022-01-09T11:26:30
| 2,166,262
| 33
| 15
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
class Solution:
    def reverse(self, x: int) -> int:
        """Reverse the decimal digits of x.

        Returns 0 when the reversed value falls outside the signed
        32-bit range [-2**31, 2**31 - 1].
        """
        sign = -1 if x < 0 else 1
        remaining = abs(x)
        reversed_digits = 0
        while remaining:
            # Peel off the lowest digit and push it onto the result.
            remaining, digit = divmod(remaining, 10)
            reversed_digits = reversed_digits * 10 + digit
        reversed_digits *= sign
        return reversed_digits if -2**31 <= reversed_digits <= 2**31 - 1 else 0
|
[
"mistermarin@gmail.com"
] |
mistermarin@gmail.com
|
26455f7f887f002a25abe4aae6cbde984a2d68c6
|
17381d148b86fc4354d1ac0e4062a35215eafd09
|
/paiza/C062.py
|
d97af758a417b3e2440ff2a2c8de5acb6614538a
|
[] |
no_license
|
worldyone/workspace
|
027a93e1f227eb1c10485f6f2082a92bd98710d5
|
1e3fa21e23d6e91268882c9e480b76c4a3c4920f
|
refs/heads/master
| 2023-03-09T01:21:53.402440
| 2022-09-20T14:47:49
| 2022-09-20T14:47:49
| 174,147,113
| 0
| 1
| null | 2023-02-10T22:53:26
| 2019-03-06T13:07:08
|
Python
|
UTF-8
|
Python
| false
| false
| 195
|
py
|
# Count how many melon plates can be taken off the conveyor belt:
# after taking one, the next 11 plates must be allowed to pass
# (cooldown), even if some of them are also melon.
cnt = 0       # number of melon plates actually taken
rcnt = 0      # remaining cooldown, in plates
m = "melon"
T = int(input())  # total number of plates to observe
for t in range(T):
    sushi = input()
    rcnt -= 1
    # Take the plate only if it is melon and the cooldown has elapsed.
    if sushi == m and rcnt <= 0:
        cnt += 1
        rcnt = 11  # skip the next 11 plates
print(cnt)
|
[
"amanohikari142857@gmail.com"
] |
amanohikari142857@gmail.com
|
77ff9ab3b4d79c4946379a900be2fcb4a29ca7d3
|
d4792ed2061380bb23cec6fcd1c18ea2939490ac
|
/examples/keras_recipes/antirectifier.py
|
4798e2df9b4fff6c10b3d5e9854ee727815d9458
|
[
"Apache-2.0"
] |
permissive
|
tcglarry/keras-io
|
47e813795204a4fc88512c90168d3f81d2aad8aa
|
4f26aead10fd238c98d85fbb6a32679923d79c76
|
refs/heads/master
| 2022-06-09T09:21:50.376337
| 2020-05-07T21:53:26
| 2020-05-07T21:53:26
| 262,175,145
| 2
| 0
|
Apache-2.0
| 2020-05-07T22:53:45
| 2020-05-07T22:53:44
| null |
UTF-8
|
Python
| false
| false
| 3,072
|
py
|
"""
Title: Simple custom layer example: Antirectifier
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2016/01/06
Last modified: 2020/04/20
Description: Demonstration of custom layer creation.
"""
"""
## Introduction
This example shows how to create custom layers, using the Antirectifier layer
(originally proposed as a Keras example script in January 2016), an alternative
to ReLU. Instead of zeroing-out the negative part of the input, it splits the negative
and positive parts and returns the concatenation of the absolute value
of both. This avoids loss of information, at the cost of an increase in dimensionality.
To fix the dimensionality increase, we linearly combine the
features back to a space of the original size.
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
## The Antirectifier layer
"""
class Antirectifier(layers.Layer):
    """Layer that preserves both signs of its (mean-centered) input.

    Instead of zeroing the negative part like ReLU, the input is
    centered, the ReLU of its positive and negative halves are
    concatenated, and a learned kernel projects the doubled feature
    space back to the original width.
    """

    def __init__(self, initializer="he_normal", **kwargs):
        super(Antirectifier, self).__init__(**kwargs)
        # Initializer for the projection kernel created in build().
        self.initializer = keras.initializers.get(initializer)

    def build(self, input_shape):
        output_dim = input_shape[-1]
        # Kernel maps the concatenated (2 * dim) features back to dim.
        self.kernel = self.add_weight(
            shape=(output_dim * 2, output_dim),
            initializer=self.initializer,
            name="kernel",
            trainable=True,
        )

    def call(self, inputs):
        centered = inputs - tf.reduce_mean(inputs, axis=-1, keepdims=True)
        positive_part = tf.nn.relu(centered)
        negative_part = tf.nn.relu(-centered)
        doubled = tf.concat([positive_part, negative_part], axis=-1)
        return tf.matmul(doubled, self.kernel)

    def get_config(self):
        # Serialize the initializer so the layer round-trips through
        # model (de)serialization.  Optional but recommended.
        config = super(Antirectifier, self).get_config()
        config["initializer"] = keras.initializers.serialize(self.initializer)
        return config
"""
## Let's test-drive it on MNIST
"""
# Training parameters
batch_size = 128
num_classes = 10  # declared but unused below; kept from the original template
epochs = 20

# The data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Flatten the 28x28 images to 784-length vectors and scale to [0, 1].
x_train = x_train.reshape(-1, 784)
x_test = x_test.reshape(-1, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")

# Build the model.  Dense layers are followed by the custom
# Antirectifier activation instead of a plain ReLU.
model = keras.Sequential(
    [
        keras.Input(shape=(784,)),
        layers.Dense(256),
        Antirectifier(),
        layers.Dense(256),
        Antirectifier(),
        layers.Dropout(0.5),
        layers.Dense(10),
    ]
)

# Compile the model (logits output, so from_logits=True).
model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=keras.optimizers.RMSprop(),
    metrics=[keras.metrics.SparseCategoricalAccuracy()],
)

# Train the model; 15% of the training set is held out for validation.
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.15)

# Test the model
model.evaluate(x_test, y_test)
|
[
"francois.chollet@gmail.com"
] |
francois.chollet@gmail.com
|
6d1c82a577174ba581dfefe9fd5878004cf1c33d
|
05ae2d651e6adbc4cfea04e2ab8a93c0a9e23aff
|
/core/roof/roof_props.py
|
d077cb45445b4acd8b5534a71210700108775101
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
thunderpwn/building_tool
|
932087b2415607b41d3da374da1eb2d5e10dbb3d
|
5aa4a476f93ae1b8fd8240439b3272d8cf33a0b1
|
refs/heads/master
| 2022-04-12T18:59:47.373480
| 2020-04-11T11:31:24
| 2020-04-11T11:31:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,963
|
py
|
import bpy
from bpy.props import EnumProperty, FloatProperty, BoolProperty
class RoofProperty(bpy.types.PropertyGroup):
    """Property group holding the user-configurable roof parameters."""

    # Enum items are (identifier, UI label, description, number).
    roof_items = [
        ("FLAT", "Flat", "", 0),
        ("GABLE", "Gable", "", 1),
        ("HIP", "Hip", "", 2),
    ]

    type: EnumProperty(
        name="Roof Type",
        items=roof_items,
        default="FLAT",
        description="Type of roof to create",
    )

    thickness: FloatProperty(
        name="Thickness",
        min=0.01,
        max=1000.0,
        default=0.1,
        description="Thickness of roof hangs",
    )

    outset: FloatProperty(
        name="Outset",
        min=0.01,
        max=1.0,
        default=0.1,
        description="Outset of roof hangs",
    )

    height: FloatProperty(
        name="Height",
        min=0.01,
        max=1000.0,
        default=1,
        description="Height of entire roof",
    )

    o_types = [("HORIZONTAL", "Horizontal", "", 0), ("VERTICAL", "Vertical", "", 1)]
    orient: EnumProperty(
        description="Orientation of gable", items=o_types, default="HORIZONTAL"
    )

    roof_hangs: BoolProperty(
        name="Roof Hangs", default=True, description="Whether to add roof hangs"
    )

    def draw(self, context, layout):
        """Draw the roof options into *layout*; the fields shown depend
        on the selected roof type."""
        layout.prop(self, "type", text="")

        box = layout.box()
        if self.type == "FLAT":
            # Flat roofs only need thickness and outset.
            col = box.column(align=True)
            col.prop(self, "thickness")
            col.prop(self, "outset")
        elif self.type == "GABLE":
            # Gable roofs additionally expose orientation, height and
            # an optional overhang toggle.
            row = box.row(align=True)
            row.prop(self, "orient", expand=True)

            col = box.column(align=True)
            col.prop(self, "thickness")
            col.prop(self, "outset")
            col.prop(self, "height")
            box.prop(self, "roof_hangs", toggle=True)
        else:
            # HIP roofs: thickness, outset and height.
            col = box.column(align=True)
            col.prop(self, "thickness")
            col.prop(self, "outset")
            col.prop(self, "height")
|
[
"karanjaichungwa@gmail.com"
] |
karanjaichungwa@gmail.com
|
2bc5c3b7f00dfa38d5f5b217488400d7f88be85b
|
91ef6ab9391c7c78981b6a36a7daa47b48ac582a
|
/neupy/algorithms/steps/errdiff.py
|
fec7e685f5e29365892da5ffeead35a2bc97f9a2
|
[
"MIT"
] |
permissive
|
stczhc/neupy
|
f89505011d78b7ade5800b51057c4c1370dea83a
|
de09f5abd6667824f14806709de2afa1ac5daa09
|
refs/heads/master
| 2020-12-07T00:45:07.445414
| 2016-06-11T19:00:33
| 2016-06-11T19:00:33
| 58,636,539
| 0
| 1
| null | 2016-05-12T11:42:01
| 2016-05-12T11:42:01
| null |
UTF-8
|
Python
| false
| false
| 2,809
|
py
|
import theano
from theano.ifelse import ifelse
import numpy as np
from neupy.core.properties import (BoundedProperty,
ProperFractionProperty)
from .base import SingleStepConfigurable
__all__ = ('ErrDiffStepUpdate',)
class ErrDiffStepUpdate(SingleStepConfigurable):
    """ This algorithm make step update base on error difference between
    epochs.

    Parameters
    ----------
    update_for_smaller_error : float
        Multiplies this option to ``step`` in if the error was less than in
        previous epochs. Defaults to ``1.05``. Value can't be less
        than ``1``.
    update_for_bigger_error : float
        Multiplies this option to ``step`` in if the error was more than in
        previous epochs. Defaults to ``0.7``.
    error_difference : float
        The value indicates how many had to increase the error from the
        previous epochs that would produce a reduction step. Defaults
        to ``1.04``. Value can't be less than ``1``.

    Warns
    -----
    {SingleStepConfigurable.Warns}

    Examples
    --------
    >>> from neupy import algorithms
    >>>
    >>> bpnet = algorithms.GradientDescent(
    ...     (2, 4, 1),
    ...     step=0.1,
    ...     verbose=False,
    ...     addons=[algorithms.ErrDiffStepUpdate]
    ... )
    >>>
    """
    update_for_smaller_error = BoundedProperty(default=1.05, minval=1)
    update_for_bigger_error = ProperFractionProperty(default=0.7)
    # NOTE(review): error_difference is declared but never referenced in
    # this class body — confirm whether it should gate the step decrease.
    error_difference = BoundedProperty(default=1.04, minval=1)

    def init_variables(self):
        # Shared scalars holding the errors of the last two epochs;
        # start as NaN until real values are recorded.
        self.variables.update(
            last_error=theano.shared(name='last_error', value=np.nan),
            previous_error=theano.shared(name='previous_error', value=np.nan),
        )
        super(ErrDiffStepUpdate, self).init_variables()

    def init_train_updates(self):
        updates = super(ErrDiffStepUpdate, self).init_train_updates()

        step = self.variables.step
        last_error = self.variables.last_error
        previous_error = self.variables.previous_error

        # Grow the step when the error improved; shrink it when the
        # error got worse beyond the tolerated factor; otherwise keep it.
        step_update_condition = ifelse(
            last_error < previous_error,
            self.update_for_smaller_error * step,
            ifelse(
                last_error > self.update_for_bigger_error * previous_error,
                self.update_for_bigger_error * step,
                step
            )
        )
        updates.append((step, step_update_condition))
        return updates

    def on_epoch_start_update(self, epoch):
        super(ErrDiffStepUpdate, self).on_epoch_start_update(epoch)

        # Refresh the two shared error variables from the error history
        # before the step-update expression is evaluated this epoch.
        previous_error = self.errors.previous()
        if previous_error:
            last_error = self.errors.last()
            self.variables.last_error.set_value(last_error)
            self.variables.previous_error.set_value(previous_error)
|
[
"mail@itdxer.com"
] |
mail@itdxer.com
|
cba4cb0cc371d08ed47fec7e5feb685cd700f669
|
53c1eb6604f9e060bd6c9ce84395ab1a38d58f6f
|
/exercise/codewar/arara.py
|
cc30d2059afcc4569d4a2a045cf54c4e10176cbe
|
[] |
no_license
|
turo62/exercise
|
543c684ef3dfe138a5f0d6976b7ff0d9c19553f0
|
3d8d8d8a12bb3885b3015eff0032cd977c02957e
|
refs/heads/master
| 2020-04-14T18:10:31.224244
| 2019-01-03T18:10:55
| 2019-01-03T18:10:55
| 164,008,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
# Returns the Arara translation of a number.
def count_arara(n):
    """Translate a positive integer into Arara counting words.

    The Arara count in twos: 1 is "anane", 2 is "adak", and larger
    numbers chain these words.  For values above 8 the phrase for 8 is
    repeated and the remainder (if any) is appended.

    Bug fix: the original had an unbalanced parenthesis (syntax error)
    and combined the quotient with ``n % 2`` instead of ``n % 8``.

    Args:
        n: positive integer to translate.

    Returns:
        The Arara phrase for n, or None when n has no entry (n <= 0).
    """
    words = {1: "anane",
             2: "adak",
             3: "adak anane",
             4: "adak adak",
             5: "adak adak anane",
             6: "adak adak adak",
             7: "adak adak adak anane",
             8: "adak adak adak adak"
             }
    if n <= 8:
        return words.get(n)
    # n > 8: repeat the phrase for 8, then append the remainder words.
    parts = [words[8]] * (n // 8)
    if n % 8:
        parts.append(words[n % 8])
    return " ".join(parts)


def main():
    val = count_arara(5)
    print(val)


if __name__ == "__main__":
    main()
|
[
"turo62@gmail.com"
] |
turo62@gmail.com
|
44c56b7babb4f9b8f08de5e547e5d173f72309fe
|
16303902855d9a3b18b39e46c58567b16d907c02
|
/setup.py
|
b9e8a6224c7cfcccdc1de8c06c6b1bfffbea86ca
|
[] |
no_license
|
ConstClub/pyconst
|
194f2d1f53113dec9a5178b56905c3a5e3892909
|
863fd2c0617d769f392cab4e1bf33555ee8f011c
|
refs/heads/master
| 2021-01-24T16:52:09.458058
| 2019-10-18T08:23:29
| 2019-10-18T08:23:29
| 123,215,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
# -*- coding: utf-8 -*-

# Packaging script for the "pyconstant" distribution (Const for Python).
from setuptools import setup

# Release version, published to the package index.
version = '1.0.0'

setup(
    name='pyconstant',
    version=version,
    keywords='',
    description='Const for Python',
    # Long description is rendered from the README on the index page;
    # requires README.rst to exist next to this script at build time.
    long_description=open('README.rst').read(),

    url='https://github.com/ConstClub/pyconst',

    author='Hackathon',
    author_email='kimi.huang@brightcells.com',

    packages=['pyconstant'],
    py_modules=[],
    install_requires=[],  # pure-Python, no runtime dependencies

    classifiers=[
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
|
[
"brightcells@gmail.com"
] |
brightcells@gmail.com
|
9e416dd9d02fb2dfe9f230adaee649b8bf5ab6d7
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AlipayOpenMiniMembersHuobanCreateModel.py
|
e1ea8e7fdf3f890762367a9dcafff368584a3256
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,430
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniMembersHuobanCreateModel(object):
    """Request payload for creating a huoban (partner) mini-program member.

    Plain data holder with property accessors and dict round-trip
    helpers (``to_alipay_dict`` / ``from_alipay_dict``).
    """

    def __init__(self):
        # All fields start empty until assigned by the caller.
        self._domain_account = None
        self._login_id = None
        self._operate_app_id = None
        self._type = None

    @property
    def domain_account(self):
        return self._domain_account

    @domain_account.setter
    def domain_account(self, value):
        self._domain_account = value

    @property
    def login_id(self):
        return self._login_id

    @login_id.setter
    def login_id(self, value):
        self._login_id = value

    @property
    def operate_app_id(self):
        return self._operate_app_id

    @operate_app_id.setter
    def operate_app_id(self, value):
        self._operate_app_id = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict.

        Nested values exposing ``to_alipay_dict`` are serialized
        recursively; falsy fields are omitted.
        """
        params = dict()
        for field in ('domain_account', 'login_id', 'operate_app_id', 'type'):
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from dict *d*; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayOpenMiniMembersHuobanCreateModel()
        for field in ('domain_account', 'login_id', 'operate_app_id', 'type'):
            if field in d:
                setattr(o, field, d[field])
        return o
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
ab443ab24b7f89149ef4fcfb7e441860e9d2030c
|
5261e3c72259991fbdb9737c4c764eb0686860d3
|
/tests/test_docs/test_cli_commands.py
|
47dcb09ac7d77549be4329b7f7f7d7f6f3a6308f
|
[
"Apache-2.0"
] |
permissive
|
eorituz/agents-aea
|
45dfb9729718421290c71da91ac4c51f9cc6a608
|
197451196728141a27ec73fd8210c05cb74501f7
|
refs/heads/main
| 2023-03-24T02:40:27.132664
| 2021-03-23T14:42:58
| 2021-03-23T14:42:58
| 350,744,268
| 0
| 0
|
Apache-2.0
| 2021-03-23T14:40:13
| 2021-03-23T14:32:29
| null |
UTF-8
|
Python
| false
| false
| 2,632
|
py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the content of cli-commands.md file."""
import pprint
import re
from pathlib import Path
from aea.cli import cli
from tests.conftest import ROOT_DIR
from tests.test_docs.helper import BaseTestMarkdownDocs
IGNORE_MATCHES = ["`-v DEBUG run`", "`config set [path] [--type TYPE]`"]
class TestCliCommands(BaseTestMarkdownDocs):
    """Test cli-commands.md documentation."""

    DOC_PATH = Path(ROOT_DIR, "docs", "cli-commands.md")

    def test_cli_commands(self):
        """Check documented CLI commands against the implementation.

        Verifies the table in cli-commands.md lists every ``cli``
        command exactly once, lists nothing unknown, and is in
        alphabetical order.
        """
        # Extract the first backtick-quoted word from each table row.
        commands_raw = re.compile(r"\| `.*` +\|").findall(self.doc_content)
        commands_raw = [
            re.compile(r"`([A-Za-z0-9\-_]+) ?.*`").search(s) for s in commands_raw
        ]
        commands_raw = list(
            filter(lambda x: x.group(0) not in IGNORE_MATCHES, commands_raw)
        )
        actual_commands = list(map(lambda match: match.group(1), commands_raw))
        actual_commands_set = set(actual_commands)
        expected_commands = set(cli.commands.keys())

        # test no duplicates
        assert len(actual_commands) == len(
            actual_commands_set
        ), "Found duplicate commands in the documentation."

        # test that there is no missing command
        missing = expected_commands.difference(actual_commands)
        assert (
            len(missing) == 0
        ), f"Missing the following commands: {pprint.pformat(missing)}"

        # test that there are no more commands
        # (bug fix: the failure message previously formatted `missing`
        # instead of the offending `more` set)
        more = actual_commands_set.difference(expected_commands)
        assert len(more) == 0, f"There are unknown commands: {pprint.pformat(more)}"

        # test that they are in the same order.
        actual = actual_commands
        expected = sorted(expected_commands)
        assert actual == expected, "Commands are not in alphabetical order."
|
[
"marco.favorito@gmail.com"
] |
marco.favorito@gmail.com
|
e8d244e1575403819544217a2d429d5ef5a0d1af
|
77dcf5ebad9512843742741c20cd412972d3261d
|
/alien.py
|
83eda05822e131264b074b9f2c51b8590a2cbc4c
|
[] |
no_license
|
crystalDf/Python-Crash-Course-2nd-Edition-Chapter-06-Dictionaries
|
5742236d071e16cbb672be7f010f9fbc82033c06
|
cc602b56134d63b6baf286e718daf08a2955967c
|
refs/heads/master
| 2023-06-03T09:47:26.659353
| 2021-06-20T12:55:09
| 2021-06-20T12:55:09
| 378,645,143
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
# Chapter 6 examples: creating, reading, modifying and deleting
# dictionary entries with a simple alien game dictionary.

# Accessing values stored in a dictionary.
alien_0 = {'color': 'green', 'points': 5}
print(alien_0['color'])
print(alien_0['points'])

new_points = alien_0['points']
print(f"You just earned {new_points} points!")

print(alien_0)

# Adding new key-value pairs.
alien_0['x_position'] = 0
alien_0['y_position'] = 25
print(alien_0)

# Starting with an empty dictionary and filling it in.
alien_0 = {}
alien_0['color'] = 'green'
alien_0['points'] = 5
print(alien_0)

# Modifying an existing value.
alien_0 = {'color': 'green'}
print(f"The alien is {alien_0['color']}.")
alien_0['color'] = 'yellow'
print(f"The alien is now {alien_0['color']}.")

# Using a value to drive program logic: speed decides the step size.
alien_0 = {'x_position': 0, 'y_position': 25, 'speed': 'medium'}
print(f"Original position: {alien_0['x_position']}")
if alien_0['speed'] == 'slow':
    x_increment = 1
elif alien_0['speed'] == 'medium':
    x_increment = 2
else:
    x_increment = 3
alien_0['x_position'] = alien_0['x_position'] + x_increment
print(f"New position: {alien_0['x_position']}")

# Removing a key-value pair with del.
alien_0 = {'color': 'green', 'points': 5}
print(alien_0)
del alien_0['points']
print(alien_0)
|
[
"chendong333@gmail.com"
] |
chendong333@gmail.com
|
94c48e5e215007b0d4439de04f8f769da3f0762b
|
a20ae2286d7055de8c533bc954c18b22d2a3cf5a
|
/sstones/ss_app/migrations/0022_auto_20190125_2146.py
|
89a1a1c7637a8511bde7ab64191b1c4ea7b1f968
|
[] |
no_license
|
jflynn87/sstones
|
695442c2c1745c83a37100d4b163938891afe184
|
3a2548b5457ef6386e808b6d984e376f2c83a254
|
refs/heads/master
| 2021-07-13T19:04:59.898493
| 2020-06-09T03:07:43
| 2020-06-09T03:07:43
| 139,449,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
# Generated by Django 2.0.4 on 2019-01-25 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: change the invoice money fields
    (principal, tax, total) to plain IntegerField columns."""

    dependencies = [
        ('ss_app', '0021_auto_20190125_2110'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invoice',
            name='principal',
            field=models.IntegerField(),
        ),
        migrations.AlterField(
            model_name='invoice',
            name='tax',
            field=models.IntegerField(),
        ),
        migrations.AlterField(
            model_name='invoice',
            name='total',
            field=models.IntegerField(),
        ),
    ]
|
[
"jflynn87@hotmail.com"
] |
jflynn87@hotmail.com
|
72ba6c385460dab2b1036368b0325f806107877f
|
fe6775ca8c5b42710785e3a923974ae079f92c8f
|
/秋招/电信/3.py
|
05b38997f944a3eb85cdd29b02167a6d0624197c
|
[] |
no_license
|
AiZhanghan/Leetcode
|
41bda6676fa1a25fa19e393553c1148ed51fdf72
|
101bce2fac8b188a4eb2f5e017293d21ad0ecb21
|
refs/heads/master
| 2021-06-28T10:48:07.865968
| 2020-11-20T09:45:15
| 2020-11-20T09:45:15
| 188,155,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 556
|
py
|
class Solution:
    def func(self, nums):
        """Dynamic programming: maximum sum of non-adjacent elements.

        Args:
            nums: list[int], non-empty
        Return:
            int
        """
        # skip / take hold the best totals when the previous element is
        # excluded / included; the rolling pair replaces the full table.
        skip, take = 0, nums[0]
        for value in nums[1:]:
            skip, take = max(skip, take), skip + value
        return max(skip, take)
if __name__ == "__main__":
    # Read one comma-separated line of integers, e.g. "1,2,3,1".
    nums = list(map(int, input().split(",")))
    print(Solution().func(nums))
|
[
"35103759+AiZhanghan@users.noreply.github.com"
] |
35103759+AiZhanghan@users.noreply.github.com
|
a88a544ffd2d07eb0de1c4220d463733c1ad7f92
|
3fda3ff2e9334433554b6cf923506f428d9e9366
|
/hipeac/migrations/0003_auto_20181203_1702.py
|
7b70e8202885094bb723ff32f5c3484853d1a689
|
[
"MIT"
] |
permissive
|
CreativeOthman/hipeac
|
12adb61099886a6719dfccfa5ce26fdec8951bf9
|
2ce98da17cac2c6a87ec88df1b7676db4c200607
|
refs/heads/master
| 2022-07-20T10:06:58.771811
| 2020-05-07T11:39:13
| 2020-05-07T11:44:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,826
|
py
|
# Generated by Django 2.1.3 on 2018-12-03 16:02
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
    """Auto-generated migration: add application_areas, projects,
    topics and users fields to the magazine model.  The CharFields
    store comma-separated id lists validated by a digits-and-commas
    regex."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("hipeac", "0002_auto_20181203_1217"),
    ]

    operations = [
        migrations.AddField(
            model_name="magazine",
            name="application_areas",
            field=models.CharField(
                blank=True,
                max_length=250,
                validators=[
                    django.core.validators.RegexValidator(
                        re.compile("^\\d+(?:,\\d+)*\\Z"),
                        code="invalid",
                        message="Enter only digits separated by commas.",
                    )
                ],
            ),
        ),
        migrations.AddField(
            model_name="magazine",
            name="projects",
            field=models.ManyToManyField(blank=True, related_name="magazines", to="hipeac.Project"),
        ),
        migrations.AddField(
            model_name="magazine",
            name="topics",
            field=models.CharField(
                blank=True,
                max_length=250,
                validators=[
                    django.core.validators.RegexValidator(
                        re.compile("^\\d+(?:,\\d+)*\\Z"),
                        code="invalid",
                        message="Enter only digits separated by commas.",
                    )
                ],
            ),
        ),
        migrations.AddField(
            model_name="magazine",
            name="users",
            field=models.ManyToManyField(blank=True, related_name="magazines", to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"eneko.illarramendi@ugent.be"
] |
eneko.illarramendi@ugent.be
|
cff3977d4479674d5ad613e5cf07ee5c36761581
|
58afefdde86346760bea40690b1675c6639c8b84
|
/leetcode/knight-dialer/403821504.py
|
fe51e2c85fa1fd0b3cc504be5cf71dc4954c155d
|
[] |
no_license
|
ausaki/data_structures_and_algorithms
|
aaa563f713cbab3c34a9465039d52b853f95548e
|
4f5f5124534bd4423356a5f5572b8a39b7828d80
|
refs/heads/master
| 2021-06-21T10:44:44.549601
| 2021-04-06T11:30:21
| 2021-04-06T11:30:21
| 201,942,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
# title: knight-dialer
# detail: https://leetcode.com/submissions/detail/403821504/
# datetime: Sat Oct 3 15:34:15 2020
# runtime: 2428 ms
# memory: 45.6 MB
class Solution:
    def knightDialer(self, n: int) -> int:
        """Count distinct length-n numbers a chess knight can dial.

        The knight starts on any numeric key and every hop must be a
        legal knight move on the phone pad.  Result is taken modulo
        10**9 + 7.

        Bug fix: ``lru_cache`` was used without being imported, which
        raises NameError outside LeetCode's implicit environment.
        """
        # Imported locally so the class stays drop-in compatible with
        # the original single-class file layout.
        from functools import lru_cache

        MOD = 10 ** 9 + 7
        # jump[d] lists the keys reachable from key d; index 10 is a
        # virtual start position that may move to any of the ten keys.
        jump = [[4, 6], [6, 8], [7, 9], [4, 8], [3, 9, 0], [], [0, 1, 7], [2, 6], [1, 3], [2, 4], list(range(10))]

        @lru_cache(None)
        def dp(i, k):
            # Number of length-k continuations starting from key i.
            if k == 0:
                return 1
            result = 0
            for j in jump[i]:
                result = (result + dp(j, k - 1)) % MOD
            return result

        return dp(10, n)
|
[
"ljm51689@gmail.com"
] |
ljm51689@gmail.com
|
75051d09b67149f3380eaf69db28304e417cc616
|
13569261f1f7808aa2f9424a957358da77a7a949
|
/Monet_Harkin/Bike/server.py
|
755633b8d945d7b8a725af4c53ff9e9d3b02838b
|
[] |
no_license
|
CodingDojoDallas/python-nov-2016
|
75049d114116330c1898ae5e3d1fd202a999da5d
|
a6a50cc7cd4f50b59459d995d2df4707417b8f1c
|
refs/heads/master
| 2021-01-12T12:21:18.972855
| 2016-11-23T21:24:53
| 2016-11-23T21:24:53
| 72,453,587
| 0
| 13
| null | 2016-11-23T21:24:54
| 2016-10-31T16:07:05
|
Python
|
UTF-8
|
Python
| false
| false
| 792
|
py
|
# Monet Harkin - Bike OOP test
class Bike(object):
    """A bike with a price, a top speed and a simple odometer.

    Bug fix: the original used Python 2 print statements, which are a
    syntax error under Python 3.  The parenthesized single-argument
    form below behaves identically on both Python 2 and 3.
    """

    def __init__(self, price, max_speed):
        self.price = price          # purchase price
        self.max_speed = max_speed  # e.g. "25mph"
        self.miles = 0              # odometer starts at zero

    def displayInfo(self):
        """Print the bike's price, top speed and mileage."""
        print("Price: " + str(self.price))
        print("Max speed: " + str(self.max_speed))
        print("Miles: " + str(self.miles))

    def ride(self):
        """Ride the bike: adds 10 miles."""
        self.miles += 10
        print("Riding")

    def reverse(self):
        """Reverse the bike: removes 5 miles, never below zero."""
        if self.miles >= 5:
            self.miles -= 5
        else:
            self.miles = 0
        print("Reversing")
# Exercise three differently configured bikes.
# Bug fix: converted Python 2 print statements to the parenthesized
# form, which behaves identically on both Python 2 and 3.
bike1 = Bike(200, "25mph")
bike2 = Bike(300, "27mph")
bike3 = Bike(100, "10mph")

bike1.ride()
bike1.ride()
bike1.ride()
bike1.reverse()
bike1.displayInfo()
print("*" * 50)

bike2.ride()
bike2.ride()
bike2.reverse()
bike2.reverse()
bike2.displayInfo()
print("*" * 50)

bike3.reverse()
bike3.reverse()
bike3.reverse()
bike3.displayInfo()
|
[
"mpxluv@msn.com"
] |
mpxluv@msn.com
|
03184a2f469e1ce27641847dc30183d3a658359d
|
f2a5680231e205dc49a083578d9bd90e4603036c
|
/Grokking-Coding-Interview-Patterns/14. Top K Numbers/KpointsClosestTwoOrigin.py
|
ef7a6e0c1f638611eb0b6307af2de741ced1381e
|
[] |
no_license
|
flogothetis/Technical-Coding-Interviews-Algorithms-LeetCode
|
d592451f7d297fd52395e33dc67686e9990a663c
|
7c8473fce4b5b5affbfde5ed8c39fdb89cbc77d4
|
refs/heads/master
| 2023-01-13T15:56:07.706164
| 2020-11-18T18:54:52
| 2020-11-18T18:54:52
| 281,101,823
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
'''
Problem Statement #
Given an array of points in the a 2D2D2D plane, find ‘K’ closest points to the origin.
'''
from heapq import *
# Time Comp : O(NlogK)
# Space Comp : O(K)
class Point:
    """2-D point ordered by *descending* distance from the origin, so a
    heapq min-heap of Points behaves as a max-heap on distance."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __lt__(self, other):
        # Bug fix: the original lacked the `return`, so __lt__ always
        # yielded None (falsy) and the heap ordering was broken.
        return self.euclidean_dist() > other.euclidean_dist()

    def print_point(self):
        print("[" + str(self.x) + ", " + str(self.y) + "] ", end='')

    def euclidean_dist(self):
        """Euclidean distance from the origin."""
        return (self.x ** 2 + self.y ** 2) ** (1 / 2)
def find_closest_points(points, k):
    """Return the k points closest to the origin.

    Maintains a size-k max-heap keyed on distance (Point.__lt__ is
    inverted): each remaining point that is closer than the current
    farthest evicts the heap root.  Time O(N log K), space O(K).
    (Removed the unused ``result`` local from the original.)

    Args:
        points: list of Point instances, len(points) >= k.
        k: number of closest points to keep.

    Returns:
        list of the k closest Points, in heap order.
    """
    max_heap = []
    # Seed the heap with the first k points.
    for i in range(k):
        heappush(max_heap, points[i])
    # A closer point replaces the current farthest (heap root).
    for i in range(k, len(points)):
        if points[i].euclidean_dist() < max_heap[0].euclidean_dist():
            heappop(max_heap)
            heappush(max_heap, points[i])
    return list(max_heap)
def main():
    """Demo: print the 2 of 3 sample points closest to the origin."""
    result = find_closest_points([Point(1, 3), Point(3, 4), Point(2, -1)], 2)
    print("Here are the k points closest the origin: ", end='')
    for point in result:
        point.print_point()


# Note: runs on import as well — there is no __main__ guard.
main()
|
[
"flogothetis95@gmail.com"
] |
flogothetis95@gmail.com
|
15ed73aa9d59827398ca3d5f8fe89d7baaf23d8d
|
f5645f685c8be36711f71c3a6763d6a4f93788b5
|
/sandbox/settings/test.py
|
9ef14ae7a9e7f1cd9b73a4a8de05e269cc2be55b
|
[
"MIT"
] |
permissive
|
pincoin/rakmai
|
ef034c6b6d5501cd433869446275f4f3b622a73b
|
fe41fd0ab88bf143e65b450ceb798741d0f80330
|
refs/heads/master
| 2023-03-02T13:24:10.885714
| 2023-02-27T14:37:10
| 2023-02-27T14:37:10
| 112,416,258
| 13
| 4
|
MIT
| 2023-02-15T18:54:11
| 2017-11-29T02:34:15
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,098
|
py
|
# Django settings used by the sandbox project when running tests.
SECRET_KEY = 'rakmai_fake_key'  # dummy key for tests only

# Core Django applications.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

# Third-party apps plus the project's own applications.  The social
# auth providers are disabled for tests.
INSTALLED_APPS += [
    'mptt',
    'taggit',
    'django.contrib.sites',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    # 'allauth.socialaccount.providers.facebook',
    # 'allauth.socialaccount.providers.google',
    # 'allauth.socialaccount.providers.kakao',
    # 'allauth.socialaccount.providers.line',
    'import_export',
    'easy_thumbnails',
    'crispy_forms',
    'rakmai',
    'member',
    'blog',
    'board',
    'book',
    'shop',
    'help',
    'rabop',
    'banner',
]

ROOT_URLCONF = 'sandbox.urls'

# Local SQLite database dedicated to the test run.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db_test.sqlite3',
    }
}

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# URL prefixes for the admin, back-office and API endpoints.
ADMIN_URL = 'secret-admin/'
RABOP_URL = 'rabop/'
API_URL = 'api/'

# Allauth
# Member
# Blog
# Board
# Forum

# Bleach sanitizing text fragments: whitelist of HTML tags/attributes
# allowed in user-submitted content.
BLEACH_ALLOWED_TAGS = [
    'h1', 'h2', 'h3', 'h4', 'h5', 'ol', 'ul', 'li', 'div', 'p', 'code', 'blockquote', 'pre', 'span', 'table', 'thead',
    'tbody', 'tr', 'th', 'td', 'a', 'em', 'strong', 'hr', 'img'
]

BLEACH_ALLOWED_ATTRIBUTES = {
    '*': ['class', 'id'],
    'a': ['href', 'rel'],
    'img': ['alt', 'src'],
}

# crispy-form
CRISPY_TEMPLATE_PACK = 'bootstrap4'

# Dummy cache for testing (no data is ever stored).
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        'TIMEOUT': 300,
    }
}

# Two-factor auth is disabled in the test environment.
GOOGLE_OTP_ENABLED = False
|
[
"pincoins@gmail.com"
] |
pincoins@gmail.com
|
50f57cdf741ff10781381d4764ff69474278b5b8
|
124b9b3aa4d6c4e1014e2119a32b711c9bfe3b78
|
/Problem Solving/Birthday cake candles.py
|
b40d1a9bc11bff36ff280ded0ffb1d7af6ec3d14
|
[] |
no_license
|
Devendra33/HackerRank
|
5f4929c1161fade3ed1a593b847403943e757bdb
|
980f8577677e24da654627b35fbfccb69b17f218
|
refs/heads/master
| 2022-12-14T05:13:30.405197
| 2020-09-12T09:24:31
| 2020-09-12T09:24:31
| 264,129,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the birthdayCakeCandles function below.
def birthdayCakeCandles(ar, ar_count):
    """Return how many candles share the maximum height.

    ``ar_count`` is kept only for signature compatibility with the
    HackerRank stub; the candle list length is taken from ``ar``.
    """
    tallest = max(ar)
    return sum(1 for height in ar if height == tallest)
if __name__ == '__main__':
    # HackerRank harness: result is written to the file named by the
    # OUTPUT_PATH environment variable, not to stdout.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    ar_count = int(input())
    ar = list(map(int, input().rstrip().split()))

    result = birthdayCakeCandles(ar, ar_count)

    fptr.write(str(result) + '\n')
    fptr.close()
|
[
"noreply@github.com"
] |
Devendra33.noreply@github.com
|
dcd82fd2a59803baeb191e7c636fb4021b62689e
|
56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e
|
/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544839/HTT_24Jul_newTES_manzoni_Up_Jobs/Job_46/run_cfg.py
|
d0f4e8c6b8e8c8ce3a1c19aadea1c9c7a03b7ca4
|
[] |
no_license
|
rmanzoni/HTT
|
18e6b583f04c0a6ca10142d9da3dd4c850cddabc
|
a03b227073b2d4d8a2abe95367c014694588bf98
|
refs/heads/master
| 2016-09-06T05:55:52.602604
| 2014-02-20T16:35:34
| 2014-02-20T16:35:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
# CMSSW batch-job configuration: extends the shared base_cfg and
# overrides the input source with this job's slice of CMG tuples.
import FWCore.ParameterSet.Config as cms

import os,sys
# Make the job directory importable so base_cfg can be found.
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0_1377544839/HTT_24Jul_newTES_manzoni_Up_Jobs')
from base_cfg import *

# Input: five DY3JetsToLL Summer12 MC tuples assigned to this job.
process.source = cms.Source("PoolSource",
    noEventSort = cms.untracked.bool(True),
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
    duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
    fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_262.root',
        '/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_263.root',
        '/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_264.root',
        '/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_265.root',
        '/store/cmst3/user/cmgtools/CMG/DY3JetsToLL_M-50_TuneZ2Star_8TeV-madgraph/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/V5_B/PAT_CMG_V5_16_0/cmgTuple_266.root')
    )
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
ac3f681ff49d500b4fa57354e421919c40815772
|
d1bcd1a55221d52cab88f397183ee6492fc7f5a5
|
/workmail-translate-email/src/translate_helper.py
|
3fb6cccbe58ab4aa99341ff7ff29997ad198d655
|
[
"Apache-2.0"
] |
permissive
|
aws-samples/amazon-workmail-lambda-templates
|
fccd1b490d69a08b512e2e666d4a85000745b793
|
d7b83d7f4499768f60fd115f5f995e1d8daccf89
|
refs/heads/master
| 2022-06-30T22:22:43.030710
| 2022-06-30T19:05:15
| 2022-06-30T19:05:15
| 168,240,710
| 39
| 18
|
Apache-2.0
| 2022-06-30T19:05:16
| 2019-01-29T22:38:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
import boto3
# Module-level AWS clients shared by the helpers below; created once at import.
comprehend = boto3.client(service_name='comprehend')
translate = boto3.client(service_name='translate')
def detect_language(text):
    """
    Detects the dominant language in a text

    Parameters
    ----------
    text: string, required
        Input text

    Returns
    -------
    string
        Representing language code of the dominant language, or '' when
        Comprehend returns no candidate languages
    """
    # Sending call to get language
    result = comprehend.detect_dominant_language(Text=text)['Languages']
    if not result:
        return ''
    # The result can contain more than one language; keep the one with the
    # highest confidence score (ties resolve to the first, as before).
    return max(result, key=lambda lang: lang['Score'])['LanguageCode']
def translate_text(text, source_lang, destination_lang):
    """
    Translates given text from source language into destination language

    Parameters
    ----------
    text: string, required
        Input text in source language

    Returns
    -------
    string
        Translated text in destination language
    """
    response = translate.translate_text(
        Text=text,
        SourceLanguageCode=source_lang,
        TargetLanguageCode=destination_lang,
    )
    return response.get('TranslatedText')
|
[
"noreply@github.com"
] |
aws-samples.noreply@github.com
|
546a4af39360350465c0ca3bda6e5dabafad1e0c
|
3208f15876e5ae0275366763f57380f383eae55b
|
/manage.py
|
baef8477b5382806b9d2514ca37998eb21f3ff95
|
[
"MIT"
] |
permissive
|
jpadilla/feedleap
|
05d4abbd21408ec2c4d1f2a99aaba0fe22a7e3f7
|
06d87a680a4bd2ead550b3540e58a8c520a733ba
|
refs/heads/master
| 2023-05-31T19:22:14.143515
| 2014-06-27T13:02:52
| 2014-06-27T13:02:52
| 9,079,296
| 1
| 1
| null | 2013-05-05T17:05:29
| 2013-03-28T14:53:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,004
|
py
|
#!/usr/bin/env python
import os
import sys
import re
def read_env():
    """Pulled from Honcho code with minor updates, reads local default
    environment variables from a .env file located in the project root
    directory.
    """
    try:
        with open('.env') as env_file:
            lines = env_file.read().splitlines()
    except IOError:
        # No .env file is fine; there is simply nothing to load.
        lines = []
    for raw_line in lines:
        assignment = re.match(r'\A([A-Za-z_0-9]+)=(.*)\Z', raw_line)
        if not assignment:
            continue
        key, value = assignment.groups()
        single_quoted = re.match(r"\A'(.*)'\Z", value)
        if single_quoted:
            value = single_quoted.group(1)
        double_quoted = re.match(r'\A"(.*)"\Z', value)
        if double_quoted:
            # Double-quoted values additionally have backslash escapes undone.
            value = re.sub(r'\\(.)', r'\1', double_quoted.group(1))
        # setdefault: real environment variables win over .env defaults.
        os.environ.setdefault(key, value)
if __name__ == "__main__":
    # Default settings module; a pre-set DJANGO_SETTINGS_MODULE wins.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "feedleap.settings.base")
    from django.core.management import execute_from_command_line
    # Load .env defaults before dispatching the management command.
    read_env()
    execute_from_command_line(sys.argv)
|
[
"jpadilla@webapplicate.com"
] |
jpadilla@webapplicate.com
|
03d5325ec088f2701d3826c732407eeba995016a
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/g7sh7oLoArRLmM2ky_6.py
|
d4615fa5b39a2c4ba13bdd9812cce19b2372f521
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,875
|
py
|
import re
def baconify(msg, mask=""):
    """Bacon-style cipher that hides a message in letter case.

    Without *mask*, treat *msg* as ciphertext: drop non-word characters and
    decode each run of five letters (uppercase -> 'u', lowercase -> 'l').
    With *mask*, encode *msg* by re-casing the alphabetic characters of
    *mask* and return the re-cased mask.
    """
    # a..z follow the 5-bit binary pattern with '0' -> 'u' and '1' -> 'l';
    # '.' and ' ' use two extra codes outside that pattern.
    encode_key = {
        ch: format(i, "05b").replace("0", "u").replace("1", "l")
        for i, ch in enumerate("abcdefghijklmnopqrstuvwxyz")
    }
    encode_key["."] = "llllu"
    encode_key[" "] = "lllll"
    decode_key = {code: ch for ch, code in encode_key.items()}

    if not mask:
        # Decode: keep only word characters, read the case pattern in
        # complete groups of five (a trailing partial group is dropped).
        stripped = re.sub(r"[\d\W\s]", "", msg)
        symbols = "".join("l" if ch.islower() else "u" for ch in stripped)
        usable = len(symbols) - len(symbols) % 5
        return "".join(
            decode_key[symbols[i:i + 5]] for i in range(0, usable, 5)
        )

    # Encode: flip the case of the mask's letters; non-letters keep their
    # position but carry no information.
    out = list(mask)
    pos = 0
    for ch in re.sub("[!?:;'\"]", "", msg).lower():
        for symbol in encode_key[ch]:
            while not out[pos].isalpha():
                pos += 1
            out[pos] = out[pos].upper() if symbol == "u" else out[pos].lower()
            pos += 1
    return "".join(out)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
e710851a689538325d804091ca103627905d86d5
|
8329282a8fda056d705c1af6dbcd0de1ed7ca25e
|
/.history/textutiles/textutiles/views_20210522225246.py
|
ec3d62029c51e1f60b0b7c91f234a8013ae9d2c6
|
[] |
no_license
|
ritikalohia/Django-beginners-
|
c069b16867407ef883bb00c6faf4f601921c118a
|
829e28ab25201853de5c71a10ceff30496afea52
|
refs/heads/main
| 2023-05-04T03:34:29.082656
| 2021-05-22T17:38:21
| 2021-05-22T17:38:21
| 369,869,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,784
|
py
|
#created
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Render the text-utils home page."""
    #params = {'name' : 'Ritika', 'place' : 'Mars'}
    return render(request, 'index.html')
    #return HttpResponse("Home")
def contact(request):
    """Render the static contact page."""
    return render(request, 'contact.html')
def about(request):
    """Render the static about page."""
    return render(request, 'about_us.html')
def analyze(request):
    """Apply the transformations selected on the form to the submitted
    text and render the result page.

    Reads ``text`` plus one checkbox flag per transformation from POST;
    transformations compose in a fixed order.  Always returns a rendered
    response (the original fell off the end and returned ``None`` whenever
    any option was selected, because every in-branch ``return`` was
    commented out).
    """
    djtext = request.POST.get('text', 'default')
    removepunc = request.POST.get('removepunc', 'off')
    fullcaps = request.POST.get('fullcaps', 'off')
    newlineremover = request.POST.get('newlineremover', 'off')
    # BUG FIX: the original line ended with a trailing comma, making this a
    # one-element tuple and silently disabling the space-remover feature.
    spaceremover = request.POST.get('spaceremover', 'off')
    charcount = request.POST.get('charcount', 'off')

    purposes = []

    if removepunc == "on":
        punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_'''
        djtext = ''.join(char for char in djtext if char not in punctuations)
        purposes.append('removed punctuations')

    if fullcaps == "on":
        djtext = djtext.upper()
        purposes.append('changed to UPPERCASE')

    if newlineremover == "on":
        djtext = djtext.replace('\n', '').replace('\r', '')
        purposes.append('removed new lines')

    if spaceremover == "on":
        # Collapse runs of spaces: drop a space whenever the next character
        # is also a space.  The bound check avoids the original's IndexError
        # on the final character.
        collapsed = ""
        for index, char in enumerate(djtext):
            if char == " " and index + 1 < len(djtext) and djtext[index + 1] == " ":
                continue
            collapsed = collapsed + char
        djtext = collapsed
        purposes.append('extra space removed')

    if charcount == "on":
        purposes.append('counted characters')
        # As in the original, the character count replaces the analyzed text.
        analyzed_output = len(djtext)
    else:
        analyzed_output = djtext

    params = {
        'purpose': ', '.join(purposes) or 'no operation selected',
        'analyzed_text': analyzed_output,
    }
    return render(request, 'analyze.html', params)
# def capfirst(request):
# return HttpResponse("capitalize first")
|
[
"rtklohia@gmail.com"
] |
rtklohia@gmail.com
|
98f694263451c4748f27f993918222f436b573c9
|
a32ca3544bb5a587e5fd7aaa1c73ac0fb918f11e
|
/hypha/apply/funds/migrations/0102_add_projectapprovalform_to_fundbase_labbase.py
|
882caed4c6e476984fe371711a1304045f239190
|
[
"BSD-3-Clause"
] |
permissive
|
jvasile/hypha
|
87904bf514e7cf5af63c7146eaaa49d3611fd57f
|
b5ccad20dd3434f53a2b9d711fac510124c70a6e
|
refs/heads/main
| 2023-07-08T04:10:08.233259
| 2023-06-20T05:35:29
| 2023-06-20T05:35:29
| 354,630,183
| 0
| 0
|
BSD-3-Clause
| 2021-04-04T19:32:38
| 2021-04-04T19:32:38
| null |
UTF-8
|
Python
| false
| false
| 2,110
|
py
|
# Generated by Django 3.2.15 on 2022-09-07 12:35
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
    """Replace the single ``approval_form`` FK on funds and labs with
    ordered through-models, so each fund or lab can carry several project
    approval forms."""

    dependencies = [
        ('application_projects', '0055_alter_project_status_add_pafreviewersrole'),
        ('funds', '0101_auto_20220722_0844'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='applicationbase',
            name='approval_form',
        ),
        migrations.RemoveField(
            model_name='labbase',
            name='approval_form',
        ),
        # Ordered M2M through-model: lab -> project approval forms.
        migrations.CreateModel(
            name='LabBaseProjectApprovalForm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('form', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='application_projects.projectapprovalform')),
                ('lab', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='approval_forms', to='funds.labbase')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        # Ordered M2M through-model: application/fund -> project approval forms.
        migrations.CreateModel(
            name='ApplicationBaseProjectApprovalForm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('application', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='approval_forms', to='funds.applicationbase')),
                ('form', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='application_projects.projectapprovalform')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
    ]
|
[
"sandeepsajan0@gmail.com"
] |
sandeepsajan0@gmail.com
|
82b262e6528870bd41e0231a81c5d9242080a628
|
e567b06c895054d88758366e769de77ee693a568
|
/SciComputing with Python/lesson_05-15/asteroids.py
|
19db8c21126f49a799c2586684f0c759519408df
|
[
"MIT"
] |
permissive
|
evtodorov/aerospace
|
68986b4ae772e1de8cc7982b4f8497b6423ac8cc
|
54a1b58c3c0b02c0eaa3aef14d0e732d7f867566
|
refs/heads/main
| 2023-01-19T17:52:29.520340
| 2020-11-29T13:23:31
| 2020-11-29T13:23:31
| 315,653,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,121
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 15 14:13:36 2014
@author: etodorov
"""
import pygame as pg
import numpy as np
#Loading resources
pg.init()
# The background image defines the window size.
bg = pg.image.load("background.jpg")
scrWidth, scrHeight = bg.get_width(), bg.get_height()
scr = pg.display.set_mode((scrWidth, scrHeight))
scr.blit(bg,(0,0))
# Player sprite and cached dimensions (used by the collision test).
ship = pg.image.load("ship.gif")
shipRect = ship.get_rect()
wShip = ship.get_width()
hShip = ship.get_height()
# Asteroid sprite and cached dimensions.
ast = pg.image.load("ast1.gif")
astRect = ast.get_rect()
wAst = ast.get_width()
hAst = ast.get_height()
def detectCollision(x1,y1,w1,h1,x2,y2,w2,h2):
    """Return True when the two axis-aligned rectangles overlap (inclusive).

    Rectangle 1 is (x1, y1) with size (w1, h1); rectangle 2 likewise.
    The original only tested whether one of rectangle 1's *top corners*
    lay inside rectangle 2, so it missed overlaps where rectangle 1
    straddles rectangle 2 horizontally or overlaps only along its bottom
    edge.  This is the standard AABB intersection test, keeping the
    original's inclusive (touching counts) comparisons.
    """
    return (x1 <= x2 + w2 and x2 <= x1 + w1 and
            y1 <= y2 + h2 and y2 <= y1 + h1)
print "Resources Loaded. Initializing game."
#initialize game loop
xShip = scrWidth/2
v = 100 #px/s
vAst = 400 #px/s
xAst = np.array([])
yAst = np.array([])
totAst = 0
tAst = .3 #threshold
t0Ast = 0
running = True
t0 = pg.time.get_ticks()*.001
while running:
pg.event.pump()
#get time
t = pg.time.get_ticks()*.001
dt = t-t0
t0 = t
#events
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
keys = pg.key.get_pressed()
if keys[pg.K_ESCAPE]:
running = False
if keys[pg.K_LEFT]:
xShip += -v*dt
if keys[pg.K_RIGHT]:
xShip += v*dt
#engine
yShip = scrHeight - ship.get_height()
dtAst = pg.time.get_ticks()*.001 - t0Ast
if(dtAst>=tAst):
t0Ast = pg.time.get_ticks()*.001
xAst = np.append(xAst,np.random.random_integers(0,scrWidth-ship.get_width()))
yAst = np.append(yAst,ship.get_height()+1.)
totAst += 1
yAst += vAst*dt
xAst = xAst[yAst<scrHeight]
yAst = yAst[yAst<scrHeight]
#draw
scr.blit(bg,(0,0))
for x,y in zip(xAst,yAst):
scr.blit(ast,(int(x),int(y)))
if(detectCollision(xShip,yShip,wShip,hShip,x,y,wAst,hAst)):
running = False
scr.blit(ship,(int(xShip),int(yShip)))
pg.display.flip()
score = totAst - len(xAst)
print "Your score is", score
pg.quit()
|
[
"evgeni.todorov@tum.de"
] |
evgeni.todorov@tum.de
|
d0c0ada98ca93fd50965148c85fac09b8295da92
|
4a28995530e5766675869266704fa3b59e6b9908
|
/bravo/tests/infini/test_packets.py
|
f50d0439be585dfeb0bfd3b52df85d8a54feeb0c
|
[
"MIT"
] |
permissive
|
CyberFlameGO/bravo
|
e732dc87309e98e52fb02195d542f3105486b9c8
|
7be5d792871a8447499911fa1502c6a7c1437dc3
|
refs/heads/master
| 2021-12-04T02:04:00.138404
| 2014-09-09T04:43:18
| 2014-09-09T04:43:18
| 415,181,334
| 0
| 0
|
NOASSERTION
| 2023-08-16T22:12:36
| 2021-10-09T02:34:28
| null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
from twisted.trial import unittest
from bravo.infini.packets import packets, parse_packets
class TestInfiniPacketParsing(unittest.TestCase):
    """Parse raw Infini-protocol byte strings into structured packets."""

    def test_ping(self):
        # Packet 0x00 (ping): header bytes then uid and timestamp payload.
        raw = "\x00\x01\x00\x00\x00\x06\x00\x10\x00\x4d\x3c\x7d\x7c"
        parsed = packets[0].parse(raw)
        self.assertEqual(parsed.header.identifier, 0x00)
        self.assertEqual(parsed.header.flags, 0x01)
        self.assertEqual(parsed.payload.uid, 16)
        self.assertEqual(parsed.payload.timestamp, 5061757)

    def test_disconnect(self):
        # Packet 0xff (disconnect): carries a human-readable explanation.
        raw = "\xff\x00\x00\x00\x00\x19\x00\x17Invalid client version!"
        parsed = packets[255].parse(raw)
        self.assertEqual(parsed.header.identifier, 0xff)
        self.assertEqual(parsed.payload.explanation,
            "Invalid client version!")
class TestInfiniPacketStream(unittest.TestCase):
    """Stream-level parsing of concatenated Infini packets."""

    def test_ping_stream(self):
        raw = "\x00\x01\x00\x00\x00\x06\x00\x10\x00\x4d\x3c\x7d\x7c"
        # NOTE(review): no assertions on `packets`/`leftovers`; this only
        # verifies parse_packets does not raise — consider asserting contents.
        packets, leftovers = parse_packets(raw)
|
[
"MostAwesomeDude@gmail.com"
] |
MostAwesomeDude@gmail.com
|
f6ed307305ac991a2a922af645d6169197b603d8
|
d2fedd2085cbdbd5e54228abf0633001989787cc
|
/36.COUNT PALINDROMIC SUBSEQUENCE IN A RANGE.py
|
55ab57ce87bc84e1d63299856106adef6cff022f
|
[] |
no_license
|
KumarAmbuj/dynamic-programing-intermediate
|
228d25205d370ebc329eaf6ffbcfbc2853b18abe
|
4b40f322f57762e0cf264fb2024ae56d1fa3243b
|
refs/heads/main
| 2023-02-23T20:42:26.865855
| 2021-01-23T11:10:44
| 2021-01-23T11:10:44
| 332,188,114
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
def findpalindromicsequence(s, l, r):
    """Print (twice, via two independent counting strategies) the number of
    palindromic substrings of *s* whose start and end indices both lie in
    the inclusive window [l, r]."""
    n = len(s)
    # dp[i][j] is True when s[i..j] is a palindrome; filled by gap length.
    dp = [[False] * n for _ in range(n)]
    for gap in range(n):
        for i in range(n - gap):
            j = i + gap
            if gap == 0:
                dp[i][j] = True
            elif gap == 1:
                dp[i][j] = s[i] == s[j]
            else:
                dp[i][j] = s[i] == s[j] and dp[i + 1][j - 1]
    # Method 1: walk the diagonals restricted to the [l, r] window.
    count = 0
    for gap in range(r - l + 1):
        for i in range(l, r - gap + 1):
            if dp[i][i + gap]:
                count += 1
    # Method 2: plain double loop over all (i, j) pairs inside the window.
    count1 = sum(
        1 for i in range(l, r + 1) for j in range(i, r + 1) if dp[i][j]
    )
    print(count1)
    print(count)
# Demo runs: each call prints the in-window palindrome count twice
# (once per counting method inside the function).
s='abccbc'
findpalindromicsequence(s,3,5)
s = "xyaabax"
findpalindromicsequence(s,3,5)
s = "xyaabax"
findpalindromicsequence(s,2,3)
|
[
"noreply@github.com"
] |
KumarAmbuj.noreply@github.com
|
eafec4733775b023c1b2b9de75a58d4fa681f74c
|
6c53847f9956edc8f31b23c24b1786d1b9789f03
|
/gps-server-master/setup.py
|
d313f9422a4ec8e4b70cb05bc8e2ad640ac49b23
|
[] |
no_license
|
kunal1510010/Quikmile
|
c64a9264798cf834aaf32ecb4653b9b81dab0dd5
|
244d2749eb8438ce858de51c088a52ca3de58992
|
refs/heads/master
| 2022-12-12T08:03:50.628252
| 2018-11-29T15:24:46
| 2018-11-29T15:24:46
| 159,226,383
| 0
| 0
| null | 2022-12-08T02:27:44
| 2018-11-26T20:10:20
|
Python
|
UTF-8
|
Python
| false
| false
| 580
|
py
|
from setuptools import setup, find_packages


def _read_requirement_names(path):
    """Return the package names (before any ``==`` pin) listed in *path*.

    Replaces the previous ``pip.req.parse_requirements`` /
    ``pip.download.PipSession`` usage: pip's internal API was removed in
    pip 10, so setup.py crashed on any modern pip.  Blank lines and
    ``#`` comments are ignored.
    """
    with open(path) as req_file:
        return [
            line.split('==')[0].strip()
            for line in req_file
            if line.strip() and not line.strip().startswith('#')
        ]


install_requires = _read_requirement_names("./requirements.txt")

setup(
    name='gps-server',
    packages=find_packages(exclude=['examples', 'tests']),
    version='1.0',
    description='GPS Server and Kafka Producer',
    author='Abhishek Verma, Chirag',
    author_email='abhishek@quikmile.com',
    package_data={'': ['*.json']},
    install_requires=install_requires
)
|
[
"kunal1510010@gmail.com"
] |
kunal1510010@gmail.com
|
2131681365efc05ba1105dd453257b1ea60ea71e
|
82a9077bcb5a90d88e0a8be7f8627af4f0844434
|
/google-cloud-sdk/lib/tests/unit/surface/compute/org_security_policies/associations/create_associations_test.py
|
d67314583ec45ddf3221e10dda33832b9c249e68
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
piotradamczyk5/gcloud_cli
|
1ae2553595e569fad6ce84af62b91a7ee5489017
|
384ece11040caadcd64d51da74e0b8491dd22ca3
|
refs/heads/master
| 2023-01-01T23:00:27.858583
| 2020-10-21T04:21:23
| 2020-10-21T04:21:23
| 290,238,061
| 0
| 0
| null | 2020-10-19T16:43:36
| 2020-08-25T14:31:00
|
Python
|
UTF-8
|
Python
| false
| false
| 4,684
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the organization security policy associations create subcommand."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py.testing import mock
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import base as calliope_base
from tests.lib import cli_test_base
from tests.lib import sdk_test_base
from tests.lib import test_case
from tests.lib.api_lib.util import waiter as waiter_test_base
class OrgSecurityPoliciesAssociationsCreateBetaTest(sdk_test_base.WithFakeAuth,
                                                    cli_test_base.CliTestBase,
                                                    waiter_test_base.Base):
  """Tests `compute org-security-policies associations create` on the beta track."""

  def SetUp(self):
    # Target the beta surface and mock the compute API client (no HTTP).
    self.track = calliope_base.ReleaseTrack.BETA
    self.api_version = 'beta'
    self.messages = core_apis.GetMessagesModule('compute', self.api_version)
    self.mock_client = mock.Client(
        core_apis.GetClientClass('compute', self.api_version),
        real_client=core_apis.GetClientInstance(
            'compute', self.api_version, no_http=True))
    self.mock_client.Mock()
    self.addCleanup(self.mock_client.Unmock)

  def CreateTestOrgSecurityPolicyMessage(self, **kwargs):
    """Build a minimal firewall-type org security policy message."""
    return self.messages.SecurityPolicy(
        description='test-description',
        displayName='test-sp',
        type=self.messages.SecurityPolicy.TypeValueValuesEnum.FIREWALL)

  def _GetOperationMessage(self, operation_name, status, resource_uri=None):
    """Build an Operation message in the given status for the mocked API."""
    return self.messages.Operation(
        name=operation_name,
        status=status,
        selfLink='https://compute.googleapis.com/compute/{0}/locations/'
        'global/operations/{1}'.format(self.api_version, operation_name),
        targetLink=resource_uri)

  def CreateTestOrgSecurityPolicyAssociationMessage(self, **kwargs):
    """Build the association payload used by the create request."""
    return self.messages.SecurityPolicyAssociation(
        attachmentId='organizations/12345', name='association-name')

  def testAssociationsCreateOrgSecurityPolicyWithOrganization(self):
    # Expect the AddAssociation call, then the operation poll until DONE.
    self.mock_client.organizationSecurityPolicies.AddAssociation.Expect(
        self.messages.ComputeOrganizationSecurityPoliciesAddAssociationRequest(
            securityPolicy='12345678910',
            securityPolicyAssociation=self
            .CreateTestOrgSecurityPolicyAssociationMessage(),
            replaceExistingAssociation=False),
        self._GetOperationMessage(
            operation_name='org-12345-operation-myop',
            status=self.messages.Operation.StatusValueValuesEnum.PENDING))
    self.mock_client.globalOrganizationOperations.Get.Expect(
        self.messages.ComputeGlobalOrganizationOperationsGetRequest(
            parentId='organizations/12345',
            operation='org-12345-operation-myop'),
        self._GetOperationMessage(
            operation_name='org-12345-operation-myop',
            status=self.messages.Operation.StatusValueValuesEnum.DONE,
            resource_uri='https://compute.googleapis.com/compute/{0}/'
            'locations/global/securityPolicies/{1}'.format(
                self.api_version, '12345678910')))
    self.Run('compute org-security-policies associations create '
             '--name association-name '
             '--organization 12345 '
             '--security-policy 12345678910')
    self.AssertOutputEquals('')
    self.AssertErrContains(
        'Add association of the organization Security Policy.')
class OrgSecurityPoliciesAssociationsCreateAlphaTest(
    OrgSecurityPoliciesAssociationsCreateBetaTest):
  """Reruns the beta test cases against the alpha track and API surface."""

  def SetUp(self):
    # Same fixture as the beta suite, but targeting the alpha API version.
    self.track = calliope_base.ReleaseTrack.ALPHA
    self.api_version = 'alpha'
    self.messages = core_apis.GetMessagesModule('compute', self.api_version)
    self.mock_client = mock.Client(
        core_apis.GetClientClass('compute', self.api_version),
        real_client=core_apis.GetClientInstance(
            'compute', self.api_version, no_http=True))
    self.mock_client.Mock()
    self.addCleanup(self.mock_client.Unmock)


if __name__ == '__main__':
  test_case.main()
|
[
"code@bootstraponline.com"
] |
code@bootstraponline.com
|
526da7b66b7f3a0e2d31d78cfa9e1e610ce48f3b
|
15e6385746ccf4b8eb6c6e302aca236021bb8781
|
/BinaryTree and Divide and Conquer/le257_binaryTreePaths.py
|
04f6722ba0b8cd3e57bdecab9d2bbb33d805064f
|
[] |
no_license
|
akb46mayu/Data-Structures-and-Algorithms
|
11c4bbddc9b4d286e1aeaa9481eb6a620cd54746
|
de98494e14fff3e2a468da681c48d60b4d1445a1
|
refs/heads/master
| 2021-01-12T09:51:32.618362
| 2018-05-16T16:37:18
| 2018-05-16T16:37:18
| 76,279,268
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
"""
Given a binary tree, return all root-to-leaf paths.
For example, given the following binary tree:
1
/ \
2 3
\
5
All root-to-leaf paths are:
["1->2->5", "1->3"]
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def binaryTreePaths(self, root):
        """
        :type root: TreeNode
        :rtype: List[str]
        """
        paths = []
        if root is not None:
            self.btreeHelper(root, [], paths)
        return paths

    def btreeHelper(self, root, path, paths):
        """Depth-first walk: accumulate value strings along the current
        root-to-node prefix and emit the joined path at each leaf."""
        if root is None:
            return
        path.append(str(root.val))
        if root.left is None and root.right is None:
            paths.append('->'.join(path))
        else:
            self.btreeHelper(root.left, path, paths)
            self.btreeHelper(root.right, path, paths)
        # Drop this node so the shared prefix list is correct for siblings.
        path.pop()
|
[
"noreply@github.com"
] |
akb46mayu.noreply@github.com
|
828d680c2d1538b21c4b8b9195efc0e26cac2b28
|
de64b143a346585f51590bd674e8d13bbc672386
|
/algorithm/2023/0321_40_Combination_Sum_II/myunghak.py
|
8d64f62cc7c6d9a896ef5565521251d1cff3a514
|
[] |
no_license
|
ai-kmu/etc
|
304ec20f59e4026025abdcbcae21863c80630dcb
|
9c29941e19b7dd2a2037b110dd6e16690e9a0cc2
|
refs/heads/master
| 2023-08-21T16:30:31.149956
| 2023-08-21T16:26:19
| 2023-08-21T16:26:19
| 199,843,899
| 3
| 24
| null | 2023-05-31T09:56:59
| 2019-07-31T11:36:16
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 796
|
py
|
# 답보고 풀었습니다. 풀이 안해주셔도 되요
from collections import deque
class Solution:
    def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
        """Return every unique combination of candidates summing to target;
        each candidate may be used at most once."""
        candidates.sort()
        combos = []
        chosen = []

        def explore(begin, remaining):
            if remaining == 0:
                combos.append(chosen[:])
                return
            for idx in range(begin, len(candidates)):
                value = candidates[idx]
                if value > remaining:
                    # Sorted input: every later candidate is too large too.
                    break
                if idx > begin and value == candidates[idx - 1]:
                    # Skip duplicate values at the same depth to avoid
                    # producing the same combination twice.
                    continue
                chosen.append(value)
                explore(idx + 1, remaining - value)
                chosen.pop()

        explore(0, target)
        return combos
|
[
"noreply@github.com"
] |
ai-kmu.noreply@github.com
|
09de35b9c9569a3d6a18907aa2a1c17e263a55cb
|
32f34baaa620d6ec945a08c842123a8872c7a2a5
|
/blog/admin.py
|
31122591ac74349107d324ee346326f92261f9a7
|
[] |
no_license
|
devArist/bankapp_kivy
|
e3590723643d30746c3ffce9d70868b3524b6e30
|
701db3a856722a618fbcfa8fbd0b606415265ca9
|
refs/heads/main
| 2023-07-18T06:38:00.148288
| 2021-08-27T14:05:47
| 2021-08-27T14:05:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
from django.contrib import admin
from blog import models
from django.utils.safestring import mark_safe
# Register your models here.
@admin.register(models.Blog)
class BlogAdmin(admin.ModelAdmin):
    """Admin for Blog posts with an inline image preview in the changelist."""
    list_display = ('title', 'imageblog')
    search_fields = ('title',)

    def imageblog(self, obj):
        """Render the blog's image as a small thumbnail.

        The original markup left ``src`` unquoted and never closed the
        ``<img>`` tag, producing broken HTML in the admin list view.
        """
        return mark_safe(
            f"<img src='{obj.image.url}' style='width:150px; height:90px'>"
        )
|
[
"aridev97@gmail.com"
] |
aridev97@gmail.com
|
933a002ab324b2adef7d0dbfa19291d879c51704
|
35788f5ca40e5b9e14d4700084e0f3a7fd7f3033
|
/basic/conversion-teacher.py
|
a8f091bdf171ca3f3c8dc193324984e020ad5ccf
|
[] |
no_license
|
yunnyisgood/django-monaco
|
0c097385c0497b2bcbd66571f16a84d53b4913db
|
b83e9702a064f81d76be35a2e8f4c02c03b5255e
|
refs/heads/master
| 2023-05-23T03:53:26.934348
| 2021-06-14T14:29:43
| 2021-06-14T14:29:43
| 370,548,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,783
|
py
|
import pandas as pd
class Conversion(object):
    """Menu-driven demo of conversions between tuples, lists, dicts,
    strings and pandas DataFrames."""

    df = pd.DataFrame()

    @staticmethod
    def dp_create() -> ():
        """Return the sample tuple (1..9).

        No instance state is read, hence @staticmethod rather than an
        instance method.
        """
        return (1, 2, 3, 4, 5, 6, 7, 8, 9)

    @staticmethod
    def dp_to_list(dp) -> []:
        """Tuple -> list."""
        return list(dp)

    @staticmethod
    def list_to_float(dp) -> []:
        """List of numbers -> list of floats."""
        return [float(x) for x in dp]

    @staticmethod
    def float_to_num(dp) -> []:
        """List of floats -> list of ints (truncating)."""
        return [int(x) for x in dp]

    @staticmethod
    def list_to_dictionary(dp) -> {}:
        """List -> dict mapping str(element) to str(element)."""
        return dict(zip([str(i) for i in dp], [str(i) for i in dp]))

    @staticmethod
    def string_to_tuple(h) -> ():
        """String -> tuple of its characters."""
        return tuple(list(h))

    @staticmethod
    def tuple_to_list(h) -> []:
        """Tuple -> list."""
        return list(h)

    # BUG FIX: this was a plain instance method, so calling it as
    # c.dictionary_to_dataFrame(x) bound `dt` to the instance, not the dict.
    @staticmethod
    def dictionary_to_dataFrame(dt):
        """Dict -> DataFrame with the dict keys as the index."""
        return pd.DataFrame.from_dict(dt, orient='index')

    @staticmethod
    def main():
        """Interactive loop driving the conversion helpers in sequence."""
        c = Conversion()
        # Initialise working values so any menu order is safe (the original
        # raised NameError when e.g. option 2 was chosen before option 1).
        dp = ()
        dt = {}
        h = 'hello'
        df = pd.DataFrame()
        while 1:
            menu = input('0.Exit 1.String 2. Num 3. List 4. Dictionary 5.Tuple')
            if menu == '0':
                break
            # Create the tuple 1..9.
            elif menu == '1':
                dp = c.dp_create()
                print(dp)
            # Tuple -> list.
            elif menu == '2':
                dp = c.dp_to_list(dp)
                print(f'dp_to_list:{dp}')
            # List -> float list.
            elif menu == '3':
                dp = c.list_to_float(dp)
                print(f'list_to_float:{dp}')
            # Float list -> int list.
            elif menu == '4':
                dp = c.float_to_num(dp)
                print(f'float_to_num:{dp}')
            # List -> dict keyed by the stringified elements.
            # BUG FIX: the original discarded the result, so dt stayed empty.
            elif menu == '5':
                dt = c.list_to_dictionary(dp)
                print(f'list_to_dictionary:{dt}')
            # 'hello' -> tuple of characters.
            elif menu == '6':
                h = c.string_to_tuple(h)
                print(h)
            # Tuple -> list.
            elif menu == '7':
                h = c.tuple_to_list(h)
                print(h)
            # Dict -> DataFrame (BUG FIX: pass the dict itself, not a
            # DataFrame built from it, to match from_dict's expectations).
            elif menu == '8':
                df = c.dictionary_to_dataFrame(dt)
            else:
                continue
Conversion.main()
|
[
"cyeon0801@gmail.com"
] |
cyeon0801@gmail.com
|
6094e58198120626f12f5aa95fe32e016cc64ba5
|
20ed6e74d227e097a924e050bc82682381739fc7
|
/src/tx/functional/list.py
|
45c2627a517eeef89d294e0f38dfc335f6b719e2
|
[
"MIT"
] |
permissive
|
RENCI/tx-functional
|
c341f38293a889e125824822c47b0d1f1f3f87fb
|
45427ab06b7d029940e250a5f189997a8111d3f0
|
refs/heads/master
| 2022-12-11T11:53:34.830442
| 2020-09-05T17:56:40
| 2020-09-05T17:56:40
| 264,998,389
| 1
| 1
| null | 2020-05-18T17:14:53
| 2020-05-18T16:32:09
|
Python
|
UTF-8
|
Python
| false
| false
| 717
|
py
|
from .traversable import Traversable
from typing import Generic, TypeVar, Callable, Any, List
from .functor import Functor
from .applicative import Applicative
from .utils import foldl, foldr, Arrow
S = TypeVar("S")
T = TypeVar("T")
def rec(ma: List[S], b: T, f: Callable[[S, T], T]) -> T:
    """Right-fold *f* over *ma* with initial accumulator *b*."""
    return foldr(f, b, ma)
def _map(f: Arrow[S, T], ma: List[S]) -> List[T]:
    """Apply *f* to every element of *ma*, returning a materialized list."""
    return list(map(f, ma))
def append(l : List[T], a: T) -> List[T]:
    """Return a new list with *a* appended; *l* is not mutated."""
    return l + [a]
def sequenceA(m: Applicative, ma: List[Any]) -> Any:
    """Sequence a list of applicative values into an applicative of a list,
    folding left with the lifted ``append``."""
    return foldl(m.liftA2(append), m.pure([]), ma)
# Functor instance for the builtin list.
list_functor = Functor(_map)

def list_traversable(m: Applicative) -> Traversable:
    """Build the list Traversable instance for applicative *m*."""
    return Traversable(_map, lambda ma: sequenceA(m, ma))
|
[
"xuh@cs.unc.edu"
] |
xuh@cs.unc.edu
|
2a15671d6f800b0ed904eda1e0736d12f02e3e02
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/39/usersdata/120/13433/submittedfiles/dec2bin.py
|
9a40134fda961c27492e0c85f0f6f823e4b3bc9e
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
d=input('digite um numero na base decimal:')
cont=0
k=d
while k>0:
k=k//2
cont+=1
n=cont
i=0
soma=0
r=d
while i<=(n-1):
r=d%2
d=d//d
b=r*(10**i)
i+=1
soma=soma+b
print soma
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1b8ca6d4285b1bf2ba6e8ef64cd2e7b25a83d932
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res_bw/scripts/common/lib/idlelib/idle_test/test_pathbrowser.py
|
c74b4f69f1a9d7f33751f283cef5ff88fc8318bc
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814
| 2015-11-11T00:08:04
| 2015-11-11T00:08:04
| 45,803,240
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 652
|
py
|
# 2015.11.10 21:36:26 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/idlelib/idle_test/test_pathbrowser.py
import unittest
import idlelib.PathBrowser as PathBrowser
class PathBrowserTest(unittest.TestCase):
    """Smoke tests for idlelib's PathBrowser tree items."""

    def test_DirBrowserTreeItem(self):
        # Building the sub-list for directory '' must not raise; no return
        # value is asserted.
        d = PathBrowser.DirBrowserTreeItem('')
        d.GetSubList()

if __name__ == '__main__':
    unittest.main(verbosity=2, exit=False)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\idlelib\idle_test\test_pathbrowser.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:36:26 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
44d907b9a568613000577d7c233cfdfa14a4213a
|
032117bbf248a76abd25fcc2355bc8ade84fa76a
|
/django/theproject2/pro3/app3/views.py
|
be4a8ac1bbf617042c03024c19496ce33a98df2b
|
[] |
no_license
|
shefaligoel136/python_summer_training
|
ba8f28f6af008584b4239c73d466e4e9d35b4b01
|
0b97fea050342fe4ed95b18c5f7ed885a6c8ca23
|
refs/heads/master
| 2022-11-13T07:22:32.855717
| 2020-07-06T08:33:19
| 2020-07-06T08:33:19
| 277,480,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 754
|
py
|
from django.shortcuts import render
def home(request):
    """Render the calculator form page."""
    return render(request,'temphtml.html')
def solve1(request):
    """Evaluate a two-operand arithmetic expression from the posted form
    and re-render the calculator template with the result."""
    left = float(request.POST['num1'])
    right = float(request.POST['num2'])
    chosen_op = request.POST['op']
    # Dispatch table of the supported operators; lambdas keep evaluation
    # lazy, so e.g. division by zero only raises when '/' is selected.
    operations = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    if chosen_op in operations:
        result = operations[chosen_op](left, right)
        return render(request, 'temphtml.html', {'res': result})
    return render(request, 'temphtml.html', {'res': 'wrong expression'})
def solve2(request):
    """Evaluate the posted expression string and render the result.

    SECURITY(review): ``eval`` on raw request input is arbitrary code
    execution for anyone who can POST to this view; replace with a safe
    expression parser.
    """
    val = (request.POST['num'])
    # SECURITY: eval() of untrusted user input — see docstring.
    s = eval(val)
    return render(request,'temphtml.html',{'res1':s})
|
[
"goel136shefali@gmail.com"
] |
goel136shefali@gmail.com
|
99aadc30ade96f7a2b39bb1935c8d592ccd03ed7
|
49a167d942f19fc084da2da68fc3881d44cacdd7
|
/kubernetes_asyncio/test/test_policy_v1beta1_id_range.py
|
6e5e62b8e765d805b6eb01144abad5213e8a04c2
|
[
"Apache-2.0"
] |
permissive
|
olitheolix/kubernetes_asyncio
|
fdb61323dc7fc1bade5e26e907de0fe6e0e42396
|
344426793e4e4b653bcd8e4a29c6fa4766e1fff7
|
refs/heads/master
| 2020-03-19T12:52:27.025399
| 2018-06-24T23:34:03
| 2018-06-24T23:34:03
| 136,546,270
| 1
| 0
|
Apache-2.0
| 2018-06-24T23:52:47
| 2018-06-08T00:39:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.policy_v1beta1_id_range import PolicyV1beta1IDRange # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestPolicyV1beta1IDRange(unittest.TestCase):
    """PolicyV1beta1IDRange unit test stubs"""
    # NOTE: auto-generated by swagger-codegen; the test body is an
    # intentionally empty placeholder (see FIXME below).

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testPolicyV1beta1IDRange(self):
        """Test PolicyV1beta1IDRange"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes_asyncio.client.models.policy_v1beta1_id_range.PolicyV1beta1IDRange()  # noqa: E501
        pass

if __name__ == '__main__':
    unittest.main()
|
[
"tomasz.prus@gmail.com"
] |
tomasz.prus@gmail.com
|
17e41741acf2c53e5af3b84136bdd4fb2cea28cd
|
ed6b358cfaf9bc61dab608b117c2cf0abcf90854
|
/xichuangzhu/controllers/user.py
|
5cf56bb8f0920a95c7c74bd335ae0e05c71f22c6
|
[] |
no_license
|
wallaceyuan/xichuangzhu
|
faa8fdec2a670661165d351ac3311126c8fc91e3
|
ec45aa8b3f4b1e6b9b70537e270be89e97034c99
|
refs/heads/master
| 2021-01-20T21:34:45.949361
| 2014-05-23T07:29:50
| 2014-05-23T07:29:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,729
|
py
|
# coding: utf-8
# User-facing pages: profile, reviews, topics, uploaded images, collections.
from __future__ import division
from flask import render_template, Blueprint, g
from ..models import User, CollectWork, CollectWorkImage, Work, WorkImage, WorkReview
from ..utils import check_is_me
from ..permissions import user_permission
bp = Blueprint('user', __name__)
@bp.route('/<user_abbr>')
def view(user_abbr):
    """User profile page (summary of reviews, topics and images)."""
    user = User.query.filter(User.abbr == user_abbr).first_or_404()
    query = user.work_reviews
    # Unpublished reviews are visible only to their author.
    if not check_is_me(user.id):
        query = query.filter(WorkReview.is_publish == True)
    work_reviews = query.limit(3)
    work_reviews_num = query.count()
    topics = user.topics.limit(3)
    work_images = user.work_images.limit(16)
    return render_template('user/user.html', user=user, work_reviews=work_reviews,
                           work_reviews_num=work_reviews_num, topics=topics,
                           work_images=work_images)
@bp.route('/<user_abbr>/work_reviews', defaults={'page': 1})
@bp.route('/<user_abbr>/work_reviews/page/<int:page>')
def work_reviews(user_abbr, page):
    """Paginated list of the user's work reviews."""
    user = User.query.filter(User.abbr == user_abbr).first_or_404()
    work_reviews = user.work_reviews
    # Hide unpublished reviews from everyone but the author.
    if not check_is_me(user.id):
        work_reviews = work_reviews.filter(WorkReview.is_publish == True)
    paginator = work_reviews.paginate(page, 10)
    return render_template('user/work_reviews.html', user=user, paginator=paginator)
@bp.route('/<user_abbr>/topics', defaults={'page': 1})
@bp.route('/<user_abbr>/topics/page/<int:page>')
def topics(user_abbr, page):
    """Paginated list of topics posted by the user."""
    user = User.query.filter(User.abbr == user_abbr).first_or_404()
    paginator = user.topics.paginate(page, 10)
    return render_template('user/topics.html', user=user, paginator=paginator)
@bp.route('/<user_abbr>/work_images', defaults={'page': 1})
@bp.route('/<user_abbr>/work_images/page/<int:page>')
def work_images(user_abbr, page):
    """Paginated grid of work images uploaded by the user."""
    user = User.query.filter(User.abbr == user_abbr).first_or_404()
    paginator = user.work_images.paginate(page, 16)
    return render_template('user/work_images.html', user=user, paginator=paginator)
@bp.route('/collects')
@user_permission
def collects():
    """The signed-in user's collections overview (recent works and images)."""
    collect_works = Work.query.join(CollectWork).filter(CollectWork.user_id == g.user.id).order_by(
        CollectWork.create_time.desc()).limit(6)
    collect_work_images = WorkImage.query.join(CollectWorkImage).filter(
        CollectWorkImage.user_id == g.user.id).order_by(
        CollectWorkImage.create_time.desc()).limit(9)
    return render_template('user/collects.html', user=g.user, collect_works=collect_works,
                           collect_work_images=collect_work_images)
@bp.route('/collect_works', defaults={'page': 1})
@bp.route('/collect_works/page/<int:page>')
@user_permission
def collect_works(page):
    """Paginated list of literary works collected by the signed-in user."""
    paginator = Work.query.join(CollectWork).filter(
        CollectWork.user_id == g.user.id).order_by(
        CollectWork.create_time.desc()).paginate(page, 10)
    return render_template('user/collect_works.html', paginator=paginator)
@bp.route('/collect_work_images', defaults={'page': 1})
@bp.route('/collect_work_images/page/<int:page>')
@user_permission
def collect_work_images(page):
    """Paginated grid of images collected by the signed-in user."""
    paginator = WorkImage.query.join(CollectWorkImage).filter(
        CollectWorkImage.user_id == g.user.id).order_by(
        CollectWorkImage.create_time.desc()).paginate(page, 12)
    return render_template('user/collect_work_images.html', paginator=paginator)
|
[
"hustlzp@qq.com"
] |
hustlzp@qq.com
|
2175a66e56fec5a6b38b8d8c9b58684e11ae83a5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02721/s005838488.py
|
67b4f1efce6fc3f21eede2e8af273a37d4a09818
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
# Greedy scheduling: pick k work days from s (where 'o' marks a workable
# day), with at least c rest days between consecutive picks.  L collects
# the earliest feasible schedule, R the latest.  A day occupying the same
# position in both schedules is forced — it appears in every valid plan.
n , k , c = map(int,input().split())
s = input()
L = []
R = []
i = 0
j = n-1
# Earliest schedule: scan left to right, skipping c days after each pick.
while i<n and len(L)<k :
    if s[i] == "o" :
        L.append(i)
        i += c
    i += 1
# Latest schedule: same greedy scan, right to left.
while j>-1 and len(R)<k :
    if s[j] == "o" :
        R.append(j)
        j -= c
    j -= 1
R.reverse()
# Print (1-based) every day that is fixed in both schedules.
for x in range(k):
    if R[x] == L[x]:
        print(R[x]+1)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6528f2aef6ccb83cf7c93281d60781f7bd740da3
|
4912cbd47c19c58d142e6833911d70f5ea037357
|
/question_bank/length-of-last-word/length-of-last-word.py
|
802c71d7fdb456bf98226e268e3f524641dbadf5
|
[
"Apache-2.0"
] |
permissive
|
yatengLG/leetcode-python
|
a09a17cd9e60cafd9ff8ca9c068f5b70719c436f
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
refs/heads/master
| 2023-07-13T16:10:01.920716
| 2021-09-06T02:51:46
| 2021-09-06T02:51:46
| 286,969,109
| 13
| 6
| null | 2021-02-16T10:19:44
| 2020-08-12T09:13:02
|
Python
|
UTF-8
|
Python
| false
| false
| 470
|
py
|
# -*- coding: utf-8 -*-
# @Author : LG
"""
执行用时:32 ms, 在所有 Python3 提交中击败了96.39% 的用户
内存消耗:13.3 MB, 在所有 Python3 提交中击败了73.72% 的用户
解题思路:
见代码注释
"""
class Solution:
    def lengthOfLastWord(self, s: str) -> int:
        """Return the length of the last space-separated word in ``s``.

        Trailing spaces are ignored; if ``s`` contains no word, return 0.
        """
        # Drop trailing blanks, then measure the segment after the final
        # remaining space.  rfind returns -1 when no space is left, so the
        # arithmetic naturally covers the whole-string case.
        trimmed = s.rstrip(' ')
        return len(trimmed) - trimmed.rfind(' ') - 1
|
[
"767624851@qq.com"
] |
767624851@qq.com
|
3e99bde13b9275c37392065bcce7d9a4fb67e948
|
3de2a746243ad1cb000994a06a0f9699db9a901f
|
/agc016a.py
|
049fcd1810826d416cc69758d1fa09b721e56213
|
[] |
no_license
|
takumi152/atcoder
|
71d726ffdf2542d8abac0d9817afaff911db7c6c
|
ebac94f1227974aa2e6bf372e18605518de46441
|
refs/heads/master
| 2022-10-30T12:14:41.742596
| 2022-09-29T19:49:32
| 2022-09-29T19:49:32
| 181,502,518
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
# AGC016 A (Shrinking): repeatedly replace s by a string t of length
# len(s)-1 where each t[j] may be s[j] or s[j+1]; find the minimum number
# of operations until all characters are equal.  Brute force: try each
# character occurring in s as the final letter and simulate the pulls.
def main():
    buf = input()
    s = buf
    # Collect the distinct characters of s, preserving first-seen order.
    characters = []
    for i in s:
        if not i in characters:
            characters.append(i)
    min_iter_count = 999  # sentinel, assumed larger than any possible answer
    for i in characters:
        iter_count = 0
        t = s
        while True:
            flag = False  # set when t still contains a non-target character
            new_t = ""
            for j in range(len(t) - 1):
                if t[j] == i:
                    new_t += i
                elif t[j+1] == i:
                    # Pull the target character one position to the left.
                    new_t += i
                    flag = True
                else:
                    new_t += t[j]
                    flag = True
            if t[-1] != i:
                flag = True
            t = new_t
            if flag:
                iter_count += 1
            else:
                break
        if iter_count < min_iter_count:
            min_iter_count = iter_count
    print(min_iter_count)
if __name__ == '__main__':
    main()
|
[
"takumi152@hotmail.com"
] |
takumi152@hotmail.com
|
171430aa6e72848779736e903cf664b836f0d045
|
9ab9d9a3883471763edbceea59a0e83170581b5f
|
/eggs/Parsley-1.1-py2.7.egg/terml/test/test_terml.py
|
efb9991a475a1bc684728f0d8bc302bd17366807
|
[
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
asmmhossain/phyG
|
24dc211dad5b3e89c87ff384e841f2e98bbd52db
|
023f505b705ab953f502cbc55e90612047867583
|
refs/heads/master
| 2022-11-21T12:43:46.172725
| 2014-02-14T12:33:08
| 2014-02-14T12:33:08
| 13,800,552
| 0
| 1
|
NOASSERTION
| 2020-07-25T21:05:41
| 2013-10-23T11:04:25
|
Python
|
UTF-8
|
Python
| false
| false
| 6,108
|
py
|
from twisted.trial import unittest
from ometa.runtime import ParseError
from terml.nodes import Tag, Term, coerceToTerm, TermMaker
from terml.parser import TermLParser, character, parseTerm
class TermMakerTests(unittest.TestCase):
def test_make(self):
m = TermMaker()
t1 = m.Foo(1, 'a', m.Baz())
self.assertEqual(t1, parseTerm('Foo(1, "a", Baz)'))
class ParserTest(unittest.TestCase):
"""
Test TermL parser rules.
"""
def getParser(self, rule):
def parse(src):
p = TermLParser(src)
result, error = p.apply(rule)
return result
return parse
def test_literal(self):
"""
Literals are parsed to literal terms.
"""
parse = self.getParser("literal")
self.assertEqual(parse('"foo bar"'),
Term(Tag('.String.'), "foo bar", None))
self.assertEqual(parse("'x'"),
Term(Tag('.char.'), 'x', None))
self.assertEqual(parse("0xDECAFC0FFEEBAD"),
Term(Tag('.int.'), 0xDECAFC0FFEEBAD, None))
self.assertEqual(parse("0755"),
Term(Tag('.int.'), 0755, None))
self.assertEqual(parse("3.14159E17"),
Term(Tag('.float64.'), 3.14159E17, None))
self.assertEqual(parse("1e9"),
Term(Tag('.float64.'), 1e9, None))
self.assertEqual(parse("0"), Term(Tag(".int."), 0, None))
self.assertEqual(parse("7"), Term(Tag(".int."), 7, None))
self.assertEqual(parse("-1"), Term(Tag(".int."), -1, None))
self.assertEqual(parse("-3.14"),
Term(Tag('.float64.'), -3.14, None))
self.assertEqual(parse("3_000"),
Term(Tag('.int.'), 3000, None))
self.assertEqual(parse("0.91"),
Term(Tag('.float64.'), 0.91, None))
self.assertEqual(parse("3e-2"),
Term(Tag('.float64.'), 3e-2, None))
self.assertEqual(parse("'\\n'"),
Term(Tag('.char.'), character("\n"), None))
self.assertEqual(parse('"foo\\nbar"'),
Term(Tag('.String.'), "foo\nbar", None))
self.assertEqual(parse("'\\u0061'"),
Term(Tag('.char.'), character("a"), None))
self.assertEqual(parse('"z\141p"'),
Term(Tag('.String.'), "zap", None))
self.assertEqual(parse('"x\41"'),
Term(Tag('.String.'), "x!", None))
self.assertEqual(parse('"foo\\\nbar"'),
Term(Tag('.String.'), "foobar", None))
def test_simpleTag(self):
"""
Tags are parsed properly.
"""
parse = self.getParser("tag")
self.assertEqual(parse("foo"), Tag("foo"))
self.assertEqual(parse('::"foo"'), Tag('::"foo"'))
self.assertEqual(parse("::foo"), Tag('::foo'))
self.assertEqual(parse("foo::baz"), Tag('foo::baz'))
self.assertEqual(parse('foo::"baz"'), Tag('foo::"baz"'))
self.assertEqual(parse("biz::baz::foo"), Tag('biz::baz::foo'))
self.assertEqual(parse("foo_yay"), Tag('foo_yay'))
self.assertEqual(parse("foo$baz32"), Tag('foo$baz32'))
self.assertEqual(parse("foo-baz.19"), Tag('foo-baz.19'))
def test_simpleTerm(self):
"""
Kernel syntax for terms is parsed properly.
"""
parse = self.getParser("baseTerm")
self.assertEqual(parse("x"), Term(Tag("x"), None, None))
self.assertEqual(parse("x()"), Term(Tag("x"), None, []))
self.assertEqual(parse("x(1)"), Term(Tag("x"), None,
(Term(Tag(".int."), 1, None),)))
self.assertEqual(parse("x(1, 2)"), Term(Tag("x"), None,
(Term(Tag(".int."), 1,
None),
Term(Tag(".int."), 2,
None))))
self.assertEqual(parse("1"), Term(Tag(".int."), 1, None))
self.assertEqual(parse('"1"'), Term(Tag(".String."), "1", None))
self.assertRaises(ValueError, parse, "'x'(x)")
self.assertRaises(ValueError, parse, '3.14(1)')
self.assertRaises(ValueError, parse, '"foo"(x)')
self.assertRaises(ValueError, parse, "1(2)")
def test_fullTerm(self):
"""
Shortcut syntax for terms is handled.
"""
self.assertEqual(parseTerm("[x, y, 1]"), parseTerm(".tuple.(x, y, 1)"))
self.assertEqual(parseTerm("{x, y, 1}"), parseTerm(".bag.(x, y, 1)"))
self.assertEqual(parseTerm("f {x, y, 1}"), parseTerm("f(.bag.(x, y, 1))"))
self.assertEqual(parseTerm("a: b"), parseTerm(".attr.(a, b)"))
self.assertEqual(parseTerm('"a": b'), parseTerm('.attr.("a", b)'))
self.assertEqual(parseTerm('a: [b]'), parseTerm('.attr.(a, .tuple.(b))'))
def test_multiline(self):
"""
Terms spread across multiple lines are parsed correctly.
"""
single = parseTerm('foo(baz({x: "y", boz: 42}))')
multi = parseTerm(
"""foo(
baz({
x: "y",
boz: 42}
))""")
self.assertEqual(multi, single)
def test_leftovers(self):
e = self.assertRaises(ParseError, parseTerm, "foo(x) and stuff")
self.assertEqual(e.position, 7)
def test_unparse(self):
def assertRoundtrip(txt):
self.assertEqual('term(%r)' % (txt,), repr(parseTerm(txt)))
cases = ["1", "3.25", "f", "f(1)", "f(1, 2)", "f(a, b)",
"{a, b}", "[a, b]", "f{1, 2}", '''{"name": "Robert", attrs: {'c': 3}}''']
for case in cases:
assertRoundtrip(case)
def test_coerce(self):
self.assertEqual(
coerceToTerm({3: 4, "a": character('x'), (2, 3): [4, 5]}),
parseTerm('{"a": \'x\', 3: 4, [2, 3]: [4, 5]}'))
|
[
"mukarram819@gmail.com"
] |
mukarram819@gmail.com
|
e843c39e8e4989e30428e9ca261411b48af05bc5
|
c0450361aa707635f5bf70eff21c1235d7e60cfa
|
/Lessons by HoudyHo/lesson (32).py
|
c992dd1dcb5591096afb4678ee5bf2a1ecc56285
|
[] |
no_license
|
zarkaltair/Learn-python
|
f48810b86e9832f4c364c345d1fa8624f9ced683
|
dd6114b5bd6cc30eff328002521041dd2be2c3c5
|
refs/heads/master
| 2020-04-10T05:48:51.052751
| 2019-01-23T18:48:34
| 2019-01-23T18:48:34
| 160,837,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
# Class
# Lesson script: basic classes, instance vs class attributes, and methods.
class Cat:
    def __init__(self, color, legs):
        self.color = color
        self.legs = legs
felix = Cat('ginger', 4)
print(felix.color)
rover = Cat('dog-colored', 4)
stumpy = Cat('brown', 3)
class Student:
    def __init__(self, name):
        self.name = name
test = Student('Bob')
print(test.name)
class Dog:
    def __init__(self, name, color):
        self.name = name
        self.color = color
    def bark(self):
        print('Woof!')
fido = Dog('Fido', 'brown')
print(fido.name)
fido.bark()
# Redefines Dog to demonstrate a class-level attribute shared by instances.
class Dog:
    legs = 4
    def __init__(self, name, color):
        self.name = name
        self.color = color
fido = Dog('fido', 'brown')
print(fido.legs)
print(Dog.legs)
# Redefines Student to add a method.
class Student:
    def __init__(self, name):
        self.name = name
    def sayHi(self):
        print('Hi from ' + self.name)
s1 = Student('Amy')
s1.sayHi()
class Rectangle:
    def __init__(self, width, height):
        self.width = width
        self.height = height
rect = Rectangle(7, 8)
# NOTE(review): Rectangle has no 'color' attribute — the next line raises
# AttributeError (possibly the lesson's point about missing attributes).
print(rect.color)
|
[
"zarkaltair@gmail.com"
] |
zarkaltair@gmail.com
|
3e7b548f6b5cdbd48c47d9c85724e93cbb569120
|
2b25aae9266437b657e748f3d6fea4db9e9d7f15
|
/graphics/line/4/richard-zhan/main.py
|
543aa01f31e1d50c82c08b4e4ca0d48c0b406554
|
[] |
no_license
|
Zilby/Stuy-Stuff
|
b1c3bc23abf40092a8a7a80e406e7c412bd22ae0
|
5c5e375304952f62667d3b34b36f0056c1a8e753
|
refs/heads/master
| 2020-05-18T03:03:48.210196
| 2018-11-15T04:50:03
| 2018-11-15T04:50:03
| 24,191,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
# Draws two nested squares (a green diamond and a red axis-aligned square)
# using the course-provided edge-matrix / line-drawing helpers.
from display import *
from draw import *
screen = new_screen()
color = [ 0, 255, 0 ]  # green
matrix = []
#octant I
# add_edge(matrix, 0, 0, 0, XRES - 1, YRES - 75, 0 )
# add_edge(matrix, 0, 0, 0, XRES - 75, YRES - 1, 0 )
# add_edge(matrix, 0, YRES - 1, 0, XRES - 1, 75, 0 )
# add_edge(matrix, 0, YRES - 1, 0, XRES - 75, 0, 0 )
# add_edge(matrix,0,400,0,250,300,0)
# add_edge(matrix,0,300,0,250,200,0)
# add_edge(matrix,0,150,0,450,325,0)
# Diamond: vertices at the midpoints of a 500x500 screen's edges.
add_edge(matrix,0,250,0,250,0,0)
add_edge(matrix,250,0,0,499,250,0)
add_edge(matrix,499,250,0,250,499,0)
add_edge(matrix,250,499,0,0,250,0)
draw_lines( matrix, screen, color )
matrix=[]
# Inner square from (125,125) to (375,375).
add_edge(matrix,125,125,0,375,125,0)
add_edge(matrix,375,125,0,375,375,0)
add_edge(matrix,375,375,0,125,375,0)
add_edge(matrix,125,375,0,125,125,0)
# add_edge(matrix,0,250,0,250,0,0)
color = [255,0,0]  # red
draw_lines(matrix,screen,color)
display(screen)
|
[
"azilby@gmail.com"
] |
azilby@gmail.com
|
08388f40f96262e48a825ed8578c70f7e147a701
|
66fe4dbcb81ceb688fa557c9a05a92779bd4e263
|
/config/config.py
|
97c447221012cfb133c0e71153480f5577f69a13
|
[] |
no_license
|
AdamEECS/sc
|
5d3e98d697dd891dfdbae910d0167a0ce1082f19
|
387930acb7af4c04b39415e923639cad458fda09
|
refs/heads/master
| 2021-01-01T06:28:18.465633
| 2018-08-16T07:56:35
| 2018-08-16T07:56:35
| 97,430,842
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
# Application configuration: upload paths, CDN, mail, payment credentials,
# and the MongoDB connection shared by the rest of the app.
# NOTE(review): wildcard import — only MongoClient is used; prefer
# `from pymongo import MongoClient` to avoid namespace pollution.
from pymongo import *
import os
config_dict = dict(
    USER_AVATAR_DIR='static/user_avatar/',
    PRODUCT_PIC_DIR='static/product_pic/',
    UPLOAD_FILE_DIR='static/files/',
    PRODUCT_PIC_EXT='png',
    CDN_URL='http://opguqe876.bkt.clouddn.com/',
    CDN_USER_AVATAR_DIR='/user_avatar/',
    CDN_PRODUCT_PIC_DIR='/product_pic/',
    CDN_BUCKET='buy-suzumiya',
    QINIU_CALLBACK_URL='https://buy.suzumiya.cc/callback/all',
    PIC_UPLOAD_URL='https://up-z1.qbox.me/',
    SEND_EMAIL_URL='https://api.mailgun.net/v3/mg.suzumiya.cc/messages',
    SEND_EMAIL_FROM='Suzumiya <no-replay@mg.suzumiya.cc>',
    BASE_URL='http://localhost:8001',
    MAX_CONTENT_LENGTH=2 * 1024 * 1024,  # 2 MiB upload cap
    ALLOWED_UPLOAD_TYPE=['jpg', 'jpeg', 'gif', 'png', 'ico'],
    PINGPP_PRIVATE_KEY_PATH=os.path.join(os.path.dirname(__file__), 'mtk_rsa.pem'),
    ALIPAY_PRIVATE_KEY_PATH=os.path.join(os.path.dirname(__file__), 'mtk_rsa.pem'),
    ALIPAY_PUBLIC_KEY_PATH=os.path.join(os.path.dirname(__file__), 'ali_pub.pem'),
    ALIPAY_CALLBACK_URL="http://yc.miteke.com/callback/ali",
    ALIPAY_RETURN_URL="http://yc.miteke.com/user/profile",
    ALIPAY_APPID="2017092008837195",
)
# mongodb config
# MongoClient connects lazily, so importing this module does not block on the DB.
db_name = 'mongo_sc'
client = MongoClient("mongodb://localhost:27017")
db = client[db_name]
|
[
"jrgwyz@163.com"
] |
jrgwyz@163.com
|
beb5886b6bb03f8e0149d52f247c773ab8efa39e
|
0789766b3f242835f3c4e03e573f4d2fa3ebbc5a
|
/my_nas/dataset/imagenet_downsample.py
|
c2d73f91310cc798966575e69aef00dd70867fed
|
[] |
no_license
|
Anonymous-1112/anonymous
|
05900a2a5feba3a48ad76847a22a8c3a3f35b2e1
|
d86ec6b35b681c9220150c68bb5eb10af26f5629
|
refs/heads/master
| 2023-07-01T19:49:57.400134
| 2021-08-08T15:29:12
| 2021-08-08T15:36:56
| 393,964,141
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,872
|
py
|
# -*- coding: utf-8 -*-
import os
import pickle
from PIL import Image
import numpy as np
from torchvision import transforms
from torchvision.datasets import vision
from my_nas.utils.torch_utils import Cutout
from my_nas.dataset.base import BaseDataset
class ImageNetDownsampleDataset(vision.VisionDataset):
train_list = [
"train_data_batch_1",
"train_data_batch_2",
"train_data_batch_3",
"train_data_batch_4",
"train_data_batch_5",
"train_data_batch_6",
"train_data_batch_7",
"train_data_batch_8",
"train_data_batch_9",
"train_data_batch_10"
]
test_list = [
"val_data"
]
def __init__(self, root, num_class=1000, size=16, train=True,
transform=None, target_transform=None):
super(ImageNetDownsampleDataset, self).__init__(root, transform=transform,
target_transform=target_transform)
self.train = train # training set or test set
file_list = self.train_list if self.train else self.test_list
self.num_class = num_class # the first `num_class` classes are kept
len_ = 3 * size * size
self.data = np.zeros((0, len_), dtype=np.uint8)
self.targets = []
for file_name in file_list:
file_path = os.path.join(self.root, file_name)
with open(file_path, "rb") as f:
entry = pickle.load(f)
if num_class < 1000:
mask = np.array(entry["labels"]) <= num_class
self.data = np.concatenate((self.data, entry["data"][mask]), axis=0)
self.targets.extend(list((np.array(entry["labels"]) - 1)[mask]))
else:
self.data = np.concatenate((self.data, entry["data"]), axis=0)
self.targets.extend(list(np.array(entry["labels"]) - 1))
self.data = self.data.reshape(-1, 3, size, size).transpose((0, 2, 3, 1)) # HWC for PIL
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.targets[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.data)
class ImageNetDownsample(BaseDataset):
NAME = "imagenet_downsample"
def __init__(self, num_class=120, size=16, relative_dir=None, cutout=None):
super(ImageNetDownsample, self).__init__(relative_dir=relative_dir)
self.cutout = cutout
self.num_class = num_class
self.size = size
# array([122.68245678, 116.65812896, 104.00708381])
imgnet_mean = [0.48110767, 0.45748286, 0.40787092]
imgnet_std = [0.229, 0.224, 0.225] # use imgnet
train_transform = transforms.Compose([
transforms.RandomCrop(16, padding=2), # follow NB201
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(imgnet_mean, imgnet_std),
])
if self.cutout:
train_transform.transforms.append(Cutout(self.cutout))
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(imgnet_mean, imgnet_std),
])
self.datasets = {}
self.datasets["train"] = ImageNetDownsampleDataset(
root=self.data_dir, num_class=self.num_class, size=self.size,
train=True, transform=train_transform)
self.datasets["train_testTransform"] = ImageNetDownsampleDataset(
root=self.data_dir, num_class=self.num_class, size=self.size,
train=True, transform=test_transform)
self.datasets["test"] = ImageNetDownsampleDataset(
root=self.data_dir, num_class=self.num_class, size=self.size,
train=False, transform=test_transform)
def same_data_split_mapping(self):
return {"train_testTransform": "train"}
def splits(self):
return self.datasets
@classmethod
def data_type(cls):
return "image"
def __reduce__(self):
"""
Python 3
reduce for pickling (mainly for use with async search see trainer/async_trainer.py)
"""
return ImageNetDownsample, (self.cutout,)
def __getinitargs__(self):
"""
Python 2
getinitargs for pickling (mainly for use with async search see trainer/async_trainer.py)
"""
return (self.cutout,)
|
[
"anonymous-email@anony.com"
] |
anonymous-email@anony.com
|
54cdd7f5ce0fc2040583d0605b91c1bddb75daee
|
68e0a967f52fd86e82f80dc4fd9198449f8f1030
|
/doc/.src/book/scripts.py
|
09eacba33e843d93599017499cf00bfdf3b8d05a
|
[] |
no_license
|
htphuc/fdm-book
|
4ac32a30506a83fd1ae35c2fe1934d194ea11686
|
07b15f987374b3e91d21ab14c06cfc0a79634936
|
refs/heads/master
| 2021-01-17T20:59:48.188917
| 2016-04-25T21:06:12
| 2016-04-25T21:06:12
| 57,400,233
| 1
| 0
| null | 2016-04-29T17:01:53
| 2016-04-29T17:01:52
| null |
UTF-8
|
Python
| false
| false
| 5,340
|
py
|
import sys, re, os, shutil, glob
chapters = "vib wave diffu trunc staggered softeng2 formulas advec".split()
chaptersdir = 'chapters'
ignored_files = '*.o *.so *.a *.pyc *.bak *.swp *~ .*~ *.old tmp* temp* .#* \\#* *.log *.dvi *.aux *.blg *.idx *.nav *.out *.toc *.snm *.vrb *.cproject *.project .DS_Store Trash'.split()
def chapter_visitor(action=None, chapters=chapters):
"""Visit dirs in chapters and call/perform action."""
if isinstance(action, str):
action = re.split('r\s*;\s*', action)
if isinstance(action, (tuple,list)):
# Wrap Unix commands and run
def action_function():
for command in action:
print command
failure = os.system(command)
if failure:
print 'failure in execution...'; sys.exit(1)
elif callable(action):
action_function = action
prefix = os.path.join(os.pardir, chaptersdir)
thisdir = os.getcwd()
for chapter in chapters:
destination = os.path.join(prefix, chapter)
if os.path.isdir(destination):
print 'visiting directory', destination
os.chdir(destination)
action_function()
os.chdir(thisdir)
else:
print '\n*** error: directory %s does not exist!' % destination
sys.exit(1)
def clean():
"""
Remove all files that can be regenerated.
Method: run common ../clean.sh in all chapter dirs +
doconce clean in this book dir.
"""
chapter_visitor('bash -x ../clean.sh')
os.system('doconce clean')
# Remove reduant files
redundant = glob.glob('newcommands*.tex')
for filename in redundant:
os.remove(filename)
def compile_chapters():
"""
Compile all chapters as stand-alone PDF documents.
Method: run make.sh in all chapter dirs.
"""
chapter_visitor('rm -rf tmp*; bash -x make.sh')
def make_links(chapters=chapters):
"""Make links to all src-* and fig-* dirs for all chapters."""
prefix = os.path.join(os.pardir, chaptersdir)
for chapter in chapters:
destination = os.path.join(prefix, chapter)
subdirs = [tp + '-' + chapter for tp in 'fig', 'src', 'mov', 'exer']
for subdir in subdirs:
if not os.path.islink(subdir):
dest_subdir = os.path.join(destination, subdir)
if os.path.isdir(dest_subdir):
os.symlink(dest_subdir, subdir)
print 'created local link %s to %s' % (subdir, destination)
# Sometimes manual additions are needed here, e.g.,
#os.symlink(os.path.join(prefix, 'tech', 'fig2'), 'fig2')
def spellcheck():
"""Visit each individual chapter and spellcheck all *.do.txt in it."""
chapter_visitor('rm -rf tmp*; doconce spellcheck -d .dict4spell.txt *.do.txt')
def pack_src(root='src', tarfile='book-examples.tar.gz', chapters=chapters):
"""
Publish programs, libraries, data, etc. from the book.
Method: make new directory tree root, copy all src-name dirs
from all chapters to name.
This root tree can be synced to an external repo or packed
as a tar or zip file.
"""
shutil.rmtree(root)
os.mkdir(root)
os.chdir(root)
prefix = os.path.join(os.pardir, os.pardir, chaptersdir)
thisdir = os.getcwd()
for chapter in chapters:
src = 'src-' + chapter
# Clean up redundant files that we will not publish
destination = os.path.join(prefix, src)
if os.path.isdir(destination):
os.chdir(destination)
for file_spec in ignored_files:
for filename in glob.glob(file_spec):
os.remove(filename)
print 'removed', 'src-%s/%s' % (chapter, filename)
os.chdir(thisdir)
# Copy files
shutil.copytree(destination, chapter)
print '\ndirectory tree with source code files for the book:', root
os.chdir(os.pardir)
os.system('tar czf %s %s' % (tarfile, root))
print 'tarfile:', tarfile
def externaldocuments():
# Find all candidate documents in ../chapters/*
prefix = os.path.join(os.pardir, chaptersdir)
#dirs = [name for name in os.listdir(prefix)
# if os.path.isdir(os.path.join(prefix, name))]
dirs = chapters[:]
docs = []
for nickname in dirs:
mainfiles = glob.glob(os.path.join(prefix, nickname, 'main_*.do.txt'))
for mainfile in mainfiles:
docs.append((nickname, mainfile[:-7])) # drop .do.txt
mainfiles = [mainfile for nickname, mainfile in docs]
# Need to visit all dirs, remove that dir from the list and subst
for mainfile in mainfiles:
other_mainfiles = mainfiles[:] # copy
other_mainfiles.remove(mainfile)
# Strip off ../chapters to ../
other_mainfiles = ['../' + mainfile[12:] for mainfile in mainfiles]
f = open(mainfile + '.do.txt', 'r')
text = f.read()
f.close()
text = re.sub('^# Externaldocuments:.*', '# Externaldocuments: ' +
', '.join(other_mainfiles), text, flags=re.MULTILINE)
print 'subst in', mainfile
f = open(mainfile + '.do.txt', 'w')
f.write(text)
f.close()
print 'updated # Externaldocuments in', mainfile, 'with\n ', ', '.join(other_mainfiles)
|
[
"hpl@simula.no"
] |
hpl@simula.no
|
9569e88a4594523c588bf67478cf3e69e5fa07d3
|
eae3d77ac72c168cee7701462f1fc45d7d4dcd91
|
/Tree/5176_이진탐색.py
|
1d5de72b6fe0d9ddc3f43d237019f001829c7471
|
[] |
no_license
|
ByeongjunCho/Algorithm-TIL
|
ed2f018d50bd2483bd1175ff9bf7e91913c14766
|
ad79125a1498915fe97c1d57ee6860b06c410958
|
refs/heads/master
| 2022-07-19T15:12:23.689319
| 2020-05-18T08:37:09
| 2020-05-18T08:37:09
| 256,399,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 286
|
py
|
# For each test case, build a complete binary tree in 1-based array form:
# node i's children live at indices 2i and 2i+1.
# NOTE(review): V (node values) is allocated but never filled or printed in
# the visible code — the solution appears incomplete.
T = int(input())
for tc in range(1, T+1):
    N = int(input())
    V = [0] * (N+1)
    L = [0] * (N+1)  # left-child index per node (0 = no child)
    R = [0] * (N+1)  # right-child index per node (0 = no child)
    # build the binary tree
    i = 1
    while (i << 1) < N+1:
        L[i] = i << 1
        if (i << 1) + 1 < N+1:
            R[i] = (i << 1) + 1
        i += 1
|
[
"jjgk91@naver.com"
] |
jjgk91@naver.com
|
b92ace36f8eaa5fa5bd1a781ed1656742c2db3c5
|
a2c90d183ac66f39401cd8ece5207c492c811158
|
/Solving_Problem/daily_222/1111/17140.py
|
524cee2fdd156023f681b4bf34cde15944c9a1c3
|
[] |
no_license
|
kwoneyng/TIL
|
0498cfc4dbebbb1f2c193cb7c9459aab7ebad02a
|
c6fbaa609b2e805f298b17b1f9504fd12cb63e8a
|
refs/heads/master
| 2020-06-17T11:53:38.685202
| 2020-03-18T01:29:36
| 2020-03-18T01:29:36
| 195,916,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,924
|
py
|
# BOJ 17140 (array turning): repeat up to 100 sort operations until cell
# (r, c) holds k.  Each step sorts every row (or column) by
# (frequency, value) of its nonzero entries, rewriting each row as
# value,count pairs and padding all rows to equal length with zeros.
from collections import deque
from heapq import heappop, heappush
def rcal():
    """R-operation: recount and rewrite every row in place; grows global y."""
    global y
    ls = []
    mx = 0
    for i in range(x):
        su = len(bd[i])
        cnt = 0
        # Values are known to lie in 1..100; heap orders by (count, value).
        for j in range(1,101):
            if bd[i].count(j):
                heappush(ls,[bd[i].count(j),j])
                cnt += bd[i].count(j)
            if cnt == su:
                break
        bd[i] = []
        # Drain the heap: smallest (count, value) first -> value,count pairs.
        for _ in range(len(ls)):
            many, su = heappop(ls)
            bd[i].append(su)
            bd[i].append(many)
        mx = max(mx, len(bd[i]),y)
    # Pad every row with zeros to the new common width.
    for i in range(x):
        for _ in range(mx-len(bd[i])):
            bd[i].append(0)
    y = mx
def ccal():
    """C-operation: same as rcal but on columns; returns the new board."""
    global x
    new = [[] for i in range(y)]
    ls = []
    re_bd = []
    mx = 0
    for i in range(y):
        cnt = 0
        bls =[]
        # Extract column i (zeros are padding and are not counted).
        for j in range(x):
            bls.append(bd[j][i])
        su = len(bls)-bls.count(0)
        for k in range(1,101):
            if bls.count(k):
                heappush(ls,[bls.count(k),k])
                cnt += bls.count(k)
            if cnt == su:
                break
        for _ in range(len(ls)):
            many, su = heappop(ls)
            new[i].append(su)
            new[i].append(many)
        mx = max(mx, len(new[i]),x)
    for i in range(y):
        for _ in range(mx-len(new[i])):
            new[i].append(0)
    x = mx
    # Transpose back: `new` holds columns, re_bd holds rows.
    for i in range(x):
        ls = []
        for j in range(y):
            ls.append(new[j][i])
        re_bd.append(ls)
    return re_bd
def debug():
    """Print the current board (development aid)."""
    for i in bd:
        print(i)
    print('-------------------------')
r,c,k = map(int,input().split())
r -= 1
c -= 1
x,y = 3,3  # board starts as 3x3
bd = [list(map(int,input().split())) for i in range(x)]
# Try at most 100 steps; print the step index when bd[r][c] == k, else -1.
for i in range(101):
    if r < x and c < y:
        if bd[r][c] == k:
            print(i)
            break
    if x >= y:
        rcal()
    else:
        bd = ccal()
    # debug()
else:
    print(-1)
|
[
"nan308@naver.com"
] |
nan308@naver.com
|
efa5c09e00baf175a267323493146e4a079511df
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_5/sctjas002/question4.py
|
e4b18527ed1d0802f17d9b728e65f1ab2eec2bbd
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# ASCII function plotter: marks 'o' where round(f(x)) == y on a 21x21 grid
# spanning x,y in [-10, 10], with '-'/'|'/'+' drawing the axes.
# SECURITY(review): eval() of raw user input executes arbitrary Python;
# acceptable only in a trusted classroom setting.
import math
(fx)=input('Enter a function f(x):\n')
for y in range (10,-11,-1):      # top row first (y = 10) down to y = -10
    for x in range (-10,11):
        # eval sees the loop variable x, so fx may reference 'x' (and math).
        if y==round((eval((fx)))):
            print('o',end='')
        elif y==0 and x==0:
            print('+',end='')    # origin
        elif y==0:
            print('-',end='')    # x-axis
        elif x==0:
            print('|' ,end='')   # y-axis
        else:
            print(' ',end='')
    print()
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
db99a7fae497a54bcf8582832888bcb9835fca74
|
30d1902232eb9ddb84fdf5404a3a1dfd6232406a
|
/wxpython/test/sxn.spec
|
9fb6f075ebf9e03cbe029bbde4b5335d0aaab5e1
|
[] |
no_license
|
sxnys/mypython
|
c3a768b054077ed97ff1e2fac31cb93f0765deb3
|
de48cd883ad2de3320cb0c6b46b451ebb2311ac7
|
refs/heads/master
| 2022-11-07T15:11:48.936412
| 2019-04-14T12:04:30
| 2019-04-14T12:04:30
| 119,686,106
| 0
| 1
| null | 2022-10-31T05:13:00
| 2018-01-31T12:46:06
|
Python
|
UTF-8
|
Python
| false
| false
| 835
|
spec
|
# -*- mode: python -*-
# PyInstaller build spec for sxn.py.  Analysis/PYZ/EXE/COLLECT are injected
# into this file's namespace by PyInstaller when the spec is executed; this
# is not an importable Python module.
block_cipher = None
a = Analysis(['sxn.py'],
             pathex=['F:\\Python\\wxpython\\test'],
             binaries=[],
             datas=[],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# One-folder build: the exe excludes binaries, COLLECT gathers everything.
exe = EXE(pyz,
          a.scripts,
          exclude_binaries=True,
          name='sxn',
          debug=False,
          strip=False,
          upx=True,
          console=True )
coll = COLLECT(exe,
               a.binaries,
               a.zipfiles,
               a.datas,
               strip=False,
               upx=True,
               name='sxn')
|
[
"1119112647@qq.com"
] |
1119112647@qq.com
|
80d512046627f21ec6e5b8db3615ee5f70869009
|
f3d40fcd992b38132ff9634d2b76988a99cefb3b
|
/pycoinnet/util/BlockChainStore.py
|
86653b8c71a73bfe27c1a3f2bfbd1b35acc4ec52
|
[
"MIT"
] |
permissive
|
richardkiss/pycoinnet
|
b9b999dbf0401722e4550c5926197881e5b13102
|
57a7f439f0b4c9102cd25f95c0b7e4db00fe2f5b
|
refs/heads/master
| 2022-04-27T19:15:39.098602
| 2021-12-25T23:26:24
| 2021-12-25T23:26:24
| 16,194,216
| 117
| 56
|
MIT
| 2021-12-25T23:26:24
| 2014-01-24T03:43:42
|
Python
|
UTF-8
|
Python
| false
| false
| 2,040
|
py
|
import logging
import os
class FakeHeader:
    """Minimal stand-in for a block header: a hash plus a parent link.

    Equality and hashing are defined so headers can live in sets/dicts.
    """
    def __init__(self, h, previous_block_hash):
        self.h = h
        self.previous_block_hash = previous_block_hash
        self.difficulty = 1  # every fake header carries unit difficulty
    def hash(self):
        """Return this header's own hash."""
        return self.h
    def __repr__(self):
        return "{} (parent {})".format(self.h, self.previous_block_hash)
    def __eq__(self, other):
        return (self.h == other.h
                and self.previous_block_hash == other.previous_block_hash)
    def __hash__(self):
        # Delegate to the stored hash value so equal headers hash equally.
        return self.h.__hash__()
class BlockChainStore:
    """Persists a chain of block hashes as a flat binary file, 32 bytes each."""
    BLOCK_HASHES_PATH = "locked_block_hashes.bin"
    def __init__(self, dir_path, parent_to_0=b'\0' * 32):
        # dir_path: directory containing the hash file.
        # parent_to_0: hash treated as the parent of the first stored block.
        self.dir_path = dir_path
        self.parent_to_0 = parent_to_0
    def block_tuple_iterator(self):
        """Yield (hash, previous_hash, weight) tuples from the store.

        The file is read in 16 KiB chunks; each consecutive 32-byte slice
        is a block hash whose parent is the previously yielded hash.
        Weight is always 1.  Any error (e.g. missing file) silently ends
        iteration — reading is best-effort.
        """
        try:
            with open(os.path.join(self.dir_path, self.BLOCK_HASHES_PATH), "rb") as f:
                prev_hash = self.parent_to_0
                while 1:
                    d = f.read(16384)
                    if len(d) == 0:
                        return
                    while len(d) >= 32:
                        the_hash = d[:32]
                        yield (the_hash, prev_hash, 1)
                        prev_hash = the_hash
                        d = d[32:]
        except Exception:
            pass
    def headers(self):
        """Yield the stored chain as FakeHeader objects."""
        for the_hash, prev_hash, weight in self.block_tuple_iterator():
            yield FakeHeader(the_hash, prev_hash)
    def did_lock_to_index(self, block_tuple_list, start_index):
        """Write the given block hashes starting at block index start_index."""
        # Opening with "a+b" just ensures the file exists before "r+b".
        with open(os.path.join(self.dir_path, self.BLOCK_HASHES_PATH), "a+b") as f:
            pass
        with open(os.path.join(self.dir_path, self.BLOCK_HASHES_PATH), "r+b") as f:
            f.seek(start_index*32)  # 32 bytes per stored hash
            count = 0
            # ## TODO: make sure the one we're writing is in the right place
            for the_hash, parent_hash, weight in block_tuple_list:
                f.write(the_hash)
                count += 1
            logging.debug("wrote %d items to block chain store at %s", count, self.dir_path)
|
[
"him@richardkiss.com"
] |
him@richardkiss.com
|
308b6b16a55851f143ffb7afe1ce0b0fa3f85bf3
|
e254c72d3fd11306c8625c5d8ad8ac394eabc6c6
|
/06.scrapy/AppleDailySearchMongo/AppleDailySearch/settings.py
|
68673e439c56176af474067092b73ea14f949c88
|
[] |
no_license
|
Edward83528/crawlerToMachinLearningAndBot
|
87c7ea92779b949ad5015612a4e70275becab480
|
82818137b517f4c5a856535f83a8cb8b211da8aa
|
refs/heads/master
| 2022-11-06T19:41:20.473933
| 2020-07-04T14:01:07
| 2020-07-04T14:01:07
| 268,072,162
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,426
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for AppleDailySearch project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'AppleDailySearch'
SPIDER_MODULES = ['AppleDailySearch.spiders']
NEWSPIDER_MODULE = 'AppleDailySearch.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'AppleDailySearch (+http://www.yourdomain.com)'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'AppleDailySearch.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'AppleDailySearch.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'AppleDailySearch.pipelines.SomePipeline': 300,
#}
ITEM_PIPELINES = {
'AppleDailySearch.pipelines.JsonWithEncodingPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"u0151051@gmail.com"
] |
u0151051@gmail.com
|
4b05388caf49263247f5a9216da4d2033fdccc11
|
c7f353cc14439fc47d351bd29258c9453cf16f32
|
/h2o-py/tests/testdir_munging/pyunit_ischaracter_isnumeric.py
|
aa42b41611f89e9681ba6234c5bba7e04441ba66
|
[
"Apache-2.0"
] |
permissive
|
tamseo/h2o-3
|
a131f40a0cd7f0c52d359b06b300f87d627cfd83
|
cc59fa0d97325796c5a57085661cea7b34fa81e9
|
refs/heads/master
| 2020-12-11T09:27:54.589687
| 2015-10-19T21:56:12
| 2015-10-19T21:56:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
py
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
import random
def pyunit_ischaracter_isnumeric():
iris = h2o.import_file(tests.locate("smalldata/iris/iris.csv"))
assert iris[0].isnumeric(), "Expected the first column of iris to be numeric"
assert not iris[0].ischaracter(), "Expected the first column of iris to be numeric"
assert not iris[4].isnumeric(), "Expected the last column of iris to be character"
iris[4] = iris[4].ascharacter()
assert iris[4].isstring(), "Expected the last column of iris to be a string"
if __name__ == "__main__":
tests.run_test(sys.argv, pyunit_ischaracter_isnumeric)
|
[
"eric.eckstrand@gmail.com"
] |
eric.eckstrand@gmail.com
|
8dfe9a9df3bccbd5d817c8705b15fc06fd4569ce
|
ae06af824e864fab8d33f695ddb612e7867ab92f
|
/dashboard/dashboard/pinpoint/models/quest/read_value.py
|
be0fda52522ba5143cfe3a50720310b2db79bc77
|
[
"BSD-3-Clause"
] |
permissive
|
takingmynetback/catapult
|
49402759c34dd07e424b47f4c9ec824dd1744526
|
f718fb12b8cfd16b07509674747abf56cf330ac8
|
refs/heads/master
| 2020-03-10T18:46:57.367789
| 2018-04-13T14:20:21
| 2018-04-13T15:06:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,183
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from dashboard.common import histogram_helpers
from dashboard.pinpoint.models.quest import execution
from dashboard.pinpoint.models.quest import quest
from dashboard.services import isolate
from tracing.value import histogram_set
from tracing.value.diagnostics import diagnostic_ref
from tracing.value.diagnostics import reserved_infos
class ReadValueError(Exception):
pass
class ReadHistogramsJsonValue(quest.Quest):
def __init__(self, hist_name, tir_label=None, story=None, statistic=None):
self._hist_name = hist_name
self._tir_label = tir_label
self._story = story
self._statistic = statistic
def __eq__(self, other):
return (isinstance(other, type(self)) and
self._hist_name == other._hist_name and
self._tir_label == other._tir_label and
self._story == other._story and
self._statistic == other._statistic)
def __str__(self):
return 'Values'
def Start(self, change, isolate_hash):
del change
return _ReadHistogramsJsonValueExecution(self._hist_name, self._tir_label,
self._story, self._statistic,
isolate_hash)
@classmethod
def FromDict(cls, arguments):
chart = arguments.get('chart')
tir_label = arguments.get('tir_label')
trace = arguments.get('trace')
statistic = arguments.get('statistic')
return cls(chart, tir_label, trace, statistic)
class _ReadHistogramsJsonValueExecution(execution.Execution):
def __init__(self, hist_name, tir_label, story, statistic, isolate_hash):
super(_ReadHistogramsJsonValueExecution, self).__init__()
self._hist_name = hist_name
self._tir_label = tir_label
self._story = story
self._statistic = statistic
self._isolate_hash = isolate_hash
self._trace_urls = []
def _AsDict(self):
if not self._trace_urls:
return {}
return {'traces': self._trace_urls}
def _Poll(self):
# TODO(simonhatch): Switch this to use the new perf-output flag instead
# of the chartjson one. They're functionally equivalent, just new name.
histogram_dicts = _RetrieveOutputJson(
self._isolate_hash, 'chartjson-output.json')
histograms = histogram_set.HistogramSet()
histograms.ImportDicts(histogram_dicts)
histograms.ResolveRelatedHistograms()
matching_histograms = histograms.GetHistogramsNamed(self._hist_name)
# Get and cache any trace URLs.
unique_trace_urls = set()
for hist in histograms:
trace_urls = hist.diagnostics.get(reserved_infos.TRACE_URLS.name)
# TODO(simonhatch): Remove this sometime after May 2018. We had a
# brief period where the histograms generated by tests had invalid
# trace_urls diagnostics. If the diagnostic we get back is just a ref,
# then skip.
# https://github.com/catapult-project/catapult/issues/4243
if trace_urls and not isinstance(
trace_urls, diagnostic_ref.DiagnosticRef):
unique_trace_urls.update(trace_urls)
sorted_urls = sorted(unique_trace_urls)
self._trace_urls = [
{'name': t.split('/')[-1], 'url': t} for t in sorted_urls]
# Filter the histograms by tir_label and story. Getting either the
# tir_label or the story from a histogram involves pulling out and
# examining various diagnostics associated with the histogram.
tir_label = self._tir_label or ''
matching_histograms = [
h for h in matching_histograms
if tir_label == histogram_helpers.GetTIRLabelFromHistogram(h)]
# If no story is supplied, we're looking for a summary metric so just match
# on name and tir_label. This is equivalent to the chartjson condition that
# if no story is specified, look for "summary".
if self._story:
matching_histograms = [
h for h in matching_histograms
if self._story == _GetStoryFromHistogram(h)]
# Have to pull out either the raw sample values, or the statistic
result_values = []
for h in matching_histograms:
result_values.extend(self._GetValuesOrStatistic(h))
if not result_values and self._hist_name:
name = 'histogram: %s' % self._hist_name
if tir_label:
name += ' tir_label: %s' % tir_label
if self._story:
name += ' story: %s' % self._story
raise ReadValueError('Could not find values matching: %s' % name)
self._Complete(result_values=tuple(result_values))
def _GetValuesOrStatistic(self, hist):
if not self._statistic:
return hist.sample_values
if not hist.sample_values:
return []
# TODO(simonhatch): Use Histogram.getStatisticScalar when it's ported from
# js.
if self._statistic == 'avg':
return [hist.running.mean]
elif self._statistic == 'min':
return [hist.running.min]
elif self._statistic == 'max':
return [hist.running.max]
elif self._statistic == 'sum':
return [hist.running.sum]
elif self._statistic == 'std':
return [hist.running.stddev]
elif self._statistic == 'count':
return [hist.running.count]
raise ReadValueError('Unknown statistic type: %s' % self._statistic)
def _ResultValuesFromHistogram(buckets):
total_count = sum(bucket['count'] for bucket in buckets)
result_values = []
for bucket in buckets:
# TODO: Assumes the bucket is evenly distributed.
bucket_mean = (bucket['low'] + bucket.get('high', bucket['low'])) / 2
if total_count > 10000:
bucket_count = 10000 * bucket['count'] / total_count
else:
bucket_count = bucket['count']
result_values += [bucket_mean] * bucket_count
return tuple(result_values)
class ReadGraphJsonValue(quest.Quest):
def __init__(self, chart, trace):
self._chart = chart
self._trace = trace
def __eq__(self, other):
return (isinstance(other, type(self)) and
self._chart == other._chart and
self._trace == other._trace)
def __str__(self):
return 'Values'
def Start(self, change, isolate_hash):
del change
return _ReadGraphJsonValueExecution(self._chart, self._trace, isolate_hash)
@classmethod
def FromDict(cls, arguments):
chart = arguments.get('chart')
trace = arguments.get('trace')
if not (chart or trace):
return None
if chart and not trace:
raise TypeError('"chart" specified but no "trace" given.')
if trace and not chart:
raise TypeError('"trace" specified but no "chart" given.')
return cls(chart, trace)
class _ReadGraphJsonValueExecution(execution.Execution):
def __init__(self, chart, trace, isolate_hash):
super(_ReadGraphJsonValueExecution, self).__init__()
self._chart = chart
self._trace = trace
self._isolate_hash = isolate_hash
def _AsDict(self):
return {}
def _Poll(self):
graphjson = _RetrieveOutputJson(self._isolate_hash, 'chartjson-output.json')
if self._chart not in graphjson:
raise ReadValueError('The chart "%s" is not in the results.' %
self._chart)
if self._trace not in graphjson[self._chart]['traces']:
raise ReadValueError('The trace "%s" is not in the results.' %
self._trace)
result_value = float(graphjson[self._chart]['traces'][self._trace][0])
self._Complete(result_values=(result_value,))
def _RetrieveOutputJson(isolate_hash, filename):
# TODO: Plumb isolate_server through the parameters. crbug.com/822008
server = 'https://isolateserver.appspot.com'
output_files = json.loads(isolate.Retrieve(server, isolate_hash))['files']
if filename not in output_files:
raise ReadValueError("The test didn't produce %s." % filename)
output_json_isolate_hash = output_files[filename]['h']
return json.loads(isolate.Retrieve(server, output_json_isolate_hash))
def _GetStoryFromHistogram(hist):
stories = hist.diagnostics.get(reserved_infos.STORIES.name)
if stories and len(stories) == 1:
return list(stories)[0]
return None
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
00ee6e8b2941d6e3cd3d1713cf36490b5754624e
|
28cab1ef484a5796fc9b0897043e918f9a28e650
|
/account/urls/user.py
|
caca4ac46ecc06a22eb78a92276ad522c397c750
|
[] |
no_license
|
bxxfighting/dalangshen
|
12cb58d2078804327dbf7a01be0fc2a0d27f4495
|
e174147b8778c188941d5fd0f5e33de65afc8b00
|
refs/heads/main
| 2023-01-15T08:07:57.429342
| 2020-11-16T03:49:34
| 2020-11-16T03:49:34
| 313,184,879
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
from django.urls import path
from account.apis import user as user_api
urlpatterns = [
path('user/login/', user_api.LoginApi.as_view()),
path('user/logout/', user_api.LogoutApi.as_view()),
path('user/', user_api.UserApi.as_view()),
path('user/current/', user_api.CurrentUserApi.as_view()),
path('user/list/', user_api.ListUserApi.as_view()),
path('user/create/', user_api.CreateUserApi.as_view()),
path('user/update/', user_api.UpdateUserApi.as_view()),
path('user/delete/', user_api.DeleteUserApi.as_view()),
]
|
[
"boxingxing@limikeji.com"
] |
boxingxing@limikeji.com
|
8b4246df4e9e8bb970c0809d972016ef7188b9f1
|
b8d7c4e3476aae5c3bba7ffa28311f84fda5af9e
|
/main/apps/cart/views.py
|
0c406e7f828ea18dc57dda560f00ccd7024a78e5
|
[] |
no_license
|
zhongzhiqiang/hotel-api
|
1744b8ecb63c4626f7a90f6f04f073aab052b312
|
25703713d0e8ab2314e07e983b98506a3551e762
|
refs/heads/master
| 2020-03-26T08:53:06.776003
| 2019-01-20T09:23:39
| 2019-01-20T09:23:39
| 144,724,134
| 0
| 0
| null | 2018-10-12T13:29:20
| 2018-08-14T13:28:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,081
|
py
|
# coding:utf-8
# Time : 2018/10/15 下午10:47
# Author : Zhongzq
# Site :
# File : views.py
# Software: PyCharm
from __future__ import unicode_literals
from rest_framework import mixins, viewsets, status
from rest_framework.response import Response
from rest_framework.decorators import list_route
from main.apps.cart import serializers
from main.models import Cart
class CartViews(mixins.CreateModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
"""
create:
在自己的购物车新增一件商品
如果购物车有一样的商品会合并.如果传递数字小于等于0 则会删除
list:
返回当前用户的购物车
update:
更新购物车商品某个商品
empty_cart:
清空购物车。什么都不用传递。直接post
"""
queryset = Cart.objects.all()
serializer_class = serializers.CartSerializers
def get_queryset(self):
return self.queryset.filter(consumer=self.request.user.consumer)
def perform_create(self, serializer):
serializer.save(consumer=self.request.user.consumer)
def create(self, request, *args, **kwargs):
post_data = request.data
if post_data.get("nums") <= 0:
cart = self.queryset.filter(goods__id=post_data.get("goods")).first()
if cart:
cart.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
else:
return Response(status=status.HTTP_400_BAD_REQUEST, data={"non_field_errors": "传递错误"})
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@list_route(methods=["POST"])
def empty_cart(self, request, *args, **kwargs):
self.get_queryset().delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
[
"200730249@qq.com"
] |
200730249@qq.com
|
de499b1d1ee6eebeb74c84cbf98ec9a1e9bfa0ad
|
84bd24e7aba23c7775f52d51c754f14601e28b61
|
/cars/migrations/0015_auto_20201222_0911.py
|
53cd0970e12348b27aef4a29fb5a55ef15ccf7ed
|
[] |
no_license
|
hamzaumar8/sandvet
|
c0ad473e8f2f97d1c5bf5104e034e731ac0a0add
|
7f02d24f1b50cd4f64beff618b6d9c508b7a42d4
|
refs/heads/master
| 2023-02-18T01:28:25.252360
| 2021-01-18T19:26:39
| 2021-01-18T19:26:39
| 310,844,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
# Generated by Django 3.0.6 on 2020-12-22 01:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cars', '0014_auto_20201222_0911'),
]
operations = [
migrations.AlterField(
model_name='schoolimage',
name='school',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='schoolimage', to='cars.School'),
),
]
|
[
"humar6078@gmail.com"
] |
humar6078@gmail.com
|
1cd7f7fe2262f547b545bce5583d232fd3056bcb
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/prepositions/_beside.py
|
412a30036868081724ff1297f7950ab0b9365210
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
#calss header
class _BESIDE():
def __init__(self,):
self.name = "BESIDE"
self.definitions = [u'at the side of, next to: ', u'compared to another person or thing: ', u'to be in no way connected to the subject that is being discussed: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'prepositions'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
a807a1a843bf88cf36512e099d3aaca3261e2f3e
|
de9b8b7192a0a81e9249823bb2b86f0b7e452863
|
/.history/main_20171106232335.py
|
96c596fc3fcc185c546af76e74da5916cad83166
|
[
"MIT"
] |
permissive
|
reecebenson/uwe-dadsa-tennis-a
|
f5eaeb1b96d4e61f29279514e68eeea8ad6533db
|
d0763f819b300fcd0ce27041f5bc4ef0519c00bf
|
refs/heads/master
| 2023-07-08T16:13:23.963348
| 2017-11-30T12:07:01
| 2017-11-30T12:07:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,056
|
py
|
# DADSA - Assignment 1
# Reece Benson
import random
from classes import Menu as Menu
from classes import Handler as Handler
class App():
# Define the variables we will be using
debug = True
handler = None
# Define all of the properties we will need to use
def __init__(self):
# Load our handler
self.handler = Handler.Handler(self)
self.handler.load()
# Generate rounds
self.generate_rounds()
# Hold the program
self.exit()
# Generate our rounds from our player list from scratch
def generate_rounds(self):
# Write our new data to memory
for seasonId in self.handler.get_seasons():
season = self.handler.get_season(seasonId)
players = season.players()
# Our Round Data should be completely empty
round_data = { }
# Generate our rounds
for gender in players:
# Generate 'x' amount of rounds
for r in range(season.settings()['round_count']):
# Default Round Cap
round_cap = 3
# Create our gendered rounds
if(not gender in round_data):
# Do we have a Round Cap overrider for this gender?
if(gender + "_cap" in season.settings()):
roundCap = season.settings()[gender + "_cap"]
# Update our round data
round_data.update({ "round_"+str(r): { gender: [ { "_roundCap": round_cap } ] } })
# Create our match data from players
rand_players = random.sample(players[gender], len(players[gender]))
for i in range(int(len(rand_players) / 2 )):
# Grab our versus players
p_one = rand_players[i * 2]
p_two = rand_players[(i * 2) + 1]
# Generate some scores
p_one_score = random.randint(0, round_cap - 1)
p_two_score = random.randint(0, round_cap - 1)
# Make a random player the winner
who = random.randint(0, 1)
if(who == 0): p_one_score = round_cap
else: p_two_score = round_cap
# Append our random data as a Match
#round_data[gender].append({ p_one.name(): p_one_score, p_two.name(): p_two_score })
round_data[gender]["round_"+str(r)].append(Match.Match("round_"+str(r), p_one, p_two, p_one_score, p_two_score))
# Set our Round Data to our season
season.set_rounds_raw(round_data)
# End of generate_rounds()
# A method which exits the program after the user has pressed the Return key
def exit(self):
input(">>> Press <Return> to terminate the program")
exit()
App()
|
[
"me@reecebenson.me"
] |
me@reecebenson.me
|
9bac4c8027b6b8102d2288a4ae7b4d617d5fded3
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/EightTeV/LongLivedChi0ToMuQQ_MSquark_1500_MChi_494_TuneZ2Star_8TeV_pythia6_cff.py
|
969c146bfb5a013812585e4739862cc042491409
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 2,634
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
source = cms.Source("EmptySource")
generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(1.0),
crossSection = cms.untracked.double(0.0001388),
comEnergy = cms.double(8000.0),
UseExternalGenerators = cms.untracked.bool(False),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
pythiaParameters = cms.vstring(
'MSTJ(22)=1 ! Decay all unstable particles',
'MSTP(95)=0 ! Disable colour reconnection, since it can put colour strings between widely separated partons',
'MSEL=0',
'MSUB(271)=1 ! Squark pair production',
'MSUB(272)=1',
'MSUB(273)=1',
'MSUB(274)=1',
'MSUB(275)=1',
'MSUB(276)=1',
'MSUB(277)=1',
'MSUB(278)=1',
'MSUB(279)=1',
'MSUB(280)=1',
'IMSS(1)=1 ! General MSSM simultaion',
'RMSS(2)=5000. ! M2 mass',
'RMSS(3)=5000. ! M3 mass',
'RMSS(4)=800. ! mu parameter',
'RMSS(5)=2. ! tan Beta',
'RMSS(6)=5000. ! Left slepton mass',
'RMSS(7)=5000. ! Right slepton mass',
'RMSS(10)=5000. ! Left squark mass for third generation',
'RMSS(11)=5000. ! Right sbottom mass',
'RMSS(12)=5000. ! Right stop mass',
'RMSS(13)=5000. ! Left stau mass',
'RMSS(14)=5000. ! Right stau mass',
'IMSS(52)=3 ! Turn on Lepton number violating LQD decay channels with all couplings set to zero',
'RVLAMP(2,1,1)=0.00001 ! Set lambda Prime(2,1,1)',
'MDME(2241,1)=0 ! Turn off LQD decays to neutrinos',
'MDME(2242,1)=0 ! Turn off LQD decays to neutrinos',
'RMSS(1)=500 ! M1 mass',
'RMSS(8)=1500 ! Left squark mass',
'RMSS(9)=1500 ! Right squark mass'
),
parameterSets = cms.vstring('pythiaUESettings',
'pythiaParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"sha1-481a5a2ac9973b7cab990140e7a395466f50a31e@cern.ch"
] |
sha1-481a5a2ac9973b7cab990140e7a395466f50a31e@cern.ch
|
e64c699df93ca5619fa36bd10f267b0786259b19
|
b6b28e1588050597366907223bfcb71464d76734
|
/lr/minibatch_sgd/data_process/read_data.py
|
8f8d526e7b6cb9a6d1cef6757b7a8cac94cc8fb5
|
[
"MIT"
] |
permissive
|
DiracSea/project-sxl
|
ea8af63643a2547493c32c83dc297180c072bd01
|
f458bec818d55f80a5eda461316a22d843fef753
|
refs/heads/master
| 2020-03-10T04:08:42.466142
| 2018-05-20T05:03:13
| 2018-05-20T05:03:13
| 129,184,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,868
|
py
|
import numpy as np
import random
from .db import *
from .tool.combine import first_stack
from .tool.sperate import split_num
#all array
def read_rand_data(batchsize,table,db):#yield batch
rand = conn_rand(db,table,'112.74.45.185',3306,'root','opal123456!@#')###
counter = 0
size = 0
train_num, valid_num = split_num(batchsize)
for row in rand.export():
if size%batchsize == 0:
T = [];V = [];flag = 1;flag1 = 1
if row:
row = np.array(row)
counter += 1
size += 1
if size%batchsize != 0:
if counter%batchsize < train_num:
T,flag = first_stack(T,row,flag)
else:
V,flag1 = first_stack(V,row,flag1)
else:
yield T,V
else:
yield T,V
def del_label(table,db):
block = conn_block(db,table,'112.74.45.185',3306,'root','opal123456!@#')###
for b in block.export():
a = np.array(b)
yield a[:,1:]
def read_single_block(blank,table,db):
for block in del_label(table,db):
if block!= np.array([]):
batchsize = len(block)
train_num, valid_num = split_num(batchsize)
T = [];V = [];flag = 1;flag1 = 1
np.random.shuffle(block)
counter = 0
for row in block:
counter += 1
if counter%batchsize < train_num:
T,flag = first_stack(T,row,flag)
else:
V,flag1 = first_stack(V,row,flag1)
yield T,V#batch
def read_all_block(table,db):
for block in del_label(table,db):
if block!= np.array([]):
seed = int(random.random()*10)
if(seed < 7):
yield block,"train"
else:
yield block,"valid"
|
[
"sulz@mail.sustc.edu.cn"
] |
sulz@mail.sustc.edu.cn
|
61b7547ed5510ee1d2ee0d78be17f4572f61d01e
|
1d717c797e93b451f7da7c810a0fb4075b1050d5
|
/src/preprocessors/naive_preprocessor.py
|
246a888db14a6870701bf64b6726d191337ee985
|
[] |
no_license
|
jessie0624/nlp-task
|
32338b08051a3ea192db2bf74c9c969bdff1f6ad
|
aaeeed86341356d9fd061664f6f7bccf2ac353d0
|
refs/heads/master
| 2023-01-24T12:06:13.323646
| 2020-12-10T08:38:23
| 2020-12-10T08:38:23
| 292,151,135
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,565
|
py
|
'''
@description: 最简单的预处理
'''
import pandas as pd
from tqdm import tqdm
from src.base import BasePreprocessor, units
from src.preprocessors import apply_on_df_columns
from src.tools.build_unit import build_vocab_unit, chain_transform
tqdm.pandas()
class NaivePreprocessor(BasePreprocessor):
"""Define Naive preprocessors"""
def fit(self, data: pd.DataFrame, columns: list, verbose: int=1):
func = chain_transform(self._default_units())
# 应用所有的是转换
data = apply_on_df_columns(data, columns, func, verbose=verbose)
vocab_unit = build_vocab_unit(data, columns=columns, verbose=verbose)
self._context['vocab_unit'] = vocab_unit
return self
def transform(self, data: pd.DataFrame, columns: list,
verbose: int=1) -> pd.DataFrame:
"""
Apply transformation on data, create truncated length, representation.
"""
units_ = self._default_units()
units_.append(self._context['vocab_unit'])
units_.append(
units.TruncatedLength(text_length=30, truncate_mode='post')
)
func = chain_transform(units_)
data = apply_on_df_columns(data, columns, func, verbose=verbose)
for col in columns:
data[col+'_len'] = data[col].apply(len)
empty_id = data[data[col+'_len'] == 0].index.tolist()
data.drop(index=empty_id, axis=0, inplace=True)
data.dropna(axis=0, inplace=True)
data.reset_index(drop=True, inplace=True)
return data
|
[
"jessie_lijie@126.com"
] |
jessie_lijie@126.com
|
0f88316bf11c35e936d8f86e044b31b12973dbe9
|
43f0c93802ef62c03388006cdae18c62de4d3295
|
/setup.py
|
524d362968902bc8a4e648bf8419ebe2c4b0c37a
|
[
"MIT"
] |
permissive
|
pombredanne/qtstyles
|
e05f67f4d0f58284ae5b5c50909f23090f5bf278
|
de962879e36be305572b0c5fb5c4ddcfeda5afe0
|
refs/heads/master
| 2020-04-27T00:58:55.044676
| 2018-10-20T05:19:33
| 2018-10-20T05:19:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
from setuptools import setup, find_packages
setup(name="qtstyles",
version="0.0.2",
install_requires=[
"QtPy>=1.4.1"
],
description="A collection of Qt style sheets and helpful classes for applying them.",
long_description=open("README.md").read(),
# https://setuptools.readthedocs.io/en/latest/setuptools.html#including-data-files
package_data={"qtstyles": ["style_sheets/*.qss"]}, # include style sheets
author="Simon Garisch",
author_email="gatman946@gmail.com",
url="https://github.com/simongarisch/qtstyles",
packages=find_packages()
)
|
[
"gatman946@gmail.com"
] |
gatman946@gmail.com
|
cb448cc57982cd1d11cc353decfa6f00bac6d2d2
|
35e6605da2d105158d4ce3aa8230f650ba965651
|
/v7/meta_template/meta_template.py
|
b1eb9b573778b911af44b802b56501e7968fc974
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later"
] |
permissive
|
getnikola/plugins
|
8a24d00d9ca17ef075c49925d9945b059eeed849
|
9de663884ba5f15153d37e527ade6f55e42661a3
|
refs/heads/master
| 2023-08-29T23:38:25.184763
| 2023-08-06T12:58:33
| 2023-08-06T12:58:33
| 13,049,233
| 62
| 104
|
MIT
| 2023-08-06T12:55:44
| 2013-09-23T22:50:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,442
|
py
|
# -*- coding: utf-8 -*-
# Copyright © 2016 Manuel Kaufmann
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from nikola.plugin_categories import RestExtension
class Plugin(RestExtension):
name = "meta_template"
def set_site(self, site):
self.site = site
MetaTemplate.site = site
return super(Plugin, self).set_site(site)
class MetaTemplate(Directive):
""" Restructured text extension for inserting custom templates."""
option_spec = {
'title': directives.unchanged,
'href': directives.unchanged,
'url': directives.unchanged,
'target': directives.unchanged,
'src': directives.unchanged,
'style': directives.unchanged,
}
has_content = True
required_arguments = 1
optional_arguments = 0
def __init__(self, *args, **kwargs):
super(MetaTemplate, self).__init__(*args, **kwargs)
def run(self):
template_name = self.arguments[0] + '.tmpl'
self.options.update({
'content': self.content,
})
output = self.site.template_system.render_template(
template_name,
None,
self.options,
)
return [nodes.raw('', output, format='html')]
directives.register_directive('template', MetaTemplate)
|
[
"humitos@gmail.com"
] |
humitos@gmail.com
|
dff2c4c6b24ea68093845fe8c8cc96b6c0b00eb6
|
4f7962d02254ab6e5cf692648c933394ff41c79d
|
/component_sdk/python/tests/google/bigquery/test__query.py
|
06d91a42747f7c24d3454014f3d87a395c35ebae
|
[
"Apache-2.0"
] |
permissive
|
yebrahim/pipelines
|
5414131f5ab176aa7607114e3a0d23db73f5c8c8
|
77df6c2438f4cf6b81c97ecf4dac9fdbac0e3132
|
refs/heads/master
| 2020-04-08T13:23:50.628537
| 2019-03-01T18:35:47
| 2019-03-01T18:35:47
| 159,389,183
| 1
| 0
|
Apache-2.0
| 2018-11-27T19:37:57
| 2018-11-27T19:37:56
| null |
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from google.cloud import bigquery
from google.api_core import exceptions
from kfp_component.google.bigquery import query
CREATE_JOB_MODULE = 'kfp_component.google.bigquery._query'
@mock.patch(CREATE_JOB_MODULE + '.display.display')
@mock.patch(CREATE_JOB_MODULE + '.gcp_common.dump_file')
@mock.patch(CREATE_JOB_MODULE + '.KfpExecutionContext')
@mock.patch(CREATE_JOB_MODULE + '.bigquery.Client')
class TestQuery(unittest.TestCase):
def test_create_job_succeed(self, mock_client,
mock_kfp_context, mock_dump_json, mock_display):
mock_kfp_context().__enter__().context_id.return_value = 'ctx1'
mock_client().get_job.side_effect = exceptions.NotFound('not found')
mock_response = {
'configuration': {
'query': {
'query': 'SELECT * FROM table_1'
}
}
}
mock_client().query.return_value.to_api_repr.return_value = mock_response
mock_dataset = bigquery.DatasetReference('project-1', 'dataset-1')
mock_client().dataset.return_value = mock_dataset
result = query('SELECT * FROM table_1', 'project-1', 'dataset-1',
output_gcs_path='gs://output/path')
self.assertEqual(mock_response, result)
expected_job_config = bigquery.QueryJobConfig()
expected_job_config.create_disposition = bigquery.job.CreateDisposition.CREATE_IF_NEEDED
expected_job_config.write_disposition = bigquery.job.WriteDisposition.WRITE_TRUNCATE
expected_job_config.destination = mock_dataset.table('table_ctx1')
mock_client().query.assert_called_with('SELECT * FROM table_1',mock.ANY,
job_id = 'query_ctx1')
actual_job_config = mock_client().query.call_args_list[0][0][1]
self.assertDictEqual(
expected_job_config.to_api_repr(),
actual_job_config.to_api_repr()
)
mock_client().extract_table.assert_called_with(
mock_dataset.table('table_ctx1'),
'gs://output/path')
|
[
"k8s-ci-robot@users.noreply.github.com"
] |
k8s-ci-robot@users.noreply.github.com
|
7ca9ac0a216728a647c1da58e0b311e1690ce6e1
|
922a4f63f71e8833ecb240387d675ddfddf13845
|
/PythonProgrammingAssignmentsII/Q20.py
|
042c6100010d3493d297cb3fed73f20a55511bfb
|
[] |
no_license
|
asmitbhantana/Insight-Workshop
|
0ed9e6de49dc15f0447166227f404f108ffaad2e
|
54f9ce92fe47a01b08440d20aa850dfc97fa0423
|
refs/heads/master
| 2022-11-19T19:14:56.557014
| 2020-07-24T07:32:12
| 2020-07-24T07:32:12
| 275,709,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
"""
20. Write a Python class to find the three elements that sum to zero
from a list of n real numbers.
Input array : [-25, -10, -7, -3, 2, 4, 8, 10]
Output : [[-10, 2, 8], [-7, -3, 10]]
"""
if __name__ == '__main__':
usr_list = [-25, -10, -7, -3, 2, 4, 8, 10]
required_result = 0
usr_list.sort()
required_result_num_list = []
for i in range(len(usr_list)):
if usr_list[i] >= required_result:
break
for j in range(i + 1, len(usr_list)):
if usr_list[i]+usr_list[j] >= required_result:
break
for k in range(j + 1, len(usr_list)):
c_sum = usr_list[i] + usr_list[j] + usr_list[k]
if c_sum > required_result:
break
elif c_sum == required_result:
required_result_num_list.append([usr_list[i], usr_list[j], usr_list[k]])
break
print(required_result_num_list)
|
[
"bhantanasmit@gmail.com"
] |
bhantanasmit@gmail.com
|
99cd86f3d8ff4704dcb4b37bf6424a04ccda5c61
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/4c0889e8fcee6c8be9fef33887480747c227725d-<cmd_build>-bug.py
|
435761d91c7ec686f1c2a16c517b74393ddf97ed
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
def cmd_build(self):
result = dict(changed=False, actions=[])
if (not self.check_mode):
for service in self.project.get_services(self.services, include_deps=False):
if service.can_be_built():
self.log(('Building image for service %s' % service.name))
old_image_id = ''
try:
image = service.image()
if (image and image.get('Id')):
old_image_id = image['Id']
except NoSuchImageError:
pass
except Exception as exc:
self.client.fail(('Error: service image lookup failed - %s' % str(exc)))
try:
new_image_id = service.build(pull=True, no_cache=self.nocache)
except Exception as exc:
self.client.fail(('Error: build failed with %s' % str(exc)))
if (new_image_id not in old_image_id):
result['changed'] = True
result['actions'].append(dict(service=service.name, built_image=dict(name=service.image_name, id=new_image_id)))
return result
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
b38b46fd26f5a49bcaa3e1b5de0b4f3f25a2e70a
|
d272b041f84bbd18fd65a48b42e0158ef6cceb20
|
/catch/datasets/gyrovirus_gyv7-sf.py
|
5cf9a7265da81fb03c45e238b7ce53b151c9e6c3
|
[
"MIT"
] |
permissive
|
jahanshah/catch
|
bbffeadd4113251cc2b2ec9893e3d014608896ce
|
2fedca15f921116f580de8b2ae7ac9972932e59e
|
refs/heads/master
| 2023-02-19T13:30:13.677960
| 2021-01-26T03:41:10
| 2021-01-26T03:41:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
"""Dataset with 'Gyrovirus GyV7-SF' sequences.
A dataset with 1 'Gyrovirus GyV7-SF' genomes.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetSingleChrom
ds = GenomesDatasetSingleChrom(__name__, __file__, __spec__)
ds.add_fasta_path("data/gyrovirus_gyv7-sf.fasta.gz", relative=True)
sys.modules[__name__] = ds
|
[
"hmetsky@gmail.com"
] |
hmetsky@gmail.com
|
72c1be6bcfb8580304d1dc0d10de7f18699c9b28
|
ec19603130dddeb4b8298ee020965030d66edc81
|
/src/networkService/servicos/informacao/informacaoQt.py
|
19fd21829654effa14a3be1fdce01111d7712a16
|
[] |
no_license
|
tassio/NetworkService
|
9a5f08c0e3b92cbe34fc99c36e80f57fcbd258f0
|
f800d48d8af94bf8d927fd440eab7a1c40296066
|
refs/heads/master
| 2016-09-09T23:33:14.584056
| 2012-12-13T15:06:24
| 2012-12-13T15:06:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,885
|
py
|
#-*- coding: utf-8 -*-
from PyQt4.QtNetwork import QNetworkCacheMetaData, QHostAddress
from PyQt4.QtGui import QColor, QBrush, QCursor, QFont, QIcon, QImage, QKeySequence, QListWidgetItem, QMatrix, \
QPainterPath, QPen, QPicture, QPixmap, QPolygon, QPolygonF, QQuaternion, QRegion, QSizePolicy, QStandardItem, \
QTableWidgetItem, QTextLength, QTextFormat, QTransform, QTreeWidgetItem, QVector2D, QVector3D, QVector4D
from PyQt4.QtCore import QUuid, QUrl, QSize, QSizeF, QRegExp, QRectF, QRect, QPoint, QPointF, QLocale, QLine, \
QLineF, QDateTime, QTime, QDate, QByteArray, QBitArray
from networkService.servicos.informacao.informacao import InformacaoAbstrata
from networkService.servicos.informacao.dataManipulador import DataManipulador
from networkService.servicos.informacao.registroInformacao import RegistroInformacao
@RegistroInformacao.addInformacaoHandler(
QColor, QNetworkCacheMetaData, QBrush, QHostAddress, QCursor,
QFont, QIcon, QImage, QKeySequence, QListWidgetItem, QMatrix,
QPainterPath, QPen, QPicture, QPixmap,
QPolygonF, QPolygon, QQuaternion, QRegion, QSizePolicy,
QStandardItem, QTableWidgetItem, QTextLength, QTextFormat,
QTransform, QTreeWidgetItem, QVector2D, QVector3D, QVector4D,
QUuid, QUrl, QSizeF, QSize, QRegExp, QRectF, QRect, QPointF,
QPoint, QLocale, QLineF, QLine, QDateTime, QTime, QDate,
QByteArray, QBitArray
)
class QInformacao(InformacaoAbstrata):
"""Classe que guarda qualquer classe do Qt que possa ser serializada e tenha um construtor sem parametros"""
def __lshift__(self, data):
nomeClasse = DataManipulador(data).getNextInstance()
self.valor = eval(nomeClasse)()
data >> self.valor
def __rshift__(self, data):
DataManipulador(data).addInstance(self.valor.__class__.__name__)
data << self.valor
|
[
"tassio@infox.com.br"
] |
tassio@infox.com.br
|
129863d00cccb8a19b5adbe2d12eaf8deed86c74
|
bebe65ae5ea5d15eca9a388ddf86ca5b352762a6
|
/bin/bubbles
|
5b12642f3e732cd6140f54e107df357d82c1eebb
|
[
"MIT",
"LicenseRef-scancode-saas-mit"
] |
permissive
|
biswapanda/bubbles
|
f65aa11b129cf272be1205ef1fd8f885b215216d
|
6c6bd7b378e53bc0edcbbb35c2211922e1cb2100
|
refs/heads/master
| 2021-01-17T19:16:58.100977
| 2013-12-08T21:31:21
| 2013-12-08T21:31:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,839
|
#! /usr/bin/env python3
#
"""
Bubbles command line tool
For more information run: bubbles --help
Paths:
* /etc/bubbles:~/.bubbles:.bubbles
Config:
* config.ini
Author: Stefan Urbanek <stefan.urbanek@gmail.com>
"""
import argparse
import json
import sys
import configparser
import os.path
import argparse
import re
from bubbles import *
class ToolError(Exception):
"""Just exception"""
pass
CONFIG_PATHS = ['/etc/bubbles', \
'~/.bubbles', \
'./bubbles']
def load_config(args):
paths = CONFIG_PATHS + (args.config if args.config else [])
config = configparser.SafeConfigParser()
for path in paths:
config_file = os.path.join(path, "config.ini")
if os.path.exists(config_file):
config.read(config_file)
if config.has_section("extensions"):
extensions = config.options("extensions")
for extension in extensions:
mod_name = config.get("extensions", extension)
import_extension(extension, mod_name)
def import_extension(extension_name, module_name=None):
"""Imports a bubbles tool extension from module `module_name`.
Note: extension name is not used yet module_name is specified. Might be used in the future to allow
different modules replace extensions with same name.
"""
# FIXME: this is from brewery tool
module = __import__(module_name or extension_name)
def create_context(args):
if args.empty:
context = OperationContext()
else:
context = default_context
# Dummy request for an operation - forces automatic loading
context.operation_list("distinct")
modules = args.module or []
for name in modules:
module = __import__(name)
context.add_operations_from(mod)
return context
def opcatalogue(context, args):
"""Print all operations in the context."""
keys = list(context.operations.keys())
keys.sort()
reps = set(args.representation)
selection = []
# Select only operations with signatures matching reps
for opname in keys:
ops = context.operations[opname]
for op in ops:
if not reps or reps and (reps & set(op.signature.signature)):
selection.append(opname)
for opname in selection:
print("%s" % opname)
if args.signatures:
ops = context.operations[opname]
for op in ops:
if not reps or reps and (reps & set(op.signature.signature)):
sig = ", ".join(op.signature.signature)
print(" (%s)" % sig)
def run_pipe(args):
# Collect operations
pattern = re.compile(r"^(\w+)=(.*)")
templates = []
attribs = {}
current = None
# Cllect nodes and attributes
#
# node name pattern: node_name
# attribute pattern: attribute=value
#
for arg in args.node:
match = pattern.match(arg)
if match:
(attribute, value) = match.groups()
attribs[attribute] = value
else:
if current:
templates.append( (current, attribs) )
attribs = {}
current = arg
if current:
templates.append( (current, attribs) )
pipe = Pipeline()
nodes = []
for template, attribs in templates:
try:
node = brewery.nodes.create_node(template)
except KeyError:
sys.stderr.write("ERROR: unknown node %s\n" % template)
exit(1)
node.configure(attribs)
stream.add(node)
nodes.append(node)
if last_node:
stream.connect(last_node, node)
last_node = node
# If first node is not source node, then we add CSV node with standard
# input
if not isinstance(nodes[0], brewery.nodes.SourceNode):
node = brewery.nodes.create_node("csv_source")
node.resource = sys.stdin
stream.add(node)
stream.connect(node, nodes[0])
if not isinstance(nodes[-1], brewery.nodes.TargetNode):
node = brewery.nodes.create_node("csv_target")
node.resource = sys.stdout
stream.add(node)
stream.connect(nodes[-1], node)
stream.run()
################################################################################
# Main code
main_parser = argparse.ArgumentParser(description='Bubbles command lite tool')
main_parser.add_argument('--config',
action='append',
help='bubbles configuration file')
main_parser.add_argument('-m', '--module',
action='append',
help='list of python modules to be loaded and inspected '
'for potential operations')
main_parser.add_argument('--empty',
action='store_true',
help='start with empty context (requires -m)')
subparsers = main_parser.add_subparsers(title='commands', help='additional help')
################################################################################
# Command: operation catalogue
op_parser = subparsers.add_parser('op')
op_subparsers = op_parser.add_subparsers(title='operation commands')
subparser = op_subparsers.add_parser("list", help = "list available operations")
subparser.add_argument('-r', '--representation', action='append',
help="show operations having specified rep in signature")
subparser.add_argument('--signatures', action='store_true',
help="show also operation signatures")
subparser.set_defaults(func=opcatalogue)
################################################################################
# Command: pipe
subparser = subparsers.add_parser('pipe',
help="create a simple Brewery node pipe",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
There should be at least one operation specified.
The arguments are either operations or operation
arguments. Attributes follow node name and have format:
attribute=value
If there is no source node, then CSV source on standard
input is assumed. If there is no target node, then CSV
target on standard output is assumed.
''')
)
subparser.add_argument('op', nargs="+", help='list of operations')
subparser.set_defaults(func=run_pipe)
#
args = main_parser.parse_args(sys.argv[1:])
context = create_context(args)
load_config(args)
if "func" in args:
try:
args.func(context, args)
except ToolError as e:
sys.stderr.write("Error: %s" % str(e))
exit(1)
else:
main_parser.print_help()
|
[
"stefan@agentfarms.net"
] |
stefan@agentfarms.net
|
|
5a1ad840edc1d0ca68d0087d4ec58a1799c74647
|
6843258fe430c67ffa01e909d1650df390369d93
|
/errata_tool/__init__.py
|
d0efa742971710c04b6bc0d10dfd7c1f99727682
|
[
"MIT"
] |
permissive
|
ralphbean/errata-tool
|
86df0c5a5bdd65d62e01653f003ac2ecf3e2f092
|
d7f999a34ef2780e5218b071a4cd99b35b8702de
|
refs/heads/master
| 2021-05-15T05:34:04.761851
| 2017-11-18T03:59:38
| 2017-12-14T21:19:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
from .exception import ErrataException
from .connector import ErrataConnector
from .erratum import Erratum
__all__ = ['ErrataException', 'ErrataConnector', 'Erratum']
__version__ = '1.9.0'
|
[
"kdreyer@redhat.com"
] |
kdreyer@redhat.com
|
735cbb7dfb9b821b50fe2b7be81fe9770ca3d8d1
|
ef16d4d796588cbf0d5cb0f84727812e7866f92e
|
/myvenv/bin/symilar
|
e547e7e0d4da4224aa8441aff9d3b4c6c945a143
|
[] |
no_license
|
shortnd/django_blog
|
5b24f4c40cd79181a946de6f7edecc9490279839
|
aaa8c92e3281924b2e2ece54338899c0879ee7b2
|
refs/heads/master
| 2020-05-03T17:19:54.363860
| 2019-03-31T21:04:23
| 2019-03-31T21:04:23
| 178,742,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
#!/Users/kortr/code/python/djangogirls/myvenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
|
[
"ococncol@gmail.com"
] |
ococncol@gmail.com
|
|
066a1c5df73dd3d29347781664c88fc58a2ca994
|
019c78e21f861b6a56800877082a9c155dd8fb5f
|
/niveau-02/chapitre-4-fonctions/02-deux-codes-secrets-obligatoire.py
|
f63d0032b5d7f7e7b581e0a0a96d1af4008a9cd2
|
[] |
no_license
|
donnerc/oci-prog-exos
|
0c0bd50a93896826251e343baa9836e6d09fc9fd
|
de5da49fb8a3df56ef12c4f9ea284d476e999434
|
refs/heads/master
| 2021-01-23T13:16:59.304820
| 2015-01-13T15:57:22
| 2015-01-13T15:57:22
| 13,173,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
##################################
# fichier 02-deux-codes-secrets-obligatoire.py
# nom de l'exercice : Deux codes secrets
# url : http://www.france-ioi.org/algo/task.php?idChapter=509&idTask=0&sTab=task&iOrder=3
# type : obligatoire
#
# Chapitre : chapitre-4-fonctions
#
# Compétence développée :
#
# auteur :
##################################
# chargement des modules
# mettre votre code ici
|
[
"cedonner@gmail.com"
] |
cedonner@gmail.com
|
a538af6a464fa56591c72692d25ab74aa2ef4463
|
7857b4f02001c3e0ac0317fa501a4bacc8ea335b
|
/logic_tutorial.py
|
20c0ea7cc1dc5ceb8e4405692f0ade77a4859382
|
[
"MIT"
] |
permissive
|
twtrubiks/leetcode-python
|
65a2035fe2c9e4a65b09f5d65df7b24be385d6fc
|
e46b32f6de4c0711ef44b7f2a482dc59657aa5e5
|
refs/heads/master
| 2022-01-12T01:34:50.628413
| 2022-01-10T06:30:21
| 2022-01-10T06:30:21
| 55,111,802
| 25
| 18
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
def trunk_1(arr_1, size_1):
result_1 = []
while arr:
pop_data = [arr_1.pop(0) for _ in range(size_1)]
result_1.append(pop_data)
return result_1
def trunk_2(arr_2, size_2):
arrs = []
while len(arr_2) > size_2:
pice = arr_2[:size_2]
arrs.append(pice)
arr_2 = arr_2[size:]
arrs.append(arr_2)
return arrs
def trunk_3(arr, size):
result = []
count = 0
while count < len(arr):
result.append(arr[count:count+size])
count += size
return result
if __name__ == "__main__":
'''
arr = [1, 2, 3, 4, 5, 6]
size = 2
result = [[1, 2], [3, 4], [5, 6]]
'''
arr = [1, 2, 3, 4, 5, 6]
size = 2
result = trunk_1(arr, size)
print(result)
|
[
"twtrubiks@gmail.com"
] |
twtrubiks@gmail.com
|
af30001153143516bb60447c5a6baee10e8ce452
|
8d3713030d02e34eb37b149d0bc2a8fd25fec7f7
|
/problem111.py
|
d028894946ae341863fed4e2ca6ad0ba8893cf7f
|
[] |
no_license
|
GlenHaber/euler
|
cd3a34550a0c6189a17fbc26991393ee6a4ab8d6
|
cb3259f375c1f21af7daf79ab19532518765bbc8
|
refs/heads/master
| 2021-01-19T13:06:52.579227
| 2017-06-09T21:07:33
| 2017-06-09T21:07:33
| 100,825,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,010
|
py
|
"""
Primes with runs
Considering 4-digit primes containing repeated digits it is clear that they cannot all be the same: 1111 is divisible by
11, 2222 is divisible by 22, and so on. But there are nine 4-digit primes containing three ones:
1117, 1151, 1171, 1181, 1511, 1811, 2111, 4111, 8111
We shall say that M(n, d) represents the maximum number of repeated digits for an n-digit prime where d is the repeated
digit, N(n, d) represents the number of such primes, and S(n, d) represents the sum of these primes.
So M(4, 1) = 3 is the maximum number of repeated digits for a 4-digit prime where one is the repeated digit, there are
N(4, 1) = 9 such primes, and the sum of these primes is S(4, 1) = 22275. It turns out that for d = 0, it is only
possible to have M(4, 0) = 2 repeated digits, but there are N(4, 0) = 13 such cases.
In the same way we obtain the following results for 4-digit primes.
d M(4, d) N(4, d) S(4, d)
0 2 13 67061
1 3 9 22275
2 3 1 2221
3 3 12 46214
4 3 2 8888
5 3 1 5557
6 3 1 6661
7 3 9 57863
8 3 1 8887
9 3 7 48073
For d = 0 to 9, the sum of all S(4, d) is 273700.
Find the sum of all S(10, d).
"""
from common import is_prime, miller_rabin_test
# Brute force the case in the example to make sure I get it
def n_digit_primes(n):
for i in range(10 ** (n - 1), 10 ** n):
if is_prime(i):
yield i
def M(n, d):
return max(str(num).count(str(d)) for num in n_digit_primes(n))
def N(n, d):
nums = list(n_digit_primes(n))
M = max(str(num).count(str(d)) for num in nums)
return len([n for n in nums if str(n).count(str(d)) == M])
def S(n, d, nums=None):
if nums is None:
nums = list(n_digit_primes(n))
M = max(str(num).count(str(d)) for num in nums)
return sum([n for n in nums if str(n).count(str(d)) == M])
assert sum(S(4, d) for d in range(10)) == 273700
number = [0] * 10
# Shamelessly taken from mathblog.dk
def recurse(basedigit, startpos, level, fill=False):
global number
if level <= 0:
if number[0] == 0:
return 0
n = sum(10 ** i * x for i, x in enumerate(number[::-1]))
return n if miller_rabin_test(n) else 0
res = 0
if fill:
for pos in range(len(number)):
number[pos] = basedigit
for pos in range(startpos, len(number)):
for val in range(10):
number[pos] = val
res += recurse(basedigit, pos + 1, level - 1)
number[pos] = basedigit
return res
total = 0
for d in range(10):
for i in range(1, len(number)):
res = recurse(d, 0, i, True)
if res:
total += res
break
print('Answer:', total)
# primes = list(n_digit_primes(10))
# print(len(primes), 'primes generated')
# print('Answer:', sum(S(10, d, primes) for d in range(10)))
|
[
"GHaber@BehaviorMatrix.com"
] |
GHaber@BehaviorMatrix.com
|
cbb1fc6301940401b020a676152a2dd636acf9ef
|
5dd47abf7061201d9378e73e51f08fbb314ba2fd
|
/envdsys/envcontacts/migrations/0074_auto_20210227_1830.py
|
129e1c19b1364390a394fc2a8aa7265a96484ea8
|
[
"Unlicense"
] |
permissive
|
NOAA-PMEL/envDataSystem
|
4d264ae5209015e4faee648f37608d68a4461d0a
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
refs/heads/master
| 2023-02-23T22:33:14.334737
| 2021-07-22T01:09:16
| 2021-07-22T01:09:16
| 191,809,007
| 1
| 0
|
Unlicense
| 2023-02-08T00:45:54
| 2019-06-13T17:50:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,175
|
py
|
# Generated by Django 3.1.7 on 2021-02-27 18:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('envcontacts', '0073_auto_20210227_1819'),
]
operations = [
migrations.AlterField(
model_name='person',
name='email1_type',
field=models.CharField(choices=[('W', 'Work'), ('H', 'Home'), ('O', 'Other')], default='W', max_length=1),
),
migrations.AlterField(
model_name='person',
name='email2_type',
field=models.CharField(choices=[('W', 'Work'), ('H', 'Home'), ('O', 'Other')], default='W', max_length=1),
),
migrations.AlterField(
model_name='person',
name='phone1_type',
field=models.CharField(choices=[('M', 'Mobile'), ('W', 'Work'), ('H', 'Home'), ('O', 'Other')], default='M', max_length=1),
),
migrations.AlterField(
model_name='person',
name='phone2_type',
field=models.CharField(choices=[('M', 'Mobile'), ('W', 'Work'), ('H', 'Home'), ('O', 'Other')], default='M', max_length=1),
),
]
|
[
"derek.coffman@noaa.gov"
] |
derek.coffman@noaa.gov
|
f03af2a7915d3835033777ee323af7c7ddf60627
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03049/s862641058.py
|
d26e8c01dc0350e1428c4fbcbbbc4791d8acc382
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
n,*s=open(0).read().split()
a=0;b=0;ba=0;ans=0
for s_i in s:
b+=(s_i[0]=='B')*(not s_i[-1]=='A')
a+=(not s_i[0]=='B')*(s_i[-1]=='A')
ba+=(s_i[0]=='B')*(s_i[-1]=='A')
ans+=s_i.count('AB')
print(ans+ba+min(a,b)-(ba>0)*(a+b==0))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ccadf51ea06ea13387ab2c4085caaed98e426aaf
|
525f5ba86e1476d5f0dc396e225d544beb43cd3b
|
/nomenklatura/query/builder.py
|
65e44f59be0c769551f83140ad62fed9b1e909cb
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
he0x/nomenklatura
|
bb47cd9103b03893832b4bda4bd69cba80473976
|
b2e0a989de7aa4a08a63e22982c4904b255dc04a
|
refs/heads/master
| 2021-01-15T08:18:29.257815
| 2015-03-28T20:15:52
| 2015-03-28T20:15:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,177
|
py
|
from sqlalchemy.orm import aliased
from nomenklatura.core import db, url_for
from nomenklatura.schema import attributes
from nomenklatura.model.statement import Statement
from nomenklatura.model.context import Context
# from nomenklatura.model.type import Type
class QueryBuilder(object):
def __init__(self, dataset, parent, node):
self.dataset = dataset
self.parent = parent
self.node = node
self.results = {}
@property
def children(self):
if not hasattr(self, '_children'):
self._children = []
for child_node in self.node.children:
qb = QueryBuilder(self.dataset, self, child_node)
self._children.append(qb)
return self._children
def _add_statement(self, q):
""" Generate a linked statement that can be used in any
part of the query. """
stmt = aliased(Statement)
ctx = aliased(Context)
q = q.filter(stmt.context_id == ctx.id)
q = q.filter(stmt.dataset_id == self.dataset.id)
q = q.filter(ctx.active == True) # noqa
return stmt, q
def filter_value(self, q, stmt):
q = q.filter(stmt._value == self.node.value)
return q
def filter(self, q, stmt):
""" Apply filters to the given query recursively. """
if not self.node.filtered:
return q
filter_stmt, q = self._add_statement(q)
q = q.filter(stmt.subject == filter_stmt.subject)
if self.node.attribute:
q = q.filter(stmt._attribute == self.node.attribute.name)
if self.node.leaf:
return self.filter_value(q, filter_stmt)
for child in self.children:
q = child.filter(q, stmt)
return q
def filter_query(self, parents=None):
""" An inner query that is used to apply any filters, limits
and offset. """
q = db.session.query()
stmt, q = self._add_statement(q)
q = q.add_column(stmt.subject)
if parents is not None and self.node.attribute:
parent_stmt, q = self._add_statement(q)
q = q.filter(stmt.subject == parent_stmt._value)
q = q.filter(parent_stmt._attribute == self.node.attribute.name)
q = q.filter(parent_stmt.subject.in_(parents))
q = self.filter(q, stmt)
q = q.group_by(stmt.subject)
q = q.order_by(stmt.subject.asc())
if self.node.root:
q = q.limit(self.node.limit)
q = q.offset(self.node.offset)
return q
def nested(self):
""" A list of all sub-entities for which separate queries will
be conducted. """
for child in self.children:
if child.node.leaf or not child.node.attribute:
continue
if child.node.attribute.data_type == 'entity':
yield child
def project(self):
""" Figure out which attributes should be returned for the current
level of the query. """
attrs = set()
for child in self.children:
if child.node.blank and child.node.leaf:
attrs.update(child.node.attributes)
attrs = attrs if len(attrs) else attributes
skip_nested = [n.node.attribute for n in self.nested()]
return [a.name for a in attrs if a not in skip_nested]
def base_object(self, data):
""" Make sure to return all the existing filter fields
for query results. """
obj = {
'id': data.get('id'),
'api_url': url_for('entities.view', dataset=self.dataset.slug,
id=data.get('id')),
'parent_id': data.get('parent_id')
}
for child in self.children:
if child.node.leaf and child.node.filtered:
obj[child.node.name] = child.node.raw
return obj
return obj
def get_node(self, name):
""" Get the node for a given name. """
for child in self.children:
if child.node.name == name:
return child.node
return None if name == '*' else self.get_node('*')
def data_query(self, parents=None):
""" Generate a query for any statement which matches the criteria
specified through the filter query. """
filter_q = self.filter_query(parents=parents)
q = db.session.query()
stmt, q = self._add_statement(q)
filter_sq = filter_q.subquery()
q = q.filter(stmt.subject == filter_sq.c.subject)
q = q.filter(stmt._attribute.in_(self.project()))
q = q.add_column(stmt.subject.label('id'))
q = q.add_column(stmt._attribute.label('attribute'))
q = q.add_column(stmt._value.label('value'))
if parents is not None and self.node.attribute:
parent_stmt, q = self._add_statement(q)
q = q.filter(stmt.subject == parent_stmt._value)
q = q.filter(parent_stmt._attribute == self.node.attribute.name)
q = q.add_column(parent_stmt.subject.label('parent_id'))
q = q.order_by(filter_sq.c.subject.desc())
q = q.order_by(stmt.created_at.asc())
return q
def execute(self, parents=None):
""" Run the data query and construct entities from it's results. """
results = {}
for row in self.data_query(parents=parents):
data = row._asdict()
id = data.get('id')
if id not in results:
results[id] = self.base_object(data)
value = data.get('value')
attr = attributes[data.get('attribute')]
if attr.data_type not in ['type', 'entity']:
conv = attr.converter(self.dataset, attr)
value = conv.deserialize_safe(value)
node = self.get_node(data.get('attribute'))
if attr.many if node is None else node.many:
if attr.name not in results[id]:
results[id][attr.name] = []
results[id][attr.name].append(value)
else:
results[id][attr.name] = value
return results
def collect(self, parents=None):
""" Given re-constructed entities, conduct queries for child
entities and merge them into the current level's object graph. """
results = self.execute(parents=parents)
ids = results.keys()
for child in self.nested():
attr = child.node.attribute.name
for child_data in child.collect(parents=ids).values():
parent_id = child_data.pop('parent_id')
if child.node.many:
if attr not in results[parent_id]:
results[parent_id][attr] = []
results[parent_id][attr].append(child_data)
else:
results[parent_id][attr] = child_data
return results
def query(self):
results = []
for result in self.collect().values():
result.pop('parent_id')
if not self.node.many:
return result
results.append(result)
return results
|
[
"friedrich@pudo.org"
] |
friedrich@pudo.org
|
8358602e69b3372bacd7a45ddadd7849c1ccf792
|
650b3dd4cc74f32db78f7d99cef9907aec78a222
|
/dialogs/Report/fPettyCashReport_data.py
|
d224b848def8260b8a4ae4863468aef52b0886ab
|
[] |
no_license
|
mech4/PKTrx
|
29b871ab587434e7c208175c248f48d9b6c80a17
|
cf01bc5be8837d632974786d2419c58b94a0381d
|
refs/heads/master
| 2020-03-29T19:55:07.331831
| 2012-09-18T20:22:52
| 2012-09-18T20:22:52
| 6,289,691
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
import sys
def CetakData(config,parameters,returns):
ret = returns.CreateValues(
['IsErr', 0],
['ErrMessage','']
)
try:
except:
ret.IsErr = 1
ret.ErrMessage = str(sys.exc_info()[1])
|
[
"wisnu27@gmail.com"
] |
wisnu27@gmail.com
|
e82dcd7e2c42de6224abe59e0b0800eb2ca85e3e
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/export_asset_response.py
|
105871097c009b0aa7c5f4c4354c87308f05e410
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,344
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ExportAssetResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'job_id': 'str'
}
attribute_map = {
'job_id': 'job_id'
}
def __init__(self, job_id=None):
"""ExportAssetResponse
The model defined in huaweicloud sdk
:param job_id: 资产导出作业的ID,可用于查询作业进度,获取导出作业进度
:type job_id: str
"""
super(ExportAssetResponse, self).__init__()
self._job_id = None
self.discriminator = None
if job_id is not None:
self.job_id = job_id
@property
def job_id(self):
"""Gets the job_id of this ExportAssetResponse.
资产导出作业的ID,可用于查询作业进度,获取导出作业进度
:return: The job_id of this ExportAssetResponse.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this ExportAssetResponse.
资产导出作业的ID,可用于查询作业进度,获取导出作业进度
:param job_id: The job_id of this ExportAssetResponse.
:type job_id: str
"""
self._job_id = job_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExportAssetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
4ec02b40b4eaef9cd000b9f4fed6b0c691c3f47d
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_21454.py
|
45e7d2122ab9b2a6b0f3906d21dcb18297aff031
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 159
|
py
|
# Updating collections from collections
db.Coll2.find().forEach(function(c2){
db.Coll1.update({isbn:c2.isbn},{$set: {category:c2.category}},{multi:true})
});
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
151f8401dd23cc073bf7bb3fbb5cbf94fb035bc6
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_greyhounds.py
|
b30dc2d9386d20b4a85bd14ebe73422e2417bc96
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
from xai.brain.wordbase.nouns._greyhound import _GREYHOUND
#calss header
class _GREYHOUNDS(_GREYHOUND, ):
def __init__(self,):
_GREYHOUND.__init__(self)
self.name = "GREYHOUNDS"
self.specie = 'nouns'
self.basic = "greyhound"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
dfabfbb7eef76a04289682868648631a818c208c
|
198ac210d70c865367fb73fb3c8f99b06cdd91d0
|
/tests/integration/steam_simulator.py
|
82382ec24aaef6653b83a506c43439f19fc0d0c8
|
[
"BSD-3-Clause"
] |
permissive
|
gutomaia/steamer-py
|
0f2bc6a81bfab6986470b03b370ccf53941432ff
|
7175fb1d79fe6ffc0c31b3e74f62805629e457b2
|
refs/heads/master
| 2021-01-10T19:30:50.429652
| 2013-04-29T13:38:30
| 2013-04-29T13:38:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
import threading
from time import sleep
import web
import requests
urls = (
'/id/(\w+)/stats/(\w+)', 'game_page'
)
class game_page(object):
def GET(self, user, game):
f = open('fixtures/%s-%s.xml' % (user, game))
xml = f.read()
f.close()
web.header('Content-Length', len(xml))
return xml
class SteamSimulator(threading.Thread):
def __init__(self):
super(SteamSimulator, self).__init__()
self._stop = threading.Event()
def run(self):
self.app = web.application(urls, globals())
web.config.default_port = 8080
self.app.internalerror = web.debugerror
self.app.run()
def stop(self):
self.app.stop()
self._stop.set()
def stopped(self):
return self._stop.isSet()
if __name__ == "__main__":
sim = SteamSimulator()
sim.run()
|
[
"guto@guto.net"
] |
guto@guto.net
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.