blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d99e70cf63ab0cd31386b32105d62353563e8104
|
7e099db730027a332cef52c4e539a1c3a39584d7
|
/shell.py
|
79226bed2f104aac2d8f90a5d23985b81099cf9f
|
[] |
no_license
|
haricm/hasweb
|
688108d01950822c9f3813a9a926c11e17529827
|
48e92c46855e4fa14f35090ae1e1c9ceffa5d75f
|
refs/heads/static
| 2020-12-28T06:58:31.525138
| 2016-06-22T21:41:54
| 2016-06-22T21:41:54
| 51,294,404
| 0
| 0
| null | 2016-02-08T11:36:39
| 2016-02-08T11:36:38
| null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
#! /usr/bin/env python
#! -*- coding: utf-8 -*-
# Interactive debugging shell: pre-imports the app's modules into the global
# namespace and forces the interpreter into inspect mode after the script ends.
import os
import readline  # enables line editing / history in the interactive prompt
from pprint import pprint  # convenience pretty-printer for shell sessions
from flask import *  # NOTE(review): wildcard imports deliberately flood the shell namespace
from hasweb import *
# PYTHONINSPECT makes the interpreter drop into interactive mode on exit.
os.environ['PYTHONINSPECT'] = 'True'
|
[
"kiran@hasgeek.com"
] |
kiran@hasgeek.com
|
10dc94fce77ff1027f55411a178940bdfd04d4a5
|
ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1
|
/res/packages/scripts/scripts/common/Lib/plat-mac/lib-scriptpackages/Explorer/Netscape_Suite.py
|
9dc8c9f099abf16d3e2caf20047680e160b01130
|
[] |
no_license
|
webiumsk/WOT-0.9.20.0
|
de3d7441c5d442f085c47a89fa58a83f1cd783f2
|
811cb4e1bca271372a1d837a268b6e0e915368bc
|
refs/heads/master
| 2021-01-20T22:11:45.505844
| 2017-08-29T20:11:38
| 2017-08-29T20:11:38
| 101,803,045
| 0
| 1
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,485
|
py
|
# 2017.08.29 21:58:49 Central Europe (daylight saving time)
# Embedded file name: scripts/common/Lib/plat-mac/lib-scriptpackages/Explorer/Netscape_Suite.py
# NOTE(review): this is decompiled Python 2 source (comma-form raise, dict.has_key);
# it will not run under Python 3 and is kept verbatim.
"""Suite Netscape Suite: Events defined by Netscape
Level 1, version 1
Generated from /Applications/Internet Explorer.app
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS

# Four-character AppleEvent signature code for the Netscape suite.
_code = 'MOSS'


class Netscape_Suite_Events:
    """AppleEvent dispatch methods for the Netscape suite (generated code)."""

    def Open_bookmark(self, _object = None, _attributes = {}, **_arguments):
        """Open bookmark: Opens a bookmark file
        Required argument: If not available, reloads the current bookmark file
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        # NOTE(review): _attributes={} is a shared mutable default — typical of
        # this generated Python 2 code; left unchanged.
        _code = 'MOSS'
        _subcode = 'book'
        if _arguments:
            raise TypeError, 'No optional args expected'
        _arguments['----'] = _object
        # self.send is provided by the aetools mixin this class is combined with.
        _reply, _arguments, _attributes = self.send(_code, _subcode, _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        if _arguments.has_key('----'):
            return _arguments['----']


# Empty declaration tables emitted by the AETE/AEUT code generator.
_classdeclarations = {}
_propdeclarations = {}
_compdeclarations = {}
_enumdeclarations = {}
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\Lib\plat-mac\lib-scriptpackages\Explorer\Netscape_Suite.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:58:49 Central Europe (daylight saving time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
d9c62814e709deaedc82713d90955bb4505d05c9
|
5759c0ed3219c06437ce5b39ef9ad92b5e191fed
|
/py/0814_binary_tree_pruning.py
|
58027f3dc60a087ead471c4791aa442c2fa71a8e
|
[] |
no_license
|
mengnan1994/Surrender-to-Reality
|
ba69df7c36112ad19f19157a9f368eae6340630f
|
66232728ce49149188f863271ec2c57e426abb43
|
refs/heads/master
| 2022-02-25T01:34:49.526517
| 2019-09-22T17:21:28
| 2019-09-22T17:21:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
"""
We are given the head node root of a binary tree, where additionally every node's value is either a 0 or a 1.
Return the same tree where every subtree (of the given tree) not containing a 1 has been removed.
(Recall that the subtree of a node X is X, plus every node that is a descendant of X.)
Example 1:
Input: [1,null,0,0,1]
Output: [1,null,0,null,1]
Explanation:
Only the red nodes satisfy the property "every subtree not containing a 1".
The diagram on the right represents the answer.
Example 2:
Input: [1,0,1,0,0,0,1]
Output: [1,null,1,null,1]
Example 3:
Input: [1,1,0,1,1,0,1,0]
Output: [1,1,0,1,1,null,1]
Note:
1. The binary tree will have at most 100 nodes.
2. The value of each node will only be 0 or 1.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def prune_tree(self, root):
"""
后序遍历,统计子树的节点值之和。若和为 0,剪枝
"""
if root.val == 0 and not root.left and not root.right:
return None
self._postorder_traverse(root)
return root
def _postorder_traverse(self, node : TreeNode):
if not node:
return 0
left = self._postorder_traverse(node.left)
right = self._postorder_traverse(node.right)
if not left:
node.left = None
if not right:
node.right = None
return left + right + node.val
|
[
"hurricane.cui@gmail.com"
] |
hurricane.cui@gmail.com
|
b8d25789b012978f89d53ff49aaf89fe33ba5b8e
|
17acb8e20f9a24b16ce3651302fc2d7fc7b887a6
|
/src/programy/storage/stores/nosql/mongo/store/users.py
|
02d5a2bccdd7221a3337439e37f79782513550ca
|
[
"MIT"
] |
permissive
|
cen-ai/program-y
|
91052fdc11aec0f60311e3429895fac489d8ce54
|
a753667638147544c54dbebd9f1c8f9ae7f2159e
|
refs/heads/master
| 2020-03-22T15:11:07.896885
| 2018-10-15T22:13:58
| 2018-10-15T22:13:58
| 140,234,173
| 5
| 5
|
NOASSERTION
| 2019-01-03T09:09:07
| 2018-07-09T05:11:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,230
|
py
|
"""
Copyright (c) 2016-2018 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.storage.stores.nosql.mongo.store.mongostore import MongoStore
from programy.storage.entities.user import UserStore
from programy.storage.stores.nosql.mongo.dao.user import User
class MongoUserStore(MongoStore, UserStore):
    """Mongo-backed user store: one document per (userid, client) pair."""

    # Collection and field names used by this store.
    USERS = 'users'
    USERID = 'userid'
    CLIENT = 'client'

    def __init__(self, storage_engine):
        MongoStore.__init__(self, storage_engine)

    def collection_name(self):
        """Name of the Mongo collection backing this store."""
        return MongoUserStore.USERS

    def add_user(self, userid, client):
        """Persist a new user document for the given client; always reports True."""
        YLogger.info(self, "Adding user [%s] for client [%s]", userid, client)
        self.add_document(User(userid, client))
        return True

    def get_user(self, userid):
        """Return the first document whose userid matches, or None."""
        return self.collection().find_one({MongoUserStore.USERID: userid})

    def get_client_users(self, client):
        """Return a list of all user documents registered for the given client."""
        return [document for document in self.collection().find({MongoUserStore.CLIENT: client})]
|
[
"edguy@eguy.org"
] |
edguy@eguy.org
|
9c742413d38b0aaf50f7200c1696142e316f6e85
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_scrimps.py
|
64415eec48625a7a2cc7517d1e7bb4590bc19881
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from xai.brain.wordbase.verbs._scrimp import _SCRIMP
# class header
class _SCRIMPS(_SCRIMP, ):
    def __init__(self,):
        """Word entry for "scrimps", delegating data to the base form "scrimp"."""
        _SCRIMP.__init__(self)
        self.name = "SCRIMPS"
        self.specie = 'verbs'      # part-of-speech bucket this entry lives in
        self.basic = "scrimp"      # dictionary (base) form of the word
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
eac6a2b1d70e266e04d31db1b3283e1f1ae7aaae
|
bebc317eebad1e0f136c2d959e5b9882e123d366
|
/game/mario/src/data/level.py
|
45618b67ab8331a23000646413fe4780b9d3a293
|
[
"Apache-2.0"
] |
permissive
|
GuillaumeFalourd/formulas-games
|
62ac5b87e6868db29de022e52fd56993ec9c3c46
|
7530ef09cc85f30157e62b8761868c2a0eddb93d
|
refs/heads/main
| 2023-07-14T04:33:11.666127
| 2021-08-29T23:21:16
| 2021-08-29T23:21:16
| 369,598,803
| 10
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,192
|
py
|
from .sprites import level_1
from .basetypes import Vector2, Rectangle
from . import config as c
from .components.tiles import Question, Brick, Collider_Rect, Flagpole
from .components.items import *
from .components.enemies import *
# Colliders that don't possess velocity
static_colliders = []
# Colliders that possess velocity
dynamic_colliders = []
coins = []
super_mushrooms = []
enemies = []
# Fragments go here when a brick tile gets broken
brick_fragments = []

# Start and End tile for grouping large rows of tiles into one collider
start_tile = None
end_tile = None

# Read pixel data from level map and instantiate objects corresponding to pixel colors
for y in range(0, level_1.size[1]):
    for x in range(0, level_1.size[0]):
        color = level_1.getpixel((x, y))
        pos = Vector2(x * c.TILE_SIZE, y * c.TILE_SIZE + 24)
        # Black = Static ground collider, which are grouped together for optimizations
        if color == c.BLACK:
            if start_tile == None:
                start_tile = pos
            if end_tile == None:
                # BUGFIX: the original tested `x + 1 > level_1.size[0]` (never
                # true, since x < size[0]) and then unconditionally called
                # getpixel((x + 1, y)), which raises IndexError when a black
                # tile sits in the last column. Close the run at the edge
                # instead of peeking past it.
                if x + 1 >= level_1.size[0]:
                    end_tile = pos
                elif level_1.getpixel((x + 1, y)) != c.BLACK:
                    end_tile = pos
            if end_tile != None and start_tile != None:
                w = end_tile.x - start_tile.x + c.TILE_SIZE
                h = c.TILE_SIZE
                rect = Rectangle(start_tile, w, h)
                static_colliders.append(Collider_Rect(rect))
                end_tile = None
                start_tile = None
        # Red = Pipe collider
        elif color == c.RED:
            h = c.SCREEN_SIZE.y - pos.y
            w = 2 * c.TILE_SIZE
            rect = Rectangle(pos, w, h)
            static_colliders.append(Collider_Rect(rect))
        # Yellow = Question tile with coin as item
        elif color == c.YELLOW:
            coin_rect = Rectangle(Vector2(pos.x, pos.y), 48, 42)
            contents = Coin(coin_rect)
            coins.append(contents)
            rect = Rectangle(pos, c.TILE_SIZE, c.TILE_SIZE)
            dynamic_colliders.append(Question(rect, contents))
        # Gray = Brick tile
        elif color == c.GRAY:
            rect = Rectangle(pos, c.TILE_SIZE, c.TILE_SIZE)
            dynamic_colliders.append(Brick(rect))
        # Green = Question tile with mushroom as item
        elif color == c.GREEN:
            mushroom_rect = Rectangle(Vector2(pos.x, pos.y), c.TILE_SIZE, c.TILE_SIZE)
            contents = Super_Mushroom(mushroom_rect, Vector2(c.MUSHROOM_START_VEL_X, 0))
            super_mushrooms.append(contents)
            rect = Rectangle(pos, c.TILE_SIZE, c.TILE_SIZE)
            dynamic_colliders.append(Question(rect, contents))
        # Brown = Goomba
        elif color == c.BROWN:
            rect = Rectangle(pos, c.TILE_SIZE, c.TILE_SIZE)
            enemies.append(Goomba(rect, Vector2()))
        # Purple = Turtle (sprite is taller than one tile, hence the offset)
        elif color == c.PURPLE:
            rect = Rectangle(Vector2(pos.x, pos.y - 24), 48, 72)
            enemies.append(Turtle(rect, Vector2()))

# Instantiate flagpole
rect = Rectangle(Vector2(9504, 96), 48, 456)
flag_pos = Vector2(9480, 120)
c.flagpole = Flagpole(rect, flag_pos)
|
[
"guillaume.falourd@zup.com.br"
] |
guillaume.falourd@zup.com.br
|
3ebadc10fd17025f0d68ab97ada972030f456f03
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2403/60592/233469.py
|
9761f234d7673441623d7aa29b2be6f823ffb15b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
# Distribute candies round-robin: the k-th hand-out (1-based) gives k candies
# to person k % num_people, until the supply runs out; the final recipient
# gets whatever remains.
candies = int(input())
num_people = int(input())
res = [0] * num_people
turn = 0
while candies > 0:
    give = turn + 1
    if give > candies:
        give = candies  # not enough left for a full share — give the rest
    res[turn % num_people] += give
    candies -= give
    turn += 1
print(res)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
2f3a2709a16c0bcb0d3ed029c939daadb80c6b60
|
22a16f2fc3f2ddf92a620751530c2046ba2e9f1f
|
/api/dynamic_tests_v2/activation.py
|
020b0bb91b00720d239a6152f8749c5e7cdbb632
|
[] |
no_license
|
ForFishes/benchmark
|
8ebb8e13f44b2f3a350fe4325b03f7e5cab42065
|
56e070628ad67178cdfc67b47759020ff408300a
|
refs/heads/master
| 2023-02-23T03:45:50.320413
| 2021-01-21T05:15:05
| 2021-01-21T05:15:05
| 331,538,261
| 0
| 2
| null | 2021-01-21T06:39:15
| 2021-01-21T06:39:14
| null |
UTF-8
|
Python
| false
| false
| 1,913
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_import import *
class ActivationConfig(APIConfig):
    """Benchmark config shared by the elementwise activation-style ops.

    Defaults to 'cos'; api_list maps each supported op name to itself so the
    same config drives every op in the family.
    """

    def __init__(self):
        super(ActivationConfig, self).__init__('activation')
        self.api_name = 'cos'
        self.api_list = {
            op: op
            for op in ('cos', 'exp', 'log', 'sin', 'sinh', 'sqrt', 'square', 'tanh')
        }
class PDActivation(PaddleDynamicAPIBenchmarkBase):
    """Paddle dynamic-graph benchmark for the configured activation op."""

    def build_graph(self, config):
        inp = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        out = self.layers(config.api_name, x=inp)
        self.feed_list = [inp]
        self.fetch_list = [out]
        if config.backward:
            # also benchmark the backward pass w.r.t. the input
            self.append_gradients(out, [inp])
class TorchActivation(PytorchAPIBenchmarkBase):
    """PyTorch benchmark counterpart for the configured activation op."""

    def build_graph(self, config):
        inp = self.variable(name='x', shape=config.x_shape, dtype=config.x_dtype)
        out = self.layers(config.api_name, x=inp)
        self.feed_list = [inp]
        self.fetch_list = [out]
        if config.backward:
            # also benchmark the backward pass w.r.t. the input
            self.append_gradients(out, [inp])
if __name__ == '__main__':
    # Run the paddle-vs-pytorch comparison for the activation op family.
    test_main(
        pd_dy_obj=PDActivation(),
        torch_obj=TorchActivation(),
        config=ActivationConfig())
|
[
"noreply@github.com"
] |
ForFishes.noreply@github.com
|
188d1f6fc00b5a13136bb562cf24311d89cc6901
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/rna-transcription/d54f298dde914c7e9a732cbec50a20e1.py
|
970f6d38930e8da150af549feb941eddf63bb6a5
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
class DNA:
    """A DNA sequence that can be transcribed to its RNA complement."""

    # Complement map shared by all instances — the original rebuilt this dict
    # in __init__ for every object (its own comment flagged the waste).
    DNA_to_RNA = {
        'A': 'U',
        'G': 'C',
        'C': 'G',
        'T': 'A'
    }

    def __init__(self, seq=None):
        self.sequence = seq

    def to_rna(self):
        """Return the RNA transcription of self.sequence.

        Raises KeyError on characters outside ACGT (unchanged behavior).
        """
        return ''.join(self.DNA_to_RNA[c] for c in self.sequence)
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
d5ad7a7cac14d5c97e9330e9f6ba0ea20956718b
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_8/brnann016/question3.py
|
75ec0cc3ee04626a026462b0fc5770c9ab077a6a
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 456
|
py
|
# Assignment 8-question3
# Annika Brundyn
# encrypt function
def encrypt(message):
    """Shift each lowercase letter forward by one ('z' wraps to 'a').

    Non-lowercase characters pass through unchanged. Rewritten iteratively:
    the original recursed once per character (O(n) recursion depth, so long
    messages hit the recursion limit) and raised IndexError on "".
    """
    def _shift(ch):
        # Single-character rule, identical to the original's base case.
        if ch.islower():
            return 'a' if ch == 'z' else chr(ord(ch) + 1)
        return ch

    return ''.join(_shift(ch) for ch in message)
# Read a message from stdin, encrypt it, and print the result.
string = input("Enter a message:\n")
print("Encrypted message:")
print(encrypt(string))
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
c05e933761f1a14bf307d77aeb5a24ae19f63d46
|
6123df2ee8648c7977c99564197f7834c7ea83a1
|
/DataPreprocessing/排序算法/快速排序方法1.py
|
bbf8cdc0da6f5ccb2a18ba2805a1233b0aad786c
|
[] |
no_license
|
JiaqiuWang/DataStatusPrediction
|
2b66a24f992df64d93506f54e041d93282213c6e
|
9eb3eff99f0f804857f3a1d70227f75c91a8258d
|
refs/heads/master
| 2020-05-21T21:34:28.571549
| 2017-08-17T08:44:12
| 2017-08-17T08:44:12
| 84,649,900
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
# Method1
def quickSort(arr):
    """Return a sorted copy of *arr* via recursive three-way quicksort.

    The first element is used as the pivot; elements are partitioned into
    less-than / equal / greater-than lists, and the outer partitions are
    sorted recursively. Debug print statements from the original were
    removed so the function no longer pollutes stdout.
    """
    if len(arr) <= 1:
        return arr
    pivot = arr[0]  # use the first value as the pivot
    less = [x for x in arr if x < pivot]
    pivotList = [x for x in arr if x == pivot]
    more = [x for x in arr if x > pivot]
    # Recurse on the outer partitions and stitch the result together.
    less = quickSort(less)
    more = quickSort(more)
    return less + pivotList + more
if __name__ == "__main__":
a = [4, 65, 2, -31, 0, 99, 83, 782, 1]
print("original list:", a)
print(quickSort(a))
|
[
"wangjiaqiu186@icloud.com"
] |
wangjiaqiu186@icloud.com
|
b546707ae6eb6405b3eefd2b37bb4147f39847f4
|
fe3bc38d2a9f80a6b258e2c61dbe4557323a1d71
|
/custom/ewsghana/south_migrations/0001_initial.py
|
2a5bab415d8f95907823c7712377fcd7c3ce648e
|
[] |
no_license
|
ekush/commcare-hq
|
077eb3f525ffb7d1acca0848b9c7678baf776832
|
97a1f55f24f79224724b2ecdc7d5cea87d42f65b
|
refs/heads/master
| 2021-01-17T22:25:09.734898
| 2015-08-25T23:07:49
| 2015-08-25T23:07:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,976
|
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the ewsghana FacilityInCharge table."""

    def forwards(self, orm):
        # Adding model 'FacilityInCharge'
        db.create_table(u'ewsghana_facilityincharge', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user_id', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
            ('location', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['locations.SQLLocation'])),
        ))
        db.send_create_signal(u'ewsghana', ['FacilityInCharge'])

    def backwards(self, orm):
        # Deleting model 'FacilityInCharge'
        db.delete_table(u'ewsghana_facilityincharge')

    # Frozen ORM snapshot used by South to build the `orm` object above;
    # generated — do not edit by hand.
    models = {
        u'ewsghana.facilityincharge': {
            'Meta': {'object_name': 'FacilityInCharge'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.SQLLocation']"}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'})
        },
        u'locations.locationtype': {
            'Meta': {'object_name': 'LocationType'},
            'administrative': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'code': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'db_index': 'False'}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'emergency_level': ('django.db.models.fields.DecimalField', [], {'default': '0.5', 'max_digits': '10', 'decimal_places': '1'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'overstock_threshold': ('django.db.models.fields.DecimalField', [], {'default': '3.0', 'max_digits': '10', 'decimal_places': '1'}),
            'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.LocationType']", 'null': 'True'}),
            'shares_cases': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'understock_threshold': ('django.db.models.fields.DecimalField', [], {'default': '1.5', 'max_digits': '10', 'decimal_places': '1'}),
            'view_descendants': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'locations.sqllocation': {
            'Meta': {'unique_together': "(('domain', 'site_code'),)", 'object_name': 'SQLLocation'},
            '_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['products.SQLProduct']", 'null': 'True', 'symmetrical': 'False'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'external_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '10'}),
            u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'location_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
            'location_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.LocationType']"}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '10'}),
            'metadata': ('json_field.fields.JSONField', [], {'default': '{}'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.SQLLocation']"}),
            u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'site_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'stocks_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'supply_point_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
            u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        u'products.sqlproduct': {
            'Meta': {'object_name': 'SQLProduct'},
            'category': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True'}),
            'code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True'}),
            'cost': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '5'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True'}),
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'product_data': ('json_field.fields.JSONField', [], {'default': '{}'}),
            'product_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
            'program_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True'}),
            'units': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True'})
        }
    }

    complete_apps = ['ewsghana']
|
[
"kkrampa@soldevelo.com"
] |
kkrampa@soldevelo.com
|
0e5519be5f6c9f0162340f3b13fe27ca4e7ef96e
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2021_10_01/_container_service_client.py
|
40fe62dd9a76da362a1f9a68e6ab090b8e1aa17f
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 6,786
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import ContainerServiceClientConfiguration
from .operations import (
AgentPoolsOperations,
MaintenanceConfigurationsOperations,
ManagedClustersOperations,
Operations,
PrivateEndpointConnectionsOperations,
PrivateLinkResourcesOperations,
ResolvePrivateLinkServiceIdOperations,
SnapshotsOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
# NOTE(review): AutoRest-generated client (see file header) — edits here are
# lost on regeneration; only comments added.
class ContainerServiceClient:  # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes
    """The Container Service Client.
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.containerservice.v2021_10_01.operations.Operations
    :ivar managed_clusters: ManagedClustersOperations operations
    :vartype managed_clusters:
     azure.mgmt.containerservice.v2021_10_01.operations.ManagedClustersOperations
    :ivar maintenance_configurations: MaintenanceConfigurationsOperations operations
    :vartype maintenance_configurations:
     azure.mgmt.containerservice.v2021_10_01.operations.MaintenanceConfigurationsOperations
    :ivar agent_pools: AgentPoolsOperations operations
    :vartype agent_pools: azure.mgmt.containerservice.v2021_10_01.operations.AgentPoolsOperations
    :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
    :vartype private_endpoint_connections:
     azure.mgmt.containerservice.v2021_10_01.operations.PrivateEndpointConnectionsOperations
    :ivar private_link_resources: PrivateLinkResourcesOperations operations
    :vartype private_link_resources:
     azure.mgmt.containerservice.v2021_10_01.operations.PrivateLinkResourcesOperations
    :ivar resolve_private_link_service_id: ResolvePrivateLinkServiceIdOperations operations
    :vartype resolve_private_link_service_id:
     azure.mgmt.containerservice.v2021_10_01.operations.ResolvePrivateLinkServiceIdOperations
    :ivar snapshots: SnapshotsOperations operations
    :vartype snapshots: azure.mgmt.containerservice.v2021_10_01.operations.SnapshotsOperations
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
     subscription. The subscription ID forms part of the URI for every service call. Required.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2021-10-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = ContainerServiceClientConfiguration(
            credential=credential, subscription_id=subscription_id, **kwargs
        )
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Map of model name -> model class, fed to the (de)serializers.
        client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        # One operations group attribute per service area (see class docstring).
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.managed_clusters = ManagedClustersOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.maintenance_configurations = MaintenanceConfigurationsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.agent_pools = AgentPoolsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.private_link_resources = PrivateLinkResourcesOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.resolve_private_link_service_id = ResolvePrivateLinkServiceIdOperations(
            self._client, self._config, self._serialize, self._deserialize
        )
        self.snapshots = SnapshotsOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
        """Runs the network request through the client's chained policies.
        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client._send_request(request)
        <HttpResponse: 200 OK>
        For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        # Copy so the caller's request object is not mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    def close(self) -> None:
        self._client.close()

    def __enter__(self) -> "ContainerServiceClient":
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details) -> None:
        self._client.__exit__(*exc_details)
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
16f82b3f6d262e8dc868a376d1488222401592a2
|
4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422
|
/_0053_Maximum_Subarray.py
|
be2bb3203c47327d5a0a3576ae0dfaf79ab007fe
|
[] |
no_license
|
mingweihe/leetcode
|
a2cfee0e004627b817a3c0321bb9c74128f8c1a7
|
edff905f63ab95cdd40447b27a9c449c9cefec37
|
refs/heads/master
| 2021-06-19T07:46:46.897952
| 2021-05-02T05:13:17
| 2021-05-02T05:13:17
| 205,740,338
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
class Solution(object):
    """LeetCode 53 - Maximum Subarray."""

    def maxSubArray(self, nums):
        """Return the largest sum of any contiguous, non-empty subarray.

        Uses Kadane's algorithm: track the best sum of a subarray ending at
        the current element and the best sum seen overall.  O(n) time,
        O(1) extra space.

        Fix over the previous version: the old code wrote the running sums
        back into ``nums`` (``nums[i] = nums[i] + nums[i-1]``), silently
        corrupting the caller's list; this version does not mutate input.
        The dead commented-out recursive approach was removed.

        :type nums: List[int]  (assumed non-empty, per the problem statement)
        :rtype: int
        """
        best = current = nums[0]
        for value in nums[1:]:
            # Extend the running subarray only if it still contributes positively.
            current = value if current < 0 else current + value
            if current > best:
                best = current
        return best
|
[
"10962421@qq.com"
] |
10962421@qq.com
|
2a3cb3835f9c19bc32575a438d0a76d34366cd63
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/095_os_and_sys/_exercises/templates/Programming_Python/04_File and Directory Tools/04_002_Ensuring file closure Exception handlers and context managers.py
|
1f41b027f691c845469a9997d5f06857f17bb216
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 609
|
py
|
# myfile = open(filename, 'w')
# ___
# ...process myfile...
# finally:
# myfile.close()
#
# with open(filename, 'w') as myfile:
# ...process myfile, auto-closed on statement exit...
#
# myfile = open(filename, 'w') # traditional form
# ...process myfile...
# myfile.close()
# with open(filename) as myfile: # context manager form
# ...process myfile...
#
# with A() as a, B() as b:
# ...statements...
#
# with A() as a:
# with B() as b:
# ...statements...
#
# with open('data') as fin, open('results', 'w') as fout:
# for line in fin:
# fout.write(transform(line))
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
3688f07341087091eea0c443b07b0e57449140dd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02699/s068976853.py
|
b2a893ced4293b4b0b5cd753623e0bc052df3729
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
import math
def fact(n):
    """Return n! computed iteratively (fact(0) == fact(1) == 1)."""
    result, factor = 1, n
    while factor > 1:
        result *= factor
        factor -= 1
    return result
def comb(n, c):
    """Return the binomial coefficient C(n, c) ("n choose c").

    Bug fix: the original denominator was ``fact(n - c) * c`` instead of
    ``fact(n - c) * fact(c)`` -- e.g. comb(5, 3) returned 20 instead of 10,
    and comb(n, 0) crashed with ZeroDivisionError.  It only looked right for
    c in (1, 2) because fact(c) == c there.  Uses ``math.factorial`` (the
    module is already imported at the top of this file).
    """
    return math.factorial(n) // (math.factorial(n - c) * math.factorial(c))
# Read two integers from one line and report whether the first exceeds the second.
s, w = (int(token) for token in input().split())
print('safe' if s > w else 'unsafe')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
7588f5b215b1669a718d62698603a3e391129fd8
|
925767da3d33266364f4af5ea5ac161245c598ad
|
/deshi/deshi/settings.py
|
8ce101a6eb8b87962b35c3d1f54201f270f08ed8
|
[] |
no_license
|
NicholasPiano/karate-master
|
facce4100ac92c0f39f1328ee2838126c153538d
|
137ebbd1f274dd79a367f9bbab3651ea365fce83
|
refs/heads/master
| 2016-09-05T11:24:34.843555
| 2014-08-23T19:24:09
| 2014-08-23T19:24:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,969
|
py
|
"""
Django settings for deshi project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5^*pq(en6o74-3b&8mjn*46jqm*g1o2+5f8s9ws*+8m#7)mleu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'deshi.urls'
WSGI_APPLICATION = 'deshi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
[
"nicholas.d.piano@gmail.com"
] |
nicholas.d.piano@gmail.com
|
38271f663558957f927383295f723ae4eee88114
|
74be814f7cd10d3c91a53460bd6698aa8bc95704
|
/剑指offer/面试题28. 对称的二叉树.py
|
11a5e592f54dc79fb7ac584d9dd760cf25be2339
|
[] |
no_license
|
weiyuyan/LeetCode
|
7202f7422bc3bef6bd35ea299550b51905401656
|
19db0e78826d3e3d27d2574abd9d461eb41458d1
|
refs/heads/master
| 2020-12-03T17:10:53.738507
| 2020-05-27T08:28:36
| 2020-05-27T08:28:36
| 231,402,839
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,587
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:ShidongDu time:2020/2/17
'''
请实现一个函数,用来判断一棵二叉树是不是对称的。如果一棵二叉树和它的镜像一样,那么它是对称的。
例如,二叉树 [1,2,2,3,4,4,3] 是对称的。
1
/ \
2 2
/ \ / \
3 4 4 3
但是下面这个 [1,2,2,null,3,null,3] 则不是镜像对称的:
1
/ \
2 2
\ \
3 3
示例 1:
输入:root = [1,2,2,3,4,4,3]
输出:true
示例 2:
输入:root = [1,2,2,null,3,null,3]
输出:false
限制:
0 <= 节点个数 <= 1000
注意:本题与主站 101 题相同:https://leetcode-cn.com/problems/symmetric-tree/
'''
# Definition for a binary tree node.
class TreeNode:
    """Minimal binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x        # payload stored at this node
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)
class Solution:
    """Decide whether a binary tree is symmetric (a mirror image of itself)."""

    def isSymmetric(self, root: TreeNode) -> bool:
        """Return True iff the tree rooted at *root* equals its own mirror."""
        return self.is_Symmetrical(root, root)

    def is_Symmetrical(self, root1: TreeNode, root2: TreeNode):
        """Recursively check that two subtrees are mirror images of each other."""
        if root1 is None and root2 is None:
            return True      # both empty -> trivially mirrored
        if root1 is None or root2 is None:
            return False     # exactly one empty -> not mirrored
        if root1.val != root2.val:
            return False     # values at mirrored positions must match
        # Walk one side root-left-right and the other root-right-left.
        return (self.is_Symmetrical(root1.left, root2.right)
                and self.is_Symmetrical(root1.right, root2.left))
|
[
"244128764@qq.com"
] |
244128764@qq.com
|
8baeb5e0f4856447b4b776a2cfd340d842904525
|
350db570521d3fc43f07df645addb9d6e648c17e
|
/0191_Number_of_1_Bits/solution_test.py
|
c90810e481c8eb1413275fac582e235cd1db37c7
|
[] |
no_license
|
benjaminhuanghuang/ben-leetcode
|
2efcc9185459a1dd881c6e2ded96c42c5715560a
|
a2cd0dc5e098080df87c4fb57d16877d21ca47a3
|
refs/heads/master
| 2022-12-10T02:30:06.744566
| 2022-11-27T04:06:52
| 2022-11-27T04:06:52
| 236,252,145
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
'''
191. Number of 1 Bits
Level: Easy
https://leetcode.com/problems/number-of-1-bits
'''
import unittest
class TestSum(unittest.TestCase):
    """Sanity checks for the built-in ``sum`` (scaffold tests for this file)."""

    def test_sum(self):
        # 1 + 2 + 3 == 6
        self.assertEqual(sum([1, 2, 3]), 6, "Should be 6")

    def test_sum_tuple(self):
        # Bug fix: the tuple used to be (1, 2, 2), which sums to 5, so this
        # assertion always failed.  Use (1, 2, 3) to match the expected 6.
        self.assertEqual(sum((1, 2, 3)), 6, "Should be 6")
if __name__ == '__main__':
    # Run the test suite when executed directly; no effect when imported.
    unittest.main()
|
[
"bhuang@rms.com"
] |
bhuang@rms.com
|
85bf6770328eb00c91e0cc62a4016f0dc533d828
|
a6982e37fdc5359fa89b9f0587f75ff4889a849d
|
/venv/Lib/site-packages/pandas/tests/series/indexing/test_loc.py
|
088406e0a1db670b2a260bce1a69e073370b2a34
|
[
"Apache-2.0"
] |
permissive
|
1pani/fund-rank-dashboard
|
5d7ca6107013d5f82e7c54b3199d5506f34175c3
|
95c17bbee6e09ae4eb5738c519d3b6c651e8257c
|
refs/heads/master
| 2022-12-21T19:36:05.446703
| 2019-10-19T09:45:30
| 2019-10-19T09:45:30
| 137,461,621
| 4
| 1
|
Apache-2.0
| 2022-12-15T09:19:53
| 2018-06-15T08:26:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,078
|
py
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
from pandas import (Series, Timestamp)
from pandas.compat import lrange
from pandas.util.testing import (assert_series_equal)
def test_loc_getitem(test_data):
    """Smoke-test Series.loc getitem: label arrays, slices, masks, scalars."""
    # label-array indexing must match reindex
    inds = test_data.series.index[[3, 4, 7]]
    assert_series_equal(
        test_data.series.loc[inds],
        test_data.series.reindex(inds))
    assert_series_equal(test_data.series.iloc[5::2], test_data.series[5::2])
    # slice with indices
    d1, d2 = test_data.ts.index[[5, 15]]
    result = test_data.ts.loc[d1:d2]
    expected = test_data.ts.truncate(d1, d2)
    assert_series_equal(result, expected)
    # boolean
    mask = test_data.series > test_data.series.median()
    assert_series_equal(test_data.series.loc[mask], test_data.series[mask])
    # ask for index value
    assert test_data.ts.loc[d1] == test_data.ts[d1]
    assert test_data.ts.loc[d2] == test_data.ts[d2]
def test_loc_getitem_not_monotonic(test_data):
    """Label slicing a non-monotonic index must raise KeyError (get and set)."""
    d1, d2 = test_data.ts.index[[5, 15]]
    # Take every other point, then shuffle so the index is no longer monotonic.
    shuffled = test_data.ts[::2][[1, 2, 0]]
    with pytest.raises(KeyError):
        shuffled.loc[d1:d2]
    with pytest.raises(KeyError):
        shuffled.loc[d1:d2] = 0
def test_loc_getitem_setitem_integer_slice_keyerrors():
    """Positional slices always work; label slices need a monotonic index."""
    # index is the even integers 0, 2, ..., 18 -- labels != positions
    s = Series(np.random.randn(10), index=lrange(0, 20, 2))
    # this is OK
    cp = s.copy()
    cp.iloc[4:10] = 0
    assert (cp.iloc[4:10] == 0).all()
    # so is this
    cp = s.copy()
    cp.iloc[3:11] = 0
    assert (cp.iloc[3:11] == 0).values.all()
    # positional [2:6] and label [3:11] select the same labels 4, 6, 8, 10
    result = s.iloc[2:6]
    result2 = s.loc[3:11]
    expected = s.reindex([4, 6, 8, 10])
    assert_series_equal(result, expected)
    assert_series_equal(result2, expected)
    # non-monotonic, raise KeyError
    s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
    pytest.raises(KeyError, s2.loc.__getitem__, slice(3, 11))
    pytest.raises(KeyError, s2.loc.__setitem__, slice(3, 11), 0)
def test_loc_getitem_iterator(test_data):
    """.loc must accept a plain iterator of labels, not just list-likes."""
    first_ten = test_data.series.index[:10]
    result = test_data.series.loc[iter(first_ten)]
    assert_series_equal(result, test_data.series[:10])
def test_loc_setitem_boolean(test_data):
    """Setting through a boolean mask via .loc matches plain [] setitem."""
    above_median = test_data.series > test_data.series.median()
    via_loc = test_data.series.copy()
    via_loc.loc[above_median] = 0
    # NB: matches the original exactly -- this aliases (and mutates) the
    # fixture's series rather than copying it.
    via_setitem = test_data.series
    via_setitem[above_median] = 0
    assert_series_equal(via_loc, via_setitem)
def test_loc_setitem_corner(test_data):
    """Setting existing labels works; a partially-unknown label list raises."""
    inds = list(test_data.series.index[[5, 8, 12]])
    test_data.series.loc[inds] = 5
    # appending a label that is not in the index must raise
    pytest.raises(Exception, test_data.series.loc.__setitem__,
                  inds + ['foo'], 5)
def test_basic_setitem_with_labels(test_data):
    """__setitem__ with label arrays/slices must behave exactly like .loc."""
    indices = test_data.ts.index[[5, 10, 15]]

    # label array
    cp = test_data.ts.copy()
    exp = test_data.ts.copy()
    cp[indices] = 0
    exp.loc[indices] = 0
    assert_series_equal(cp, exp)

    # label slice
    cp = test_data.ts.copy()
    exp = test_data.ts.copy()
    cp[indices[0]:indices[2]] = 0
    exp.loc[indices[0]:indices[2]] = 0
    assert_series_equal(cp, exp)

    # integer indexes, be careful
    s = Series(np.random.randn(10), index=lrange(0, 20, 2))
    inds = [0, 4, 6]
    arr_inds = np.array([0, 4, 6])

    # BUG FIX: the original mutated ``s`` (``s[inds] = 0; s.loc[inds] = 0``)
    # and then compared the two *untouched* copies, making the assertions
    # vacuous.  Mutate the copies so [] and .loc are actually compared.
    cp = s.copy()
    exp = s.copy()
    cp[inds] = 0
    exp.loc[inds] = 0
    assert_series_equal(cp, exp)

    cp = s.copy()
    exp = s.copy()
    cp[arr_inds] = 0
    exp.loc[arr_inds] = 0
    assert_series_equal(cp, exp)

    # a label list containing unknown labels must raise
    inds_notfound = [0, 4, 5, 6]
    arr_inds_notfound = np.array([0, 4, 5, 6])
    pytest.raises(Exception, s.__setitem__, inds_notfound, 0)
    pytest.raises(Exception, s.__setitem__, arr_inds_notfound, 0)

    # GH12089: setting tz-aware values by label and by position
    s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
               index=['a', 'b', 'c'])
    s2 = s.copy()
    expected = Timestamp('2011-01-03', tz='US/Eastern')
    s2.loc['a'] = expected
    result = s2.loc['a']
    assert result == expected

    s2 = s.copy()
    s2.iloc[0] = expected
    result = s2.iloc[0]
    assert result == expected

    s2 = s.copy()
    s2['a'] = expected
    result = s2['a']
    assert result == expected
|
[
"abhilashpani651@gmail.com"
] |
abhilashpani651@gmail.com
|
31406d7aa715df53849ac4f2affbec4743320fcb
|
04097eda667d795407c71dbf7b8c408a55661e35
|
/prb_srv_x_T_legacy.py
|
7a9109fbb219f71ff13fa21410bbe1f7dff3579f
|
[] |
no_license
|
synsrv/src_thrs-dyn-analysis
|
b0f76aded935093ebd15a1aa0d525ca07bbe98ee
|
9a286ec6f8259a6f7b73fb75ffe99c965263f4a7
|
refs/heads/master
| 2020-06-22T02:32:26.379781
| 2019-07-18T15:22:28
| 2019-07-18T15:22:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,114
|
py
|
import matplotlib
matplotlib.use('Agg')  # headless backend: figures are only written to disk
import matplotlib.pyplot as pl
from matplotlib import rc
rc('text', usetex=True)
# LaTeX preamble applied to all figure text.
# NOTE(review): r'\sansmath' has no trailing comma, so it is implicitly
# concatenated with the following r'\usepackage{siunitx}' into a single
# string -- confirm whether the missing comma is intentional.
pl.rcParams['text.latex.preamble'] = [
    r'\usepackage{tgheros}',
    r'\usepackage{sansmath}',
    r'\sansmath'
    r'\usepackage{siunitx}',
    r'\sisetup{detect-all}',
]
import argparse, sys, os, itertools, pickle
import numpy as np
# every sub-directory of data/ is one simulation run
data_dirs = sorted(['data/'+pth for pth in next(os.walk("data/"))[1]])
fig, ax = pl.subplots()
bin_w = 1  # histogram bin width, in simulation steps
for dpath in data_dirs:
    try:
        with open(dpath+'/namespace.p', 'rb') as pfile:
            nsp=pickle.load(pfile)
        with open(dpath+'/lts.p', 'rb') as pfile:
            lts_df=np.array(pickle.load(pfile))
        # discard synapses present at beginning
        lts_df = lts_df[lts_df[:,1]>0]
        # only take synapses grown in first half of simulation
        t_split = nsp['Nsteps']/2
        lts_df = lts_df[lts_df[:,3]<t_split]
        # lifetime = column 2 minus column 3; right-censored at t_split below
        lts = lts_df[:,2] - lts_df[:,3]
        assert np.min(lts) > 0
        lts[lts>t_split]=t_split
        bins = np.arange(1,t_split+bin_w,bin_w)
        counts, edges = np.histogram(lts,
                                     bins=bins,
                                     density=False)
        # empirical survival function: fraction of lifetimes exceeding each bin
        srv = 1. - np.cumsum(counts)/float(np.sum(counts))
        label = str(nsp['bn_sig'])
        centers = (edges[:-1] + edges[1:])/2.
        ax.plot(centers, srv, '.', label=label)
    except FileNotFoundError:
        print(dpath[-4:], "reports: Error loading namespace")
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel('lifetime [steps]')
ax.set_ylabel('relative frequency')
directory = 'figures/prb_srv_single/'
if not os.path.exists(directory):
    os.makedirs(directory)
pl.legend()
# NOTE(review): dpath is the loop variable left over from the final
# iteration, so the output PNG is named after the *last* data directory.
fname = dpath[-4:]
fig.savefig(directory+'/'+fname+'.png', dpi=150,
            bbox_inches='tight')
|
[
"felix11h.dev@gmail.com"
] |
felix11h.dev@gmail.com
|
a56fc1a4e5b64686820edca3055851034da5cf31
|
ade10d077441c1d36d027c05df458e942b904a1d
|
/scripts/classifcation_pos_all_vs_one.py
|
cbd024b089929aa4abb8a5011aef697193f32b07
|
[
"MIT"
] |
permissive
|
nmningmei/metacognition
|
dabadca913e404994f8d53167e5f36f551590def
|
734082e247cc7fc9d277563e2676e10692617a3f
|
refs/heads/master
| 2022-07-14T06:51:42.508132
| 2021-11-14T09:40:13
| 2021-11-14T09:40:13
| 197,187,294
| 2
| 1
|
MIT
| 2022-06-21T23:01:56
| 2019-07-16T12:12:11
|
Python
|
UTF-8
|
Python
| false
| false
| 6,544
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 16:02:16 2018
@author: ning
"""
import os
working_dir = ''
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
from utils import (classification_simple_logistic)
saving_dir = '../results/all_vs_one'
if not os.path.exists(saving_dir):
os.mkdir(saving_dir)
# Exp 1
# Exp 1
# For every participant, decode trial success from the judgements of the
# previous n_back trials with a simple logistic regression, once per feature
# set, saving one CSV per (participant, feature set).
#
# The original script repeated the same ~25-line stanza four times; it is
# folded into a data-driven loop.  Each tuple below is
# (reseed RNG first?, predictor columns, tag used in the CSV file name).
# The reseed flags reproduce the original ordering exactly: it called
# np.random.seed(12345) before the all-features run and before the
# awareness run, but NOT before the correct/confidence runs.
FEATURE_RUNS = [
    (True, ['correct', 'awareness', 'confidence'], '3_1'),
    (False, ['correct'], 'correct'),
    (True, ['awareness'], 'awareness'),
    (False, ['confidence'], 'confidence'),
]
for participant in ['AC', 'CL', 'FW', 'HB', 'KK', 'LM', 'MC', 'MP1', 'MP2', 'NN', 'RP', 'SD', 'TJ', 'TS', 'WT']:
    experiment = 'pos'
    df = pd.read_csv(os.path.join(working_dir, '../data/PoSdata.csv'))
    df = df[df.columns[1:]]  # drop the unnamed index column
    df.columns = ['participant',
                  'blocks',
                  'trials',
                  'firstgabor',
                  'success',
                  'tilted',
                  'correct',
                  'RT_correct',
                  'awareness',
                  'RT_awareness',
                  'confidence',
                  'RT_confidence']
    df_sub = df[df['participant'] == participant]
    # make sure all the attributes are either 0 or 1 (raw codes are 1/2)
    df_sub.loc[:, 'success'] = df_sub.loc[:, 'success'].values - 1
    df_sub.loc[:, 'awareness'] = df_sub.loc[:, 'awareness'].values - 1
    df_sub.loc[:, 'confidence'] = df_sub.loc[:, 'confidence'].values - 1
    target_name = 'success'
    for reseed, feature_names, tag in FEATURE_RUNS:
        if reseed:
            np.random.seed(12345)
        results = dict(sub=[],
                       model=[],
                       score=[],
                       window=[],
                       chance=[],
                       feature=[])
        for n_back in np.arange(1, 5):  # loop through the number of trials looking back
            results = classification_simple_logistic(
                df_sub,
                feature_names,
                target_name,
                results,
                participant,
                experiment,
                window=n_back,
                chance=False,
            )
        # one CSV per feature set and participant, names unchanged
        out_name = 'pos_{}_features (experiment score)_{}.csv'.format(tag, participant)
        pd.DataFrame(results).to_csv(os.path.join(saving_dir, out_name), index=False)
|
[
"nmei@bcbl.eu"
] |
nmei@bcbl.eu
|
1f2da7d9fbaf2354d8c00fea11e387e58c256aaa
|
d03ce5c5c3750e60ba9baa958e5445b98ba14462
|
/user/views.py
|
f25749f7fb2516d57bb3384433e3d5e046bc0143
|
[] |
no_license
|
alxayeed/covid-room-finder
|
dbd22ebb702bf588384d6fb8d917c29568cc22a6
|
3ff541d76e342d6bd0b3d7221577b06ec0806837
|
refs/heads/main
| 2023-03-30T12:33:57.152717
| 2021-02-21T13:08:00
| 2021-02-21T13:08:00
| 339,812,785
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,407
|
py
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .forms import RegisterForm
from django.contrib.auth import login, logout, authenticate
from django.contrib import messages
def register_user(request):
    """Show the signup form; on valid POST create the user and log them in."""
    form = RegisterForm()
    if request.method == "POST":
        form = RegisterForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            form.save()
            user = authenticate(request, username=username, password=password)
            login(request, user)
            return redirect('index')
        # invalid submission: fall through with the bound form so its
        # validation errors are rendered
    return render(request, 'user/register_form.html', {'form': form})
def login_user(request):
    """Render the login page; authenticate on POST and redirect on success."""
    if request.method == 'POST':
        submitted_name = request.POST.get('username')
        submitted_pass = request.POST.get('password')
        user = authenticate(request, username=submitted_name, password=submitted_pass)
        if user is None:
            messages.error(request, 'Username or Password is incorrect')
        else:
            login(request, user)
            print(request.user.is_authenticated)  # debug trace kept from original
            return redirect('index')
    return render(request, 'user/login.html')
def logout_user(request):
    # End the session and send the visitor back to the login page.
    logout(request)
    return redirect('login')
|
[
"alxayeed@gmail.com"
] |
alxayeed@gmail.com
|
82b94aa0ddd70df563d846434a596b315ad4d8a1
|
84341d15f4b8d13b09c7dabe2b7286705ee86b7b
|
/scripts/multi_rc/topk_evidence_self_training/roberta_predict_sentence3.0.py
|
e9e34a6953dcd9a10999bf35f331f6ab6d91b805
|
[] |
no_license
|
UMP-Healthcare-AI/Self-Training-MRC
|
7a0ef0c52f0064cfc32a2bedb433608ed10328a7
|
0601158085bb11e454aee1ebaa987f5aa741ab3f
|
refs/heads/master
| 2022-12-01T17:46:28.463777
| 2020-08-14T01:56:25
| 2020-08-14T01:56:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,094
|
py
|
import subprocess
import time
import logging
import os
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def run_cmd(command: str):
    """Log *command*, then execute it through the shell.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    logger.info(command)
    subprocess.check_call(command, shell=True)
def wait_for_file(file: str):
    """Block until *file* exists, polling once per minute.

    After the file first appears, one extra minute is waited -- presumably a
    grace period for the producer to finish writing; TODO confirm intent.
    """
    if os.path.exists(file):
        return
    logger.info(f'Could not find file {file}. Waiting...')
    minute_cnt = 0
    while not os.path.exists(file):
        print(f'The {minute_cnt}th minute...')
        time.sleep(60)
        minute_cnt += 1
    time.sleep(60)
    logger.info(f'Find file {file} after waiting for {minute_cnt} minutes')
# model checkpoint and MultiRC data split locations
roberta_large_model_dir = "/home/jiaofangkai/roberta-large"
train_file = '/home/jiaofangkai/multi-rc/splitv2/train.json'
dev_file = '/home/jiaofangkai/multi-rc/splitv2/dev.json'
# registry names for the task / data reader / model variant
task_name = 'topk-rc-roberta'
reader_name = 'topk-multi-rc-roberta'
bert_name = 'hie-topk-roberta'
# self-training hyper-parameters (logged below)
k = 2000
label_threshold = 0.8
weight_threshold = 0.5
recurrent_times = 10
num_train_epochs = [8] * 10  # epochs per self-training round
sentence_id_file = None  # NOTE(review): assigned but never used in this script
num_evidence = 3
root_dir = f'experiments/multi-rc/topk-evidence/roberta-self-training/v1.0_acc_top{k}'
os.makedirs(root_dir, exist_ok=True)
# mirror all log output into <root_dir>/output.log
f_handler = logging.FileHandler(os.path.join(root_dir, f'output.log'))
f_handler.setLevel(logging.INFO)
f_handler.setFormatter(logging.Formatter(fmt="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
                                         datefmt='%m/%d/%Y %H:%M:%S'))
logger.addHandler(f_handler)
logger.info('Self-training parameters:')
logger.info(f'k: {k}')
logger.info(f'label_threshold: {label_threshold}')
logger.info(f'weight_threshold: {weight_threshold}')
logger.info(f'recurrent_times: {recurrent_times}')
logger.info(f'num_evidence: {num_evidence}')
learning_rate = 1e-5
for i in range(recurrent_times):
    logger.info(f'Start running for the {i}th times.')
    output_dir = f'{root_dir}/recurrent{i}'
    # round 0 trains without the evidence auxiliary loss; later rounds
    # weight it with lambda = 0.8
    if i == 0:
        evidence_lambda = 0.0
    else:
        evidence_lambda = 0.8
    # one training/labelling invocation per self-training round
    cmd = f'python main2_0.6.2_topk_predict_sentences.py --bert_model roberta-large ' \
          f'--vocab_file {roberta_large_model_dir} --model_file {roberta_large_model_dir} ' \
          f'--output_dir {output_dir} --predict_dir {output_dir} ' \
          f'--train_file {train_file} --predict_file {dev_file} ' \
          f'--max_seq_length 512 --train_batch_size 32 --predict_batch_size 1 ' \
          f'--learning_rate {learning_rate} --num_train_epochs {num_train_epochs[i]} ' \
          f'--fp16 --fp16_opt_level O2 --gradient_accumulation_steps 32 --per_eval_step 100 ' \
          f'--bert_name {bert_name} --task_name {task_name} --reader_name {reader_name} ' \
          f'--evidence_lambda {evidence_lambda} ' \
          f'--do_label --only_correct --label_threshold {label_threshold} --weight_threshold {weight_threshold} ' \
          f'--num_evidence {num_evidence} --max_grad_norm 5.0 --adam_epsilon 1e-6 '
    run_cmd(cmd)
    logger.info('=' * 50)
|
[
"jiaofangkai@hotmail.com"
] |
jiaofangkai@hotmail.com
|
9d1075dd4cf3c10e3855309c10cd54598f9bbd9e
|
8d0f35341ad14f1f1edd71481e5fa870a4cb6ed5
|
/setup.py
|
acbda8936e4d0aeefd6b89aa075f25c298c7c64f
|
[
"MIT"
] |
permissive
|
gitter-badger/django-static-precompiler
|
f22b2414ef73c8ad0c0e448f1d821dfc73868ee4
|
aad008df68dc7194ed9d6e78ab4358f94e456894
|
refs/heads/master
| 2021-01-12T21:45:20.258776
| 2014-11-19T05:04:15
| 2014-11-19T05:04:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
from setuptools import setup, find_packages
import os
import sys
def read(fname):
    """Return the text of *fname*, resolved relative to this file.

    On Python 3 the file is decoded as UTF-8.

    Fixes two defects in the original:
    * the file object was never closed (leaked handle) -- use ``with``;
    * ``sys.version < '3'`` compares *strings*, which is fragile (e.g. it
      misorders a hypothetical "10.x"); compare ``sys.version_info`` instead.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    if sys.version_info[0] < 3:
        with open(path) as f:
            return f.read()
    with open(path, encoding="utf-8") as f:
        return f.read()
# Long description shown on PyPI: the README followed by the changelog.
README = read('README.rst')
CHANGES = read('CHANGES.rst')
setup(
    name="django-static-precompiler",
    packages=find_packages(),
    version="0.7",
    author="Andrey Fedoseev",
    author_email="andrey.fedoseev@gmail.com",
    url="https://github.com/andreyfedoseev/django-static-precompiler",
    description="Django template tags to compile all kinds of static files "
                "(SASS, LESS, CoffeeScript).",
    long_description="\n\n".join([README, CHANGES]),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        # NOTE(review): declares a BSD license -- confirm this matches the
        # package's actual LICENSE file.
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP',
    ],
    keywords=["sass", "scss", "less", "css", "coffeescript", "javascript"],
    tests_require=[
        "mock",
    ],
    test_suite="static_precompiler.tests.suite",
)
|
[
"andrey.fedoseev@gmail.com"
] |
andrey.fedoseev@gmail.com
|
61c96eaafdcde160af8110b4322db04ba28832ed
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_118/1006.py
|
f920c1947a495d32a85e4a572c45898ba6ebdea5
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 880
|
py
|
import numpy as np
from math import ceil, sqrt
fin = open('C-small-attempt0.in')
fout = open('out_small.txt', 'w')
def isqrt(x):
    """Return floor(sqrt(x)) for a non-negative integer via Newton's method."""
    n = int(x)
    if n == 0:
        return 0
    # Initial guess: a power of two at least sqrt(n)
    # (half the bit length, rounded up).
    half_bits, rem = divmod(n.bit_length(), 2)
    guess = 2 ** (half_bits + rem)
    while True:
        refined = (guess + n // guess) // 2
        if refined >= guess:
            return guess  # converged: guess is the integer square root
        guess = refined
T = int(fin.readline().rstrip('\n'))
def IsPalindrome(x):
    """True iff the decimal representation of *x* reads the same reversed."""
    digits = str(x)
    return digits == digits[::-1]
# One "fair and square" count per test case: numbers in [min, max] that are
# perfect squares of a palindrome and are palindromes themselves.
for iter in range(T):
    # NOTE(review): `iter` shadows the built-in; left unchanged (doc-only pass).
    num_palindromes = 0
    # dtype=long marks this as Python 2 code (`long` does not exist on Python 3)
    num_lims = np.array(fin.readline().rstrip('\n').split(), dtype=long)
    min_val = num_lims[0]
    max_val = num_lims[1]
    # candidate roots are the integers in [ceil(sqrt(min)), isqrt(max)]
    start_val = int(ceil(sqrt(min_val)))
    for i in range(start_val, isqrt(max_val)+1,1):
        # count when both the root and its square are palindromes
        if IsPalindrome(i) and IsPalindrome(pow(i,2)):
            num_palindromes = num_palindromes + 1
    case_num = iter+1
    fout.write('Case #%d: ' %case_num + '%d\n' %num_palindromes)
fin.close()
fout.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
63ef114161ba9d387165bf9cd0c4eee914e502f5
|
60e57860bc819d83cd628b5179ba7f4d655562b8
|
/implementation/lps/lps/tests.py
|
17794922910430e7844501e766a65b1be823652f
|
[] |
no_license
|
Schwenger/TinBots
|
9a3075c9e84762f34e33f15970eaadc305eed003
|
ce7554f357b291b6ff7ae0214d4f8bbf5bd0c48e
|
refs/heads/master
| 2021-01-19T21:15:31.948534
| 2017-08-31T21:34:09
| 2017-08-31T21:34:09
| 63,550,617
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016, Maximilian Köhl <mail@koehlma.de>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import time
from lps.commands import Commands
from lps.constants import Modes
from lps.debugger import Debugger, INFO
class VictimDirectionTest:
    """Drives a TinBot through repeated VICDIR runs on a worker thread.

    For each run it pairs the phi angle reported in a VICTIM_PHI radio
    package with the tinbot's current ``victim_phi`` attribute.
    """

    def __init__(self, tinbot, debugger=None):
        # fall back to the globally active debugger when none is supplied
        self.debugger = debugger or Debugger.current
        self.tinbot = tinbot
        self.thread = None      # background thread created by start()
        self.iterations = None  # number of start/reset cycles to perform
        self.result = None      # list of (reported phi, tinbot.victim_phi) pairs
        self.done = threading.Event()  # set whenever a VICTIM_PHI package arrives

    def start(self, iterations=50):
        """Launch the test asynchronously for *iterations* runs."""
        self.iterations = iterations
        self.result = []
        self.thread = threading.Thread(target=self.run)
        self.thread.start()

    def run(self):
        """Thread body: cycle the robot through start -> wait -> reset."""
        self.tinbot.package_event += self.on_package
        self.debugger.print_message('Starting Victim Direction Test', INFO)
        self.tinbot.set_mode(Modes.VICDIR)
        for iteration in range(self.iterations):
            time.sleep(0.5)  # brief settle time between cycles
            self.done.clear()
            self.tinbot.start()
            self.done.wait()  # blocks until on_package records a result
            self.tinbot.reset()
        self.tinbot.package_event -= self.on_package

    def on_package(self, device, source, target, command, payload):
        """Radio callback: record VICTIM_PHI packages, ignore everything else."""
        if command != Commands.VICTIM_PHI:
            return
        x, y, phi = Commands.VICTIM_PHI.decode(payload)
        # pair the reported angle with the tinbot's own victim_phi value
        self.result.append((phi, self.tinbot.victim_phi))
        self.done.set()
|
[
"mail@koehlma.de"
] |
mail@koehlma.de
|
960efe5c81f0c4d2490e1618d848cb089fb10f97
|
62e45255088abb536e9ea6fcbe497e83bad171a0
|
/ippython/multiplo.py
|
9dce2387b353d86c8dd7be1608c2d034bf585dc5
|
[] |
no_license
|
jmery24/python
|
a24f562c8d893a97a5d9011e9283eba948b8b6dc
|
3e35ac9c9efbac4ff20374e1dfa75a7af6003ab9
|
refs/heads/master
| 2020-12-25T21:56:17.063767
| 2015-06-18T04:59:05
| 2015-06-18T04:59:05
| 36,337,473
| 0
| 0
| null | 2015-05-27T02:26:54
| 2015-05-27T02:26:54
| null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 22 07:18:42 2013
@author: daniel
"""
#programa:multiplo.py
#calcula los multiplos de numero en el intervalo [numero, limite]
#data input
num = 1
repeticiones = int(raw_input('valor limite <entero positivo>: '))
multiplo = int(raw_input('multiplos del numero: '))
#proccesing
if repeticiones < 0:
print 'no es posible la operacion'
else:
while num <= repeticiones / multiplo:
print multiplo * num
num += 1
print 'Hecho'
|
[
"danmery@gmail.com"
] |
danmery@gmail.com
|
f1d9996b93014b4210b992142021b15cb3c21578
|
16c8fdf291430475f40d578b0d64552eb64046e9
|
/colour/plotting/tm3018/tests/test_components.py
|
f11dce989d2eabcc845c3554a2ee35f1ac115482
|
[
"BSD-3-Clause"
] |
permissive
|
nodefeet/colour
|
4c1bfed87ce173ff878bdf288fd9828bb68022e3
|
319dd5b1c45aef6983eff1830f918c1e593fb530
|
refs/heads/develop
| 2022-02-19T17:39:36.657993
| 2022-02-15T08:38:26
| 2022-02-15T08:38:26
| 460,456,444
| 0
| 0
|
BSD-3-Clause
| 2022-02-17T13:53:37
| 2022-02-17T13:53:36
| null |
UTF-8
|
Python
| false
| false
| 5,064
|
py
|
"""Defines the unit tests for the :mod:`colour.plotting.tm3018.components` module."""
from __future__ import annotations
import unittest
from matplotlib.pyplot import Axes, Figure
from colour.colorimetry import SDS_ILLUMINANTS
from colour.hints import cast
from colour.quality import (
ColourQuality_Specification_ANSIIESTM3018,
colour_fidelity_index_ANSIIESTM3018,
)
from colour.plotting.tm3018.components import (
plot_spectra_ANSIIESTM3018,
plot_colour_vector_graphic,
plot_16_bin_bars,
plot_local_chroma_shifts,
plot_local_hue_shifts,
plot_local_colour_fidelities,
plot_colour_fidelity_indexes,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestPlotSpectraANSIIESTM3018",
"TestPlotColourVectorGraphic",
"TestPlot16BinBars",
"TestPlotLocalChromaShifts",
"TestPlotLocalHueShifts",
"TestPlotLocalColourFidelities",
"TestPlotColourFidelityIndexes",
]
# Shared fixture: one TM-30-18 colour-fidelity specification computed for
# illuminant FL2 with additional data, reused by every test class below.
SPECIFICATION_ANSIIESTM3018: ColourQuality_Specification_ANSIIESTM3018 = cast(
    ColourQuality_Specification_ANSIIESTM3018,
    colour_fidelity_index_ANSIIESTM3018(SDS_ILLUMINANTS["FL2"], True),
)
class TestPlotSpectraANSIIESTM3018(unittest.TestCase):
    """Unit tests for :func:`colour.plotting.tm3018.components.\
plot_spectra_ANSIIESTM3018`."""

    def test_plot_spectra_ANSIIESTM3018(self):
        """The plot function must hand back a Matplotlib (Figure, Axes) pair."""
        fig, ax = plot_spectra_ANSIIESTM3018(SPECIFICATION_ANSIIESTM3018)
        self.assertIsInstance(fig, Figure)
        self.assertIsInstance(ax, Axes)
class TestPlotColourVectorGraphic(unittest.TestCase):
    """Unit tests for :func:`colour.plotting.tm3018.components.\
plot_colour_vector_graphic`."""

    def test_plot_colour_vector_graphic(self):
        """The plot function must hand back a Matplotlib (Figure, Axes) pair."""
        fig, ax = plot_colour_vector_graphic(SPECIFICATION_ANSIIESTM3018)
        self.assertIsInstance(fig, Figure)
        self.assertIsInstance(ax, Axes)
class TestPlot16BinBars(unittest.TestCase):
    """
    Define :func:`colour.plotting.tm3018.components.plot_16_bin_bars`
    definition unit tests methods.
    """
    def test_plot_16_bin_bars(self):
        """
        Test :func:`colour.plotting.tm3018.components.plot_16_bin_bars`
        definition.
        """
        # 16 arbitrary bar values and a trivial label template are enough to
        # exercise the definition.
        figure, axes = plot_16_bin_bars(range(16), "{0}")
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
class TestPlotLocalChromaShifts(unittest.TestCase):
    """
    Define :func:`colour.plotting.tm3018.components.plot_local_chroma_shifts`
    definition unit tests methods.
    """
    def test_plot_local_chroma_shifts(self):
        """
        Test :func:`colour.plotting.tm3018.components.\
plot_local_chroma_shifts` definition.
        """
        # The plotting definition must return the *Matplotlib* figure and axes.
        figure, axes = plot_local_chroma_shifts(SPECIFICATION_ANSIIESTM3018)
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
class TestPlotLocalHueShifts(unittest.TestCase):
    """
    Define :func:`colour.plotting.tm3018.components.plot_local_hue_shifts`
    definition unit tests methods.
    """
    def test_plot_local_hue_shifts(self):
        """
        Test :func:`colour.plotting.tm3018.components.\
plot_local_hue_shifts` definition.
        """
        # The plotting definition must return the *Matplotlib* figure and axes.
        figure, axes = plot_local_hue_shifts(SPECIFICATION_ANSIIESTM3018)
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
class TestPlotLocalColourFidelities(unittest.TestCase):
    """
    Define :func:`colour.plotting.tm3018.components.\
plot_local_colour_fidelities` definition unit tests methods.
    """
    def test_plot_local_colour_fidelities(self):
        """
        Test :func:`colour.plotting.tm3018.components.\
plot_local_colour_fidelities` definition.
        """
        # The plotting definition must return the *Matplotlib* figure and axes.
        figure, axes = plot_local_colour_fidelities(
            SPECIFICATION_ANSIIESTM3018
        )
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
class TestPlotColourFidelityIndexes(unittest.TestCase):
    """
    Define :func:`colour.plotting.tm3018.components.\
plot_colour_fidelity_indexes` definition unit tests methods.
    """
    def test_plot_colour_fidelity_indexes(self):
        """
        Test :func:`colour.plotting.tm3018.components.\
plot_colour_fidelity_indexes` definition.
        """
        # The plotting definition must return the *Matplotlib* figure and axes.
        figure, axes = plot_colour_fidelity_indexes(
            SPECIFICATION_ANSIIESTM3018
        )
        self.assertIsInstance(figure, Figure)
        self.assertIsInstance(axes, Axes)
if __name__ == "__main__":
unittest.main()
|
[
"thomas.mansencal@gmail.com"
] |
thomas.mansencal@gmail.com
|
ca29d96325ee8f61ab5f0947f4388cd848582dd5
|
e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7
|
/flask_api/venv/lib/python3.7/site-packages/vsts/release/v4_1/models/task_input_definition_base.py
|
7d9e81d65f08fc44f1f9696cf00be3bf1a173c64
|
[] |
no_license
|
u-blavins/secret_sasquatch_society
|
c36993c738ab29a6a4879bfbeb78a5803f4f2a57
|
0214eadcdfa9b40254e331a6617c50b422212f4c
|
refs/heads/master
| 2020-08-14T00:39:52.948272
| 2020-01-22T13:54:58
| 2020-01-22T13:54:58
| 215,058,646
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,850
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TaskInputDefinitionBase(Model):
    """TaskInputDefinitionBase.

    Plain data-transfer model describing a single task input field.
    This file is generated (see the header above) — do not hand-edit logic.

    :param aliases:
    :type aliases: list of str
    :param default_value:
    :type default_value: str
    :param group_name:
    :type group_name: str
    :param help_mark_down:
    :type help_mark_down: str
    :param label:
    :type label: str
    :param name:
    :type name: str
    :param options:
    :type options: dict
    :param properties:
    :type properties: dict
    :param required:
    :type required: bool
    :param type:
    :type type: str
    :param validation:
    :type validation: :class:`TaskInputValidation <microsoft.-team-foundation.-distributed-task.-common.-contracts.v4_1.models.TaskInputValidation>`
    :param visible_rule:
    :type visible_rule: str
    """
    # Maps Python attribute names to wire (JSON) keys and msrest type codes;
    # consumed by msrest's Serializer/Deserializer.
    _attribute_map = {
        'aliases': {'key': 'aliases', 'type': '[str]'},
        'default_value': {'key': 'defaultValue', 'type': 'str'},
        'group_name': {'key': 'groupName', 'type': 'str'},
        'help_mark_down': {'key': 'helpMarkDown', 'type': 'str'},
        'label': {'key': 'label', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'options': {'key': 'options', 'type': '{str}'},
        'properties': {'key': 'properties', 'type': '{str}'},
        'required': {'key': 'required', 'type': 'bool'},
        'type': {'key': 'type', 'type': 'str'},
        'validation': {'key': 'validation', 'type': 'TaskInputValidation'},
        'visible_rule': {'key': 'visibleRule', 'type': 'str'}
    }
    def __init__(self, aliases=None, default_value=None, group_name=None, help_mark_down=None, label=None, name=None, options=None, properties=None, required=None, type=None, validation=None, visible_rule=None):
        """Store every field verbatim; all parameters default to None."""
        super(TaskInputDefinitionBase, self).__init__()
        self.aliases = aliases
        self.default_value = default_value
        self.group_name = group_name
        self.help_mark_down = help_mark_down
        self.label = label
        self.name = name
        self.options = options
        self.properties = properties
        self.required = required
        self.type = type
        self.validation = validation
        self.visible_rule = visible_rule
|
[
"usama.blavins1@gmail.com"
] |
usama.blavins1@gmail.com
|
73a3eee3fd2e1f1a0057bd58a22f2e7a32c4130b
|
87aeb666b3e0d1e2d75fbb11c6e1bcc71402d098
|
/task_manager/taskapp/migrations/0001_initial.py
|
0a993da519f68d801dab37465e113bc760814c2b
|
[] |
no_license
|
Alexey-Krapivnitsky/Task-Manager
|
25934aff3c56c442e2080da7d910705fbbe27f4c
|
62ee195bd79f86915e2c4dd5af9cc65797230f42
|
refs/heads/master
| 2022-12-17T13:27:53.660378
| 2020-10-01T19:23:54
| 2020-10-01T19:23:54
| 299,638,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
# Generated by Django 3.1.1 on 2020-09-30 13:57
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Initial migration for the task app: creates the ``Task`` model."""
    initial = True
    dependencies = [
        ('authapp', '0003_auto_20200930_1857'),
    ]
    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('task_name', models.CharField(max_length=25, null=True)),
                ('task_description', models.CharField(max_length=250, null=True)),
                # NOTE(review): the default is the frozen timestamp captured when the
                # migration was generated, not "now" — the model likely intended a
                # callable such as timezone.now; confirm against the model definition.
                ('created_at', models.DateTimeField(default=datetime.datetime(2020, 9, 30, 13, 57, 2, 549223, tzinfo=utc))),
                ('task_status', models.CharField(choices=[('N', 'New'), ('P', 'Planned'), ('W', 'Working'), ('F', 'Finished')], max_length=1)),
                ('task_finished_date', models.DateTimeField(null=True)),
                ('task_owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='authapp.user')),
            ],
        ),
    ]
|
[
"cronos1009@yandex.ru"
] |
cronos1009@yandex.ru
|
6cccd938961ceaf9dc6a7234ea27deaebf7670bc
|
e1b52ce8238e7fe0d1c2cd8627535279edeb4bcb
|
/app/__init__.py
|
b65f106af4139090033d0b02ef36fab7a1dbcff2
|
[
"MIT"
] |
permissive
|
nevooronni/Stackoverflow-lite-APIs
|
5cfb9048f56156c88f7f5c35fcba50b99b0d1111
|
eb0af8588628e174822be947b8a914ed64e97236
|
refs/heads/master
| 2020-04-14T12:22:21.658955
| 2019-01-02T15:28:23
| 2019-01-02T15:28:23
| 163,838,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
"""creating app"""
import os
from flask import Flask
from instance.config import app_config
"""
import the configurations from the .confifrom dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv()) #pass override=True to override current system environment variables
g file which is in the instance folder
"""
def create_app(config_name):
    """
    Application factory: build a :class:`flask.Flask` instance configured
    for ``config_name``.

    :param config_name: key into ``app_config`` (defined in
        ``instance/config.py``) selecting which configuration class to load.
    :return: the configured Flask application.
    """
    app = Flask(__name__, instance_relative_config=True)
    # Load the class-based settings first, then let the instance-folder
    # ``config.py`` (kept out of version control) override them.
    app.config.from_object(app_config[config_name])
    app.config.from_pyfile('config.py')
    return app
|
[
"nevooronni@gmail.com"
] |
nevooronni@gmail.com
|
7dc43320f08fd0538e450d05265783dd856dc117
|
a1d30d667cbf814db1809c31cf68ba75c01f819c
|
/Google/2. medium/362. Design Hit Counter.py
|
579742997805aaf7ccf5cd8362f9bcbc212660fb
|
[] |
no_license
|
yemao616/summer18
|
adb5f0e04e6f1e1da6894b0b99a61da3c5cba8ee
|
8bb17099be02d997d554519be360ef4aa1c028e3
|
refs/heads/master
| 2021-06-02T04:32:07.703198
| 2020-01-09T17:45:29
| 2020-01-09T17:45:29
| 110,744,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,159
|
py
|
# Design a hit counter which counts the number of hits received in the past 5 minutes.
# Each function accepts a timestamp parameter (in seconds granularity) and you may assume that calls are being made to the system in chronological order (ie, the timestamp is monotonically increasing). You may assume that the earliest timestamp starts at 1.
# It is possible that several hits arrive roughly at the same time.
# Example:
# HitCounter counter = new HitCounter();
# // hit at timestamp 1.
# counter.hit(1);
# // hit at timestamp 2.
# counter.hit(2);
# // hit at timestamp 3.
# counter.hit(3);
# // get hits at timestamp 4, should return 3.
# counter.getHits(4);
# // hit at timestamp 300.
# counter.hit(300);
# // get hits at timestamp 300, should return 4.
# counter.getHits(300);
# // get hits at timestamp 301, should return 3.
# counter.getHits(301);
# Follow up:
# What if the number of hits per second could be very large? Does your design scale?
class HitCounter(object):
    """Count hits received within the trailing 300-second (5-minute) window.

    Hits are stored as ``[timestamp, count]`` pairs in a deque, kept in
    chronological order, so expired entries can be evicted from the left
    in O(1) each; a running total avoids re-summing on every query.
    """
    def __init__(self):
        """
        Initialize your data structure here.
        """
        from collections import deque
        # Running total of hits currently inside the window.
        self.num_of_hits = 0
        # Chronological deque of [timestamp, hits-at-that-timestamp] pairs.
        self.time_hits = deque()
    def hit(self, timestamp):
        """
        Record a hit.
        @param timestamp - The current timestamp (in seconds granularity).
        :type timestamp: int
        :rtype: void
        """
        latest = self.time_hits[-1] if self.time_hits else None
        if latest is not None and latest[0] == timestamp:
            # Same second as the previous hit: bump its counter in place.
            latest[1] += 1
        else:
            self.time_hits.append([timestamp, 1])
        self.num_of_hits += 1
    def getHits(self, timestamp):
        """
        Return the number of hits in the past 5 minutes.
        @param timestamp - The current timestamp (in seconds granularity).
        :type timestamp: int
        :rtype: int
        """
        cutoff = timestamp - 300
        # Evict entries that fell out of the window and discount them.
        while self.time_hits and self.time_hits[0][0] <= cutoff:
            _stale_ts, stale_count = self.time_hits.popleft()
            self.num_of_hits -= stale_count
        return self.num_of_hits
# Your HitCounter object will be instantiated and called as such:
# obj = HitCounter()
# obj.hit(timestamp)
# param_2 = obj.getHits(timestamp)
|
[
"ymao4@ncsu.edu"
] |
ymao4@ncsu.edu
|
a447848b01fb067427ac2e26e86ee507529529fc
|
a81c07a5663d967c432a61d0b4a09de5187be87b
|
/build/android/pylib/base/environment_factory.py
|
4d3727444f43ce92292cd4e4ca2f5e68dedb9799
|
[
"BSD-3-Clause"
] |
permissive
|
junxuezheng/chromium
|
c401dec07f19878501801c9e9205a703e8643031
|
381ce9d478b684e0df5d149f59350e3bc634dad3
|
refs/heads/master
| 2023-02-28T17:07:31.342118
| 2019-09-03T01:42:42
| 2019-09-03T01:42:42
| 205,967,014
| 2
| 0
|
BSD-3-Clause
| 2019-09-03T01:48:23
| 2019-09-03T01:48:23
| null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from pylib import constants
from pylib.local.device import local_device_environment
from pylib.local.emulator import local_emulator_environment
from pylib.local.machine import local_machine_environment
def CreateEnvironment(args, output_manager, error_func):
  """Build the test environment selected by |args.environment|.

  Local machine tests always run on the host; other local tests run on an
  emulator when |args.avd_name| is set, otherwise on an attached device.
  Calls |error_func| for any unsupported environment name.
  """
  if args.environment == 'local':
    if args.command in constants.LOCAL_MACHINE_TESTS:
      return local_machine_environment.LocalMachineEnvironment(
          args, output_manager, error_func)
    if args.avd_name:
      return local_emulator_environment.LocalEmulatorEnvironment(
          args, output_manager, error_func)
    return local_device_environment.LocalDeviceEnvironment(
        args, output_manager, error_func)

  error_func('Unable to create %s environment.' % args.environment)
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
97a6be8339a60d63037f8b82f678e9d303761f4b
|
c9b1e04ba65ba3e0af2a8ae86b88187b72bcaa0b
|
/branches/Release0_70_Branch/tests/unittests/persistenceTests/HTMLWriterTest.py
|
84743b68f664018b2bfc3847f13b0d62eebaf424
|
[] |
no_license
|
feitianyiren/TaskCoach
|
7762a89d5b521cfba0827323a9e8a91d1579810b
|
0b7427562074845ac771e59e24a750aa5b432589
|
refs/heads/master
| 2020-04-08T04:56:35.491490
| 2016-01-12T13:29:03
| 2016-01-12T13:29:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,514
|
py
|
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2008 Frank Niessink <frank@niessink.com>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import wx, StringIO
import test
from unittests import dummy
from taskcoachlib import persistence, gui, config, widgets
from taskcoachlib.domain import task, category, effort, date, note
class HTMLWriterTestCase(test.wxTestCase):
    """Shared fixture for the HTML export tests: one task in a viewer, with a
    persistence.HTMLWriter writing into an in-memory StringIO buffer.
    Subclasses provide createViewer() to pick the viewer under test."""
    def setUp(self):
        super(HTMLWriterTestCase, self).setUp()
        self.fd = StringIO.StringIO()
        self.writer = persistence.HTMLWriter(self.fd)
        self.task = task.Task('Task subject')
        self.taskList = task.TaskList([self.task])
        self.effortList = effort.EffortList(self.taskList)
        self.categories = category.CategoryList()
        self.notes = note.NoteContainer()
        self.settings = config.Settings(load=False)
        self.viewerContainer = gui.viewercontainer.ViewerContainer(\
            widgets.Notebook(self.frame), self.settings, 'mainviewer')
        self.createViewer()
    def __writeAndRead(self, selectionOnly):
        """Run the writer over the current viewer and return the HTML text."""
        self.writer.write(self.viewer, selectionOnly)
        return self.fd.getvalue()
    def expectInHTML(self, htmlFragment, selectionOnly=False):
        """Assert that the exported HTML contains htmlFragment."""
        html = self.__writeAndRead(selectionOnly)
        self.failUnless(htmlFragment in html,
            '%s not in %s'%(htmlFragment, html))
    def expectNotInHTML(self, htmlFragment, selectionOnly=False):
        """Assert that the exported HTML does not contain htmlFragment."""
        html = self.__writeAndRead(selectionOnly)
        self.failIf(htmlFragment in html, '%s in %s'%(htmlFragment, html))
class TaskTests(object):
    """Mixin with export tests common to the list and tree HTML writers.
    Relies on the HTMLWriterTestCase fixture (self.task, self.settings, ...)
    and on a subclass-provided selectItem()."""
    def testTaskSubject(self):
        self.expectInHTML('>Task subject<')
    def testWriteSelectionOnly(self):
        # Nothing is selected, so a selection-only export must be empty of it.
        self.expectNotInHTML('>Task subject<', selectionOnly=True)
    def testWriteSelectionOnly_SelectedChild(self):
        # Selecting a child should drag its parent into the export.
        child = task.Task('Child')
        self.task.addChild(child)
        self.taskList.append(child)
        self.selectItem(1)
        self.expectInHTML('>Task subject<')
    def testSubjectColumnAlignment(self):
        self.expectInHTML('<td align="left">Task subject</td>')
    def testOverdueTask(self):
        self.task.setDueDate(date.Yesterday())
        self.expectInHTML('<font color="#FF0000">Task subject</font>')
    def testCompletedTask(self):
        self.task.setCompletionDate()
        self.expectInHTML('<font color="#00FF00">Task subject</font>')
    def testTaskDueToday(self):
        self.task.setDueDate(date.Today())
        # Settings store the colour as a stringified tuple; eval() turns it
        # back into numbers (trusted config file, but note the eval).
        expectedColor = '%02X%02X%02X'%eval(self.settings.get('color', 'duetodaytasks'))[:3]
        self.expectInHTML('<font color="#%s">Task subject</font>'%expectedColor)
    def testInactiveTask(self):
        self.task.setStartDate(date.Tomorrow())
        expectedColor = '%02X%02X%02X'%eval(self.settings.get('color', 'inactivetasks'))[:3]
        self.expectInHTML('<font color="#%s">Task subject</font>'%expectedColor)
    def testCategoryColor(self):
        cat = category.Category('cat', color=wx.RED)
        self.task.addCategory(cat)
        self.expectInHTML('<tr bgcolor="#FF0000">')
    def testCategoryColorAsTuple(self):
        cat = category.Category('cat', color=(255, 0, 0, 0))
        self.task.addCategory(cat)
        self.expectInHTML('<tr bgcolor="#FF0000">')
class HTMLListWriterTest(TaskTests, HTMLWriterTestCase):
    """Run the shared TaskTests against the flat task *list* viewer, plus
    list-specific description-column tests."""
    def createViewer(self):
        self.viewer = gui.viewer.TaskListViewer(self.frame, self.taskList,
            self.settings, categories=self.categories, efforts=self.effortList)
    def selectItem(self, index):
        self.viewer.widget.SelectItem(index)
    def testTaskDescription(self):
        self.task.setDescription('Task description')
        self.viewer.showColumnByName('description')
        self.expectInHTML('>Task description<')
    def testTaskDescriptionWithNewLine(self):
        # Newlines in descriptions must be rendered as <br> in the export.
        self.task.setDescription('Line1\nLine2')
        self.viewer.showColumnByName('description')
        self.expectInHTML('>Line1<br>Line2<')
class HTMLTreeWriterTest(TaskTests, HTMLWriterTestCase):
    """Run the shared TaskTests against the task *tree* viewer."""
    def createViewer(self):
        self.viewer = gui.viewer.TaskTreeViewer(self.frame, self.taskList,
            self.settings, categories=self.categories, efforts=self.effortList)
    def selectItem(self, index):
        # Tree widgets select by item handle; `index` is ignored and the
        # first child of the (hidden) root is selected instead.
        item, cookie = self.viewer.widget.GetFirstChild(self.viewer.widget.GetRootItem())
        self.viewer.widget.SelectItem(item)
class EffortWriterTest(HTMLWriterTestCase):
    """HTML export tests for the effort list viewer."""
    def setUp(self):
        # Base setUp creates the task and viewer; then add one effort record.
        super(EffortWriterTest, self).setUp()
        self.task.addEffort(effort.Effort(self.task))
    def createViewer(self):
        self.viewer = gui.viewer.EffortListViewer(self.frame, self.taskList,
            self.settings)
    def testTaskSubject(self):
        self.expectInHTML('>Task subject<')
    def testEffortDuration(self):
        # A just-created effort has zero elapsed time.
        self.expectInHTML('>0:00:00<')
|
[
"hieronymus_schweiz@yahoo.de"
] |
hieronymus_schweiz@yahoo.de
|
4cf43299d54bb37595d33106ca7efa886f45fa1f
|
a9b67cdcdd09d90669e87ececc0830419ad999c6
|
/blog/urls.py
|
2f4b41b1bd42ea09787542f648536b9799b3d907
|
[] |
no_license
|
ver0nika4ka/my-first-blog
|
c05d30d21900e8706200c9971307faaa603a6220
|
bf81939ca502ab5b7c5b4577b01ede1c518838d9
|
refs/heads/master
| 2021-06-25T18:25:24.703987
| 2021-01-05T06:59:36
| 2021-01-05T06:59:36
| 193,432,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,019
|
py
|
# This imports Django's path function and all views from the blog application.
from django.urls import path
from . import views
urlpatterns = [
    path('', views.post_list, name='post_list'),
    # The fragment post/<int:pk>/ defines the URL pattern:
    # post/ means the URL must contain the word post followed by a slash /.
    # <int:pk> means Django expects an integer value here and passes it to
    # the view as the `pk` variable.
    # / — one more / character is required before the address ends.
    path('post/<int:pk>/', views.post_detail, name='post_detail'),
    path('post/new/', views.post_new, name='post_new'),
    path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
]
|
[
"veranika.aizu@gmail.com"
] |
veranika.aizu@gmail.com
|
f2c7c34ff3fe71ccce7318a2d04bdd2e37ca1372
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startCirq858.py
|
8d4b711a55f0b71384de14f529d3ccf647d1d1f6
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,432
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=42
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the auto-generated benchmark circuit on ``input_qubit``.

    The ``# number=NN`` trailing comments are bookkeeping from the circuit
    generator and identify individual gate insertions.

    NOTE(review): parameter ``n`` is unused — the qubit count comes entirely
    from ``input_qubit``; kept for the generator's fixed signature.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=3
    c.append(cirq.H.on(input_qubit[1])) # number=4
    c.append(cirq.H.on(input_qubit[2])) # number=5
    c.append(cirq.H.on(input_qubit[3])) # number=6
    c.append(cirq.H.on(input_qubit[4])) # number=21
    # Two generated iterations of the same gate sequence.
    for i in range(2):
        c.append(cirq.H.on(input_qubit[0])) # number=1
        c.append(cirq.H.on(input_qubit[1])) # number=2
        c.append(cirq.H.on(input_qubit[2])) # number=7
        c.append(cirq.H.on(input_qubit[3])) # number=8
        c.append(cirq.H.on(input_qubit[0])) # number=17
        c.append(cirq.H.on(input_qubit[1])) # number=18
        c.append(cirq.H.on(input_qubit[2])) # number=19
        c.append(cirq.H.on(input_qubit[3])) # number=20
        c.append(cirq.H.on(input_qubit[0])) # number=36
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=37
        c.append(cirq.H.on(input_qubit[0])) # number=38
        c.append(cirq.X.on(input_qubit[0])) # number=29
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=30
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=32
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=39
        c.append(cirq.X.on(input_qubit[1])) # number=40
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=41
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=34
        c.append(cirq.H.on(input_qubit[2])) # number=25
        c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=26
        c.append(cirq.H.on(input_qubit[2])) # number=35
        c.append(cirq.H.on(input_qubit[2])) # number=27
        c.append(cirq.X.on(input_qubit[2])) # number=23
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=24
        c.append(cirq.X.on(input_qubit[3])) # number=12
        c.append(cirq.X.on(input_qubit[0])) # number=13
        c.append(cirq.X.on(input_qubit[1])) # number=14
        c.append(cirq.X.on(input_qubit[2])) # number=15
        c.append(cirq.X.on(input_qubit[3])) # number=16
        c.append(cirq.Z.on(input_qubit[1])) # number=31
    # circuit end
    # Measure every qubit into the 'result' key for the histogram below.
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Concatenate measured bit values into a single string of digits."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    # Build a 5-qubit circuit on a column of grid qubits, optimise it for
    # the Sycamore sqrt-iswap gate set, then sample it on the simulator.
    qubit_count = 5
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Dump the histogram, circuit length and circuit text to the data file.
    # NOTE(review): the relative path assumes the script is run from its own
    # directory — confirm the working directory before relying on it.
    writefile = open("../data/startCirq858.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
c5a07c81fbe718191d7e3692de37bdb34ffea009
|
7628bfc96a7248461d3f2071992d682c0ad18272
|
/test/test_runners.py
|
ec4a2e80525cd0e2c6011733b05cf1da14bac0ec
|
[
"Apache-2.0"
] |
permissive
|
war3gu/tensorforce
|
d0c72b26364368d873b1e2717595718669b7fa2d
|
70d551f8f92b694aae51262663e77c25576ecfad
|
refs/heads/master
| 2020-12-11T14:51:21.916658
| 2020-01-13T20:43:25
| 2020-01-13T20:43:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,105
|
py
|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
import time
import unittest
from tensorforce import ParallelRunner, Runner
from test.unittest_base import UnittestBase
class TestRunners(UnittestBase, unittest.TestCase):
    """Integration tests for ``Runner`` and ``ParallelRunner``: plain runs,
    episode/timestep callbacks (single and lists) and periodic evaluation."""
    # Fixture knobs consumed by UnittestBase.prepare().
    min_timesteps = 3
    require_observe = True
    def test_runner(self):
        """Exercise the single-environment Runner."""
        self.start_tests(name='runner')
        agent, environment = self.prepare()
        runner = Runner(agent=agent, environment=environment)
        runner.run(num_episodes=10, use_tqdm=False)
        runner.close()
        self.finished_test()
        # callback
        agent, environment = self.prepare()
        runner = Runner(agent=agent, environment=environment)
        callback_episode_frequency = 2
        self.num_callbacks = 0
        def callback(r):
            # Must fire exactly every `callback_episode_frequency` episodes.
            self.num_callbacks += 1
            self.assertEqual(r.episodes, self.num_callbacks * callback_episode_frequency)
        runner.run(
            num_episodes=5, callback=callback,
            callback_episode_frequency=callback_episode_frequency, use_tqdm=False
        )
        callback_timestep_frequency = 3
        self.num_callbacks = 0
        def callback(r):
            # Must fire exactly every `callback_timestep_frequency` timesteps.
            self.num_callbacks += 1
            self.assertEqual(r.episode_timestep, self.num_callbacks * callback_timestep_frequency)
        runner.run(
            num_episodes=1, callback=callback,
            callback_timestep_frequency=callback_timestep_frequency, use_tqdm=False
        )
        self.is_callback1 = False
        self.is_callback2 = False
        def callback1(r):
            self.is_callback1 = True
        def callback2(r):
            self.is_callback2 = True
        # A list of callbacks: every callback in the list must be invoked.
        runner.run(
            num_episodes=1, callback=[callback1, callback2],
            callback_timestep_frequency=callback_timestep_frequency, use_tqdm=False
        )
        runner.close()
        self.finished_test(assertion=(self.is_callback1 and self.is_callback2))
        # evaluation
        agent, environment = self.prepare()
        runner = Runner(agent=agent, environment=environment)
        self.num_evaluations = 0
        evaluation_frequency = 3
        num_evaluation_iterations = 2
        def evaluation_callback(r):
            self.num_evaluations += 1
            self.assertEqual(r.episodes, self.num_evaluations * evaluation_frequency)
            self.assertEqual(len(r.evaluation_timesteps), num_evaluation_iterations)
        runner.run(
            num_episodes=6, use_tqdm=False, evaluation_callback=evaluation_callback,
            evaluation_frequency=evaluation_frequency,
            num_evaluation_iterations=num_evaluation_iterations
        )
        runner.close()
        self.finished_test()
    def test_parallel_runner(self):
        """Exercise ParallelRunner across two deep-copied environments."""
        self.start_tests(name='parallel-runner')
        agent, environment1 = self.prepare(
            update=dict(unit='episodes', batch_size=1), parallel_interactions=2
        )
        environment2 = copy.deepcopy(environment1)
        runner = ParallelRunner(agent=agent, environments=[environment1, environment2])
        runner.run(num_episodes=5, use_tqdm=False)
        runner.close()
        self.finished_test()
        # callback
        agent, environment1 = self.prepare(
            update=dict(unit='episodes', batch_size=1), parallel_interactions=2
        )
        environment2 = copy.deepcopy(environment1)
        runner = ParallelRunner(agent=agent, environments=[environment1, environment2])
        callback_episode_frequency = 2
        self.num_callbacks = 0
        def callback(r, parallel):
            self.num_callbacks += 1
            self.assertEqual(r.episodes, self.num_callbacks * callback_episode_frequency)
        runner.run(
            num_episodes=5, callback=callback,
            callback_episode_frequency=callback_episode_frequency, use_tqdm=False
        )
        # Give the parallel workers a moment to settle before the next run.
        time.sleep(1)
        callback_timestep_frequency = 3
        def callback(r, parallel):
            # Per-environment timestep counter must align with the frequency.
            self.assertEqual(r.episode_timestep[parallel] % callback_timestep_frequency, 0)
        runner.run(
            num_episodes=1, callback=callback,
            callback_timestep_frequency=callback_timestep_frequency, use_tqdm=False
        )
        self.is_callback1 = False
        self.is_callback2 = False
        def callback1(r, parallel):
            self.is_callback1 = True
        def callback2(r, parallel):
            self.is_callback2 = True
        runner.run(
            num_episodes=1, callback=[callback1, callback2],
            callback_timestep_frequency=callback_timestep_frequency, use_tqdm=False
        )
        runner.close()
        self.finished_test(assertion=(self.is_callback1 and self.is_callback2))
        # evaluation
        agent, environment1 = self.prepare(
            update=dict(unit='episodes', batch_size=1), parallel_interactions=2
        )
        environment2 = copy.deepcopy(environment1)
        evaluation_environment = copy.deepcopy(environment1)
        runner = ParallelRunner(
            agent=agent, environments=[environment1, environment2],
            evaluation_environment=evaluation_environment
        )
        self.num_evaluations = 0
        def evaluation_callback(r):
            self.num_evaluations += 1
        runner.run(num_episodes=5, use_tqdm=False, evaluation_callback=evaluation_callback)
        runner.close()
        self.assertGreaterEqual(self.num_evaluations, 1)
        self.finished_test()
|
[
"alexkuhnle@t-online.de"
] |
alexkuhnle@t-online.de
|
b2edd05a4a829eae2b86d8543f76dd6e0c7cea91
|
9d7f0f91e51083039013f792d6f5d0067a663844
|
/ventura-sanic/db.py
|
430ccc5732f0299e749ca7e7f4a3d27d869b9300
|
[] |
no_license
|
ventura-open-source/non-blocking-service-example
|
88b98325cfe5db8af0155e8b4b7590c56d6f0884
|
d5bec25e986981004a198d74aecc11f32f70eff4
|
refs/heads/master
| 2020-03-22T05:51:07.927025
| 2018-07-03T14:24:20
| 2018-07-03T14:24:20
| 139,594,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
import logging
import peewee_async
# Setup mysql connection
# NOTE(review): credentials are hard-coded ('root'/'root') — move them to
# environment variables or a config file before deploying anywhere real.
# NOTE(review): `logging` is imported but not used here; presumably
# configured by the consumer of this module — verify.
database = peewee_async.MySQLDatabase(
    'ventura_laravel',
    user='root',
    password='root',
    host='localhost',
    port=3306,
)
# No need for sync anymore! Synchronous queries are disabled so every access
# must go through the async manager below.
database.set_allow_sync(False)
# Create async models manager
objects = peewee_async.Manager(database)
|
[
"genesisdaft@gmail.com"
] |
genesisdaft@gmail.com
|
3af3448c062fe48f3b47b009218c66d850913b87
|
f2604386f503a06de34f0e6c23993dd0d1ce093d
|
/wallet/urls.py
|
ac851ec7356c586163db22b20c9502ca89aeb8f0
|
[] |
no_license
|
kicha-05/wallet
|
6c64ebbf2849d084b68ea13b60a6ac675a958c78
|
ae6af4479ab3b7211265c943ccb1791965b2fa66
|
refs/heads/master
| 2022-12-25T11:31:15.926443
| 2020-10-08T07:48:12
| 2020-10-08T07:48:12
| 302,228,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
# URL routing for the wallet app.
# NOTE(review): `admin` and `url` are imported but unused below; presumably
# leftovers — confirm before removing.
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from .views import *
urlpatterns = [
    path('init', InitializeAccount.as_view(), name='init'),
    path('wallet', WalletView.as_view(), name='wallet'),
    # Deposit and withdraw share one view class; the view distinguishes the
    # operations (both routes map to DepositWithdrawVirtualMoney).
    path('wallet/deposit', DepositWithdrawVirtualMoney.as_view(), name='deposit'),
    path('wallet/withdraw', DepositWithdrawVirtualMoney.as_view(), name='withdraw')
]
|
[
"krishna@gmail.com"
] |
krishna@gmail.com
|
b6b415e6bf267ac3378d6b864303165d97c05cdc
|
3f48e3308674212408c3b6ca972eb4f793bf142b
|
/f0_data_process/chip_seq/final_chipseq/sicer_df/py1_write_run_sicer_df_slurm.py
|
d366ec6d7bdea0661c665daf34df7addf7f051d8
|
[] |
no_license
|
zanglab/utx_code
|
8497840ace81e0337f92f04fafbb691f0ed24865
|
32fc7851207f650b3cc78974ab798f8606099e56
|
refs/heads/main
| 2023-06-28T17:38:33.231877
| 2021-07-27T01:31:00
| 2021-07-27T01:31:00
| 388,648,753
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,039
|
py
|
# Generate one SLURM submission script per (comparison pair, factor) running
# sicer_df on the matching treatment/control ChIP-seq BAM pairs.
import sys,argparse
import os,glob
import numpy as np
import pandas as pd
from scipy import stats
import re,bisect
# NOTE(review): numpy/pandas/scipy/re/bisect/argparse/glob are imported but
# unused in this script; presumably copied from a template — confirm.
slurm_dir = 'slurm_files'
os.makedirs(slurm_dir,exist_ok=True)
project_dir='/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang'
outdir='sicer_out'
sub_dirs=['re_1st_submission_H3K4me3_MLL4SC_trim','re_202012_H3K4me2_trim','202102_H3K27ac_H3K4me1_trim','202011_UTX_trim','202102_UTX_H3K27me3_trim']
# celltypes = ['Vector','WT','DEL','EIF','MT2','TPR']
factors= ['UTX','UTXFEB','H3K4me1','H3K4me2','H3K4me3','H3K27ac','H3K27me3','MLL4']
# Each pair is [control, treatment] for the differential comparison.
compr_pairs = [['Vector','WT'],['WT','DEL'],['DEL','EIF']]
for sub_dir in sub_dirs:
    for compr_pair in compr_pairs:
        for factor in factors:
            sicer_outdir='{}/{}_over_{}_{}'.format(outdir,compr_pair[1],compr_pair[0],factor)
            os.makedirs(sicer_outdir,exist_ok=True)
            basename_treatment = '{}_{}'.format(compr_pair[1],factor)
            basename_control = '{}_{}'.format(compr_pair[0],factor)
            bam_control ='{}/f0_data_process/chip_seq/final_chipseq/{}/process_qc_out/{}/{}_treat.bam'.format(project_dir,sub_dir,basename_control,basename_control)
            bam_treatment ='{}/f0_data_process/chip_seq/final_chipseq/{}/process_qc_out/{}/{}_treat.bam'.format(project_dir,sub_dir,basename_treatment,basename_treatment)
            # Only emit a script when the control BAM exists for this sub_dir.
            # NOTE(review): the treatment BAM is not checked — presumably it
            # always exists alongside the control; confirm.
            if os.path.isfile(bam_control):
                #print(sub_dir,celltype,factor)
                slurmfile = '{}/{}_over_{}_{}.slurm'.format(slurm_dir,compr_pair[1],compr_pair[0],factor)
                with open(slurmfile,'w') as slurmout:
                    # Fixed SBATCH header (string content must stay at column 0).
                    slurmout.write('''#!/bin/bash
#SBATCH -n 1
#SBATCH --mem=20000
#SBATCH -t 24:00:00
#SBATCH -p standard
#SBATCH -A cphg_cz3d
''')
                    slurmout.write('#SBATCH -o {}/slurm_{}_over_{}_{}.out\n\n'.format(slurm_dir,compr_pair[1],compr_pair[0],factor))
                    slurmout.write('time sicer_df -t \\\n{} \\\n{} \\\n-s hg38 --output_directory {}\n'.format(bam_treatment,bam_control,sicer_outdir))
|
[
"zhenjia.sdu@gmail.com"
] |
zhenjia.sdu@gmail.com
|
a618ed18a38138e126c6e7cf13a613ff0eeac304
|
5f6425e9d83b57b864e48f227e1dc58356a555c0
|
/utils/palettes/archive/selenized_medium_0_4.py
|
94959c43f26c4b355583da48508d798782d438af
|
[
"MIT"
] |
permissive
|
jan-warchol/selenized
|
b374fa7822f281b16aa8b52e34bd1e585db75904
|
df1c7f1f94f22e2c717f8224158f6f4097c5ecbe
|
refs/heads/master
| 2023-06-22T09:37:02.962677
| 2022-09-12T20:24:40
| 2022-09-12T20:24:40
| 45,570,283
| 663
| 58
|
MIT
| 2023-04-18T09:33:22
| 2015-11-04T22:00:52
|
Emacs Lisp
|
UTF-8
|
Python
| false
| false
| 590
|
py
|
# Human-readable name of this colour-scheme variant.
name = 'Selenized medium v0.4 (adapted monotones)'
# Palette entries: semantic role name -> hex RGB colour string.
palette = {
    # Background / foreground monotones.
    "bg_0": "#154053",
    "fg_0": "#a8bcc3",
    "bg_1": "#245971",
    # Normal accent colours.
    "red": "#fc5851",
    "green": "#78b93e",
    "yellow": "#d8b033",
    "blue": "#4e97f5",
    "magenta": "#f16dc5",
    "cyan": "#41c7b9",
    "dim_0": "#7c95a0",
    # NOTE(review): bg_2 duplicates bg_1 in this variant -- confirm intended.
    "bg_2": "#245971",
    # Bright accent colours.
    "br_red": "#ff675d",
    "br_green": "#85c74c",
    "br_yellow": "#e7be42",
    "br_blue": "#5ea4ff",
    "br_magenta": "#ff7bd3",
    "br_cyan": "#52d5c7",
    "fg_1": "#c4d8df",
}
|
[
"jan.warchol@gmail.com"
] |
jan.warchol@gmail.com
|
0ad20010a16169575984c8d5f5c526a6e6379811
|
7675abbb6a30fdb56cb534cbd198571f75cc926e
|
/dazubi_fan_site/dazubi_fan_site/settings.py
|
240dda22c91a32799aaaab32a9a20c13a4781f3d
|
[] |
no_license
|
moreal/dazbee_fan_site
|
4b2077635118987eb2731b36030c47cd181cc25a
|
dfb0e02a25087917968be6f4ee54049e9cca1b07
|
refs/heads/master
| 2020-04-09T02:37:04.173566
| 2018-12-01T13:56:51
| 2018-12-01T13:56:51
| 159,947,801
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,115
|
py
|
"""
Django settings for dazubi_fan_site project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'abelz*+9_9_^&u$s5=zefr!*-z#t9gp$3)in%^v6er82end3di'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dazubi_fan_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dazubi_fan_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"dev.moreal@gmail.com"
] |
dev.moreal@gmail.com
|
445409926fa5283911011a77f745099c9cf58d53
|
99a310f6bb6c7a6c728f1b3ae78054487372042d
|
/aoc2017/day9.py
|
915cef5559e604d0f1001897d7c7e96a442ff7ed
|
[] |
no_license
|
jepebe/aoc2018
|
46ce6b46479a0faf2c2970413af14a071dcfdb79
|
4bf91b99bec4b59529533ef70f24bf6496bada99
|
refs/heads/master
| 2023-01-11T16:44:42.125394
| 2023-01-06T06:27:14
| 2023-01-06T06:27:14
| 159,912,721
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,412
|
py
|
def process(data):
    """Score a character stream (Advent of Code 2017, day 9).

    Returns ``(group_score, garbage_count)``: the sum of nesting depths of
    all ``{...}`` groups, and the number of non-cancelled characters found
    inside ``<...>`` garbage.  A ``!`` cancels the character after it.
    """
    open_depths = []      # depth value of each currently-open group
    total_score = 0
    garbage_chars = 0
    in_garbage = False
    skip_next = False
    for ch in data:
        if skip_next:
            # Character cancelled by a preceding '!'.
            skip_next = False
        elif ch == '!':
            skip_next = True
        elif in_garbage:
            if ch == '>':
                in_garbage = False
            else:
                garbage_chars += 1
        elif ch == '<':
            in_garbage = True
        elif ch == '{':
            open_depths.append(len(open_depths) + 1)
        elif ch == '}':
            total_score += open_depths.pop()
    # Well-formed input closes every group it opens.
    assert len(open_depths) == 0
    return total_score, garbage_chars
if __name__ == '__main__':
    # Worked examples from the puzzle statement act as a self-test before
    # the real input is processed.
    assert process('<>') == (0, 0)
    assert process('<random characters>') == (0, 17)
    assert process('<<<<>') == (0, 3)
    assert process('<{!>}>') == (0, 2)
    assert process('<!!>') == (0, 0)
    assert process('<!!!>>') == (0, 0)
    assert process('<{o"i!a,<{i<a>') == (0, 10)
    assert process('{}') == (1, 0)
    assert process('{{{}}}') == (6, 0)
    assert process('{{{},{},{{}}}}') == (16, 0)
    assert process('{<a>,<a>,<a>,<a>}') == (1, 4)
    assert process('{{<a>},{<a>},{<a>},{<a>}}') == (9, 4)
    assert process('{{<!>},{<!>},{<!>},{<a>}}') == (3, 13)
    assert process('{{<!!>},{<!!>},{<!!>},{<!!>}}') == (9, 0)
    # Solve the real puzzle input.
    with open('day9.txt', 'r') as f:
        data = f.read()
        print(process(data))
|
[
"jepebe@users.noreply.github.com"
] |
jepebe@users.noreply.github.com
|
34974698db983346e41e782fa77394e2c568893b
|
b4874cbd7299492277ad28441bad05e6348307f2
|
/dummies/zerg/zerg_random.py
|
b950007a6b9104c741a3ee370c8d3c20e56d79a4
|
[
"MIT"
] |
permissive
|
MadManSC2/sharpy-sc2
|
7d405578413c7a8f8fc1e4030ad719d7fe5df10a
|
13950357df2db58033daab24f076e3ae83f0b2a8
|
refs/heads/master
| 2021-01-05T03:38:58.038563
| 2020-03-07T20:35:24
| 2020-03-07T20:35:24
| 240,865,466
| 1
| 0
|
MIT
| 2020-02-16T09:38:05
| 2020-02-16T09:38:04
| null |
UTF-8
|
Python
| false
| false
| 400
|
py
|
import random
# Choose one of the six dummy zerg strategies at import time, so each game
# launches with a randomly selected build order.
val = random.randint(0, 5)
if val == 0:
    from .lings import LadderBot
elif val == 1:
    from .macro_roach import LadderBot
elif val == 2:
    from .macro_zerg_v2 import LadderBot
elif val == 3:
    from .mutalisk import LadderBot
elif val == 4:
    from .roach_hydra import LadderBot
elif val == 5:
    from .twelve_pool import LadderBot
class RandomZergBot(LadderBot):
    # Thin alias: inherits everything from whichever LadderBot was imported.
    pass
|
[
"aki.vanttinen@sedgestudios.com"
] |
aki.vanttinen@sedgestudios.com
|
8c4eecccb5304b7457c69b24b8d130d4c73a3c7f
|
1680edad321979cdf9f655ace5533f67c4ae6589
|
/client_support/client_support/doctype/email/email.py
|
8ab950a133bf6e6b679ac6897b073e95300c934b
|
[] |
no_license
|
ssindham/Client_Support
|
18a28bd6f55807b1c07ff233a839a2207f039874
|
1fc59526f27ead426f5ce9ac8f582e5441b05410
|
refs/heads/master
| 2021-06-18T11:27:33.218878
| 2017-06-26T12:59:42
| 2017-06-26T12:59:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Makarand Bauskar and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class email(Document):
    # Server-side controller for the "email" DocType; no custom behaviour.
    # NOTE(review): the lowercase class name appears to match the DocType
    # name, which Frappe uses to resolve the controller -- do not rename.
    pass
|
[
"sagarshiragawakar@gmail.com"
] |
sagarshiragawakar@gmail.com
|
b4cda9c4c5944bb55bd00d0c587319d667ec5e35
|
85fa329cadd8edb7aa8ad32d573a1da91445c676
|
/RSVP_MVPA/MVPA_multi_methods/accs_multi_classifiers/do_MVPA_alltime_eeg.py
|
610f5f953b3b7c06808c11dcda1f379a0783158c
|
[] |
no_license
|
listenzcc/RSVP_scripts
|
05aaed6d1aded2c3b1851ece61f52442c8a9eba8
|
e01a60c980c2bf6a002f2673a5b8984d3ad70f6e
|
refs/heads/master
| 2020-05-02T19:33:43.838999
| 2019-07-03T01:13:25
| 2019-07-03T01:13:25
| 178,161,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,947
|
py
|
# coding: utf-8
'''
This script is to do MVPA on MEG RSVP dataset
'''
import matplotlib.pyplot as plt
import mne
import numpy as np
import os
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
import time
import pdb
'''
# Function: Setting MVPA stuff.
# Output: cv, cross-validation maker.
# Output: pca_pipeline, pipeline of pca decomposition.
# Output: xdawn_pipeline, pipeline of xdawn filter.
# Output: clf_*, classifier of svm and lr.
'''
# xDAWN spatial filter (8 components) applied before classification.
xdawn = mne.preprocessing.Xdawn(n_components=8)
# 10-fold stratified cross-validation with shuffling.
cv = StratifiedKFold(n_splits=10, shuffle=True)
# Flatten each epoch to a feature vector and scale features to [0, 1].
normalize_pipeline = make_pipeline(mne.decoding.Vectorizer(), MinMaxScaler())
# Three classifiers compared throughout: RBF SVM, linear SVM, logistic reg.
clf_svm_rbf = svm.SVC(gamma='scale', kernel='rbf', class_weight='balanced', verbose=True)
clf_svm_linear = svm.SVC(gamma='scale', kernel='linear', class_weight='balanced', verbose=True)
clf_lr = LogisticRegression(class_weight='balanced', verbose=True)
def report_results(true_label, pred_label, title=None):
    """Print a per-class classification report; when *title* is given,
    also write it to ``<results_dir>/<title>.txt``."""
    print(title)
    text = classification_report(true_label, pred_label,
                                 target_names=['odd', 'norm'])
    print(text)
    if title is None:
        return
    out_path = os.path.join(results_dir, '%s.txt' % title)
    with open(out_path, 'w') as out_file:
        out_file.writelines(text)
'''
# Function: Setting environment for the script.
# Output: root_path, directory of project.
# Output: time_stamp, string of beginning time of the script.
# Output: id_string, customer identifier string.
# Output: results_dir, directory for storing results.
'''
root_dir = os.path.join('/nfs/cell_a/userhome/zcc/documents/RSVP_experiment/')
time_stamp = time.strftime('%Y-%m-%d-%H-%M-%S')
id_string = 'RSVP_MEG'
results_dir = os.path.join(root_dir, 'RSVP_MVPA', 'MVPA_lr')
epochs_dir = os.path.join(root_dir, 'epochs_saver', 'epochs_freq_0.5_30_crop_n0.2_p1.1')
# Per-subject configuration:
#   range_run   -- run indices whose epoch files are loaded,
#   epochs_path -- printf-style template for the per-run -epo.fif file,
#   report_path -- destination text file for the classification reports.
read_save_stuff = {}
read_save_stuff['S01'] = dict(
    range_run = range(1, 11),
    epochs_path = os.path.join(epochs_dir, 'eeg_S01_epochs_%d-epo.fif'),
    report_path = os.path.join(results_dir, 'accs_eeg_S01.txt'))
# BUGFIX: this entry was previously keyed 'S01' as well, silently
# overwriting subject S01's settings so that only S02 was ever analysed.
read_save_stuff['S02'] = dict(
    range_run = range(1, 11),
    epochs_path = os.path.join(epochs_dir, 'eeg_S02_epochs_%d-epo.fif'),
    report_path = os.path.join(results_dir, 'accs_eeg_S02.txt'))
for stuff in read_save_stuff.values():
    # Show this subject's configuration before processing.
    print('-'*80)
    for e in stuff.items():
        print(e[0], e[1])
    '''
    # Function: Reading epochs.
    '''
    labels = None
    epochs_data = None
    epochs_list = []
    # Load each run's epochs and accumulate data/labels across runs.
    for i in stuff['range_run']:
        # Function: Reading epochs from -epo.fif.
        epo_path = os.path.join(stuff['epochs_path'] % i)
        epochs = mne.read_epochs(epo_path, verbose=True)
        epochs.crop(tmin=0.0, tmax=1.0)
        # Attention!!!
        # This may cause poor alignment between epochs.
        # But this is necessary for concatenate_epochs.
        if epochs_list.__len__() != 0:
            epochs.info['dev_head_t'] = epochs_list[0].info['dev_head_t']
        epochs_list.append(epochs)
        # Function: Preparing dataset for MVPA.
        if labels is None:
            labels = epochs.events[:, -1]
            epochs_data = epochs.get_data()
        else:
            labels = np.concatenate([labels, epochs.events[:, -1]])
            epochs_data = np.concatenate([epochs_data, epochs.get_data()], 0)
    epochs = mne.epochs.concatenate_epochs(epochs_list)
    '''
    # Function: Repeat training and testing.
    # Output:
    '''
    # Sliding-window setup: 100 ms windows stepped by 50 ms.
    sfreq = epochs.info['sfreq']
    w_length = int(sfreq * 0.1)   # running classifier: window length
    w_step = int(sfreq * 0.05)  # running classifier: window step size
    w_start = np.arange(0, epochs.get_data().shape[2] - w_length, w_step)
    # init preds results.
    # One column per window plus a final column for the full-epoch fit.
    preds_xdawn_svm_rbf = np.empty([len(labels), len(w_start)+1])
    preds_xdawn_svm_linear = np.empty([len(labels), len(w_start)+1])
    preds_xdawn_lr = np.empty([len(labels), len(w_start)+1])
    for train, test in cv.split(epochs_data, labels):
        print('-' * 80)
        # xdawn spatial filtering: fit on the training fold only.
        xdawn_data_train = xdawn.fit_transform(epochs[train])
        xdawn_data_test = xdawn.transform(epochs[test])
        # Full-epoch (all time points) predictions fill the last column.
        data_train_ = xdawn_data_train[:, :, :]
        data_test_ = xdawn_data_test[:, :, :]
        # SVM rbf
        clf_svm_rbf.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
        preds_xdawn_svm_rbf[test, len(w_start)] = clf_svm_rbf.predict(normalize_pipeline.transform(data_test_))
        # SVM linear
        clf_svm_linear.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
        preds_xdawn_svm_linear[test, len(w_start)] = clf_svm_linear.predict(normalize_pipeline.transform(data_test_))
        # LR
        clf_lr.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
        preds_xdawn_lr[test, len(w_start)] = clf_lr.predict(normalize_pipeline.transform(data_test_))
        # Repeat per sliding time window.
        for j, start in enumerate(w_start):
            print(j, start)
            # xdawn
            data_train_ = xdawn_data_train[:, :, start:start+w_length]
            data_test_ = xdawn_data_test[:, :, start:start+w_length]
            # SVM rbf
            clf_svm_rbf.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
            preds_xdawn_svm_rbf[test, j] = clf_svm_rbf.predict(normalize_pipeline.transform(data_test_))
            # SVM linear
            clf_svm_linear.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
            preds_xdawn_svm_linear[test, j] = clf_svm_linear.predict(normalize_pipeline.transform(data_test_))
            # LR
            clf_lr.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
            preds_xdawn_lr[test, j] = clf_lr.predict(normalize_pipeline.transform(data_test_))
    '''
    # Function: Save report into file.
    '''
    # NOTE(review): classification_report expects (y_true, y_pred); here the
    # predictions are passed first and the true labels second, which swaps
    # precision and recall in the saved reports -- confirm intent.
    fpath = os.path.join(stuff['report_path'])
    # Full-epoch reports first (write mode truncates any previous file).
    with open(fpath, 'w') as f:
        report_svm_rbf = classification_report(preds_xdawn_svm_rbf[:, len(w_start)], labels, target_names=['odd', 'norm'])
        print(report_svm_rbf)
        f.writelines('\n[all_SVM_rbf]\n')
        f.writelines(report_svm_rbf)
        report_svm_linear = classification_report(preds_xdawn_svm_linear[:, len(w_start)], labels, target_names=['odd', 'norm'])
        print(report_svm_linear)
        f.writelines('\n[all_SVM_linear]\n')
        f.writelines(report_svm_linear)
        report_lr = classification_report(preds_xdawn_lr[:, len(w_start)], labels, target_names=['odd', 'norm'])
        print(report_lr)
        f.writelines('\n[all_LR]\n')
        f.writelines(report_lr)
    # Per-window reports are appended after the full-epoch reports.
    for j, start in enumerate(w_start):
        print(j)
        report_svm_rbf = classification_report(preds_xdawn_svm_rbf[:, j], labels, target_names=['odd', 'norm'])
        with open(fpath, 'a') as f:
            print(report_svm_rbf)
            f.writelines('\n[%d-%d, %f, %f, SVM_rbf]\n' % (start, start+w_length, epochs.times[start], epochs.times[start+w_length]))
            f.writelines(report_svm_rbf)
        report_svm_linear = classification_report(preds_xdawn_svm_linear[:, j], labels, target_names=['odd', 'norm'])
        with open(fpath, 'a') as f:
            print(report_svm_linear)
            f.writelines('\n[%d-%d, %f, %f, SVM_linear]\n' % (start, start+w_length, epochs.times[start], epochs.times[start+w_length]))
            f.writelines(report_svm_linear)
        report_lr = classification_report(preds_xdawn_lr[:, j], labels, target_names=['odd', 'norm'])
        with open(fpath, 'a') as f:
            print(report_lr)
            f.writelines('\n[%d-%d, %f, %f, LR]\n' % (start, start+w_length, epochs.times[start], epochs.times[start+w_length]))
            f.writelines(report_lr)
|
[
"listenzcc@mail.bnu.edu.cn"
] |
listenzcc@mail.bnu.edu.cn
|
e2aa1c6699efd5f2501f3a550014dce289e3e328
|
445b158bd10c79e19a679264745add3b3353dea3
|
/linux/bin/django-admin
|
c97656a5f6288659744de8ee0c98cf01c2083159
|
[] |
no_license
|
Carlosdher/topicos_especiasi
|
27e523830408b49e852c8c03fc4d0c6ecb14f5e9
|
86df42ea4b514fe9159d83a44ed9cd7a9544ca96
|
refs/heads/master
| 2020-03-30T18:48:38.385266
| 2018-10-04T04:57:30
| 2018-10-04T04:57:30
| 151,515,273
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
#!/home/ifpb/topicos/AndrmedAnime/andromedanimes/linux/bin/python3.6
# -*- coding: utf-8 -*-
"""Console entry point for Django's command-line utility (pip-generated)."""
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Strip a trailing "-script.py"/".exe" from argv[0] so usage messages
    # show the clean command name (Windows launcher convention).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
|
[
"carlosabc436@gmail.com"
] |
carlosabc436@gmail.com
|
|
a2f9ef70d3a18cdda815fb0235790cb046d2e584
|
4a0e3ffff54be178b377a4c18fe0ced2d44b7be6
|
/tests/test_forbidden_ops.py
|
455eb958543a82e631cf21bd2396fe317e29d2a1
|
[] |
no_license
|
WinVector/data_algebra
|
608371904c0fcc99ffab7e0fe57c49dc75fd6b21
|
1e96817919ae891ba108d8d7471b2200b2528271
|
refs/heads/main
| 2023-04-13T20:11:18.682084
| 2023-04-10T14:09:41
| 2023-04-10T14:09:41
| 203,080,133
| 113
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,729
|
py
|
import pytest
import data_algebra
from data_algebra.data_ops import *
import lark.exceptions
def test_forbidden_ops_raises():
    """Expressions using disallowed operators must fail while parsing."""
    bad_cases = [
        ("x && y", lark.exceptions.UnexpectedToken),
        ("x || y", lark.exceptions.UnexpectedToken),
        ("! y", lark.exceptions.UnexpectedCharacters),  # not in grammar
        ("~ y", AttributeError),  # objects don't implement ~
        ("x = y", lark.exceptions.UnexpectedToken),
    ]
    for expr, expected_exc in bad_cases:
        with pytest.raises(expected_exc):
            TableDescription(table_name="d", column_names=["x", "y"]).extend(
                {"z": expr}
            )
def test_forbidden_ops_inlines_left_alone():
    """The power operator ** is allowed and survives into the printed ops."""
    ops = TableDescription(table_name="d", column_names=["x", "y"]).extend(
        {"z": "x ** y"}
    )
    assert 'x ** y' in str(ops)
def test_forbidden_ops_inline():
    """Bitwise inline operators &, | and ^ are rejected with ValueError."""
    for expr in ("x & y", "x | y", "x ^ y"):
        with pytest.raises(ValueError):
            TableDescription(table_name="d", column_names=["x", "y"]).extend(
                {"z": expr}
            )
|
[
"jmount@win-vector.com"
] |
jmount@win-vector.com
|
ae917e55d2a596415e27aed0be505c99621cfeff
|
1fe4f9eb9b1d756ad17e1ff6585e8ee7af23903c
|
/saleor/dashboard/brand/filters.py
|
61af9c9da3dd49b1b7a5fa75b7bb664b1a2508f7
|
[
"BSD-3-Clause"
] |
permissive
|
Chaoslecion123/Diver
|
ab762e7e6c8d235fdb89f6c958488cd9b7667fdf
|
8c5c493701422eada49cbf95b0b0add08f1ea561
|
refs/heads/master
| 2022-02-23T10:43:03.946299
| 2019-10-19T23:39:47
| 2019-10-19T23:39:47
| 216,283,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,411
|
py
|
from django import forms
from django.utils.translation import npgettext, pgettext_lazy
from django_filters import CharFilter, ChoiceFilter, OrderingFilter
from ...core.filters import SortedFilterSet
from ...brand.models import Brand
# Fields offered in the "sort by" dropdown: model field -> display label.
SORT_BY_FIELDS = {
    'name': pgettext_lazy('Brand list sorting option', 'name')}
# Yes/No choices for the boolean "is featured" filter.
BOOLEAN_CHOICES = (
    ('1', pgettext_lazy('Is active filter choice', 'Yes')),
    ('0', pgettext_lazy('Is active filter choice', 'No')))
class BrandFilter(SortedFilterSet):
    """Dashboard filter set for the brand list view."""
    # Case-insensitive substring match on the brand name.
    name = CharFilter(
        label=pgettext_lazy('Brand list name filter label', 'Name'),
        lookup_expr='icontains')
    is_featured = ChoiceFilter(
        label=pgettext_lazy('Brand list filter label', 'Is featured'),
        choices=BOOLEAN_CHOICES,
        empty_label=pgettext_lazy('Filter empty choice label', 'All'),
        widget=forms.Select)
    sort_by = OrderingFilter(
        label=pgettext_lazy('Brand list sorting filter label', 'Sort by'),
        fields=SORT_BY_FIELDS.keys(),
        field_labels=SORT_BY_FIELDS)
    class Meta:
        model = Brand
        fields = []
    def get_summary_message(self):
        """Return a translated 'Found N matching brand(s)' summary string."""
        counter = self.qs.count()
        return npgettext(
            'Number of matching records in the dashboard brands list',
            'Found %(counter)d matching brand',
            'Found %(counter)d matching brands',
            number=counter) % {'counter': counter}
|
[
"chaoslecion71@gmail.com"
] |
chaoslecion71@gmail.com
|
4de65748c9746a5924fdb0a8c063ac3daef292d1
|
aa0d55b2aa22da0af6545ce0da46d04dbdc3bffc
|
/cpgames/core/games/pingpong/modules/sprites.py
|
5f00745d148bc0bb48b7b36a20e715cd42b66d90
|
[
"Apache-2.0"
] |
permissive
|
cyanghsieh/Games
|
19fdad463cf12cbd503a399ed2700c0dae615714
|
07767df6d181b9eae89ce0a8b883d19afb450cc1
|
refs/heads/master
| 2023-05-11T11:11:09.777569
| 2023-02-22T14:28:18
| 2023-02-22T14:28:18
| 283,113,319
| 0
| 0
|
MIT
| 2020-07-28T05:49:13
| 2020-07-28T05:49:12
| null |
UTF-8
|
Python
| false
| false
| 3,664
|
py
|
'''
Function:
一些必要的精灵类
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import random
import pygame
from .utils import loadImage
'''Ping-pong ball'''
class Ball(pygame.sprite.Sprite):
    def __init__(self, imgpath, cfg, **kwargs):
        pygame.sprite.Sprite.__init__(self)
        self.cfg = cfg
        self.image = loadImage(imgpath)
        self.rect = self.image.get_rect()
        self.reset()
    '''Advance the ball one step, resolve collisions, return [left, right] points scored.'''
    def move(self, ball, racket_left, racket_right, hit_sound, goal_sound):
        # NOTE(review): *ball* appears to be this same sprite, only used for
        # the rect-collision tests below -- confirm against callers.
        self.rect.left = self.rect.left + self.speed * self.direction_x
        # Vertical position is clamped to the screen, so the exact-equality
        # wall checks below are reliable.
        self.rect.top = min(max(self.rect.top + self.speed * self.direction_y, 0), self.cfg.SCREENSIZE[1] - self.rect.height)
        # Hit a racket: bounce back horizontally, randomize vertical
        # direction and speed up.
        if pygame.sprite.collide_rect(ball, racket_left) or pygame.sprite.collide_rect(ball, racket_right):
            self.direction_x, self.direction_y = -self.direction_x, random.choice([1, -1])
            self.speed += 1
            scores = [0, 0]
            hit_sound.play()
        # Hit the top wall.
        elif self.rect.top == 0:
            self.direction_y = 1
            self.speed += 1
            scores = [0, 0]
        # Hit the bottom wall.
        elif self.rect.top == self.cfg.SCREENSIZE[1] - self.rect.height:
            self.direction_y = -1
            self.speed += 1
            scores = [0, 0]
        # Crossed the left edge: right player scores.
        elif self.rect.left < 0:
            self.reset()
            racket_left.reset()
            racket_right.reset()
            scores = [0, 1]
            goal_sound.play()
        # Crossed the right edge: left player scores.
        elif self.rect.right > self.cfg.SCREENSIZE[0]:
            self.reset()
            racket_left.reset()
            racket_right.reset()
            scores = [1, 0]
            goal_sound.play()
        # Nothing special happened this step.
        else:
            scores = [0, 0]
        return scores
    '''Reset position, direction and speed for a fresh serve.'''
    def reset(self):
        self.rect.centerx = self.cfg.SCREENSIZE[0] // 2
        self.rect.centery = random.randrange(self.rect.height // 2, self.cfg.SCREENSIZE[1] - self.rect.height // 2)
        self.direction_x = random.choice([1, -1])
        self.direction_y = random.choice([1, -1])
        self.speed = 1
    '''Blit the ball onto the screen.'''
    def draw(self, screen):
        screen.blit(self.image, self.rect)
'''Ping-pong paddle'''
class Racket(pygame.sprite.Sprite):
    def __init__(self, imgpath, type_, cfg, **kwargs):
        # type_ selects the paddle's side ('RIGHT' or anything else = left).
        pygame.sprite.Sprite.__init__(self)
        self.cfg = cfg
        self.type_ = type_
        self.image = loadImage(imgpath, False)
        self.rect = self.image.get_rect()
        self.reset()
    '''Move the paddle one step up or down, clamped to the screen.'''
    def move(self, direction):
        if direction == 'UP':
            self.rect.top = max(0, self.rect.top - self.speed)
        elif direction == 'DOWN':
            self.rect.bottom = min(self.cfg.SCREENSIZE[1], self.rect.bottom + self.speed)
        else:
            raise ValueError('[direction] in Racket.move is %s, expect %s or %s...' % (direction, 'UP', 'DOWN'))
    '''Computer opponent: track the ball with a 25-pixel dead zone.'''
    def automove(self, ball):
        if ball.rect.centery - 25 > self.rect.centery:
            self.move('DOWN')
        if ball.rect.centery + 25 < self.rect.centery:
            self.move('UP')
    '''Reset the paddle to its starting position and speed.'''
    def reset(self):
        # Left/right paddle placement depending on type_.
        self.rect.centerx = self.cfg.SCREENSIZE[0] - self.rect.width // 2 if self.type_ == 'RIGHT' else self.rect.width // 2
        self.rect.centery = self.cfg.SCREENSIZE[1] // 2
        # Movement speed (pixels per move() call).
        self.speed = 5
    '''Blit the paddle onto the screen.'''
    def draw(self, screen):
        screen.blit(self.image, self.rect)
|
[
"1159254961@qq.com"
] |
1159254961@qq.com
|
3a8bab67ec8ceee72f60fd6eb8163fcbd1d325f5
|
77717d0024c8597fec83600259ea5547abbc183a
|
/demo/image_demo.py
|
4a35a75b5f67f853a69af3ad51dc8550ac08a6b6
|
[
"Apache-2.0"
] |
permissive
|
fengyouliang/wheat_detection
|
0a090ef5eda7f2c5463996f4795f9ce06dd04050
|
d056123426a1260c29b486cbb8e44a88a0a3c5bc
|
refs/heads/master
| 2022-11-17T15:09:29.113493
| 2020-07-18T13:47:34
| 2020-07-18T13:47:34
| 276,532,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
from argparse import ArgumentParser
from mmdet.apis import inference_detector, init_detector, show_result_pyplot


def main():
    """Run single-image inference with an MMDetection model and plot it."""
    cli = ArgumentParser()
    cli.add_argument('img', help='Image file')
    cli.add_argument('config', help='Config file')
    cli.add_argument('checkpoint', help='Checkpoint file')
    cli.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    cli.add_argument(
        '--score-thr', type=float, default=0.3, help='bbox score threshold')
    opts = cli.parse_args()
    # Build the detector from config + weights, run it on the image, then
    # visualise detections above the score threshold.
    detector = init_detector(opts.config, opts.checkpoint, device=opts.device)
    detections = inference_detector(detector, opts.img)
    show_result_pyplot(detector, opts.img, detections, score_thr=opts.score_thr)


if __name__ == '__main__':
    main()
|
[
"1654388696@qq.com"
] |
1654388696@qq.com
|
cbab46f2372b7b4738133a09e2c904eba5e527ca
|
2caf6885511af24443e22aaa43cd679d694f6f80
|
/note/download_note/first_month/day15/my_project/skill_system/skill_deployer.py
|
0ebb3c60b4ac54c50d95e2cf3c952ee6d6af1563
|
[] |
no_license
|
nandadao/Python_note
|
7f9ba54a73af05c935b4f7e24cacb728859a6c69
|
abddfc2e9a1704c88867cff1898c9251f59d4fb5
|
refs/heads/master
| 2020-11-25T18:29:50.607670
| 2019-12-19T01:28:02
| 2019-12-19T01:28:02
| 228,793,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
# The only condition for a successful module import:
# some entry in sys.path + the import path must locate the module.
import sys
# Make the project root importable so 'common' resolves below.
sys.path.append('/home/tarena/month01/day15/my_project')
print(sys.path)
from common.list_helper import ListHelper
class SkillDeployer:
    def generate_skill(self):
        # Trace message, then delegate to the shared list helper.
        print("SkillDeployer -- generate_skill")
        ListHelper.fun01()
|
[
"1361335953@qq.com"
] |
1361335953@qq.com
|
49d39d44fc161e989c2e466a7903314ea706eff8
|
fab7b6e422b74424fb59398635f74faca9ff5a58
|
/waimak_extended_boundry/model_and_NSMC_build/pumping uncertainty.py
|
402dbd2253b25d06ce083a921ee8614cb69809c8
|
[] |
no_license
|
hansonmcoombs/Waimakariri-Model-Ashley-to-Selwyn
|
c7a56a2ebd0d421c9679cb4a16ae319dfb2041b1
|
c96c2663b010975ec08d42840fbc7970f3c2b085
|
refs/heads/master
| 2023-05-29T10:57:33.916912
| 2020-04-23T21:32:21
| 2020-04-23T21:32:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,168
|
py
|
# -*- coding: utf-8 -*-
"""
Author: matth
Date Created: 11/08/2017 3:15 PM
"""
# NOTE(review): Python 2 source (print statement at the bottom).
from __future__ import division
import numpy as np
import pandas as pd
from core.ecan_io import rd_sql, sql_db
# Monthly groundwater-take usage estimates; keep records from 2008 onward.
mike = pd.read_hdf(r"P:\Groundwater\Waimakariri\Groundwater\Numerical GW model\supporting_data_for_scripts\ex_bd_va_sdp\m_ex_bd_inputs\sd_est_all_mon_vol.h5")
mike = mike.loc[(mike.time >= pd.datetime(2008, 1, 1)) & (mike.take_type == 'Take Groundwater')]
mike.loc[:, 'd_in_m'] = mike.time.dt.daysinmonth
# Aggregate per well (wap): total usage, total allocation, joined consents.
data = mike.groupby('wap').aggregate({'usage_est': np.sum, 'mon_allo_m3': np.sum, 'crc': ','.join, 'd_in_m': np.sum})
# Convert totals to mean daily fluxes over the whole record period.
data.loc[:, 'flux'] = data.loc[:, 'usage_est'] / (mike.time.max() - pd.datetime(2007, 12, 31)).days
data.loc[:, 'flux_cav'] = data.loc[:, 'mon_allo_m3'] / (mike.time.max() - pd.datetime(2007, 12, 31)).days
well_details = rd_sql(**sql_db.wells_db.well_details)
well_details = well_details.set_index('WELL_NO')
# Attach the management zone and keep only zones 7 and 8 -- presumably the
# Waimakariri zones; confirm against the WMCRZone coding.
out_data = pd.merge(data, pd.DataFrame(well_details.loc[:, 'WMCRZone']), left_index=True, right_index=True)
out_data = out_data.loc[np.in1d(out_data.WMCRZone, [7, 8])]
# Ratio of estimated use to consented allocation; temp2 (ratio <= 1) is
# computed for inspection only and not used further.
temp = out_data.flux/out_data.flux_cav
temp2 = temp[temp<=1]
print 'done'
|
[
"hansonmcoombs@gmail.com"
] |
hansonmcoombs@gmail.com
|
cabba0c30aedd1e6ddf910614a74a65d4b90f2ce
|
7066555f4c2ff9b405754d2e793b97bf04b6ab98
|
/data_structure/arrays_and_strings/283_move_zeroes.py
|
99d522304d65fe65ab51f1bbd92f5af51da2a6ee
|
[] |
no_license
|
yangtao0304/hands-on-programming-exercise
|
c0d0fe324ffaf73c7b4c45aba721a245a8cc9ce2
|
cc7740026c3774be21ab924b99ae7596ef20d0e4
|
refs/heads/master
| 2020-09-11T02:05:51.305196
| 2020-03-19T03:45:53
| 2020-03-19T03:45:53
| 221,904,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
class Solution(object):
    """In-place solutions for LeetCode 283 (Move Zeroes): push all zeros to
    the end of the list while preserving the order of non-zero elements."""

    def move_zeroes(self, nums):
        """
        :type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.
        """
        # Overwrite approach: compact non-zero values to the front, then
        # zero-fill the remaining tail positions.
        write = 0
        for value in nums:
            if value != 0:
                nums[write] = value
                write += 1
        for pos in range(write, len(nums)):
            nums[pos] = 0

    def move_zeroes_2(self, nums):
        """
        :type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.
        """
        # Two-pointer swap approach: each non-zero element is swapped into
        # the next slot of the compacted prefix; zeros bubble to the end.
        slow = 0
        for fast, value in enumerate(nums):
            if value != 0:
                nums[slow], nums[fast] = nums[fast], nums[slow]
                slow += 1
|
[
"im.yangtao0304@gmail.com"
] |
im.yangtao0304@gmail.com
|
bb1b2964504bbee76a34c09f71ff3d2ff2ebd505
|
c175c4e3560c6c66ec2b0c4b439cd586878b44a5
|
/prplatform/submissions/tests/test_models.py
|
3c2be9e4a42d51767fb62c1b5615a045a1c8072f
|
[
"MIT"
] |
permissive
|
piehei/prplatform
|
fd30e2e388597583b9ef0e59462ea9643f7244ba
|
f3248b66019f207bb06a4681a62057e175408b3e
|
refs/heads/master
| 2020-03-09T17:09:47.893706
| 2019-09-18T15:24:58
| 2019-09-18T15:24:58
| 128,902,940
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,242
|
py
|
from django.db import OperationalError
from django.test import TestCase
from prplatform.exercises.models import (
SubmissionExercise,
ReviewExercise,
)
from prplatform.submissions.models import (
OriginalSubmission,
ReviewSubmission,
)
from prplatform.submissions.reviewlock_models import ReviewLock
from prplatform.users.models import User
class ReviewExerciseTestCase(TestCase):
    """Tests for ReviewSubmission.save_and_destroy_lock()."""
    fixtures = [
        'courses.yaml'
    ]
    def setUp(self):
        # Exercises and students come from the course fixture.
        self.se = SubmissionExercise.objects.get(name='T1 TEXT')
        self.course = self.se.course
        self.re = ReviewExercise.objects.get(name='T1 TEXT REVIEW')
        self.s1 = User.objects.get(username='student1')
        self.s2 = User.objects.get(username='student2')
        self.s3 = User.objects.get(username='student3')
    def test_save_and_destroy_lock_reviewsubmission(self):
        # Saving a review must consume the reviewer's ReviewLock; saving
        # again (or for a user who never held a lock) must fail.
        os = OriginalSubmission(course=self.course,
                                exercise=self.se,
                                submitter_user=self.s1,
                                text="jadajada")
        os.save()
        # s2 takes a lock on s1's submission.
        rl = ReviewLock(review_exercise=self.re,
                        user=self.s2,
                        original_submission=os)
        rl.save()
        rs = ReviewSubmission(course=self.course,
                              exercise=self.re,
                              submitter_user=self.s2,
                              reviewed_submission=os)
        self.assertEqual(ReviewLock.objects.count(), 1)
        rs.save_and_destroy_lock()
        # The lock was destroyed along with the save.
        self.assertEqual(ReviewLock.objects.count(), 0)
        # No lock left for s2: a second save must raise.
        rs2 = ReviewSubmission(course=self.course,
                               exercise=self.re,
                               submitter_user=self.s2,
                               reviewed_submission=os)
        self.assertRaises(OperationalError,
                          rs2.save_and_destroy_lock)
        # s3 never held a lock at all: same error expected.
        rs2 = ReviewSubmission(course=self.course,
                               exercise=self.re,
                               submitter_user=self.s3,
                               reviewed_submission=os)
        self.assertRaises(OperationalError,
                          rs2.save_and_destroy_lock)
|
[
"ph@extreg.com"
] |
ph@extreg.com
|
14b52891220f3fee7d733147a3f39618853e24d8
|
134178ca3575d30bc3314b2182cd1fc26ed0385f
|
/day2/ifs.py
|
c0b6b70bcd96f1bfe59ddbf964559e8cbef2a819
|
[] |
no_license
|
mpdevilleres/python-study-2021
|
c19d50138158ccc6c18e96c5831546ce1ec03a0d
|
36edc4711e0a39bc87eb84dd43b6ba058a726b20
|
refs/heads/master
| 2023-06-26T19:41:04.472116
| 2021-07-16T10:39:26
| 2021-07-16T10:39:26
| 378,050,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
# Template for an if / elif / else chain:
#
# if (condition1):
#     pass
# elif (condition2):
#     pass
# else:
#     pass
#
# Example:
# if count == 1:
#     print('count is not 0')
n = int(input("input a number: "))
# NOTE: an elif chain reports only the FIRST matching divisor -- e.g. 6
# prints only "divisible by 2"; 7 is never checked despite the notes below.
if n % 2 == 0:
    print(n, 'is divisible by 2')
elif n % 3 == 0:
    print(n, 'is divisible by 3')
elif n % 5 == 0:
    print(n, 'is divisible by 5')
else:
    print(n, "undetermined")
# if it divisible by 2
# if it divisible by 3
# if it divisible by 5
# if it divisible by 7
|
[
"mpdevilleres@gmail.com"
] |
mpdevilleres@gmail.com
|
61689e089f90433880de471c2b687b9a35801ef4
|
cd8b429ba73017bd20d60b20e4d6dcf05ba44691
|
/profiles/migrations/0003_profile_location.py
|
aef6ac69739be6cf62ea6bfa014aece7716dbe64
|
[] |
no_license
|
henrymbuguak/E-commerce-Site-Created-Using-Django-1.11.1
|
61d45f1f6861b9b8d308519660f2719d5d0e7b4e
|
327f6faa7fe8d13e9dad913b5b9f90884d77fbdd
|
refs/heads/master
| 2021-11-29T00:28:02.300796
| 2021-11-26T16:48:20
| 2021-11-26T16:48:20
| 93,396,154
| 16
| 17
| null | 2021-11-26T16:48:21
| 2017-06-05T11:16:58
|
Python
|
UTF-8
|
Python
| false
| false
| 480
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-03 18:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the 'location' CharField to Profile; existing rows receive the
    # default value 'Nairobi,Kenya'.
    dependencies = [
        ('profiles', '0002_profile_description'),
    ]
    operations = [
        migrations.AddField(
            model_name='profile',
            name='location',
            field=models.CharField(default='Nairobi,Kenya', max_length=120),
        ),
    ]
|
[
"henrymbuguak@gmail.com"
] |
henrymbuguak@gmail.com
|
832eba339e037d20014f155348e0ebee2b4ace38
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/dngmon002/question1.py
|
440db247854a75f0996c65246e23cce55cd49087
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# Fancy art output
# Monwabisi Dingane
# 25 February 2014

# Prints a multi-line ASCII-art banner, one row per print call.
print(" ____ ____ ___ ____ _____ _ _ _ _ _ ")
print(" / ___/ ___|_ _/ ___|| ___| | | | \ | | |")
print("| | \___ \| |\___ \| |_ | | | | \| | |")
print("| |___ ___) | | ___) | _| | |_| | |\ |_|")
print(" \____|____/___|____/|_| \___/|_| \_(_)")
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
83745ed9a8e0c07cda36512d74784643936d8f65
|
2fd087fbc5faf43940153693823969df6c8ec665
|
/pyc_decrypted/latest/pymac/dlls/FSEvent.py
|
6fdf9e3df3d8a50f53eb9769bec23e32851ce298
|
[] |
no_license
|
mickeystone/DropBoxLibrarySRC
|
ed132bbffda7f47df172056845e5f8f6c07fb5de
|
2e4a151caa88b48653f31a22cb207fff851b75f8
|
refs/heads/master
| 2021-05-27T05:02:30.255399
| 2013-08-27T13:16:55
| 2013-08-27T13:16:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,564
|
py
|
#Embedded file name: pymac/dlls/FSEvent.py
from __future__ import absolute_import
from ctypes import POINTER, c_ubyte
from ..lazydll import FakeDLL
from ..lazyframework import LazyFramework
from ..types import CFAllocatorRef, CFArrayRef, CFUUIDRef, CFRunLoopRef, CFStringRef, CFTimeInterval, dev_t, FSEventStreamCallback, FSEventStreamContext, FSEventStreamCreateFlags, FSEventStreamEventId, FSEventStreamRef
class LazyFSEvent(LazyFramework):
    """Lazy ctypes bindings for the macOS FSEvents API.

    Only function prototypes are declared here; the Foundation framework
    is loaded on first call via the LazyFramework machinery.
    """

    def __init__(self):
        super(LazyFSEvent, self).__init__()
        # Framework bundle to load on first use.
        self._dllname = u'Foundation'
        self._func_defs = {}

        def F(name, ret = None, args = [], errcheck = None):
            # Register restype/argtypes for one exported C function.
            # NOTE(review): errcheck is accepted but never stored -- confirm.
            self._func_defs[name] = {'restype': ret,
                                     'argtypes': args}

        F('FSEventsCopyUUIDForDevice', CFUUIDRef, [dev_t])
        F('FSEventStreamCreate', FSEventStreamRef, [CFAllocatorRef,
                                                    FSEventStreamCallback,
                                                    POINTER(FSEventStreamContext),
                                                    CFArrayRef,
                                                    FSEventStreamEventId,
                                                    CFTimeInterval,
                                                    FSEventStreamCreateFlags])
        F('FSEventStreamGetLatestEventId', FSEventStreamEventId, [FSEventStreamRef])
        F('FSEventsGetCurrentEventId', FSEventStreamEventId, None)
        F('FSEventStreamStart', c_ubyte, [FSEventStreamRef])
        F('FSEventStreamInvalidate', None, [FSEventStreamRef])
        F('FSEventStreamRelease', None, [FSEventStreamRef])
        F('FSEventStreamStop', None, [FSEventStreamRef])
        F('FSEventStreamScheduleWithRunLoop', None, [FSEventStreamRef, CFRunLoopRef, CFStringRef])


# Module-level handle; functions resolve lazily through LazyFSEvent.
FSEvent = FakeDLL(LazyFSEvent)
|
[
"bizonix@me.com"
] |
bizonix@me.com
|
bc90df29176cb40490d288bb8254b2327d3d0992
|
dd770e697daddab20e09fbf8ce199c97ee540c37
|
/bigtop-packages/src/charm/zookeeper/layer-zookeeper/actions/smoke-test
|
64814629a162acdf459dab3899fc8a1978368d94
|
[
"FreeBSD-DOC",
"MIT",
"DOC",
"Apache-2.0"
] |
permissive
|
PKConsul/bigtop
|
0e7b5133be17a2093c0d5279b000c60b67072a16
|
2f8311b184bf0c5d25756b098895e43b1dbc3c2e
|
refs/heads/master
| 2021-01-20T02:08:29.012667
| 2017-04-22T17:44:30
| 2017-04-23T06:27:13
| 89,379,381
| 1
| 0
| null | 2017-04-25T15:53:29
| 2017-04-25T15:53:29
| null |
UTF-8
|
Python
| false
| false
| 1,526
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('lib')
from charmhelpers.core import hookenv # noqa: E402
from charms.layer.apache_bigtop_base import Bigtop # noqa: E402
from charms.reactive import is_state # noqa: E402
def fail(msg, output=None):
    # Record the failure (plus any captured output) on the juju action,
    # then stop the script. sys.exit() with no code -> exit status 0,
    # because the action itself completed.
    if output:
        hookenv.action_set({'output': output})
    hookenv.action_fail(msg)
    sys.exit()


# Refuse to run until the charm reports zookeeper as started.
if not is_state('zookeeper.started'):
    fail('Charm is not yet ready to run the Bigtop smoke test(s)')

# Bigtop smoke test components
smoke_components = ['zookeeper']

# Delegate to Bigtop's own smoke-test runner and surface the outcome.
bigtop = Bigtop()
result = bigtop.run_smoke_tests(smoke_components)
if result == 'success':
    hookenv.action_set({'outcome': result})
else:
    fail('{} smoke tests failed'.format(smoke_components), result)
|
[
"kevin.monroe@canonical.com"
] |
kevin.monroe@canonical.com
|
|
76532f0bfd74859dc88b4d5b0f4c9a449a6a84e2
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/stacks_20200703102749.py
|
12f8182c2862178934b96e70b251a7b34a1c4479
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
# stacks - where the last item to be added is the first to be reversed
# reversing an array using stacks
# def stacks(arr):
#     arr.append(6)
#     arr.append(7)
#     newArr = []
#     for i in range(len(arr)):
#         newArr.append(arr.pop())
#     print(newArr)
# stacks([3,4,5])
# ========================================================
# Queue
from collections import deque
def Queue(items=(3, 4, 5)):
    """Demonstrate FIFO behaviour with collections.deque.

    Enqueues 6 and 7 after *items*, then dequeues everything from the
    left and returns the values in arrival order.

    BUGFIX: the original file declared ``def Queue():`` with no body at
    all, which is a SyntaxError; this documented demo body (mirroring
    the stack example above) makes the module importable.
    """
    q = deque(items)
    q.append(6)
    q.append(7)
    # popleft -> first-in, first-out, the defining queue property.
    return [q.popleft() for _ in range(len(q))]
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
8868615a32d3f22a1d4f5f08ac876bc011a65f74
|
18c9109e3e6dfea227b80e0a8ebc5e92cfa117d3
|
/tests/unit/shared/test_containers.py
|
38a022c659f4139352e210f735651c518af4df3e
|
[
"Apache-2.0"
] |
permissive
|
Xilinx/pyxir
|
9b0179da550471d251acd95c26e9bfe6f54502dd
|
8ce8a385a155f3ffdd84ce61501ca870cfb4a905
|
refs/heads/master
| 2023-09-05T12:07:59.732179
| 2022-03-31T19:24:48
| 2022-03-31T19:24:48
| 265,640,658
| 34
| 23
|
Apache-2.0
| 2022-05-29T08:05:58
| 2020-05-20T17:36:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,077
|
py
|
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module for testing the StrContainer and BytesContainerdata structure """
import unittest
import libpyxir as lpx
from pyxir.shared.container import StrContainer, BytesContainer
class TestStrContainer(unittest.TestCase):
    """Unit tests for the StrContainer structure (pyxir.shared.container)."""

    def test_constructor(self):
        # Construction stores the given string verbatim.
        s = "test"
        sc = StrContainer(s)
        assert sc.get_str() == "test"

    def test_eq(self):
        # Equality compares directly against plain Python strings.
        s = "test"
        sc = StrContainer(s)
        assert sc == "test"

    def test_set_str(self):
        # set_str replaces the stored value; both == and get_str see it.
        s = "test"
        sc = StrContainer(s)
        sc.set_str("2")
        assert sc == "2"
        assert sc.get_str() == "2"
class TestBytesContainer(unittest.TestCase):
    """Unit tests for the BytesContainer structure (pyxir.shared.container)."""

    def test_constructor(self):
        # Construction stores raw bytes; comparison against str must differ.
        b = b"test"
        bc = BytesContainer(b)
        assert isinstance(bc.get_bytes(), bytes)
        assert bc.get_bytes() == b"test"
        assert bc.get_bytes() != "test"
        # A latin-1 encoded string must behave identically.
        b2 = "test".encode('latin1')
        bc2 = BytesContainer(b2)
        # BUGFIX: this assertion previously re-checked `bc`, leaving the
        # freshly constructed `bc2` completely unexercised.
        assert bc2.get_bytes() == "test".encode('latin1')

    def test_eq(self):
        # Equality compares directly against bytes literals.
        b = b"test"
        bc = BytesContainer(b)
        assert bc == b"test"

    def test_set_bytes(self):
        # set_bytes replaces the stored payload.
        b = b"test"
        bc = BytesContainer(b)
        bc.set_bytes(b"2")
        assert bc == b"2"
        assert bc.get_bytes() == b"2"

    def test_set_bytes_latin1(self):
        # set_bytes must round-trip latin-1 encoded payloads unchanged.
        b = b"test"
        bc = BytesContainer(b)
        bc.set_bytes("2".encode('latin1'))
        assert bc == "2".encode('latin1')
        assert bc.get_bytes() == "2".encode('latin1')
|
[
"jornt@xilinx.com"
] |
jornt@xilinx.com
|
69f15ffcdc89289d39337e9f2cbdc77eeb439882
|
3e1fcf34eae508a3f3d4668edfb334069a88db3d
|
/tests/test_case_info.py
|
f9f4bb37beb8939a3a6647694a29a2215398d96f
|
[
"ISC"
] |
permissive
|
mscarey/court-scraper
|
26d32cb7354b05bb5d5d27a55bf4042e5dde1a4d
|
e29135331526a11aa5eb0445a9223fc3f7630895
|
refs/heads/main
| 2023-07-14T20:23:33.488766
| 2020-08-31T14:02:19
| 2020-08-31T14:02:19
| 384,977,976
| 0
| 0
|
ISC
| 2021-07-11T15:04:57
| 2021-07-11T15:04:57
| null |
UTF-8
|
Python
| false
| false
| 788
|
py
|
from court_scraper.case_info import CaseInfo
def test_attribute_mapping():
    """CaseInfo._map should rename mapped keys and pass others through."""
    mapping = { 'case_num': 'number', }
    data = { 'foo': 'bar', 'case_num': '1' }
    CaseInfo._map = mapping
    ci = CaseInfo(data)
    # The raw key is replaced by its mapped name...
    assert hasattr(ci, 'case_num') == False
    assert ci.number == '1'
    # ...while unmapped keys become attributes unchanged.
    assert ci.foo == 'bar'
def test_standardized_data():
    """standard_data keeps only recognised keys, renamed through _map."""
    mapping = {
        'case_num': 'number',
    }
    data = {
        'place_id': 'ga_dekalb',
        'case_num': '1',
        'status': 'Open',
        'foo': 'bar',
    }
    # Number should be standardized,
    # and foo should not appear
    expected = {
        'place_id': 'ga_dekalb',
        'number': '1',
        'status': 'Open',
    }
    CaseInfo._map = mapping
    ci = CaseInfo(data)
    assert ci.standard_data == expected
|
[
"zstumgoren@gmail.com"
] |
zstumgoren@gmail.com
|
95ef73ae86b57acc18a1491c332ea73babf5daf3
|
b924079a344e718f1de3dccdae8064c8c24be373
|
/quantum/service.py
|
48ef432e2ae732b5fea5389b6ee75bad647545e8
|
[
"Apache-2.0"
] |
permissive
|
ruijie/quantum
|
b24a14636a00c2363e1f2f365f41b58f6a5f1c07
|
b63a721785801a3b6f0aeb10bb2eb49b76323496
|
refs/heads/master
| 2021-01-20T05:08:35.704182
| 2012-11-12T08:33:23
| 2012-11-12T08:33:23
| 6,650,142
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,876
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from quantum.common import config
from quantum.openstack.common import cfg
from quantum import wsgi
LOG = logging.getLogger(__name__)
class WsgiService(object):
    """Base class for WSGI based services.

    For each api you define, you must also define these flags:
    :<api>_listen: The address on which to listen
    :<api>_listen_port: The port on which to listen
    """

    def __init__(self, app_name):
        # Name of the paste application to load when the service starts.
        self.app_name = app_name
        self.wsgi_app = None

    def start(self):
        # Build and start the WSGI server for app_name (lazy, not in __init__).
        self.wsgi_app = _run_wsgi(self.app_name)

    def wait(self):
        # Block until the underlying WSGI server shuts down.
        self.wsgi_app.wait()
class QuantumApiService(WsgiService):
    """Class for quantum-api service."""

    @classmethod
    def create(cls):
        """Factory: configure logging from cfg.CONF and build the service."""
        app_name = "quantum"
        # Setup logging early, supplying both the CLI options and the
        # configuration mapping from the config file
        # We only update the conf dict for the verbose and debug
        # flags. Everything else must be set up in the conf file...
        # Log the options used when starting if we're in debug mode...
        config.setup_logging(cfg.CONF)
        LOG.debug("*" * 80)
        LOG.debug("Configuration options gathered from config file:")
        LOG.debug("================================================")
        # Dump every config option except bookkeeping keys, sorted by name.
        items = dict([(k, v) for k, v in cfg.CONF.items()
                      if k not in ('__file__', 'here')])
        for key, value in sorted(items.items()):
            LOG.debug("%(key)-30s %(value)s" % {'key': key,
                                                'value': value,
                                                })
        LOG.debug("*" * 80)
        service = cls(app_name)
        return service
def serve_wsgi(cls):
    """Create a service via cls.create(), start it and return it.

    Errors raised during creation are logged and re-raised unchanged.
    """
    try:
        service = cls.create()
    except Exception:
        logging.exception('in WsgiService.create()')
        raise
    service.start()
    return service
def _run_wsgi(app_name):
    """Load the paste app *app_name* and serve it on the configured socket.

    Returns the running wsgi.Server, or None when no app is configured.
    """
    app = config.load_paste_app(app_name)
    if not app:
        LOG.error(_('No known API applications configured.'))
        return
    server = wsgi.Server("Quantum")
    server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host)
    return server
|
[
"root@vm1.net"
] |
root@vm1.net
|
1b063a5d5a6f4416bf8b1ac6fbcf2c18198492dd
|
f6c62c253a49678e368d074302ab0358190d2f05
|
/CrossMgrCamera/ScaledImage.py
|
50cd5f5111eb00c9f3dcb2a2221972b2522f4fb1
|
[] |
no_license
|
Adefx/CrossMgr
|
4ab0563972dfb8de173dae1542b0c322aef7ab20
|
ad803339c81994a784426164c20215a67fdbaba1
|
refs/heads/master
| 2021-01-17T08:24:52.641643
| 2017-02-24T14:05:50
| 2017-02-24T14:05:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,260
|
py
|
import wx
contrastColour = wx.Colour( 255, 130, 0 )
def RescaleImage( image, width, height ):
    """Return *image* scaled to fit (width, height), preserving aspect ratio.

    The original image is returned untouched when the required scale is
    within ~6% of 1.0, avoiding a needless resample of a near-fit image.
    """
    wImage = image.GetWidth()
    hImage = image.GetHeight()
    ratio = min( float(width) / float(wImage), float(height) / float(hImage) )
    return image.Copy().Rescale( int(wImage*ratio), int(hImage*ratio), wx.IMAGE_QUALITY_NORMAL ) if not (0.94 < ratio < 1.06) else image
class ScaledImage( wx.Panel ):
    """wx.Panel that shows an image scaled to fit, centred, keeping aspect.

    Optionally draws a vertical "finish line" down the middle.
    (wxPython classic / Python 2 API: GetSizeTuple, xrange, etc.)
    """

    def __init__( self, parent, id=wx.ID_ANY, size=(640,480), style=0, drawFinishLine=False ):
        super(ScaledImage, self).__init__( parent, id, size=size, style=style )
        self.SetBackgroundStyle( wx.BG_STYLE_CUSTOM )
        # Currently displayed wx.Image; None until SetImage is called.
        self.image = None
        self.drawFinishLine = drawFinishLine
        self.Bind( wx.EVT_PAINT, self.OnPaint )

    def OnPaint( self, event=None ):
        # Repaint: clear to white, then draw the rescaled image centred.
        dc = wx.AutoBufferedPaintDC( self )
        dc.SetBackground( wx.WHITE_BRUSH )
        dc.Clear()
        width, height = self.GetSizeTuple()
        try:
            bitmap = wx.BitmapFromImage( RescaleImage(self.image, width, height) )
        except Exception as e:
            # No image set yet (or conversion failed): leave the blank background.
            return
        dc.DrawBitmap( bitmap, max(0,(width - bitmap.GetWidth())//2), max(0,(height - bitmap.GetHeight())//2) )
        if self.drawFinishLine:
            # One-pixel contrast line down the horizontal centre.
            dc.SetPen( wx.Pen(contrastColour, 1) )
            dc.DrawLine( width//2, 0, width//2, height )

    def SetImage( self, image ):
        # Replace the displayed image and schedule a repaint.
        self.image = image
        self.Refresh()

    def GetImage( self ):
        return self.image

    def SetToEmpty( self ):
        # Show a fully transparent image matching the panel's current size.
        width, height = self.GetSize()
        bitmap = wx.EmptyBitmapRGBA( width, height, 255, 255, 255, 0 )
        self.image = wx.ImageFromBitmap( bitmap )

    def SetTile( self, tile ):
        # Fill the panel by repeating the given bitmap as a tile.
        width, height = self.GetSize()
        bitmap = wx.EmptyBitmap( width, height )
        dc = wx.MemoryDC()
        dc.SelectObject( bitmap )
        wTile = tile.GetWidth()
        hTile = tile.GetHeight()
        for y in xrange( 0, height, hTile ):
            for x in xrange( 0, width, wTile ):
                dc.DrawBitmap( tile, x, y )
        self.SetImage( bitmap.ConvertToImage() )

    def SetTestImage( self ):
        # Return a test image.
        # Colour bars (upper 3/4, reversed order in the lower 1/4) plus a
        # wheel of elliptic arcs in the centre -- a visual test pattern.
        width, height = self.GetSize()
        bitmap = wx.EmptyBitmap( width, height )
        dc = wx.MemoryDC()
        dc.SelectObject( bitmap )
        colours = [(255,255,255), (255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255), (0,255,255), (0,0,0) ]
        rWidth = int(float(width) / len(colours) + 0.5)
        for y, hCur in ((0, height*0.75), (height*0.75, height*0.25)):
            for i, c in enumerate(colours):
                dc.SetBrush( wx.Brush(wx.Colour(*c), wx.SOLID) )
                dc.DrawRectangle( rWidth * i, y, rWidth+1, hCur )
            colours.reverse()
        s = min(width, height) / 1.5
        x = (width-s) / 2
        y = (height-s) / 2
        angle = 360.0 / len(colours)
        for i, c in enumerate(colours):
            dc.SetBrush( wx.Brush(wx.Colour(*c), wx.SOLID) )
            dc.DrawEllipticArc(x, y, s, s, angle*i, angle*(i+1))
        dc.SelectObject( wx.NullBitmap )
        self.SetImage( bitmap.ConvertToImage() )
if __name__ == '__main__':
    # Manual test harness: show the test pattern in a frame, halving the
    # window when the display cannot fit double the image size.
    app = wx.App(False)
    displayWidth, displayHeight = wx.GetDisplaySize()
    imageWidth, imageHeight = 640, 480
    if imageWidth*2 + 32 > displayWidth or imageHeight*2 + 32 > displayHeight:
        imageWidth /= 2
        imageHeight /= 2
    mainWin = wx.Frame(None,title="ScaledImage", size=(imageWidth,imageHeight))
    scaledImage = ScaledImage( mainWin, size=(imageWidth, imageHeight) )
    scaledImage.SetTestImage()
    # scaledImage.SetToEmpty()
    mainWin.Show()
    app.MainLoop()
|
[
"edward.sitarski@gmail.com"
] |
edward.sitarski@gmail.com
|
ac9076f960b12064af864a6c2ebfe3ba357e8c2d
|
25dda94672497e3287a7403e283fb279ad171b79
|
/SW Expert Academy/2806. N-Queen.py
|
b2fd14ecd8a8c0c0a85200aacc0298fd3c6d1edc
|
[] |
no_license
|
woorud/Algorithm
|
c94b844e8c96a446c5fdee5c0abb159bfee384d7
|
f5b8e3cf0aea7fc4400e6f5bb0c1531fad93e541
|
refs/heads/master
| 2023-02-23T13:53:28.645036
| 2021-01-29T12:24:23
| 2021-01-29T12:24:23
| 230,908,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
def diagnol(idx, c):  # check whether column c on row idx clashes diagonally
    for i in range(idx):
        # Equal row distance and column distance means the same diagonal.
        if idx-i == abs(c-map[i]):
            return True
    return False


def dfs(idx):
    # Try a queen in every free, non-diagonal column of row idx; count
    # each fully placed board (idx == N) in the global cnt.
    if idx == N:
        global cnt
        cnt += 1
        return
    for i in range(N):
        if visited[i]:
            continue
        if diagnol(idx, i):
            continue
        visited[i] = 1
        map[idx] = i
        dfs(idx+1)
        visited[i] = 0


# One N-Queen count per test case: '#<case> <solutions>'.
t = int(input())
for _ in range(t):
    N = int(input())
    # map[r] = column of the queen on row r; visited marks used columns.
    map = [0 for i in range(N)]
    visited = [0 for i in range(N)]
    cnt = 0
    dfs(0)
    print('#{} {}'.format(_+1, cnt))
|
[
"woorud96@gmail.com"
] |
woorud96@gmail.com
|
d2ac1e03d5df1efcebfca1377db95f6e07600de8
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/eXe/rev2283-2366/left-trunk-2366/twisted/test/myrebuilder2.py
|
9eb92e11450989004c198b1d66a4b7c045cba6d0
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902
| 2013-03-28T16:28:47
| 2013-03-28T16:28:47
| 9,080,611
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
class A:
    """Base class; a() returns the marker string 'b'."""

    def a(self):
        return 'b'


# Ancient interpreters lacked the `object` builtin; only define the
# new-style class B when it exists.
try:
    object
except NameError:
    pass
else:
    class B(A, object):
        """New-style variant mixing A with object; adds b()."""

        def b(self):
            return 'c'


class Inherit(A):
    """Subclass overriding a() with a different marker string."""

    def a(self):
        return 'd'
|
[
"joliebig@fim.uni-passau.de"
] |
joliebig@fim.uni-passau.de
|
9d49c884c0df31778961a4fde18b09ec0e3aac9d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02420/s842100220.py
|
389376fb975d7637a465bc9f05a0d09e6c60ebbb
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
# For each dataset: read a string, then m shuffle counts; each count h
# moves the first h characters to the end (a left rotation by h).
while True:
    first = input().rstrip()
    if first == '-': break  # a lone '-' terminates the input
    m = int(input())
    for _ in range(m):
        h = int(input())
        first = first[h:] + first[0:h]  # rotate left by h
    print(first)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1a08b0f2979c899b504f8fc87218e7d979d9d652
|
8255dcf7689c20283b5e75a452139e553b34ddf3
|
/app/views/dashboard/media/photos.py
|
ae8b16add3758cab891abe9a8780cbfb5dd38862
|
[
"MIT"
] |
permissive
|
Wern-rm/raton.by
|
09871eb4da628ff7b0d0b4415a150cf6c12c3e5a
|
68f862f2bc0551bf2327e9d6352c0cde93f45301
|
refs/heads/main
| 2023-05-06T02:26:58.980779
| 2021-05-25T14:09:47
| 2021-05-25T14:09:47
| 317,119,285
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,815
|
py
|
from flask import render_template, redirect, url_for, request, current_app
from flask_login import login_required
from app import db, logger
from app.controllers.dashboard_controller import dashboard_controller
from app.forms.dashboard_media import MediaPhotos
from app.models.photo_catalogs import PhotoCatalogs
from app.models.photos import Photos
from app.utils.flask_upload_files import UploadFiles, IMAGES
from app.views.dashboard import bp
@bp.route('/media/photos/<int:catalog_id>', methods=['GET', 'POST'])
@login_required
@dashboard_controller
def media_photos(catalog_id: int, **kwargs):
    """Dashboard page for one photo catalog: lists photos, handles uploads.

    Redirects back to the media overview when the catalog does not exist;
    upload success/failure is signalled via redirect query parameters.
    """
    catalog = db.session.query(PhotoCatalogs).filter(PhotoCatalogs.id == catalog_id).first()
    if not catalog:
        return redirect(url_for('dashboard.media'))
    form = MediaPhotos()
    # Files land under STATIC_APP/uploads; only image extensions accepted.
    uploader = UploadFiles(basedir=current_app.config.get('STATIC_APP'), storage='uploads', extensions=IMAGES)
    # 'form-id' == '1' distinguishes this form when the page hosts several.
    if form.validate_on_submit() and request.form['form-id'] == '1':
        try:
            filename = uploader.save(file=form.file.data)
            file_url = uploader.get_path(filename=filename)
            db.session.add(Photos(catalog_id=catalog_id, url=file_url))
            db.session.commit()
            return redirect(url_for('dashboard.media_photos', catalog_id=catalog_id, action='success', id=37))
        except Exception as e:
            # Roll back the partial insert, log, and show a generic warning.
            db.session.rollback()
            logger.error(e)
            return redirect(url_for('dashboard.media_photos', catalog_id=catalog_id, action='warning', id=1))
    kwargs['title'] = 'Управление медиа'
    kwargs['data'] = db.session.query(Photos).filter(Photos.catalog_id == catalog_id).order_by(Photos.id).all()
    kwargs['form'] = form
    kwargs['catalog'] = catalog
    return render_template("dashboard/media/photos.html", **kwargs)
|
[
"devwern@gmail.com"
] |
devwern@gmail.com
|
5911126de8a3316316c55a211087dfce6aca418a
|
13d222bc3332378d433835914da26ed16b583c8b
|
/src/pemjh/challenge39/main.py
|
8f4884cd82bb3bbb2d75381260a80f02e168258f
|
[] |
no_license
|
mattjhussey/pemjh
|
c27a09bab09cd2ade31dc23fffac07374bea9366
|
2ebb0a525d2d1c0ee28e83fdc2638c2bec97ac99
|
refs/heads/master
| 2023-04-16T03:08:59.390698
| 2023-04-08T10:54:00
| 2023-04-08T10:54:00
| 204,912,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
""" Challenge039 """
def number_of_perimeters(length):
"""
>>> number_of_perimeters(120)
3
"""
count = 0
# c > b >= a
# a + b > c
# a + b + c = n
# n = 100
# 1 <= a <= 33
# a <= b <= (n - a) / 2 + a
# b <= c <= n - a - b
limit = length // 3
if length % 3 != 0:
limit += 1
for a_length in range(1, limit):
b_limit = (length - a_length) // 2 + a_length
for b_length in range(a_length, b_limit):
c_length = length - a_length - b_length
if (a_length**2 + b_length**2) == (c_length**2):
count += 1
break
return count
def main():
""" challenge039 """
limit = 1000
results = [(number_of_perimeters(i), i)
for i in range(4, limit + 1, 2)]
return max(results, key=lambda i: i[0])[1]
|
[
"matthew.hussey@googlemail.com"
] |
matthew.hussey@googlemail.com
|
5bb3b06fa5b6ac7552d33eaa640de020e126f6c3
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_hampshire.py
|
46f4aec748d11c63eb044ddd89f4c44dd82194b4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
#calss header
class _HAMPSHIRE():
def __init__(self,):
self.name = "HAMPSHIRE"
self.definitions = [u'a county (= area with its own local government) in southern England']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
94731e2c76a433376aed579f28bc5fc10c71f7c6
|
7e627a6f120f5e668d09f8b362c76f2592430a92
|
/dictionaria/assets.py
|
de3831bff39e3ef48f68a6fa44076febf86ba784
|
[
"Apache-2.0"
] |
permissive
|
LimorNaaman/dictionaria
|
a541bb9d812d8f4b5fb340b9525f2d136d28a40f
|
9f8a5139af99eb4ae1af9ed0b340120c486cf112
|
refs/heads/master
| 2020-03-15T12:05:25.162179
| 2018-06-05T18:01:49
| 2018-06-05T18:01:49
| 132,136,322
| 0
| 0
|
Apache-2.0
| 2018-06-05T18:01:50
| 2018-05-04T12:16:53
|
Mako
|
UTF-8
|
Python
| false
| false
| 282
|
py
|
from clld.web.assets import environment
from clldutils.path import Path

import dictionaria

# Expose this package's static/ directory under the
# /dictionaria:static/ URL prefix.
environment.append_path(
    Path(dictionaria.__file__).parent.joinpath('static').as_posix(),
    url='/dictionaria:static/')

# Reverse the search order so our static files take precedence over
# the paths registered earlier by clld.
environment.load_path = list(reversed(environment.load_path))
|
[
"xrotwang@googlemail.com"
] |
xrotwang@googlemail.com
|
06e1d46135ac73c4d98acecedfbc42f6b36f52fd
|
892dd32ee0be7135cd33c875b06dcc66307dcc99
|
/automation/MPTS/apikey_set_delegatedAdmin.py
|
ece407e17d0b344e8ac87658a5b870a8be88ba74
|
[] |
no_license
|
cloudbytestorage/devops
|
6d21ed0afd752bdde8cefa448d4433b435493ffa
|
b18193b08ba3d6538277ba48253c29d6a96b0b4a
|
refs/heads/master
| 2020-05-29T08:48:34.489204
| 2018-01-03T09:28:53
| 2018-01-03T09:28:53
| 68,889,307
| 4
| 8
| null | 2017-11-30T08:11:39
| 2016-09-22T05:53:44
|
Python
|
UTF-8
|
Python
| false
| false
| 3,694
|
py
|
import json
import requests
import md5
import fileinput
import sys
import time
from cbrequest import sendrequest, filesave, timetrack, queryAsyncJobResult, configFile, configFileName
config = configFile(sys.argv);
configfilename = configFileName(sys.argv);
# Base URL for every CloudByte API call; commands are appended per request.
stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response'])
#######Generate Apikeys for Site Admin
# For each configured site admin: find the user id, regenerate the API key
# server-side, then rewrite the config file in place with the new key.
# (Python 2 script: print statements, md5 module.)
for x in range(1, int(config['Number_of_SiteAdmins'])+1):
    ### List Users
    querycommand = 'command=listUsers'
    resp_listUsers = sendrequest(stdurl, querycommand)
    filesave("logs/CurrentUsersList.txt", "w", resp_listUsers)
    data = json.loads(resp_listUsers.text)
    users = data["listusersresponse"]["user"]
    user_id = ""
    for user in users:
        if "%s" %(config['siteAdminUsername%d' %(x)]) == user['account']:
            user_id = user['id']
            print user['account']
            print user_id
    # NOTE(review): md5_site_pwd is computed but never used below --
    # confirm whether a login/auth step was removed or is still pending.
    m = md5.new()
    m.update("%s" %(config['siteAdminPassword%d' %(x)]))
    md5_site_pwd = m.hexdigest()
    ### Generate ApiKey
    querystring = 'command=registerUserKeys&id=%s' %(user_id)
    resp_registerUserKeys = sendrequest(stdurl, querystring)
    filesave("logs/registerUserkeys.txt", "w", resp_registerUserKeys)
    data = json.loads(resp_registerUserKeys.text)
    #print data
    try:
        apikey = data["registeruserkeysresponse"]["userkeys"]["apikey"]
        print "Current Apikey from Devman --- "+apikey
    except:
        print "Didnt get Apikey"
        continue
    existingApikey = "%s" %(config['siteAdminApikey%d' %(x)])
    print "Existing API Key in Config File --- "+existingApikey
    print "ConfigFilename %s" %(configfilename)
    # Rewrite the config file in place, swapping old key for new.
    for line in fileinput.FileInput('%s' %(configfilename),inplace=1):
        line = line.replace(existingApikey,apikey)
        print line,
    fileinput.close()
    print "End of loop1"
#############Apikey Generated for Site Admin
#config = configFile(sys.argv);
#configfilename = configFileName(sys.argv);
#stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response'])
#######Generate Apikeys for HA Admin
# Same procedure as above, for each configured HA admin.
for x in range(1, int(config['Number_of_HAAdmins'])+1):
    ### List Users
    querycommand = 'command=listUsers'
    resp_listUsers = sendrequest(stdurl, querycommand)
    filesave("logs/CurrentUsersList.txt", "w", resp_listUsers)
    data = json.loads(resp_listUsers.text)
    users = data["listusersresponse"]["user"]
    user_id = ""
    for user in users:
        if "%s" %(config['haAdminUsername%d' %(x)]) == user['account']:
            user_id = user['id']
            print user['account']
            print user_id
    # NOTE(review): md5_ha_pwd is likewise unused -- confirm.
    m = md5.new()
    m.update("%s" %(config['haAdminPassword%d' %(x)]))
    md5_ha_pwd = m.hexdigest()
    ### Generate ApiKey
    querystring = 'command=registerUserKeys&id=%s' %(user_id)
    resp_registerUserKeys = sendrequest(stdurl, querystring)
    filesave("logs/registerUserkeys.txt", "w", resp_registerUserKeys)
    data = json.loads(resp_registerUserKeys.text)
    #print data
    try:
        hapikey = data["registeruserkeysresponse"]["userkeys"]["apikey"]
        print "Current Apikey from Devman --- "+hapikey
    except:
        print "Didnt get Apikey"
        continue
    hexistingApikey = "%s" %(config['haAdminApikey%d' %(x)])
    print "Existing API Key in Config File --- "+hexistingApikey
    print "ConfigFilename for HA %s" %(configfilename)
    for line in fileinput.FileInput('%s' %(configfilename),inplace=1):
        line = line.replace(hexistingApikey,hapikey)
        print line,
    fileinput.close()
    print "End of loop1"
#############Apikey Generated
|
[
"karthik.s@cloudbyte.com"
] |
karthik.s@cloudbyte.com
|
d9d31ca5bdaefea63563a4acb77cd20e1e91a9a6
|
3b5f28ed1505c68f94ec1df496fe061d110294ce
|
/lixian_alias.py
|
fa09dd28ac832bbfe0362309b4481423e064db57
|
[
"MIT"
] |
permissive
|
yuanlizbyy/xunlei-lixian
|
089d388fbf4023bfae217906268c19dde43528e1
|
fe96ee19c1af8a268dc39818a5e8d33ff71e50ee
|
refs/heads/master
| 2021-01-17T21:48:13.932068
| 2012-12-10T05:35:43
| 2012-12-10T05:35:43
| 7,854,959
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
def get_aliases():
    """Map one-letter shortcuts to full lixian command names."""
    return dict(d='download', l='list', a='add', x='delete')


def get_alias(a):
    """Return the full command for shortcut *a*, or None when unknown."""
    return get_aliases().get(a)


def to_alias(a):
    """Expand *a* when it is a known shortcut; otherwise return it as-is."""
    return get_alias(a) or a
|
[
"iambus@gmail.com"
] |
iambus@gmail.com
|
38394182dc771084f43996ec5c98459435358d6b
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_terse.py
|
6f23ba45e2ed3a286c756f26ee23e7427c152b34
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
#calss header
class _TERSE():
def __init__(self,):
self.name = "TERSE"
self.definitions = [u'using few words, sometimes in a way that seems rude or unfriendly: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
cd521274e84ead7de41a90a16c88da24457be533
|
2cdd957f6cbf326ea902160011cb4f496e037bf9
|
/python_oops/prk2.py
|
1ae8c2a8d79c5f80d9275cabe183ae4f7ca24d16
|
[] |
no_license
|
Nitesh101/thundersoft
|
81511c5672e8cb61055818b59fd216b26a784b1e
|
aa5cef1cfeb8a00d438a5280dff231bda494252d
|
refs/heads/master
| 2020-09-24T20:56:53.655269
| 2019-12-04T10:43:23
| 2019-12-04T10:43:23
| 225,841,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
class parent():
    """Base class demonstrating single inheritance."""

    def first(self):
        print("first function")


class child(parent):
    """Derived class: inherits first() and adds second()."""

    def second(self):
        print("second functtion")


# Exercise both the inherited and the newly added method.
f = child()
f.first()
f.second()
|
[
"m.veeranitesh@gmail.com"
] |
m.veeranitesh@gmail.com
|
008181a3ff8d888d84ca86e89c8bb777fa600932
|
afc677459e46635ceffccf60d1daf50e62694557
|
/ACME/math/randrotm.py
|
4c23dd04c57218e5324cda86a0211924591e4879
|
[
"MIT"
] |
permissive
|
mauriziokovacic/ACME
|
056b06da4bf66d89087fcfcbe0fd0a2e255d09f3
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
refs/heads/master
| 2020-05-23T23:40:06.667416
| 2020-01-10T14:42:01
| 2020-01-10T14:42:01
| 186,997,977
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
from .constant import *
from .eul2rotm import *


def randrotm(n=1, device='cuda:0'):
    """
    Returns n random rotation matrices

    Parameters
    ----------
    n : int (optional)
        the number of rotation matrices to generate (default is 1)
    device : str (optional)
        the device to store the tensor to (default is 'cuda:0')

    Returns
    -------
    Tensor
        the (n,3,3,) rotation matrices tensor
    """
    # Uniform Euler angles scaled by PI2, converted to rotation matrices.
    # NOTE(review): PI2 comes from .constant -- presumably 2*pi; confirm.
    return eul2rotm(torch.rand(n, 3, dtype=torch.float, device=device)*PI2)
|
[
"maurizio.kovacic@gmail.com"
] |
maurizio.kovacic@gmail.com
|
873cbdcd0a29dfe7fca8f80e22b8dad16471b2fb
|
11f7add72635ad985b3e98fd77e9426e8c74ab08
|
/google-api-python-client-1.0beta7/samples/adsense/sample_utils.py
|
e4463e584deb6cc3a870ce678814f5b070beb7f1
|
[] |
no_license
|
harshdarji/python
|
afa6b11338504567ece8bb1e78e841d13716ff14
|
8bad854304f423264b7b0724b87c7cd7de748cd6
|
refs/heads/master
| 2020-12-31T01:48:04.439466
| 2012-09-13T09:22:58
| 2012-09-13T09:22:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,811
|
py
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auxiliary file for AdSense Management API code samples.
Handles various tasks to do with logging, authentication and initialization.
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import logging
import os
import sys
from apiclient.discovery import build
import gflags
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/adsense.readonly',
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for applications.
# Run this program with the '--help' argument to see all the flags that it
# understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def process_flags(argv):
  """Uses the command-line flags to set the logging level.

  Args:
    argv: full command-line argument list (argv[0] is the program name).

  Exits the process with status 1 when flag parsing fails.
  """
  # Let the gflags module process the command-line arguments.
  try:
    argv = FLAGS(argv)
  except gflags.FlagsError, e:  # Python 2 except syntax; e is the parse error.
    print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
    sys.exit(1)
  # Set the logging according to the command-line flag.
  logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
def prepare_credentials():
  """Handles auth. Reuses credentials if available or runs the auth flow.

  Returns:
    An OAuth 2.0 credentials object, read from adsense.dat when valid,
    otherwise obtained by running the interactive flow (and written back
    to adsense.dat by the Storage object).
  """
  storage = Storage('adsense.dat')
  credentials = storage.get()
  # Cached credentials that exist and are still valid can be reused as-is.
  if credentials is not None and not credentials.invalid:
    return credentials
  # Otherwise run the native client flow; Storage persists the result.
  return run(FLOW, storage)
def retrieve_service(http):
  """Retrieves an AdSense Management API service via the discovery service.

  Args:
    http: an (authorized) httplib2.Http instance.

  Returns:
    The service object built from the discovery document.
  """
  # Construct and return the service object in a single step.
  return build("adsense", "v1", http=http)
def initialize_service():
  """Builds instance of service from discovery data and does auth."""
  # Prepare credentials first, then use them to authorize a fresh HTTP
  # client for all subsequent API requests.
  credentials = prepare_credentials()
  http = credentials.authorize(httplib2.Http())
  # Build the service object through the discovery service.
  return retrieve_service(http)
|
[
"jeromecukier@gmail.com"
] |
jeromecukier@gmail.com
|
eed9a66ad4a6c595e1640777cc94f4b3abebc576
|
25040bd4e02ff9e4fbafffee0c6df158a62f0d31
|
/www/htdocs/wt/lapnw/data/item_20_3.tmpl.py
|
28c4804d0e6461b17d8ba3b9ace7498eb496d3a5
|
[] |
no_license
|
erochest/atlas
|
107a14e715a058d7add1b45922b0f8d03bd2afef
|
ea66b80c449e5b1141e5eddc4a5995d27c2a94ee
|
refs/heads/master
| 2021-05-16T00:45:47.585627
| 2017-10-09T10:12:03
| 2017-10-09T10:12:03
| 104,338,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
from lap.web.templates import GlobalTemplate, SubtemplateCode
class main(GlobalTemplate):
    # Page template for item 20.3.
    title = 'Page.Item: 20.3'  # page title shown by the template
    project = 'lapnw'  # NOTE(review): presumably selects the project skin -- confirm in GlobalTemplate
class page(SubtemplateCode):
    # No page-specific logic; all behavior is inherited from SubtemplateCode.
    pass
|
[
"eric@eric-desktop"
] |
eric@eric-desktop
|
f626569e98ae081d24c8713a307a06dba8355c47
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/surface/netapp/volumes/snapshots/describe.py
|
bbd36f4ef5454bc19ac3d530aa4c3c5ee430b40d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697
| 2023-08-23T18:23:16
| 2023-08-23T18:23:16
| 335,182,594
| 9
| 2
|
NOASSERTION
| 2022-10-29T20:49:13
| 2021-02-02T05:47:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,651
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Describe a Cloud NetApp Volume Snapshot."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.netapp.volumes.snapshots import client as snapshots_client
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.netapp import flags
from googlecloudsdk.command_lib.netapp.volumes.snapshots import flags as snapshots_flags
from googlecloudsdk.command_lib.util.concepts import concept_parsers
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Describe(base.DescribeCommand):
  """Describe a Cloud NetApp Volume Snapshot."""

  # Release track baked into the API client so the matching API version is used.
  _RELEASE_TRACK = base.ReleaseTrack.GA

  detailed_help = {
      'DESCRIPTION': """\
Describe a Cloud NetApp Volume Snapshot.
""",
      'EXAMPLES': """\
The following command describes a Snapshot named NAME in the given location and volume:

$ {command} NAME --location=us-central1 --volume=vol1
""",
  }

  @staticmethod
  def Args(parser):
    # Register the snapshot resource argument and the --volume flag.
    concept_parsers.ConceptParser([flags.GetSnapshotPresentationSpec(
        'The Snapshot to describe.')]).AddToParser(parser)
    snapshots_flags.AddSnapshotVolumeArg(parser)

  def Run(self, args):
    """Get a Cloud NetApp Volume Snapshot in the current project."""
    snapshot_ref = args.CONCEPTS.snapshot.Parse()
    # The snapshot is addressed relative to a volume, so --volume is
    # required here even though the arg parser does not enforce it.
    if args.CONCEPTS.volume.Parse() is None:
      raise exceptions.RequiredArgumentException(
          '--volume', 'Requires a volume to describe snapshot of')
    client = snapshots_client.SnapshotsClient(release_track=self._RELEASE_TRACK)
    return client.GetSnapshot(snapshot_ref)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class DescribeBeta(Describe):
  """Describe a Cloud NetApp Volume Snapshot."""

  # Same behavior as the GA command; only the release track differs.
  _RELEASE_TRACK = base.ReleaseTrack.BETA
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class DescribeAlpha(DescribeBeta):
  """Describe a Cloud NetApp Volume Snapshot."""

  # Same behavior as the Beta command; only the release track differs.
  _RELEASE_TRACK = base.ReleaseTrack.ALPHA
|
[
"cloudsdk.mirror@gmail.com"
] |
cloudsdk.mirror@gmail.com
|
1f258b67de069bd06008aaeaf03c969cf81ea192
|
9bdc868dbc3910ae72a05ab66cf53d30dffab2a8
|
/test/functional/p2p_zpos_fakestake.py
|
b572c1e97eb8fd15a6ab5dbf088628530f98b212
|
[
"MIT"
] |
permissive
|
YEPCOIN/Yep-Core
|
6aa8a3750e8496509501b7ff4d663a2681854c96
|
541ada7485b28abe1429c400835ce228ca9a6903
|
refs/heads/master
| 2020-07-03T04:44:44.361866
| 2020-05-06T19:45:05
| 2020-05-06T19:45:05
| 201,787,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,016
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Yep Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of a zPoS block where the coinstake input is a zerocoin spend
of an already spent coin.
'''
from time import sleep
from test_framework.authproxy import JSONRPCException
from fake_stake.base_test import yep_FakeStakeTest
class zPoSFakeStake(yep_FakeStakeTest):
    def run_test(self):
        """Mint zerocoins, spend them all, then spam fake zPoS blocks whose
        coinstake reuses the already-spent serials; the node must reject them
        (test_spam returning any error messages fails the test)."""
        self.description = "Covers the scenario of a zPoS block where the coinstake input is a zerocoin spend of an already spent coin."
        self.init_test()
        DENOM_TO_USE = 5000  # zc denomination
        INITAL_MINED_BLOCKS = 321  # First mined blocks (rewards collected to mint)
        MORE_MINED_BLOCKS = 301  # More blocks mined before spending zerocoins
        self.NUM_BLOCKS = 2  # Number of spammed blocks

        # 1) Starting mining blocks
        self.log.info("Mining %d blocks to get to zPOS activation...." % INITAL_MINED_BLOCKS)
        self.node.generate(INITAL_MINED_BLOCKS)
        sleep(2)

        # 2) Collect the possible prevouts and mint zerocoins with those
        self.log.info("Collecting all unspent coins which we generated from mining...")
        balance = self.node.getbalance("*", 100)
        self.log.info("Minting zerocoins...")
        initial_mints = 0
        # Keep minting one denomination at a time until the balance runs out
        # or the 70-mint cap is hit; each mint is confirmed with one block.
        while balance > DENOM_TO_USE:
            try:
                self.node.mintzerocoin(DENOM_TO_USE)
            except JSONRPCException:
                break
            sleep(1)
            initial_mints += 1
            self.node.generate(1)
            sleep(1)
            if initial_mints % 5 == 0:
                self.log.info("Minted %d coins" % initial_mints)
            if initial_mints >= 70:
                break
            balance = self.node.getbalance("*", 100)
        self.log.info("Minted %d coins in the %d-denom, remaining balance %d", initial_mints, DENOM_TO_USE, balance)
        sleep(2)

        # 3) mine more blocks
        self.log.info("Mining %d more blocks ... and getting spendable zerocoins" % MORE_MINED_BLOCKS)
        self.node.generate(MORE_MINED_BLOCKS)
        sleep(2)
        mints = self.node.listmintedzerocoins(True, True)
        mints_hashes = [x["serial hash"] for x in mints]
        # This mints are not ready spendable, only few of them.
        self.log.info("Got %d confirmed mints" % len(mints_hashes))

        # 4) spend mints
        self.log.info("Spending mints in block %d..." % self.node.getblockcount())
        spends = 0
        spent_mints = []
        for mint in mints_hashes:
            # create a single element list to pass to RPC spendzerocoinmints
            mint_arg = []
            mint_arg.append(mint)
            try:
                self.node.spendzerocoinmints(mint_arg)
                sleep(1)
                spends += 1
                spent_mints.append(mint)
            except JSONRPCException as e:
                # Not every confirmed mint is spendable yet; skip failures.
                self.log.warning(str(e))
                continue
        sleep(1)
        self.log.info("Successfully spent %d mints" % spends)

        # 5) Start mining again so that spends get confirmed in a block.
        self.log.info("Mining 5 more blocks...")
        self.node.generate(5)
        sleep(2)

        # 6) Collect some prevouts for random txes
        self.log.info("Collecting inputs for txes...")
        spending_utxo_list = self.node.listunspent()
        sleep(1)

        # 7) Create "Fake Stake" blocks and send them
        self.log.info("Creating Fake stake zPoS blocks...")
        err_msgs = self.test_spam("Main", mints, spending_utxo_list=spending_utxo_list, fZPoS=True)
        if not len(err_msgs) == 0:
            self.log.error("result: " + " | ".join(err_msgs))
            raise AssertionError("TEST FAILED")

        self.log.info("%s PASSED" % self.__class__.__name__)
# Script entry point: run the functional test through the framework's main().
if __name__ == '__main__':
    zPoSFakeStake().main()
|
[
"ultrapoolcom@gmail.com"
] |
ultrapoolcom@gmail.com
|
a3747ed815403f95d7732066115c2c6a00eb89b8
|
17079988dedef6f830633a7a54b181355231fe3e
|
/Car/Car3.py
|
db4c58f81a811a3cd44229bac7dcc8e68dec6f07
|
[] |
no_license
|
sum008/python-backup
|
cdf6eaff60d882c36fe86b47ad311955d5869b02
|
729fbe2a5220941f9ba085c693c871592a529da8
|
refs/heads/master
| 2022-12-12T21:21:48.259680
| 2020-09-12T15:36:05
| 2020-09-12T15:36:05
| 285,461,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,812
|
py
|
import pygame as p
import vector_
import math

# Top-down car driving demo built on a custom 2-D vector helper (vector_).
# Controls: UP accelerates, DOWN brakes/reverses, LEFT/RIGHT steer.
p.init()
width=600
height=600
display=p.display.set_mode((width,height))
image=p.image.load("car.png")
run=True

# Velocity and position are vector_ objects; the car starts at (200, 200)
# with zero speed.
velocity=vector_.vector_functions()
x=200
y=200
l=0
position=vector_.vector_functions()
position.create(x, y)
velocity.set_length(l)
position.add_to_xy(velocity)
accelerate = vector_.vector_functions()
accelerate.create(0.6, 0.6)
friction = 0.98  # per-frame speed decay factor
angle=0

# Pre-scale and pre-rotate the sprite so angle 0 points the right way.
image=p.transform.scale(image, (30, 55))
image=p.transform.rotate(image, -270)
acc=0.5    # forward acceleration per frame
deacc=3    # braking/reverse deceleration per frame
lastangle=angle
move="null"
last_dir="null"

# Main loop: draw, integrate motion, then process input, at ~60 FPS.
while run:
    display.fill((0,150,0))
    angle_rot = velocity.get_angle()
    # Rotate the sprite to the current heading; position wraps at screen edges.
    img=p.transform.rotate(image, angle)
    getrect=img.get_rect()
    getrect.center=(position.getX()%width,position.getY()%height)
    display.blit(img,getrect)
    velocity.set_angle(lastangle)
    # NOTE(review): 'c' looks like a drift/steering correction vector derived
    # from the last turn direction -- confirm against vector_ semantics.
    b=p.Vector2(0,0)
    a=p.Vector2(velocity.getX(),velocity.getY())
    if last_dir=="r":
        b=p.Vector2(velocity.getX()+100,velocity.getY()+100)
    elif last_dir=="l":
        b=p.Vector2(velocity.getX()-100,velocity.getY()-100)
    c=a-b
    if c[0]!=0 or c[1]!=0:
        c=c.normalize()*0.9
    print(c)
    # Advance the position by velocity plus the correction vector.
    vel2=vector_.vector_functions()
    vel2.create(velocity.getX()+c[0], velocity.getY()+c[1])
    position.add_to_xy(vel2)
    for event in p.event.get():
        if event.type==p.QUIT:
            run=False
    keys=p.key.get_pressed()
    # Steering only engages above a minimum speed (0.75).
    # 0.0174 ~ pi/180, i.e. degrees-to-radians conversion.
    if keys[p.K_LEFT] and abs(velocity.get_length())>0.75:
        angle=(angle+1)%360
        lastangle=-angle*0.0174
        if move=="r":
            # While reversing, the heading is mirrored by pi.
            lastangle=math.pi-(angle*0.0174)
        else:
            lastangle=-angle*0.0174
        last_dir="l"
    if keys[p.K_RIGHT] and abs(velocity.get_length())>0.75:
        angle=(angle-1)%360
        if move=="r":
            lastangle=math.pi-(angle*0.0174)
        else:
            lastangle=-angle*0.0174
        last_dir="r"
    if keys[p.K_UP]: #Accelerate
        if(velocity.get_length()<10):
            velocity.set_length(velocity.get_length()+acc)
        lastangle=-angle*0.0174
        move="null"
    # if keys[p.K_DOWN] and velocity.get_length()>0.75: #Brakes
    #     velocity.set_length(velocity.get_length()-acc)
    #     lastangle=-(angle*0.0174)
    #     move="null"
    if keys[p.K_DOWN]:# and velocity.get_length()<=0:
        velocity.set_length(velocity.get_length()-deacc)
        lastangle=math.pi-(angle*0.0174)
        move="r"
    # Clamp very low speeds to a full stop and reset the drift direction.
    if velocity.get_length()<0.5:
        velocity.set_length(0)
        last_dir="null"
    velocity.set_length(velocity.get_length()*friction)
    print(position.getX(),angle,getrect)
    p.display.flip()
    p.time.Clock().tick(60)
|
[
"noreply@github.com"
] |
sum008.noreply@github.com
|
ac56f5e8e9874e1a72c9f0a01d547345569ccffd
|
2c1a2724d4e1edfd99597ef700624650de7ed5b6
|
/amazon_cells_labelled.py
|
a79c25b69e01af09a06da35634be9b2c0c26803b
|
[] |
no_license
|
hellosandeep1999/Machine_Learning
|
d91dd3b2930fef69cc1c6b6409b6591c4b8ca2e7
|
20b6296009c2a7844ad8d06d3e43b53b30a4b450
|
refs/heads/master
| 2022-10-11T17:52:50.610155
| 2020-06-08T02:53:02
| 2020-06-08T02:53:02
| 257,350,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,762
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 18:07:17 2020

@author: user
"""

"""
Q1. Code Challenge (NLP)
Dataset: amazon_cells_labelled.txt
The data has sentences from Amazon reviews; each line is tagged
positive (1) or negative (0).
Create a Machine Learning model using Natural Language Processing that can
predict whether a given review about the product is positive or negative.
"""

import pandas as pd

# Importing the dataset (tab-separated: review text, then 0/1 label).
# BUG FIX: the file has no header row -- without header=None, pandas silently
# consumed the first review as column names and dropped it from the data.
dataset = pd.read_csv('amazon_cells_labelled.txt', delimiter='\t',
                      header=None, names=["sms", "label"])

import nltk
# download the latest list of stopwords from the Stanford server (run once)
#nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import re

# Clean every review: keep letters only, lower-case, drop stopwords, stem.
corpus = []
stop_words = set(stopwords.words('english'))  # build once: O(1) lookups
ps = PorterStemmer()                          # hoisted out of the loop
for i in range(len(dataset)):                 # was hard-coded range(0, 999)
    sms = re.sub('[^a-zA-Z]', ' ', dataset['sms'][i])
    sms = sms.lower()
    sms = sms.split()
    sms = [word for word in sms if not word in stop_words]
    sms = [ps.stem(word) for word in sms]
    sms = ' '.join(sms)
    corpus.append(sms)
print(corpus)
print(len(corpus))

# Bag-of-words features: sparse matrix of token counts, densified.
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 2000)
features = cv.fit_transform(corpus).toarray() # 2000 columns
labels = dataset.iloc[:, 1].values
print(features.shape)
print(labels.shape)

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = \
    train_test_split(features, labels, test_size = 0.20, random_state = 0)

# Fitting KNN to the Training set
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier()
classifier.fit(features_train, labels_train)

# Predicting the Test set results
labels_pred = classifier.predict(features_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm_knn = confusion_matrix(labels_test, labels_pred)
print(cm_knn) # ~0.72 accuracy

# for better NLP results we need lot of data
# ----------------------------------------------------------------------
# BUG FIX: the separator above was a bare '-----' expression line, which is a
# SyntaxError and made the whole script unrunnable; it is now a comment.

# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(features_train, labels_train)

# Predicting the Test set results
labels_pred = classifier.predict(features_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm_nb = confusion_matrix(labels_test, labels_pred)
print(cm_nb) # ~0.72

# it means Naive Bayes and K-Nearest Neighbors give similar accuracy here
|
[
"sandeepjain20178@gmail.com"
] |
sandeepjain20178@gmail.com
|
1d26a049c57d17eed0c9ca0b3efec122a762a23c
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/apimanagement/v20201201/get_api_policy.py
|
6924530cfd3623114e7744c89265ceeb2de1896e
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,113
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetApiPolicyResult',
'AwaitableGetApiPolicyResult',
'get_api_policy',
'get_api_policy_output',
]
@pulumi.output_type
class GetApiPolicyResult:
    """
    Policy Contract details.
    """
    # NOTE: generated SDK code. Each constructor argument is type-checked and
    # stored via pulumi.set, then exposed read-only through a property below.
    def __init__(__self__, format=None, id=None, name=None, type=None, value=None):
        if format and not isinstance(format, str):
            raise TypeError("Expected argument 'format' to be a str")
        pulumi.set(__self__, "format", format)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if value and not isinstance(value, str):
            raise TypeError("Expected argument 'value' to be a str")
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def format(self) -> Optional[str]:
        """
        Format of the policyContent.
        """
        return pulumi.get(self, "format")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type for API Management resource.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def value(self) -> str:
        """
        Contents of the Policy as defined by the format.
        """
        return pulumi.get(self, "value")
class AwaitableGetApiPolicyResult(GetApiPolicyResult):
    # Awaitable wrapper so the result can be used with 'await' in Pulumi
    # programs; __await__ yields nothing and immediately returns a plain
    # GetApiPolicyResult copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetApiPolicyResult(
            format=self.format,
            id=self.id,
            name=self.name,
            type=self.type,
            value=self.value)
def get_api_policy(api_id: Optional[str] = None,
                   format: Optional[str] = None,
                   policy_id: Optional[str] = None,
                   resource_group_name: Optional[str] = None,
                   service_name: Optional[str] = None,
                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiPolicyResult:
    """
    Policy Contract details.


    :param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
    :param str format: Policy Export Format.
    :param str policy_id: The identifier of the Policy.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    """
    # Map Python argument names to the provider's camelCase invoke arguments.
    __args__ = dict()
    __args__['apiId'] = api_id
    __args__['format'] = format
    __args__['policyId'] = policy_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and unpack the typed result.
    __ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20201201:getApiPolicy', __args__, opts=opts, typ=GetApiPolicyResult).value

    return AwaitableGetApiPolicyResult(
        format=__ret__.format,
        id=__ret__.id,
        name=__ret__.name,
        type=__ret__.type,
        value=__ret__.value)
@_utilities.lift_output_func(get_api_policy)
def get_api_policy_output(api_id: Optional[pulumi.Input[str]] = None,
                          format: Optional[pulumi.Input[Optional[str]]] = None,
                          policy_id: Optional[pulumi.Input[str]] = None,
                          resource_group_name: Optional[pulumi.Input[str]] = None,
                          service_name: Optional[pulumi.Input[str]] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApiPolicyResult]:
    """
    Policy Contract details.


    :param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
    :param str format: Policy Export Format.
    :param str policy_id: The identifier of the Policy.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    """
    # Body is intentionally empty: lift_output_func wraps get_api_policy so
    # this variant accepts pulumi Inputs and returns an Output.
    ...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
7800d2c0d0a64a5bc8c602666596e3007524a6ca
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_199/3905.py
|
40351dcbf8a07ac6656283972fb4be0fa055fb3c
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
#!/usr/local/bin/python
import sys
def flip(s, n, i):
    """Return a copy of tuple *s* with the n elements starting at index i
    negated (the pancake-flipper window); elements outside are unchanged."""
    return tuple((not s[j]) if i <= j < i + n else s[j] for j in range(len(s)))


def answer(initial, n):
    """Breadth-first search for the minimum number of width-n flips that
    make every element of *initial* truthy.

    Args:
        initial: tuple of booleans (True for '+', False for '-').
        n: width of the flipper.

    Returns:
        The minimal flip count as an int, or the string 'IMPOSSIBLE' when
        the all-True state is unreachable.
    """
    # PERF FIX: the original used list.pop(0) as the BFS queue, which is
    # O(len(queue)) per pop; deque.popleft() is O(1).
    from collections import deque

    l = len(initial)
    maxflip = l - (n - 1)  # leftmost positions where the flipper still fits
    worklist = deque([initial])
    states = {initial: 0}  # state -> flips needed to reach it (visited set)
    while worklist:
        state = worklist.popleft()
        flips = states[state]
        if all(state):
            return flips
        for i in range(maxflip):
            newstate = flip(state, n, i)
            if newstate not in states:
                states[newstate] = flips + 1
                worklist.append(newstate)
    return 'IMPOSSIBLE'
def solve():
    """Read the Code Jam input file named on the command line and print
    'Case #k: <result>' for every case line after the header."""
    with open(sys.argv[1]) as f:
        f.readline()  # skip the first line (number of test cases)
        i = 1
        for line in f:
            parse = line.strip().split(' ')
            n = int(parse[1])  # flipper width
            ps = tuple( c == '+' for c in parse[0] )  # '+' -> True, '-' -> False
            result = answer(ps, n)
            print('Case #{}: {}'.format(i, result))
            i = i + 1

# Run immediately when the module is executed.
solve()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
0c425e4fe95e0209d87b445dc116be63eb4ffdda
|
bc11e10521fa313d83011e77a2c31a0b6ed581af
|
/lib/rubyfox/server/data/lib/Lib/test/test_rfc822.py
|
470b92878fe1a54f6ff113544a80e5f9235b3637
|
[
"MIT"
] |
permissive
|
neopoly/rubyfox-server
|
f6f191c68dcc30b8c56d22c8209e4a69251f4f27
|
26d67687fc642111ef8d02507f2b567828bd1ebd
|
refs/heads/master
| 2023-07-20T15:04:32.028192
| 2023-07-17T09:16:36
| 2023-07-17T09:33:20
| 6,457,322
| 3
| 4
|
MIT
| 2020-08-11T06:53:50
| 2012-10-30T13:06:32
|
Python
|
UTF-8
|
Python
| false
| false
| 7,518
|
py
|
import rfc822
import sys
import test_support
import unittest
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class MessageTestCase(unittest.TestCase):
    """Regression tests for the (Python 2) rfc822 message/header parser."""

    def create_message(self, msg):
        # Build an rfc822.Message directly from an in-memory string.
        return rfc822.Message(StringIO(msg))

    def test_get(self):
        # Header lookup is case-insensitive; missing headers honor defaults.
        msg = self.create_message(
            'To: "last, first" <userid@foo.net>\n\ntest\n')
        self.assert_(msg.get("to") == '"last, first" <userid@foo.net>')
        self.assert_(msg.get("TO") == '"last, first" <userid@foo.net>')
        self.assert_(msg.get("No-Such-Header") is None)
        self.assert_(msg.get("No-Such-Header", "No-Such-Value")
                     == "No-Such-Value")

    def test_setdefault(self):
        # setdefault adds a header only if absent and returns the stored value.
        msg = self.create_message(
            'To: "last, first" <userid@foo.net>\n\ntest\n')
        self.assert_(not msg.has_key("New-Header"))
        self.assert_(msg.setdefault("New-Header", "New-Value") == "New-Value")
        self.assert_(msg.setdefault("New-Header", "Different-Value")
                     == "New-Value")
        self.assert_(msg["new-header"] == "New-Value")

        self.assert_(msg.setdefault("Another-Header") == "")
        self.assert_(msg["another-header"] == "")

    def check(self, msg, results):
        """Check addresses and the date."""
        m = self.create_message(msg)
        i = 0
        for n, a in m.getaddrlist('to') + m.getaddrlist('cc'):
            try:
                mn, ma = results[i][0], results[i][1]
            except IndexError:
                print 'extra parsed address:', repr(n), repr(a)
                continue
            i = i + 1
            if mn == n and ma == a:
                pass
            else:
                print 'not found:', repr(n), repr(a)

        out = m.getdate('date')
        if out:
            self.assertEqual(out,
                             (1999, 1, 13, 23, 57, 35, 0, 0, 0),
                             "date conversion failed")


    # Note: all test cases must have the same date (in various formats),
    # or no date!

    def test_basic(self):
        self.check(
            'Date:    Wed, 13 Jan 1999 23:57:35 -0500\n'
            'From:    Guido van Rossum <guido@CNRI.Reston.VA.US>\n'
            'To:      "Guido van\n'
            '\t : Rossum" <guido@python.org>\n'
            'Subject: test2\n'
            '\n'
            'test2\n',
            [('Guido van\n\t : Rossum', 'guido@python.org')])

        self.check(
            'From: Barry <bwarsaw@python.org\n'
            'To: guido@python.org (Guido: the Barbarian)\n'
            'Subject: nonsense\n'
            'Date: Wednesday, January 13 1999 23:57:35 -0500\n'
            '\n'
            'test',
            [('Guido: the Barbarian', 'guido@python.org')])

        self.check(
            'From: Barry <bwarsaw@python.org\n'
            'To: guido@python.org (Guido: the Barbarian)\n'
            'Cc: "Guido: the Madman" <guido@python.org>\n'
            'Date:  13-Jan-1999 23:57:35 EST\n'
            '\n'
            'test',
            [('Guido: the Barbarian', 'guido@python.org'),
             ('Guido: the Madman', 'guido@python.org')
             ])

        self.check(
            'To: "The monster with\n'
            ' the very long name: Guido" <guido@python.org>\n'
            'Date:    Wed, 13 Jan 1999 23:57:35 -0500\n'
            '\n'
            'test',
            [('The monster with\n the very long name: Guido',
              'guido@python.org')])

        self.check(
            'To: "Amit J. Patel" <amitp@Theory.Stanford.EDU>\n'
            'CC: Mike Fletcher <mfletch@vrtelecom.com>,\n'
            '        "\'string-sig@python.org\'" <string-sig@python.org>\n'
            'Cc: fooz@bat.com, bart@toof.com\n'
            'Cc: goit@lip.com\n'
            'Date:    Wed, 13 Jan 1999 23:57:35 -0500\n'
            '\n'
            'test',
            [('Amit J. Patel', 'amitp@Theory.Stanford.EDU'),
             ('Mike Fletcher', 'mfletch@vrtelecom.com'),
             ("'string-sig@python.org'", 'string-sig@python.org'),
             ('', 'fooz@bat.com'),
             ('', 'bart@toof.com'),
             ('', 'goit@lip.com'),
             ])

        self.check(
            'To: Some One <someone@dom.ain>\n'
            'From: Anudder Persin <subuddy.else@dom.ain>\n'
            'Date:\n'
            '\n'
            'test',
            [('Some One', 'someone@dom.ain')])

        self.check(
            'To: person@dom.ain (User J. Person)\n\n',
            [('User J. Person', 'person@dom.ain')])

    def test_twisted(self):
        # This one is just twisted.  I don't know what the proper
        # result should be, but it shouldn't be to infloop, which is
        # what used to happen!
        self.check(
            'To: <[smtp:dd47@mail.xxx.edu]_at_hmhq@hdq-mdm1-imgout.companay.com>\n'
            'Date:    Wed, 13 Jan 1999 23:57:35 -0500\n'
            '\n'
            'test',
            [('', ''),
             ('', 'dd47@mail.xxx.edu'),
             ('', '_at_hmhq@hdq-mdm1-imgout.companay.com'),
             ])

    def test_commas_in_full_name(self):
        # This exercises the old commas-in-a-full-name bug, which
        # should be doing the right thing in recent versions of the
        # module.
        self.check(
            'To: "last, first" <userid@foo.net>\n'
            '\n'
            'test',
            [('last, first', 'userid@foo.net')])

    def test_quoted_name(self):
        self.check(
            'To: (Comment stuff) "Quoted name"@somewhere.com\n'
            '\n'
            'test',
            [('Comment stuff', '"Quoted name"@somewhere.com')])

    def test_bogus_to_header(self):
        self.check(
            'To: :\n'
            'Cc: goit@lip.com\n'
            'Date:    Wed, 13 Jan 1999 23:57:35 -0500\n'
            '\n'
            'test',
            [('', 'goit@lip.com')])

    def test_addr_ipquad(self):
        self.check(
            'To: guido@[132.151.1.21]\n'
            '\n'
            'foo',
            [('', 'guido@[132.151.1.21]')])

    def test_rfc2822_phrases(self):
        # RFC 2822 (the update to RFC 822) specifies that dots in phrases are
        # obsolete syntax, which conforming programs MUST recognize but NEVER
        # generate (see $4.1 Miscellaneous obsolete tokens).  This is a
        # departure from RFC 822 which did not allow dots in non-quoted
        # phrases.
        self.check('To: User J. Person <person@dom.ain>\n\n',
                   [('User J. Person', 'person@dom.ain')])

    # This takes to long to add to the test suite
##    def test_an_excrutiatingly_long_address_field(self):
##        OBSCENELY_LONG_HEADER_MULTIPLIER = 10000
##        oneaddr = ('Person' * 10) + '@' + ('.'.join(['dom']*10)) + '.com'
##        addr = ', '.join([oneaddr] * OBSCENELY_LONG_HEADER_MULTIPLIER)
##        lst = rfc822.AddrlistClass(addr).getaddrlist()
##        self.assertEqual(len(lst), OBSCENELY_LONG_HEADER_MULTIPLIER)

    def test_parseaddr(self):
        # Direct checks of the parseaddr helper (name, address) splitting.
        eq = self.assertEqual
        eq(rfc822.parseaddr('<>'), ('', ''))
        eq(rfc822.parseaddr('aperson@dom.ain'), ('', 'aperson@dom.ain'))
        eq(rfc822.parseaddr('bperson@dom.ain (Bea A. Person)'),
           ('Bea A. Person', 'bperson@dom.ain'))
        eq(rfc822.parseaddr('Cynthia Person <cperson@dom.ain>'),
           ('Cynthia Person', 'cperson@dom.ain'))
def test_main():
    # Run this module's tests through the stdlib regrtest helper.
    test_support.run_unittest(MessageTestCase)

if __name__ == "__main__":
    test_main()
|
[
"ps@neopoly.de"
] |
ps@neopoly.de
|
a2f07df3af8ee8974943f2f72af4f1d8e8c2c4f0
|
12091b1c0723759464f949b0a47b305c76549278
|
/tests/test_pedreader.py
|
ae00fa7fdd2ada7ea03426fe560e3525311546bc
|
[
"MIT"
] |
permissive
|
whatshap/whatshap
|
6311e13d36210f395206683bb00b2054ef639653
|
15c9ff8c4f5b04b86195396dbc6620c874b5ceb8
|
refs/heads/main
| 2023-09-04T07:58:09.567203
| 2023-08-31T08:45:45
| 2023-08-31T08:45:45
| 276,673,862
| 254
| 27
|
MIT
| 2023-09-10T06:47:19
| 2020-07-02T14:53:00
|
Python
|
UTF-8
|
Python
| false
| false
| 883
|
py
|
import io
from pytest import raises
from whatshap.pedigree import PedReader, Trio, ParseError
class TestPedReader:
    """Tests for whatshap.pedigree.PedReader (PED pedigree-file parsing)."""

    def test_parse(self):
        # Trios come back in file order; founders have mother/father set to None.
        trios = list(PedReader("tests/data/pedigree.ped"))
        assert trios[0] == Trio(child="child1", mother="mother", father="father")
        assert trios[1] == Trio(child="child2", mother="mother", father="father")
        assert trios[2] == Trio(child="father", mother=None, father=None)
        assert trios[3] == Trio(child="mother", mother=None, father=None)
        assert trios[4] == Trio(child="orphan", mother=None, father=None)

    def test_parse_error(self):
        # Malformed input must raise ParseError rather than being skipped.
        f = io.StringIO("buggy file")
        with raises(ParseError):
            list(PedReader(f))

    def test_duplicate_individual(self):
        # The same individual id appearing twice in one file is rejected.
        f = io.StringIO("f1 c m f 0 1\nf1 c m f 0 1")
        with raises(ParseError):
            list(PedReader(f))
|
[
"marcel.martin@scilifelab.se"
] |
marcel.martin@scilifelab.se
|
5bb44b04e369f75bbfa730979f359f87774d86b2
|
f61cf1a24fa184dd552dd47dd8399b74c5816ee0
|
/tasks/10/10-10.py
|
e06737083170e2f17c7ee5587f9bbf2b030af1e0
|
[] |
no_license
|
desve/netology
|
ea585d9db8658eea5319b98f63259239fa538fcb
|
c6039cc831058b8ba650d417fae25f761520139b
|
refs/heads/master
| 2020-01-23T21:11:31.291807
| 2017-04-06T05:19:08
| 2017-04-06T05:19:08
| 74,572,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
# In- and out-degrees of every vertex of a directed graph given by its
# adjacency matrix a (a[i][j] == 1 means an edge from vertex i to vertex j).
n = 5
a = [[0, 0, 0, 0, 0],
     [0, 0, 0, 0, 1],
     [1, 1, 0, 0, 0],
     [0, 0, 0, 0, 0],
     [0, 0, 0, 0, 0]
     ]

# Out-degree of vertex i = number of ones in row i.
c1 = [sum(row) for row in a]

# In-degree of vertex i = number of ones in column i.
c2 = [sum(a[j] [i] for j in range(n)) for i in range(n)]

# Report both degrees per vertex (1-based numbering, labels in Russian).
for i in range(n):
    print("Вершина", i+1)
    print("Заходов", c2[i])
    print("Исходов", c1[i])
    print("--------------------")
|
[
"2901243@mail.ru"
] |
2901243@mail.ru
|
729f408065f93d454e748e77b04b04967fb88c26
|
5cc204e2ecb9a756127e7c71633a1edcdb3e989b
|
/pylmp/InKim/LAMMPS_getViscosity.py
|
549a600e96d47dab74637bbb64c9f0a909aae361
|
[] |
no_license
|
hopefulp/sandbox
|
1a1d518cf7b5e6bca2b2776be1cac3d27fc4bcf8
|
4d26767f287be6abc88dc74374003b04d509bebf
|
refs/heads/master
| 2023-06-27T17:50:16.637851
| 2023-06-15T03:53:39
| 2023-06-15T03:53:39
| 218,209,112
| 1
| 0
| null | 2022-09-13T13:22:34
| 2019-10-29T05:14:02
|
C++
|
UTF-8
|
Python
| false
| false
| 3,560
|
py
|
#!/home/noische/program/python27/bin/python
"""
template.py
Original: Dec 28 2011 In Kim
"""
# Python Modules
import sys
import os
import string
import random
import time
import getopt
import pprint
# Custom Modules
sys.path.append("/home/noische/scripts")
sys.path.append("/home/noische/script")
import bgf
import bgftools
import nutils as nu
import scipy
# Globals
version = '111228'
def getViscosity(log_file, profile_file, out_file, silent=False):
    """
    Compute shear viscosity from a LAMMPS run.

    Merges the pressure-difference (dp) records of a LAMMPS log file with
    velocity-profile linear regressions from a profile file, then writes one
    tab-separated line per common timestep:
        Step  dp  TotEng  Temp  (dvx/dvz)  viscosity

    Function Parameters:
        log_file        Path to the LAMMPS log file; must contain a
                        "Box length" line and a "Step dp TotEng Temp" table.
        profile_file    Path to the LAMMPS profile output: per-frame header
                        "<timestep> <bin-count>" followed by bin rows whose
                        column 1 is normalized z and column 3 is vx.
        out_file        Path of the output file to (over)write.
        silent          Unused; kept for interface compatibility.
    """
    # Initialize
    log_data = [];
    profile_data = [];
    boxlength = 0;
    f_out_file = open(out_file, 'w')

    # Load log_file (dP): first scan up to the data table header, picking up
    # the box length on the way.
    f_log_file = open(log_file);
    while 1:
        line = f_log_file.readline()
        if not line:
            break;
        if "Step dp TotEng Temp" in line:
            break;
        # "print" is excluded so the echoed input-script line is not parsed.
        if "Box length" in line and "print" not in line:
            parse = line.split()
            boxlength = float(parse[-1])
    while 1:
        line = f_log_file.readline()
        if not line:
            break;
        # log_data: [Step dp TotEng Temp]
        parse = line.split()
        if len(parse) != 4:
            break;  # anything that is not a 4-column row ends the table
        log_data.append([int(parse[0]), float(parse[1]), float(parse[2]), float(parse[3])])

    # Load .profile and calculate dvx/dvz per frame.
    f_profile_file = open(profile_file);
    while 1:
        timestep = 0; bin = 0;
        vx = []; vz = [];
        line = f_profile_file.readline()
        if not line:
            break;
        if "#" in line:
            continue;  # skip comment/header lines
        parse = line.split()
        if len(parse) == 2:
            # Two tokens start a new frame: "<timestep> <number of bins>".
            timestep = int(parse[0])
            bin = int(parse[1])
            # read vz-vx pairs
            for i in range(0, bin):
                dataline = f_profile_file.readline()
                parsedata = dataline.split()
                vz.append(float(parsedata[1])*boxlength)  # de-normalize z by box length
                vx.append(float(parsedata[3]))
            if len(vz) != bin or len(vx) != bin:
                nu.die("The number of vectors for linear regression in the profile file does not match.")
            # regression of vx wrt vz (i.e. x = vz, y = vx in common case)
            (ar, br) = scipy.polyfit(vz, vx, 1)  # ar = slope = dvx/dvz
            temp = [timestep, ar]
            profile_data.append(temp)
            #f_out_file.write(str(temp)+"\n")	# profile reader looks good 2012.2.2

    # merge two data: log and profile, joining on the timestep (index 0).
    # merged_data: [Step dp TotEng Temp (dvx/dvz)]
    merged_data = [];
    for item1 in log_data:
        for item2 in profile_data:
            if item1[0] == item2[0]:
                temp = item1 + item2[1:]
                merged_data.append(temp)

    # viscosity = - dp / (dvx/dvz)
    for i in merged_data:
        vis = -1 * i[1] / i[4]
        i.append(vis)

    # Write each merged row as tab-separated values.
    for i in merged_data:
        line = "";
        for j in i:
            line += str(j) + "\t"
        line += "\n"
        f_out_file.write(line)

    # close files
    f_out_file.close()

### end of template
if __name__ == '__main__':
    # CLI entry point: parse -l/-p/-o options and run getViscosity().
    option = ""; args = ""; log_file = ""; size = 0.0; profile_file = ""; out_file = "";
    number = 0
    usage = """
Usage: LAMMPS_getViscosity.py -l logfile -p profile -o output
Options are:
-b	Input BGF file.
-o	Output BGF file.
"""
    if len(sys.argv) < 2:
        print(usage); sys.exit(0)

    options, args = getopt.getopt(sys.argv[1:], 'hl:p:o:', ['help','log=','profile=','out='])
    for option, value in options:
        if option in ('-h', '--help'):
            # BUG FIX: was the Python 2 statement "print usage", a SyntaxError
            # under Python 3 while every other branch already used print(...).
            print(usage); sys.exit(0)
        elif option in ('-l', '--log'):
            log_file = value
        elif option in ('-o', '--output'):
            out_file = value
        elif option in ('-p', '--profile'):
            profile_file = value
        elif option in (''):
            print(usage); sys.exit(0)

    # default settings: derive the output name from the log file name.
    if not out_file: out_file = os.path.basename(log_file).split(".log")[0] + "" + ".output"

    getViscosity(log_file, profile_file, out_file, silent=False)
|
[
"hopefulp@gmail.com"
] |
hopefulp@gmail.com
|
4b5a64ff4dfa130bfdb064b4d689d22a6410ef8d
|
e8d719fe45dfbff9cbbc4ed872832cec6cabaca6
|
/21_Merge_Two_Sorted_Lists.py
|
eb738c549da826985571891c40557a9c19c0cf19
|
[] |
no_license
|
nlfox/leetcode
|
64f4f48d7f4be6df0542e51cc7037df40bf184a3
|
d61363f99de3d591ebc8cd94f62544a31a026d55
|
refs/heads/master
| 2020-12-21T01:43:01.792899
| 2016-11-14T23:10:12
| 2016-11-14T23:10:12
| 56,680,839
| 2
| 0
| null | 2016-05-17T17:16:37
| 2016-04-20T11:19:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,386
|
py
|
# Definition for singly-linked list.
class ListNode(object):
    """A single node of a singly linked list."""

    def __init__(self, x):
        # Hold the payload; a fresh node starts with no successor.
        self.val = x
        self.next = None
def construct(l):
    """Build a linked list from the values of *l* and return its head.

    Assumes *l* is non-empty (l[0] is read unconditionally).
    """
    head = ListNode(l[0])
    tail = head
    for value in l[1:]:
        tail.next = ListNode(value)
        tail = tail.next
    return head
def pri(node):
    """Print all values of the list starting at *node* on one line (Python 2)."""
    p = node
    while p:
        print p.val,  # Python 2 statement form; trailing comma suppresses the newline
        p = p.next
class Solution(object):
    def mergeTwoLists(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode

        Merge two sorted lists into one sorted list.  While both lists are
        non-empty, fresh nodes (value copies) are produced, taking from l1 on
        ties; once either side runs out, the remainder of the other side is
        linked in directly (shared, not copied).  If one input is empty the
        other is returned as-is.
        """
        if not (l1 and l2):
            # At least one side is empty: the merge is just the other side.
            return l1 if l1 else l2

        dummy = ListNode(0)   # placeholder head; the answer is dummy.next
        tail = dummy
        p1, p2 = l1, l2
        while p1 and p2:
            if p1.val > p2.val:
                tail.next = ListNode(p2.val)
                p2 = p2.next
            else:
                tail.next = ListNode(p1.val)
                p1 = p1.next
            tail = tail.next

        # Attach whatever is left of the non-exhausted list.
        tail.next = p1 if p1 else p2
        return dummy.next
# Demo: merge [1, 2, 3] with [2, 3, 4] and print the result.
pri(Solution().mergeTwoLists(construct([1, 2, 3]),construct([2, 3, 4])))
|
[
"nlfox@msn.cn"
] |
nlfox@msn.cn
|
f1290c1967e70ae6adf02d821fa34d407fc96a9a
|
e68fc7302d123d26f9e1d49c7877a3c2367cf676
|
/config.py
|
736f1d23ed37ee8f5ae1c0e589c3cb4efcf23da0
|
[
"MIT"
] |
permissive
|
bmeares/sso
|
56ae3fb4336f4864e346d3cc366117b96e3f3a0c
|
d589098c6b6c8510815669184da84e0b561df90d
|
refs/heads/master
| 2023-06-01T10:09:04.656903
| 2021-06-05T17:01:15
| 2021-06-05T17:01:15
| 364,969,061
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,802
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
from meerschaum.utils.prompt import prompt
from meerschaum.config import get_plugin_config, write_plugin_config
GOOGLE_CONF_URL = 'https://accounts.google.com/.well-known/openid-configuration'
FACEBOOK_CONF_DICT = {
'api_base_url': 'https://graph.facebook.com/v10.0/',
'access_token_url': 'https://graph.facebook.com/v10.0/oauth/access_token',
'authorize_url': 'https://www.facebook.com/v10.0/dialog/oauth',
'userinfo_endpoint': 'me?fields=id,name,first_name,middle_name,last_name,email,website,gender,locale',
# 'response_type': 'token',
# 'state': "{st=state123abc,ds=123456789}",
}
def get_sso_config(*args, **kw):
    """Return the SSO plugin configuration, prompting interactively on first run.

    Positional arguments are forwarded to ``get_plugin_config``.  Keyword
    arguments are forwarded with ``warn`` stripped, because the first lookup
    always forces ``warn=False``.
    """
    # BUG FIX: the original comprehension was `{k:v for k in kw if k != 'warn'}`,
    # which raised NameError (`v` was never bound). Iterate items() instead.
    _cf = get_plugin_config(*args, warn=False, **{k: v for k, v in kw.items() if k != 'warn'})
    if _cf is None:
        # No saved configuration yet: collect every credential interactively.
        _db_label = prompt('Wedding database label:', default='wedding_s')
        _prepend = prompt('Prepend path to /sso (blank for the root to be /sso):')
        _google_id = prompt('Google Client ID:')
        _google_secret = prompt('Google Client Secret:', is_password=True)
        _google_callback = prompt('Google Callback URL:')
        _facebook_id = prompt('Facebook App ID:')
        _facebook_secret = prompt('Facebook App Secret:')
        _facebook_callback = prompt('Facebook Callback URL:')
        _cf = {
            'prepend' : _prepend,
            'google' : {
                'id' : _google_id, 'secret' : _google_secret, 'callback' : _google_callback,
            },
            'facebook' : {
                'id' : _facebook_id, 'secret' : _facebook_secret, 'callback' : _facebook_callback,
            },
        }
        write_sso_config(_cf)
    return get_plugin_config(*args, **kw)
def write_sso_config(config, **kw):
    """Persist *config* as this plugin's configuration (extra kwargs are ignored)."""
    write_plugin_config(config)
|
[
"bennett.meares@gmail.com"
] |
bennett.meares@gmail.com
|
2af7e96fce26a0c36dc8219da9028f0f0366eac9
|
3940b4a507789e1fbbaffeb200149aee215f655a
|
/warmUpOC/binaryHeap-RandomArraytoMaxheap.py
|
87be6ff429535ca6536ad1e160d1c999e33f5d6b
|
[] |
no_license
|
akimi-yano/algorithm-practice
|
15f52022ec79542d218c6f901a54396a62080445
|
1abc28919abb55b93d3879860ac9c1297d493d09
|
refs/heads/master
| 2023-06-11T13:17:56.971791
| 2023-06-10T05:17:56
| 2023-06-10T05:17:56
| 239,395,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,726
|
py
|
# Binary Heap - Random Array to Maxheap
#### Prompt:
Given an array of randomly sorted integers, rearrange the elements so that the
array is a maxheap.
What is the Big O runtime of this algorithm? Please provide your rationale
as well
#### Examples:
```
Example 1:
arr = [3, 6, 4, 2, 7, 5, 1]
return [7, 6, 5, 2, 3, 4, 1]
Example 2:
arr = [1, 2, 3, 4, 5, 6, 7, 8]
return [8, 5, 7, 4, 1, 6, 3, 2]
```
#### Input:
`arr` = `Array of Integers`
#### Output:
`result` = `Array of Integers`
#### Constraints
**Time**: `??`
**Space**: `O(1)`
Where `n` is the length of the `arr` array
Perform the algorithm in place on the input array, and then return this
input array
#### Resources:
[Binary Heaps](http://eloquentjavascript.net/1st_edition/appendix2.html)
#### Hints:
Refer back to the Minheap implementation from your homework
When proving the Big O runtime, consider drawing out the binary tree
representation of a maxheap versus the array representation
#### Solution:
[//]: {{{
```Javascript
function convert(arr) {
function getChild(parent) {
let child1 = 2 * parent + 1;
let child2 = 2 * parent + 2;
if (child1 >= arr.length) {
return child1;
} else if (child2 >= arr.length) {
return child1;
} else if (arr[child1] > arr[child2]) {
return child1;
} else {
return child2;
}
}
function bubbleDown(parent) {
let child = getChild(parent);
while (child < arr.length && arr[parent] < arr[child]) {
[arr[child], arr[parent]] = [arr[parent], arr[child]];
parent = child;
child = getChild(parent);
}
}
let i = arr.length;
while (i--) {
bubbleDown(i);
}
return arr;
}
```
[//]: ---
YOUR WORK HERE
[//]: }}}
|
[
"akimi.mimi.yano@gmail.com"
] |
akimi.mimi.yano@gmail.com
|
ec6a13a47cbceadb43a7cf88141a8bbd15d35e42
|
47d3e3149269277b164fecb176b5d0297d398b2e
|
/Python_coding_dojang/Unit 45/package01.py
|
26e03fb39f10ee0e5620aa19a6b0855bab0ab67a
|
[] |
no_license
|
heechul90/study-python-basic-1
|
325e8c81fe35cd0cd22934869413e475b6734652
|
82d778e5960c0bde102bdc4c52fc61f61ba27745
|
refs/heads/master
| 2022-10-31T07:03:54.213599
| 2022-10-24T10:54:40
| 2022-10-24T10:54:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,923
|
py
|
### Unit 45. Creating modules and packages
## 45.3 Creating a package
## A module is a single script file, but a package is a folder (directory).

# calcpkg/__init__.py
# # __init__.py may be left empty

## A folder that contains an __init__.py file is recognized as a package.
## (Since Python 3.3 a folder is recognized as a package even without
## __init__.py, but keeping one is recommended for backward compatibility.)

## 45.3.1 Adding modules to the package
## The first module, operation, provides add/mul functions; the second,
## geometry, provides triangle/rectangle area functions.

# calcpkg/operation.py
# def add(a, b):
#     return a + b
#
# def mul(a, b):
#     return a * b

# calcpkg/geometry.py
# def triangle_area(base, height):
#     return base * height / 2
#
# def rectangle_area(width, height):
#     return width * height

## 45.3.2 Using the package
# import package.module
# package.module.variable
# package.module.function()
# package.module.Class()
import calcpkg.operation    # import the operation module of the calcpkg package
import calcpkg.geometry     # import the geometry module of the calcpkg package

print(calcpkg.operation.add(10, 20))    # add function of the operation module
print(calcpkg.operation.mul(10, 20))    # mul function of the operation module

print(calcpkg.geometry.triangle_area(30, 40))     # triangle_area function of the geometry module
print(calcpkg.geometry.rectangle_area(30, 40))    # rectangle_area function of the geometry module

## 45.3.3 Importing names (variables, functions, classes) with from import
## Names imported this way can be used without the package.module prefix.
# from package.module import variable
# from package.module import function
# from package.module import Class
from calcpkg.operation import add, mul
add(10, 20)
mul(10, 20)

# NOTE: inside a package module, __name__ holds "package.module"
# (e.g. printing __name__ from calcpkg/geometry.py gives 'calcpkg.geometry').

# NOTE: module/package search path — when a module or package is not in the
# current folder, Python searches the locations listed in sys.path.
import sys
sys.path
|
[
"heechul4296@gmail.com"
] |
heechul4296@gmail.com
|
96df125c2050e8380da4e03b47062197c37e68f7
|
5bd4893a793ed739127f15becd9558cacf461540
|
/scripts/rot_photos.py
|
5219086d03b68403f7db22b5c1aced82b54b5007
|
[] |
no_license
|
hauensteina/ahn-repo
|
d3aa665eeef846e426b866d587e8649c8283e74c
|
93bd7c54548a083f39510fc562c9e7540c4f672a
|
refs/heads/master
| 2023-07-24T05:34:51.289699
| 2023-07-13T16:10:25
| 2023-07-13T16:10:25
| 99,860,476
| 0
| 1
| null | 2023-07-15T01:33:35
| 2017-08-09T23:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,121
|
py
|
#!/usr/bin/env python
# Rotate all *JPG files in the current folder so that the image header
# matches the actual image rotation.
# Then you can rotate them manually for upload to hauenstein.nine.ch/andiconny .
# AHN, Jan 2020
from __future__ import division, print_function
import os,sys,re,glob,shutil
import subprocess
import argparse
from pdb import set_trace as BP
#---------------------------
def usage(printmsg=False):
    """Build the help text; print it and exit(1) when *printmsg* is true,
    otherwise return it (used as the argparse usage string)."""
    name = os.path.basename(__file__)
    msg = '''
Name:
%s -- Rotate jpeg images consistent with header
Synopsis:
%s --run
Description:
Rotate all *JPG files in the current folder so that the image header
matches the actual image rotation. Then you can rotate them manually
for upload to hauenstein.nine.ch/andiconny .
HEIC images are converted to jpg on the way.
Example:
%s --run
''' % (name,name,name)
    # Guard clause: the common path just hands the text back to the caller.
    if not printmsg:
        return msg
    print(msg)
    exit(1)
#--------------
def main():
    """Move every image in the current folder into ./orig, then re-encode each
    back into the current folder so pixel data matches the rotation header.
    HEIC files are converted to jpg (via ImageMagick `convert`) first."""
    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument("--run", required=True, action='store_true')
    args = parser.parse_args()

    IMG_FOLDER = '.'
    # Collect all common JPEG spellings plus HEIC.
    images = glob.glob(IMG_FOLDER + '/*.jpg')
    images += glob.glob(IMG_FOLDER + '/*.jpeg')
    images += glob.glob(IMG_FOLDER + '/*.JPG')
    images += glob.glob(IMG_FOLDER + '/*.JPEG')
    images += glob.glob(IMG_FOLDER + '/*.HEIC')
    ORIGFOLDER = 'orig'
    if not os.path.exists( ORIGFOLDER):
        os.mkdir( ORIGFOLDER)
    # Keep the originals: move everything into orig/ before re-encoding.
    for img in images:
        shutil.move( img, ORIGFOLDER)
    for img in images:
        print( img)
        inf = os.path.basename( img)
        ext = os.path.splitext( inf)[1]
        jpgfile = '%s.%s' % (os.path.splitext( inf)[0], 'jpg')
        if ext == '.HEIC':
            # Convert HEIC to jpg inside orig/ so ffmpeg can read it below.
            cmd = 'convert %s/%s %s/%s' % (ORIGFOLDER, inf, ORIGFOLDER, jpgfile)
            subprocess.check_output( cmd, shell=True)
            inf = jpgfile
        # ffmpeg re-encodes into the current folder, baking the header
        # rotation into the pixels.
        cmd = 'ffmpeg -i %s/%s -c:a copy %s' % (ORIGFOLDER, inf, jpgfile)
        subprocess.check_output( cmd, shell=True)
subprocess.check_output( cmd, shell=True)
if __name__ == '__main__':
main()
|
[
"hauensteina@gmail.com"
] |
hauensteina@gmail.com
|
2023fb2002eba629aa483150c449161bf19a08ed
|
58b87ea29a95a5ceeaae4c2d7db1b16502ed158f
|
/ComputationalPhysics/Homework/hw3.py
|
d368ea50245383ff59eb1bac5299c93e92da3b2c
|
[] |
no_license
|
meyerpa/Python
|
b609e8c036b478b20cd17a4cc47b71c129c968f8
|
3797f9be3341e69d5e9eccfc1b4e7f52fdd9c666
|
refs/heads/master
| 2021-01-01T03:58:40.183829
| 2018-03-14T14:24:57
| 2018-03-14T14:24:57
| 56,526,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 26 13:30:49 2017

@author: Paige Meyer
@date: 1-26-2016
@file: homework3
@description: This file contains code to read sunspots.txt,
show the number of months, graph the sunspots with respect to time,
and average the data.
"""

import numpy as np
import matplotlib.pyplot as plt
from os.path import join

# format filename
filename = join("cpresources", "sunspots.txt")

# read the data from sunspots.txt
data = np.loadtxt(filename, float)
x = data[:, 0]  # column 0: month index
y = data[:, 1]  # column 1: sunspot count

# take only first 1000 datapts
x = x[:1000]
y = y[:1000]

# calculate the average of ten points
# NOTE(review): np.linspace(i-5, i+5, 10) spans 11 units with only 10 samples,
# so after int() truncation some indices repeat while others are skipped; a
# plain integer window range(int(i)-5, int(i)+5) was probably intended —
# confirm against the expected smoothing before reusing this.
avg = []
for i in x:
    summ = 0
    for j in np.linspace(i-5, i+5, 10):
        if j >= 0 and j < len(x):
            summ += y[int(j)]
    avg.append(1/(2*5)*summ)  # divide by the 10-point window size

# plot stuff
plt.plot(x, y, color="r", alpha=.3, label="Sunspot count")
plt.plot(x, avg, color="c", label="Average sunspots")

# format plot
plt.legend()
plt.xlabel("month")
plt.ylabel("number of sunspots")
plt.title("Sunspots vs. time")
plt.show()
|
[
"meyerpa@mnstate.edu"
] |
meyerpa@mnstate.edu
|
7f87b5c5bed34bb76c1ee9f8face990205269f2d
|
de392462a549be77e5b3372fbd9ea6d7556f0282
|
/accounts/migrations/0035_auto_20200910_1200.py
|
dac2d5c9008ca94f225b7cedf49b36e06ab4e2cf
|
[] |
no_license
|
amutebe/AMMS_General
|
2830770b276e995eca97e37f50a7c51f482b2405
|
57b9b85ea2bdd272b44c59f222da8202d3173382
|
refs/heads/main
| 2023-07-17T02:06:36.862081
| 2021-08-28T19:07:17
| 2021-08-28T19:07:17
| 400,064,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
# Generated by Django 3.0.2 on 2020-09-10 09:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: refresh the date-stamped default values of
    the `car.car_number` and `employees.employeeID` primary-key CharFields."""

    dependencies = [
        ('accounts', '0034_auto_20200910_1126'),
    ]

    operations = [
        migrations.AlterField(
            model_name='car',
            name='car_number',
            field=models.CharField(default='TEGA10092020946', max_length=200, primary_key=True, serialize=False, verbose_name='Corrective action no.:'),
        ),
        migrations.AlterField(
            model_name='employees',
            name='employeeID',
            field=models.CharField(default='TEGA239', max_length=10, primary_key=True, serialize=False, verbose_name='Employee ID'),
        ),
    ]
|
[
"mutebe2@gmail.com"
] |
mutebe2@gmail.com
|
d7d3c2d7ef21f281073e54af1f20b0d335a6d4a2
|
3c17e189622018329bc0ebd8523eae8db9f3112a
|
/ykdl/extractors/netease/live.py
|
8a3b1f40c764c49483d13ce1bbf800566e2c18ed
|
[
"MIT"
] |
permissive
|
YU-zreo/ykdl
|
167c9b8715a1cecf57c18bf60c7da3b22437ad06
|
b59dacd78bcec79d208d7cb86b86fa65428e386a
|
refs/heads/master
| 2020-12-02T12:47:01.113309
| 2017-07-07T12:39:20
| 2017-07-07T12:39:20
| 96,594,712
| 1
| 0
| null | 2017-07-08T03:57:22
| 2017-07-08T03:57:21
| null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ykdl.util.html import get_content
from ykdl.util.match import match1
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
import json
class NeteaseLive(VideoExtractor):
    # Human-readable site name used in the produced VideoInfo.
    name = u"网易直播 (163)"

    def prepare(self):
        """Build a VideoInfo for a 163.com (Netease CC) live room.

        If no vid was supplied, scrape the room page for the anchor CC id,
        title and anchor name, then query the play-url CGI for the flv
        stream URL.  Only one quality ("current") is exposed; size unknown.
        """
        info = VideoInfo(self.name, True)
        if not self.vid:
            html = get_content(self.url)
            self.vid = match1(html, "anchorCcId : \'([^\']+)")
            info.title = match1(html, "title: \'([^\']+)")
            info.artist = match1(html, "anchorName : \'([^\']+)")
        data = json.loads(get_content("http://cgi.v.cc.163.com/video_play_url/{}".format(self.vid)))
        info.stream_types.append("current")
        info.streams["current"] = {'container': 'flv', 'video_profile': "current", 'src' : [data["videourl"]], 'size': 0}
        return info
site = NeteaseLive()
|
[
"zhangn1985@gmail.com"
] |
zhangn1985@gmail.com
|
5660cf373fd81ac3f88d952f37b7290ad5c9e660
|
5c5e7b03c3373e6217665842f542ca89491290ff
|
/2015/day24.py
|
da34556a0fac89ed5e4bf110b667f266d5fe3ae1
|
[] |
no_license
|
incnone/AdventOfCode
|
9c35214e338e176b6252e52a25a0141a01e290c8
|
29eac5d42403141fccef3c3ddbb986e01c89a593
|
refs/heads/master
| 2022-12-21T21:54:02.058024
| 2022-12-15T17:33:58
| 2022-12-15T17:33:58
| 229,338,789
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,316
|
py
|
from getinput import get_input
import itertools
import copy
def list_prod(x):
    """Return the product of every element of *x* (1 for an empty iterable)."""
    import math  # local import keeps the module-level import block untouched
    return math.prod(x)
def partitionable(weights):
    """Return True iff *weights* can be split into two subsets of equal sum.

    Classic subset-sum dynamic program, written with a 1-D reachability
    table instead of the textbook 2-D one; the answer is identical.
    """
    total = sum(weights)
    if total % 2 != 0:
        return False
    target = total // 2
    # reachable[s] == True  <=>  some subset of the weights seen so far sums to s.
    reachable = [True] + [False] * target
    for w in weights:
        # Walk downwards so each weight is used at most once.
        for s in range(target, w - 1, -1):
            if reachable[s - w]:
                reachable[s] = True
    return reachable[target]
def balanceable(subset, weights):
    """Return True iff *subset* can be one of three equal-weight groups.

    *subset* must sum to one third of the total, i.e. the remaining weights
    must sum to twice sum(subset) and split into two equal halves.

    NOTE(review): `w not in subset` drops *all* copies of a duplicated
    weight; like the original, this assumes the weights are distinct
    (true for the puzzle input).
    """
    remaining_weights = [w for w in weights if w not in subset]
    desired_weight = sum(subset)
    if sum(remaining_weights) != 2*desired_weight:
        return False
    # BUG FIX: the original called partitionable(weights) on the *full* list,
    # which answers a different question and is wrong whenever the total is an
    # odd multiple of the group weight (e.g. subset=(5,) of [1,2,3,4,5]: the
    # remainder [1,2,3,4] splits 5+5, yet sum(weights)=15 is odd so the old
    # check returned False). Only the remaining weights must be halved.
    return partitionable(remaining_weights)
def sums_exist_hlpr(weights, idx, sums, cache):
    """Check whether the set weights[:idx+1] can be split into sets with the sums given in sums. Use cache
    to store the result of computations.

    Works top-down: weights[idx] must be placed into one of the bins of
    *sums* (there is no skip branch), recursing until every bin reaches 0 —
    so in the intended use the bin sums together equal the total weight and
    every weight gets used.  Memoized on (idx, sums); *sums* must be a tuple.
    """
    if (idx, sums) in cache:
        return cache[(idx, sums)]
    if not any(x != 0 for x in sums):
        # Every bin filled exactly.
        return True
    if idx < 0:
        # Out of weights but some bin is still unfilled.
        return False
    sums_exist = False
    for jdx in range(len(sums)):
        # Try placing weights[idx] into bin jdx (only if it still fits).
        remainder = sums[jdx] - weights[idx]
        if remainder >= 0:
            sums_exist = sums_exist \
                or sums_exist_hlpr(weights, idx-1, sums[:jdx] + (remainder,) + sums[jdx+1:], cache)
    cache[(idx, sums)] = sums_exist
    return sums_exist
def tripartitionable(weights):
    """Return True iff *weights* can be partitioned into three equal-sum groups."""
    total = sum(weights)
    if total % 3 != 0:
        return False
    third = total // 3
    # Delegate to the memoized bin-filling search with three equal targets.
    return sums_exist_hlpr(weights, len(weights) - 1, (third, third, third), dict())
def parse_input(s):
    """Parse one integer per line of *s* into a list."""
    return [int(line) for line in s.splitlines(keepends=False)]
def part_1(weights):
    """Return the smallest 'quantum entanglement' (product) of the smallest
    first group in a balanced three-way split, or None if impossible."""
    for size in range(1, len(weights) + 1):
        # Smallest group size wins; ties broken by lowest product.
        for cand in sorted(itertools.combinations(weights, size), key=list_prod):
            if balanceable(cand, weights):
                return list_prod(cand)
    return None
def part_2(weights):
    """Same as part_1 but splitting into four equal-sum groups."""
    packagesum = sum(weights)
    assert packagesum % 4 == 0
    quarter = packagesum // 4
    for size in range(1, len(weights) + 1):
        # Smallest group size wins; ties broken by lowest product.
        for cand in sorted(itertools.combinations(weights, size), key=list_prod):
            if sum(cand) != quarter:
                continue
            if tripartitionable([w for w in weights if w not in cand]):
                return list_prod(cand)
    return None
if __name__ == "__main__":
the_pkg_weights = parse_input(get_input(24))
print('Part 1:', part_1(the_pkg_weights))
print('Part 2:', part_2(the_pkg_weights))
|
[
"incohatus.none+git@gmail.com"
] |
incohatus.none+git@gmail.com
|
1bd16f88bb8cf77b42c10f23cb961dac40c8112e
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/torch/nn/parallel/scatter_gather.py
|
022b96bf08f30d37561e898459ab0e809d0e29ed
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:b69f556cfd2c67160d3730c231d1bb6d26eaf9dcc7d69128f5c480af8679521d
size 2690
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.