blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ab9be6590aa380def959b4559f589e13c5bde100 | 58f1cb1761d4e7114bbe0cc54195fc3bf72ccc4d | /python_solution/301_310/RemoveInvalidParentheses.py | c82c942d4bec2ad4c2189d486fec5a3868a3b8e1 | [] | no_license | CescWang1991/LeetCode-Python | 4e2e9b1872ef92d128c681c1ed07db19938b1ef5 | 0584b86642dff667f5bf6b7acfbbce86a41a55b6 | refs/heads/master | 2020-03-31T02:37:12.065854 | 2019-04-24T02:33:15 | 2019-04-24T02:33:15 | 151,832,885 | 1 | 1 | null | 2019-04-15T06:34:41 | 2018-10-06T11:22:49 | Python | UTF-8 | Python | false | false | 2,185 | py | # 301. Remove Invalid Parentheses
class Solution:
    # Recursive solution: first count how many surplus '(' and ')' must be deleted,
    # then scan the string deleting surplus halves one by one (depth-first search);
    # once every surplus parenthesis is removed, check that the remainder is valid.
    def removeInvalidParentheses(self, s):
        """
        :type s: str
        :rtype: list[str]
        """
        # Count separately how many left and right parentheses need deleting.
        right = 0
        stack = []
        for p in s:  # push '(' on a stack; for ')' pop when possible, otherwise right += 1
            if not stack and p == ")":
                right += 1
            elif p == ")":
                stack.pop()
            elif p == "(":
                stack.append(p)
        left = len(stack)
        self.res = []
        self.dfs(s, left, right, 0)
        return self.res
    def dfs(self, s, left, right, start):
        # `start` is where the previous level deleted a character; scanning from
        # there (not from 0) avoids generating duplicate candidate strings.
        if left == right == 0 and self.isValid(s):  # counts balance: keep s if it is a valid string
            self.res.append(s)
        if left > 0:  # delete one '('; in a run of consecutive '(' only the first needs trying
            for i in range(start, len(s)):
                if (s[i] == "(" and i == start) or (s[i] == "(" and s[i-1] != "("):
                    self.dfs(s[:i] + s[i+1:], left-1, right, i)
        if right > 0:  # delete one ')'; same de-dup rule. Must not be `elif`: both kinds may need deleting
            for i in range(start, len(s)):
                if (s[i] == ")" and i == start) or (s[i] == ")" and s[i-1] != ")"):
                    self.dfs(s[:i] + s[i+1:], left, right-1, i)
        return
    # cf. problem #20 Valid Parentheses
    def isValid(self, s):
        stack = []
        for p in s:
            if p == "(":
                stack.append("(")
            elif p == ")":
                if not stack:
                    return False
                else:
                    stack.pop()
        return not stack | [
"cescwang1991@gmail.com"
] | cescwang1991@gmail.com |
6b7e6a720113491f5ac26c4f0926a0a9d61607f3 | 4ae178e2f872acba3acdcb06cb145b82e48908f8 | /trial_test_ws/build/iiwa_hw/catkin_generated/pkg.installspace.context.pc.py | 949ec279bb76f064bf54d64182ab6876d794e7b9 | [] | no_license | ZhikaiZhang1/ros-lbr-repo | 51279a0c1e00f1e1d5f0f3be2e3feb2dc04600df | 8fce59c6145481a0ec58d345cb3caa641c59f78e | refs/heads/master | 2023-06-04T15:55:18.769023 | 2021-06-22T12:34:00 | 2021-06-22T12:34:00 | 380,094,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Machine-generated pkg-config context for the catkin package 'iiwa_hw';
# the values are substituted by CMake at configure time -- do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# ';'-separated strings are split into lists; the conditional guards against
# the ""-input pitfall where "".split(';') would yield [''].
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "iiwa_ros;iiwa_msgs;pluginlib;controller_interface;controller_manager;hardware_interface;control_toolbox".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-liiwa_hw".split(';') if "-liiwa_hw" != "" else []
PROJECT_NAME = "iiwa_hw"
# Absolute install-space path baked in on the build machine.
PROJECT_SPACE_DIR = "/home/logan/trial_test_ws/install"
PROJECT_VERSION = "1.4.0"
| [
"zhikaizhangHULK@gmail.com"
] | zhikaizhangHULK@gmail.com |
2ea952b80df8b60ff89f67b5241918f8c5f4897e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03745/s993598442.py | 7ab832b8c29c3103718a8da3ea5169b67688def5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | n = int(input())
a = list(map(int, input().split()))
# Count the maximal monotonic pieces of the sequence (n is read on the line
# above): walk adjacent pairs, remember the direction of the current run and
# start a new piece whenever the direction flips.  Equal neighbours never
# break a run.
direction = None
pieces = 1
for idx in range(1, n):
    prev, cur = a[idx - 1], a[idx]
    if direction is None:
        # Direction of the current run is still undecided.
        if cur > prev:
            direction = 'up'
        elif cur < prev:
            direction = 'down'
    elif direction == 'up' and cur < prev:
        pieces += 1
        direction = None
    elif direction == 'down' and cur > prev:
        pieces += 1
        direction = None
print(pieces)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5eba23b59be7cb948d58c56e2d6cf0fea531ab96 | 0604602b54581ee29c15dfb440843fecdef9baaf | /manage.py | 1b4d2d7a82b2f0862d770e476753517fbcf4abe6 | [] | no_license | VirginiaNdungu1/watchlist | efbccf6f3bc9bf720dc20fe1e81a058b20aebcd0 | 840cbbee8c85ac1febc06a99fa8b72253900f957 | refs/heads/master | 2021-07-14T12:12:32.456787 | 2017-10-19T12:03:39 | 2017-10-19T12:03:39 | 107,099,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from app import create_app
from flask_script import Manager, Server
# Application bootstrap: build the Flask app in its 'development'
# configuration and expose CLI commands through Flask-Script.
app = create_app('development')
'''
Create app instance
How?
Call the create_app function
pass in the onfiguration_options key - 'development'
'''
manager = Manager(app)
'''
Instantiate the Manager Class
How?
Pass in the app instance
'''
manager.add_command('server', Server)
'''
Create a new command 'server' to launch the application server
'''
# `python manage.py test` -- discover and run the unittest suite in ./tests.
@manager.command
def test():
    '''
    function to run the unittests
    '''
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__':
    manager.run()
| [
"ndungu.wairimu22@gmail.com"
] | ndungu.wairimu22@gmail.com |
fc21779d27a71f555b352dc441b4136ec792dbad | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/46/usersdata/125/19401/submittedfiles/funcoes1.py | 7ce00720df93d18b211087e3cf892525e1713099 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | # -*- coding: utf-8 -*-
from __future__ import division
def crescente (lista):
    """Return True if *lista* is in strictly increasing order.

    BUG FIX: the original counted the pairs that DO increase and returned
    True only when there were none, i.e. the result was inverted.  We now
    count the violations instead.
    """
    cont = 0
    for i in range(0, len(lista)-1, 1):
        if lista[i] >= lista[i+1]:  # violation of increasing order
            cont = cont + 1
    if cont != 0:
        return False
    else:
        return True
def decrescente (lista):
    """Return True if *lista* is in strictly decreasing order.

    BUG FIX: the original counted the pairs that DO decrease and returned
    True only when there were none, i.e. the result was inverted.  We now
    count the violations instead.
    """
    cont = 0
    for i in range(0, len(lista)-1, 1):
        if lista[i] <= lista[i+1]:  # violation of decreasing order
            cont = cont + 1
    if cont != 0:
        return False
    else:
        return True
def consecutivos (lista):
    """Return True if *lista* contains no equal adjacent elements.

    BUG FIX: the original body did not compile (`lista[i]=lista[i+1]` used
    assignment instead of `==`, `(lista)-1` instead of `len(lista)-1`, and
    the counter was misspelled `con`).  This fix assumes the intent was to
    flag equal neighbours -- confirm against the assignment statement.
    """
    cont = 0
    for i in range(0, len(lista)-1, 1):
        if lista[i] == lista[i+1]:
            cont = cont + 1
    if cont != 0:
        return False
    else:
        return True
n = int(input('Digite uma quantidade de termos:'))
a = []
b = []
c = []
for _ in range(n):
    a.append(input('Digite um valor de a:'))
for _ in range(n):
    b.append(input('Digite um valor de b:'))
for _ in range(n):
    c.append(input('Digite um valor de c:'))
# Same output as the original nine hand-written if/else blocks: for each of
# the three lists print 'S' (yes) or 'N' (no) for crescente, decrescente and
# consecutivos, in that order.
for lista in (a, b, c):
    for verifica in (crescente, decrescente, consecutivos):
        if verifica(lista):
            print ('S')
        else:
            print ('N')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
da6149217634e42d50560e1fe5807192edf13344 | 747febe786dd6b7fd6c63cfe73dbe3023354daa8 | /src/tt_impacts/tt_impacts/handlers.py | 65645df622bd34f61fb4d72f19d0fa1a7aa693d5 | [
"BSD-3-Clause"
] | permissive | the-tale/the-tale | 4e4b8d91dc873a5fb935fe58e9721a877baa6d3f | e8450bd2332344da805b1851e728da5a3e5bf0ef | refs/heads/develop | 2023-08-01T13:53:46.835667 | 2022-12-25T18:04:56 | 2022-12-25T18:04:56 | 1,949,167 | 98 | 52 | BSD-3-Clause | 2023-02-15T18:57:33 | 2011-06-24T18:49:48 | Python | UTF-8 | Python | false | false | 3,766 | py |
from tt_web import handlers
from tt_protocol.protocol import impacts_pb2
from . import protobuf
from . import operations
@handlers.protobuf_api(impacts_pb2.AddImpactsRequest)
async def add_impacts(message, config, **kwargs):
    """Convert the incoming protobuf impacts and hand them to the storage layer."""
    impacts = [protobuf.to_impact(impact) for impact in message.impacts]
    # Per-kind logging can be disabled through the service's custom config;
    # every toggle defaults to True when absent.
    await operations.add_impacts(impacts=impacts,
                                 log_single_impacts=config['custom'].get('log_single_impacts', True),
                                 log_actors_impacts=config['custom'].get('log_actors_impacts', True),
                                 log_target_impacts=config['custom'].get('log_target_impacts', True))
    return impacts_pb2.AddImpactsResponse()
@handlers.protobuf_api(impacts_pb2.GetImpactsHistoryRequest)
async def get_impacts_history(message, config, **kwargs):
    """Return up to `message.limit` recent impacts, per the request's filter mode."""
    if message.filter == impacts_pb2.GetImpactsHistoryRequest.FilterType.Value('NONE'):
        impacts = await operations.last_impacts(limit=message.limit)
    elif message.filter == impacts_pb2.GetImpactsHistoryRequest.FilterType.Value('ONLY_ACTOR'):
        impacts = await operations.last_actor_impacts(actor=protobuf.to_object(message.actor), limit=message.limit)
    elif message.filter == impacts_pb2.GetImpactsHistoryRequest.FilterType.Value('ONLY_TARGET'):
        impacts = await operations.last_target_impacts(target=protobuf.to_object(message.target), limit=message.limit)
    elif message.filter == impacts_pb2.GetImpactsHistoryRequest.FilterType.Value('BOTH'):
        impacts = await operations.last_actor_target_impacts(actor=protobuf.to_object(message.actor),
                                                             target=protobuf.to_object(message.target),
                                                             limit=message.limit)
    # NOTE(review): a filter value outside these four would leave `impacts`
    # unbound and raise NameError below -- confirm the enum is closed.
    return impacts_pb2.GetImpactsHistoryResponse(impacts=[protobuf.from_impact(impact) for impact in impacts])
@handlers.protobuf_api(impacts_pb2.GetTargetsImpactsRequest)
async def get_targets_impacts(message, config, **kwargs):
    """Fetch the stored impacts for every requested target object."""
    impacts = await operations.get_targets_impacts([protobuf.to_object(target) for target in message.targets])
    return impacts_pb2.GetTargetsImpactsResponse(impacts=[protobuf.from_target_impact(impact) for impact in impacts])
@handlers.protobuf_api(impacts_pb2.GetActorImpactsRequest)
async def get_actor_impacts(message, config, **kwargs):
    """Fetch one actor's impacts, restricted to the requested target types."""
    impacts = await operations.get_actor_impacts(protobuf.to_object(message.actor), message.target_types)
    return impacts_pb2.GetActorImpactsResponse(impacts=[protobuf.from_target_impact(impact) for impact in impacts])
@handlers.protobuf_api(impacts_pb2.GetImpactersRatingsRequest)
async def get_impacters_ratings(message, config, **kwargs):
    """Build a per-target rating of impacters (top `limit`, filtered by actor types)."""
    ratings = await operations.get_impacters_ratings(targets=[protobuf.to_object(target) for target in message.targets],
                                                     actor_types=message.actor_types,
                                                     limit=message.limit)
    # `ratings` maps target -> rating; serialize each pair for the response.
    return impacts_pb2.GetImpactersRatingsResponse(ratings=[protobuf.from_rating(target, rating)
                                                            for target, rating in ratings.items()])
@handlers.protobuf_api(impacts_pb2.ScaleImpactsRequest)
async def scale_impacts(message, config, **kwargs):
    """Multiply stored impacts of the given target types by `scale`, in chunks."""
    # chunk_size comes from service config and bounds per-batch database work.
    await operations.scale_impacts(target_types=message.target_types,
                                   scale=message.scale,
                                   chunk_size=config['custom']['scale_chunk_size'])
    return impacts_pb2.ScaleImpactsResponse()
@handlers.protobuf_api(impacts_pb2.DebugClearServiceRequest)
async def debug_clear_service(message, **kwargs):
    """Testing helper: wipe the whole service database."""
    await operations.clean_database()
    return impacts_pb2.DebugClearServiceResponse()
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
9ab19df506f2d84bf103f3c3dd8846201be7cbd8 | c146744b6a1eba53d4691fc0fdddedbd1c6a4f7a | /lictor/fake.py | f0c230cc36ded9b6334d5dde1ada430eddc19c96 | [] | no_license | adw0rd/django-lictor | 509590a5431eb86d69bba21d376d05a3197f552f | ed07fdb02ba7596c259908b66b4c917f95665d62 | refs/heads/master | 2021-01-11T10:24:22.194217 | 2012-08-19T23:56:12 | 2012-08-19T23:56:12 | 76,251,649 | 1 | 0 | null | 2016-12-12T11:47:05 | 2016-12-12T11:47:04 | null | UTF-8 | Python | false | false | 1,148 | py | import hashlib
import simplejson
from django.conf import settings
from lictor.models import Trace
def md5(s):
    """Return the hex MD5 digest of *s*.

    Accepts either bytes or text; text is UTF-8 encoded first, because
    hashlib.md5() only accepts bytes on Python 3 (the original call raised
    TypeError for str input there).
    """
    if not isinstance(s, bytes):
        s = s.encode('utf-8')
    return hashlib.md5(s).hexdigest()
def make_fake(request):
    """Persist a hand-crafted fake trace graph for the caller's Lictor session.

    The session id is read from the Lictor cookie on *request*; the dump is a
    flat list of graph nodes (i=node id, t=node type, f=module, l=line,
    n=name, c=ids of child nodes) serialized to JSON into a new Trace row.
    """
    session = request.COOKIES.get(settings.LICTOR_SESSION_COOKIE_NAME)
    dump = [
        {"i": md5("test_project.films.urls:12"), "t": "Url", "f": "test_project.films.urls", "l": 12, "n": r'^(?P<page>\d+)/$', "c": [md5("test_project.films.views:123")]},
        {"i": md5("test_project.films.views:123"), "t": "View", "f": "test_project.films.views", "l": 123, "n": "get", "c": [md5("test_project.films.forms:23")]},
        {"i": md5("test_project.films.models:42"), "t": "Model", "f": "test_project.films.models", "l": 42, "n": "__unicode__"},
        {"i": md5("test_project.films.forms:23"), "t": "Form", "f": "test_project.films.forms", "l": 23, "n": "init", "c": [md5("test_project.films.models:42")]},
        {"i": md5("test_project.films.forms:31"), "t": "Form", "f": "test_project.films.forms", "l": 31, "n": "clean", "c": [md5("test_project.films.models:42")]},
    ]
    Trace.objects.create(
        session=session,
        json=simplejson.dumps(dump))
| [
"x11org@gmail.com"
] | x11org@gmail.com |
b622ce5c2241a8e7493f504171e3abae1dcdda25 | aaa762ce46fa0347cdff67464f56678ea932066d | /AppServer/lib/django-1.5/tests/regressiontests/aggregation_regress/models.py | dd4ff50aec093526827fe15867c543b7a25b54d3 | [
"Apache-2.0",
"BSD-3-Clause",
"LGPL-2.1-or-later",
"MIT",
"GPL-2.0-or-later",
"MPL-1.1"
] | permissive | obino/appscale | 3c8a9d8b45a6c889f7f44ef307a627c9a79794f8 | be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f | refs/heads/master | 2022-10-01T05:23:00.836840 | 2019-10-15T18:19:38 | 2019-10-15T18:19:38 | 16,622,826 | 1 | 0 | Apache-2.0 | 2022-09-23T22:56:17 | 2014-02-07T18:04:12 | Python | UTF-8 | Python | false | false | 1,965 | py | # coding: utf-8
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    # Test model: author with a symmetrical self-referencing M2M `friends`.
    name = models.CharField(max_length=100)
    age = models.IntegerField()
    friends = models.ManyToManyField('self', blank=True)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Publisher(models.Model):
    # Test model used as the FK target of Book.publisher.
    name = models.CharField(max_length=255)
    num_awards = models.IntegerField()
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Book(models.Model):
    # Central test model: exercises M2M (authors) plus two FKs to Author
    # (contact uses a distinct related_name to avoid a reverse clash).
    isbn = models.CharField(max_length=9)
    name = models.CharField(max_length=255)
    pages = models.IntegerField()
    rating = models.FloatField()
    price = models.DecimalField(decimal_places=2, max_digits=6)
    authors = models.ManyToManyField(Author)
    contact = models.ForeignKey(Author, related_name='book_contact_set')
    publisher = models.ForeignKey(Publisher)
    pubdate = models.DateField()
    class Meta:
        ordering = ('name',)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Store(models.Model):
    # Test model with datetime/time fields for aggregate tests over those types.
    name = models.CharField(max_length=255)
    books = models.ManyToManyField(Book)
    original_opening = models.DateTimeField()
    friday_night_closing = models.TimeField()
    def __str__(self):
        return self.name
class Entries(models.Model):
    # Deliberately non-conventional capitalised field names and a db_column
    # containing a space -- presumably to exercise identifier quoting (confirm
    # against the tests that use this model).
    EntryID = models.AutoField(primary_key=True, db_column='Entry ID')
    Entry = models.CharField(unique=True, max_length=50)
    Exclude = models.BooleanField()
class Clues(models.Model):
    # Companion to Entries; FK also uses the space-containing 'Entry ID' column.
    ID = models.AutoField(primary_key=True)
    EntryID = models.ForeignKey(Entries, verbose_name='Entry', db_column = 'Entry ID')
    Clue = models.CharField(max_length=150)
@python_2_unicode_compatible
class HardbackBook(Book):
    # Multi-table inheritance child of Book (adds one concrete field).
    weight = models.FloatField()
    def __str__(self):
        return "%s (hardback): %s" % (self.name, self.weight)
| [
"root@lucid64.hsd1.ca.comcast.net"
] | root@lucid64.hsd1.ca.comcast.net |
0a96785256165015b3ea2915a7a984ab4931056a | ade0524b00e48325f00ffa61a5dd3f724f421f31 | /test/rules/functions/test_find_in_map_keys.py | fd9cd88480a3f7e9a3fe108faf3c0813719b55c2 | [
"MIT-0"
] | permissive | jlongtine/cfn-python-lint | 14247dc62ed07934802570534c5b7012bff9126a | 3f5324cfd000e14d9324a242bb7fad528b22a7df | refs/heads/master | 2020-07-19T03:46:30.741522 | 2019-09-04T19:35:34 | 2019-09-04T19:35:34 | 206,368,016 | 1 | 0 | NOASSERTION | 2019-09-04T16:50:06 | 2019-09-04T16:50:06 | null | UTF-8 | Python | false | false | 1,693 | py | """
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint.rules.functions.FindInMapKeys import FindInMapKeys # pylint: disable=E0401
from .. import BaseRuleTestCase
class TestRulesFindInMapKeys(BaseRuleTestCase):
    """Test Find In Map Keys Rule """
    def setUp(self):
        """Register the FindInMapKeys rule and the templates that must pass clean."""
        super(TestRulesFindInMapKeys, self).setUp()
        self.collection.register(FindInMapKeys())
        self.success_templates = [
            'test/fixtures/templates/good/functions/findinmap_keys.yaml',
        ]
    def test_file_positive(self):
        """Good template: the rule must report no findings."""
        self.helper_file_positive()
    def test_file_negative(self):
        """Bad template: presumably 3 is the expected number of findings --
        confirm against BaseRuleTestCase.helper_file_negative."""
        self.helper_file_negative('test/fixtures/templates/bad/functions/findinmap_keys.yaml', 3)
| [
"kddejong@amazon.com"
] | kddejong@amazon.com |
21bc287dfd69591caf52e2875859d6647a1cd3cd | e2f0806ca1cdd887ea40d050a19fa2710427bd38 | /기본 문제/04주차_그리디/11047_동전 0/yeonwoo_11047.py | 8ef51f58a33cd340bf384386b9045c25e98bd3f3 | [] | no_license | JY-Dev/AlgorithmStudy-1 | 001f94d80097c850c79eeb2bc86971a01aa5bd5d | 2ad1df0fd65c72a6f6d1feeba09f889000ff8c15 | refs/heads/main | 2023-08-21T18:38:18.235994 | 2021-09-28T07:07:11 | 2021-09-28T07:07:11 | 406,208,087 | 1 | 0 | null | 2021-09-14T03:14:32 | 2021-09-14T03:14:31 | null | UTF-8 | Python | false | false | 396 | py | import sys
N, K = map(int ,input().split())
coin = []
# Coin values arrive one per line, in increasing order of value.
for i in range(N):
    coin.append(int(sys.stdin.readline().rstrip()))
count_coin = 0
# Greedy: starting from the largest coin, take as many as fit into the
# remaining amount K, then move to the next smaller coin.
for i in range(N-1, -1, -1): # walk the coin list in reverse (largest first)
    count_coin = count_coin + (K // coin[i]) # how many of this coin fit
    # print(count_coin)
    K = K % coin[i] # amount still left to pay
    # print(K)
print(count_coin) | [
"noreply@github.com"
] | JY-Dev.noreply@github.com |
e6e60d494fdd99e137d47cf846a1c54beac9548c | 7a0144da5a567d8497551b09875298ea224bb5bd | /test/two five.py | ccc96ab3ac487d676b1c4ac6afe2aad91b13d4fb | [] | no_license | SLT-DJH/algorithm | dba34614bb0fbbad0ecf5d85b02cb541ab047c5a | e33c843be4efdfc6c6a7300ab4e53b9a7c4b2e67 | refs/heads/master | 2023-03-08T08:27:01.476793 | 2021-02-25T15:06:42 | 2021-02-25T15:06:42 | 297,017,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | a = int(input())
get = a
twocount = 0
fivecount = 0
# Count the exponents of 2 and of 5 in the product a * (a-1) * ... * 2
# (i.e. in a!): every term is stripped of its 2-factors, then its 5-factors.
# BUG FIX: use "get > 1" instead of "get != 1" -- with the original
# condition any input a <= 0 never reaches 1 and the loop spins forever.
while get > 1 :
    tempget = get
    while True :
        if tempget % 2 == 0 :
            twocount += 1
            tempget = tempget // 2
        else :
            if tempget % 5 == 0 :
                fivecount += 1
                tempget = tempget // 5
            else :
                break
    get = get - 1
print(twocount, fivecount)
| [
"jydwww@naver.com"
] | jydwww@naver.com |
2ab19d5e36b7eff67501d509d4c02705a7805ce7 | 2940f5416082dadd9c646cd9a46d2d0a99883efb | /venv/Lib/site-packages/pandas/tests/indexes/base_class/test_formats.py | f07b06acbfbdbc7ab38c9b4ff65b22a07e5b217d | [
"MIT"
] | permissive | tpike3/SugarScape | 4813e4fefbfb0a701f5913d74f045fd0eaed1942 | 39efe4007fba2b12b75c72f7795827a1f74d640b | refs/heads/main | 2021-06-20T03:55:46.288721 | 2021-01-20T17:06:35 | 2021-01-20T17:06:35 | 168,583,530 | 11 | 3 | MIT | 2021-01-20T17:19:53 | 2019-01-31T19:29:40 | Jupyter Notebook | UTF-8 | Python | false | false | 5,155 | py | import numpy as np
import pytest
import pandas._config.config as cf
from pandas import Index
class TestIndexRendering:
    """Repr/formatting tests for object-dtype Index: short, wrapped and
    truncated output, for both ASCII and East-Asian-width content."""
    @pytest.mark.parametrize(
        "index,expected",
        [
            # ASCII
            # short
            (
                Index(["a", "bb", "ccc"]),
                """Index(['a', 'bb', 'ccc'], dtype='object')""",
            ),
            # multiple lines
            (
                Index(["a", "bb", "ccc"] * 10),
                "Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', "
                "'bb', 'ccc', 'a', 'bb', 'ccc',\n"
                "       'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', "
                "'bb', 'ccc', 'a', 'bb', 'ccc',\n"
                "       'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n"
                "      dtype='object')",
            ),
            # truncated
            (
                Index(["a", "bb", "ccc"] * 100),
                "Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n"
                "       ...\n"
                "       'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n"
                "      dtype='object', length=300)",
            ),
            # Non-ASCII
            # short
            (
                Index(["あ", "いい", "ううう"]),
                """Index(['あ', 'いい', 'ううう'], dtype='object')""",
            ),
            # multiple lines
            (
                Index(["あ", "いい", "ううう"] * 10),
                (
                    "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
                    "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
                    "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう'],\n"
                    "      dtype='object')"
                ),
            ),
            # truncated
            (
                Index(["あ", "いい", "ううう"] * 100),
                (
                    "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
                    "'あ', 'いい', 'ううう', 'あ',\n"
                    "       ...\n"
                    "       'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう', 'あ', 'いい', 'ううう'],\n"
                    "      dtype='object', length=300)"
                ),
            ),
        ],
    )
    def test_string_index_repr(self, index, expected):
        """repr() of an object Index must match the expected fixture exactly."""
        result = repr(index)
        assert result == expected
    @pytest.mark.parametrize(
        "index,expected",
        [
            # short
            (
                Index(["あ", "いい", "ううう"]),
                ("Index(['あ', 'いい', 'ううう'], dtype='object')"),
            ),
            # multiple lines
            (
                Index(["あ", "いい", "ううう"] * 10),
                (
                    "Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ', 'いい', 'ううう'],\n"
                    "      dtype='object')"
                    ""
                ),
            ),
            # truncated
            (
                Index(["あ", "いい", "ううう"] * 100),
                (
                    "Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
                    "'ううう', 'あ', 'いい', 'ううう',\n"
                    "       'あ',\n"
                    "       ...\n"
                    "       'ううう', 'あ', 'いい', 'ううう', 'あ', "
                    "'いい', 'ううう', 'あ', 'いい',\n"
                    "       'ううう'],\n"
                    "      dtype='object', length=300)"
                ),
            ),
        ],
    )
    def test_string_index_repr_with_unicode_option(self, index, expected):
        """Same as above, but with wide-character-aware line wrapping enabled."""
        # Enable Unicode option -----------------------------------------
        with cf.option_context("display.unicode.east_asian_width", True):
            result = repr(index)
            assert result == expected
    def test_repr_summary(self):
        """With max_seq_items set, a long Index repr is summarized with '...'."""
        with cf.option_context("display.max_seq_items", 10):
            result = repr(Index(np.arange(1000)))
            assert len(result) < 200
            assert "..." in result
    def test_index_repr_bool_nan(self):
        """Booleans and NaN in an object Index format/repr consistently."""
        # GH32146
        arr = Index([True, False, np.nan], dtype=object)
        exp1 = arr.format()
        out1 = ["True", "False", "NaN"]
        assert out1 == exp1
        exp2 = repr(arr)
        out2 = "Index([True, False, nan], dtype='object')"
        assert out2 == exp2
| [
"tpike3@gmu.edu"
] | tpike3@gmu.edu |
1d833bc8cf167e0f94fff7c8988a86ae745c6943 | 404728244681a773f55be7f7b0c4933f439f3106 | /tests/web/suite/client.py | ef125df6d7ee968bfda8eaafe73e63085cbca8e9 | [] | no_license | limingjin10/walis | c4e22db27d964cefa068883edf979cabfedd74d6 | 198a4e94992c1790b7a9f2cd34b1686fefc87845 | refs/heads/master | 2021-05-29T04:50:34.091849 | 2015-06-15T14:19:23 | 2015-06-15T14:19:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | # coding=utf8
from __future__ import absolute_import, division, print_function
import pytest
from walis.server import app
# TODO 在每个模块conftest中使用权限检查
# web_client.set_cookie('eleme.test', 'god_token', 'true')
# web_client.set_cookie('eleme.test', 'god_uid', '485388')
# web_client.set_cookie('eleme.test', 'user_id', '485388')
@pytest.fixture(scope="session", autouse=True)
def web_client():
    """Session-wide Flask test client, pre-authenticated via the SID cookie.

    Sanity-checks the app by hitting /api/ping and /api/login before any test
    runs.  NOTE(review): the client is returned after the `with` block has
    exited, i.e. outside its context-manager scope -- confirm that later use
    of request-context features still works as intended.
    """
    with app.test_client() as client:
        # Empty first argument is the cookie domain.
        client.set_cookie(
            '',
            'SID',
            value='GOmrIgCkWQx8Y4FJPxAxFUP75RKSB1XYUa4A'
        )
        response = client.get('/api/ping', buffered=True)
        assert response.status_code == 200
        response = client.get('/api/login', buffered=True)
        assert response.status_code == 200
    return client
| [
"shaofeirong2006@126.com"
] | shaofeirong2006@126.com |
2afe1add9070b59bf76e664827eaba73908b8798 | 4010495de5381f5c7084ee8968aafd311167e6e0 | /brainiac_libs/brainiac_cores/conf_colors.py | 3a4dc97a0101be76e1226725d307e6d29f950c78 | [
"MIT"
] | permissive | marioaugustorama/brainiac_pwn | 4920131051e9de17f50c06ba2a2b36a4c827ceab | daf2abefe54800682e84177762ec6869398f3090 | refs/heads/master | 2020-03-08T10:04:15.422637 | 2018-04-04T14:55:20 | 2018-04-04T14:55:20 | 128,062,869 | 2 | 0 | MIT | 2018-04-04T13:02:05 | 2018-04-04T13:02:05 | null | UTF-8 | Python | false | false | 637 | py | arry_cores = {
"vermelho" :'\033[31m',
"verde" : '\033[32m',
"azul" :'\033[34m',
"ciano" : '\033[36m',
"magenta" : '\033[35m',
"amarelo" : '\033[33m',
"preto " : '\033[30m',
"branco" : '\033[37m',
"normal" : '\033[0;0m',
"negrito" : '\033[1m',
"reverso " : '\033[2m',
"fundo_preto" : '\033[40m',
"fundo_vermelho" : '\033[41m',
"fundo_verde" : '\033[42m',
"fundo_amarelo" : '\033[43m',
"fundo_azul" : '\033[44m',
"fundo_magenta" : '\033[45m',
"fundo_ciano" : '\033[46m',
"fundo_branco" :'\033[47m',
}
| [
"darkcode357@gmail.com"
] | darkcode357@gmail.com |
ac63cf07f7fa28aa40b6d176ed686d15b8b7b345 | 7465148de5d656ebfe68b588a2f271a11384ed6a | /litepipeline/litepipeline/manager/utils/litedfs.py | 97177e15c398e93168fde3f58e61711e161513ad | [] | no_license | fiefdx/LitePipeline | 1462dacdd1a0f2c67972b6014b428c2c45d46949 | 09608f8c5f248d2ba10e5840bf00d69e76ed6291 | refs/heads/master | 2023-04-14T11:45:18.929249 | 2023-04-02T06:48:30 | 2023-04-02T06:48:30 | 226,355,739 | 2 | 0 | null | 2023-04-01T17:49:14 | 2019-12-06T15:17:33 | Python | UTF-8 | Python | false | false | 394 | py | # -*- coding: utf-8 -*-
import os
import time
import json
import hashlib
import logging
from tornado import ioloop
from tornado import gen
from litedfs_client.client import LiteDFSClient
from litepipeline.manager.config import CONFIG
LOG = logging.getLogger(__name__)
# Module-level singleton; presumably assigned a LiteDFS instance at service
# startup -- confirm in the module that imports this one.
LDFS = None
class LiteDFS(object):
    # Thin wrapper owning a LiteDFSClient connection to the given host/port.
    def __init__(self, host, port):
        self.client = LiteDFSClient(host, port)
| [
"fiefdx@163.com"
] | fiefdx@163.com |
e15b04c7931d37f5633a4cf349ba6d4c044639d0 | 32cb0be487895629ad1184ea25e0076a43abba0a | /LifePictorial/top/api/rest/FuwuScoresGetRequest.py | 5f9681e9ab9bb4e5931bb956f6bd0e3218c9b4f2 | [] | no_license | poorevil/LifePictorial | 6814e447ec93ee6c4d5b0f1737335601899a6a56 | b3cac4aa7bb5166608f4c56e5564b33249f5abef | refs/heads/master | 2021-01-25T08:48:21.918663 | 2014-03-19T08:55:47 | 2014-03-19T08:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | '''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class FuwuScoresGetRequest(RestApi):
    # Auto-generated TOP SDK request wrapper for the taobao.fuwu.scores.get API.
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameters; None until set by the caller.
        self.current_page = None
        self.date = None
        self.page_size = None
    def getapiname(self):
        return 'taobao.fuwu.scores.get'
| [
"poorevil@gmail.com"
] | poorevil@gmail.com |
10f532670bd79d4f62ef5ccbf956de943ba37d7d | e7188f29592fcfebce8b2ac86336534e1b15a505 | /network-program/thread_learn/simple_thread_2.py | 23eb5f5fdbcaf74653b14bc6383d30210a436c67 | [] | no_license | liuxingrichu/python-learn-log | 6f924172ae4d66aabd5740a2f161d7d653cf4af1 | 4556ba50388310e0c17010c0f999f54db5eef755 | refs/heads/master | 2021-01-19T17:16:42.413655 | 2017-03-23T15:12:20 | 2017-03-23T15:12:20 | 82,430,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
类方式创建线程
"""
import threading
class MyThread(threading.Thread):
    """Demo of the class-based way to create a thread: subclass
    threading.Thread and override run() with the work to perform."""

    def __init__(self, n):
        # Initialise the base Thread machinery before storing our own state.
        threading.Thread.__init__(self)
        self.n = n

    def run(self):
        # Executed in the new thread once start() is called.
        print('task', self.n)
# Demo driver: start two threads; each prints its task label from run().
# The threads are not join()ed -- the interpreter simply waits for them
# to finish on exit.
t1 = MyThread('t1')
t2 = MyThread('t2')
t1.start()
t2.start()
| [
"liuxingrichu@163.com"
] | liuxingrichu@163.com |
865a4b2b3225f7c2378ed05a01be12a13dd5d5e0 | 3f182e860f4485e304bc9195d1c1eaa8b2de70aa | /estee/generators/simple.py | 5da78603b17e110a8fd9cb4c17c5d41708f2a23b | [
"MIT"
] | permissive | spirali/estee | 0feda169af35edc51bd4ac9cab2d36377561a576 | 55c0834db3d7da407b7c37d46fa41b5b563e2bbe | refs/heads/master | 2020-03-28T02:22:22.218292 | 2019-04-05T13:55:16 | 2019-04-05T13:55:16 | 147,566,052 | 9 | 4 | MIT | 2019-04-05T12:32:29 | 2018-09-05T19:01:33 | Python | UTF-8 | Python | false | false | 1,243 | py | import random
def random_dependencies(count: int, edge_density: float, task_fn):
    """
    Creates a complete graph with the given edge density.

    count - number of tasks created via task_fn
    edge_density - probability that any ordered (n1, n2) pair gets an edge
    task_fn - zero-argument factory returning a new task object
    """
    nodes = [task_fn() for i in range(count)]
    for n1 in nodes:
        for n2 in nodes:
            # Skip self-loops, pairs losing the density coin-flip, and pairs
            # where n1 already precedes n2 -- presumably to keep the graph
            # acyclic (TODO confirm semantics of is_predecessor_of).
            if (n1 == n2 or random.random() > edge_density
                    or n1.is_predecessor_of(n2)):
                continue
            n1.add_input(random.choice(n2.outputs))
def random_levels(counts, inputs, task_fn):
    """
    Build a layered random task graph.

    counts - number of tasks per level; each entry is an int or an inclusive
             (min, max) range
    inputs - number of inputs per task of each level; each entry is an int or
             an inclusive (min, max) range (clamped to the previous level's
             output count). The first entry is effectively unused since the
             first level has no predecessors.
    task_fn - zero-argument factory returning a new task object
    """
    prev = None
    for count, inps in zip(counts, inputs):
        if isinstance(count, tuple):
            count = random.randint(count[0], count[1])
        level = [task_fn() for _ in range(count)]
        for task in level:
            if inps:
                if isinstance(inps, tuple):
                    # Sample a fresh input count for EVERY task.  The original
                    # code overwrote `inps` with the sampled int here, so only
                    # the first task of a level ever sampled; the rest silently
                    # reused its value.
                    n_inputs = random.randint(min(len(prev), inps[0]),
                                              min(len(prev), inps[1]))
                else:
                    n_inputs = inps
                task.add_inputs(random.sample(prev, n_inputs))
        # Outputs of this level become the candidate inputs of the next one.
        prev = sum((task.outputs for task in level), ())
| [
"stanislav.bohm@vsb.cz"
] | stanislav.bohm@vsb.cz |
2b46dce55e29ad70824f427037c67831606c0bed | d1ae7f66513c622e71d23ecf5f19cd7a56a4c409 | /apigentools/commands/list_config.py | 7979cf7093b4b7fb5bae06d1332834ae41056d95 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | DataDog/apigentools | 5471ccd145631f0a3ccd65bdb2b26710e59cf270 | 8176b85e4fe9cdb47b58c1f2ccc4b9901e1baf57 | refs/heads/master | 2023-08-17T00:31:39.982410 | 2023-07-03T15:37:49 | 2023-07-03T15:37:49 | 194,347,428 | 33 | 13 | BSD-3-Clause | 2023-07-03T15:37:50 | 2019-06-29T00:26:52 | Python | UTF-8 | Python | false | false | 4,020 | py | # Unless explicitly stated otherwise all files in this repository are licensed
# under the 3-clause BSD style license (see LICENSE).
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2020-Present Datadog, Inc.
import json
import logging
import click
import jsonpath_ng
from apigentools.commands.command import Command, run_command_with_config
from apigentools.utils import env_or_val
log = logging.getLogger(__name__)
# `apigentools config` group; with no subcommand it prints the full
# (language, version, spec-file) information itself.
@click.group(invoke_without_command=True)
@click.option(
    "-f",
    "--full-spec-file",
    default=env_or_val("APIGENTOOLS_FULL_SPEC_FILE", "full_spec.yaml"),
    help="Name of the OpenAPI full spec file to write (default: 'full_spec.yaml'). "
    + "Note that if some languages override config's spec_sections, additional "
    + "files will be generated with name pattern 'full_spec.<lang>.yaml'",
)
@click.option(
    "-L",
    "--list-languages",
    is_flag=True,
    help="List only what languages are supported",
)
@click.option(
    "-V", "--list-versions", is_flag=True, help="List only what versions are supported"
)
@click.pass_context
def config(ctx, **kwargs):
    """Displays information about the configuration for the spec being worked on, including supported languages,
    api versions, and the paths to the generated api yaml. These languages and api versions can be directly
    passed to the `--languages` and `--api-versions` flags of the supported commands."""
    # Subcommands (`get`, `list`) invoke ConfigCommand themselves; only run
    # it here when the bare group was called.
    if ctx.invoked_subcommand is None:
        run_command_with_config(ConfigCommand, ctx, **kwargs)
# `config get`: evaluate a JSONPath and print exactly one matching value.
@config.command("get")
@click.option(
    "-r",
    "--raw",
    is_flag=True,
    default=False,
    help="If the result is a simple value (string, number or boolean), it will be written directly without quotes",
)
@click.argument(
    "jsonpath",
)
@click.pass_context
def jsonpath(ctx, **kwargs):
    """Search expanded config for a single value by given JSONPATH."""
    # _get_value tells ConfigCommand.run to require exactly one match and
    # print it bare (honoring --raw) instead of as a JSON array.
    kwargs["_get_value"] = True
    run_command_with_config(ConfigCommand, ctx, **kwargs)
# `config list`: evaluate a JSONPath and print all matches as a JSON array.
# NOTE(review): this function reuses the module-level name `jsonpath` of the
# "get" command defined earlier; click registers each command independently
# so behavior is unaffected, but consider renaming to avoid the shadowing.
@config.command("list")
@click.argument(
    "jsonpath",
)
@click.pass_context
def jsonpath(ctx, **kwargs):
    """Search expanded config for values by given JSONPATH."""
    run_command_with_config(ConfigCommand, ctx, **kwargs)
class ConfigCommand(Command):
    """Implements the ``apigentools config`` command group.

    Depending on the parsed CLI arguments this either evaluates a JSONPath
    expression against the expanded config (``config get`` / ``config list``)
    or prints the supported (language, version, specfile) combinations.
    """

    def run(self):
        """Execute the command.

        Returns:
            int: 0 on success, 1 on error (no/ambiguous JSONPath result,
            or an unparseable JSONPath expression).
        """
        # A "jsonpath" argument is only present for the get/list subcommands.
        # (The original `"jsonpath" in self.args is not None` relied on
        # accidental comparison chaining; this spells the intent explicitly.)
        if self.args.get("jsonpath") is not None:
            try:
                jsonpath_expr = jsonpath_ng.parse(self.args["jsonpath"])
                result_values = [
                    match.value for match in jsonpath_expr.find(self.config.dict())
                ]
                if self.args.get("_get_value", False):
                    # "config get": exactly one match is required.
                    if len(result_values) == 1:
                        to_print = json.dumps(result_values[0])
                        if isinstance(to_print, str) and self.args.get("raw", False):
                            # --raw: print simple values without JSON quotes.
                            to_print = to_print.strip('"')
                        print(to_print)
                    else:
                        log.error(
                            "Result doesn't have exactly 1 value: %s", result_values
                        )
                        return 1
                else:
                    # "config list": print all matches as a JSON array.
                    print(json.dumps(result_values))
            except (
                Exception
            ) as e:  # jsonpath_ng parser really does `raise Exception`, not a more specific exception class
                log.error("Failed parsing JSONPath expression: %s", e)
                return 1
        else:
            # Yields tuples (language, version, spec_path)
            language_info = self.yield_lang_version_specfile()
            # Modify the returned data based on user flags
            if self.args.get("list_languages"):
                out = {lang_info[0] for lang_info in language_info}
            elif self.args.get("list_versions"):
                out = {lang_info[1] for lang_info in language_info}
            else:
                out = list(language_info)
            click.echo(out)
        return 0
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
71bd652762d3bc860af54b0a0a640abb435e9056 | 5b7c2feb27a71837edf526315d413706a6bf82ff | /tests/trainers/lightning/test_logging.py | 5b28d53b6bbd5331da9bfd575b4bbbef67e30191 | [
"BSD-3-Clause"
] | permissive | facebookresearch/mmf | df675223566dc8fb2359aa3e1a2d49db5e3c2b9a | 63f76fbcfe2d056b88734fc41a983251d20e6c61 | refs/heads/main | 2023-08-23T23:40:46.827046 | 2023-07-11T06:18:50 | 2023-07-11T06:18:50 | 138,831,170 | 2,432 | 592 | NOASSERTION | 2023-08-11T20:26:11 | 2018-06-27T04:52:40 | Python | UTF-8 | Python | false | false | 4,666 | py | # Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from unittest.mock import MagicMock, patch
from mmf.trainers.callbacks.logistics import LogisticsCallback
from mmf.trainers.lightning_core.loop_callback import LightningLoopCallback
from mmf.utils.timer import Timer
from tests.test_utils import skip_if_no_network
from tests.trainers.test_utils import (
get_config_with_defaults,
get_lightning_trainer,
get_mmf_trainer,
run_lightning_trainer,
)
class TestLightningTrainerLogging(unittest.TestCase):
    """Parity test: the lightning trainer must emit the same tensorboard
    scalars as the legacy mmf trainer for an equivalent training schedule."""
    def setUp(self):
        # Scalars captured from each trainer as a list of {iteration: log_dict}.
        self.mmf_tensorboard_logs = []
        self.lightning_tensorboard_logs = []
    @skip_if_no_network
    @patch("mmf.common.test_reporter.PathManager.mkdirs")
    @patch("mmf.trainers.callbacks.logistics.setup_output_folder", return_value="logs")
    @patch("mmf.trainers.lightning_trainer.setup_output_folder", return_value="logs")
    @patch("mmf.utils.logger.setup_output_folder", return_value="logs")
    @patch("torch.utils.tensorboard.SummaryWriter")
    @patch("mmf.trainers.callbacks.logistics.get_mmf_env", return_value="logs")
    @patch("mmf.common.test_reporter.get_mmf_env", return_value="logs")
    @patch("mmf.trainers.lightning_trainer.get_mmf_env", return_value="logs")
    def test_tensorboard_logging_parity(
        self,
        summary_writer,
        mmf,
        lightning,
        logistics,
        logistics_logs,
        report_logs,
        trainer_logs,
        mkdirs,
    ):
        # NOTE(review): @patch injects mocks bottom-up, so these parameter
        # names do not line up with the patch targets above; none of the
        # mocks are used directly in the body, so this is only cosmetic --
        # but confirm before relying on any of these names.
        # mmf trainer
        config = self._get_mmf_config(
            max_updates=8,
            batch_size=2,
            max_epochs=None,
            log_interval=3,
            evaluation_interval=9,
            tensorboard=True,
        )
        mmf_trainer = get_mmf_trainer(config=config)
        def _add_scalars_mmf(log_dict, iteration):
            self.mmf_tensorboard_logs.append({iteration: log_dict})
        mmf_trainer.load_metrics()
        logistics_callback = LogisticsCallback(mmf_trainer.config, mmf_trainer)
        logistics_callback.snapshot_timer = MagicMock(return_value=None)
        logistics_callback.train_timer = Timer()
        # Capture tensorboard writes in memory instead of writing event files.
        logistics_callback.tb_writer.add_scalars = _add_scalars_mmf
        mmf_trainer.logistics_callback = logistics_callback
        mmf_trainer.on_validation_end = logistics_callback.on_validation_end
        mmf_trainer.callbacks = [logistics_callback]
        mmf_trainer.early_stop_callback = MagicMock(return_value=None)
        mmf_trainer.on_update_end = logistics_callback.on_update_end
        mmf_trainer.training_loop()
        # lightning_trainer
        # The same schedule expressed in lightning terms (steps / intervals).
        config = self._get_config(
            max_steps=8,
            batch_size=2,
            log_every_n_steps=3,
            val_check_interval=9,
            tensorboard=True,
        )
        trainer = get_lightning_trainer(config=config, prepare_trainer=False)
        def _add_scalars_lightning(log_dict, iteration):
            self.lightning_tensorboard_logs.append({iteration: log_dict})
        def _on_fit_start_callback():
            # The tb_writer only exists once fit starts, so hook it up here.
            trainer.tb_writer.add_scalars = _add_scalars_lightning
            callback = LightningLoopCallback(trainer)
            trainer.callbacks.append(callback)
        run_lightning_trainer(trainer, on_fit_start_callback=_on_fit_start_callback)
        # Every logged iteration, and every scalar in it, must match exactly.
        self.assertEqual(
            len(self.mmf_tensorboard_logs), len(self.lightning_tensorboard_logs)
        )
        for mmf, lightning in zip(
            self.mmf_tensorboard_logs, self.lightning_tensorboard_logs
        ):
            self.assertDictEqual(mmf, lightning)
    def _get_config(
        self, max_steps, batch_size, log_every_n_steps, val_check_interval, tensorboard
    ):
        # Build a lightning-style trainer config with project defaults applied.
        config = {
            "trainer": {
                "params": {
                    "max_steps": max_steps,
                    "log_every_n_steps": log_every_n_steps,
                    "val_check_interval": val_check_interval,
                }
            },
            "training": {"batch_size": batch_size, "tensorboard": tensorboard},
        }
        return get_config_with_defaults(config)
    def _get_mmf_config(
        self,
        max_updates,
        max_epochs,
        batch_size,
        log_interval,
        evaluation_interval,
        tensorboard,
    ):
        # Build the equivalent legacy mmf trainer config.
        config = {
            "training": {
                "batch_size": batch_size,
                "tensorboard": tensorboard,
                "max_updates": max_updates,
                "max_epochs": max_epochs,
                "log_interval": log_interval,
                "evaluation_interval": evaluation_interval,
            }
        }
        return get_config_with_defaults(config)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
2df994f504e9aeb22890f567378afa42539ae9c3 | 9f5dd76fe21a66f90700f83840f2491dd34b17f9 | /tensorflow/python/ops/linalg/linear_operator_full_matrix.py | 8fe6891925099510389e5a7714124a14b97cf287 | [
"Apache-2.0"
] | permissive | thuanvh/tensorflow | b328964da68744bbd77799b13729835dcf4dbf79 | a599e0e2fc5a0e7964ad25c2f5c7e6ed5b679dc6 | refs/heads/master | 2021-07-20T13:55:40.451003 | 2019-12-03T06:34:09 | 2019-12-03T06:34:09 | 119,012,773 | 3 | 1 | Apache-2.0 | 2019-12-03T06:42:38 | 2018-01-26T06:05:36 | C++ | UTF-8 | Python | false | false | 6,660 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` that wraps a [batch] matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorFullMatrix"]
@tf_export("linalg.LinearOperatorFullMatrix")
class LinearOperatorFullMatrix(linear_operator.LinearOperator):
  """`LinearOperator` that wraps a [batch] matrix.
  This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape
  `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
  batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
  an `M x N` matrix.
  ```python
  # Create a 2 x 2 linear operator.
  matrix = [[1., 2.], [3., 4.]]
  operator = LinearOperatorFullMatrix(matrix)
  operator.to_dense()
  ==> [[1., 2.]
       [3., 4.]]
  operator.shape
  ==> [2, 2]
  operator.log_abs_determinant()
  ==> scalar Tensor
  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor
  # Create a [2, 3] batch of 4 x 4 linear operators.
  matrix = tf.random.normal(shape=[2, 3, 4, 4])
  operator = LinearOperatorFullMatrix(matrix)
  ```
  #### Shape compatibility
  This operator acts on [batch] matrix with compatible shape.
  `x` is a batch matrix with compatible shape for `matmul` and `solve` if
  ```
  operator.shape = [B1,...,Bb] + [M, N], with b >= 0
  x.shape = [B1,...,Bb] + [N, R], with R >= 0.
  ```
  #### Performance
  `LinearOperatorFullMatrix` has exactly the same performance as would be
  achieved by using standard `TensorFlow` matrix ops. Intelligent choices are
  made based on the following initialization hints.
  * If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a
    Cholesky factorization is used for the determinant and solve.
  In all cases, suppose `operator` is a `LinearOperatorFullMatrix` of shape
  `[M, N]`, and `x.shape = [N, R]`. Then
  * `operator.matmul(x)` is `O(M * N * R)`.
  * If `M=N`, `operator.solve(x)` is `O(N^3 * R)`.
  * If `M=N`, `operator.determinant()` is `O(N^3)`.
  If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
  `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
  #### Matrix property hints
  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:
  * If `is_X == True`, callers should expect the operator to have the
    property `X`. This is a promise that should be fulfilled, but is *not* a
    runtime assert. For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """
  def __init__(self,
               matrix,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorFullMatrix"):
    r"""Initialize a `LinearOperatorFullMatrix`.
    Args:
      matrix: Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.
        Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
        `complex128`.
      is_non_singular: Expect that this operator is non-singular.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`. Note that we do not require the operator to be
        self-adjoint to be positive-definite. See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square: Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.
    Raises:
      TypeError: If `matrix.dtype` is not an allowed type.
      ValueError: If `matrix` has fewer than 2 dimensions (statically known).
    """
    with ops.name_scope(name, values=[matrix]):
      # Keeps a reference to `matrix` itself when it is a variable-like
      # object, so updates to it are reflected by the operator.
      self._matrix = linear_operator_util.convert_nonref_to_tensor(
          matrix, name="matrix")
      self._check_matrix(self._matrix)
      super(LinearOperatorFullMatrix, self).__init__(
          dtype=self._matrix.dtype,
          graph_parents=None,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          name=name)
      # TODO(b/143910018) Remove graph_parents in V3.
      self._set_graph_parents([self._matrix])
  def _check_matrix(self, matrix):
    """Static check of the `matrix` argument."""
    allowed_dtypes = [
        dtypes.float16,
        dtypes.float32,
        dtypes.float64,
        dtypes.complex64,
        dtypes.complex128,
    ]
    matrix = ops.convert_to_tensor(matrix, name="matrix")
    dtype = matrix.dtype
    if dtype not in allowed_dtypes:
      raise TypeError(
          "Argument matrix must have dtype in %s. Found: %s"
          % (allowed_dtypes, dtype))
    # ndims is None for fully-dynamic shapes; then the check is skipped.
    if matrix.shape.ndims is not None and matrix.shape.ndims < 2:
      raise ValueError(
          "Argument matrix must have at least 2 dimensions. Found: %s"
          % matrix)
  def _shape(self):
    """Static `TensorShape` of the wrapped matrix."""
    return self._matrix.shape
  def _shape_tensor(self):
    """Dynamic shape of the wrapped matrix as a 1-D integer `Tensor`."""
    return array_ops.shape(self._matrix)
  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    """Batch matmul of the stored matrix with `x`, optionally adjointed."""
    return math_ops.matmul(
        self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
  def _to_dense(self):
    """Return the wrapped matrix itself; it is already dense."""
    return self._matrix
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
30f417b36b012ac709085c69683beb06c6c8d0ea | bc368e94d950af97b71e0b0c2a3d2b86c6a9d173 | /learn-theano/snn/membrane_cnn-master/membrane_cnn-master/membrane_examples.py | fee74d6761f1d20bec9c7b46ab8b2b655883ebc1 | [
"BSD-3-Clause"
] | permissive | ChenLiangbo/Learning-python | 6100c6ff74330fb1710622cdb22bde5a1d52e40b | 868cc4d71d1b9bd362b9fac8a39f295037d20b4c | refs/heads/master | 2020-06-11T07:42:04.434182 | 2018-05-08T05:06:14 | 2018-05-08T05:06:14 | 75,731,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,149 | py | import mahotas
import scipy.ndimage
import scipy.misc
import gzip
import cPickle
# Patch geometry: 95x95-pixel windows (imgrad = half-width) and a single
# z-slice (zrad = 0 -> zd = 1).
imgrad = 47
imgd = 2 * imgrad + 1
zrad = 0
zd = 2 * zrad + 1
#test mode
#nimages = 10
#display_output = True
#ntrain = 5
#nvalid = 2
#ntest = 2
#live mode
nimages = 75
display_output = False
ntrain = 5000
nvalid = 1000
ntest = 1000
# Preallocated (features, labels) pairs for each split; each row is a
# flattened imgd x imgd x zd patch.
# NOTE(review): `np`, `uint8` (and later `zeros`, `ceil`, `diff`, `random`,
# pyplot names) are never imported in this file -- it appears to rely on an
# `ipython --pylab` style namespace; confirm before running standalone.
train_set = (np.zeros((ntrain, imgd*imgd*zd), dtype=np.uint8), np.zeros(ntrain, dtype=uint8))
valid_set = (np.zeros((nvalid, imgd*imgd*zd), dtype=np.uint8), np.zeros(nvalid, dtype=uint8))
test_set = (np.zeros((ntest, imgd*imgd*zd), dtype=np.uint8), np.zeros(ntest, dtype=uint8))
# Total fraction of pixel values saturated by normalize_image (half per end).
saturation_level = 0.005
def normalize_image(original_image, saturation_level=0.005):
    """Contrast-stretch an image toward the full uint8 range [0, 255].

    The lowest and highest ``saturation_level / 2`` fractions of pixel
    values are saturated, which makes the stretch robust to a few outlier
    pixels.

    Args:
        original_image: array-like image; values are interpreted as uint8.
        saturation_level: total fraction of pixels to saturate (half at each
            end of the intensity range). Defaults to 0.005, matching the
            module-level setting.

    Returns:
        np.uint8 array with the same shape as ``original_image``.
    """
    sorted_image = np.sort(np.uint8(original_image).ravel())
    # Indices must be ints: float indices raise IndexError on modern NumPy
    # (int() truncates exactly like the old implicit float indexing did).
    lo_idx = int(len(sorted_image) * (saturation_level / 2))
    hi_idx = int(len(sorted_image) * (1 - saturation_level / 2))
    minval = np.float32(sorted_image[lo_idx])
    maxval = np.float32(sorted_image[hi_idx])
    # NOTE(review): a constant image gives maxval == minval and divides by
    # zero, exactly as in the original code -- confirm inputs always vary.
    norm_image = np.uint8((original_image - minval) * (255 / (maxval - minval)))
    return norm_image
# Disc structuring element used to erode label regions below.
shrink_radius = 5
y,x = np.ogrid[-shrink_radius:shrink_radius+1, -shrink_radius:shrink_radius+1]
shrink_disc = x*x + y*y <= shrink_radius*shrink_radius
# Sigma of the Gaussian blur applied before the watershed.
gblur_sigma = 1
# Half-diagonal of the (imgrad+1)-sized window: sampling centers at least
# this far from the image edge keeps any rotated crop inside the image.
min_border = ceil( sqrt( 2 * ( (imgrad + 1) ** 2 ) ) )
mask = None
nsample_images = nimages - zrad * 2
# Fraction of samples drawn from the membrane (positive) class.
membrane_proportion = 0.5
# Fixed seed for reproducible sampling.
random.seed(7)
# Per-split fill counters and an overall progress counter.
train_i = 0;
valid_i = 0;
test_i = 0;
sample_count = 0;
# For each usable slice: derive membrane / non-membrane masks from the
# labels, then sample rotated patches until this image's per-split quotas
# (train, then valid, then test) are met.
for imgi in range (zrad, nimages-zrad):
    input_img = normalize_image(mahotas.imread('D:\\dev\\datasets\\isbi\\train-input\\train-input_{0:04d}.tif'.format(imgi)))
    #input_img = mahotas.imread('D:\\dev\\datasets\\isbi\\train-input\\train-input_{0:04d}.tif'.format(imgi))
    label_img = mahotas.imread('D:\\dev\\datasets\\isbi\\train-labels\\train-labels_{0:04d}.tif'.format(imgi))
    input_vol = zeros((input_img.shape[0], input_img.shape[1], zd), dtype=uint8)
    for zoffset in range (zd):
        # NOTE(review): `zd == zrad` can never be true (zd = 2*zrad + 1), so
        # the else branch always runs and re-reads/normalizes the center
        # slice even when zd == 1; the condition was probably meant to be
        # `zd == 1`. Harmless but wasteful -- confirm.
        if zd == zrad:
            input_vol[:,:,zoffset] = input_img
        else:
            input_vol[:,:,zoffset] = normalize_image(mahotas.imread('D:\\dev\\datasets\\isbi\\train-input\\train-input_{0:04d}.tif'.format(imgi - zrad + zoffset)))
            #input_vol[:,:,zoffset] = mahotas.imread('D:\\dev\\datasets\\isbi\\train-input\\train-input_{0:04d}.tif'.format(imgi - zrad + zoffset))
    blur_img = scipy.ndimage.gaussian_filter(input_img, gblur_sigma)
    # Boundary pixels: background plus any label change between vertically
    # or horizontally adjacent pixels.
    boundaries = label_img==0;
    boundaries[0:-1,:] = np.logical_or(boundaries[0:-1,:], diff(label_img, axis=0)!=0);
    boundaries[:,0:-1] = np.logical_or(boundaries[:,0:-1], diff(label_img, axis=1)!=0);
    # erode to be sure we include at least one membrane
    inside = mahotas.erode(boundaries == 0, shrink_disc)
    #display = input_img.copy()
    #display[np.nonzero(inside)] = 0
    #figure(figsize=(20,20))
    #imshow(display, cmap=cm.gray)
    # Re-grow the eroded regions with a watershed over the blurred image;
    # the watershed ridge lines become the membrane class.
    seeds = label_img.copy()
    seeds[np.nonzero(inside==0)] = 0
    grow = mahotas.cwatershed(255-blur_img, seeds)
    membrane = np.zeros(input_img.shape, dtype=uint8)
    membrane[0:-1,:] = diff(grow, axis=0) != 0;
    membrane[:,0:-1] = np.logical_or(membrane[:,0:-1], diff(grow, axis=1) != 0);
    #display[np.nonzero(membrane)] = 2
    #figure(figsize=(20,20))
    #imshow(display, cmap=cm.gray)
    # erode again to avoid all membrane
    non_membrane = mahotas.erode(inside, shrink_disc)
    # Border mask: keep sample centers far enough from the edges that a
    # rotated crop stays in bounds. Built once and reused (all slices are
    # assumed to share the first image's dimensions).
    if mask is None:
        mask = ones(input_img.shape, dtype=uint8)
        mask[:min_border,:] = 0;
        mask[-min_border:,:] = 0;
        mask[:,:min_border] = 0;
        mask[:,-min_border:] = 0;
    membrane_indices = np.nonzero(np.logical_and(membrane, mask))
    nonmembrane_indices = np.nonzero(np.logical_and(non_membrane, mask))
    # Cumulative quotas each split should have reached after this image.
    train_target = int32(float32(ntrain) / nsample_images * (imgi - zrad + 1))
    valid_target = int32(float32(nvalid) / nsample_images * (imgi - zrad + 1))
    test_target = int32(float32(ntest) / nsample_images * (imgi - zrad + 1))
    while train_i < train_target or valid_i < valid_target or test_i < test_target:
        membrane_sample = random.random() < membrane_proportion
        if membrane_sample:
            # NOTE(review): choice(len(...)) drawing a random index implies
            # numpy.random semantics (the stdlib random.choice expects a
            # sequence) -- consistent with the pylab-style namespace.
            randmem = random.choice(len(membrane_indices[0]))
            (samp_i, samp_j) = (membrane_indices[0][randmem], membrane_indices[1][randmem])
            membrane_type = "membrane"
        else:
            randnonmem = random.choice(len(nonmembrane_indices[0]))
            (samp_i, samp_j) = (nonmembrane_indices[0][randnonmem], nonmembrane_indices[1][randnonmem])
            membrane_type = "non-membrane"
        # rotate by a random amount (linear interpolation)
        rotation = random.random()*360
        samp_vol = zeros((imgd, imgd, zd), dtype=uint8)
        for zoffset in range(zd):
            # Cut a generous window, rotate it, then crop the central patch.
            sample_img = input_vol[samp_i-min_border:samp_i+min_border, samp_j-min_border:samp_j+min_border, zoffset]
            sample_img = scipy.misc.imrotate(sample_img, rotation)
            samp_vol[:,:,zoffset] = sample_img[min_border-imgrad:min_border+imgrad+1, min_border-imgrad:min_border+imgrad+1]
        if display_output:
            output = zeros((imgd, imgd * zd), dtype=uint8)
            for out_z in range(zd):
                output[:,out_z * imgd : (out_z + 1) * imgd] = samp_vol[:,:,out_z]
            figure(figsize=(5, 5 * zd))
            title(membrane_type)
            imshow(output, cmap=cm.gray)
        # Fill train first, then valid, then test, until each quota is met.
        if train_i < train_target:
            train_set[0][train_i,:] = samp_vol.ravel()
            train_set[1][train_i] = membrane_sample
            train_i = train_i + 1
            sample_type = 'train'
        elif valid_i < valid_target:
            valid_set[0][valid_i,:] = samp_vol.ravel()
            valid_set[1][valid_i] = membrane_sample
            valid_i = valid_i + 1
            sample_type = 'valid'
        elif test_i < test_target:
            test_set[0][test_i,:] = samp_vol.ravel()
            test_set[1][test_i] = membrane_sample
            test_i = test_i + 1
            sample_type = 'test'
        #print "Sampled {5} at {0}, {1}, {2}, r{3:.2f} ({4})".format(samp_i, samp_j, imgi, rotation, sample_type, membrane_type)
        sample_count = sample_count + 1
        if sample_count % 5000 == 0:
            print "{0} samples ({1}, {2}, {3}).".format(sample_count, train_i, valid_i, test_i)
print "Made a total of {0} samples ({1}, {2}, {3}).".format(sample_count, train_i, valid_i, test_i)
# Output name encodes patch size, membrane proportion and split sizes.
outfile = "MembraneSamples_{0}x{0}x{1}_mp{2:0.2f}_train{3}_valid{4}_test{5}.pkl.gz".format(imgd, zd, membrane_proportion, ntrain, nvalid, ntest)
print "Saving to {0}.".format(outfile)
#Save the results
# compresslevel=1 trades compression ratio for speed on these large arrays.
f = gzip.open(outfile,'wb', compresslevel=1)
cPickle.dump((train_set, valid_set, test_set),f)
f.close()
print "Saved."
| [
"chenlb@polarwin.cn"
] | chenlb@polarwin.cn |
5f99c906f9825caa252f0cca18c5639428ae94c1 | 6e615744c7065f39abee294bc0d1579c67de3151 | /custom_fields_app/migrations/0003_developer.py | b54089e118ae106a34213d2b8da2f68f7a803809 | [] | no_license | idelfrides/POC_Django_custom_fields | 1b9b6d92df7e09e0d52d27d85de3651d075b23b1 | 9fbc5af266454f3b01af73d15b081f83f776ef23 | refs/heads/master | 2020-08-15T16:37:19.712152 | 2019-12-03T21:05:12 | 2019-12-03T21:05:12 | 215,372,253 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | # Generated by Django 2.2.6 on 2019-10-14 23:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Recreate the Developer model (dropped in 0002_delete_developer)."""
    # NOTE(review): `initial = True` on a migration that depends on another
    # migration of the same app is unusual -- it is normally only set on the
    # first migration of an app. Confirm this is intentional.
    initial = True
    dependencies = [
        ('custom_fields_app', '0002_delete_developer'),
    ]
    operations = [
        migrations.CreateModel(
            name='Developer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=225)),
                ('role', models.CharField(max_length=225)),
                ('company', models.CharField(max_length=225)),
                ('gender', models.CharField(default='Male', max_length=6)),
                ('age', models.IntegerField()),
            ],
        ),
    ]
| [
"idelfridesjorgepapai@gmail.com"
] | idelfridesjorgepapai@gmail.com |
7f3da276e490e57aa84557f62d5dc1e52cd4b8f3 | a4843f673692e4c703d09b4a625ecf14d0c238ec | /bin/assembly/phyluce_assembly_get_bed_for_genome_enabled_taxon | c94c4043b1f252e60b9d992f993d35fa095e5420 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | carloliveros/phyluce | db0fd5f04b6eb1f362ce7a95ca9dde5514301ff8 | 1b227a51b655820154ecca4e9e52ac7722d7a71e | refs/heads/master | 2020-04-25T13:02:32.637099 | 2019-05-10T18:34:24 | 2019-05-10T18:34:24 | 136,492,179 | 0 | 0 | NOASSERTION | 2019-02-26T21:03:20 | 2018-06-07T14:52:12 | Python | UTF-8 | Python | false | false | 4,894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2015 Brant Faircloth || http://faircloth-lab.org/
All rights reserved.
This code is distributed under a 3-clause BSD license. Please see
LICENSE.txt for more information.
Created on 22 May 2015 08:39 CDT (-0500)
"""
import os
import re
import sqlite3
import argparse
from collections import defaultdict
from phyluce import lastz
from phyluce.helpers import is_dir, is_file, CreateDir, FullPaths
from phyluce.log import setup_logging
#import pdb
def get_args():
    """Get arguments from CLI"""
    # Declaration order below is also the order options appear in --help.
    parser = argparse.ArgumentParser(
        description="""Get a BED file for the given taxon using the probe.matches.sqlite database, the lastz file, and a taxon name"""
    )
    parser.add_argument(
        "--locus-db",
        required=True,
        type=is_file,
        help="""The probe.matches.sqlite database"""
    )
    parser.add_argument(
        "--lastz-file",
        required=True,
        type=is_file,
        help="""The lastz file to use"""
    )
    parser.add_argument(
        "--taxon-name",
        required=True,
        type=str,
        help="""The taxon name to search for in probe.matches.sqlite"""
    )
    parser.add_argument(
        "--output",
        required=True,
        help="""The output BED file to create"""
    )
    parser.add_argument(
        "--verbosity",
        type=str,
        choices=["INFO", "WARN", "CRITICAL"],
        default="INFO",
        help="""The logging level to use."""
    )
    parser.add_argument(
        "--log-path",
        action=FullPaths,
        type=is_dir,
        default=None,
        help="""The path to a directory to hold logs."""
    )
    # Default pattern maps probe names like "uce-123_p4" to locus "uce-123"
    # (first capture group).
    parser.add_argument(
        "--regex",
        type=str,
        default="^(uce-\d+)(?:_p\d+.*)",
        help="""A regular expression to apply to the probe names for replacement [default='^(uce-\d+)(?:_p\d+.*)'].""",
    )
    return parser.parse_args()
def remove_duplicates_from(c, organism, matches):
    """Drop UCE loci whose contig is matched by more than one UCE.

    Queries the (optionally extended) ``match_map`` table for the given UCE
    names, groups them by the contig (node) they hit, and keeps only those
    UCEs whose contig is hit by a single UCE.

    Args:
        c: sqlite3 cursor on probe.matches.sqlite (with the extended
            database attached, when applicable).
        organism: taxon column name; a trailing '*' selects the attached
            "extended" database.
        matches: iterable of UCE locus names to check.

    Returns:
        list of UCE names that each map to exactly one contig.
    """
    # A trailing '*' on the taxon name means "use the attached extended db".
    if organism.endswith('*'):
        table = "extended.match_map"
        column = organism.rstrip('*')
    else:
        table = "match_map"
        column = organism
    # NOTE(review): the query is built by string interpolation as in the
    # original; the UCE names come from our own database, not user input.
    in_list = ','.join("'{0}'".format(i) for i in matches)
    query = "SELECT uce, {0} FROM {1} WHERE uce in ({2})".format(column, table, in_list)
    c.execute(query)
    data = c.fetchall()
    m = defaultdict(list)
    for uce, hit in data:
        # hit looks like "node123(...)": group by the node name only.
        node = hit.split('(')[0]
        m[node].append(uce)
    # .items()/.values() (not the Python-2-only .iteritems()) keeps this
    # portable across Python 2 and 3.
    return [v[0] for v in m.values() if len(v) <= 1]
def get_all_matches_by_organism(c, organisms):
    """Return {organism: [single-copy UCE names]} for each requested taxon."""
    results = {}
    for org in organisms:
        # A trailing '*' on the taxon name selects the attached extended db.
        table = "extended.matches" if org.endswith('*') else "matches"
        c.execute("SELECT uce FROM {0} WHERE {1} = 1".format(table, org.rstrip('*')))
        hits = {row[0] for row in c.fetchall()}
        # We've removed duplicate UCE (target) matches above; now also drop
        # duplicate node (query) matches, which reappear as the data change.
        results[org] = remove_duplicates_from(c, org, hits)
    return results
def new_get_probe_name(header, regex):
    """Return the bare locus name from a probe header.

    The locus is the first capture group of *regex*, e.g. a header such as
    "uce-1234_p5" yields "uce-1234" with the default probe-name pattern.
    """
    return re.search(regex, header).group(1)
def main():
    """Write a BED track of UCE probe matches for one taxon."""
    args = get_args()
    # get a list of the loci for the taxon for which we want BED files
    # setup logging
    log, my_name = setup_logging(args)
    # connect to the database
    conn = sqlite3.connect(args.locus_db)
    c = conn.cursor()
    # Single-copy UCE loci recorded for this taxon in the matches database.
    loci = get_all_matches_by_organism(c, [args.taxon_name])
    loci_dict = {locus: set() for locus in loci[args.taxon_name]}
    # Collect "contig:span" locations for each locus from the lastz hits.
    for match in lastz.Reader(args.lastz_file):
        target_name = new_get_probe_name(match.name2, args.regex)
        if target_name in loci_dict.keys():
            # name1 looks like "...|uce:<locus>|contig:<id>|match:<span>|...".
            header_dict = dict([tuple(i.split(":")) for i in match.name1.split("|")[1:]])
            loci_dict[header_dict['uce']].update(["{}:{}".format(header_dict['contig'], header_dict['match'])])
    with open(args.output, 'w') as outfile:
        outfile.write('''track name="uce-v-{0}" description="UCE probe matches to {0}" visibility=2 itemRgb="On"\n'''.format(args.taxon_name))
        # NOTE(review): .iteritems() is Python-2-only; this needs .items()
        # if the script is ever ported to Python 3.
        for locus, location in loci_dict.iteritems():
            for element in list(location):
                scaffold, pos = element.split(":")
                start, end = [int(i) for i in pos.split('-')]
                outfile.write("{0}\t{1}\t{2}\t{3}\t1000\t+\t{1}\t{2}\t100,149,237\n".format(scaffold, start, end, locus))
    text = " Completed {} ".format(my_name)
    log.info(text.center(65, "="))
main()
| [
"brant@faircloth-lab.org"
] | brant@faircloth-lab.org | |
9fd96a9913457074ce2f27981629d2f1cb9b2fec | df1482f251a2d801e7b77bbd1d5c9a408e71e7ef | /05_visualization/plot_blastm7.py | 19343ab1300daf72018d878749a86f77091a2fb6 | [] | no_license | silvewheat/bioNotes | a34e1aa79df42799d83b5c4f3e1b2a583360fc1a | d96c4710c1f620c18fdf92fe21f38d73671b9580 | refs/heads/master | 2021-12-30T01:52:40.925861 | 2021-12-27T14:11:30 | 2021-12-27T14:11:30 | 113,825,423 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,232 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 8 01:32:47 2018
@author: YudongCai
@Email: yudongcai216@gmail.com
"""
import click
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
def load_blastout(blastoutfile):
    """Parse a tabular BLAST report (outfmt '7 std qcovs qcovhsp qcovus')
    into a DataFrame with one row per HSP.
    """
    records = []
    with open(blastoutfile) as handle:
        for raw in handle:
            # Lines starting with '#' are outfmt-7 comment headers.
            if raw[0] == '#':
                continue
            fields = raw.strip().split()
            qid, sid = fields[0], fields[1]
            ident = float(fields[2])
            alnlen = int(fields[3])
            qstart, qend, sstart, send = (int(v) for v in fields[6:10])
            # qcovs is the third field from the end of the extended format.
            qcov = int(fields[-3])
            records.append([qid, sid, ident, alnlen, qstart, qend, sstart, send, qcov])
    return pd.DataFrame(
        records,
        columns=['qid', 'sid', 'ident', 'alnlen', 'qstart', 'qend', 'sstart', 'send', 'qcov'],
    )
def load_querylen(querylen):
    """Read a two-column whitespace-separated file of "query_id length".

    Args:
        querylen: path to a file with one "<query id> <length>" pair per line.

    Returns:
        dict mapping query id (str) to its length (int).
    """
    lendict = {}
    # Context manager closes the handle deterministically (the original
    # `open(...).readlines()` leaked it until garbage collection).
    with open(querylen) as infile:
        for line in infile:
            fields = line.split()
            lendict[fields[0]] = int(fields[1])
    return lendict
def plot(tmpdf, sid, lendict, outfile):
    """Draw one subject (reference) and all of its query alignments.

    The subject interval is drawn as a bar at y=[7,8], each query as a bar
    at y=[4,5] (labelled with its id and total coverage), and every HSP as
    a polygon connecting its query span to its subject span, colored by
    percent identity. The figure is saved to `outfile`.
    """
    fig, ax = plt.subplots(1, 1, figsize=(25,3))
    # Draw the subject: the full extent covered by any hit.
    sstart = tmpdf[['sstart', 'send']].min().min()
    send = tmpdf[['sstart', 'send']].max().max()
    slen = send - sstart + 1
    ax.fill_between([sstart, send], 7, 8)
    # Compute the query drawing scale and the gap between adjacent queries.
    query_totallen = 0
    n_query = 0
    for query in tmpdf.sort_values('sstart')['qid'].unique():
        query_totallen += lendict[query]
        n_query += 1
    qscale = (slen * 0.95) / query_totallen # align queries to the subject, with 5% of the subject length as total gap
    gap = (slen - query_totallen * qscale) / (n_query-1) if n_query > 1 else 0
    # Draw each query bar, left to right in order of subject start.
    offset = sstart
    qoffset = {}
    for query in tmpdf.sort_values('sstart')['qid'].unique():
        qoffset[query] = offset
        xrange = [offset, offset+(lendict[query]*qscale)]
        ax.fill_between(xrange, 4, 5)
        plt.text(offset+(lendict[query]*qscale/2), 4.5, query, horizontalalignment='center')
        qcov = np.unique(tmpdf.loc[tmpdf['qid']==query, 'qcov'])[0]
        plt.text(offset+(lendict[query]*qscale/2), 4, f'{qcov}%', horizontalalignment='center')
        offset = offset + (lendict[query]*qscale) + gap
    # Draw the alignment polygons linking query spans to subject spans.
    # NOTE(review): the loop variables below shadow the outer sstart/send;
    # both are only used inside the loop afterwards, so this is safe today,
    # but fragile if the function is extended.
    patches = []
    idents = []
    for qid, qstart, qend, sstart, send, ident, alnlen in tmpdf[['qid', 'qstart', 'qend', 'sstart', 'send', 'ident', 'alnlen']].values:
        offset = qoffset[qid]
        qalnlen = (qend-qstart+1)*qscale
        qstart_scaled = (qstart-1)*qscale
        polygon = Polygon([[offset+qstart_scaled, 5], [offset+qstart_scaled+qalnlen, 5], [send, 7], [sstart, 7]], closed=True)
        patches.append(polygon)
        idents.append(ident)
    # Color the polygons by percent identity and add a colorbar.
    p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.9)
    colors = idents
    p.set_array(np.array(colors))
    ax.add_collection(p)
    fig.colorbar(p, ax=ax)
    ax.set_xlabel(sid)
    plt.subplots_adjust(bottom=0.2)
    plt.savefig(outfile, dpi=300)
    plt.close()
# CLI entry point. The option help strings are user-facing (Chinese) and
# are kept verbatim; they describe: the blast output file, a two-column
# "queryid<TAB>querylen" file, the minimum per-subject query coverage /
# identity / alignment length filters, and the output image prefix.
@click.command()
@click.option('--blastoutfile', help='blast结果')
@click.option('--querylen', help='两列,queryid\tquerylen')
@click.option('--minqcov', help='某个subject至少有一个query比对上去的总%coverage达到这个值才被画出来, 默认30', default=30, type=int)
@click.option('--minident', help='%相似度达到这个值的比对结果才会被画出来, 默认0', default=0, type=int)
@click.option('--minaln', help='长度达到这个值的比对结果才被画出来, 默认100', default=100, type=int)
@click.option('--outprefix', help='输出图片前缀')
def main(blastoutfile, querylen, minqcov, minident, minaln, outprefix):
    """
    画blast输出结果
    '7 std qcovs qcovhsp qcovus'
    """
    # (The docstring doubles as click's command help text and is kept
    # verbatim; it means "plot blast output results".)
    print(__doc__)
    print(blastoutfile)
    df = load_blastout(blastoutfile)
    lendict = load_querylen(querylen)
    # One figure per subject sequence that passes the filters.
    for sid in df['sid'].unique():
        outfile = f'{outprefix}_{sid}.pdf'
        tmpdf = df.loc[(df['sid']==sid) &
                       (df['ident']>=minident) &
                       (df['alnlen']>=minaln), :]
        # Require at least one query whose total coverage reaches minqcov.
        if (tmpdf.shape[0] > 0) and (tmpdf['qcov'].max()) >= minqcov:
            plot(tmpdf, sid, lendict, outfile)
if __name__ == '__main__':
main()
| [
"silverwheat@163.com"
] | silverwheat@163.com |
2accf3a6530ef8af910cb9a427c3290c0476a0d0 | fe3265b72e691c6df8ecd936c25b6d48ac33b59a | /homeassistant/components/enphase_envoy/const.py | cd3235f1be5c745c1a510fe8bb3bdd5f646f07de | [
"Apache-2.0"
] | permissive | bdraco/home-assistant | dcaf76c0967783a08eec30ce704e5e9603a2f0ca | bfa315be51371a1b63e04342a0b275a57ae148bd | refs/heads/dev | 2023-08-16T10:39:15.479821 | 2023-02-21T22:38:50 | 2023-02-21T22:38:50 | 218,684,806 | 13 | 7 | Apache-2.0 | 2023-02-21T23:40:57 | 2019-10-31T04:33:09 | Python | UTF-8 | Python | false | false | 2,491 | py | """The enphase_envoy component."""
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.const import Platform, UnitOfEnergy, UnitOfPower
# Integration domain and hass.data dictionary keys.
DOMAIN = "enphase_envoy"
PLATFORMS = [Platform.SENSOR]
COORDINATOR = "coordinator"
NAME = "name"
# Sensor entities read from the Envoy API: instantaneous power plus daily /
# seven-day / lifetime energy, for both production and consumption. The
# seven-day values use TOTAL (not TOTAL_INCREASING) because a rolling
# seven-day window can decrease over time.
SENSORS = (
    SensorEntityDescription(
        key="production",
        name="Current Power Production",
        native_unit_of_measurement=UnitOfPower.WATT,
        state_class=SensorStateClass.MEASUREMENT,
        device_class=SensorDeviceClass.POWER,
    ),
    SensorEntityDescription(
        key="daily_production",
        name="Today's Energy Production",
        native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
        state_class=SensorStateClass.TOTAL_INCREASING,
        device_class=SensorDeviceClass.ENERGY,
    ),
    SensorEntityDescription(
        key="seven_days_production",
        name="Last Seven Days Energy Production",
        native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
        state_class=SensorStateClass.TOTAL,
        device_class=SensorDeviceClass.ENERGY,
    ),
    SensorEntityDescription(
        key="lifetime_production",
        name="Lifetime Energy Production",
        native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
        state_class=SensorStateClass.TOTAL_INCREASING,
        device_class=SensorDeviceClass.ENERGY,
    ),
    SensorEntityDescription(
        key="consumption",
        name="Current Power Consumption",
        native_unit_of_measurement=UnitOfPower.WATT,
        state_class=SensorStateClass.MEASUREMENT,
        device_class=SensorDeviceClass.POWER,
    ),
    SensorEntityDescription(
        key="daily_consumption",
        name="Today's Energy Consumption",
        native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
        state_class=SensorStateClass.TOTAL_INCREASING,
        device_class=SensorDeviceClass.ENERGY,
    ),
    SensorEntityDescription(
        key="seven_days_consumption",
        name="Last Seven Days Energy Consumption",
        native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
        state_class=SensorStateClass.TOTAL,
        device_class=SensorDeviceClass.ENERGY,
    ),
    SensorEntityDescription(
        key="lifetime_consumption",
        name="Lifetime Energy Consumption",
        native_unit_of_measurement=UnitOfEnergy.WATT_HOUR,
        state_class=SensorStateClass.TOTAL_INCREASING,
        device_class=SensorDeviceClass.ENERGY,
    ),
)
| [
"noreply@github.com"
] | bdraco.noreply@github.com |
d7e1e5991c2847b04917d61442775dfb53cb4335 | 6719aa2e0598ec5bb9bb6bfd47390583b91dd015 | /90/93.py | 59df648a2df79bb202f7c276369e211e24946444 | [] | no_license | huangyuzhen/let | 2f8ed6f41792a4bedf1b808d6b1c21e99f406da9 | 653d8a5aee803d2b414d0135f791a8f9d83bb272 | refs/heads/master | 2020-09-05T05:22:50.220859 | 2020-02-16T12:15:39 | 2020-02-16T12:15:39 | 219,995,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | class Solution(object):
def backtrack(self, s, r, t = 0):
if t == 4:
if s == '':
self.result.add('.'.join(r))
return
if s == '': return
if t == 3 and int(s) > 255: return
if s[0] == '0':
r.append('0')
self.backtrack(s[1:], r, t+1)
r.pop()
return
length = min(3, len(s))
for i in range(length):
segment = s[:i+1]
if int(segment) > 255:
break
r.append(segment)
self.backtrack(s[i+1:], r, t+1)
r.pop()
if s[0] == '0':
break
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
self.result = set()
self.backtrack(s, [])
return list(self.result)
# Ad-hoc smoke test. Fix: the original first assigned "25525511135" to `s`
# and immediately overwrote it, so that sample was never exercised; the dead
# assignment has been removed.
s = "0000"
solution = Solution()
x = solution.restoreIpAddresses(s)
print(x)
"huangyuzhen@earch"
] | huangyuzhen@earch |
1ebab44da97346e9f56af6d43430ba7a99a1bcb2 | 545e73186a7c95182957d77a4958c9832180ef46 | /wikigeolinks/config/routing.py | 222d0165d8c3117fe7c7baa91458b47481f69c29 | [] | no_license | kailIII/wikigeolinks | 038368e3e9c493698d6acef0bc7edfa415f9b958 | 3fd45edcbda62ce3b5b5fe12d5ac8c687614d1bf | refs/heads/master | 2021-01-22T21:46:03.150845 | 2012-03-07T22:41:20 | 2012-03-07T22:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | """Routes configuration
The more specific and detailed routes should be defined first so they
may take precedent over the more generic routes. For more information
refer to the routes manual at http://routes.groovie.org/docs/
"""
from routes import Mapper
def make_map(config):
    """Create, configure and return the routes Mapper.

    More specific routes are connected first so they take precedence over
    the generic resource routes (see http://routes.groovie.org/docs/).

    :param config: Pylons config object providing controller paths and debug flag
    :return: the configured ``routes.Mapper`` instance
    """
    # Fix: the local was named ``map``, shadowing the builtin.
    route_map = Mapper(directory=config['pylons.paths']['controllers'],
                       always_scan=config['debug'])
    route_map.minimization = False
    route_map.explicit = False

    # The ErrorController route (handles 404/500 error pages); it should
    # likely stay at the top, ensuring it can always be resolved
    route_map.connect('/error/{action}', controller='error')
    route_map.connect('/error/{action}/{id}', controller='error')

    # CUSTOM ROUTES HERE
    route_map.connect("/articles/{id}/linked/count", controller="articles", action="get_links_count")
    route_map.connect("/articles/{id}/linked", controller="articles", action="get_linked")
    route_map.connect("/articles/count", controller="articles", action="count")
    route_map.resource("article", "articles")

    #route_map.connect('/{controller}/{action}')
    #route_map.connect('/{controller}/{action}/{id}')

    return route_map
| [
"amercadero@gmail.com"
] | amercadero@gmail.com |
8c594d903b2a9af9128b6bd101fce9ee33ab90f4 | c9fde4576216a22e8d5711bbe97adda1aafa2f08 | /model-optimizer/extensions/ops/correlation.py | b61ed48d1a2c4d6615dee1f37325667b54aaa720 | [
"Apache-2.0"
] | permissive | dliang0406/dldt | c703d6a837de3f996528fc8a9543f9530b23342c | d9b10abcebafe8b10ba81e09e433de7a366c072c | refs/heads/2018 | 2020-04-03T08:24:47.723353 | 2018-10-29T07:58:05 | 2018-10-29T07:58:05 | 155,132,108 | 3 | 1 | Apache-2.0 | 2019-10-10T08:39:46 | 2018-10-29T01:03:54 | C++ | UTF-8 | Python | false | false | 2,397 | py | """
Copyright (c) 2017-2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from math import ceil
import networkx as nx
# Concat infer : N - number of inputs to concat
# axis - dimension number for tensors concatenation
import numpy as np
from mo.graph.graph import Node
from mo.ops.op import Op
class CorrelationOp(Op):
    # Model Optimizer wrapper for the FlowNet-style Correlation layer.
    op = 'Correlation'

    def __init__(self, graph: nx.MultiDiGraph, attrs: dict):
        # Register the op's type/name and its shape-inference callback.
        mandatory_props = {
            'type': __class__.op,
            'op': __class__.op,
            'infer': CorrelationOp.corr_infer
        }
        super().__init__(graph, mandatory_props, attrs)

    def supported_attrs(self):
        # Attributes carried over from the original framework layer definition.
        return [
            'pad',
            'kernel_size',
            'max_displacement',
            'stride_1',
            'stride_2',
            'single_direction',
            'do_abs',
            'correlation_type'
        ]

    @staticmethod
    def corr_infer(node: Node):
        # Infer the NCHW output shape of a Correlation node from its first
        # input and the layer attributes (mirrors the reference Caffe math).
        outn = node.out_node(0)
        inn = node.in_node(0)
        outn.shape = np.zeros(4, dtype=int)
        # Batch dimension passes through unchanged.
        outn.shape[0] = inn.shape[0]
        bottomchannels = inn.shape[1]  # NOTE(review): computed but never used
        # NOTE(review): padding is applied to the width only; the height is
        # taken as-is — confirm this asymmetry is intentional upstream.
        paddedbottomheight = inn.shape[2]
        paddedbottomwidth = inn.shape[3] + 2 * node.pad
        kernel_radius_ = (node.kernel_size - 1) / 2;
        border_size_ = node.max_displacement + kernel_radius_
        # Spatial output size after trimming the displacement/kernel margins.
        outn.shape[3] = ceil((float)(paddedbottomwidth - border_size_ * 2) / node.stride_1)
        outn.shape[2] = ceil((float)(paddedbottomheight - kernel_radius_ * 2) / node.stride_1)
        neighborhood_grid_radius_ = node.max_displacement / node.stride_2
        # One output channel per displacement in the (possibly one-sided) grid.
        if node.single_direction != 0:
            neighborhood_grid_width_ = neighborhood_grid_radius_ + 1
        else:
            neighborhood_grid_width_ = neighborhood_grid_radius_ * 2 + 1
        outn.shape[1] = neighborhood_grid_width_ * neighborhood_grid_width_
| [
"openvino_pushbot@intel.com"
] | openvino_pushbot@intel.com |
09eb40d2c0197a245f5b0fb3750928ff087087f0 | abc72a2f2072ab7a5a338e41d81c354324943b09 | /MC 102 (Exemplos de aula)/testando.py | ad0d47f312098fea5134485a04aa4a2969c68d7b | [] | no_license | gigennari/mc102 | a3d39fd9a942c97ef477a9b59d7955f4269b202a | fce680d5188a8dfb0bc1832d6f430cbcaf68ef55 | refs/heads/master | 2023-04-05T01:40:58.839889 | 2020-07-27T20:33:56 | 2020-07-27T20:33:56 | 354,130,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | """if i % 2 == 1:
lista_conceitos.append(dado)
print(lista_conceitos)
"""
lista = ['tarefa0', 'A', 'tarefa1', 'C', 'tarefa2', 'C', 'faltou']
lista_conceitos = []
for i, dado in enumerate(lista):
while dado != 'presente':
print('não é presente')
| [
"g198010@dac.unicamp.br"
] | g198010@dac.unicamp.br |
9381ae1c0554ac84fff0a3d537df28f32ff61d4c | ab2f1f18f64d9f2d49a4eea5c6a78ee1275662de | /trex_client/external_libs/scapy-2.3.1/python3/scapy/as_resolvers.py | f04322b8a23330e645c906b21f13d9105e7dfda5 | [
"MIT"
] | permissive | alwye/trex-http-proxy | d09d7fabe60add4a445e5ceb71f5f2a6d209e0a0 | e30f5af03aaaad518b5def6e1804c3741dd5d0c6 | refs/heads/master | 2021-08-16T22:32:56.643253 | 2021-06-08T19:52:35 | 2021-06-08T19:52:35 | 60,734,923 | 4 | 3 | MIT | 2021-06-08T19:39:18 | 2016-06-08T22:27:35 | Python | UTF-8 | Python | false | false | 3,423 | py | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Resolve Autonomous Systems (AS).
"""
import socket
from .config import conf
class AS_resolver:
    """Resolve IP addresses to (ip, asn, description) via a whois server.

    Subclasses configure ``server``/``options``; queries are sent one IP at
    a time over a persistent connection (``-k`` keep-alive option).
    """
    server = None
    # Fix: the default was the *str* "-k", but _start() concatenates it with
    # b"\n" and sends it over a bytes socket, raising TypeError on Python 3.
    options = b"-k"

    def __init__(self, server=None, port=43, options=None):
        if server is not None:
            self.server = server
        self.port = port
        if options is not None:
            self.options = options

    def _start(self):
        """Open the whois connection and send the session options."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((self.server, self.port))
        if self.options:
            self.s.send(self.options + b"\n")
            self.s.recv(8192)  # discard the server's acknowledgement/banner

    def _stop(self):
        """Close the whois connection."""
        self.s.close()

    def _parse_whois(self, txt):
        """Extract (asn, description) from a raw whois reply.

        Stops at the first record that provides both an origin and a
        non-empty description.
        """
        asn, desc = None, b""
        for l in txt.splitlines():
            if not asn and l.startswith(b"origin:"):
                asn = l[7:].strip().decode('utf-8')
            if l.startswith(b"descr:"):
                if desc:
                    desc += br"\n"
                desc += l[6:].strip()
            if asn is not None and desc.strip():
                desc = desc.strip().decode('utf-8')
                break
        return asn, desc

    def _resolve_one(self, ip):
        """Query a single IP on the already-open connection."""
        # Simplified from b"".join([ip.encode('ascii')]) — a no-op join.
        self.s.send(ip.encode('ascii') + b"\n")
        x = b""
        # Read until the server signals the end of the reply.
        while not (b"%" in x or b"source" in x):
            x += self.s.recv(8192)
        asn, desc = self._parse_whois(x)
        return ip, asn, desc

    def resolve(self, *ips):
        """Resolve all *ips*; entries without a resolvable ASN are dropped."""
        self._start()
        ret = []
        for ip in ips:
            ip, asn, desc = self._resolve_one(ip)
            if asn is not None:
                ret.append((ip, asn, desc))
        self._stop()
        return ret
class AS_resolver_riswhois(AS_resolver):
    """AS resolver backed by RIPE's RIS whois service."""
    server = "riswhois.ripe.net"
    options = b"-k -M -1"
class AS_resolver_radb(AS_resolver):
    """AS resolver backed by the RADb routing registry whois service."""
    server = "whois.ra.net"
    options = b"-k -M"
class AS_resolver_cymru(AS_resolver):
    """AS resolver using Team Cymru's bulk IP-to-ASN whois interface."""
    server = "whois.cymru.com"
    options = None
    def resolve(self, *ips):
        """Resolve all *ips* in a single bulk query (begin/.../end)."""
        ASNlist = []
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.server,self.port))
        # One bulk request: "begin", one IP per line, then "end".
        s.send(b"begin\r\n"+b"\r\n".join([ i.encode('ascii') for i in ips])+b"\r\nend\r\n")
        r = b""
        # Read until the server closes the connection (empty recv).
        while 1:
            l = s.recv(8192)
            if l == b"":
                break
            r += l
        s.close()
        # Skip the header line; data lines look like "asn | ip | description".
        for l in r.splitlines()[1:]:
            if b"|" not in l:
                continue
            asn,ip,desc = [ i.decode('ascii') for i in map(bytes.strip, l.split(b"|")) ]
            if asn == "NA":
                # "NA" means Cymru found no announcement for this IP.
                continue
            asn = int(asn)
            ASNlist.append((ip,asn,desc))
        return ASNlist
class AS_resolver_multi(AS_resolver):
    """Chain several AS resolvers, falling back for IPs left unresolved."""
    resolvers_list = ( AS_resolver_cymru(),AS_resolver_riswhois(),AS_resolver_radb() )

    def __init__(self, *reslist):
        # An explicitly supplied resolver chain replaces the default one.
        if reslist:
            self.resolvers_list = reslist

    def resolve(self, *ips):
        """Try each backend in turn; only still-unresolved IPs are retried."""
        pending = ips
        results = []
        for backend in self.resolvers_list:
            answers = backend.resolve(*pending)
            answered = [ip for ip, asn, desc in answers]
            pending = [ip for ip in pending if ip not in answered]
            results += answers
        return results
# Install the chained resolver as scapy's default AS resolver.
conf.AS_resolver = AS_resolver_multi()
| [
"alzverev@cisco.com"
] | alzverev@cisco.com |
2117661416d13323b3deec9077547e0aeea00cae | d9a4121ac2872bbe3f76564caebe6818dc5888a7 | /mythx_models/base.py | 02d39eb23e0658afe2292faada3728d3be159fec | [
"MIT"
] | permissive | s0b0lev/mythx-models | ecb07abada43eb9c26929bfd6cd76dca9105207f | 0fc14fef9e41a68a7d97e0bb170fd0eca5693d9a | refs/heads/master | 2020-08-20T19:22:14.320454 | 2019-10-11T08:32:04 | 2019-10-11T08:32:04 | 216,057,981 | 0 | 0 | MIT | 2019-10-18T15:47:10 | 2019-10-18T15:47:09 | null | UTF-8 | Python | false | false | 2,797 | py | """This module contains the base domain model."""
import abc
import json
import logging
import jsonschema
from mythx_models.exceptions import ValidationError
LOGGER = logging.getLogger(__name__)
class JSONSerializable(abc.ABC):
    """Interface for objects that round-trip through JSON strings.

    Concrete subclasses implement only the dict conversions; the JSON
    (de)serialization is layered on top of them here.
    """

    @classmethod
    def from_json(cls, json_str: str):
        """Deserialize a JSON string into a domain model instance.

        Internally delegates to :code:`from_dict`.

        :param json_str: the JSON string to deserialize
        :raises ValidationError: if the string is not valid JSON
        :return: the concrete deserialized domain model instance
        """
        try:
            payload = json.loads(json_str)
        except json.JSONDecodeError as decode_err:
            raise ValidationError(decode_err)
        return cls.from_dict(payload)

    def to_json(self):
        """Serialize this instance to a JSON string via :code:`to_dict`.

        :return: the serialized domain model JSON string
        """
        as_dict = self.to_dict()
        return json.dumps(as_dict)

    @classmethod
    @abc.abstractmethod
    def from_dict(cls, d: dict):
        """Construct an instance of the domain model from a Python dict.

        :param d: the dict instance to deserialize
        """
        pass  # pragma: no cover

    @abc.abstractmethod
    def to_dict(self):
        """Return a Python dict holding this instance's serialized data."""
        pass  # pragma: no cover
class BaseModel(JSONSerializable, abc.ABC):
    """Abstract base for objects describing responses from the MythX API."""

    schema = None

    @classmethod
    def validate(cls, candidate):
        """Check *candidate* against the class-level JSON schema.

        A class without a schema is assumed to carry no meaningful payload
        (e.g. an empty logout response); in that case a warning is logged
        and no validation takes place.

        :param candidate: the candidate dict to check against the schema
        :raises ValidationError: if schema validation fails
        :return: None
        """
        if cls.schema is None:
            LOGGER.warning("Cannot validate {} without a schema".format(cls.__name__))
            return
        try:
            jsonschema.validate(candidate, cls.schema)
        except jsonschema.ValidationError as schema_err:
            raise ValidationError(schema_err)
| [
"dmuhs@protonmail.ch"
] | dmuhs@protonmail.ch |
898b43374ea9ea4fde31ca60ec3d23fe0f2f9d83 | fa0bd730981a4a7333e7858c03e2a16c75e9cf5c | /Chapter 4/cifar10.py | 5cc85429f23def0681e8baf01a41efc95eff3ca4 | [
"MIT"
] | permissive | PacktPublishing/Deep-Learning-with-TensorFlow-2-and-Keras | 4cb5f7249dcd1efe6ea5a5263fb862240ce303bb | e23d2b4a4292386b70977473805acb2f93ef16ca | refs/heads/master | 2023-02-13T04:04:57.531730 | 2023-02-07T19:23:47 | 2023-02-07T19:23:47 | 228,759,428 | 311 | 214 | MIT | 2021-06-01T14:06:06 | 2019-12-18T04:42:07 | Jupyter Notebook | UTF-8 | Python | false | false | 1,797 | py | import tensorflow as tf
from tensorflow.keras import datasets, layers, models, optimizers
# CIFAR_10 is a set of 60K images 32x32 pixels on 3 channels
IMG_CHANNELS = 3
IMG_ROWS = 32
IMG_COLS = 32
#constant
BATCH_SIZE = 128
EPOCHS = 20
CLASSES = 10             # number of output classes in CIFAR-10
VERBOSE = 1              # Keras fit/evaluate verbosity level
VALIDATION_SPLIT = 0.2   # fraction of training data held out for validation
OPTIM = tf.keras.optimizers.RMSprop()
#define the convnet
def build(input_shape, classes):
    """Assemble the small convnet: one conv/pool block plus a dense head."""
    stack = [
        layers.Convolution2D(32, (3, 3), activation='relu',
                             input_shape=input_shape),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Dropout(0.25),
        layers.Flatten(),
        layers.Dense(512, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(classes, activation='softmax'),
    ]
    model = models.Sequential()
    for layer in stack:
        model.add(layer)
    return model
# data: shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = datasets.cifar10.load_data()

# normalize
# Pixel values arrive as 0..255 integers; scale to 0..1 floats.
X_train, X_test = X_train / 255.0, X_test / 255.0

# convert to categorical
# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(y_train, CLASSES)
y_test = tf.keras.utils.to_categorical(y_test, CLASSES)

model=build((IMG_ROWS, IMG_COLS, IMG_CHANNELS), CLASSES)
model.summary()

# use TensorBoard, princess Aurora!
callbacks = [
  # Write TensorBoard logs to `./logs` directory
  tf.keras.callbacks.TensorBoard(log_dir='./logs')
]

# train
model.compile(loss='categorical_crossentropy', optimizer=OPTIM,
    metrics=['accuracy'])

model.fit(X_train, y_train, batch_size=BATCH_SIZE,
    epochs=EPOCHS, validation_split=VALIDATION_SPLIT,
    verbose=VERBOSE, callbacks=callbacks)
# Final evaluation on the held-out test set.
score = model.evaluate(X_test, y_test,
                     batch_size=BATCH_SIZE, verbose=VERBOSE)
print("\nTest score:", score[0])
print('Test accuracy:', score[1])
"noreply@github.com"
] | PacktPublishing.noreply@github.com |
9898760709ef15969b84d5e10bcbbaeed17caaf9 | 38c004d5d25687abbcbf8219f1b6c47380ef9f28 | /tests/circrequests/steps/validate_job_preconditions_step_test.py | 0a2d496fa731ddd355f4120f819a3344a8b96183 | [
"Apache-2.0"
] | permissive | umd-lib/caia | 25ff8276eaf0b13fd5244a753216496c40a4fa4d | b6fc6b7ceb1987e67d593d2a19d64f16645f6f7b | refs/heads/develop | 2023-07-16T02:35:39.145431 | 2021-08-12T17:27:25 | 2021-08-12T17:27:25 | 263,131,105 | 0 | 1 | Apache-2.0 | 2021-03-26T13:48:33 | 2020-05-11T18:58:06 | Python | UTF-8 | Python | false | false | 1,811 | py | from caia.circrequests.circrequests_job_config import CircrequestsJobConfig
from caia.circrequests.steps.validate_job_preconditions import ValidateJobPreconditions
def test_validate_preconditions_returns_true_if_all_preconditions_are_met():
    """A fully populated circrequests config passes the precondition check."""
    settings = {
        'caiasoft_api_key': 'SECRET_CAIASOFT_API_KEY',
        'source_url': 'http://example.com/source',
        'dest_url': 'http://example.org/dest',
        'log_dir': '/tmp/',
        'storage_dir': '/tmp/',
        'last_success_lookup': 'tests/storage/circrequests/circrequests_last_success.txt',
        'last_success_filepath': 'etc/circrequests_FIRST.json',
        'denied_keys_filepath': 'tests/storage/circrequests/circrequests_denied_keys.json',
        'denied_items_wait_interval': '604800'
    }
    step = ValidateJobPreconditions(CircrequestsJobConfig(settings))
    outcome = step.execute()
    assert outcome.was_successful() is True
def test_validate_preconditions_returns_false_if_some_preconditions_are_not_met():
    """A config missing a required key (caiasoft_api_key) fails validation."""
    settings = {
        # 'caiasoft_api_key' deliberately omitted
        'source_url': 'http://example.com/source',
        'dest_url': 'http://example.org/dest',
        'log_dir': '/tmp/',
        'storage_dir': '/tmp/',
        'last_success_lookup': 'tests/storage/circrequests/circrequests_last_success.txt',
        'last_success_filepath': 'etc/circrequests_FIRST.json',
        'denied_keys_filepath': 'tests/storage/circrequests/circrequests_denied_keys.json',
        'denied_items_wait_interval': '604800'
    }
    step = ValidateJobPreconditions(CircrequestsJobConfig(settings))
    outcome = step.execute()
    assert outcome.was_successful() is False
| [
"dsteelma@umd.edu"
] | dsteelma@umd.edu |
1ba7fb48e0a08d6dcf87b5d1d7e13f5f5d205d3c | d8d1daed8162cc70a989d696adece741fad39632 | /amplify/agent/__init__.py | 105155c78d8eee608cd86fed8c377ab6b4ff792b | [
"BSD-2-Clause"
] | permissive | sakomws/nginx-amplify-agent | 16085a236f5c3eaf0e4b53cbb935c4fab3ce079b | e33eb8724a7fc06cc44137a23653da11c8c07e82 | refs/heads/master | 2020-04-03T08:10:14.812192 | 2018-09-26T13:49:52 | 2018-09-26T13:49:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | # -*- coding: utf-8 -*-
from collections import defaultdict
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
class Singleton(object):
    """Base class that hands back one shared instance per class."""
    _instance = None

    def __new__(cls, **kwargs):
        existing = cls._instance
        if not existing:
            # First construction: create and cache the shared instance.
            existing = super(Singleton, cls).__new__(cls)
            cls._instance = existing
        return existing
| [
"dedm@nginx.com"
] | dedm@nginx.com |
1f9ade535812ace2203a94e4d09a4869f2213aea | f57bba82fed27b74dca1319f41ed3cf9047fcc55 | /0x08-python-more_classes/7-rectangle.py | 73a746567f04c48f85dc87db0b1a4f54611d2cf3 | [] | no_license | ibeckermayer/holbertonschool-higher_level_programming | cb76d64d698e7b2c8e60d8498c25ba31dc7e337f | 3cb19fb206a77cccbf12a2c2e06067fa1be303f1 | refs/heads/master | 2020-03-09T13:36:27.959941 | 2018-09-06T00:57:53 | 2018-09-06T00:57:53 | 128,815,447 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,416 | py | #!/usr/bin/python3
class Rectangle:
    """A rectangle with non-negative integer width and height.

    Attributes:
        number_of_instances (int): number of live Rectangle instances
        print_symbol (any): symbol used when rendering the rectangle
    """

    number_of_instances = 0
    print_symbol = "#"

    def __init__(self, width=0, height=0):
        """constructor

        Fix: assign through the property setters so invalid arguments are
        validated at construction time (the original wrote the private
        attributes directly, bypassing validation).

        Args:
            width (int, optional): the width of the rectangle
            height (int, optional): the height of the rectangle

        Raises:
            TypeError: if width/height is not an integer
            ValueError: if width/height is not >= 0
        """
        self.width = width
        self.height = height
        Rectangle.number_of_instances += 1

    @property
    def width(self):
        """getter for width"""
        return self.__width

    @width.setter
    def width(self, width):
        """setter for width

        Args:
            width (int): the width of the rectangle

        Raises:
            TypeError: if width is not an integer
            ValueError: if width is not >= 0
        """
        if not isinstance(width, int):
            raise TypeError("width must be an integer")
        if width < 0:
            raise ValueError("width must be >= 0")
        self.__width = width

    @property
    def height(self):
        """getter for height"""
        return self.__height

    @height.setter
    def height(self, height):
        """setter for height

        Args:
            height (int): the height of the rectangle

        Raises:
            TypeError: if height is not an integer
            ValueError: if height is not >= 0
        """
        if not isinstance(height, int):
            raise TypeError("height must be an integer")
        if height < 0:
            raise ValueError("height must be >= 0")
        self.__height = height

    def area(self):
        """returns the area of the rectangle

        Returns:
            int: the area of the rectangle
        """
        return self.__width * self.__height

    def perimeter(self):
        """returns the perimeter of the rectangle

        If either width or height is 0, returns 0

        Returns:
            int: the perimeter of the rectangle
        """
        if self.__width == 0 or self.__height == 0:
            return 0
        return 2 * (self.__width + self.__height)

    def __str__(self):
        """returns the rectangle drawn with print_symbol

        Fix: built with str.join instead of quadratic string concatenation.

        Returns:
            str: the rendered rectangle, or "" if width or height is 0
        """
        if self.__width == 0 or self.__height == 0:
            return ""
        row = str(self.print_symbol) * self.__width
        return "\n".join(row for _ in range(self.__height))

    def __repr__(self):
        """returns a string for recreating the rectangle

        Returns:
            str: e.g. "Rectangle(2, 3)"
        """
        return "Rectangle({}, {})".format(self.__width, self.__height)

    def __del__(self):
        """destructor: decrement the live-instance counter and say goodbye"""
        Rectangle.number_of_instances -= 1
        print("Bye rectangle...")
| [
"ibeckermayer@gmail.com"
] | ibeckermayer@gmail.com |
a7f6bcdf25adf4797b3f71fe4ecf622b71e2f412 | 8087165de8ffd9a2e1b09782dc3118a4e95b3cc3 | /setup.py | e3542be91757072a7c8bb2af990e898bee643c20 | [
"MIT"
] | permissive | hamogu/atpy | c29a1779dcf2039a3fb2abae76075d7892dff534 | c6f32e87df891762b253bbebe839f420f6486035 | refs/heads/master | 2021-01-16T20:46:21.538047 | 2011-02-07T00:56:05 | 2011-02-07T00:56:05 | 1,336,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | #!/usr/bin/env python
from distutils.core import setup
try: # Python 3.x
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError: # Python 2.x
from distutils.command.build_py import build_py
setup(name='ATpy',
version='0.9.4',
description='Astronomical Tables in Python',
author='Eli Bressert and Thomas Robitaille',
author_email='elibre@users.sourceforge.net, \
robitaille@users.sourceforge.net',
url='http://atpy.sourceforge.net/',
packages=['atpy'],
cmdclass = {'build_py':build_py},
)
| [
"thomas.robitaille@gmail.com"
] | thomas.robitaille@gmail.com |
674789986c4c54706811c0d60fb5332b13032365 | f97e256cd8eab010767c86df8c8efe76b0695acc | /pyfb2/isbn.py | a6de0fce0d0252888d8c4827ec091061b1b53ac1 | [] | no_license | gil9red/pyfb2 | 2afcc2827e274de86c81a420083cd8474eb99f9f | 623a32b1e1b5622fc99521d4e4790b11526a3bb1 | refs/heads/master | 2020-05-01T01:25:11.425010 | 2018-11-02T14:51:55 | 2018-11-02T14:51:55 | 26,398,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | __author__ = 'ipetrash'
""""""
class Isbn:
    """FB2 <isbn> element: the ISBN of the original (paper) book.

    Attributes (translated from the original Russian notes):
        lang: optional xml:lang attribute value.
        text: the ISBN string itself; required for serialization.

    The element has no child elements — it carries a plain text string —
    and may only appear inside <publish-info>, e.g.:

        <publish-info>
            <book-name>Dolgin A.B. The economics of symbolic exchange</book-name>
            <publisher>Infra-M</publisher>
            <city>Moscow</city>
            <year>2006</year>
            <isbn>5-16-002911-7</isbn>
        </publish-info>
    """

    # TODO: finish the implementation
    def __init__(self):
        self.lang = None
        self.text = None

    def get_source(self):
        """Serialize this element to its FB2 XML source string."""
        if not self.text:
            # Message (Russian): "The ISBN of the original book is not given."
            raise NameError('Не указано ISBN оригинальной книги.')
        lang_attr = ' xml:lang="{}"'.format(self.lang) if self.lang else ''
        return '<isbn{}>{}</isbn>'.format(lang_attr, self.text)
"gil9red@gmail.com"
] | gil9red@gmail.com |
95cf171d9f3d9732f66c214ab0a8ff210939af53 | e0370fc68acc7eba333c17d379fa2182f114488a | /libs/detection_oprations/proposal_opr_.py | d0cbf540f3adb52139570f93ce8ec1b123e39a30 | [
"MIT"
] | permissive | wolfworld6/CSL_RetinaNet_Tensorflow | 5056cbaa3957b38999a3a440c4b18044d3302430 | ecc5ae71db108ccecb0004855512ff258e006b9b | refs/heads/master | 2022-11-26T22:26:33.768840 | 2020-08-04T09:06:31 | 2020-08-04T09:06:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,114 | py | # encoding: utf-8
from libs.configs import cfgs
from libs.box_utils import bbox_transform
from libs.box_utils import nms_rotate
import tensorflow as tf
import numpy as np
from libs.box_utils.coordinate_convert import coordinate_present_convert, coords_regular
def postprocess_detctions(rpn_bbox_pred, rpn_cls_prob, rpn_angle_prob, anchors, is_training):
    """Build per-class post-processing ops: score filtering, box decoding,
    angle-class decoding and rotated NMS.

    Returns (boxes_pred, scores, labels, boxes_pred_angle) tensors, each
    concatenated over all foreground classes. NOTE(review): tensor shapes
    are inferred from usage only — boxes are 5-tuples (x, y, w, h, theta);
    confirm against the callers.
    """
    return_boxes_pred = []
    return_boxes_pred_angle = []
    return_scores = []
    return_labels = []
    for j in range(0, cfgs.CLASS_NUM):
        scores = rpn_cls_prob[:, j]
        # Different score thresholds for visualization vs. inference.
        if is_training:
            indices = tf.reshape(tf.where(tf.greater(scores, cfgs.VIS_SCORE)), [-1, ])
        else:
            indices = tf.reshape(tf.where(tf.greater(scores, cfgs.FILTERED_SCORE)), [-1, ])

        anchors_ = tf.gather(anchors, indices)
        rpn_bbox_pred_ = tf.gather(rpn_bbox_pred, indices)
        scores = tf.gather(scores, indices)
        rpn_angle_prob_ = tf.gather(rpn_angle_prob, indices)
        # Discrete angle bin with the highest probability per box.
        angle_cls = tf.cast(tf.argmax(rpn_angle_prob_, axis=1), tf.float32)

        if cfgs.METHOD == 'H':
            # Convert horizontal (x1, y1, x2, y2) anchors to rotated form
            # (cx, cy, w, h, -90).
            x_c = (anchors_[:, 2] + anchors_[:, 0]) / 2
            y_c = (anchors_[:, 3] + anchors_[:, 1]) / 2
            h = anchors_[:, 2] - anchors_[:, 0] + 1
            w = anchors_[:, 3] - anchors_[:, 1] + 1
            theta = -90 * tf.ones_like(x_c)
            anchors_ = tf.transpose(tf.stack([x_c, y_c, w, h, theta]))

        if cfgs.ANGLE_RANGE == 180:
            # Switch the anchor angle convention before decoding.
            anchors_ = tf.py_func(coordinate_present_convert,
                                  inp=[anchors_, -1],
                                  Tout=[tf.float32])
            anchors_ = tf.reshape(anchors_, [-1, 5])

        boxes_pred = bbox_transform.rbbox_transform_inv(boxes=anchors_, deltas=rpn_bbox_pred_)
        boxes_pred = tf.reshape(boxes_pred, [-1, 5])
        # Map the angle bin index to a continuous angle (bin center, OMEGA wide).
        angle_cls = (tf.reshape(angle_cls, [-1, ]) * -1 - 0.5) * cfgs.OMEGA

        x, y, w, h, theta = tf.unstack(boxes_pred, axis=1)
        # Same boxes, but with the classified angle instead of the regressed one.
        boxes_pred_angle = tf.transpose(tf.stack([x, y, w, h, angle_cls]))

        if cfgs.ANGLE_RANGE == 180:
            # _, _, _, _, theta = tf.unstack(boxes_pred, axis=1)
            # indx = tf.reshape(tf.where(tf.logical_and(tf.less(theta, 0), tf.greater_equal(theta, -180))), [-1, ])
            # boxes_pred = tf.gather(boxes_pred, indx)
            # scores = tf.gather(scores, indx)

            # Convert decoded boxes back to the presentation convention.
            boxes_pred = tf.py_func(coordinate_present_convert,
                                    inp=[boxes_pred, 1],
                                    Tout=[tf.float32])
            boxes_pred = tf.reshape(boxes_pred, [-1, 5])

            boxes_pred_angle = tf.py_func(coordinate_present_convert,
                                          inp=[boxes_pred_angle, 1],
                                          Tout=[tf.float32])
            boxes_pred_angle = tf.reshape(boxes_pred_angle, [-1, 5])

        # Rotated NMS on the angle-classified boxes; scores follow the kept boxes.
        nms_indices = nms_rotate.nms_rotate(decode_boxes=boxes_pred_angle,
                                            scores=scores,
                                            iou_threshold=cfgs.NMS_IOU_THRESHOLD,
                                            max_output_size=100,
                                            use_angle_condition=False,
                                            angle_threshold=15,
                                            use_gpu=False)

        tmp_boxes_pred = tf.reshape(tf.gather(boxes_pred, nms_indices), [-1, 5])
        tmp_boxes_pred_angle = tf.reshape(tf.gather(boxes_pred_angle, nms_indices), [-1, 5])
        tmp_scores = tf.reshape(tf.gather(scores, nms_indices), [-1, ])

        return_boxes_pred.append(tmp_boxes_pred)
        return_boxes_pred_angle.append(tmp_boxes_pred_angle)
        return_scores.append(tmp_scores)
        # Labels are 1-based; 0 is implicitly background.
        return_labels.append(tf.ones_like(tmp_scores)*(j+1))

    return_boxes_pred = tf.concat(return_boxes_pred, axis=0)
    return_boxes_pred_angle = tf.concat(return_boxes_pred_angle, axis=0)
    return_scores = tf.concat(return_scores, axis=0)
    return_labels = tf.concat(return_labels, axis=0)

    return return_boxes_pred, return_scores, return_labels, return_boxes_pred_angle
| [
"yangxue0827@126.com"
] | yangxue0827@126.com |
5566277f3355ee903609ea25f79493d77e545c5c | 57e1d15553d407e869a2479c248c13ef4d8c615a | /photutils/utils/__init__.py | 3f9447e5025dade97438aab8190ff1e72ae9c7ee | [] | no_license | ccd-utexas/photutils | 02a722f075ac0e537ad19dc9848b8e9f847d644a | 63c282b5833a4db6a71709a5aeff51d3dd6517d1 | refs/heads/master | 2021-01-18T02:35:51.208333 | 2014-09-14T19:06:10 | 2014-09-14T19:06:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains developer-oriented utilities.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .check_random_state import *
| [
"larry.bradley@gmail.com"
] | larry.bradley@gmail.com |
f6246d92aed5850c1ff9fb557cbf76d2f726df92 | 7f6c0d160fd8ea94e7d9523c8129e4a52eb07e0e | /MultiplicationTable.py | be358a867acf3e314136a7cc1fcd6cb726447e4d | [] | no_license | GeekJamesO/Python_MultiplicationTable | dbfbe84e5110f20a5054ac401fc019ee51f65ea0 | 0392e8a039daa1b2216ca2a96b72bed3a84f458a | refs/heads/master | 2021-01-02T08:38:44.958455 | 2017-09-06T05:14:16 | 2017-09-06T05:14:16 | 99,040,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | """
Create a program that prints a multiplication table in your console.
Your table should look like the following example:
x 1 2 3 4 5 6 7 8 9 10 11 12
1 1 2 3 4 5 6 7 8 9 10 11 12
2 2 4 6 8 10 12 14 16 18 20 22 24
3 3 6 9 12 15 18 21 24 27 30 33 36
4 4 8 12 16 20 24 28 32 36 40 44 48
5 5 10 15 20 25 30 35 40 45 50 55 60
6 6 12 18 24 30 36 42 48 54 60 66 72
7 7 14 21 28 35 42 49 56 63 70 77 84
8 8 16 24 32 40 48 56 64 72 80 88 96
9 9 18 27 36 45 54 63 72 81 90 99 108
10 10 20 30 40 50 60 70 80 90 100 110 120
11 11 22 33 44 55 66 77 88 99 110 121 132
12 12 24 36 48 60 72 84 96 108 120 132 144
"""
def multiplicationTable(size) :
if (size < 1):
print "Error: Table is too small, must be 1 or larger."
if (size > 99):
print "Error: Table is too large, that will break the warp engines."
aTitleRow = [ ]
#print header
aTitleRow.append("x |")
for i in range(1, size+1):
aTitleRow.append(str(i))
print "\t".join(aTitleRow)
aDetailRow=[ ]
for row in range(1, size+1):
aDetailRow = [ ];
aDetailRow.append("{0} |".format(row))
#print table row
for column in range (1, size+1):
value = column * row;
aDetailRow.append( "{0}".format(value) )
print "\t".join(aDetailRow)
multiplicationTable(12)
| [
"geekjamesorourkejr@icloud.com"
] | geekjamesorourkejr@icloud.com |
5b2920c821ed8556581055b7062bb9643e9f2645 | 9f30f7d18a90dc2a195c7e7343ad22b20e20a787 | /accommodation/accommodation/notifications.py | 28f572074f843b2b4b08350849425591a2a9a663 | [
"MIT"
] | permissive | ashish-greycube/accommodation | 9c6620220abf69eaf3edef5535457e7827eeaef5 | 4c4d68111c2a89d94e7604849367347129b5a2b9 | refs/heads/master | 2020-12-20T06:25:40.194387 | 2020-01-24T11:14:54 | 2020-01-24T11:14:54 | 235,987,454 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,025 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, DBF and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cint
from six import string_types, iteritems
import json
import erpnext
from frappe.utils import date_diff, now_datetime, getdate, cint, flt, fmt_money, add_days
from frappe.model.document import Document
from dateutil.relativedelta import relativedelta
from erpnext.accounts.party import get_party_account
from erpnext.accounts.utils import get_account_currency
from erpnext.accounts.doctype.payment_entry.payment_entry import get_payment_entry, get_company_defaults
from erpnext.accounts.doctype.journal_entry.journal_entry import get_payment_entry_against_invoice
from PyPDF2 import PdfFileWriter, PdfFileReader
def write_to_local(content):
    """Write *content* to a randomly named .pdf under the site's public files.

    Fix: the generated file name was computed and then discarded; it is now
    returned so callers can locate the file that was written.

    :param content: payload to write
    :return: the generated file name
    """
    import os
    from frappe.utils.file_manager import get_random_filename
    public_files = frappe.get_site_path('public', 'files')
    fname = get_random_filename(extn=".pdf")
    # NOTE(review): the file is opened in text mode ("w"); confirm callers
    # pass str content — binary PDF bytes would need "wb".
    with open(os.path.join(public_files, fname), "w") as f:
        f.write(content)
    return fname
def get_invoice_summary_html(invoices=[]):
    '''aggregated summary of all invoices'''
    # Renders an HTML summary table for the given Sales Invoice names and
    # returns (html, header_title).
    if not invoices:
        return ""
    doc_list = frappe.db.get_all('Sales Invoice', {"name": ["in", invoices]}, [
        'posting_date', 'name', 'company', 'customer', 'customer_name',
        'remarks', 'rounded_total'
    ])
    from datetime import datetime
    # Comma-joined set of distinct "Mon YYYY" posting months.
    month = ",".join(
        list(
            set([datetime.strftime(d.posting_date, '%b %Y')
                 for d in doc_list])))
    # NOTE(review): `title` is never reassigned or used after this line.
    title, company = "", ""
    # Take company/customer for the header from the first invoice only.
    # NOTE(review): if doc_list is empty (names not found), `header_title`
    # is never bound and the `args` dict below raises NameError — confirm
    # callers always pass existing invoice names.
    for invoice in doc_list:
        company = invoice.company
        header_title = "{0} - Internal Vouchers for {1} {2}".format(
            invoice.company, invoice.customer, month)
        break
    template = "accommodation/templates/emails/internal_voucher_summary.html"
    args = {
        "company": company,
        "date": getdate(),
        "header_title": header_title,
        "invoices": doc_list,
    }
    return frappe.render_template(template, {"doc": args}), header_title
def get_invoice_as_attachments(invoices, print_format="Sales Invoice SNS"):
    """Render each invoice with *print_format* and return attachment dicts.

    :param invoices: iterable of Sales Invoice names
    :param print_format: print format used for the rendered document
    :return: list of frappe attachment dicts, one per invoice
    """
    # Idiom: replaced the append loop with a list comprehension.
    return [
        frappe.attach_print(
            "Sales Invoice",
            invoice,
            file_name="Internal Voucher - %s" % invoice,
            print_format=print_format)
        for invoice in invoices
    ]
def get_invoice_merged(invoices, print_format="Sales Invoice SNS"):
invoices = ['SINV-SNS-190227-1', 'SINV-SNS-190227-3']
# print_format = frappe.db.get_single_value("Delivery Settings", "dispatch_attachment")
attachments = []
from PyPDF2 import PdfFileWriter, PdfFileReader
output = PdfFileWriter()
for invoice in invoices:
output = frappe.get_print(
"Sales Invoice", invoice, print_format, as_pdf=True, output=output)
from frappe.utils.print_format import read_multi_pdf
return [{
"fname": "Internal Voucher.pdf",
"fcontent": read_multi_pdf(output)
}]
@frappe.whitelist()
def send_internal_invoice_emails(invoices=None):
if not invoices:
return
if isinstance(invoices, string_types):
invoices = json.loads(invoices)
for group in get_invoice_email_groups(invoices=invoices):
doc_list = [d for d in group.invoices.split(",")]
message, title = get_invoice_summary_html(doc_list)
attachments = get_invoice_merged(doc_list)
attachments[0]["fname"] = "%s.pdf" % title
recipients = [group.to]
cc = group.cc.split("\n") if group.cc else []
bcc = []
# from frappe.utils.background_jobs import enqueue
# enqueue(
# method=frappe.sendmail,
# queue='short',
# timeout=300,
# now=True,
# is_async=False,
# attachments=attachments,
# subject=title,
# message=message,
# recipients=recipients,
# cc=cc,
# bcc=bcc)
frappe.sendmail(
recipients=recipients,
message=message,
subject=title,
attachments=attachments)
def get_invoice_email_groups(invoices=None):
if not invoices:
return []
where = " where si.name in ('%s')" % "','".join(invoices)
return frappe.db.sql(
"""
select
group_concat(si.name) invoices,
ct.email_id `to`,
cus.customer_details `cc`
from
`tabSales Invoice` si
inner join `tabAccommodation Booking` ab on
ab.invoice = si.name
left outer join `tabContact` ct on
ct.name = ab.customer_contact
left outer join tabCustomer cus on
cus.name = si.customer
{}
group by
ab.customer,
si.customer_name,
ab.customer_contact,
ct.email_id,
cus.customer_details
""".format(where),
as_dict=1)
| [
"mr.ashish.shah@gmail.com"
] | mr.ashish.shah@gmail.com |
d602f0c7e4d1f05bd1f974f2465a76beb734905f | 528def9844f2ce13e6a358938b0b560945ab2248 | /main/migrations/0056_userprofile_heatmap.py | b09258ed7d3fad4da912a64531ec71400bcb6d4b | [
"BSD-3-Clause"
] | permissive | skripkar/noc | 055afbd42ab4c447d05d2cde0a822916f9e0844e | df193b99e478fe39157c8d27ff4098262d9cb734 | refs/heads/master | 2020-04-10T12:53:09.602779 | 2018-12-08T07:50:30 | 2018-12-08T07:50:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | # -*- coding: utf-8 -*-
from south.db import db
from django.db import models
class Migration:
def forwards(self):
db.add_column(
"main_userprofile",
"heatmap_lon",
models.FloatField("Longitude", blank=True, null=True))
db.add_column(
"main_userprofile",
"heatmap_lat",
models.FloatField("Latitude", blank=True, null=True))
db.add_column(
"main_userprofile",
"heatmap_zoom",
models.IntegerField("Zoom", blank=True, null=True))
def backwards(self):
db.delete_column("main_userprofile", "heatmap_lon")
db.delete_column("main_userprofile", "heatmap_lat")
db.delete_column("main_userprofile", "heatmap_zoom")
| [
"dv@nocproject.org"
] | dv@nocproject.org |
1a2d52c49149c7a1f52bb7f4d4c899b1fad43330 | 35b6013c1943f37d1428afd2663c8aba0a02628d | /profiler/appengine/flexible/noxfile_config.py | 840f3ba70660706276350faa5616c9a5296c8700 | [
"Apache-2.0"
] | permissive | GoogleCloudPlatform/python-docs-samples | d2a251805fbeab15d76ed995cf200727f63f887d | 44e819e713c3885e38c99c16dc73b7d7478acfe8 | refs/heads/main | 2023-08-28T12:52:01.712293 | 2023-08-28T11:18:28 | 2023-08-28T11:18:28 | 35,065,876 | 7,035 | 7,593 | Apache-2.0 | 2023-09-14T20:20:56 | 2015-05-04T23:26:13 | Jupyter Notebook | UTF-8 | Python | false | false | 1,849 | py | # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default TEST_CONFIG_OVERRIDE for python repos.
# You can copy this file into your directory, then it will be imported from
# the noxfile.py.
# The source of truth:
# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py
TEST_CONFIG_OVERRIDE = {
# You can opt out from the test for specific Python versions.
# 3.11 is currently unsupported
"ignored_versions": ["2.7", "3.11"],
# Old samples are opted out of enforcing Python type hints
# All new samples should feature them
"enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# If you need to use a specific version of pip,
# change pip_version_override to the string representation
# of the version number, for example, "20.2.4"
"pip_version_override": None,
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {},
}
| [
"noreply@github.com"
] | GoogleCloudPlatform.noreply@github.com |
3748009d16efeafdb2601245743c7b00792fe3aa | bb82aa054ae80a09925d86b192922d55eb85e657 | /sfa.py | 3a3086b42bba0f541e7adb64f1c5ec2fd000e379 | [
"MIT"
] | permissive | lagrassa/baselines | d1705bbbd7d0a9c6a6366bc4211402e891dedc7b | 843ab36182add5ca259aa2edcb9a9113d6008135 | refs/heads/master | 2020-04-16T15:49:10.579240 | 2019-08-08T20:32:40 | 2019-08-08T20:32:40 | 165,715,526 | 0 | 0 | null | 2019-01-14T18:46:25 | 2019-01-14T18:46:25 | null | UTF-8 | Python | false | false | 1,945 | py | import numpy as np
from skimage.transform import resize
from PIL import Image
import matplotlib.pyplot as plt
plt.rcParams["font.size"] = 19
import mdp
'''
@param signal_list n x m array with n points in time that are m dimensional
'''
lookback = 3
def train_sfa(signal_list, degree=2, lookback=3):
#put into format where columns are variables and rows are observations
ndims = signal_list.shape[1]
flow = (mdp.nodes.EtaComputerNode() +
mdp.nodes.TimeFramesNode(lookback) +
mdp.nodes.PolynomialExpansionNode(degree) +
mdp.nodes.PCANode(reduce=True)+
mdp.nodes.SFANode(include_last_sample=True, output_dim=6) +
mdp.nodes.EtaComputerNode() )
flow.train(signal_list)
return flow
"""
Just to make sure it's useful, rescale so that the max is 255 and the min is 0
split means split in half
"""
def visualization_matrix(signal, split=False):
if split:
return np.hstack([visualization_matrix(signal[:,:3], split=False), visualization_matrix(signal[:,3:],split=False)])
else:
return np.interp(signal, (signal.min(), signal.max()), (0, 255))
def make_sfa_node(filename):
signal = 1.*np.load(filename)
signal += 0.1*np.random.random(signal.shape)
print(signal.shape)
trained_system = train_sfa(signal)
return trained_system
def plot_im(im):
im = resize(im,(200,50),order=0,anti_aliasing=True).T
#im = im.T
plt.imshow(im, cmap="gray")
#plt.ylabel("force dimensions")
plt.xlabel("time")
#plt.yticks([10,40],["xyz", "rpy"])
plt.yticks([])
plt.show()
if __name__ == "__main__":
trained_system = make_sfa_node("force_states.npy")
signal = np.load("force_states.npy")
im = visualization_matrix(signal,split=True)
#plot_im(im)
encoded = trained_system(signal[0].reshape((1,6)))
im = visualization_matrix(encoded)
plot_im(im)
#Image.fromarray(255*encoded).resize((200,500)).show()
| [
"lagrassa@mit.edu"
] | lagrassa@mit.edu |
43cdf68da99e057b0b815a7520bf33399cf76600 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/LQ/LQToCMu_M_250_TuneCUETP8M1_13TeV_pythia8_cff.py | d7cc1bed05ec637dc94f99d30d949bdaf3d965e4 | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,147 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxeventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
SLHAFileForPythia8 = cms.string('Configuration/Generator/data/LQ_cmu_beta1.0.out'),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'LeptoQuark:gg2LQLQbar = on',
'LeptoQuark:qqbar2LQLQbar = on',
'42:m0 = 250 ! LQ mass',
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"dnash@cern.ch"
] | dnash@cern.ch |
d7408f2e3038befa47ffef0498d21527645fe960 | 04f4558aa0dc904b8d7c0ab79b80ec11c34f8ccf | /swagger_client/models/inline_response_200_35.py | ff4cb64e50ad1461786655d82dcde305187d053c | [
"Apache-2.0"
] | permissive | scubawhere/scubawhere-api-python-client | 0fc23ffb97446b0bb0825c93528f954e7d642cf4 | 9f8578e251492c7667f785df7b7c9d66e71f5c8e | refs/heads/master | 2020-12-24T11:10:34.880348 | 2016-11-08T12:20:45 | 2016-11-08T12:20:45 | 73,180,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,676 | py | # coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: bryan@scubawhere.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class InlineResponse20035(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, payments=None):
"""
InlineResponse20035 - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'payments': 'list[Payment]'
}
self.attribute_map = {
'payments': 'payments'
}
self._payments = payments
@property
def payments(self):
"""
Gets the payments of this InlineResponse20035.
:return: The payments of this InlineResponse20035.
:rtype: list[Payment]
"""
return self._payments
@payments.setter
def payments(self, payments):
"""
Sets the payments of this InlineResponse20035.
:param payments: The payments of this InlineResponse20035.
:type: list[Payment]
"""
self._payments = payments
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"bryan@iqwebcreations.com"
] | bryan@iqwebcreations.com |
8daec3220944fafce80738c0e80789c0273bcbbc | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_will945945945_codejam1.py | 3ab66a1fd67b3fe2f6a756ed21c188023f10f055 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 450 | py | def f(n):
n = int(n)
S = set(list(str(n)))
i = 0
k = 0
while len(S) != 10:
k += n
T = set(list(str(k)))
S = S.union(T)
i += 1
if i >= 10**5:
return 'INSOMNIA'
return k
F = open('A-large.in')
A = F.read()
A = A.split('\n')[1:-1]
Ans = map(f,A)
E = open('ANS1.large','w')
for i in xrange(len(Ans)):
E.write('Case #' + str(i+1) + ': ' + str(Ans[i]) + '\n')
E.close()
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
98f554fa63163672e296e3c2df632125022ebfd4 | a33497a0a8250d06f146230746335e8b84f90394 | /configure/main.py | 99078324289b72fc09a4b3eaee101637edb4b673 | [
"MIT"
] | permissive | gibbs-lab-us/usxp_08_16 | e099c68fd5112a1df4d5390a674bd79d6d01e186 | 06c60525a043486209c7e2c17072e56478d113e5 | refs/heads/master | 2022-11-07T17:31:47.870927 | 2020-06-24T04:17:39 | 2020-06-24T04:17:39 | 274,553,685 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,061 | py | '''
Author: Matt Bougie (mbougie@wisc.edu)
Date: June 21, 2020
Purpose: Central script to call all other scripts for processing data.
Usage: Need to set up connections to directories of scripts and to geodatabases where the output is saved.
Parameters: Parameters for all scripts are stored in the json file and referenced as a global data object.
'''
import sys
import os
# from config import from_config
from sqlalchemy import create_engine
import pandas as pd
import json
sys.path.append('C:\\Users\\Bougie\\Desktop\\scripts\\modules\\')
import general as gen
### import pre ###########################################################################
sys.path.append('C:\\Users\\Bougie\\Desktop\\scripts\\projects\\usxp\\stages\\pre\\')
sys.path.append('C:\\Users\\Bougie\\Desktop\\scripts\\projects\\usxp\\stages\\pre\\lookup_scripts')
import pre
import lookup_scripts_v4
### import refinement scripts ##########################################################
sys.path.append('C:\\Users\\Bougie\\Desktop\\scripts\\projects\\usxp\\stages\\refine')
import mask_fn_yfc_61
import mask_fp_2007
import mask_fp_nlcd_yfc
import mask_fp_nlcd_ytc
import mask_fp_yfc
import mask_fp_ytc
### import core scripts ##################################################################
sys.path.append('C:\\Users\\Bougie\\Desktop\\scripts\\projects\\usxp\\stages\\core\\')
import parallel_core as core
### import post scripts ##################################################################
sys.path.append('C:\\Users\\Bougie\\Desktop\\scripts\\projects\\usxp\\stages\\post\\yxc\\')
sys.path.append('C:\\Users\\Bougie\\Desktop\\scripts\\projects\\usxp\\stages\\post\\cdl\\')
import parallel_yxc as yxc
import parallel_cdl as cdl
import parallel_cdl_bfc_bfnc as cdl_bfc_bfnc
import add2pg_yxc
import add2pg_cdl
if __name__ == '__main__':
### get json parameters from current instance
data = gen.getCurrentInstance(file='C:\\Users\\Bougie\\Desktop\\scripts\\projects\\usxp\\configure\\json\\current_instance.json')
print(data)
#####################################################################################################################################################
###### pre and refinement stages #####################################################################################################################
#####################################################################################################################################################
####************ create trajectory ***************###############################
### NOTE: only run script to create new traj lookup#################
lookup_scripts_v4.run(data)
####************* create the refined trajectory *************#################################
####____false negative refinement______________________________________
###run script to create false negative mask
mask_fn_yfc_61.run(data)
#####create the add_yfc trajectories dataset############################
#####apply false negative mask above to trajectory so false negative masks can be applied to it
pre.run(data)
###____false positve refinement________________________________________
###run scripts to create each false positive mask
mask_fp_2007.run(data)
mask_fp_nlcd_yfc.run(data)
mask_fp_nlcd_ytc.run(data)
mask_fp_yfc.run(data)
mask_fp_ytc.run(data)
######################create the rfnd dataset###################################
pre.run(data)
#####################################################################################################################################################
###### core stage ###################################################################################################################################
#####################################################################################################################################################
core.run(data)
add2pg_mtr.run(data)
#####################################################################################################################################################
###### post stage ###################################################################################################################################
#####################################################################################################################################################
##_______YTC________________________________________________
### create rasters
yxc.run(data, 'ytc')
cdl.run(data, 'ytc', 'bfc')
cdl_bfc_bfnc.run(data, 'ytc', 'fc')
### add raster attribute table to database
add2pg_yxc.run(data, 'ytc')
add2pg_cdl.run(data, 'ytc', 'bfc')
add2pg_cdl.run(data, 'ytc', 'fc')
##________YFC_______________________________________________
### create rasters
yxc.run(data, 'yfc')
cdl.run(data, 'yfc', 'fnc')
cdl_bfc_bfnc.run(data, 'yfc', 'bfnc')
### add raster attribute table to database
add2pg_yxc.run(data, 'yfc')
add2pg_cdl.run(data, 'yfc', 'bfnc')
add2pg_cdl.run(data, 'yfc', 'fnc')
| [
"mbougie@wisc.edu"
] | mbougie@wisc.edu |
938e45d7658d1b188094e5d5f989f91da0257c82 | be791583545a1f66a7650085d920171d0df040da | /nni/algorithms/compression/v2/pytorch/pruning/tools/metrics_calculator.py | 2628751adc8649304099ce184c967bc2e36a330d | [
"MIT"
] | permissive | Lijiaoa/nni | de4f598585d346c17aae1030774eab8346ba6b5e | 7bcf1ebd47caf144032825aa078c8d9a51833320 | refs/heads/master | 2023-06-08T08:00:44.947829 | 2022-09-14T08:37:09 | 2022-09-14T08:37:09 | 242,638,482 | 1 | 0 | MIT | 2020-07-16T08:24:42 | 2020-02-24T03:30:45 | Python | UTF-8 | Python | false | false | 6,289 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
from typing import Dict, List
import torch
from torch import Tensor
from .base import MetricsCalculator
from ...utils import Scaling
__all__ = ['NormMetricsCalculator', 'HookDataNormMetricsCalculator', 'DistMetricsCalculator',
'APoZRankMetricsCalculator', 'MeanRankMetricsCalculator', 'StraightMetricsCalculator']
class StraightMetricsCalculator(MetricsCalculator):
"""
This metrics calculator directly returns a copy of data as metrics.
"""
def calculate_metrics(self, data: Dict[str, Dict[str, Tensor]]) -> Dict[str, Dict[str, Tensor]]:
metrics = {}
for module_name, targets_data in data.items():
metrics[module_name] = {}
for target_name, target_data in targets_data.items():
metrics[module_name][target_name] = self._get_scaler(module_name, target_name).shrink(target_data)
return metrics
class NormMetricsCalculator(MetricsCalculator):
"""
Calculate the specify norm for each tensor in data.
L1, L2, Level, Slim pruner use this to calculate metric.
Parameters
----------
p
The order of norm. None means Frobenius norm.
scalers
Please view the base class `MetricsCalculator` docstring.
"""
def __init__(self, p: int | float | None = None, scalers: Dict[str, Dict[str, Scaling]] | Scaling | None = None):
super().__init__(scalers=scalers)
self.p = p if p is not None else 'fro'
def calculate_metrics(self, data: Dict[str, Dict[str, Tensor]]) -> Dict[str, Dict[str, Tensor]]:
def reduce_func(t: Tensor) -> Tensor:
return t.norm(p=self.p, dim=-1) # type: ignore
metrics = {}
for module_name, targets_data in data.items():
metrics[module_name] = {}
for target_name, target_data in targets_data.items():
scaler = self._get_scaler(module_name, target_name)
metrics[module_name][target_name] = scaler.shrink(target_data, reduce_func)
return metrics
class HookDataNormMetricsCalculator(NormMetricsCalculator):
"""
The hook data value format is a two-element list [batch_number, cumulative_data].
Directly use the cumulative_data as new_data to calculate norm metric.
TaylorFO pruner uses this to calculate metric.
"""
def calculate_metrics(self, data: Dict[str, Dict[str, List[Tensor]]]) -> Dict[str, Dict[str, Tensor]]:
new_data = {}
for module_name, targets_data in data.items():
new_data[module_name] = {}
for target_name, (_, target_data) in targets_data.items():
new_data[module_name][target_name] = target_data
return super().calculate_metrics(new_data)
class DistMetricsCalculator(MetricsCalculator):
"""
Calculate the sum of specify distance for each element with all other elements in specify `dim` in each tensor in data.
FPGM pruner uses this to calculate metric.
Parameters
----------
p
The order of norm. None means Frobenius norm.
scalers
Please view the base class `MetricsCalculator` docstring.
"""
def __init__(self, p: int | float | None = None, scalers: Dict[str, Dict[str, Scaling]] | Scaling | None = None):
super().__init__(scalers=scalers)
self.p = p if p is not None else 'fro'
def calculate_metrics(self, data: Dict[str, Dict[str, Tensor]]) -> Dict[str, Dict[str, Tensor]]:
def reduce_func(t: Tensor) -> Tensor:
reshape_data = t.reshape(-1, t.shape[-1])
metric = torch.zeros(reshape_data.shape[0], device=reshape_data.device)
for i in range(reshape_data.shape[0]):
metric[i] = (reshape_data - reshape_data[i]).norm(p=self.p, dim=-1).sum() # type: ignore
return metric.reshape(t.shape[:-1])
metrics = {}
for module_name, targets_data in data.items():
metrics[module_name] = {}
for target_name, target_data in targets_data.items():
scaler = self._get_scaler(module_name, target_name)
metrics[module_name][target_name] = scaler.shrink(target_data, reduce_func)
return metrics
class APoZRankMetricsCalculator(MetricsCalculator):
"""
The data value format is a two-element list [batch_number, batch_wise_zeros_count_sum].
This metric sum the zero number on `dim` then devide the (batch_number * across_dim_size) to calculate the non-zero rate.
Note that the metric we return is (1 - apoz), because we assume a higher metric value has higher importance.
APoZRank pruner uses this to calculate metric.
"""
def calculate_metrics(self, data: Dict[str, Dict[str, List[Tensor]]]) -> Dict[str, Dict[str, Tensor]]:
def reduce_func(t: Tensor) -> Tensor:
return 1 - t.mean(dim=-1)
metrics = {}
for module_name, targets_data in data.items():
metrics[module_name] = {}
for target_name, target_data in targets_data.items():
target_data = target_data[1] / target_data[0]
scaler = self._get_scaler(module_name, target_name)
metrics[module_name][target_name] = scaler.shrink(target_data, reduce_func)
return metrics
class MeanRankMetricsCalculator(MetricsCalculator):
"""
The data value format is a two-element list [batch_number, batch_wise_activation_sum].
This metric simply calculate the average on `self.dim`, then divide by the batch_number.
MeanRank pruner uses this to calculate metric.
"""
def calculate_metrics(self, data: Dict[str, Dict[str, List[Tensor]]]) -> Dict[str, Dict[str, Tensor]]:
def reduce_func(t: Tensor) -> Tensor:
return t.mean(dim=-1)
metrics = {}
for module_name, targets_data in data.items():
metrics[module_name] = {}
for target_name, target_data in targets_data.items():
target_data = target_data[1] / target_data[0]
scaler = self._get_scaler(module_name, target_name)
metrics[module_name][target_name] = scaler.shrink(target_data, reduce_func)
return metrics
| [
"noreply@github.com"
] | Lijiaoa.noreply@github.com |
074f348734d23aa58b3c21900e08312d205392e5 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_032/ch18_2020_03_08_21_48_35_100424.py | d5d6a13b5f497d97e06155ea19823128df29554b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | def verifica_idade(idade):
if idade>=21:
return "Liberado EUA e BRASIL"
elif idade>= 18:
return "Liberado BRASIL"
else:
return "Não está liberado" | [
"you@example.com"
] | you@example.com |
1c327427f69011e83e4e3db08d9f67da61680805 | 3c7150c5ee763ba965be678af00e488a0810fa6c | /test/tstpkg/packtest/packa.py | 6d4aedb460a38efbd88563cc740f75f67e3849ee | [] | no_license | jeppeter/pylib | 3dffb188f2736f4dba730fa142102962ccadd95e | fad29d7efb90782254e766467a8a3bc50bd07312 | refs/heads/master | 2023-08-16T03:50:05.387168 | 2021-12-14T03:43:58 | 2021-12-14T03:43:58 | 58,593,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py | #!/usr/bin/python
class PackBase:
def __init__(self):
print ('init packbase')
return
def call_a_new():
return PackBase() | [
"jeppeter@gmail.com"
] | jeppeter@gmail.com |
83c2ed80a7b142d6129f75e8dc3c02d54f29d899 | 242086b8c6a39cbc7af3bd7f2fd9b78a66567024 | /python/PP4E-Examples-1.4/Examples/PP4E/Gui/Tour/demo-radio-multi.py | 518610e67784054e5fd5bd69e93729aa96d8fdc8 | [] | no_license | chuzui/algorithm | 7537d0aa051ac4cbe9f6a7ca9a3037204803a650 | c3006b24c4896c1242d3ceab43ace995c94f10c8 | refs/heads/master | 2021-01-10T13:05:30.902020 | 2015-09-27T14:39:02 | 2015-09-27T14:39:02 | 8,404,397 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | # see what happens when some buttons have same value
from tkinter import *
root = Tk()
var = StringVar()
for i in range(10):
rad = Radiobutton(root, text=str(i), variable=var, value=str(i % 3))
rad.pack(side=LEFT)
var.set(' ') # deselect all initially
root.mainloop()
| [
"zui"
] | zui |
ee5c2e68c1e14440944550d0b6e44adadfbd9b03 | 9cdbc9190def43f30b3357c7fccd92bcb0bef6a9 | /pyspark/day_calci.py | 7d8b70407f8e11ac7b4bd0c408aa4c20d861b9ca | [] | no_license | sidaker/dq | 2ff8fac0fc4a79d67909fb968f0a7ec8e976630a | fd41d60b6ca658d5f024701ce4a4729d41a24d30 | refs/heads/master | 2023-05-27T01:31:18.274913 | 2023-05-10T22:20:16 | 2023-05-10T22:20:16 | 225,405,776 | 0 | 0 | null | 2023-05-01T20:38:00 | 2019-12-02T15:19:30 | Jupyter Notebook | UTF-8 | Python | false | false | 1,591 | py | # step 1: ask user for calculation to be performed
operation = input("Would you like to add/subtract/multiply/divide? ").lower( )
print( "You chose {}.".format(operation) ) # for testing purposes
# step 2: ask for numbers, alert order matters for subtracting and dividing
if operation == "subtract" or operation == "divide":
print( "You chose {}.".format(operation) )
print("Please keep in mind that the order of your numbers matter.")
num1 = input("What is the first number? ")
num2 = input("What is the second number? ")
print( "First Number: {}".format(num1) ) # for testing purposes
print( "Second Number: {}".format(num2) ) # for testing purposes
# step 3: setup try/except for mathematical operation
try:
# step 3a: immediately try to convert numbers input to floats
num1, num2 = float(num1), float(num2)
# step 3b: perform operation and print result
if operation == "add":
result = num1 + num2
print( "{} + {} = {}".format(num1, num2, result) )
elif operation == "subtract":
result = num1 - num2
print( "{} - {} = {}".format(num1, num2, result) )
elif operation == "multiply":
result = num1 * num2
print( "{} * {} = { }".format(num1, num2, result) )
elif operation == "divide":
result = num1 / num2
print( "{} / {} = {}".format(num1, num2, result) )
else:
# else will be hit if they didn't chose an option correctly
print("Sorry, but '{}' is not an option.".format(operation) )
except:
# steb 3c: print error
print("Error: Improper numbers used. Please try again.")
| [
"siddartha.bommireddy@digital.homeoffice.gov.uk"
] | siddartha.bommireddy@digital.homeoffice.gov.uk |
e1612a00002719da9c77fc0035d86f29b67bf644 | 1e5c6f4b08d9470fce248cf39e6dccce40e90a41 | /codes/19/change_ifs_format.py | fc3a3020f4ad3bf752d5699bfb8d8619ede9f382 | [] | no_license | misaiya99/scipybook2 | 1529cfb7f800df2ef7ce024a86281af16e343a37 | 734ba177b4705cc25da695d42a8cbada7cd22bd9 | refs/heads/master | 2020-03-10T21:26:23.595494 | 2017-08-25T09:48:07 | 2017-08-25T09:48:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | # -*- coding: utf-8 -*-
import pickle
import numpy as np
f = file("IFS.data", "rb")
names = pickle.load(f)
data = []
for i in range(len(names)):
data.append( np.load(f).tolist())
f2 = file("ifs2.data", "wb")
pickle.dump(list(zip(names, data)), f2)
| [
"qytang326@gmail.com"
] | qytang326@gmail.com |
55b949d206aa1d21674acd41c27d4e095218cfdd | 98cc042117f1545d95e240a3a6e9afef0a2fedd7 | /datalad/metadata/parsers/base.py | 8789afee080fbdd5fcbaaece9854a589f409c1e6 | [
"MIT",
"BSD-3-Clause"
] | permissive | taylols/datalad | 23fb6a5a03591e25756d0dfa241413b1d03d19d1 | 8d1788b384fa9d998d4c6150c31def7f670c4f49 | refs/heads/master | 2022-02-19T17:55:20.450846 | 2018-01-05T17:15:17 | 2018-01-05T17:15:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Metadata parser base class"""
class BaseMetadataParser(object):
    """Abstract base class for metadata parsers.

    Subclasses implement ``_get_dataset_metadata`` and
    ``_get_content_metadata`` (and provide a ``_key2stdkey`` mapping);
    this base class wires them together behind ``get_metadata``.
    """

    def __init__(self, ds, paths):
        """
        Parameters
        ----------
        ds : dataset instance
            Dataset to extract metadata from.
        paths : list
            Paths to investigate when extracting content metadata.
        """
        self.ds = ds
        self.paths = paths

    def get_metadata(self, dataset=True, content=True):
        """Return a 2-tuple of (dataset metadata, content metadata).

        Either element is None when the corresponding flag is False; the
        content element is a generator of (location, metadata_dict) pairs.
        """
        ds_meta = None
        if dataset:
            ds_meta = self._get_dataset_metadata()
        cn_meta = None
        if content:
            cn_meta = ((loc, meta) for loc, meta in self._get_content_metadata())
        return ds_meta, cn_meta

    def _get_dataset_metadata(self):
        """Return a dict of homogenized datalad metadata keys -> values.

        Must be implemented by subclasses.
        """
        raise NotImplementedError

    def _get_content_metadata(self):
        """Yield (location, metadata_dict) pairs for ALL dataset content.

        Must be implemented by subclasses.
        """
        raise NotImplementedError

    def get_homogenized_key(self, key):
        """Map a parser-specific key onto its standard key (None if unknown)."""
        # TODO decide on how to error
        return self._key2stdkey.get(key)
| [
"michael.hanke@gmail.com"
] | michael.hanke@gmail.com |
23c9ba644d6c01a7f88742c512bbbd0c427aaf93 | 9206e405e9be5f80a08e78b59d1cb79c519ae515 | /algorithms/10.7_missing_int.py | 02819c453c90cb2ea2f865c3ac0d0e4d5f768d27 | [] | no_license | mfbx9da4/mfbx9da4.github.io | ac4e34f0e269fb285e4fc4e727b8564b5db1ce3b | 0ea1a0d56a649de3ca7fde2d81b626aee0595b2c | refs/heads/master | 2023-04-13T22:15:19.426967 | 2023-04-12T12:14:40 | 2023-04-12T12:14:40 | 16,823,428 | 2 | 0 | null | 2022-12-12T04:36:08 | 2014-02-14T01:30:20 | SCSS | UTF-8 | Python | false | false | 3,180 | py | # An input file with four billion non-negative integers
# Generate an integer not contained by the file with 1GB of mem
# Follow up: what if all numbers are unique and you only
# have 10MB of memory
# p416
import random
import sys
max_64 = sys.maxsize
max_32 = 2**32 - 1
large_filename = 'large_number_of_ints.txt'
small_filename = 'small_number_of_ints.txt'
filename_32bit = '32bit_ints.txt'
#
# is it possible for 64 bit? =>
# 2**63 = 3 billion billion possible integers
#
# bit_vector = bytearray(array_size // 8)
# byte array overhead is 57 bytes
# each byte is then 1 byte after that
# bit_vector = bytearray((max_32 + 1) // 8)
# print(len(bit_vector))
# print(bit_vector[0])
# bit_vector[0] |= 1 << 5
# print(bit_vector[0])
# bit_vector |= 1 << 5
# print(bit_vector)
def solve(filename, largest_int):
    """Find a non-negative integer absent from *filename* via a bit vector.

    Each line of the file holds one non-negative integer <= largest_int.
    A bit is set for every integer read; the first clear bit marks a
    missing value.  Memory use is (largest_int + 1) / 8 bytes, which is
    why largest_int is capped at 2**32 - 1.

    Returns the smallest missing integer (also printed), or None when
    every value in [0, largest_int] is present.
    """
    max_valid = 2 ** 32 - 1  # same bound as the module-level max_32
    if largest_int > max_valid:
        raise TypeError('Largest int should be less than ' + str(max_valid))
    total = largest_int + 1  # +1 so that 0 is representable
    num_bytes, spare_bits = divmod(total, 8)
    if spare_bits:
        num_bytes += 1
    bit_vector = bytearray(num_bytes)
    # Mark every integer present in the file.
    with open(filename) as file:
        for line in file:
            num = line.strip()
            if num:
                num = int(num)
                index, bit = divmod(num, 8)
                bit_vector[index] |= 1 << bit
    # Scan from 0: the original started at 1 and could never report 0 as
    # missing (its old "special case of 0" TODO), and it returned the
    # value of print(...) -- i.e. None -- instead of the answer.
    for i in range(largest_int + 1):
        index, bit = divmod(i, 8)
        if not bit_vector[index] & (1 << bit):
            print('missing bit', i)
            return i
    return None
def write_random_ints(filename, number_of_ints=max_32, largest_int=max_32, chunk_size=100000):
    """Write *number_of_ints* random integers (one per line) to *filename*.

    Values are produced in chunks of *chunk_size* lines to bound memory.
    NOTE(review): the lower bound of each random value is the line's index
    within its chunk (``rand(i)``), so values are not uniform over
    [0, largest_int] -- confirm this is intended.
    NOTE(review): ``total`` advances by ``chunk_size`` even on the final
    partial chunk, so the 'Wrote N lines' log can overcount.
    """
    # TODO: ensure unique
    rand = lambda x: random.randint(x, largest_int)
    with open(filename, "w") as file:
        total = 0
        while total < number_of_ints:
            chunk = chunk_size
            if chunk_size + total > number_of_ints:
                # last chunk: only write the remaining lines
                chunk = number_of_ints - total
            total += chunk_size
            contents = ''.join(str(rand(i)) + '\n' for i in range(chunk))
            file.write(contents)
            prev = chunk  # NOTE(review): never read -- leftover?
            print('Wrote', total, 'lines')
# Top-level debug print of the 32-bit bound.
print(max_32)
def write_small_file(number_of_ints=100, largest_int=100):
    """Write a small random-int test file to ``small_filename``."""
    write_random_ints(
        small_filename,
        number_of_ints=number_of_ints,
        largest_int=largest_int,
        chunk_size=50,
    )
def solve_small():
    """Generate a small random-int file, then run the bit-vector solver on it."""
    limit = 10
    write_small_file(number_of_ints=10, largest_int=limit)
    solve(small_filename, limit)
def write_32_bit():
    # Generate the full 32-bit test file: 2**32 - 1 lines. This is slow
    # and produces a multi-gigabyte file.
    largest_int = max_32
    number_of_ints = max_32
    write_random_ints(filename_32bit, number_of_ints=number_of_ints, largest_int=largest_int)
def solve_32_bit():
    # Run the bit-vector solver against the pre-generated 32-bit file.
    largest_int = max_32
    solve(filename_32bit, largest_int)
# Script behaviour: generation is commented out (assumes the data file
# already exists); solving runs on import/execution.
# write_32_bit()
solve_32_bit()
| [
"dalberto.adler@gmail.com"
] | dalberto.adler@gmail.com |
c72fb5b1508d9c0e47997158cfac1eb710f45a26 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /XAMKeMw7imaB3zo67_13.py | b883127d748c08c5e3d1811aac0c0d4b8fb120fd | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py |
def trace_word_path(word, grid):
    """Find *word* in *grid* by orthogonal steps without revisiting cells.

    Tries every cell as a starting point and explores up/down/left/right
    (in that order, matching the original search order) depth-first.

    Returns the list of (row, col) coordinates spelling the word, or the
    string 'Not present' when no such path exists.

    Fix: removed the leftover debug ``print('*', i, j, tmp)`` that fired
    for every start cell.
    """
    rows, cols = len(grid), len(grid[0])

    def walk(w, r, c, visited):
        # Return a path matching w starting at (r, c), or [] on failure.
        if r < 0 or c < 0 or r >= rows or c >= cols or (r, c) in visited:
            return []
        if grid[r][c] != w[0]:
            return []
        if len(w) == 1:
            return [(r, c)]
        for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            rest = walk(w[1:], r + dr, c + dc, visited + [(r, c)])
            if len(rest) == len(w) - 1:
                return [(r, c)] + rest
        return []

    for r in range(rows):
        for c in range(cols):
            path = walk(word, r, c, [])
            if len(path) == len(word):
                return path
    return 'Not present'
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
d7df2514ee4f2af55e937be7c2202065dd427d44 | 29a4c1e436bc90deaaf7711e468154597fc379b7 | /modules/arithmetic/doc/sqr.py | c1896061ad4da0d5088b8c53e5f81a11c5ec3df2 | [
"BSL-1.0"
] | permissive | brycelelbach/nt2 | 31bdde2338ebcaa24bb76f542bd0778a620f8e7c | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | refs/heads/master | 2021-01-17T12:41:35.021457 | 2011-04-03T17:37:15 | 2011-04-03T17:37:15 | 1,263,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,035 | py | [ ## this file was manually modified by jt
{
'functor' : {
'arity' : '1',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'typename boost::result_of<nt2::meta::arithmetic(T,T)>::type',
},
'simd_types' : ['real_'],
'type_defs' : [],
'types' : ['real_', 'unsigned_int_', 'signed_int_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'modified by jt the 01/12/2010',
'included' : [],
'notes' : [],
'stamp' : 'modified by jt the 13/12/2010',
},
'ranges' : {
'real_' : [['T(-10)', 'T(10)']],
'signed_int_' : [['-100', '100']],
'unsigned_int_' : [['0', '100']],
},
'specific_values' : {
'default' : {
},
'real_' : {
'nt2::Inf<T>()' : 'nt2::Inf<T>()',
'nt2::Minf<T>()' : 'nt2::Inf<T>()',
'nt2::Mone<T>()' : 'nt2::One<T>()',
'nt2::Nan<T>()' : 'nt2::Nan<T>()',
'nt2::One<T>()' : 'nt2::One<T>()',
'nt2::Zero<T>()' : 'nt2::Zero<T>()',
},
'signed_int_' : {
'nt2::Mone<T>()' : 'nt2::One<T>()',
'nt2::One<T>()' : 'nt2::One<T>()',
'nt2::Zero<T>()' : 'nt2::Zero<T>()',
},
'unsigned_int_' : {
'nt2::One<T>()' : 'nt2::One<T>()',
'nt2::Zero<T>()' : 'nt2::Zero<T>()',
},
},
'verif_test' : {
'property_call' : {
'default' : ['nt2::sqr(a0)'],
},
'property_value' : {
'default' : ['a0*a0'],
},
'ulp_thresh' : {
'default' : ['0'],
},
},
},
'version' : '0.1',
},
] | [
"jtlapreste@gmail.com"
] | jtlapreste@gmail.com |
ee43c1ceeaaeb16ed860776f5d19480513714718 | 047e467917cc2614a63f48515d11bfa5647ad358 | /bpnet/plot/heatmaps.py | 04c3d94e5749059f5ac880bee020a786f5453012 | [
"MIT"
] | permissive | Scottmakie72/bpnet | e8052c77feb8c2f757ba581814b9ad6cb27d3721 | 0cba2515c15589f8439876c3028ce177544ee9cb | refs/heads/master | 2022-02-23T18:31:46.302341 | 2019-09-02T16:15:15 | 2019-09-02T16:15:15 | 250,239,695 | 1 | 0 | MIT | 2020-03-26T11:27:49 | 2020-03-26T11:27:48 | null | UTF-8 | Python | false | false | 6,733 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from mpl_toolkits.axes_grid1.colorbar import colorbar
from matplotlib import colors
from bpnet.plot.utils import MidpointNormalize
class QuantileTruncateNormalizer:
    """Clip a signal into the [pmin, pmax] percentile band of its values.

    Both percentile thresholds are computed from the *original* signal;
    values above the pmax percentile are lowered to it and values below
    the pmin percentile are raised to it.
    """

    def __init__(self, pmin=50, pmax=99):
        self.pmin = pmin
        self.pmax = pmax

    def __call__(self, signal):
        upper = np.percentile(signal, self.pmax)
        lower = np.percentile(signal, self.pmin)
        return np.maximum(np.minimum(signal, upper), lower)
class RowQuantileNormalizer:
    """Normalize each row of a profile matrix by its own quantiles.

    Entries below a row's pmin percentile become NaN; the rest are
    divided by the row's pmax percentile and capped at 1.
    """

    def __init__(self, pmin=50, pmax=99):
        """
        Args:
            pmin: minimum percentile
            pmax: maximum percentile
        """
        self.pmin = pmin
        self.pmax = pmax

    def __call__(self, signal):
        values = signal.copy()
        lower = np.percentile(values, self.pmin, axis=1)
        upper = np.percentile(values, self.pmax, axis=1)
        # Blank out everything below the row's pmin percentile.
        values[values < lower[:, np.newaxis]] = np.nan
        return np.minimum(values / upper[:, np.newaxis], 1)


def normalize(p, pmin=50, pmax=99):
    """Back-compatibility wrapper around RowQuantileNormalizer."""
    return RowQuantileNormalizer(pmin, pmax)(p)
def heatmap_stranded_profile(signal, ax=None, figsize=(5, 20),
                             aspect=0.2, normalizer=RowQuantileNormalizer(),
                             interpolation='nearest', tick_step=25):
    """Heatmap of a stranded profile: channel 0 in red, channel 1 in blue.

    The two imshow calls index ``signal[:, :, 0]`` and ``signal[:, :, 1]``,
    so the last axis must have (at least) two strand channels.
    NOTE(review): the default ``normalizer`` is a single shared instance
    (evaluated once at def time); it is stateless, so this is harmless.
    Returns the created figure, or None when ``ax`` was supplied.
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    else:
        fig = None
    norm_signal = normalizer(signal)
    # Overlay the second strand with partial transparency.
    ax.imshow(norm_signal[:, :, 0], cmap=plt.cm.Reds, interpolation=interpolation, aspect=aspect)
    ax.imshow(norm_signal[:, :, 1], alpha=0.5, cmap=plt.cm.Blues, interpolation=interpolation, aspect=aspect)
    seq_len = signal.shape[1]
    # Relabel x ticks so that position 0 is the sequence center.
    ticks = np.arange(0, seq_len + 1 - tick_step, tick_step)
    ax.set_xticks(ticks)
    ax.set_xticklabels(ticks - seq_len // 2)
    ax.set_ylabel("Seqlet index")
    ax.set_xlabel("Position")
    return fig
def multiple_heatmap_stranded_profile(signal_dict, figsize=(20, 20), sort_idx=None, **kwargs):
    """Plot one stranded-profile heatmap per task, side by side.

    ``signal_dict`` maps task name -> profile array; all panels share the
    same row order.
    """
    tasks = list(signal_dict.keys())
    fig, axes = plt.subplots(1, len(tasks), figsize=figsize)
    # Default row order: descending total counts summed across all tasks.
    if sort_idx is None:
        total_counts = sum([x.sum(axis=-1).sum(axis=-1) for x in signal_dict.values()])
        sort_idx = np.argsort(-total_counts)
    for i, (task, ax) in enumerate(zip(tasks, axes)):
        heatmap_stranded_profile(signal_dict[task][sort_idx], ax=ax, **kwargs)
        ax.set_title(task)
    fig.subplots_adjust(wspace=0)  # no space between plots
    # Hide y tick labels and axes on all but the first panel.
    plt.setp([a.get_yticklabels() for a in fig.axes[1:]], visible=False)  # no numbers
    plt.setp([a.get_yaxis() for a in fig.axes[1:]], visible=False)  # no numbers
    return fig
def heatmap_contribution_profile(signal, ax=None, figsize=(5, 20), aspect=0.2, sort_idx=None, tick_step=25):
    """Heatmap of contribution scores with a diverging colormap centered at 0.

    NOTE(review): unlike ``heatmap_stranded_profile``, the figure created
    when ``ax`` is None is never returned, so callers cannot save it --
    confirm whether a ``return fig`` was intended.
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    if sort_idx is None:
        sort_idx = np.arange(signal.shape[0])
    interpolation = 'nearest'
    # Red/blue diverging colors, midpoint pinned at zero contribution.
    ax.imshow(signal[sort_idx],
              cmap=plt.cm.RdBu, norm=MidpointNormalize(midpoint=0),
              interpolation=interpolation, aspect=aspect)
    seq_len = signal.shape[1]
    # Relabel x ticks so that position 0 is the sequence center.
    ticks = np.arange(0, seq_len + 1 - tick_step, tick_step)
    ax.set_xticks(ticks)
    ax.set_xticklabels(ticks - seq_len // 2)
    ax.set_ylabel("Seqlet index")
    ax.set_xlabel("Position")
def multiple_heatmap_contribution_profile(signal_dict, sort_idx=None,
                                          figsize=(20, 20), **kwargs):
    """Plot one contribution-score heatmap per task, side by side.

    Unlike the stranded variant, the default row order is simply the
    original order (np.arange), not a count-based sort.
    """
    tasks = list(signal_dict.keys())
    fig, axes = plt.subplots(1, len(tasks), figsize=figsize)
    # --------------------
    # special. TODO - re-factor
    if sort_idx is None:
        sort_idx = np.arange([x for x in signal_dict.values()][0].shape[0])
    for i, (task, ax) in enumerate(zip(tasks, axes)):
        heatmap_contribution_profile(signal_dict[task][sort_idx],
                                     ax=ax, **kwargs)
        # --------------------
        ax.set_title(task)
    fig.subplots_adjust(wspace=0)  # no space between plots
    # Hide y tick labels and axes on all but the first panel.
    plt.setp([a.get_yticklabels() for a in fig.axes[1:]], visible=False)  # no numbers
    plt.setp([a.get_yaxis() for a in fig.axes[1:]], visible=False)  # no numbers
    return fig
def multiple_heatmaps(signal_dict, plot_fn, sort_idx=None, figsize=(20, 20), **kwargs):
    """Generic multi-panel plotter: apply *plot_fn* to each task's signal.

    ``plot_fn`` must accept (signal, ax=..., **kwargs), like the heatmap
    functions above.
    """
    tasks = list(signal_dict.keys())
    fig, axes = plt.subplots(1, len(tasks), figsize=figsize)
    if sort_idx is None:
        sort_idx = np.arange([x for x in signal_dict.values()][0].shape[0])
    for i, (task, ax) in enumerate(zip(tasks, axes)):
        plot_fn(signal_dict[task][sort_idx],
                ax=ax, **kwargs)
        ax.set_title(task)
    fig.subplots_adjust(wspace=0)  # no space between plots
    # Hide y tick labels and axes on all but the first panel.
    plt.setp([a.get_yticklabels() for a in fig.axes[1:]], visible=False)  # no numbers
    plt.setp([a.get_yaxis() for a in fig.axes[1:]], visible=False)  # no numbers
    return fig
def heatmap_sequence(one_hot, ax=None, sort_idx=None, aspect='auto',
                     figsize_tmpl=(8, 4), cbar=True, title=None):
    """Plot a heatmap of one-hot encoded sequences.

    Args:
        one_hot: array of shape (n_seqs, seq_len, 4); argmax over the last
            axis yields the base index per position
        ax: existing axes to draw into; a new figure is created if None
        sort_idx: optional row order for the displayed sequences
        aspect: imshow aspect ratio
        figsize_tmpl: figure-size template scaled by sequence dimensions
        cbar: whether to add the base-identity colorbar on top
        title: optional axes title

    Returns:
        The newly created figure, or None when ``ax`` was supplied.
    """
    if ax is None:
        figsize = (figsize_tmpl[0] * one_hot.shape[1] / 200,
                   figsize_tmpl[1] * one_hot.shape[0] / 2000)
        fig, ax = plt.subplots(figsize=figsize)
    else:
        # Bug fix: `fig` was previously left unbound on this path, so the
        # final `return fig` raised UnboundLocalError whenever a caller
        # supplied its own axes (heatmap_stranded_profile already sets
        # fig = None in the equivalent branch).
        fig = None
    if sort_idx is None:
        sort_idx = np.arange(one_hot.shape[0])
    # Four-color map for base indices 0..3; qrates[::-1] ("ACGT") labels
    # the colorbar ticks via the formatter below.
    cmap = colors.ListedColormap(["red", "orange", "blue", "green"][::-1])
    qrates = np.array(list("TGCA"))
    bounds = np.linspace(-.5, 3.5, 5)
    norm = colors.BoundaryNorm(bounds, 4)
    fmt = mpl.ticker.FuncFormatter(lambda x, pos: qrates[::-1][norm(x)])
    img = ax.imshow(one_hot.argmax(axis=-1)[sort_idx], aspect=aspect, cmap=cmap, norm=norm, alpha=0.8)
    if cbar:
        ax2_divider = make_axes_locatable(ax)
        cax2 = ax2_divider.append_axes("top", size="5%", pad=0.05)
        cb2 = colorbar(img, cax=cax2, cmap=cmap, norm=norm, boundaries=bounds,
                       orientation="horizontal",
                       ticks=[0, 1, 2, 3], format=fmt)
        cax2.xaxis.set_ticks_position("top")
    seq_len = one_hot.shape[1]
    # Relabel x ticks so that position 0 is the sequence center.
    ticks = np.arange(0, seq_len + 1, 25)
    ax.set_xticks(ticks)
    ax.set_xticklabels(ticks - seq_len // 2)
    ax.set_ylabel("Seqlet index")
    ax.set_xlabel("Position")
    if title is not None:
        ax.set_title(title)
    return fig
| [
"zigaavsec@gmail.com"
] | zigaavsec@gmail.com |
5abe4e81b57cf4a719ec1b46db196a11ad7279e1 | 6571b77f6e6f37d6df91a9cf0c34297a2bee1eb9 | /site-packages/django_filters/widgets.py | 71273067cebeced8bf0a13d9ab157326406e0fa5 | [
"Apache-2.0"
] | permissive | suntao789/Aclsm | ec02a04bb3ba14a1ea6a6c82a325da59d192d0f7 | 2202201c8279391386a4569e69f93d90eca5b96a | refs/heads/master | 2020-04-01T22:39:02.140836 | 2018-10-19T03:49:14 | 2018-10-19T03:49:14 | 153,719,890 | 0 | 0 | Apache-2.0 | 2018-10-19T03:11:46 | 2018-10-19T03:11:46 | null | UTF-8 | Python | false | false | 5,735 | py | from __future__ import absolute_import
from __future__ import unicode_literals
from collections import Iterable
from itertools import chain
try:
from urllib.parse import urlencode
except:
from urllib import urlencode # noqa
from django import forms
from django.db.models.fields import BLANK_CHOICE_DASH
from django.forms.widgets import flatatt
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.six import string_types
from django.utils.translation import ugettext as _
from .compat import format_value
class LinkWidget(forms.Widget):
    """Render filter choices as a <ul> of links instead of a form input.
    Each choice becomes an <a> whose query string re-encodes the current
    GET data with this filter's parameter set to that choice's value.
    """
    def __init__(self, attrs=None, choices=()):
        super(LinkWidget, self).__init__(attrs)
        self.choices = choices
    def value_from_datadict(self, data, files, name):
        value = super(LinkWidget, self).value_from_datadict(data, files, name)
        # Keep the raw GET data around; render_option() rebuilds the
        # per-choice query strings from it.
        self.data = data
        return value
    def render(self, name, value, attrs=None, choices=()):
        if not hasattr(self, 'data'):
            self.data = {}
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs)
        output = ['<ul%s>' % flatatt(final_attrs)]
        options = self.render_options(choices, [value], name)
        if options:
            output.append(options)
        output.append('</ul>')
        return mark_safe('\n'.join(output))
    def render_options(self, choices, selected_choices, name):
        selected_choices = set(force_text(v) for v in selected_choices)
        output = []
        for option_value, option_label in chain(self.choices, choices):
            if isinstance(option_label, (list, tuple)):
                # Grouped choices: flatten the inner (value, label) pairs.
                for option in option_label:
                    output.append(
                        self.render_option(name, selected_choices, *option))
            else:
                output.append(
                    self.render_option(name, selected_choices,
                                       option_value, option_label))
        return '\n'.join(output)
    def render_option(self, name, selected_choices,
                      option_value, option_label):
        """Return one <li><a> entry linking to the current query string
        with this option's value substituted in for *name*."""
        option_value = force_text(option_value)
        if option_label == BLANK_CHOICE_DASH[0][1]:
            option_label = _("All")
        data = self.data.copy()
        data[name] = option_value
        # "selected" when choosing this option would not change the query,
        # or when it is explicitly in the selected set.
        selected = data == self.data or option_value in selected_choices
        try:
            # QueryDict can urlencode itself ...
            url = data.urlencode()
        except AttributeError:
            # ... plain dicts fall back to urllib's urlencode.
            url = urlencode(data)
        return self.option_string() % {
            'attrs': selected and ' class="selected"' or '',
            'query_string': url,
            'label': force_text(option_label)
        }
    def option_string(self):
        return '<li><a%(attrs)s href="?%(query_string)s">%(label)s</a></li>'
class RangeWidget(forms.MultiWidget):
    """Two text inputs rendered together, joined by a dash, for range values."""
    def __init__(self, attrs=None):
        sub_widgets = (forms.TextInput, forms.TextInput)
        super(RangeWidget, self).__init__(sub_widgets, attrs)
    def decompress(self, value):
        # Split a slice-like value into its [start, stop] pair.
        if not value:
            return [None, None]
        return [value.start, value.stop]
    def format_output(self, rendered_widgets):
        return '-'.join(rendered_widgets)
class LookupTypeWidget(forms.MultiWidget):
    """MultiWidget pairing a filter value with its lookup type."""
    def decompress(self, value):
        # A missing value maps to empty values for both sub-widgets.
        return [None, None] if value is None else value
class BooleanWidget(forms.Select):
    """Convert true/false values into the internal Python True/False.
    This can be used for AJAX queries that pass true/false from JavaScript's
    internal types through.
    """
    def __init__(self, attrs=None):
        # Fixed tri-state choice list: unknown / yes / no.
        choices = (('', _('Unknown')),
                   ('true', _('Yes')),
                   ('false', _('No')))
        super(BooleanWidget, self).__init__(attrs, choices)
    def render(self, name, value, attrs=None):
        # Normalize Python bools and '1'/'0' flags onto the 'true'/'false'
        # choice keys; anything unrecognized falls back to '' ("Unknown").
        # NOTE(review): a value already equal to 'true'/'false' is not in
        # this map and therefore renders as '' -- confirm intended.
        try:
            value = {
                True: 'true',
                False: 'false',
                '1': 'true',
                '0': 'false'
            }[value]
        except KeyError:
            value = ''
        return super(BooleanWidget, self).render(name, value, attrs)
    def value_from_datadict(self, data, files, name):
        """Map a submitted value onto True/False, or None when indeterminate."""
        value = data.get(name, None)
        if isinstance(value, string_types):
            value = value.lower()
        return {
            '1': True,
            '0': False,
            'true': True,
            'false': False,
            True: True,
            False: False,
        }.get(value, None)
class BaseCSVWidget(forms.Widget):
    """Widget mixin that parses/renders values as comma-separated lists."""
    def _isiterable(self, value):
        # Strings are iterable but must be treated as single values.
        return isinstance(value, Iterable) and not isinstance(value, string_types)
    def value_from_datadict(self, data, files, name):
        """Return the submitted value split on commas (None when absent)."""
        value = super(BaseCSVWidget, self).value_from_datadict(data, files, name)
        if value is not None:
            if value == '':  # empty value should parse as an empty list
                return []
            return value.split(',')
        return None
    def render(self, name, value, attrs=None):
        """Render via the main widget for 0/1 values, else as CSV text."""
        if not self._isiterable(value):
            value = [value]
        if len(value) <= 1:
            # delegate to main widget (Select, etc...) if not multiple values
            value = value[0] if value else value
            return super(BaseCSVWidget, self).render(name, value, attrs)
        # if we have multiple values, we need to force render as a text input
        # (otherwise, the additional values are lost)
        surrogate = forms.TextInput()
        value = [force_text(format_value(surrogate, v)) for v in value]
        value = ','.join(list(value))
        return surrogate.render(name, value, attrs)
class CSVWidget(BaseCSVWidget, forms.TextInput):
    """Plain text input whose value is parsed/rendered as comma-separated values."""
    pass
| [
"suntao789@huawei.com"
] | suntao789@huawei.com |
a7a264147c541b0bd26d2ec61751e182506f86d9 | 941c912f44beff33a072e086c1f561f6cdd64626 | /LeetCode/codes/Random/380.py | edddb84beba6a0dc20b0c03ff7747787b25659cb | [] | no_license | adreena/MyStudyCorner | 3a13a743769ed144965b767f547c16df4d0fa0dd | 355c0dbd32ad201800901f1bcc110550696bc96d | refs/heads/master | 2023-02-20T07:39:32.391421 | 2021-01-25T01:46:21 | 2021-01-25T01:46:21 | 255,104,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | # time : O(1)
# space: O(N)
import random
class RandomizedSet:
    """Insert / remove / get-random in average O(1).

    Backed by a list (for O(1) random choice) plus a value -> index map
    (for O(1) membership and removal).
    """

    def __init__(self):
        """Initialize an empty set."""
        self.data = []       # element storage; order is arbitrary
        self.data_idx = {}   # value -> its index in self.data
        # (Fix: the original used collections.defaultdict, which was never
        # imported here; a plain dict is all that is needed.)

    def insert(self, val: int) -> bool:
        """Insert val; return True if it was not already present. O(1)."""
        if val in self.data_idx:
            return False
        self.data_idx[val] = len(self.data)
        self.data.append(val)
        return True

    def remove(self, val: int) -> bool:
        """Remove val; return True if it was present.

        Fix: membership is now tested against the index map (O(1)); the
        original scanned the list (`val in self.data`, O(n)), defeating
        the whole point of the structure.  Removal swaps the victim with
        the list's last element so the pop never shifts other entries.
        """
        if val not in self.data_idx:
            return False
        idx = self.data_idx[val]
        last = self.data[-1]
        self.data[idx] = last
        self.data_idx[last] = idx
        self.data.pop()
        del self.data_idx[val]
        return True

    def getRandom(self) -> int:
        """Return a uniformly random element. O(1)."""
        return random.choice(self.data)
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom() | [
"kim.hszd@gmail.com"
] | kim.hszd@gmail.com |
1795de05ea7fffb09adc945433f45f9ccf7a70e5 | 1bd14e051251d08393731c03ccfb37a324227e1c | /tests/canned/iam/test_canned_iam_for_ec2.py | 6ef63de5bd692eb58c1cdf704dcc5f0437b222c8 | [
"MIT"
] | permissive | tsuttsu305/troposphere_mate-project | f04bb6a3d137be3e265652c626008edfbb670b55 | 15ee94cc913efb32bc991979efcad943c992074c | refs/heads/master | 2023-06-07T15:07:47.041944 | 2021-07-05T02:02:00 | 2021-07-05T02:02:00 | 285,152,616 | 0 | 0 | MIT | 2020-08-05T02:08:01 | 2020-08-05T02:08:00 | null | UTF-8 | Python | false | false | 502 | py | # -*- coding: utf-8 -*-
import pytest
from troposphere_mate.canned.iam import CannedCommonEc2IamRole
def test():
    """Smoke test: the canned EC2 IAM template builds and exposes both resources."""
    role = CannedCommonEc2IamRole(
        PROJECT_NAME="my_project",
        STAGE="dev",
    )
    role.create_template()
    assert role.iam_role_ec2_s3_full_access is not None
    assert role.iam_instance_profile_ec2_s3_full_access is not None
if __name__ == "__main__":
    # Allow running this test module directly: invoke pytest on just this
    # file, with output capture disabled and native tracebacks.
    import os
    basename = os.path.basename(__file__)
    pytest.main([basename, "-s", "--tb=native"])
| [
"husanhe@gmail.com"
] | husanhe@gmail.com |
6d0c7b923cd03dc1ebd00d71e3aff652e8fe40fe | 6aa8fd438e12e4e285d9b89be15e211e607821e0 | /.metadata/.plugins/org.eclipse.core.resources/.history/37/00bbfa0851aa00141441885f3a9b55b3 | c8408093efc3ba0346a327c9cdb03e9b5b77b39e | [] | no_license | phoenixproject/python | 2aa251c9fe9a3a665043d5f3d29d48c0f95b9273 | f8171d31d1d33a269d29374e7605a8f5bce6b5d6 | refs/heads/master | 2021-03-12T19:15:01.611936 | 2015-02-04T08:25:27 | 2015-02-04T08:25:27 | 30,287,884 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | #!/usr/bin/python3
def main():
    """Demonstrate funcao/funcao2 with a few starting values."""
    for valor_inicial in (5, 3):
        funcao(valor_inicial)
    funcao2(4)
    funcao2()
def funcao(inicio):
    """Print the integers from *inicio* through 9, space-separated, then a newline."""
    valor = inicio
    while valor < 10:
        print(valor, end=' ')
        valor += 1
    print()
def funcao2(inicio = 6):
    """Like funcao, but *inicio* defaults to 6."""
    valor = inicio
    while valor < 10:
        print(valor, end=' ')
        valor += 1
    print()
# Entry point when the file is executed as a script.
if __name__ == "__main__" : main()
"phoenixproject.erp@gmail.com"
] | phoenixproject.erp@gmail.com | |
329ea89cee88744fd323d5dcbb08aadd70796f68 | cefd6c17774b5c94240d57adccef57d9bba4a2e9 | /WebKit/Tools/Scripts/webkitpy/style/checkers/common_unittest.py | 8449b989ba3d42b8a46f20b8b30723adffd8dda0 | [
"BSL-1.0"
] | permissive | adzhou/oragle | 9c054c25b24ff0a65cb9639bafd02aac2bcdce8b | 5442d418b87d0da161429ffa5cb83777e9b38e4d | refs/heads/master | 2022-11-01T05:04:59.368831 | 2014-03-12T15:50:08 | 2014-03-12T15:50:08 | 17,238,063 | 0 | 1 | BSL-1.0 | 2022-10-18T04:23:53 | 2014-02-27T05:39:44 | C++ | UTF-8 | Python | false | false | 5,220 | py | # Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for common.py."""
import unittest2 as unittest
from common import CarriageReturnChecker
from common import TabChecker
# FIXME: The unit tests for the cpp, text, and common checkers should
# share supporting test code. This can include, for example, the
# mock style error handling code and the code to check that all
# of a checker's categories are covered by the unit tests.
# Such shared code can be located in a shared test file, perhaps
# even this file.
class CarriageReturnCheckerTest(unittest.TestCase):
    """Tests check_no_carriage_return()."""
    # Expected fields of each reported style error.
    _category = "whitespace/carriage_return"
    _confidence = 1
    _expected_message = ("One or more unexpected \\r (^M) found; "
                         "better to use only a \\n")
    def setUp(self):
        self._style_errors = []  # The list of accumulated style errors.
    def _mock_style_error_handler(self, line_number, category, confidence,
                                  message):
        """Append the error information to the list of style errors."""
        error = (line_number, category, confidence, message)
        self._style_errors.append(error)
    def assert_carriage_return(self, input_lines, expected_lines, error_lines):
        """Process the given line and assert that the result is correct.

        error_lines lists the 1-based line numbers expected to be flagged.
        """
        handle_style_error = self._mock_style_error_handler
        checker = CarriageReturnChecker(handle_style_error)
        output_lines = checker.check(input_lines)
        # Check both the return value and error messages.
        self.assertEqual(output_lines, expected_lines)
        expected_errors = [(line_number, self._category, self._confidence,
                            self._expected_message)
                           for line_number in error_lines]
        self.assertEqual(self._style_errors, expected_errors)
    def test_ends_with_carriage(self):
        # Trailing \r is stripped and reported.
        self.assert_carriage_return(["carriage return\r"],
                                    ["carriage return"],
                                    [1])
    def test_ends_with_nothing(self):
        self.assert_carriage_return(["no carriage return"],
                                    ["no carriage return"],
                                    [])
    def test_ends_with_newline(self):
        # A plain \n terminator is left untouched.
        self.assert_carriage_return(["no carriage return\n"],
                                    ["no carriage return\n"],
                                    [])
    def test_carriage_in_middle(self):
        # The CarriageReturnChecker checks only the final character
        # of each line.
        self.assert_carriage_return(["carriage\r in a string"],
                                    ["carriage\r in a string"],
                                    [])
    def test_multiple_errors(self):
        self.assert_carriage_return(["line1", "line2\r", "line3\r"],
                                    ["line1", "line2", "line3"],
                                    [2, 3])
class TabCheckerTest(unittest.TestCase):
    """Tests for TabChecker."""
    def assert_tab(self, input_lines, error_lines):
        """Assert when the given lines contain tabs.

        error_lines lists the 1-based line numbers expected to be flagged.
        """
        self._error_lines = []
        def style_error_handler(line_number, category, confidence, message):
            # Every tab error must carry this exact category/confidence/message.
            self.assertEqual(category, 'whitespace/tab')
            self.assertEqual(confidence, 5)
            self.assertEqual(message, 'Line contains tab character.')
            self._error_lines.append(line_number)
        checker = TabChecker('', style_error_handler)
        checker.check(input_lines)
        self.assertEqual(self._error_lines, error_lines)
    def test_notab(self):
        self.assert_tab([''], [])
        self.assert_tab(['foo', 'bar'], [])
    def test_tab(self):
        # Tabs anywhere in a line are flagged, not just leading ones.
        self.assert_tab(['\tfoo'], [1])
        self.assert_tab(['line1', '\tline2', 'line3\t'], [2, 3])
| [
"adzhou@hp.com"
] | adzhou@hp.com |
024c549f319392d611692ce42b6b7f4c97d7b21c | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/73da2663a3d0129f1b329995fe72d2e1c74c4aae-<get_host_info_dict_from_instance>-fix.py | 428eeea9b87eb087cb4f0cabe245d3e0b822a208 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,024 | py | def get_host_info_dict_from_instance(self, instance):
instance_vars = {
}
for key in vars(instance):
value = getattr(instance, key)
key = self.to_safe(('ec2_' + key))
if (key == 'ec2__state'):
instance_vars['ec2_state'] = (instance.state or '')
instance_vars['ec2_state_code'] = instance.state_code
elif (key == 'ec2__previous_state'):
instance_vars['ec2_previous_state'] = (instance.previous_state or '')
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif (type(value) in [int, bool]):
instance_vars[key] = value
elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
elif (type(value) == type(None)):
instance_vars[key] = ''
elif (key == 'ec2_region'):
instance_vars[key] = value.name
elif (key == 'ec2__placement'):
instance_vars['ec2_placement'] = value.zone
elif (key == 'ec2_tags'):
for (k, v) in value.items():
if (self.expand_csv_tags and (',' in v)):
v = list(map((lambda x: x.strip()), v.split(',')))
key = self.to_safe(('ec2_tag_' + k))
instance_vars[key] = v
elif (key == 'ec2_groups'):
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars['ec2_security_group_ids'] = ','.join([str(i) for i in group_ids])
instance_vars['ec2_security_group_names'] = ','.join([str(i) for i in group_names])
elif (key == 'ec2_block_device_mapping'):
instance_vars['ec2_block_devices'] = {
}
for (k, v) in value.items():
instance_vars['ec2_block_devices'][os.path.basename(k)] = v.volume_id
else:
pass
return instance_vars | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
036df0aa746ad7fecfa5b6e2cc6a5c8838772084 | 3b863f7e7efea09f5a120eb8c323e5a3742b82b3 | /DNA_Construction/DNA_ConstructionMed/DNA_ConstructionMed.pyde | 6eb99e012823392d7ca02dcfead15b85d3f66ccc | [] | no_license | TriceG/DNA_Project | b6096fbc91c35621b659dd5154a1972a9674d881 | 469df295120fbfe32070fd973c55f36b2af99341 | refs/heads/master | 2021-01-19T21:32:24.914550 | 2017-06-20T14:15:27 | 2017-06-20T14:15:27 | 88,661,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | pyde | from Shape_Classes import*
# Draggable pieces of a DNA nucleotide (shape classes from Shape_Classes).
# phosphate group (circle)
phosphate = Circle(200, 400, 10)
# 5-carbon sugar (pentagon)
sugar = Pentagon(375, 400)
# nitrogenous base (rectangle; labelled "Nitrogenous Base" in draw())
amino = Rectangle(530, 400, 28, 13)
minutes = 0    # elapsed minutes shown by the timer in draw()
start = True   # True while the puzzle is unsolved and the timer runs
def setup():
    # Processing entry point: 800x500 canvas; ellipses and rectangles are
    # positioned by their centers throughout.
    size (800, 500)
    ellipseMode(CENTER)
    rectMode(CENTER)
# Draw a pentagon with side length 15 centered on (x, y) -- the sugar molecule.
def sugarMol(x, y):
    beginShape()
    vertex(x-12.14, y-3.94)
    vertex(x, y-12.76)
    vertex(x+12.14, y-3.94)
    vertex(x+7.5, y+10.32)
    vertex(x-7.5, y+10.32)
    # NOTE(review): closes back near the first vertex but uses y-3.95
    # instead of y-3.94 -- presumably a typo; visually negligible.
    vertex(x-12.14, y-3.95)
    endShape()
def distance(ax, ay, bx, by):
    """Return True when (ax, ay) and (bx, by) are less than 20 px apart.

    Compares squared distances, which avoids the sqrt() call (plus the
    redundant abs() the original wrapped around it -- sqrt of a sum of
    squares is never negative) and no longer shadows the function's own
    name with a local variable.
    """
    return (bx - ax) ** 2 + (by - ay) ** 2 < 400  # 400 == 20 ** 2
s = ""
def draw():
global minutes, start, s
background(255)
fill(0)
text("Phosphate", 175, 375)
text("5-Carbon Sugar", 325, 375)
text("Nitrogenous Base", 485, 375)
#TIMER#
if start:
millisecs = int(millis()/100)%10
seconds = int(millis()/1000)%60
if seconds >= 60:
minutes+= 1
s = "Time Elapsed: " + str(minutes) + ":" + str(seconds) + "." + str(millisecs)
text(s, 600, 100)
#if mouse over circle
if phosphate.overCircle() or sugar.overSugar() or amino.overRect():
cursor(HAND)
#if mouse not over circle
else:
cursor(ARROW)
#draw shapes to be dragged
sugarMol(sugar.x, sugar.y)
if distance(sugar.x, sugar.y, phosphate.x, phosphate.y):
lockP = True
phosphate.x = sugar.x
phosphate.y = sugar.y
else:
lockP = False
if not lockP:
ellipse(phosphate.x, phosphate.y, 2*phosphate.r, 2*phosphate.r)
phosphate.drag()
sugar.drag()
elif lockP:
ellipse(sugar.x-20, sugar.y-20, 2*phosphate.r, 2*phosphate.r)
sugar.drag()
if distance(sugar.x, sugar.y, amino.x, amino.y):
lockA = True
amino.x = sugar.x
amino.y = sugar.y
else:
lockA = False
if not lockA:
rect(amino.x, amino.y, amino.l, amino.w)
amino.drag()
elif lockA:
rect(sugar.x+30, sugar.y, amino.l, amino.w)
sugar.drag()
if lockP and lockA:
start = False
textSize(16)
text(s, 300, 100)
fill(238, 18, 255)
textSize(30)
rect(75, 450, 75, 45)
fill(0)
text("Quit", 40, 460)
textSize(12)
def mouseDragged():
    # Processing event hook; dragging is handled by each shape's drag()
    # called from draw(), so nothing to do here (body is a no-op string).
    " "
"none@none"
] | none@none |
a92dd447f189a511b8441dbf4801e14097fe467c | c91eac635507950941003dd79a494a95cd39dc77 | /src/exceptions/exceptions.py | 7f87689399976acf0c1a600242b25e9669cc4a68 | [] | no_license | GabrielPenaU3F/confiabilidad-software | 29b064cc9f866c06833cf6afc0bc424fd20619c6 | c57572ec3f9fba01331718d892d94d720cc5d04d | refs/heads/master | 2023-03-19T01:47:40.939503 | 2021-03-17T02:03:39 | 2021-03-17T02:03:39 | 193,144,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | class ExceptionWithMessage(Exception):
def __init__(self, arg):
self.strerror = str(arg)
self.args = tuple(arg)
super().__init__()
class NotAdmittedFormatException(ExceptionWithMessage):
pass
class InvalidArgumentException(ExceptionWithMessage):
pass
class InvalidFitException(ExceptionWithMessage):
pass
class InvalidStageDefinitionException(ExceptionWithMessage):
pass
| [
"gpena@untref.edu.ar"
] | gpena@untref.edu.ar |
f9da48dddbc1aefcaba92b272f261c45279c3946 | 67d8173a716da10a7350213d98938aae9f2115ce | /ProgrammingCourses/CS61A/week04/exam_prep_02.py | ba6e859cb03136814c34b20a47501001049ff505 | [] | no_license | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 3,275 | py | """CS61A Exam Prep 02: Recursion & Lamda Functions"""
# Express yourself
def kbonacci(n, k):
"""Return element N of a K-bonacci sequence.
>>> kbonacci(3, 4)
1
>>> kbonacci(9, 4)
29
>>> kbonacci(4, 2)
3
>>> kbonacci(8, 2)
21
"""
if n < k - 1:
return 0
elif n == k - 1:
return 1
else:
total = 0
i = n - k
while i < n:
total = total + kbonacci(i, k)
i = i + 1
return total
# Non recursion version
# if n < k - 1:
# return 0
# elif n == k - 1:
# return 1
# else:
# result = [0] * (k - 1) + [1]
# for i in range(0, n-k+1):
# temp = sum([result[x] for x in range(-1*k, 0)])
# result.append(temp)
# return result[-1]
# Combine Reverse and Remove
def combine(left, right):
"""Return all of LEFT"s digits followed by all of RIGHT"s digits."""
factor = 1
while factor <= right:
factor = factor * 10
return left * factor + right
def reverse(n):
"""Return the digits of N in reverse.
>>> reverse(122543)
345221
"""
if n < 10:
return n
else:
return combine(n % 10, reverse(n // 10))
def remove(n, digit):
"""Return all digits of N that are not DIGIT, for DIGIT less than 10.
>>> remove(243132, 3)
2412
>>> remove(remove(243132, 1), 2)
433
"""
# removed = 0
# while n != 0:
# sample, n = n % 10, n // 10
# if sample != digit:
# removed = removed * 10 + sample
# return reverse(removed)
# optional and better:
removed = 0
while n != 0:
sample, n = n % 10, n // 10
if sample != digit:
removed = combine(sample, removed) # use combine to reverse on the run
return removed # then no need to reverse the whole thing again
# You complete Me
# (a)
square = lambda x: x * x
double = lambda x: 2 * x
def memory(x, f):
"""Return a higher-order function that prints its
memories.
>>> f = memory(3, lambda x: x)
>>> f = f(square)
3
>>> f = f(double)
9
>>> f = f(print)
6
>>> f = f(square)
3
None
"""
def g(h):
print(f(x))
return memory(x, h)
return g
# # (b)
# Add parentheses and single-digit integers in the blanks below so that the expression on the second line evaluates to 2015
lamb = lambda lamb: lambda: lamb + lamb
X = lamb(1000)() + (lambda b, c: b() * b() - c)(lamb(2), 1)
assert(X == 2015)
# Frog goes Croak
def mouse(n):
if n >= 10:
squeak = n // 100
n = frog(squeak) + n % 10
return n
def frog(croak):
if croak == 0:
return 1
else:
return 10 * mouse(croak + 1)
print(mouse(357))
# >>> 47
# If (s)he can wield the Hammer...
# # what would python show:
from operator import add
avengers = 6
def vision(avengers):
print(avengers)
return avengers + 1
def hawkeye(thor, hulk):
love = lambda black_widow: add(black_widow, hulk)
return thor(love)
def hammer(worthy, stone):
if worthy(stone) < stone:
return stone
elif worthy(stone) > stone:
return -stone
return 0
capt = lambda iron_man: iron_man(avengers)
print(capt(vision))
| [
"30805062+jxie0755@users.noreply.github.com"
] | 30805062+jxie0755@users.noreply.github.com |
fdb2ab950b373515c3e9ecdc1eb3d8c1f1318b75 | 363f7c43dbbf36cec32c83a8d95d410d5715b39b | /7-kyu/sushi-go.py | 819544c231dc73139fab1fd80cbe10ac538ca89d | [] | no_license | artbohr/codewars-algorithms-in-python | cffc7c24d141c8ca1f313405486835cb74616009 | 1cb87c5b74076556ce649eb615dd2a3d3c51b437 | refs/heads/master | 2021-06-17T10:11:18.872347 | 2019-10-29T16:21:32 | 2019-10-29T16:21:32 | 104,100,778 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | def total_bill(s):
num = s.count('r')
return num * 2 if num <5 else (num - num // 5) * 2
'''
Sam has opened a new sushi train restaurant - a restaurant where sushi is served
on plates that travel around the bar on a conveyor belt and customers take the plate that they like.
Sam is using Glamazon's new visual recognition technology that allows a computer
to record the number of plates at a customer's table and the colour of those plates.
The number of plates is returned as a string. For example, if a customer has eaten 3
plates of sushi on a red plate the computer will return the string 'rrr'.
Currently, Sam is only serving sushi on red plates as he's trying to attract customers
to his restaurant. There are also small plates on the conveyor belt for condiments such
as ginger and wasabi - the computer notes these in the string that is returned as a space
('rrr r' //denotes 4 plates of red sushi and a plate of condiment).
Sam would like your help to write a program for the cashier's machine to read the string
and return the total amount a customer has to pay when they ask for the bill. The current
price for the dishes are as follows:
Red plates of sushi ('r') - $2 each, but if a customer eats 5 plates the 5th one is free.
Condiments (' ') - free.
Input: String
Output: Number
Examples:
Input: 'rr' Output: 4
Input: 'rr rrr' Output: 8
Input: 'rrrrr rrrrr' Output: 16
'''
| [
"bohrcs@gmail.com"
] | bohrcs@gmail.com |
265c489b63e1e6848a68187bb1975bcb6766fd95 | efd5d2e831bb9a22a4358f7db3d684f8e0e1e889 | /bitwise_and_of_numbers_range_201.py | 03062d07d95b581d5b2f29c8d5da146408ba68fe | [
"Apache-2.0"
] | permissive | danielsunzhongyuan/my_leetcode_in_python | b8112bac812fa0ea06121dec2c6c86d0a3d49e84 | 34d34280170c991ea7a28d74a3f2338753844917 | refs/heads/master | 2020-12-24T19:46:47.813091 | 2018-12-05T15:51:22 | 2018-12-05T15:51:22 | 56,480,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | """
Given a range [m, n] where 0 <= m <= n <= 2147483647, return the bitwise AND of all numbers in this range, inclusive.
Example 1:
Input: [5,7]
Output: 4
Example 2:
Input: [0,1]
Output: 0
"""
class Solution(object):
def rangeBitwiseAnd(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
# Solution One
# ans=n
# while ans>m:
# ans=ans&(ans-1)
# return ans
# Solution Two
count = 0
while m < n:
m >>= 1
n >>= 1
count += 1
return m << count
| [
"sunzhongyuan@lvwan.com"
] | sunzhongyuan@lvwan.com |
b5c6c510d1192fdfef21a15a7128b8ea522efbf6 | 8f080c90a6536ce9a66351dbdba7c6c839e31f4e | /server/conf/mssp.py | 8606a4a50f29a638a68c280cde78a67221e66605 | [
"MIT"
] | permissive | Reilena/arxcode | 358e9b21467c187e36cb47cd143d58842a5a6241 | e1a2006ee728f76ee0f1a7056d99d2fdfa8e4706 | refs/heads/stable_orphan | 2022-11-25T09:19:52.324389 | 2019-06-10T05:25:24 | 2019-06-14T13:22:06 | 148,411,502 | 5 | 7 | MIT | 2022-11-04T19:24:39 | 2018-09-12T02:47:55 | Python | UTF-8 | Python | false | false | 4,777 | py | """
MSSP (Mud Server Status Protocol) meta information
MUD website listings (that you have registered with) can use this
information to keep up-to-date with your game stats as you change
them. Also number of currently active players and uptime will
automatically be reported. You don't have to fill in everything
(and most are not used by all crawlers); leave the default
if so needed. You need to @reload the game before updated
information is made available to crawlers (reloading does not
affect uptime).
"""
MSSPTable = {
# Required fieldss
"NAME": "Arx: After the Reckoning",
# Generic
"CRAWL DELAY": "-1", # limit how often crawler updates the listing. -1 for no limit
"HOSTNAME": "play.arxgame.org", # current or new hostname
"PORT": ["3000"], # most important port should be last in list
"CODEBASE": "Evennia",
"CONTACT": "arxmush@gmail.com", # email for contacting the mud
"CREATED": "2016", # year MUD was created
"ICON": "http://play.arxgame.org/static/images/arx_badge_small.png", # url to icon 32x32 or larger; <32kb.
"IP": "", # current or new IP address
"LANGUAGE": "English", # name of language used, e.g. English
"LOCATION": "United States", # full English name of server country
"MINIMUM AGE": "18", # set to 0 if not applicable
"WEBSITE": "play.arxgame.org",
# Categorisation
"FAMILY": "Custom", # evennia goes under 'Custom'
"GENRE": "Fantasy", # Adult, Fantasy, Historical, Horror, Modern, None, or Science Fiction
"GAMEPLAY": "", # Adventure, Educational, Hack and Slash, None,
# Player versus Player, Player versus Environment,
# Roleplaying, Simulation, Social or Strategy
"STATUS": "Open Beta", # Alpha, Closed Beta, Open Beta, Live
"GAMESYSTEM": "Custom", # D&D, d20 System, World of Darkness, etc. Use Custom if homebrew
"INTERMUD": "IMC2", # evennia supports IMC2.
"SUBGENRE": "None", # LASG, Medieval Fantasy, World War II, Frankenstein,
# Cyberpunk, Dragonlance, etc. Or None if not available.
# World
"AREAS": "0",
"HELPFILES": "0",
"MOBILES": "0",
"OBJECTS": "0",
"ROOMS": "0", # use 0 if room-less
"CLASSES": "0", # use 0 if class-less
"LEVELS": "0", # use 0 if level-less
"RACES": "0", # use 0 if race-less
"SKILLS": "0", # use 0 if skill-less
# Protocols set to 1 or 0)
"ANSI": "1",
"GMCP": "0",
"MCCP": "0",
"MCP": "0",
"MSDP": "0",
"MSP": "0",
"MXP": "0",
"PUEBLO": "0",
"UTF-8": "1",
"VT100": "0",
"XTERM 256 COLORS": "1",
# Commercial set to 1 or 0)
"PAY TO PLAY": "0",
"PAY FOR PERKS": "0",
# Hiring set to 1 or 0)
"HIRING BUILDERS": "0",
"HIRING CODERS": "0",
# Extended variables
# World
"DBSIZE": "0",
"EXITS": "0",
"EXTRA DESCRIPTIONS": "0",
"MUDPROGS": "0",
"MUDTRIGS": "0",
"RESETS": "0",
# Game (set to 1 or 0, or one of the given alternatives)
"ADULT MATERIAL": "0",
"MULTICLASSING": "0",
"NEWBIE FRIENDLY": "0",
"PLAYER CITIES": "0",
"PLAYER CLANS": "0",
"PLAYER CRAFTING": "0",
"PLAYER GUILDS": "0",
"EQUIPMENT SYSTEM": "None", # "None", "Level", "Skill", "Both"
"MULTIPLAYING": "Restricted", # "None", "Restricted", "Full"
"PLAYERKILLING": "Restricted", # "None", "Restricted", "Full"
"QUEST SYSTEM": "None", # "None", "Immortal Run", "Automated", "Integrated"
"ROLEPLAYING": "Enforced", # "None", "Accepted", "Encouraged", "Enforced"
"TRAINING SYSTEM": "Skill", # "None", "Level", "Skill", "Both"
"WORLD ORIGINALITY": "All Original", # "All Stock", "Mostly Stock", "Mostly Original", "All Original"
# Protocols (only change if you added/removed something manually)
"ATCP": "0",
"MSDP": "0",
"MCCP": "1",
"SSL": "1",
"UTF-8": "1",
"ZMP": "0",
"XTERM 256 COLORS": "1"}
| [
"surly.mime@gmail.com"
] | surly.mime@gmail.com |
3e48c6dfc2e95ac82c49b0f7df885f1d83ff0582 | 849a174efea976d4daed419b85668c2ba05fd2b9 | /datastructures/linkedlist/singly/sum_last_n_nodes.py | 0e861631e4082ce98d56e2d459a94e2fbabaf10a | [] | no_license | samyuktahegde/Python | 61e6fedbdd2a94b29e4475621afa6d5e98bf49b8 | b02fa6e908661a918e0024f508df0192d5553411 | refs/heads/master | 2018-09-18T20:27:55.980689 | 2018-08-09T05:49:33 | 2018-08-09T05:49:33 | 116,491,078 | 0 | 0 | null | 2018-02-05T05:33:53 | 2018-01-06T14:52:16 | null | UTF-8 | Python | false | false | 910 | py | class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def print_list(self):
temp = self.head
while(temp):
print(temp.data)
temp = temp.next
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
def sum_last_n(self, n):
l_length = 0
temp = llist.head
stack = []
while temp is not None:
l_length+=1
stack.append(temp.data)
temp = temp.next
sum = 0
while n>0:
sum+=stack.pop()
n-=1
return sum
llist = LinkedList()
llist.push(1)
llist.push(2)
llist.push(3)
llist.push(4)
llist.print_list()
print(llist.sum_last_n(3)) | [
"noreply@github.com"
] | samyuktahegde.noreply@github.com |
2741cbf9e67792ce19740948ddc9cc46389b7ba9 | e0c56b4d18fa29e181af7751d2bea539a32ae738 | /aligulac/teamranks.py | 73ba72d12bb306991eeaa2309956544f634ccca1 | [] | no_license | Arzar/aligulac | 24a8beacd2d80b730633e6e7a15987d5c4c9a09d | 12dd057b723c87cb94554c859411c463852ab3e0 | refs/heads/master | 2021-01-15T18:36:06.783352 | 2013-07-01T19:59:07 | 2013-07-01T19:59:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,644 | py | #!/usr/bin/python
'''
This script recomputes the team rankings, all-kill or proleague.
./teamranks.py [ak|pl]
'''
# This is required to make Django imports work properly.
import os, sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "aligulac.settings")
from itertools import combinations
from random import shuffle
from ratings.models import Period, Rating, Group
from ratings.tools import filter_active_ratings
from simul.playerlist import make_player
from simul.formats.teamak import TeamAK
from simul.formats.teampl import TeamPL
# Get argument: proleague or allkill rank
try:
proleague = (sys.argv[1] == 'pl')
except:
proleague = False
# Setup
nplayers_max = 6 if proleague else 5
nplayers_needed = 6 if proleague else 1
Simulator = TeamPL if proleague else TeamAK
# Get a list of all teams that can compete
current_period = Period.objects.filter(computed=True).order_by('-id')[0]
teams = Group.objects.filter(active=True, is_team=True)
allowed_teams = []
for team in teams:
ratings = Rating.objects.filter(period=current_period,
player__groupmembership__group=team,
player__groupmembership__current=True,
player__groupmembership__playing=True)\
.exclude(player__race='S').exclude(player__race='R')
if filter_active_ratings(ratings).count() >= nplayers_needed:
allowed_teams.append(team)
nteams = len(allowed_teams)
# Prepare the score table
scores = dict()
for t in allowed_teams:
scores[t] = 0.0
# Loop over all pairs of teams
for (team_a, team_b) in combinations(allowed_teams, 2):
print team_a.name, '--', team_b.name
# Get player lists for both teams
players = []
for team in [team_a, team_b]:
ratings = Rating.objects.filter(period=current_period,
player__groupmembership__group=team,
player__groupmembership__current=True,
player__groupmembership__playing=True)\
.exclude(player__race='S').exclude(player__race='R')
ratings = list(filter_active_ratings(ratings).order_by('-rating')[:nplayers_max])
if not proleague:
# First six in random order, then strongest player for ace match
ace = ratings[0]
shuffle(ratings)
players.append(ratings + [ace])
else:
# Five players in order from weakest to strongest
players.append(ratings[::-1])
# Convert to player objects for the simul library
if proleague:
sim_players = [make_player(r.player) for r in players[0]] +\
[make_player(r.player) for r in players[1]]
else:
sim_players = [[make_player(r.player) for r in ratings] for ratings in players]
# Simulate the match
obj = Simulator(2)
obj.set_players(sim_players)
obj.compute()
# Add the scores
if proleague:
scores[team_a] += obj._tally[0].win/(nteams-1)
scores[team_b] += obj._tally[1].win/(nteams-1)
else:
scores[team_a] += obj._tally[0][1]/(nteams-1)
scores[team_b] += obj._tally[1][1]/(nteams-1)
# Write the scores to database
if proleague:
teams.update(scorepl=0.0)
else:
teams.update(scoreak=0.0)
allowed_teams = sorted(list(allowed_teams), key=lambda team: -scores[team])
for team in allowed_teams:
if proleague:
team.scorepl = scores[team]
else:
team.scoreak = scores[team]
team.save()
print '%5.2f%%: %s' % (100*scores[team], team.name)
| [
"evfonn@gmail.com"
] | evfonn@gmail.com |
a89ab82f48b155c45a67446e90ebee541ce84bae | 0049d7959ff872e2ddf6ea3ce83b6c26512425a6 | /multiAppProject/multiAppProject/wsgi.py | bb30b29af872e357937d2958a77cf99afa956bd4 | [] | no_license | srazor09/Django_projects | 9806ab25d966af780cdabe652a1792220c7806a8 | 8d664ba4c9478bd93c8e5bcbcaf594e8ffe6ce93 | refs/heads/master | 2023-04-18T02:13:15.993393 | 2021-05-04T20:34:05 | 2021-05-04T20:34:05 | 364,379,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for multiAppProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'multiAppProject.settings')
application = get_wsgi_application()
| [
"sourabhaws09@gmail.com"
] | sourabhaws09@gmail.com |
b18b2790001d74354419da074d2886b2e2ca2ae8 | 18b977dccd70e9e5a1b553b28ab0413fb3f54f4b | /SoftUni/Python Developmen/Python-Advanced/lists_as_stacks_and_queues/5_hot_potato.py | 4f0020b2adc98b71d47aaefadc69ab4336a89bc6 | [] | no_license | stevalang/Coding-Lessons | 7203e3a18b20e33e8d596e3dfb58d26c50b74530 | 2d0060c2268ad966efdcae4e6e994ac15e57243a | refs/heads/master | 2023-06-05T08:28:33.290530 | 2021-06-16T19:37:29 | 2021-06-16T19:37:29 | 284,852,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | from collections import deque
def solve(people, n):
players = deque(people)
while len(players) > 1:
players.rotate(-n)
print(f"Removed {players.pop()}")
winner = players.pop()
print(f'Last is {winner}')
solve(input().split(' '), int(input())) | [
"rbeecommerce@gmail.com"
] | rbeecommerce@gmail.com |
320011f1755ad3e78df560279a7ee599f455ae8e | 4702d2b5aac63b8179b47d946837965664824406 | /configs/paa/paa_r101_fpn_1x_minicoco.py | c0a6ea8e0e0e6d0d2766c44700f8daa191b61dbf | [
"Apache-2.0"
] | permissive | ruiningTang/mmdetection | 551bf276ee581667703cbe89c2872dc8e7f43bb8 | 100b0b5e0edddc45af0812b9f1474493c61671ef | refs/heads/master | 2021-12-02T05:58:03.301831 | 2021-11-20T10:58:35 | 2021-11-20T10:58:35 | 387,680,731 | 0 | 0 | Apache-2.0 | 2021-07-20T05:20:10 | 2021-07-20T05:20:09 | null | UTF-8 | Python | false | false | 2,201 | py | _base_ = [
'../_base_/datasets/minicoco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='PAA',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='PAAHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0001)
# work_dir
work_dir = './work_dirs/minicoco/paa/paa_r101_fpn_1x_minicoco'
| [
"tangruining@zju.edu.cn"
] | tangruining@zju.edu.cn |
0cbdb4892a93179fc09b69b38c129ce95e1329df | 707bd0e873ae25146f1341b40d8efbf5134025af | /ln2xevents/filtermodels.py | 7784221527e92664a752719aaabbffe2e2644276 | [] | no_license | yosmangel/djangoLn2x | 76acd748adfddb5b21ad3e0342e3d7eb19b81bc9 | 24d068458e8271aacfa98d762c0dc117e65d41cf | refs/heads/master | 2021-04-27T07:55:36.488176 | 2017-06-01T17:48:26 | 2017-06-01T17:48:26 | 122,641,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,359 | py | from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.conf import settings
from django_filters import rest_framework as filters
from ln2xevents.models import EventPage, CoursePage, PageContent, \
LEVELS, REGIONFILTER
from crispy_forms.helper import FormHelper
from datetime import datetime
from unifypage.models import MultilangKeyword
class EventPageFilter(filters.FilterSet):
lang_code = filters.MultipleChoiceFilter(name='lang_code', \
lookup_expr='exact', choices=settings.LANGUAGES, \
widget=forms.CheckboxSelectMultiple, initial='en', \
label=_('Language'))
keywords = filters.ModelMultipleChoiceFilter(
name='course__multilang_keywords',
queryset=MultilangKeyword.objects.all(),
widget=forms.CheckboxSelectMultiple,
label=_('Keywords'))
level = filters.MultipleChoiceFilter(name='course__level', \
lookup_expr='exact', choices=LEVELS, \
widget=forms.CheckboxSelectMultiple, \
label=_('Level'))
delivery_method = filters.MultipleChoiceFilter(name='course__delivery_format', \
lookup_expr='exact',\
choices= [ (key,cat) for key,cat in CoursePage.DELIVERY_FORMAT if key != 'e-Learning'], \
widget=forms.CheckboxSelectMultiple, \
label=_('Delivery Method'))
region = filters.MultipleChoiceFilter(name='macro_region', \
lookup_expr='exact', choices=REGIONFILTER, \
widget=forms.CheckboxSelectMultiple, \
label=_('Regions'))
o = filters.OrderingFilter(
choices=(
('start date', _('Start date')),
('course', _('Course')),
),
fields={
'start_date': 'start date',
'course__title': 'course',
},
initial= 'start date',
help_text=''
)
class Meta:
model = EventPage
fields = {
}
class CoursePageFilter(filters.FilterSet):
lang_code = filters.MultipleChoiceFilter(name='lang_code', \
lookup_expr='exact', choices=settings.LANGUAGES, \
widget=forms.CheckboxSelectMultiple, initial='en', \
label=_('Language'))
keywords = filters.ModelMultipleChoiceFilter(
name='multilang_keywords',
queryset=MultilangKeyword.objects.all(),
widget=forms.CheckboxSelectMultiple,
label=_('Keywords'))
level = filters.MultipleChoiceFilter(name='level', \
lookup_expr='exact', choices=LEVELS, \
widget=forms.CheckboxSelectMultiple, \
label=_('Level'))
delivery_method = filters.MultipleChoiceFilter(name='delivery_format', \
lookup_expr='exact', choices=CoursePage.DELIVERY_FORMAT, \
widget=forms.CheckboxSelectMultiple, \
label=_('Delivery Method'))
o = filters.OrderingFilter(
choices=(
('title', _('Title')),
),
fields={
'title': 'title',
},
initial= 'title',
help_text=''
)
class Meta:
model = CoursePage
fields = {
}
| [
"yosmangel_yk@hotmail.com"
] | yosmangel_yk@hotmail.com |
396b0fa47098f767cbd5d682614362f84969b99f | d05b260a9f81e708298d907b94610550a39bced2 | /Source_Code/DeIdentify.py | d109b7613081512c4a9fad691b980cd4ce1f4621 | [
"MIT-0"
] | permissive | aws-samples/serverless-stream-processing-at-scale | 4608b08a8304028c19d4d3efde28665e2c27831f | 80720f8bfcd4c1f49a90311c421ec5785181a181 | refs/heads/master | 2020-08-27T12:07:26.999867 | 2019-12-03T23:17:16 | 2019-12-03T23:17:16 | 217,362,247 | 8 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | import json
import boto3
import os
from decimal import Decimal
print('Loading function')
dynamodb = boto3.resource('dynamodb', region_name='us-west-2')
firehose = boto3.client('firehose')
def lambda_handler(event, context):
# print IoT message
iot_msg = json.dumps(event)
print('Received event: ',iot_msg)
# put PHI/PII into dynamo
table = dynamodb.Table(os.environ['TableName'])
response = table.put_item(
Item={
'patient_id': event["patient_id"],
'timestamp': event["timestamp"],
'device_id': event["device_id"],
'name': event["name"],
'dob': event["dob"],
'temp': Decimal(str(event["temp"])),
'oxygen_percent': Decimal(str(event["oxygen_percent"]))
}
)
print('Dynamo PutItem: ',json.dumps(response))
# de-identify data
event["name"] = ''
event["dob"] = ''
event["temp"] = None
event["oxygen_percent"] = None
de_identified = json.dumps(event)
print('De-Identified: ',de_identified)
# put de-identified data into kinesis
response = firehose.put_record(
DeliveryStreamName=os.environ['DeliveryStream'],
Record={
'Data': de_identified.encode()
}
)
print('Kinesis Firehose PutRecord: ',json.dumps(response))
| [
"noreply@github.com"
] | aws-samples.noreply@github.com |
f41f71f436f3abb5d5aa16f4c5eab45a7a96ac20 | 3d7f92b6de3a5dfab7cf4bc14783481124a00233 | /sandbox/team_members/pudumula/ros/ros_serial/build/rosserial/rosserial_msgs/catkin_generated/pkg.develspace.context.pc.py | 238d518d15808230a7a74c7379a1064549febd88 | [
"Apache-2.0"
] | permissive | Project-Heisenberg/quantum | ec24a262df8d39c56432a38c0a32aca5a3d8ee87 | f3ad8f4693007e45e80a88f928273adcfdc8529d | refs/heads/master | 2021-05-04T10:38:43.692806 | 2017-05-01T07:09:24 | 2017-05-01T07:09:24 | 54,696,027 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/neo/ros/ros_serial/devel/include".split(';') if "/home/neo/ros/ros_serial/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosserial_msgs"
PROJECT_SPACE_DIR = "/home/neo/ros/ros_serial/devel"
PROJECT_VERSION = "0.7.5"
| [
"you@example.com"
] | you@example.com |
7f717bdf86af9d795766de8d2aad2f239ca63c9e | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/storage_profile_py3.py | e3b5222965bb916597c0e97b3ab715b046672d8a | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,421 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageProfile(Model):
"""Specifies the storage settings for the virtual machine disks.
:param image_reference: Specifies information about the image to use. You
can specify information about platform images, marketplace images, or
virtual machine images. This element is required when you want to use a
platform image, marketplace image, or virtual machine image, but is not
used in other creation operations.
:type image_reference:
~azure.mgmt.compute.v2018_10_01.models.ImageReference
:param os_disk: Specifies information about the operating system disk used
by the virtual machine. <br><br> For more information about disks, see
[About disks and VHDs for Azure virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
:type os_disk: ~azure.mgmt.compute.v2018_10_01.models.OSDisk
:param data_disks: Specifies the parameters that are used to add a data
disk to a virtual machine. <br><br> For more information about disks, see
[About disks and VHDs for Azure virtual
machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
:type data_disks: list[~azure.mgmt.compute.v2018_10_01.models.DataDisk]
"""
_attribute_map = {
'image_reference': {'key': 'imageReference', 'type': 'ImageReference'},
'os_disk': {'key': 'osDisk', 'type': 'OSDisk'},
'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'},
}
def __init__(self, *, image_reference=None, os_disk=None, data_disks=None, **kwargs) -> None:
super(StorageProfile, self).__init__(**kwargs)
self.image_reference = image_reference
self.os_disk = os_disk
self.data_disks = data_disks
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
e95f0ae0f274cc543657a96ce3d4226adfc38387 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=91/params.py | 49c91e254bad8a2701700e2f8791e841eb918bdc | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.583905',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 91,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
dddd32db56f39b48919d8dcbc8ccb6a515bf949b | 714b3afd9089063c88c23c003807af825576c9e6 | /tests/test_factories.py | e13dd12e13a18b4642cceda694754ccd813fff12 | [
"MIT"
] | permissive | sdementen/piecash | db944267c1b0502ceb3ceda51688d4a75d89f747 | ec30cf469198cccf35f7ba968f889d360cfe1824 | refs/heads/master | 2022-12-30T05:57:06.631198 | 2021-10-19T08:42:33 | 2021-10-19T08:42:33 | 26,486,077 | 283 | 93 | NOASSERTION | 2023-02-28T17:15:03 | 2014-11-11T13:27:51 | Python | UTF-8 | Python | false | false | 5,752 | py | # coding=utf-8
from __future__ import unicode_literals
from datetime import datetime
from decimal import Decimal
import pytest
import pytz
import tzlocal
from piecash import GnucashException, Commodity
from piecash.core import factories
from test_helper import (
db_sqlite_uri,
db_sqlite,
new_book,
new_book_USD,
book_uri,
book_basic,
needweb,
)
# dummy line to avoid removing unused symbols
a = db_sqlite_uri, db_sqlite, new_book, new_book_USD, book_uri, book_basic
class TestFactoriesCommodities(object):
    """Tests for piecash.core.factories helpers that create commodity/stock accounts."""

    def test_create_stock_accounts_simple(self, book_basic):
        # Passing a currency instead of a stock commodity is invalid and must raise.
        with pytest.raises(GnucashException):
            factories.create_stock_accounts(
                book_basic.default_currency,
                broker_account=book_basic.accounts(name="broker"),
            )
        broker = book_basic.accounts(name="broker")
        appl = Commodity(namespace="NMS", mnemonic="AAPL", fullname="Apple")
        acc, inc_accounts = factories.create_stock_accounts(appl, broker_account=broker)
        # Without an income_account argument, no income accounts are created
        # and the new stock account hangs under the broker account.
        assert inc_accounts == []
        assert broker.children == [acc]

    def test_create_stock_accounts_incomeaccounts(self, book_basic):
        broker = book_basic.accounts(name="broker")
        income = book_basic.accounts(name="inc")
        appl = Commodity(namespace="NMS", mnemonic="AAPL", fullname="Apple")
        appl["quoted_currency"] = "USD"
        # Each income_account_types code ("D"ividend, "CL"/"CS" capital gains
        # long/short, "I"nterest) yields exactly one income account per call.
        acc, inc_accounts = factories.create_stock_accounts(
            appl, broker_account=broker, income_account=income, income_account_types="D"
        )
        assert len(inc_accounts) == 1
        acc, inc_accounts = factories.create_stock_accounts(
            appl,
            broker_account=broker,
            income_account=income,
            income_account_types="CL",
        )
        assert len(inc_accounts) == 1
        acc, inc_accounts = factories.create_stock_accounts(
            appl,
            broker_account=broker,
            income_account=income,
            income_account_types="CS",
        )
        assert len(inc_accounts) == 1
        acc, inc_accounts = factories.create_stock_accounts(
            appl, broker_account=broker, income_account=income, income_account_types="I"
        )
        assert len(inc_accounts) == 1
        # Requesting all four types at once creates four child income accounts.
        acc, inc_accounts = factories.create_stock_accounts(
            appl,
            broker_account=broker,
            income_account=income,
            income_account_types="D/CL/CS/I",
        )
        assert len(income.children) == 4
        book_basic.flush()
        # The returned accounts are the leaves; their parents are the four
        # per-type accounts under "inc" (guid sort makes ordering irrelevant).
        assert sorted(income.children, key=lambda x: x.guid) == sorted(
            [_acc.parent for _acc in inc_accounts], key=lambda x: x.guid
        )
        assert broker.children == [acc]

    @needweb
    def test_create_stock_from_symbol(self, book_basic):
        # Hits the network (quote provider); skipped when offline via @needweb.
        assert len(book_basic.commodities) == 2
        factories.create_stock_from_symbol("AAPL", book_basic)
        assert len(book_basic.commodities) == 3
        cdty = book_basic.commodities(mnemonic="AAPL")
        assert cdty.namespace == "NMS"
        assert cdty.quote_tz == "America/New_York"
        assert cdty.quote_source == "yahoo"
        assert cdty.mnemonic == "AAPL"
        assert cdty.fullname == "Apple Inc."

    def test_create_currency_from_ISO(self, book_basic):
        assert factories.create_currency_from_ISO("CAD").fullname == "Canadian Dollar"
        # Unknown ISO code must raise.
        with pytest.raises(ValueError):
            factories.create_currency_from_ISO("EFR").fullname
class TestFactoriesTransactions(object):
    """Tests for factories.single_transaction (two-split transaction helper)."""

    def test_single_transaction(self, book_basic):
        today = datetime.today()
        print("today=", today)
        factories.single_transaction(
            today.date(),
            today,
            "my test",
            Decimal(100),
            from_account=book_basic.accounts(name="inc"),
            to_account=book_basic.accounts(name="asset"),
        )
        book_basic.save()
        tr = book_basic.transactions(description="my test")
        assert len(tr.splits) == 2
        sp1, sp2 = tr.splits
        # Normalize split order so sp1 is always the negative (source) split.
        if sp1.value > 0:
            sp2, sp1 = sp1, sp2
        # sp1 has negative value
        assert sp1.account == book_basic.accounts(name="inc")
        assert sp2.account == book_basic.accounts(name="asset")
        assert sp1.value == -sp2.value
        assert sp1.quantity == sp1.value
        # A naive datetime is localized to the machine's timezone on save;
        # enter_date keeps the time (seconds precision), post_date is a date.
        assert tr.enter_date == pytz.timezone(str(tzlocal.get_localzone())).localize(
            today.replace(microsecond=0)
        )
        assert tr.post_date == pytz.timezone(str(tzlocal.get_localzone())).localize(today).date()

    def test_single_transaction_tz(self, book_basic):
        # Already-aware datetimes must round-trip unchanged (modulo microseconds).
        today = pytz.timezone(str(tzlocal.get_localzone())).localize(datetime.today())
        tr = factories.single_transaction(
            today.date(),
            today,
            "my test",
            Decimal(100),
            from_account=book_basic.accounts(name="inc"),
            to_account=book_basic.accounts(name="asset"),
        )
        book_basic.save()
        tr = book_basic.transactions(description="my test")
        assert tr.post_date == today.date()
        assert tr.enter_date == today.replace(microsecond=0)

    def test_single_transaction_rollback(self, book_basic):
        # cancel() must roll back an unsaved (but validated) transaction.
        today = pytz.timezone(str(tzlocal.get_localzone())).localize(datetime.today())
        factories.single_transaction(
            today.date(),
            today,
            "my test",
            Decimal(100),
            from_account=book_basic.accounts(name="inc"),
            to_account=book_basic.accounts(name="asset"),
        )
        book_basic.validate()
        assert len(book_basic.transactions) == 1
        book_basic.cancel()
        assert len(book_basic.transactions) == 0
| [
"sdementen@gmail.com"
] | sdementen@gmail.com |
ca234ceb876a8bdcce7b154ef0997cb9c4ea66bd | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_interdicting.py | 9fa3e465e94cb3647c1f7842c7f360caf13d9762 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
#calss header
class _INTERDICTING():
def __init__(self,):
self.name = "INTERDICTING"
self.definitions = interdict
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['interdict']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
78c0edf508be8ffa63e6a3b89a60f342f0e3aeac | 165753d576a230400ba4ae4cc3a4767748b3b5bf | /thespomat/__init__.py | a121004a7664007450088fe6ef5c6b3681b1bc30 | [] | no_license | snare/thespomat | 3ac792223044ac6fd8f853a254de7fd646c7068a | c9a85bf6e7ff971c8e1fa6f5a99e38d1e92bf60d | refs/heads/master | 2020-04-14T07:56:28.103980 | 2016-09-13T11:19:36 | 2016-09-13T11:19:36 | 68,084,925 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | import logging
import logging.config
import os
from scruffy import ConfigFile, File, PackageDirectory
from .main import main
config = ConfigFile('~/.thespomat.conf', defaults=File('config/default.cfg', parent=PackageDirectory()), apply_env=True)
config.load()
| [
"snare@ho.ax"
] | snare@ho.ax |
818c5776da2ee141af5c96bc55cbd41b4d926b3c | 3d589d1c56b55fbd2b45b03564b8a9442ebf142b | /lib/tests/unit/message/test_serializer.py | 314beeee5bd13e975a6baf3206dc336e4edf8240 | [
"Apache-2.0"
] | permissive | spotify/klio | 1aff27412e92c9d699259e5ab1eaeb39dc3e9571 | e625565708ed846201d2e05f782c0ce585554346 | refs/heads/develop | 2023-05-25T14:33:28.348335 | 2022-03-23T20:34:09 | 2022-03-23T20:34:09 | 285,928,366 | 815 | 57 | Apache-2.0 | 2023-05-24T21:07:09 | 2020-08-07T22:02:58 | Python | UTF-8 | Python | false | false | 5,385 | py | # Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from apache_beam import pvalue
from google.protobuf import message as gproto_message
from klio_core.proto.v1beta1 import klio_pb2
from klio.message import exceptions
from klio.message import serializer
def _get_klio_job():
    """Return the KlioJob used as the canonical "parent" job in these tests."""
    parent_job = klio_pb2.KlioJob()
    parent_job.job_name = "klio-job"
    parent_job.gcp_project = "test-project"
    return parent_job
def _get_klio_message():
    """Return a fully-populated V2 KlioMessage used as the canonical fixture."""
    message = klio_pb2.KlioMessage()
    # The parent job is recorded as already having visited the message.
    message.metadata.visited.extend([_get_klio_job()])
    message.metadata.force = True
    message.metadata.ping = True
    message.data.element = b"1234567890"
    message.version = klio_pb2.Version.V2
    return message
@pytest.fixture
def klio_message():
    # Fresh KlioMessage per test (protobufs are mutable).
    return _get_klio_message()


@pytest.fixture
def klio_message_str(klio_message):
    # Wire-format bytes of the canonical message.
    return klio_message.SerializeToString()


@pytest.fixture
def logger(mocker):
    # Mock logger so tests can assert on error() calls.
    return mocker.Mock()


@pytest.mark.parametrize(
    "version",
    (klio_pb2.Version.UNKNOWN, klio_pb2.Version.V1, klio_pb2.Version.V2),
)
@pytest.mark.parametrize(
    "element,entity_id,payload",
    (
        (b"an-element", None, None),
        (None, "an-entity-id", None),
        (None, "an-entity-id", b"some-payload"),
        (b"an-element", None, b"some-payload"),
        (None, None, b"some-payload"),
    ),
)
def test_handle_msg_compat(version, element, entity_id, payload):
    # v1 messages carry entity_id; _handle_msg_compat should upgrade them to
    # v2 (element-based) messages and always resolve the version.
    msg = klio_pb2.KlioMessage()
    msg.version = version
    if element:
        msg.data.element = element
    if payload:
        msg.data.payload = payload
    if entity_id:
        msg.data.entity_id = entity_id
    actual_msg = serializer._handle_msg_compat(msg)
    assert actual_msg.version is not klio_pb2.Version.UNKNOWN
    # we assume in the function's logic that v2 messages are already parsed
    # correctly
    # NOTE(review): `not klio_pb2.Version.V2` is always False (the enum value
    # is truthy), so this branch never executes; it was likely meant to be
    # `version is not klio_pb2.Version.V2` — confirm and fix the test.
    if entity_id and not klio_pb2.Version.V2:
        assert entity_id == actual_msg.data.element.decode("utf-8")


def test_to_klio_message(klio_message, klio_message_str, klio_config, logger):
    # Round-trip: serialized bytes parse back to an equal KlioMessage.
    actual_message = serializer.to_klio_message(
        klio_message_str, klio_config, logger
    )
    assert klio_message == actual_message
    logger.error.assert_not_called()


def test_to_klio_message_allow_non_kmsg(klio_config, logger, monkeypatch):
    # With allow_non_klio_messages, raw bytes are wrapped into a new V2
    # message addressed to "anyone" instead of being parsed as a protobuf.
    monkeypatch.setattr(
        klio_config.job_config, "allow_non_klio_messages", True
    )
    incoming = b"Not a klio message"
    expected = klio_pb2.KlioMessage()
    expected.data.element = incoming
    expected.version = klio_pb2.Version.V2
    expected.metadata.intended_recipients.anyone.SetInParent()
    actual_message = serializer.to_klio_message(incoming, klio_config, logger)
    assert expected == actual_message
    logger.error.assert_not_called()


def test_to_klio_message_raises(klio_config, logger, monkeypatch):
    # Without allow_non_klio_messages, unparseable bytes raise DecodeError
    # and the error is logged.
    incoming = b"Not a klio message"
    with pytest.raises(gproto_message.DecodeError):
        serializer.to_klio_message(incoming, klio_config, logger)
    # Just asserting it's called - not testing the error string itself
    # to avoid making brittle tests
    assert 1 == logger.error.call_count


@pytest.mark.parametrize(
    "payload,exp_payload",
    (
        (None, None),
        (b"some payload", b"some payload"),
        (_get_klio_message().data, None),
        ("string payload", b"string payload"),
    ),
)
def test_from_klio_message(klio_message, payload, exp_payload):
    # Payloads are coerced to bytes; passing the message's own KlioData
    # leaves the payload untouched.
    expected = _get_klio_message()
    if exp_payload:
        expected.data.payload = exp_payload
    expected_str = expected.SerializeToString()
    actual_message = serializer.from_klio_message(klio_message, payload)
    assert expected_str == actual_message


def test_from_klio_message_v1():
    # V1 messages serialize with their payload unchanged.
    payload = b"some-payload"
    msg = klio_pb2.KlioMessage()
    msg.version = klio_pb2.Version.V1
    msg.data.payload = payload
    expected_str = msg.SerializeToString()
    actual_message = serializer.from_klio_message(msg, payload)
    assert expected_str == actual_message


def test_from_klio_message_tagged_output(klio_message):
    # Beam TaggedOutput payloads keep their tag; the value is serialized.
    payload = b"some payload"
    expected_msg = _get_klio_message()
    expected_msg.data.payload = payload
    expected = pvalue.TaggedOutput("a-tag", expected_msg.SerializeToString())
    tagged_payload = pvalue.TaggedOutput("a-tag", payload)
    actual_message = serializer.from_klio_message(klio_message, tagged_payload)
    # can't compare expected vs actual directly since pvalue.TaggedOutput
    # hasn't implemented the comparison operators
    assert expected.tag == actual_message.tag
    assert expected.value == actual_message.value


def test_from_klio_message_raises(klio_message):
    # Payloads that cannot be cast to bytes raise a Klio-specific exception.
    payload = {"no": "bytes casting"}
    with pytest.raises(
        exceptions.KlioMessagePayloadException, match="Returned payload"
    ):
        serializer.from_klio_message(klio_message, payload)
| [
"lynn@spotify.com"
] | lynn@spotify.com |
699d3d044d8b9ee067fbeb56484064f83c02587f | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_Cae_Montenegro_countingSheep.py | 082d7af0f162c0f60776ad35b29a6ae4820407eb | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 926 | py | def getDigitsFromNum(myinteger):
digits = []
number_string = str(myinteger)
for ch in number_string:
digits.append(ch)
return digits
def resolveProblem(problem):
    """Solve one "Counting Sheep" case (Code Jam 2016 Qualification, problem A).

    Count the multiples of *problem* (N, 2N, 3N, ...) and track which decimal
    digits have appeared.  Return the first multiple at which all ten digits
    0-9 have been seen, or the string "INSOMNIA" when that never happens
    (only possible for N == 0, whose multiples are all 0).

    *problem* is assumed to be a non-negative int.  The mixed return type
    (int on success, str otherwise) is preserved from the original interface.
    """
    if problem == 0:
        # 0, 0, 0, ... never shows any digit other than '0'.
        return "INSOMNIA"
    seen = set()
    multiple = 0
    while True:
        multiple += problem
        # A set makes the membership bookkeeping O(1) and removes the
        # original's duplicated termination checks.
        seen.update(str(multiple))
        if len(seen) == 10:
            return multiple
# --- driver: read the Code Jam input file, solve each case, write Output.txt ---
# Context managers guarantee both files are closed (the original leaked the
# input handle and relied on an explicit close() for the output).
with open('A-large.in', 'r') as f:
    # First line is the number of test cases; each following line is one N.
    problems = int(f.readline())
    print(problems)
    problemList = [int(f.readline()) for x in range(problems)]

with open("Output.txt", "w") as outputFile:
    # enumerate replaces the hand-maintained case counter.
    for count, pro in enumerate(problemList, start=1):
        result = resolveProblem(pro)
        print("result ", result)
        outputFile.write("Case #" + str(count) + ": " + str(result) + "\n")
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
6e508dfee3027b172cba0f14e92d415d305ff62c | 1e9ad304868c2bda918c19eba3d7b122bac3923b | /kubernetes/client/models/v1beta1_scale.py | 57f00ac181efdcc94b04cc03c85e1ab9c7dc9856 | [
"Apache-2.0"
] | permissive | pineking/client-python | c77e5bd3d476ac852e6dffa96056008baa0f597f | 74a64d7325518f4298600d4bb300f92843c29347 | refs/heads/master | 2021-01-22T22:16:27.368406 | 2017-03-15T08:21:21 | 2017-03-15T08:21:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,000 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.1-660c2a2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1Scale(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Represents a Kubernetes extensions/v1beta1 Scale subresource: API
    metadata plus a spec (desired replicas) and read-only status.
    """

    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
        """
        V1beta1Scale - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps attribute name -> declared Swagger type (drives to_dict()).
        self.swagger_types = {
            'api_version': 'str',
            'kind': 'str',
            'metadata': 'V1ObjectMeta',
            'spec': 'V1beta1ScaleSpec',
            'status': 'V1beta1ScaleStatus'
        }

        # Maps attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'api_version': 'apiVersion',
            'kind': 'kind',
            'metadata': 'metadata',
            'spec': 'spec',
            'status': 'status'
        }

        self._api_version = api_version
        self._kind = kind
        self._metadata = metadata
        self._spec = spec
        self._status = status

    @property
    def api_version(self):
        """
        Gets the api_version of this V1beta1Scale.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources

        :return: The api_version of this V1beta1Scale.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """
        Sets the api_version of this V1beta1Scale.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources

        :param api_version: The api_version of this V1beta1Scale.
        :type: str
        """

        self._api_version = api_version

    @property
    def kind(self):
        """
        Gets the kind of this V1beta1Scale.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds

        :return: The kind of this V1beta1Scale.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this V1beta1Scale.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds

        :param kind: The kind of this V1beta1Scale.
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """
        Gets the metadata of this V1beta1Scale.
        Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.

        :return: The metadata of this V1beta1Scale.
        :rtype: V1ObjectMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1beta1Scale.
        Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.

        :param metadata: The metadata of this V1beta1Scale.
        :type: V1ObjectMeta
        """

        self._metadata = metadata

    @property
    def spec(self):
        """
        Gets the spec of this V1beta1Scale.
        defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.

        :return: The spec of this V1beta1Scale.
        :rtype: V1beta1ScaleSpec
        """
        return self._spec

    @spec.setter
    def spec(self, spec):
        """
        Sets the spec of this V1beta1Scale.
        defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.

        :param spec: The spec of this V1beta1Scale.
        :type: V1beta1ScaleSpec
        """

        self._spec = spec

    @property
    def status(self):
        """
        Gets the status of this V1beta1Scale.
        current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.

        :return: The status of this V1beta1Scale.
        :rtype: V1beta1ScaleStatus
        """
        return self._status

    @status.setter
    def status(self, status):
        """
        Sets the status of this V1beta1Scale.
        current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.

        :param status: The status of this V1beta1Scale.
        :type: V1beta1ScaleStatus
        """

        self._status = status

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() replaces six.iteritems — works identically on
        # Python 2 and 3 without the third-party dependency.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # BUG FIX: the original unconditionally accessed other.__dict__,
        # which raises AttributeError for objects without one (e.g. ints,
        # strings).  Unrelated types now simply compare unequal.
        if not isinstance(other, V1beta1Scale):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
afa7787fd570a8f3ee66fce0febfc15478524917 | 72b5ed231d50ad31859ddd77e2f0ada8b1cdb6c4 | /code_new_start_2021/Mar/predictive_text.py | e545fddd7ade0146db8c246798ac56cdffd24d55 | [
"Apache-2.0"
] | permissive | dylanlee101/leetcode | bd0f284f5fec5e7a6f528afdd7608990b470cfaa | b059afdadb83d504e62afd1227107de0b59557af | refs/heads/master | 2023-04-14T01:55:05.547873 | 2021-04-21T00:41:16 | 2021-04-21T00:41:16 | 259,027,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 743 | py | t9 = "22233344455566677778889999"
def letter_digit(x):
assert 'a' <= x and x <= 'z'
return t9[ord(x) - ord('a')]
def word_code(words):
return ''.join(map(letter_digit,words))
def predictive_text(dico):
freq = {}
for words,weights in dico:
prefix = ""
for x in words:
prefix += x
if prefix in freq:
freq[prefix] += weights
else:
freq[prefix] = weights
prop = {}
for prefix in freq:
code = word_code(prefix)
if code not in prop or freq[prop[code]] < freq[prefix]:
prop[code] = prefix
return prop
def propose(prop, seq):
    """Look up the best completion for digit sequence *seq* in table *prop*.

    Returns the stored prefix, or the literal string "None" when the
    sequence is unknown (the string sentinel is the original contract and
    is preserved deliberately).
    """
    return prop.get(seq, "None")
| [
"liwenyi@songguo7.com"
] | liwenyi@songguo7.com |
02fb1e4db028f9e3a0b4ca1d02fe75ac3d592dcb | 43ee54fd20e802c7c4189a2b5c6fde21bcb80115 | /kivy example app/service/main.py | d951f85c8d3eca3b4a32b30be901dd2ec88dc11e | [] | no_license | KeyWeeUsr/android-notification-buttons | 000fd514639f9a9e21cc96c7b2606e7fc40b0205 | 8a8491055842dbc03821925aa95253587fbf327c | refs/heads/master | 2023-08-26T01:06:28.922620 | 2016-07-31T14:34:26 | 2016-07-31T14:34:26 | 64,793,704 | 0 | 1 | null | 2016-08-02T21:27:46 | 2016-08-02T21:27:45 | null | UTF-8 | Python | false | false | 3,657 | py | from time import sleep
from kivy.utils import platform
from kivy.core.audio import SoundLoader
from kivy.lib import osc
from noti_builder.noti_builder import Notification_Builder
try:
from jnius import autoclass
except: pass
class M_Player:
def intent_callback(self,intent,*arg):
## BroadcastReceiver callbacks are done on a different thread and can crash
## the service on unsafe tasks, setting strings is safe
self.parent.queue = intent
def __init__(self,parent):
try:
self.nBuilder = Notification_Builder()
self.nBuilder.set_title('Stop')
self.nBuilder.set_message('msg')
self.nBuilder.set_ticker('Button example')
## 0. Displayed button name
## 1. icon integer available at https://developer.android.com/reference/android/R.drawable.html
## 2. callback
## action= android PendingIntent action, button name will be used if not provided
self.nBuilder.Button('Play', 17301540 , self.intent_callback, action='Play')
self.nBuilder.Button('Pause', 17301539 , self.intent_callback, action='Pause')
self.nBuilder.build()
self.parent = parent
self.sound = None
self.pauseTime = None
self.state = 'stop'
self.path = '/data/data/org.test.npexample/files/rain.ogg'
except Exception as e: osc.sendMsg('/some_api', ['Mplayer exception '+str(e)], port=3002)
def play(self):
osc.sendMsg('/some_api', ['Play'], port=3002)
try:
if self.sound == None:
if self.pauseTime == None:
self.sound = SoundLoader.load(self.path)
if self.sound:
self.sound.play()
else:
self.sound.play()
if self.state == 'pause':
sleep(0.2)
self.sound.seek(int(self.pauseTime))
self.state = 'play'
self.nBuilder.set_title('Play')
self.nBuilder.build()
except Exception as e:
osc.sendMsg('/some_api', [str(e)], port=3002)
def pause(self):
try:
osc.sendMsg('/some_api', ['Pause'], port=3002)
if self.sound == None:
pass
else:
self.pauseTime = self.sound.get_pos()
self.sound.stop()
self.state = 'pause'
self.nBuilder.set_title('Pause')
self.nBuilder.build()
except Exception as e: osc.sendMsg('/some_api', [str(e)], port=3002)
def osc_callback(self,message,*args):
try:
if message[2] == 'Play':
self.play()
elif message[2] == 'Pause':
self.pause()
except Exception as e: osc.sendMsg('/some_api', [str(e)], port=3002)
class Service:
    """Background service: owns the player and pumps the OSC message loop.

    Notification-button callbacks arrive on a different thread (see
    M_Player.intent_callback) and only set self.queue; this loop drains the
    queue on the main thread where it is safe to act on it.
    """

    def __init__(self):
        sleep(1)
        osc.init()
        oscid = osc.listen(ipAddr='127.0.0.1', port=3001)
        try:
            osc.sendMsg('/some_api', ['Init'], port=3002)
            self.mplayer = M_Player(self)
            osc.bind(oscid, self.mplayer.osc_callback, '/some_api')
            self.queue = ''
            # Runs forever: the service only exits by being killed; crashes
            # are reported over OSC to the UI process.
            while True:
                osc.readQueue(oscid)
                if self.queue != '':
                    # Replay the queued notification action through the same
                    # handler used for OSC messages (message[2] is the command).
                    self.mplayer.osc_callback(['','',self.queue])
                    self.queue = ''
                sleep(.3)
        except Exception as e:
            osc.sendMsg('/some_api', ['Service crash '+str(e)], port=3002)
def main_loop():
    """Entry point: constructing Service starts its blocking OSC loop."""
    # The instance is never used afterwards — Service.__init__ loops forever.
    service = Service()


if __name__ == '__main__':
    main_loop()
| [
"atiskr@gmail.com"
] | atiskr@gmail.com |
38bd3342a88d766a08f9c382a7e4e0c45c1f49fe | 77d6f3a8d4935ca3fff581e0bb3f3f14b7db2e47 | /workbench/audit/migrations/0004_auto_20201017_1016.py | ff382a4b1a04d729044d3dac1eaa311b4b1d0f30 | [
"MIT"
] | permissive | jayvdb/workbench | 192705cf03eaaf96627b1bedde6c7eea6cf54ca7 | a591c8a8aa8266e31095fea23f3d541cee68a7f3 | refs/heads/main | 2023-02-21T16:05:01.855731 | 2021-01-24T16:44:00 | 2021-01-24T16:44:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # Generated by Django 3.1.2 on 2020-10-17 08:16
import io
import os
from django.conf import settings
from django.db import migrations
# The audit triggers live in a standalone SQL file; it is read once at import
# time so the migration can replay it verbatim.
# NOTE(review): no explicit encoding is passed — assumes the platform default
# can decode stuff/audit.sql; confirm (UTF-8 would be safer).
with io.open(os.path.join(settings.BASE_DIR, "stuff", "audit.sql")) as f:
    AUDIT_SQL = f.read()


class Migration(migrations.Migration):
    """Re-applies the audit-trigger SQL on top of migration 0003."""

    dependencies = [
        ("audit", "0003_auto_20190703_0813"),
    ]

    operations = [
        # Forward-only: the SQL file carries no reverse statements.
        migrations.RunSQL(AUDIT_SQL),
    ]
| [
"mk@feinheit.ch"
] | mk@feinheit.ch |
c2b6bfcb9bdadb5fe6c8e444ad793b0fe3c08e26 | a824114fcf7c64667aaaa023b52abac598eb419d | /src/CNN/other_files/conv1_plot.py | b94d16265d40e9ec0c850b212a57dc003f778217 | [] | no_license | satti007/FASHION_MNIST | d9195115b18146c284ce04e7f84da7c49500d32a | d71b67a0cafc0b451761f05bf3b818ae83bf0c5c | refs/heads/master | 2020-04-08T09:26:50.670371 | 2018-11-26T19:57:15 | 2018-11-26T19:57:15 | 159,224,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,992 | py | import os
import sys
import time
import random
import numpy as np
import tensorflow as tf
from data_prep import *
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def load_weights1(save_dir, epoch):
    """Load the first (conv1) weight array from weights_<epoch>.npz in save_dir.

    Arrays inside the archive are named like "arr_<k>"; sorting on the
    integer after the 4-character prefix restores their original save order.
    Returns the first array.
    """
    path = os.path.join(save_dir, "weights_" + str(epoch) + ".npz")
    # np.load on an .npz keeps the underlying file open; the context manager
    # closes it once the needed array has been read (the original leaked the
    # handle).  Only the first array is materialized instead of all of them.
    with np.load(path) as archive:
        names = sorted(archive.files, key=lambda s: int(s[4:]))
        return archive[names[0]]
def load_weights(epoch):
    # Restore the first two saved arrays (conv1 kernel + bias) into the
    # graph's trainable variables.  Relies on module-level globals `tf` and
    # `sess`, and on the variable order matching the save order.
    # NOTE(review): the npz file handle is never closed.
    f = np.load("weights/weights_"+str(epoch)+".npz")
    # Keys are "arr_<k>"; sort numerically on the suffix to restore order.
    initial_weights = [f[p] for p in sorted(f.files,key=lambda s: int(s[4:]))]
    initial_weights = initial_weights[0:2]
    assign_ops = [w.assign(v) for w, v in zip(tf.trainable_variables(), initial_weights)]
    # Initialize first, then overwrite the conv1 variables with the snapshot.
    sess.run(tf.global_variables_initializer())
    sess.run(assign_ops)
    print '[INFO] weights loaded from {} epoch'.format(epoch)
def grid_plot(conv1, file_name):
    # Render 64 filters as an 8x8 tiled grayscale grid and save it under
    # plots/conv_plots/.  `conv1` is expected as (h, w, in_ch, 64); channels
    # are collapsed by summing so each tile is a 2-D image.
    # NOTE(review): the figure is never plt.close()d here, so repeated calls
    # accumulate open figures (matplotlib memory growth).
    num_cols = 8
    fig = plt.figure()
    gs = gridspec.GridSpec(num_cols, num_cols, wspace=0.0, hspace=0.0)
    ax = [plt.subplot(gs[i]) for i in range(64)]
    gs.update(hspace=0, wspace=0 )
    # Move the filter axis first so iteration yields one filter at a time.
    conv1 = conv1.transpose(3,0,1,2)
    print conv1.shape
    for im,j in zip(conv1, range(len(conv1))):
        # Collapse the channel axis to get a single grayscale tile.
        im = np.sum(im, axis=2)
        # print im.shape,j
        ax[j].imshow(im, cmap="gray")
        ax[j].axis('off')
    plt.savefig('plots/conv_plots/'+file_name )
    plt.cla()
# --- driver: visualize conv1 kernels and their responses on sample images ---
# 1) Plot the raw conv1 kernels from the epoch-6 snapshot.
save_dir,epoch = 'weights/', 6
conv1 = load_weights1(save_dir,epoch)
grid_plot(conv1,'conv_weights.png')
# 2) Build a single-conv graph and load the epoch-7 weights into it.
x = tf.placeholder(tf.float32, [None,28,28,1], name='input_node')
conv_1 = tf.layers.conv2d(inputs = x,filters=64,kernel_size=(3,3),strides=(1, 1),padding='SAME')
sess = tf.Session()
sess.run(tf.global_variables_initializer())
epoch = 7
load_weights(epoch)
# 3) Load Fashion-MNIST training data (column 0 is dropped; 785 is the label).
print '[INFO] Loading the data...'
train = '../data/train.csv'
train_data = pd.read_csv(train).as_matrix()
train_X, train_y = train_data[:,1:785], train_data[:,785]
train_X = train_X/255.0
train_X = train_X.reshape(train_X.shape[0], 28, 28, 1)
print '[INFO] Training_data details: ',train_X.shape, train_y.shape
labels = {0: 'Top', 1: 'Trouser', 2: 'Pullover',
          3: 'Dress', 4: 'Coat', 5: 'Sandal', 6: 'Shirt',
          7: 'Sneaker', 8: 'Bag', 9: 'Ankle boot'}
# Hand-picked row indices: one representative image per class.
img_nums = [2426,16,2429,2500,2425,2432,2433,6,2424,2435]
for img_num in img_nums:
    print '[INFO] conv plot for {}'.format(labels[train_y[img_num]])
    # (a) the raw input image
    fig = plt.figure()
    plt.axis('off')
    plt.imshow(train_X[img_num].reshape(28,28),cmap='gray_r', interpolation='nearest')
    plt.savefig('plots/conv_plots/img_{}'.format(labels[train_y[img_num]]) + '.png' )
    plt.cla()
    plt.close()
    # (b) the 64 per-filter responses, tiled via grid_plot
    xs = train_X[img_num].reshape(1,28,28,1)
    outputs = sess.run(conv_1,feed_dict={x:xs})
    conv1 = outputs.transpose(1,2,0,3)
    grid_plot(conv1,'filter_output_{}'.format(labels[train_y[img_num]]) + '.png')
    # (c) all responses summed into one normalized activation map
    outputs = outputs[0]
    outputs = np.sum(outputs, axis=2)
    outputs = outputs/np.max(outputs)
    print "Plotting image", img_num
    fig = plt.figure()
    plt.axis('off')
    plt.imshow(outputs, cmap='gray_r', interpolation='nearest')
    plt.savefig('plots/conv_plots/output_{}'.format(labels[train_y[img_num]]) + '.png')
    plt.cla()
| [
"gsatishkumaryadav@gmail.com"
] | gsatishkumaryadav@gmail.com |
b4d2122b58af506ff68a3cbdd4b2aebf7a93ae9e | f819fe72c5b18b42a25a71dc2900c7fa80e17811 | /pandas/tests/extension/test_floating.py | ff0ff7399e3e67ab0fc2ca82e4dbe03d66ee563f | [
"BSD-3-Clause"
] | permissive | JMBurley/pandas | 34d101425acb0ac35a53bcf29fbd47c2d4c88fda | b74dc5c077971301c5b9ff577fa362943f3c3a17 | refs/heads/master | 2022-11-06T00:48:41.465865 | 2022-06-13T19:30:11 | 2022-06-13T19:30:11 | 229,853,377 | 1 | 0 | BSD-3-Clause | 2019-12-24T02:11:54 | 2019-12-24T02:11:53 | null | UTF-8 | Python | false | false | 5,332 | py | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_extension_array_dtype
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float_dtype
from pandas.core.arrays.floating import (
Float32Dtype,
Float64Dtype,
)
from pandas.tests.extension import base
def make_data():
    """Return 100 values: floats spanning ~0.1-10.0 with two pd.NA holes."""
    low = list(np.arange(0.1, 0.9, 0.1))
    high = list(np.arange(1, 9.8, 0.1))
    return low + [pd.NA] + high + [pd.NA] + [9.9, 10.0]
@pytest.fixture(params=[Float32Dtype, Float64Dtype])
def dtype(request):
    # Every test in this module runs once per floating EA dtype.
    return request.param()


@pytest.fixture
def data(dtype):
    # 100 values with two pd.NA holes (see make_data).
    return pd.array(make_data(), dtype=dtype)


@pytest.fixture
def data_for_twos(dtype):
    return pd.array(np.ones(100) * 2, dtype=dtype)


@pytest.fixture
def data_missing(dtype):
    # Convention required by the base suite: [missing, valid].
    return pd.array([pd.NA, 0.1], dtype=dtype)


@pytest.fixture
def data_for_sorting(dtype):
    # Convention: [B, C, A] with A < B < C.
    return pd.array([0.1, 0.2, 0.0], dtype=dtype)


@pytest.fixture
def data_missing_for_sorting(dtype):
    # Convention: [B, NA, A] with A < B.
    return pd.array([0.1, pd.NA, 0.0], dtype=dtype)


@pytest.fixture
def na_cmp():
    # we are pd.NA
    return lambda x, y: x is pd.NA and y is pd.NA


@pytest.fixture
def na_value():
    return pd.NA


@pytest.fixture
def data_for_grouping(dtype):
    # Convention: [B, B, NA, NA, A, A, B, C] for groupby tests.
    b = 0.1
    a = 0.0
    c = 0.2
    na = pd.NA
    return pd.array([b, b, na, na, a, a, b, c], dtype=dtype)
# The classes below inherit the shared extension-array test suites; a bare
# `pass` body means the inherited tests run unmodified for the floating
# dtypes, while overridden hooks adapt expected dtypes/NA semantics.


class TestDtype(base.BaseDtypeTests):
    pass


class TestArithmeticOps(base.BaseArithmeticOpsTests):
    def check_opname(self, s, op_name, other, exc=None):
        # overwriting to indicate ops don't raise an error
        super().check_opname(s, op_name, other, exc=None)

    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        if exc is None:
            sdtype = tm.get_dtype(s)
            if (
                hasattr(other, "dtype")
                and not is_extension_array_dtype(other.dtype)
                and is_float_dtype(other.dtype)
            ):
                # other is np.float64 and would therefore always result in
                # upcasting, so keeping other as same numpy_dtype
                other = other.astype(sdtype.numpy_dtype)

            result = op(s, other)
            expected = self._combine(s, other, op)

            # combine method result in 'biggest' (float64) dtype
            expected = expected.astype(sdtype)

            self.assert_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(s, other)

    def _check_divmod_op(self, s, op, other, exc=None):
        # divmod never raises for these dtypes.
        super()._check_divmod_op(s, op, other, None)


class TestComparisonOps(base.BaseComparisonOpsTests):
    # TODO: share with IntegerArray?
    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        if exc is None:
            result = op(s, other)
            # Override to do the astype to boolean
            expected = s.combine(other, op).astype("boolean")
            self.assert_series_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(s, other)

    def check_opname(self, s, op_name, other, exc=None):
        super().check_opname(s, op_name, other, exc=None)

    def _compare_other(self, s, data, op, other):
        op_name = f"__{op.__name__}__"
        self.check_opname(s, op_name, other)


class TestInterface(base.BaseInterfaceTests):
    pass


class TestConstructors(base.BaseConstructorsTests):
    pass


class TestReshaping(base.BaseReshapingTests):
    pass


class TestGetitem(base.BaseGetitemTests):
    pass


class TestSetitem(base.BaseSetitemTests):
    pass


class TestIndex(base.BaseIndexTests):
    pass


class TestMissing(base.BaseMissingTests):
    pass


class TestMethods(base.BaseMethodsTests):
    pass


class TestCasting(base.BaseCastingTests):
    pass


class TestGroupby(base.BaseGroupbyTests):
    pass


class TestNumericReduce(base.BaseNumericReduceTests):
    def check_reduce(self, s, op_name, skipna):
        # overwrite to ensure pd.NA is tested instead of np.nan
        # https://github.com/pandas-dev/pandas/issues/30958
        result = getattr(s, op_name)(skipna=skipna)
        if not skipna and s.isna().any():
            expected = pd.NA
        else:
            expected = getattr(s.dropna().astype(s.dtype.numpy_dtype), op_name)(
                skipna=skipna
            )
        tm.assert_almost_equal(result, expected)


@pytest.mark.skip(reason="Tested in tests/reductions/test_reductions.py")
class TestBooleanReduce(base.BaseBooleanReduceTests):
    pass


class TestPrinting(base.BasePrintingTests):
    pass


class TestParsing(base.BaseParsingTests):
    pass


class Test2DCompat(base.Dim2CompatTests):
    pass
"noreply@github.com"
] | JMBurley.noreply@github.com |
61eb7003ebd0696d8bdf54da27e8399d1642f45b | 3f0032fe2eccee8fbe0374d7334fe42b7e592b90 | /services/blueprints/summarizer/views.py | fbf8ed13bb7a932552f3d5b17e97a612c1423b7e | [
"MIT"
] | permissive | OpenKnowledgeMaps/headstart-ifs | 1264000cc996eda2d86d3d48168aefa8f81ffb84 | 95c3e7f7384d66c5b13d02ac26a00b30fbed3cb1 | refs/heads/master | 2021-06-15T07:11:16.867809 | 2019-02-03T12:52:56 | 2019-02-03T12:52:56 | 160,365,546 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,534 | py | import time
import uuid
import json
from flask import Blueprint, request, jsonify
from itertools import chain
import redis
import numpy as np
import igraph as ig
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from nltk.corpus import stopwords
redis_store = redis.StrictRedis(host="localhost", port=6379, db=0)
app = Blueprint('tagger', __name__)
@app.route('/summarize_clusters', methods=['GET', 'POST'])
def summarize_clusters():
    """
    Summarize each cluster of tokenized documents with TextRank + TF-IDF.

    Expects a JSON POST body with:
      * clustered_docs: JSON-encoded list of clusters, each cluster a list of
        documents, each document a list of tokens:
        [
            [
                [cluster1_doc1_token1, cluster1_doc1_token2],
                [cluster1_doc2_token1, cluster1_doc2_token2],
            ],
            [
                [cluster2_doc1_token1, cluster2_doc1_token2],
                [cluster2_doc2_token1, cluster2_doc2_token2],
            ]
        ]
      * top_n: number of summary terms per cluster
      * lang: 'en' / 'de' selects the stopword list (anything else: none)
      * method: ranking combination strategy forwarded to get_summary

    Returns JSON {"success": bool, "summaries": [...]}; a GET returns only
    {"success": false}, preserving the original contract.
    """
    response = {"success": False}
    if request.method == 'POST':
        r = request.get_json()
        top_n = r.get('top_n')
        lang = r.get('lang')
        method = r.get('method')
        clustered_docs = json.loads(r.get('clustered_docs'))
        # BUG FIX: the original used two independent `if` statements, so the
        # trailing `else` reset stops to [] even when lang == 'en'.  `elif`
        # keeps the English stopword list.
        if lang == 'en':
            stops = stopwords.words('english')
        elif lang == 'de':
            stops = stopwords.words('german')
        else:
            stops = []
        summaries = []
        for cluster in clustered_docs:
            # Flatten the cluster to a deduplicated token list *before* the
            # try block: the original built `doc` inside it, so the fallback
            # handler hit NameError whenever flattening itself failed.
            doc = list(chain.from_iterable(cluster))
            if doc and isinstance(doc[0], list):
                doc = list(chain.from_iterable(doc))
            doc = list(set(doc))
            try:
                # Ask the embedding worker (via redis) for noun-chunk vectors.
                k = str(uuid.uuid4())
                d = {"id": k, "doc": doc}
                redis_store.rpush("embed_noun_chunks", json.dumps(d))
                # NOTE(review): no timeout — this polls forever if the worker
                # never answers; behavior preserved from the original.
                while True:
                    result = redis_store.get(k)
                    if result is not None:
                        result = json.loads(result.decode('utf-8'))
                        embeddings = result.get('embeddings')
                        redis_store.delete(k)
                        break
                    time.sleep(0.5)
                textrank_scores = get_textrank(doc, embeddings)
            except Exception:
                # Best-effort fallback: every token gets a neutral score.
                textrank_scores = [[1, token] for token in doc]
            df1 = pd.DataFrame(textrank_scores, columns=['textrank', 'token'])
            try:
                tfidf_scores = get_tfidfrank(cluster, stops)
            except Exception:
                tfidf_scores = [[1, token] for token in doc]
            df2 = pd.DataFrame(tfidf_scores, columns=['tfidf', 'token'])
            df = pd.merge(df1, df2, on='token')
            # implemented weighted and 2+1 methods here
            summary = get_summary(df, method, weights=(0.5, 0.5), top_n=top_n)
            summaries.append(summary)
        response["summaries"] = summaries
        response["success"] = True
    return jsonify(response)
def get_summary(df, method, weights=(0.5, 0.5), top_n=3):
    """Combine tf-idf and textrank scores into a comma-separated summary.

    Both score columns are min-max rescaled to [0, 1] in place first.

    method == 'weighted': ``weights`` is a pair of floats summing to 1;
    candidates are ranked by ``weights[0]*tfidf + weights[1]*textrank`` and
    the ``top_n`` best are joined.

    method == 'n+n': ``weights`` is a pair of ints; the summary is the top
    ``weights[0]`` candidates by tf-idf followed by the top ``weights[1]``
    candidates by textrank.

    In both modes, candidates of 35+ characters are dropped, case-insensitive
    duplicates are removed, and " - " is collapsed to "-".
    Returns None for any other ``method`` value (unchanged behavior).
    """
    df["textrank"] = df.textrank - df.textrank.min()
    df["textrank"] = df.textrank / df.textrank.max()
    df["tfidf"] = df.tfidf - df.tfidf.min()
    df["tfidf"] = df.tfidf / df.tfidf.max()
    df["weighted"] = df.apply(lambda x: x.tfidf * weights[0] +
                              x.textrank * weights[1], axis=1)
    if method == 'weighted':
        summary = []
        for candidate in df.sort_values('weighted', ascending=False)['token']:
            if candidate.lower() not in [s.lower() for s in summary]:
                if len(candidate) < 35:
                    summary.append(candidate.replace(" - ", "-"))
        summary = ", ".join(summary[:top_n])
        return summary
    if method == 'n+n':
        # BUG FIX: both loops used to test membership against `summary`,
        # which is never defined in this branch (NameError on every call);
        # deduplicate against the list actually being built instead.
        tfidf_summary = []
        for candidate in df.sort_values('tfidf', ascending=False)['token']:
            if candidate.lower() not in [s.lower() for s in tfidf_summary]:
                if len(candidate) < 35:
                    tfidf_summary.append(candidate.replace(" - ", "-"))
        textrank_summary = []
        for candidate in df.sort_values('textrank', ascending=False)['token']:
            if candidate.lower() not in [s.lower() for s in textrank_summary]:
                if len(candidate) < 35:
                    textrank_summary.append(candidate.replace(" - ", "-"))
        summary = ", ".join(tfidf_summary[:weights[0]] +
                            textrank_summary[:weights[1]])
        return summary
def get_textrank(tokens, embeddings):
    """Rank tokens by PageRank over their embedding-similarity graph.

    ``tokens`` and ``embeddings`` are parallel lists; entries whose embedding
    is None are dropped. Returns [(score, token), ...] sorted by descending
    score.
    """
    # Keep only tokens that actually have an embedding vector.
    res = [(x, np.array(y))
           for x, y in zip(tokens, embeddings)
           if y is not None]
    # average over noun chunk word vectors for each noun_chunk
    tokens = [r[0] for r in res]
    embeddings = [r[1] for r in res]
    # summarise with textrank: directed similarity graph, self-similarity
    # zeroed out, cosine similarities used as edge weights.
    sim_mat = cosine_similarity(embeddings)
    np.fill_diagonal(sim_mat, 0)
    # nonzero() yields the (source, target) index pairs once; the original
    # recomputed the same pairs a second time via np.where(sim_mat).
    sources, targets = sim_mat.nonzero()
    weights = sim_mat[sources, targets]
    edgelist = list(zip(sources.tolist(), targets.tolist()))
    G = ig.Graph(edges=edgelist, directed=True)
    G.es['weight'] = weights
    scores = G.pagerank(weights="weight")
    ranking = sorted(((scores[i], nc)
                      for i, nc in enumerate(tokens)), reverse=True)
    return ranking
def get_tfidfrank(docs, stops):
    """Rank tokens by tf-idf across the documents of one cluster.

    docs: list of documents, each a list of lists of token strings.
    stops: stopword list handed to CountVectorizer.
    Returns [(score, token), ...] sorted by descending score (nonzero
    entries across all documents, so a token may appear more than once).
    """
    # Join each document into one string; multi-word tokens are glued with
    # underscores so the vectorizer treats them as a single term.
    docs = [" ".join([t.replace(" ", "_") for t in chain.from_iterable(d)])
            for d in docs]
    cv = CountVectorizer(max_df=0.85, lowercase=False, stop_words=stops)
    word_count_vector = cv.fit_transform(docs)
    # COMPAT: get_feature_names() was removed in scikit-learn 1.2; prefer
    # get_feature_names_out() when available, keep the old call otherwise.
    if hasattr(cv, "get_feature_names_out"):
        token_names = list(cv.get_feature_names_out())
    else:
        token_names = cv.get_feature_names()
    # Undo the underscore glue to recover the original token spelling.
    token_names = [t.replace("_", " ") for t in token_names]
    tfidf = TfidfTransformer(smooth_idf=True, use_idf=True)
    tfidf_matrix = tfidf.fit_transform(word_count_vector).tocoo()
    tuples = zip(tfidf_matrix.col, tfidf_matrix.data)
    scores = sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)
    ranking = [(score, token_names[ind]) for ind, score in scores]
    return ranking
| [
"web@christopherkittel.eu"
] | web@christopherkittel.eu |
81030fc7c53544f0a3448941c05bf3cd34e2c314 | 5a8214b3a452c574e6c883bf5d90ba58ba87c461 | /leetcode/114.py | 7979268339f980ff3693f990a01e0ff2ef33e655 | [] | no_license | phlalx/algorithms | 69a3c8519687816e3c6333ec12b40659d3e3167f | f4da5a5dbda640b9bcbe14cb60a72c422b5d6240 | refs/heads/master | 2023-02-03T10:30:30.181735 | 2020-12-26T09:47:38 | 2020-12-26T09:47:38 | 129,254,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | #TAGS tree, linked list
# cool, TODO think simpler + iterative
class Solution:
    def flatten(self, root: TreeNode) -> None:
        """Flatten a binary tree into a right-leaning linked list, in place.

        After the call the nodes appear in pre-order: every node's right
        child is the next pre-order node and every left pointer is None.
        Do not return anything, modify root in-place instead.
        """
        if root is None:
            return

        def f(root):
            # Flattens the subtree rooted at `root` and returns the LAST node
            # of the resulting list, so the caller knows where to splice.
            old_right = root.right
            if root.left is not None:
                # Flatten the left subtree and move it into the right slot.
                left_last = f(root.left)
                root.right = root.left
                root.left = None
            else:
                left_last = root
            # Re-attach the original right subtree after the flattened left part.
            left_last.right = old_right
            if old_right is not None:
                # Flatten the original right subtree; its tail is our tail too.
                last = f(old_right)
            else:
                last = left_last
            return last

        f(root)
| [
"phlalx@users.noreply.github.com"
] | phlalx@users.noreply.github.com |
da362e1a2ccac6d975e65a115a74bac478977887 | a52de6c29280c8c9932f2098acaf7d3c36218801 | /calendarApp/calendarApp/settings.py | 0bf69257bb8d524ffd9dd21384386a0113280f5e | [] | no_license | Chungking-express/LOTTE_Calendar | dd27ac377d073960a4f8407a492eec1208060281 | 005485309c3867866306c7196f3d01790953a0d3 | refs/heads/main | 2022-12-31T18:50:22.271213 | 2020-10-24T07:22:46 | 2020-10-24T07:22:46 | 304,044,664 | 0 | 0 | null | 2020-10-14T14:42:06 | 2020-10-14T14:42:05 | null | UTF-8 | Python | false | false | 3,207 | py | """
Django settings for calendarApp project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository — load it from an
# environment variable before deploying.
SECRET_KEY = '2u^lk@fwp!@0_3-v1or(8zh2+uydr2brzremn)x=vh#ni&(5#)'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty list is fine while DEBUG is True; must list real hosts in production.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project apps
    'calendar_testApp',
    'fullcalendar'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'calendarApp.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'calendarApp.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'

# Extra locations the staticfiles finder searches during development.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR,'static'),
)
| [
"jrimit@gmail.com"
] | jrimit@gmail.com |
adf79d1b8b14cf47cdcef7a181cc0a4fe8b30661 | 2110f7f13c20e20a2467ed09e87f86d2ee439cc3 | /doc/SConscript | cf0bcbbf989539e6c038d6df1811d3bc4aafe15a | [
"ISC",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | GeekGalaxy/imgcat | 8a148312b1bdca8410a3f065b1c72bd4fd1fdb7f | 794d2b1ff5bde9d94dad643e101d68409526a736 | refs/heads/master | 2021-01-18T04:56:11.109633 | 2014-12-20T13:22:28 | 2014-12-20T13:22:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | import os
# SCons builder that renders the Markdown man page with pandoc.
# ``$$`` escapes the dollar sign so SCons passes ``$(date ...)`` through to
# the shell, embedding the build date into the page.
manpage_builder = Builder(action='pandoc -sw man '
                          '''-Vdate="$$(date +'%B %d, %Y')" '''
                          '$SOURCE -o $TARGET')
env = Environment(BUILDERS={'Manpage': manpage_builder})
# Build from pandoc only if it exists.
# NOTE(review): ``os.system('which pandoc')`` assumes a POSIX shell;
# shutil.which would be portable — confirm before changing build behavior.
if os.system('which pandoc') == 0:
    manpage = env.Manpage('imgcat.1', 'imgcat.1.md')
else:
    # Fall back to the pre-generated man page checked into the repository.
    manpage = File('imgcat.1')
Return(['manpage'])
| [
"easantos@ualberta.ca"
] | easantos@ualberta.ca | |
684d7f7de6d6fc3a5859e040a6d55f949ffe919b | 7d5e694aba546c166004cab8e592a000fb7283ef | /PyQt5_Udemy/03_Advanced_Widgets/18_Paint_Event.py | 0396a45c6c4fc951279142c3cba7afe03a3ff489 | [] | no_license | OnurKaraguler/PyQt5 | 45ffe320911f25f2ad0e318de2c7e3851db7be0c | 909546b53c0f80c1eae27c660f47cd5ded3ff1a6 | refs/heads/master | 2022-12-21T09:06:24.063816 | 2020-09-24T14:54:11 | 2020-09-24T14:54:11 | 298,299,258 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | import sys, os
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt
from PIL import Image
class Main(QMainWindow):
    # NOTE(review): `count` is never read anywhere in this file — looks like
    # leftover scaffolding; confirm before removing.
    count = 0
    def __init__(self,parent=None):
        """Create and show the demo window; painting happens in paintEvent."""
        super(Main, self).__init__(parent)
        self.setWindowTitle('Draw demo')
        self.setGeometry(500,150,400,400)
        # self.setFixedSize(self.size())
        self.UI()
        self.show()
    def paintEvent(self, event):
        """Draw text, shapes and a pixmap each time the window repaints."""
        qp = QPainter()
        qp.begin(self)
        qp.setPen(QColor(Qt.red))
        qp.setFont(QFont('Arial', 20))
        qp.drawText(10, 50, "hello Python")
        qp.setPen(QColor(Qt.blue))
        qp.drawLine(10, 100, 100, 100)
        qp.drawRect(10, 150, 150, 100)
        qp.setPen(QColor(Qt.yellow))
        qp.drawEllipse(100, 50, 100, 50)
        # Assumes img/1.png exists relative to the working directory;
        # QPixmap silently draws nothing if the file is missing.
        qp.drawPixmap(220, 10, QPixmap("img/1.png"))
        qp.fillRect(200, 175, 150, 100, QBrush(Qt.SolidPattern))
        qp.end()
    def UI(self):
        """Build the (minimal) widget layout."""
        self.window()
    def window(self):
        # NOTE(review): QMainWindow ignores setLayout(); a central widget is
        # normally required for a layout to take effect — confirm intent.
        self.mainLayout = QVBoxLayout()
        self.setLayout(self.mainLayout)
if __name__=='__main__':
    # Standard Qt bootstrap: create the application, show the window, then
    # hand control to the event loop until the window is closed.
    app = QApplication(sys.argv)
    window = Main()
    sys.exit(app.exec_())
"onurkaraguler@hotmail.com"
] | onurkaraguler@hotmail.com |
974b1e84271f21fdbfc78a9d309b654c2e6ca236 | 9a14ca300591f15133a1d3aade19bacd89ae5544 | /ToCluster_Optimize/OptimalProtocolGenerator_Full_105_15.py | 2a00ee055454520f2f3ceec198c538e6eb41d6ed | [] | no_license | StevenLarge/DiscreteControl_PythonCode | 68114fff5ec0b9263b9c9b65ab9589dd17e488f7 | 235bbc107ad313bb60f5355a112a7736e2e85a5c | refs/heads/master | 2020-03-16T19:58:26.983188 | 2018-05-18T19:33:57 | 2018-05-18T19:33:57 | 132,936,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | #This python script generates the Fully-Optimal protocols Discrete nonequilibrium control simulations
#
#Steven Large
#March 5th 2018
import numpy as np
import scipy.optimize
import os
import OptimizeFull as FullOpt
import WriteData
# --- Sweep configuration -------------------------------------------------
# Output directory prefix; the parameter extension is appended per run.
WritePathBase = "Protocols_"
# Numbers of control-parameter values to optimize over, and total protocol
# durations to sweep.
NumberCPVals = [5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,23,25]
ProtocolTimes = [5,10,50,100,500]
# Parameter-set directory suffixes (one correlation mesh per entry).
Param_Ext = ["105_15/"]
# NOTE(review): CPStart is never used below — confirm before removing.
CPStart = -1
# Equilibration time prepended/appended to every protocol.
PaddingTime = 100
CorrelationPath_Base = "CorrelationMesh_"
FilenameCorr = "CorrelationMesh_2.dat"
FilenameCP = "CPVals.dat"
FilenameLagTime = "LagTime.dat"
# Number of iterations for the brute-force full optimizer.
FullOptimizerIterations = 15

for ParameterIndex in range(len(Param_Ext)):
    # Resolve per-parameter-set input/output directories.
    WritePath = WritePathBase + Param_Ext[ParameterIndex]
    WritePathLog = WritePath + "Logs/"
    CorrelationPath = CorrelationPath_Base + Param_Ext[ParameterIndex]
    # Load the precomputed correlation mesh and its axes once per parameter set.
    CorrelationArray = FullOpt.ReadCorrelationArray(CorrelationPath,FilenameCorr)
    LagTime_Vector = FullOpt.ReadVector(CorrelationPath,FilenameLagTime)
    CPVals_Vector = FullOpt.ReadVector(CorrelationPath,FilenameCP)
    for index1 in range(len(NumberCPVals)):
        for index2 in range(len(ProtocolTimes)):
            # Optimize a protocol for this (step count, duration) combination.
            OptimalCP,OptimalTime,NaiveCP,NaiveTime,CostTracker = FullOpt.Driver_PreRead_Brute(FullOptimizerIterations,NumberCPVals[index1],ProtocolTimes[index2],CPVals_Vector,LagTime_Vector,CorrelationArray)
            # Pad both protocols with equilibration time at either end.
            OptimalTime.append(PaddingTime)
            OptimalTime.insert(0,PaddingTime)
            NaiveTime.append(PaddingTime)
            NaiveTime.insert(0,PaddingTime)
            WriteNameFull = "FullOpt_CP" + str(NumberCPVals[index1]) + "_T" + str(ProtocolTimes[index2]) + ".dat"
            WriteNameLog = "OptimizerLogFile-Full_CP" + str(NumberCPVals[index1]) + "_T" + str(ProtocolTimes[index2]) + ".dat"
            WriteData.WriteProtocol(WritePath,WriteNameFull,OptimalCP,OptimalTime)
            WriteData.OptimizerLog_Cost(WritePathLog,WriteNameLog,CostTracker)
| [
"stevelarge7@gmail.com"
] | stevelarge7@gmail.com |
fbd4b941754e32c2a8e4a5685408aff84e66fa37 | cc65020ac0a4b42d7cb0b73616c0d1329be0a21a | /questionnaire/management/commands/slugify_questions.py | 72e53357003907bdebaf7322c6c891b342ab8a19 | [] | no_license | Sashkow/ukrainian-party-policy-constructor | e8c7ed118b6d2ef3ac23326ab8285978e0464c33 | 8ef7eaae81275681dc1a5547ca9fbca642ae08c8 | refs/heads/master | 2021-06-12T01:37:44.818950 | 2019-08-01T12:59:45 | 2019-08-01T12:59:45 | 162,290,445 | 0 | 1 | null | 2021-06-10T21:04:09 | 2018-12-18T13:12:41 | JavaScript | UTF-8 | Python | false | false | 523 | py | from django.core.management.base import BaseCommand, CommandError
from questionnaire.models import QuestionAnswer
from slugify import slugify
class Command(BaseCommand):
    # BUG FIX: the help string was copy-pasted from the Django tutorial
    # ("Closes the specified poll for voting") and described the wrong command.
    help = 'Regenerate the slug of every QuestionAnswer from its question text'
    def handle(self, *args, **options):
        """Recompute QuestionAnswer.slug as a snake_case slug of the question."""
        qua = QuestionAnswer.objects.all()
        for item in qua:
            # slugify() produces dash-separated words; this model's slug
            # convention uses underscores instead.
            item.slug = slugify(item.question).replace('-', '_')
            item.save()
| [
"lykhenko.olexandr@gmail.com"
] | lykhenko.olexandr@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.