blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4007d1d834eb93435d048abbcfd50d203437f465 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/fbs_0710+377/sdB_FBS_0710+377_coadd.py | 784ba860dd2a13c3afcbb4bf0b1f021dbaf1d73b | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[108.462625,37.6635], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_FBS_0710+377/sdB_FBS_0710+377_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_FBS_0710+377/sdB_FBS_0710+377_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
3b517fc30ab30ab7f92ed6c78e129c03e69d2e2c | 18631e9a657324ef1f83da58f4346e9f2c368d28 | /contrib/testgen/base58.py | fcce6ed46f95e9484592c24f2ae01ea5b751fe9a | [
"MIT"
] | permissive | thehomosapien/AMLBitcoin | 1e68bf6621d9ee055385ef420b45c9dc289b4f8c | f097ca52c2e8039761f1927d83a9fe0b4c355b1c | refs/heads/master | 2020-05-26T08:27:40.095034 | 2019-05-24T07:31:43 | 2019-05-24T07:31:43 | 188,166,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,042 | py | # Copyright (c) 2012-2016 The Bitcoin Core developers
# Copyright (c) 2017 The AmlBitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
AmlBitcoin base58 encoding and decoding.
Based on https://AmlBitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# AmlBitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
if result[-4:] == checksum(result[:-4]):
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/AmlBitcoin/python-base58.git)
assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
| [
"rishabhshukla@opulasoft.com"
] | rishabhshukla@opulasoft.com |
6435387f6bb4fd732575479041e194689557fd59 | b6f8e779d6424ddeb5437f56c1334aaaf0b52afa | /oscar_mws/management/commands/mws_submit_product_feed.py | 5ae721616f14ae485241fafaf5750ed870d358d0 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | gaybro8777/django-oscar-mws | f97e9e908935432136ee0bf922f6d03816f1f15f | 4ea012943d61b1c38c14c392230be92e48525f59 | refs/heads/master | 2022-05-08T12:49:17.898825 | 2014-09-29T16:00:22 | 2014-09-29T16:00:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | from optparse import make_option
from django.db.models import get_model
from django.core.management.base import NoArgsCommand
from oscar_mws.feeds.gateway import submit_product_feed
Product = get_model('catalogue', 'Product')
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option(
'--dry-run',
action='store_true',
dest='dry_run',
default=False,
help=('Do not submit the product feed put print the generated '
'XML to stdout.')
),
make_option(
'--seller-id',
dest='seller_id',
help=('Seller ID used to submit the product feed')
),
)
def handle_noargs(self, **options):
# get all products without a ASIN assigned
products = Product.objects.all()
merchant_id = options.get('seller_id')
if options.get('dry_run'):
submit_product_feed(products, merchant_id, dry_run=True)
return
submission = submit_product_feed(products, merchant_id)
print "Submitted as ID #{0}".format(submission.submission_id)
| [
"sebastian@roadside-developer.com"
] | sebastian@roadside-developer.com |
8d1dc5d7269623aff468e6d9463065c12ce3978d | 7f179410290d742baf96f363ded51a3275298dc7 | /mmediting/mmedit/models/backbones/sr_backbones/duf.py | e38e23edea06a365121f64aa87717a784dbe304a | [
"Apache-2.0"
] | permissive | H-deep/Trans-SVSR | b0c4f01b34ae3c8d45dc6acdf39f75ab2a8f1dec | ed67fb4bbd734313b4ae7af390f993398f4b30ab | refs/heads/main | 2023-05-23T22:42:00.297036 | 2022-06-20T05:46:44 | 2022-06-20T05:46:44 | 431,305,569 | 38 | 6 | null | null | null | null | UTF-8 | Python | false | false | 2,697 | py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class DynamicUpsamplingFilter(nn.Module):
"""Dynamic upsampling filter used in DUF.
Ref: https://github.com/yhjo09/VSR-DUF.
It only supports input with 3 channels. And it applies the same filters
to 3 channels.
Args:
filter_size (tuple): Filter size of generated filters.
The shape is (kh, kw). Default: (5, 5).
"""
def __init__(self, filter_size=(5, 5)):
super().__init__()
if not isinstance(filter_size, tuple):
raise TypeError('The type of filter_size must be tuple, '
f'but got type{filter_size}')
if len(filter_size) != 2:
raise ValueError('The length of filter size must be 2, '
f'but got {len(filter_size)}.')
# generate a local expansion filter, similar to im2col
self.filter_size = filter_size
filter_prod = np.prod(filter_size)
expansion_filter = torch.eye(int(filter_prod)).view(
filter_prod, 1, *filter_size) # (kh*kw, 1, kh, kw)
self.expansion_filter = expansion_filter.repeat(
3, 1, 1, 1) # repeat for all the 3 channels
def forward(self, x, filters):
"""Forward function for DynamicUpsamplingFilter.
Args:
x (Tensor): Input image with 3 channels. The shape is (n, 3, h, w).
filters (Tensor): Generated dynamic filters.
The shape is (n, filter_prod, upsampling_square, h, w).
filter_prod: prod of filter kenrel size, e.g., 1*5*5=25.
upsampling_square: similar to pixel shuffle,
upsampling_square = upsampling * upsampling
e.g., for x 4 upsampling, upsampling_square= 4*4 = 16
Returns:
Tensor: Filtered image with shape (n, 3*upsampling, h, w)
"""
n, filter_prod, upsampling_square, h, w = filters.size()
kh, kw = self.filter_size
expanded_input = F.conv2d(
x,
self.expansion_filter.to(x),
padding=(kh // 2, kw // 2),
groups=3) # (n, 3*filter_prod, h, w)
expanded_input = expanded_input.view(n, 3, filter_prod, h, w).permute(
0, 3, 4, 1, 2) # (n, h, w, 3, filter_prod)
filters = filters.permute(
0, 3, 4, 1, 2) # (n, h, w, filter_prod, upsampling_square]
out = torch.matmul(expanded_input,
filters) # (n, h, w, 3, upsampling_square)
return out.permute(0, 3, 4, 1, 2).view(n, 3 * upsampling_square, h, w)
| [
"hassan.imani@student"
] | hassan.imani@student |
a4f55acb3baf7ca1cbb285f095ba2e8627f814b1 | 85e3de2fec6013d2eeb1969f51871937bce61258 | /blog/models.py | eed6906a9bcf261c1e3f0854726abd05233c564a | [] | no_license | Jrius4/analysisapi | 715477b7a1bfa4462897764385a744020b1e8c32 | 12a4bd66f530fafe587fe02ce84ed164c79e451b | refs/heads/master | 2023-03-25T22:00:03.357108 | 2021-03-23T03:15:56 | 2021-03-23T03:15:56 | 342,872,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,307 | py | from sqlalchemy import func, Column,Integer,String, Boolean,DateTime
from sqlalchemy.sql.expression import true
from .database import Base
from datetime import datetime
class Blog(Base):
__tablename__ = 'blogs'
id = Column(Integer,primary_key=True,index=True,nullable=False)
title = Column(String)
body = Column(String)
published = Column(Boolean)
created_at = Column(
DateTime,
default=datetime.utcnow(),
server_default=func.now(),
nullable=False,
index=True,
)
updated_at = Column(
DateTime,
default=datetime.utcnow(),
server_default=func.now(),
nullable=False,
index=True,
)
class User(Base):
__tablename__ = 'users'
id = Column(Integer,primary_key=True,index=True,nullable=False)
name = Column(String)
email = Column(String)
password = Column(String)
created_at = Column(
DateTime,
default=datetime.utcnow(),
server_default=func.now(),
nullable=False,
index=True,
)
updated_at = Column(
DateTime,
default=datetime.utcnow(),
server_default=func.now(),
nullable=False,
index=True,
)
| [
"kazibwejuliusjunior@gmail.com"
] | kazibwejuliusjunior@gmail.com |
920a54cad36fe30b6373ff452238bbc16566cdc5 | 6d542d331ffff57805b049451e0570a5540b0fae | /venv/bin/django-admin.py | 0ff19f0fa76f9560eed3e876ac587267a235915a | [] | no_license | vansh1999/Task_manager_Django | 94ef99d20940e8c2a2ac74b5ef218aea444ec247 | b276009308eec45f8282d4a83a55c751bce74753 | refs/heads/master | 2020-08-09T02:02:01.199270 | 2019-10-10T19:22:40 | 2019-10-10T19:22:40 | 213,972,534 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!/home/vansh/task_manager_django/venv/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"vansh.bhardwaj1999@gmail.com"
] | vansh.bhardwaj1999@gmail.com |
e58575c1a91a1f1c5af23b675785eead0d157265 | a5fcf5efa26615922ad3a8169d4f8911ab6cefe7 | /apis_v1/documentation_source/twitter_sign_in_start_doc.py | ed4818f6e0437c404bc12ca6a5d48238e01dd486 | [
"MIT"
] | permissive | eternal44/WeVoteServer | c0bc5ad96c0c72c6b4b3198a91ef44b6f347cc93 | acaae24d7cb0ec34ec4470247ea1072ee34510cb | refs/heads/master | 2021-01-15T12:44:19.988036 | 2016-04-28T05:03:42 | 2016-04-28T05:03:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | # apis_v1/documentation_source/twitter_sign_in_start_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def twitter_sign_in_start_doc_template_values(url_root):
"""
Show documentation about twitterSignInStart
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
]
optional_query_parameter_list = [
]
potential_status_codes_list = [
{
'code': 'VALID_VOTER_DEVICE_ID_MISSING',
'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
},
{
'code': 'VALID_VOTER_ID_MISSING',
'description': 'Cannot proceed. A valid voter_id was not found.',
},
# {
# 'code': '',
# 'description': '',
# },
]
try_now_link_variables_dict = {
# 'organization_we_vote_id': 'wv85org1',
}
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "voter_device_id": string (88 characters long),\n' \
' "twitter_redirect_url": string,\n' \
'}'
template_values = {
'api_name': 'twitterSignInStart',
'api_slug': 'twitterSignInStart',
'api_introduction':
"",
'try_now_link': 'apis_v1:twitterSignInStartView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
| [
"dale.mcgrew@wevoteusa.org"
] | dale.mcgrew@wevoteusa.org |
d81f57fdf2f27ac0c815c5915b5bd91d26af1712 | ad8b30544480ba1e5f5b1cb2dec2aa77a644e8d2 | /SWEA/D4_1249_Bogupro.py | f4877e82f0e5fc3d7116d995d07d10be94c7fa43 | [] | no_license | hyunwoojeong123/Algorithm | 79abc82d944ca60342a7f8b6fc44fac20ac55123 | 0baaf3222fbbec699ffbec5d4cc680067cf293fb | refs/heads/master | 2023-07-10T18:28:51.934005 | 2021-08-18T01:51:23 | 2021-08-18T01:51:23 | 284,403,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | di = [0,0,1,-1]
dj = [1,-1,0,0]
T = int(input())
for tc in range(1,T+1):
N = int(input())
temp_board = [input() for x in range(N)]
board = []
for i in range(N):
temp = []
for j in range(N):
temp.append(int(temp_board[i][j]))
board.append(temp)
visited = [[-1 for j in range(N)] for i in range(N)]
visited[0][0] = 0
q = [[0,0]]
while q:
i,j = q.pop(0)
for k in range(4):
ni, nj = i + di[k], j + dj[k]
if ni < 0 or nj < 0 or ni >= N or nj >= N:
continue
if visited[ni][nj] != -1 and visited[i][j] + board[ni][nj] >= visited[ni][nj]:
continue
visited[ni][nj] = visited[i][j] + board[ni][nj]
q.append([ni,nj])
print(f'#{tc} {visited[N-1][N-1]}')
| [
"hw2621@daum.net"
] | hw2621@daum.net |
26dfb480f7f646ad4e455c0664593f6a18c1119a | 62905741553a2c749989af4cc52e636272b495c7 | /lite/migrations/0008_auto_20190503_0229.py | b75167027a23bb888cb5f58cdf93ff47391bbb34 | [] | no_license | bushitan/coffee_server | c030e0e0e48a441e7697c4383d0b431c656824e3 | d69b81ff1c1821d91eef0629c7b17afa79973f0d | refs/heads/master | 2021-07-05T18:13:11.307199 | 2020-09-23T19:03:38 | 2020-09-23T19:03:38 | 184,772,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('lite', '0007_share_valid_time'),
]
operations = [
migrations.AddField(
model_name='prize',
name='valid_time',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='有效期'),
),
migrations.AddField(
model_name='score',
name='valid_time',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='有效期'),
),
]
| [
"373514952@qq.com"
] | 373514952@qq.com |
903247881b0af967c5be8da404a8be59cacbfdcb | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/inspections/PyProtectedMemberInspection/truePositiveInClass.py | 45474e7b4f58b82724d6478d19ba52db91540e57 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 225 | py | __author__ = 'ktisha'
class A:
def __init__(self):
self._a = 1
def foo(self):
self.b= 1
class B:
def __init__(self):
<weak_warning descr="Access to a protected member _a of a class">A()._a</weak_warning>
| [
"Ekaterina.Tuzova@jetbrains.com"
] | Ekaterina.Tuzova@jetbrains.com |
5b61af6e58ff61111af4fcf12acb276b3604b465 | 1c9660a76b948504ebd8a9ae6e588384912df0dd | /tests/failure.py | 217c3fe5f43a42f1b50f0c3c31f56708b2362055 | [] | no_license | MerouaneBen/python-progressbar | b1a64018cb974908f2c202a6964eb3a8008fe4cc | 14b4705482131008c3bd6f36d5de20df77ef72e5 | refs/heads/develop | 2021-01-24T22:26:38.374229 | 2016-03-10T17:27:37 | 2016-03-10T17:27:37 | 54,357,700 | 1 | 0 | null | 2016-03-21T03:39:12 | 2016-03-21T03:39:12 | null | UTF-8 | Python | false | false | 2,060 | py | import pytest
import progressbar
def test_missing_format_values():
with pytest.raises(KeyError):
p = progressbar.ProgressBar(
widgets=[progressbar.widgets.FormatLabel('%(x)s')],
)
p.update(5)
def test_max_smaller_than_min():
with pytest.raises(ValueError):
progressbar.ProgressBar(min_value=10, max_value=5)
def test_no_max_value():
'''Looping up to 5 without max_value? No problem'''
p = progressbar.ProgressBar()
p.start()
for i in range(5):
p.update(i)
def test_correct_max_value():
'''Looping up to 5 when max_value is 10? No problem'''
p = progressbar.ProgressBar(max_value=10)
for i in range(5):
p.update(i)
def test_minus_max_value():
'''negative max_value, shouldn't work'''
p = progressbar.ProgressBar(min_value=-2, max_value=-1)
with pytest.raises(ValueError):
p.update(-1)
def test_zero_max_value():
'''max_value of zero, it could happen'''
p = progressbar.ProgressBar(max_value=0)
p.update(0)
with pytest.raises(ValueError):
p.update(1)
def test_one_max_value():
'''max_value of one, another corner case'''
p = progressbar.ProgressBar(max_value=1)
p.update(0)
p.update(0)
p.update(1)
with pytest.raises(ValueError):
p.update(2)
def test_changing_max_value():
'''Changing max_value? No problem'''
p = progressbar.ProgressBar(max_value=10)(range(20), max_value=20)
for i in p:
pass
def test_backwards():
'''progressbar going backwards'''
p = progressbar.ProgressBar(max_value=1)
p.update(1)
p.update(0)
def test_incorrect_max_value():
'''Looping up to 10 when max_value is 5? This is madness!'''
p = progressbar.ProgressBar(max_value=5)
for i in range(5):
p.update(i)
with pytest.raises(ValueError):
for i in range(5, 10):
p.update(i)
def test_deprecated_maxval():
progressbar.ProgressBar(maxval=5)
def test_deprecated_poll():
progressbar.ProgressBar(poll=5)
| [
"Wolph@wol.ph"
] | Wolph@wol.ph |
76c9f26a88555ed1308279a6eb10b6af3e933a6f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_089/ch83_2020_04_13_00_15_07_042040.py | 715e579553035cc52530d75fbdeaee2de813f3cd | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | def medias_por_inicial(dic):
novo = {}
n = []
for k,v in dic.items():
if k[0] in novo:
float(novo[k[0]]) += v
float(novo[k[0]]) = float(novo[k[0]])/len(n)
if k[0] not in novo:
float(novo[k[0]]) = v
n.append(1)
return novo
| [
"you@example.com"
] | you@example.com |
4d0e5ab6a0eeee30851277b2558bc65ccb583654 | d18ed72d6f8d27dd8a13eab5c6366f9dca48aa6b | /espresso/tags/qecalc-0.2.3/qecalc/qetask/qeparser/outputs/matdyn.py | 8142bf37def068019ddfea4a5aebd3fe2885742e | [] | no_license | danse-inelastic/AbInitio | 6f1dcdd26a8163fa3026883fb3c40f63d1105b0c | 401e8d5fa16b9d5ce42852b002bc2e4274afab84 | refs/heads/master | 2021-01-10T19:16:35.770411 | 2011-04-12T11:04:52 | 2011-04-12T11:04:52 | 34,972,670 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,024 | py | #!/usr/bin/env python
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# QEcalc by DANSE Inelastic group
# Nikolay Markovskiy
# California Institute of Technology
# (C) 2009 All Rights Reserved
#
# File coded by: Nikolay Markovskiy
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE.txt for license information.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import string
import qe_io_dict as io_dict
import numpy
from baseoutput import BaseOutput
class Output(BaseOutput):
def __init__(self):
BaseOutput.__init__(self)
self.parsers = {
'multi phonon' : self.getMultiPhonon,
'phonon dos' : self.getPhononDOS
}
def getMultiPhonon(self,setting):
''' Obtain a list of phonon modes and eigen vectors from output generated \
by matdyn.x'''
return self.matdyn_modes( setting.get('flvec') )
def getPhononDOS(self,setting):
"""Obtain density of states from matdyn.x output. Returns DOS axis in
wave numbers and DOS values, normalized to the number of degrees of
freedom as numpy arrays"""
dosFile = open(setting.get('fldos'), 'r')
line = dosFile.readline()
dos = []
while line:
dos.append([max(float(ch), 0.0) for ch in line.split()]) # get rid of negatives
line = dosFile.readline()
dos = numpy.array(dos)
axis = dos[0:,0]
g = dos[0:,1]
return [(axis, 'cm-1'), (g, None)]
def matdyn_modes(self, fname):
matdynDict = io_dict.read_file(fname)
qKeys = io_dict.find_all_keys_from_string(matdynDict, 'q =')
qP = []
for i in qKeys:
qP.append( [ float(qi) for qi in string.split( matdynDict[ i ] )[2:5] ] )
qPoints = numpy.array(qP)
# find number of atoms per unit cell and dimensionality
# get frequencies keys for the last q-point:
fKeys = io_dict.find_all_keys_from_string_afterkey( matdynDict, qKeys[-1], 'omega')
# get sample displacement vector for the first frequency of the last q point and find its dimensionality
# here omegaShift = 1 means no space between displacements and frequencies (2 means 1 extra line ...)
omegaShift = 1
nDim = len( matdynDict[ fKeys[0] + omegaShift ].split()[1:-1] )/2
# get number of atoms in unit cell
nAtom = fKeys[1] - fKeys[0] - omegaShift
# qShift = 2 means 1 exra line between q line and frequencies line
qShift = 2
# create numpy array in accordance with idf format specification
# Pol = [ [ [ [ ] ] ] ]
Pol = []
Omegas = []
for i in qKeys:
# Mode = [ [ [ ] ] ]
Mode = []
Omega = []
for iOmega in range( i + qShift, nDim*nAtom*(nAtom + omegaShift) + i + qShift, nAtom+omegaShift):
# get omegas in THz:
# print float( matdynDict[ iOmega].split('=')[1].split()[0] )
Omega.append( float( matdynDict[ iOmega].split('=')[2].split()[0] ) )
# Atom = [ [ ] ]
Atom = []
for iAtom in range( iOmega + omegaShift, iOmega + omegaShift + nAtom ):
vecStr = matdynDict[ iAtom ].split()[1:-1]
# vec = [ ]
vec = [ float(v1) + 1.0j*float(v2) for v1,v2 in zip( vecStr[:-1:2], vecStr[1::2] ) ]
Atom.append(vec)
Mode.append( Atom )
Pol.append( Mode )
Omegas.append( Omega )
npOmega = numpy.array(Omegas)
npPol = numpy.array(Pol)
# THz2meV = 4.1357 # meV * s
# output Omega in cm-1
return (npPol, None), (npOmega, 'cm-1'), (qPoints, None)
if __name__ == "__main__":
print "Hello World";
__author__="Nikolay Markovskiy"
__date__ ="$Oct 18, 2009 7:29:26 PM$" | [
"markovskiy@gmail.com"
] | markovskiy@gmail.com |
45a1982dbef916bf652b5d648093ffa3e3c21e7c | bd435e3ff491d13c3cb1ffcf34771ac1c80f7859 | /code/base/bitwise_operator.py | 5b1ad675f38cb82a026a65444d2e7f01b6a975d9 | [] | no_license | luningcowboy/PythonTutorial | 8f4b6d16e0fad99a226540a6f12639ccdff402ff | 9024efe8ed22aca0a1271a2c1c388d3ffe1e6690 | refs/heads/master | 2021-06-16T23:03:22.153473 | 2020-04-09T13:52:12 | 2020-04-09T13:52:12 | 187,571,993 | 0 | 0 | null | 2021-03-25T23:02:36 | 2019-05-20T05:16:13 | Python | UTF-8 | Python | false | false | 309 | py | #!/usr/bing/python
# -*- coding: UTF-8 -*-
a = 60
b = 13
c = 0
# & 按位与运算符
c = a & b
print c
# | 按位或运算符
c = a | b
print c
# ^ 按位异或运算符
c = a ^ b
print c
# ~ 按位取反
c = ~a
print c
# << 左移动运算符
c = a << 2
print c
# >> 右移动运算符
c = a >> 2
print c
| [
"luningcowboy@gmail.com"
] | luningcowboy@gmail.com |
73763402fab667c852ad081aae31d5a7e92819cd | 4fe3d69884994f2eb15723fccde05f48bc4aad8b | /pj0/1111.py | 4ac1a1c7ee667f546ed1439feb4539ca187ffb02 | [] | no_license | jeeHwon/pyPJ | 76b985f97e47a83891063bd9e6357020576a0a62 | 279f57b080862c3347024036dfaf9f4fa89603fc | refs/heads/master | 2023-06-14T21:34:23.782116 | 2021-07-12T06:07:40 | 2021-07-12T06:07:40 | 314,187,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | # 1) 함수정의
# def 함수명(매개변수):
# 내용
# 2) 함수호출
# 함수명(매개변수)
#f1('kim') 함수는 정의 후 사용
def f1(name) :
print('hi~'+name)
#f1('kim')
#print(f1('lee')) #none 리턴값이 없으니까\
def f2(name) :
return 'hi~'+name
#print(f2('lee'))
def f3(x) :
y1 = x * 10
y2 = x * 20
y3 = x * 30
return y1,y2,y3
#a,b,c=10,20,30
# r1,r2,r3 = f3(7)
# print(r1,r2,r3) #각각에 리턴
# r1 = f3(7) #튜플로 리턴
# print(r1)
# print(list(r1))
def f4(x) :
y1 = x * 10
y2 = x * 20
y3 = x * 30
return [y1,y2,y3] #리스트로 리턴
# r1 = f4(8)
# print(r1)
# r1,r2,r3 = f4(8)
# print(r1,r2,r3)
def f5(x) :
y1 = x * 10
y2 = x * 20
y3 = x * 30
return {'y1':y1,'y2':y2,'y3':y3} #딕션어리로 리턴
r1 = f5(3)
# print(r1)
# print(r1.keys(),r1.values())
# print(r1.items())
def f6(x,y) :
print('f6 실행중')
print(x,y)
#f6(3,4)
# f6(3,4,5) error
def f7(x=1,y=2,z=3) :
print(x,y,z)
# f7()
# f7(11)
# f7(11,22)
# f7(11,22,33)
# f7(11,22,33,44) error
# def f8(x=1,y=2,z) : error
def f8(x,y,z=3) :
print(x,y,z)
# f8() error
# f8(10) error
# f8(10,20)
#가변인수 : *, **
def f9(*args) :
print(args)
print(type(args))
hap = 0
for i in args :
hap = hap + i
print('hap=',hap)
# f9()
# f9(1,2,3)
# f9(1,2,3,4,5)
# f9('one','two') #error 숫자와 문자의 +오류
def f10(**args) : #딕션어리로 매개변수 받을경우
print(args)
print(type(args))
#f10('a','b','c')
f10(name='kim',addr='busan',age=10) | [
"jeehwon01@gmail.com"
] | jeehwon01@gmail.com |
061e11b83179c388fd784f0dfc13c8e15527e4ed | bcd0e9e78d7d9202ea5d7d59ed26c69365bd4e9e | /tests/test_wrong_parameters.py | d46688dc75779fd72f6427e5aafec750384feafa | [] | no_license | saso93/barplots | 2fbbf6957e2e56ef3e76e50ca7ecdcde5883f60e | ee74a8821fa62343437a76d88c5da6271e8062a5 | refs/heads/master | 2022-04-14T21:20:28.070296 | 2020-04-10T08:35:13 | 2020-04-10T08:35:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | import pandas as pd
import pytest
from barplots import barplots
def test_wrong_parameters():
root = "test_barplots"
df = pd.read_csv("tests/test_case.csv")
with pytest.raises(ValueError):
barplots(
df[df.cell_line == "CIAO"],
["cell_line", "task", "balancing", "model"],
path="{root}/{{feature}}.png".format(root=root)
)
with pytest.raises(ValueError):
barplots(
df,
["cell_line", "task", "balancing", "model"],
orientation="pinco",
path="{root}/{{feature}}.png".format(root=root)
)
with pytest.raises(ValueError):
barplots(
df,
["model"],
path="{root}/{{feature}}.png".format(root=root),
subplots=True
)
with pytest.raises(ValueError):
barplots(
df,
["model"],
plots_per_row="pinco",
path="{root}/{{feature}}.png".format(root=root),
subplots=True
)
| [
"cappelletti.luca94@gmail.com"
] | cappelletti.luca94@gmail.com |
1abc3ddb82d38253c0605b306bd1fbee2392690b | 06ed3f41ff67204f244003a1d45fb2c9fc643f71 | /crawler.py | d18fba6c823f4f7ca6f9aea8474758b1afedc10a | [] | no_license | JFluo2011/async_crawler | 71ca397efd1a62f7bdd1869d8fa60e88eacfad27 | b3ffc059889f36740d9c16ab0bd92aca6683b984 | refs/heads/master | 2020-03-11T18:11:05.942945 | 2018-04-19T07:40:55 | 2018-04-19T07:40:55 | 130,169,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,061 | py | import os
import re
import sys
import json
import random
import asyncio
from asyncio import Queue
import logging
import concurrent
import aiohttp
import async_timeout
import redis
from lxml import etree
class Crawler(object):
def __init__(self, max_tasks=100, store_path='.', *, loop=None):
self.loop = loop or asyncio.get_event_loop()
self.max_tasks = max_tasks
self.store_path = store_path
self.session = aiohttp.ClientSession(loop=self.loop)
self.headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
}
self.redis_client = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
self.start_page_key = 'start_page'
self.detail_page_key = 'detail_page'
self.download_page_key = 'download_page'
self.start_coroutine_count = self.max_tasks // 20
self.detail_coroutine_count = self.max_tasks // 5
self.download_coroutine_count = self.max_tasks - self.start_coroutine_count - self.detail_coroutine_count
self.download_semaphore = asyncio.Semaphore(self.download_coroutine_count)
self.semaphore = asyncio.Semaphore(self.start_coroutine_count+self.detail_coroutine_count)
self.handle_failed_semaphore = asyncio.Semaphore(10)
self.q = Queue(loop=self.loop)
async def start_task(self):
while True:
await self.create_task(self.start_page_key, self.parse_detail_task)
async def detail_task(self):
while True:
await self.create_task(self.detail_page_key, self.parse_download_task)
async def download_task(self):
while True:
async with self.download_semaphore:
task, json_data = await self.get_task(self.download_page_key)
if task is None:
await asyncio.sleep(10)
continue
content = await self.fetch(json_data['url'], self.download_page_key, task, type_='content')
if content is not None:
self.loop.run_in_executor(None, self.save_image, json_data['path'], content)
async def handle_failed_task(self):
while True:
async with self.handle_failed_semaphore:
redis_key, task = await self.q.get()
self.loop.run_in_executor(None, self.insert_task, redis_key, task)
logging.info('handle failed task: {}'.format(task))
self.q.task_done()
def get_proxy(self):
try:
proxy = random.choice(self.redis_client.keys('http://*'))
except:
return None
return proxy
async def close(self):
await self.session.close()
async def get_task(self, redis_key):
task = self.redis_client.spop(redis_key)
return task, (task is not None) and json.loads(task)
async def parse_detail_task(self, text, json_data):
selector = etree.HTML(text)
for sel in selector.xpath('//*[@class="gallery_image"]'):
xpath_ = './/img[@class="img-responsive img-rounded"]/@src'
category = re.findall('ua/(.*?)/page', json_data['url'])[0]
image_dir, image_number = re.findall('/mini/(\d+)/(\d+)\.jpg', sel.xpath(xpath_)[0])[0]
meta = {
'url': sel.xpath('./@href')[0],
'image_number': image_number,
'image_dir': image_dir,
'category': category,
}
self.loop.run_in_executor(None, self.insert_task, self.detail_page_key, json.dumps(meta))
async def parse_download_task(self, text, json_data):
base_url = 'https://look.com.ua/pic'
selector = etree.HTML(text)
for url in selector.xpath('//*[@class="llink list-inline"]/li/a/@href'):
resolution = re.findall(r'download/\d+/(\d+x\d+)/', url)[0]
path = os.path.join(os.path.abspath(self.store_path), 'images', json_data['category'],
json_data['image_number'], resolution + '.jpg')
url = '/'.join([base_url, json_data['image_dir'], resolution,
'look.com.ua-' + json_data['image_number'] + '.jpg'])
if os.path.exists(path):
logging.info('image {} already downloaded'.format(path))
continue
meta = {'url': url, 'path': path, }
self.loop.run_in_executor(None, self.insert_task, self.download_page_key, json.dumps(meta))
async def create_task(self, redis_key, operate_func):
async with self.semaphore:
task, json_data = await self.get_task(redis_key)
if task is None:
await asyncio.sleep(10)
else:
url = json_data['url']
html = await self.fetch(url, redis_key, task)
if html is not None:
await operate_func(html, json_data)
def insert_task(self, redis_key, task):
self.redis_client.sadd(redis_key, task)
async def fetch(self, url, key, value, type_='text'):
logging.info('active tasks count: {}'.format(len(asyncio.Task.all_tasks())))
try:
async with async_timeout.timeout(30):
async with self.session.get(url, headers=self.headers, ssl=False, timeout=30, allow_redirects=False,
proxy=self.get_proxy()) as response:
return await response.read() if type_ == 'content' else await response.text()
except Exception as err:
if isinstance(err, concurrent.futures._base.TimeoutError):
logging.warning('{} raised TimeoutError'.format(url))
else:
logging.warning('{} raised {}'.format(url, str(err)))
self.loop.run_in_executor(None, self.insert_task, key, value)
return None
def save_image(self, path, content):
if not os.path.exists('\\'.join(path.split('\\')[:-1])):
os.makedirs('\\'.join(path.split('\\')[:-1]))
with open(path, 'wb') as f:
f.write(content)
logging.info('{}: downloaded'.format(path))
async def crawl(self):
"""Run the crawler until all finished."""
workers = []
workers.extend([asyncio.Task(self.start_task(), loop=self.loop) for _ in range(self.start_coroutine_count)])
workers.extend([asyncio.Task(self.detail_task(), loop=self.loop) for _ in range(self.detail_coroutine_count)])
workers.extend([asyncio.Task(self.download_task(), loop=self.loop) for _ in range(self.download_coroutine_count)])
# asyncio.Task(self.start_task(), loop=self.loop)
# asyncio.Task(self.detail_task(), loop=self.loop)
# asyncio.Task(self.download_task(), loop=self.loop)
while True:
await asyncio.sleep(60)
# for w in workers:
# w.cancel()
def main():
pass
if __name__ == '__main__':
main()
| [
"l"
] | l |
17a836b400e4c622b5978ee7c0eb1de33ae291ee | cdb790c9f37c36bd885c36a348df906424393e6b | /Doudizhu/Server/src/DoudizhuServiceRole/Script/PyScript/Action/Action7001.py | e7064cd9ea36728ebf5256eee7900bf20d44268e | [] | no_license | HongXiao/Scut-samples | 855eb33a3d0ec858b7e1c9d53f1b252e737fc2e0 | ea2acd478de7ba58c2800dcf52fe56bbeb4b55fa | refs/heads/master | 2022-07-13T15:16:45.550367 | 2015-05-19T02:48:10 | 2015-05-19T02:48:10 | 36,568,782 | 1 | 0 | null | 2022-06-23T00:19:32 | 2015-05-30T17:46:38 | C# | UTF-8 | Python | false | false | 3,144 | py | import ReferenceLib
from action import *
from System import *
from mathUtils import MathUtils
from ZyGames.Framework.Cache.Generic import *
from System.Collections.Generic import *
from ZyGames.Framework.SyncThreading import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Game.Cache import *
from ZyGames.Framework.Game.Com.Rank import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Doudizhu.Bll import *
from ZyGames.Doudizhu.Bll.Logic import *
from ZyGames.Doudizhu.Bll.Com.Chat import *
from ZyGames.Framework.Game.Lang import *
from ZyGames.Doudizhu.Model import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Framework.Game.Runtime import *
from ZyGames.Framework.Cache import *
from ZyGames.Doudizhu.Bll.Base import *
#商店物品列表接口
class UrlParam(HttpParam):
def __init__(self):
HttpParam.__init__(self)
self.ShopType = ShopType.HeadID
self.PageIndex = 0
self.PageSize = 0
class ActionResult(DataResult):
def __init__(self):
DataResult.__init__(self)
self.PageCount = 0
self.GameCoin = 0
self.GoldNum = 0
self.ShopList = List[ShopInfo]();
def getUrlElement(httpGet, parent):
urlParam = UrlParam()
if httpGet.Contains("ShopType")\
and httpGet.Contains("PageIndex")\
and httpGet.Contains("PageSize"):
urlParam.ShopType = httpGet.GetEnum[ShopType]("ShopType")
urlParam.PageIndex = httpGet.GetIntValue("PageIndex")
urlParam.PageSize = httpGet.GetIntValue("PageSize")
else:
urlParam.Result = False
return urlParam
def takeAction(urlParam, parent):
actionResult = ActionResult()
userId = str(parent.Current.UserId)
user = PersonalCacheStruct.Get[GameUser](userId)
PaymentService.Trigger(user)
shopList = ConfigCacheSet[ShopInfo]().FindAll(match=lambda s:s.ShopType == urlParam.ShopType)
result = MathUtils.GetPaging[ShopInfo](shopList,urlParam.PageIndex, urlParam.PageSize)
if result:
actionResult.ShopList = result[0]
actionResult.PageCount = result[1]
actionResult.GameCoin = user.GameCoin
gameHall = GameHall(user)
actionResult.GoldNum = gameHall.UserGold
#需要实现
return actionResult
def buildPacket(writer, urlParam, actionResult):
writer.PushIntoStack(actionResult.PageCount)
writer.PushIntoStack(actionResult.GameCoin)
writer.PushIntoStack(actionResult.GoldNum)
writer.PushIntoStack(len(actionResult.ShopList))
for info in actionResult.ShopList:
dsItem = DataStruct()
dsItem.PushIntoStack(info.ShopID)
dsItem.PushIntoStack(MathUtils.ToNotNullString(info.ShopName))
dsItem.PushIntoStack(MathUtils.ToNotNullString(info.HeadID))
dsItem.PushIntoStack(info.Price)
dsItem.PushIntoStack(info.VipPrice)
dsItem.PushIntoStack(info.GameCoin)
dsItem.PushIntoStack(MathUtils.ToNotNullString(info.ShopDesc))
dsItem.PushIntoStack(info.SeqNO)
writer.PushIntoStack(dsItem)
return True | [
"wzf_88@qq.com"
] | wzf_88@qq.com |
4fb40f24de102e6000cef42aeeab4166ff9eb0bd | b2f349672fd4a22d4ac8c104a4816c22de183264 | /Reinforcement_learning_TUT/10_A3C/A3C_discrete_action.py | f17352a7e6d606ac96ca124d41b9a3469c55a7bd | [] | no_license | yuchiho2008/tutorials | 63395d3c1a2d4e49601ab0e00f5471dddcc11af7 | 1879c68debe7420fda2582955156c8cfe4ff8dfe | refs/heads/master | 2021-01-20T03:59:48.795460 | 2017-04-27T00:27:18 | 2017-04-27T00:27:18 | 89,617,449 | 1 | 0 | null | 2017-04-27T16:20:13 | 2017-04-27T16:20:13 | null | UTF-8 | Python | false | false | 7,958 | py | """
Asynchronous Advantage Actor Critic (A3C) with discrete action space, Reinforcement Learning.
The Cartpole example.
View more on [莫烦Python] : https://morvanzhou.github.io/tutorials/
Using:
tensorflow 1.0
gym 0.8.0
"""
import multiprocessing
import threading
import tensorflow as tf
import numpy as np
import gym
import os
import shutil
import matplotlib.pyplot as plt
GAME = 'CartPole-v0'
OUTPUT_GRAPH = True
LOG_DIR = './log'
N_WORKERS = multiprocessing.cpu_count()
MAX_GLOBAL_EP = 1000
GLOBAL_NET_SCOPE = 'Global_Net'
UPDATE_GLOBAL_ITER = 20
GAMMA = 0.9
ENTROPY_BETA = 0.001
LR_A = 0.001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
GLOBAL_RUNNING_R = []
GLOBAL_EP = 0
env = gym.make(GAME)
N_S = env.observation_space.shape[0]
N_A = env.action_space.n
class ACNet(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self._build_net()
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.int32, [None, ], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
self.a_prob, self.v = self._build_net()
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('a_loss'):
log_prob = tf.reduce_sum(tf.log(self.a_prob) * tf.one_hot(self.a_his, N_A, dtype=tf.float32), axis=1, keep_dims=True)
exp_v = log_prob * td
entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob), axis=1, keep_dims=True) # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('local_grad'):
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self):
w_init = tf.random_normal_initializer(0., .1)
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 200, tf.nn.relu6, kernel_initializer=w_init, name='la')
a_prob = tf.layers.dense(l_a, N_A, tf.nn.softmax, kernel_initializer=w_init, name='ap')
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 100, tf.nn.relu6, kernel_initializer=w_init, name='lc')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
return a_prob, v
def update_global(self, feed_dict): # run by a local
SESS.run([self.update_a_op, self.update_c_op], feed_dict) # local grads applies to global net
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
prob_weights = SESS.run(self.a_prob, feed_dict={self.s: s[np.newaxis, :]})
action = np.random.choice(range(prob_weights.shape[1]),
p=prob_weights.ravel()) # select action w.r.t the actions prob
return action
class Worker(object):
def __init__(self, name, globalAC):
self.env = gym.make(GAME).unwrapped
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
while True:
if self.name == 'W_0':
self.env.render()
a = self.AC.choose_action(s)
s_, r, done, info = self.env.step(a)
if done: r = -5
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.array(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
s = s_
total_step += 1
if done:
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.99 * GLOBAL_RUNNING_R[-1] + 0.01 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
"| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
)
GLOBAL_EP += 1
break
if __name__ == "__main__":
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
if os.path.exists(LOG_DIR):
shutil.rmtree(LOG_DIR)
tf.summary.FileWriter(LOG_DIR, SESS.graph)
worker_threads = []
for worker in workers:
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('step')
plt.ylabel('Total moving reward')
plt.show()
| [
"morvanzhou@hotmail.com"
] | morvanzhou@hotmail.com |
0f96c5fc093aa1d091208aa9ee5811ae624ea1e7 | 361459069b1b2eb5adb180d1f61241742d2fbcd8 | /chapter13/psutil_test.py | 32ee9a055cac8c8983358a93cfd42493abaeba15 | [] | no_license | tangkaiyang/python3_laioxuefeng | 1704e72163aa55ce177e5b7a88a3e7501b415ceb | 02400db01f144417ef202e6c135561c304cacb3a | refs/heads/master | 2020-04-28T15:13:17.163004 | 2019-08-06T07:53:18 | 2019-08-06T07:53:18 | 175,364,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | # -*- coding:UTF-8 -*_
import psutil
a = psutil.cpu_count() # CPU逻辑数量
b = psutil.cpu_count(logical=False) # CPU物理核心
# 2说明是双核超线程,4则是4核非超线程
print(a, b) | [
"945541696@qq.com"
] | 945541696@qq.com |
86bec58195e2010ec4de0106f3b88e936e141121 | 68a8c68f425315e3793c311de7940095945be72d | /aat/exchange/test/harness.py | a9f3f4ff8019c6b3f204b7b691986ab7ea7c2eb6 | [
"Apache-2.0"
] | permissive | z772/aat | 8064b33721eaf115b7220fef71e8a14b98169e7e | 8d6bcaf01c1192ca7d1e0f367c89ede2b4f4a26c | refs/heads/main | 2023-02-13T08:43:37.581203 | 2020-12-30T00:07:43 | 2020-12-30T00:07:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,629 | py | from datetime import datetime, timedelta
from typing import List, Any, AsyncGenerator, Optional
from aat import Strategy
from aat.config import EventType, InstrumentType, Side, TradingType
from aat.core import ExchangeType, Event, Instrument, Trade, Order
from aat.exchange import Exchange
class Harness(Exchange):
"""Test harness exchange
This is a synthetic exchange that runs through a sequence of data objects and
asserts some specific behavior in the strategies under test"""
def __init__(self, trading_type: TradingType, verbose: bool) -> None:
super().__init__(ExchangeType("testharness"))
self._trading_type = trading_type
self._verbose = verbose
self._instrument = Instrument("Test.inst", InstrumentType.EQUITY)
self._id = 0
self._start = datetime.now() - timedelta(days=30)
self._client_order: Optional[Order] = None
async def instruments(self) -> List[Instrument]:
"""get list of available instruments"""
return [self._instrument]
async def connect(self) -> None:
# No-op
pass
async def tick(self) -> AsyncGenerator[Any, Event]: # type: ignore[override]
now = self._start
for i in range(1000):
if self._client_order:
self._client_order.filled = self._client_order.volume
t = Trade(
self._client_order.volume,
i,
taker_order=self._client_order,
maker_orders=[],
)
t.taker_order.timestamp = now
self._client_order = None
yield Event(type=EventType.TRADE, target=t)
continue
o = Order(
1,
i,
Side.BUY,
self._instrument,
self.exchange(),
timestamp=now,
filled=1,
)
t = Trade(1, i, o, [])
yield Event(type=EventType.TRADE, target=t)
now += timedelta(minutes=30)
async def newOrder(self, order: Order) -> bool:
order.id = str(self._id)
self._id += 1
self._client_order = order
return True
class TestStrategy(Strategy):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(TestStrategy, self).__init__(*args, **kwargs)
self._orders: List[Order] = []
self._trades: List[Trade] = []
async def onStart(self, event: Event) -> None:
self.periodic(self.onPeriodic, second=0, minute=30)
async def onTrade(self, event: Event) -> None:
pass
async def onTraded(self, event: Event) -> None:
self._trades.append(event.target) # type: ignore
async def onPeriodic(self) -> None:
o = Order(1, 1, Side.BUY, self.instruments()[0], ExchangeType("testharness"))
_ = await self.newOrder(o)
self._orders.append(o)
async def onExit(self, event: Event) -> None:
assert len(self._orders) == len(self._trades)
assert len(self._trades) == 334
assert self._trades[0].price == 2
assert self._trades[1].price == 3
assert self._trades[-1].price == 999
if __name__ == "__main__":
from aat import TradingEngine, parseConfig
cfg = parseConfig(
[
"--trading_type",
"backtest",
"--exchanges",
"aat.exchange.test.harness:Harness",
"--strategies",
"aat.exchange.test.harness:TestStrategy",
]
)
print(cfg)
t = TradingEngine(**cfg)
t.start()
| [
"t.paine154@gmail.com"
] | t.paine154@gmail.com |
e161d47f9aa82b2cecadfa5720a36669d3bd09eb | 56eb496a6a84ab83a058cde64e5d5732772b7214 | /tests/conftest.py | 70d22b19f738ecc48c2b4d45a4c05dec50379946 | [] | no_license | klen/peewee_migrate | e15d5a459e2f8893057dbfefcdffce7e2939b31d | daa96cea3e1ccba6733b8a52ca1b04fa1e20872c | refs/heads/develop | 2023-08-29T15:47:01.427100 | 2023-08-07T11:41:50 | 2023-08-07T11:41:50 | 27,434,828 | 364 | 104 | null | 2023-08-07T11:33:27 | 2014-12-02T13:59:17 | Python | UTF-8 | Python | false | false | 1,071 | py | from __future__ import annotations
import pytest
from peewee import CharField, ForeignKeyField, IntegerField, Model
class Customer(Model):
name = CharField()
age = IntegerField()
class Order(Model):
number = CharField()
uid = CharField(unique=True)
customer = ForeignKeyField(Customer, column_name="customer_id")
@pytest.fixture()
def dburl():
return "sqlite:///:memory:"
@pytest.fixture()
def router(dburl):
from playhouse.db_url import connect
from peewee_migrate import Router
database = connect(dburl)
return Router(database)
@pytest.fixture()
def migrator(database):
from peewee_migrate import Migrator
migrator = Migrator(database)
migrator.create_model(Customer)
migrator.create_model(Order)
migrator()
return migrator
@pytest.fixture()
def database(router):
return router.database
@pytest.fixture(autouse=True)
def _patch_postgres(dburl):
# Monkey patch psycopg2 connect
import psycopg2
from .mocks import postgres
psycopg2.connect = postgres.MockConnection
| [
"horneds@gmail.com"
] | horneds@gmail.com |
6792a220e74b68f537d98492f0e829991249b1df | 74482894c61156c13902044b4d39917df8ed9551 | /cryptoapis/model/delete_automatic_tokens_forwarding_response_item_token_data_bitcoin_omni_token.py | 74b4b2d78038d56eefc592b956dacfbe7ccdd394 | [
"MIT"
] | permissive | xan187/Crypto_APIs_2.0_SDK_Python | bb8898556ba014cc7a4dd31b10e24bec23b74a19 | a56c75df54ef037b39be1315ed6e54de35bed55b | refs/heads/main | 2023-06-22T15:45:08.273635 | 2021-07-21T03:41:05 | 2021-07-21T03:41:05 | 387,982,780 | 1 | 0 | NOASSERTION | 2021-07-21T03:35:29 | 2021-07-21T03:35:29 | null | UTF-8 | Python | false | false | 7,112 | py | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class DeleteAutomaticTokensForwardingResponseItemTokenDataBitcoinOmniToken(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'property_id': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'property_id': 'propertyId', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, property_id, *args, **kwargs): # noqa: E501
"""DeleteAutomaticTokensForwardingResponseItemTokenDataBitcoinOmniToken - a model defined in OpenAPI
Args:
property_id (int): Defines the `propertyId` of the Omni Layer token.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.property_id = property_id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| [
"kristiyan.ivanov@menasoftware.com"
] | kristiyan.ivanov@menasoftware.com |
f7cccc59fd574e4916d03834fa3c3ddd32a1d78c | 9088d49a7716bdfc9b5770e8e54ebf7be6958fcf | /15 - Functions/Ex_100.py | 376d666f4465eb04e3b71961706ab0f20121b789 | [
"MIT"
] | permissive | o-Ian/Practice-Python | 579e8ff5a63a2e7efa7388bf2d866bb1b11bdfe2 | 1e4b2d0788e70006096a53a7cf038db3148ba4b7 | refs/heads/main | 2023-05-02T02:21:48.459725 | 2021-05-18T18:46:06 | 2021-05-18T18:46:06 | 360,925,568 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | from random import randint
def sorteia(lista):
print('Sorteando 5 valores da lista: ', end='')
for c in range(0, 5):
n = randint(0, 100)
lista.append(n)
print(n, end=' ')
print()
def somaPar(lista):
p = 0
print(f'Somando os valores pares de {lista}, temos', end=' ')
for c in lista:
if c % 2 == 0:
p += c
print(p, end='.')
p = []
sorteia(p)
somaPar(p)
| [
"ianstigli@hotmail.com"
] | ianstigli@hotmail.com |
8e4218ea1220951808736e037e6d2bbdcde562ca | 038ce0cf1d4e6f6a8ed6736663b6bb1e02d01b2a | /the_tale/game/bills/tests/test_place_chronicle.py | fcc204731d1eddce25d58539b744f6bc03e88d80 | [
"BSD-2-Clause-Views"
] | permissive | GrandUser/the-tale | d363fc34bc3cd04ced2bd718f375fa83f887c7df | 3f7ec22c457a0c400ddb51dede7e8a3e962acf83 | refs/heads/master | 2021-01-19T06:56:52.868165 | 2016-05-22T15:07:32 | 2016-05-22T15:07:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,186 | py | # coding: utf-8
import mock
import datetime
from the_tale.game.bills.prototypes import BillPrototype, VotePrototype
from the_tale.game.bills.bills import PlaceChronicle
from the_tale.game.bills import relations
from the_tale.game.bills.tests.helpers import BaseTestPrototypes
class PlaceChronicleTests(BaseTestPrototypes):
def setUp(self):
super(PlaceChronicleTests, self).setUp()
self.bill_data = PlaceChronicle(place_id=self.place1.id, old_name_forms=self.place1.utg_name, power_bonus=relations.POWER_BONUS_CHANGES.UP)
self.bill = BillPrototype.create(self.account1, 'bill-1-caption', 'bill-1-rationale', self.bill_data, chronicle_on_accepted='chronicle-on-accepted')
def test_create(self):
self.assertEqual(self.bill.data.place_id, self.place1.id)
self.assertTrue(self.bill.data.power_bonus.is_UP)
def test_actors(self):
self.assertEqual([id(a) for a in self.bill_data.actors], [id(self.place1)])
def test_update(self):
form = self.bill.data.get_user_form_update(post={'caption': 'new-caption',
'rationale': 'new-rationale',
'chronicle_on_accepted': 'chronicle-on-accepted-2',
'place': self.place2.id,
'power_bonus': relations.POWER_BONUS_CHANGES.DOWN })
self.assertTrue(form.is_valid())
self.bill.update(form)
self.bill = BillPrototype.get_by_id(self.bill.id)
self.assertEqual(self.bill.data.place_id, self.place2.id)
self.assertTrue(self.bill.data.power_bonus.is_DOWN)
@mock.patch('the_tale.game.bills.conf.bills_settings.MIN_VOTES_PERCENT', 0.6)
@mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
def check_apply(self, change_power_mock):
VotePrototype.create(self.account2, self.bill, False)
VotePrototype.create(self.account3, self.bill, True)
form = PlaceChronicle.ModeratorForm({'approved': True})
self.assertTrue(form.is_valid())
self.bill.update_by_moderator(form)
with mock.patch('the_tale.game.places.objects.Place.cmd_change_power') as cmd_change_power:
self.assertTrue(self.bill.apply())
self.assertEqual(cmd_change_power.call_args_list, change_power_mock)
bill = BillPrototype.get_by_id(self.bill.id)
self.assertTrue(bill.state.is_ACCEPTED)
def test_apply_up(self):
self.bill.data.power_bonus = relations.POWER_BONUS_CHANGES.UP
self.check_apply([mock.call(has_place_in_preferences=False, has_person_in_preferences=False, power=6400, hero_id=None)])
def test_apply_down(self):
self.bill.data.power_bonus = relations.POWER_BONUS_CHANGES.DOWN
self.check_apply([mock.call(has_place_in_preferences=False, has_person_in_preferences=False, power=-6400, hero_id=None)])
def test_apply_not_change(self):
self.bill.data.power_bonus = relations.POWER_BONUS_CHANGES.NOT_CHANGE
self.check_apply([])
| [
"a.eletsky@gmail.com"
] | a.eletsky@gmail.com |
fd28d44e462da53a2d8d116caf63fbf4725860c6 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq3379.py | 1078d442198e20ed373b4d6bfac4e22fe0bb496a | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=37
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=34
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=35
c.append(cirq.H.on(input_qubit[3])) # number=36
c.append(cirq.X.on(input_qubit[3])) # number=32
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=33
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.X.on(input_qubit[1])) # number=25
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.Z.on(input_qubit[1])) # number=21
c.append(cirq.H.on(input_qubit[3])) # number=28
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=29
c.append(cirq.H.on(input_qubit[3])) # number=30
c.append(cirq.X.on(input_qubit[3])) # number=15
c.append(cirq.rx(1.8001325905069514).on(input_qubit[3])) # number=18
c.append(cirq.Z.on(input_qubit[1])) # number=27
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=16
c.append(cirq.H.on(input_qubit[1])) # number=22
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=10
c.append(cirq.X.on(input_qubit[1])) # number=17
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=11
c.append(cirq.Y.on(input_qubit[0])) # number=12
c.append(cirq.Y.on(input_qubit[0])) # number=13
c.append(cirq.Z.on(input_qubit[2])) # number=26
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1])) # number=23
c.append(cirq.X.on(input_qubit[0])) # number=19
c.append(cirq.X.on(input_qubit[0])) # number=20
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq3379.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
12b54e33e3f781915e7a9bb18af05b639a89a263 | dde1cf596cf5969812ecda999828baa9c73e788d | /test/test_auth_netgroups.py | aa3cde9be3901e7e8233e1965412552c0a1976d2 | [] | no_license | dctalbot/isilon_sdk_python3.7 | bea22c91096d80952c932d6bf406b433af7f8e21 | 4d9936cf4b9e6acbc76548167b955a7ba8e9418d | refs/heads/master | 2020-04-25T20:56:45.523351 | 2019-02-28T19:32:11 | 2019-02-28T19:32:11 | 173,065,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_1
from isi_sdk_8_1_1.models.auth_netgroups import AuthNetgroups # noqa: E501
from isi_sdk_8_1_1.rest import ApiException
class TestAuthNetgroups(unittest.TestCase):
"""AuthNetgroups unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAuthNetgroups(self):
"""Test AuthNetgroups"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_1.models.auth_netgroups.AuthNetgroups() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"dctalbot@umich.edu"
] | dctalbot@umich.edu |
8a9564f1229e40df8de6eb5d9843f665870b2b74 | a045dee4a9094b5d3b5cd61214dbb94fc9365f2f | /python/loop.py | 71dbbab5f93cc931d91804f1d3dfaa7f04435c63 | [] | no_license | koteswari2/Centos7 | c6273c33b3c9b3be780df914021c0bbd8173eb26 | 36ea9c885b564400ebc1ad609b52cf888e66919a | refs/heads/master | 2020-04-23T01:33:53.520430 | 2018-12-06T18:15:36 | 2018-12-06T18:15:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | #!/bin/python
count = 0
while (count < 12):
print 'The count is:',count
count=count+1
print "Good Bye!"
fruits=['banana', 'apple', 'mango']
for index in range(len(fruits)):
print'Current fruit:',fruits[index]
print "Good bye!"
Numbers=['one', 'Two', 'Three', 'Four']
for numbers in range(len(Numbers)):
print'Numbers are:',Numbers[numbers]
count = 101
while (count > 100):
print "Numbers are:",count
count=count+1
if count == 111:
break
for word in 'Python':
if word == 't':
continue
print 'current letter:', word
a=10
while a>0:
a=a-1
if a == 5:
continue
print 'current variable value:',a
print "Execution completed!"
for word in "DevOps":
print "Value of word is " + " " + word
for Places in ['Hyderabad', 'Pune', 'Chennai', 'Bangalore']:
print "%s are Metropolitean city" % (Places)
fruits = ["Mango", "Apple", "Grappes"]
for i in range (len(fruits)):
print "Value of i is " + str(i)
#n = raw_input()
for i in range(10):
print "Value of i is %d" %i
if i == 5:
print "we are at 5"
break
a = 0
b = 20
while a < b:
print "%s is the value of a and less than %s " %(a,b)
print "######################################################"
a = a + 1
print "Out of the loop"
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
0c0b8e5100b276a875ab57938e7c19b05b57eb45 | 141b42d9d72636c869ff2ce7a2a9f7b9b24f508b | /myvenv/Lib/site-packages/weasyprint/compat.py | 754923cc6f2f74ccd0df4d1a785164f628c3ca5c | [
"BSD-3-Clause"
] | permissive | Fa67/saleor-shop | 105e1147e60396ddab6f006337436dcbf18e8fe1 | 76110349162c54c8bfcae61983bb59ba8fb0f778 | refs/heads/master | 2021-06-08T23:51:12.251457 | 2018-07-24T08:14:33 | 2018-07-24T08:14:33 | 168,561,915 | 1 | 0 | BSD-3-Clause | 2021-04-18T07:59:12 | 2019-01-31T17:00:39 | Python | UTF-8 | Python | false | false | 4,229 | py | # coding: utf-8
"""
weasyprint.compat
-----------------
Workarounds for compatibility with Python 2 and 3 in the same code base.
:copyright: Copyright 2011-2014 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
import codecs
import email
import sys
__all__ = ['Request', 'base64_decode', 'base64_encode', 'basestring',
'ints_from_bytes', 'iteritems', 'izip', 'parse_email', 'parse_qs',
'pathname2url', 'quote', 'unicode', 'unichr', 'unquote',
'unquote_to_bytes', 'urlencode', 'urljoin', 'urlopen',
'urllib_get_content_type', 'urllib_get_charset',
'urllib_get_filename', 'urlparse_uses_relative', 'urlsplit',
'xrange']
# getfilesystemencoding() on Linux is sometimes stupid...
FILESYSTEM_ENCODING = sys.getfilesystemencoding() or 'utf-8'
try:
if codecs.lookup(FILESYSTEM_ENCODING).name == 'ascii':
FILESYSTEM_ENCODING = 'utf-8'
except LookupError:
FILESYSTEM_ENCODING = 'utf-8'
if sys.version_info[0] >= 3:
# Python 3
from urllib.parse import (
urljoin, urlsplit, quote, unquote, unquote_to_bytes, parse_qs,
urlencode, uses_relative as urlparse_uses_relative)
from urllib.request import urlopen, Request, pathname2url
from array import array
from base64 import (decodebytes as base64_decode,
encodebytes as base64_encode)
unicode = str
basestring = str
xrange = range
iteritems = dict.items
izip = zip
unichr = chr
def urllib_get_content_type(urlobj):
return urlobj.info().get_content_type()
def urllib_get_charset(urlobj):
return urlobj.info().get_param('charset')
def urllib_get_filename(urlobj):
return urlobj.info().get_filename()
def parse_email(data):
if isinstance(data, bytes):
data = data.decode('utf8')
return email.message_from_string(data)
def ints_from_bytes(byte_string):
"""Return a list of ints from a byte string"""
return list(byte_string)
else:
# Python 2
from urlparse import (urljoin, urlsplit, parse_qs,
uses_relative as urlparse_uses_relative)
from urllib2 import urlopen, Request
from urllib import pathname2url as _pathname2url, quote, unquote, urlencode
from array import array as _array
from itertools import izip, imap
from base64 import (decodestring as base64_decode,
encodestring as base64_encode)
unicode = unicode
basestring = basestring
xrange = xrange
iteritems = dict.iteritems
unichr = unichr
def array(typecode, initializer):
return _array(typecode.encode('ascii'), initializer)
def pathname2url(path):
if isinstance(path, unicode):
path = path.encode(FILESYSTEM_ENCODING)
return _pathname2url(path)
def urllib_get_content_type(urlobj):
return urlobj.info().gettype()
def urllib_get_charset(urlobj):
return urlobj.info().getparam('charset')
def urllib_get_filename(urlobj):
return None
def unquote_to_bytes(data):
if isinstance(data, unicode):
data = data.encode('ascii')
return unquote(data)
def parse_email(data):
if isinstance(data, unicode):
data = data.encode('utf8')
return email.message_from_string(data)
def ints_from_bytes(byte_string):
"""Return a list of ints from a byte string"""
return imap(ord, byte_string)
if sys.version_info >= (3, 2):
from gzip import GzipFile
class StreamingGzipFile(GzipFile):
def __init__(self, fileobj):
GzipFile.__init__(self, fileobj=fileobj)
self.fileobj_to_close = fileobj
def close(self):
GzipFile.close(self)
self.fileobj_to_close.close()
# Inform html5lib to not rely on these:
seek = tell = None
else:
# On older Python versions, GzipFile requires .seek() and .tell()
# which file-like objects for HTTP response do not have.
# http://bugs.python.org/issue11608
StreamingGzipFile = None
| [
"gruzdevasch@gmail.com"
] | gruzdevasch@gmail.com |
0113fac2553965828a7554e21e03e5de86aea6d6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_switching.py | d48ca7272b9db584c77d227981b909ab87a629cc | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py |
from xai.brain.wordbase.nouns._switch import _SWITCH
#calss header
class _SWITCHING(_SWITCH, ):
def __init__(self,):
_SWITCH.__init__(self)
self.name = "SWITCHING"
self.specie = 'nouns'
self.basic = "switch"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
0cc095547dc2f9bd880d45fb73fab330a3e35aee | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/17/32/6.py | dc11feea41d14fe14e55e51cbd5016d5f4d1d1cc | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | def f(intervals):
time_used = [0] * 2
for i in intervals:
time_used[i[2]] += i[1] - i[0]
flips = 0
conseq = []
for i in range(len(intervals)):
if intervals[i][2] == intervals[(i+1)% len(intervals)][2]:
gap = intervals[(i+1)% len(intervals)][0] - intervals[i][1]
if gap < 0:
gap += 60 * 24
conseq.append((gap, intervals[i][2]))
# conseq.append([intervals[(i+1)%len(intervals)][0] - intervals[i][1])
# if conseq[-1] < 0:
# conseq[-1] += 60 * 24
flips += 2
else:
flips += 1
conseq.sort()
for c in conseq:
time_used[c[1]] += c[0]
if time_used[c[1]] <= 720:
flips -= 2
return flips
T = int(input())
for case in range(1, T+1):
a,b = map(int, input().split())
intervals = []
for i in range(a):
intervals.append(list(map(int, input().split())))
intervals[-1].append(0)
for i in range(b):
intervals.append(list(map(int, input().split())))
intervals[-1].append(1)
intervals.sort()
ans = f(intervals)
print("Case #%s: %s" % (case, ans))
| [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
b0e31d35e135eebe91668b2d4f8b64ab8b2db75c | 4a81e33fe6d214f2efaeb97b03b5b05fae12b0d8 | /demos/dagster/airflow_example.py | 6245e0745b6182821d83d69420b062a10d1df7e9 | [] | no_license | franciscojavierarceo/Python | 29aaea28642dde151255c5b4a813158e975a073d | 02715ca6f19fd3c76cefa12de92deeae4ddf9684 | refs/heads/main | 2023-08-27T14:23:04.376095 | 2023-08-27T10:30:37 | 2023-08-27T10:30:37 | 33,146,755 | 7 | 9 | null | 2023-02-16T06:40:35 | 2015-03-30T20:38:00 | Jupyter Notebook | UTF-8 | Python | false | false | 2,107 | py | from datetime import datetime, timedelta
from textwrap import dedent
# The DAG object; we'll need this to instantiate a DAG
from airflow import DAG
# Operators; we need this to operate!
from airflow.operators.bash import BashOperator
# These args will get passed on to each operator
# You can override them on a per-task basis during operator initialization
default_args = {
"owner": "airflow",
"depends_on_past": False,
"email": ["airflow@example.com"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
}
with DAG(
"tutorial",
default_args=default_args,
description="A simple tutorial DAG",
schedule_interval=timedelta(days=1),
start_date=datetime(2021, 1, 1),
catchup=False,
tags=["example"],
) as dag:
# t1, t2 and t3 are examples of tasks created by instantiating operators
t1 = BashOperator(
task_id="print_date",
bash_command="date",
)
t2 = BashOperator(
task_id="sleep",
depends_on_past=False,
bash_command="sleep 5",
retries=3,
)
t1.doc_md = dedent(
"""\
#### Task Documentation
You can document your task using the attributes `doc_md` (markdown),
`doc` (plain text), `doc_rst`, `doc_json`, `doc_yaml` which gets
rendered in the UI's Task Instance Details page.

"""
)
dag.doc_md = (
__doc__ # providing that you have a docstring at the beginning of the DAG
)
dag.doc_md = """
This is a documentation placed anywhere
""" # otherwise, type it like this
templated_command = dedent(
"""
{% for i in range(5) %}
echo "{{ ds }}"
echo "{{ macros.ds_add(ds, 7)}}"
echo "{{ params.my_param }}"
{% endfor %}
"""
)
t3 = BashOperator(
task_id="templated",
depends_on_past=False,
bash_command=templated_command,
params={"my_param": "Parameter I passed in"},
)
t1 >> [t2, t3]
| [
"arceofrancisco@gmail.com"
] | arceofrancisco@gmail.com |
71a4e010e9094b9e892c02d30e60e6deff2c8e2e | 5d416e0c895f0d2e15ee3d25bbae4bcfe216ed75 | /occ_gallery/core_load_step_ap203.py | 5f64f318964b1f2c2ede543869a4bf0ce76e1f91 | [] | no_license | happydpc/PlotGallery | 952e36f36c115c734c42730dcd085685d7ec9684 | 0724aaac6ffe0328b54ed019d6695a8e9951e734 | refs/heads/master | 2022-11-12T02:21:07.619237 | 2020-06-27T10:55:13 | 2020-06-27T10:55:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,061 | py | # Copyright 2010-2017 Thomas Paviot (tpaviot@gmail.com)
##
# This file is part of pythonOCC.
##
# pythonOCC is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# pythonOCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with pythonOCC. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import random
import os
import os.path
import sys
from OCC.Core.Quantity import Quantity_Color, Quantity_TOC_RGB
from OCC.Display.SimpleGui import init_display
from OCC.Extend.TopologyUtils import TopologyExplorer
from OCC.Extend.DataExchange import read_step_file
def import_as_one_shape(event=None):
shp = read_step_file(os.path.join(
'..', 'assets', 'models', 'as1_pe_203.stp'))
display.EraseAll()
display.DisplayShape(shp, update=True)
def import_as_multiple_shapes(event=None):
compound = read_step_file(os.path.join(
'..', 'assets', 'models', 'as1_pe_203.stp'))
t = TopologyExplorer(compound)
display.EraseAll()
for solid in t.solids():
color = Quantity_Color(random.random(),
random.random(),
random.random(),
Quantity_TOC_RGB)
display.DisplayColoredShape(solid, color)
display.FitAll()
def exit(event=None):
sys.exit()
if __name__ == '__main__':
display, start_display, add_menu, add_function_to_menu = init_display()
add_menu('STEP import')
add_function_to_menu('STEP import', import_as_one_shape)
add_function_to_menu('STEP import', import_as_multiple_shapes)
start_display()
| [
"tnakaicode@gmail.com"
] | tnakaicode@gmail.com |
81fcb6c7d4700cd348b8688a2b53228db278809b | a375f9894dcfe50bc3841c4a9eaec92cd32b51ad | /iss/inventory/filedata.py | 0f0c11b88e78eaa01a6316ef7e4ecceebb0f346a | [] | no_license | v-komarov/iss | 93b53b0856c8bc71b5f10196c0bbe46031d24fd9 | c80ef000f96725041a793a1931365a05cfc71cf6 | refs/heads/master | 2020-04-24T05:21:29.707212 | 2019-09-12T02:03:07 | 2019-09-12T02:03:07 | 67,463,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,223 | py | #coding:utf-8
import pickle
import mimetypes
from iss.inventory.models import devices_scheme
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from StringIO import StringIO
from pprint import pformat
from iss.localdicts.models import address_house,device_status
from iss.inventory.models import devices
dev_use = device_status.objects.get(name="Используется")
### json схемы
def get_device_scheme(request):
if request.method == "GET":
sch_id = int(request.GET["sch"],10)
sch = devices_scheme.objects.get(pk=sch_id)
response = HttpResponse(content_type="text/plain")
response['Content-Disposition'] = 'attachment; filename="%s.txt"' % sch.name.encode("utf-8")
result = pformat(sch.scheme_device)
response.write(result)
return response
### Аудит портов выгрузка в формате csv
def get_audit_ports(request):
if request.method == "GET":
response_data = u"ADDRESS;MODEL;STATUS;SERIAL;NETELEM;MANAGE;PORTS;PORTS_USE;PORTS_RES;PORTS_TECH;COMBO;COMBO_USE;COMBO_RES;COMBO_TECH;\n"
if request.session.has_key("address_id") and request.session["address_id"] != 'undefined':
addr = address_house.objects.get(pk=int(request.session["address_id"],10))
### Когда определен только город
if addr.city and addr.street == None and addr.house == None:
data = devices.objects.filter(address__city = addr.city).order_by('address__street__name')
### Когда определен город и улица
elif addr.city and addr.street and addr.house == None:
data = devices.objects.filter(address__city = addr.city,address__street = addr.street).order_by('address__house')
### Когда определены город, улица, дом
elif addr.city and addr.street and addr.house:
data = devices.objects.filter(address__city=addr.city, address__street=addr.street, address__house=addr.house).all()
else:
data = []
else:
data = []
for d in data:
netelems = []
for ne in d.get_netelems():
netelems.append(ne['name'])
response_data = response_data + u"{addr};{model};{status};{serial};{netelem};{manage};{ports};{ports_use};{ports_res};{ports_tech};{combo};{combo_use};{combo_res};{combo_tech};\n".format(
addr=d.getaddress(),model=d.device_scheme.name if d.device_scheme else "",status=d.status,serial=d.serial,netelem=" ".join(netelems),
manage=" ".join(d.get_manage_ip()),ports=d.get_ports_count(),ports_use=d.get_use_ports(),ports_res=d.get_reserv_ports(),
ports_tech=d.get_tech_ports(),combo=d.get_combo_count(),combo_use=d.get_use_combo(),combo_res=d.get_reserv_combo(),
combo_tech=d.get_tech_combo()
)
response = HttpResponse(content_type="text/csv")
response['Content-Disposition'] = 'attachment; filename="auditports.csv"'
response.write(response_data)
return response
| [
"komarov-krsk@mail.ru"
] | komarov-krsk@mail.ru |
3a972af2f92e3721a5c5f2f21e01ceec6042fdd7 | 691e48e96cb4d676701f9f62bfb5af936d6d9cd6 | /Archive_2020/M2/Archive/Raber_George_2_1.py | b3ca14620a95adf97e242bd3a004aada631f6a65 | [] | no_license | mapossum/spa | 50d263ff1986dd73692e3d0866c2299ace180e59 | 90da0167aa6cbea067c88d30aea26440b366e6b3 | refs/heads/master | 2022-05-19T08:20:53.293065 | 2022-04-13T20:35:11 | 2022-04-13T20:35:11 | 29,207,514 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 484 | py |
def isTriangle(a,b,c):
if ((a + b) >= c):
if((a + c) >= b):
if ((b+c) >= a):
return True
return False
def isTriangle2(a,b,c):
if (((a + b) >= c) and ((a + c) >= b) and ((b+c) >= a)):
return True
else:
return False
x = raw_input("Enter first side: ")
y = raw_input("Enter second side: ")
z = raw_input("Enter third side: ")
if (isTriangle(float(x),float(y),float(z))):
print "Yes"
else:
print "No"
| [
"george.raber@usm.edu"
] | george.raber@usm.edu |
97f35bb0c9767fcf674fe4fcdb08c9eba754fb54 | 23b333449524887594530f73c0079ce60cb8eefb | /python_module/examples/905_Sort_Array_By_Parity.py | b8d2e84f5d23f08d14ebbc97adc750155e7abe8e | [] | no_license | benbendaisy/CommunicationCodes | 9deb371095f5d67e260030d3d8abf211c90e7642 | 444cc502ef26810b46115797f2e26ab305a4ebdf | refs/heads/master | 2023-08-09T21:46:58.691987 | 2023-07-20T05:11:39 | 2023-07-20T05:11:39 | 27,856,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | from typing import List
class Solution:
def sortArrayByParity1(self, nums: List[int]) -> List[int]:
if not nums:
return nums
current = runner = 0
while current < len(nums) and nums[current] % 2 == 0:
current += 1
runner = current + 1
while runner < len(nums):
while runner < len(nums) and nums[runner] % 2 != 0:
runner += 1
if runner < len(nums):
nums[current], nums[runner] = nums[runner], nums[current]
current, runner = current + 1, runner + 1
return nums
def sortArrayByParity2(self, nums: List[int]) -> List[int]:
nums.sort(key = lambda x: x % 2)
return nums
def sortArrayByParity3(self, nums: List[int]) -> List[int]:
nums1 = [x for x in nums if x % 2 == 0]
nums2 = [x for x in nums if x % 2 != 0]
return nums1 + nums2
def sortArrayByParity4(self, nums: List[int]) -> List[int]:
l, r = 0, len(nums) - 1
while l < r:
while l < r and nums[l] % 2 == 0:
l += 1
while r > l and nums[r] % 2 != 0:
r -= 1
nums[l], nums[r] = nums[r], nums[l]
l, r = l + 1, r - 1
return nums
def sortArrayByParity(self, nums: List[int]) -> List[int]:
l, r = 0, len(nums) - 1
while l < r:
if nums[l] % 2 != 0 and nums[r] % 2 == 0:
nums[l], nums[r] = nums[r], nums[l]
if nums[l] % 2 == 0: l += 1
if nums[r] % 2 != 0: r -= 1
return nums
| [
"benbendaisy@users.noreply.github.com"
] | benbendaisy@users.noreply.github.com |
321a466c69d691f1751545e6dce863aa8e2809d0 | 9b79d3530ff8907351bbc25b292e612727b52292 | /scripts/homogenise_uves.py | f7fe823d909ececc63d06d6c965ab714fd629104 | [] | no_license | andycasey/ges-corot-v2 | 1e75570e040ba1dcf937fd4e1a67457c51d4e9ad | 44c2b399ab51422c4630da9dddaf463dcb780d72 | refs/heads/master | 2021-01-12T05:25:48.721897 | 2018-01-01T21:53:32 | 2018-01-01T21:53:32 | 77,928,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,131 | py |
"""
Homogenisation models.
"""
import yaml
import logging
import numpy as np
import os
from astropy.table import Table
from collections import OrderedDict
from code import GESDatabase
from code.model.ensemble import EnsembleModel, MedianModel
# Initialize logging.
logger = logging.getLogger("ges_corot")
# Create a database object.
db_filename = "db.yaml"
with open(db_filename, "r") as fp:
credentials = yaml.load(fp)
database = GESDatabase(**credentials)
# Load the "benchmarks"
# Only use "benchmarks" with TEFF < 8000 K
benchmarks = Table.read("fits-templates/benchmarks/GES_iDR5_FGKM_Benchmarks_ARC_29092016.fits")
benchmarks = benchmarks[benchmarks["TEFF"] < 8000]
benchmarks["E_FEH"] = 0.05
model_paths = "homogenisation-uves-wg{wg}-{parameter}.model"
wgs = (1, )
parameter_scales = OrderedDict([
("teff", 250.0),
("feh", 0.10),
])
lower_sigma = dict(teff=10, feh=0.01)
sample_kwds = dict(chains=4, iter=20000)
finite = np.isfinite(benchmarks["TEFF"] * benchmarks["LOGG"] * benchmarks["FEH"])
benchmarks = benchmarks[finite]
models = {}
for wg in wgs:
models[wg] = {}
for parameter, scale in parameter_scales.items():
model_path = model_paths.format(wg=wg, parameter=parameter)
if os.path.exists(model_path):
model = EnsembleModel.read(model_path, database)
else:
model = EnsembleModel(database, wg, parameter, benchmarks,
model_path="code/model/ensemble-model-5node.stan" if parameter == "teff" else "code/model/ensemble-model-4node.stan")
data, metadata = model._prepare_data(
default_sigma_calibrator=scale,
sql_constraint="n.name like 'UVES-%'")
assert all([n.startswith("UVES-") for n in metadata["node_names"]])
if parameter == "teff":
assert data["N"] == 5
else:
assert data["N"] == 4
data["lower_sigma"] = lower_sigma[parameter]
init = {
"truths": data["mu_calibrator"],
"biases": np.zeros(data["N"]),
"missing_estimates": np.random.uniform(
data["lower_bound"], data["upper_bound"], size=data["TM"]),
"sigma_sys_constant": scale * np.ones(data["N"]),
"L_corr": np.eye(data["N"])
}
# Polynomial coefficients
for i in range(1, 9):
for tlf in "tlf":
for j, ba in enumerate("ba", start=1):
init["vs_{tlf}{ba}{i}".format(i=i, tlf=tlf, ba=ba)] = j
for i in range(7, 10):
init["vs_tc{}".format(i)] = np.ones(data["N"])
# Optimize and sample.
op_params = model.optimize(data, init=init, iter=100000)
fit = model.sample(data, init=op_params, **sample_kwds)
model.write(model_path, overwrite=True)
model.homogenise_stars_matching_query(
"SELECT DISTINCT ON (cname) cname FROM results WHERE setup LIKE 'UVES%'",
sql_constraint="setup like 'UVES%'")
| [
"andycasey@gmail.com"
] | andycasey@gmail.com |
4a7ca01b50a92f1683aaa1cc9b81c498302dae96 | dbd3f562768ded628d5d8d7ee3e05a3e88970a8d | /novel/crawler/rudaozhisheng/spiders/sdmoz_spider.py | 7bf341537172979515f7c84725ee9ca2bf681dba | [
"Apache-2.0"
] | permissive | East196/hello-py | ff51044494399c01d2c2c82ce6d753fb2258d944 | a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21 | refs/heads/master | 2021-07-23T23:28:26.096703 | 2021-06-11T13:30:13 | 2021-06-11T13:30:13 | 97,002,099 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | import scrapy
class SmallDmozSpider(scrapy.Spider):
name = "sdmoz"
allowed_domains = ["biquge.tw"]
start_urls = [
"http://www.biquge.tw/0_52/"
]
def parse(self, response):
for sel in response.xpath('//*[@id="list"]/dl/dd'):
yield {
'title': sel.xpath('a/text()').extract()[0],
'link': sel.xpath('a/@href').extract()[0],
'desc': sel.xpath('text()').extract()[0],
'url': response.urljoin(sel.xpath('a/@href').extract()[0])
}
| [
"2901180515@qq.com"
] | 2901180515@qq.com |
ccfef64859c6d1e96faa77bb2752ffbec3926fef | 1e7ce1c56f3030aa6df1e928bab559f50c59bad5 | /logIntoRegister/models.py | e4a74fc975ac6ecf50a4fcfd8bb8b944e5518f0b | [] | no_license | AIRob/WxRobot | f7fe37331c399a9d7fb467c7e913f10cc981f8eb | b27a48edb44694d4faa349d68d9b753fe4063276 | refs/heads/master | 2020-06-05T04:53:11.310909 | 2019-05-17T06:46:30 | 2019-05-17T06:46:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | <<<<<<< HEAD
# from django.db import models
#
#
# # Create your models here.
#
#
#
#
# class SelectedGroups(models.Model):
# group_name = models.CharField(max_length=32)
# wechat_id = models.ForeignKey('WechatId',on_delete=models.CASCADE)
#
#
# class SelectedFriends(models.Model):
# friend_name = models.CharField(max_length=32)
# wechat_id = models.ForeignKey('WechatId',on_delete=models.CASCADE)
#
# class WechatId(models.Model):
# puid = models.CharField(max_length=32,null=False,unique=True)
# status = models.NullBooleanField(default=True) # 可以为空的布尔值
# user_info = models.ForeignKey('UserInfo',on_delete=models.CASCADE,default=1)
#
# class UserInfo(models.Model):
# username = models.CharField(max_length=32,unique=True)
# userpwd = models.CharField(max_length=32)
#
=======
# from django.db import models
#
#
# # Create your models here.
#
#
#
#
# class SelectedGroups(models.Model):
# group_name = models.CharField(max_length=32)
# wechat_id = models.ForeignKey('WechatId',on_delete=models.CASCADE)
#
#
# class SelectedFriends(models.Model):
# friend_name = models.CharField(max_length=32)
# wechat_id = models.ForeignKey('WechatId',on_delete=models.CASCADE)
#
# class WechatId(models.Model):
# puid = models.CharField(max_length=32,null=False,unique=True)
# status = models.NullBooleanField(default=True) # 可以为空的布尔值
# user_info = models.ForeignKey('UserInfo',on_delete=models.CASCADE,default=1)
#
# class UserInfo(models.Model):
# username = models.CharField(max_length=32,unique=True)
# userpwd = models.CharField(max_length=32)
#
>>>>>>> acb8c86e5915306157008056c793ddc27ee3fd97
| [
"1194681498@qq.com"
] | 1194681498@qq.com |
c13449ffc202d6270e8d6c963b16b4ecc48dfb1b | 842396faef069255a7fe811cfe92a5aa8da358ec | /models/det/ssd.py | 418d4e6aa5e033d0390c2bb53ae99923089770c6 | [
"Apache-2.0"
] | permissive | Deusy94/PyTorchCV | 76dd659f4f63ae2ad133d63ed05bf6da60379601 | 1cb7378c334763cd695ce7d65bbfc347d2adbb18 | refs/heads/master | 2020-03-20T18:46:24.470852 | 2018-06-15T07:32:35 | 2018-06-15T07:32:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,071 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: YangMaoke, DuanZhixiang({maokeyang, zhixiangduan}@deepmotion.ai)
# SSD model
import torch
import torch.nn.functional as F
from torch import nn
from torchvision import models
from collections import OrderedDict
from utils.layers.det.multibox_layer import MultiBoxLayer
DETECTOR_CONFIG = {
'num_centrals': [256, 128, 128, 128],
'num_strides': [2, 2, 1, 1],
'num_padding': [1, 1, 0, 0],
'vgg_cfg': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512],
}
class SSD(nn.Module):
def __init__(self, configer):
super(SSD, self).__init__()
self.configer = configer
self.img_size = self.configer.get('data', 'input_size')
self.num_features = self.configer.get('details', 'num_feature_list')
self.num_centrals = DETECTOR_CONFIG['num_centrals']
self.num_paddings = DETECTOR_CONFIG['num_padding']
self.num_strides = DETECTOR_CONFIG['num_strides']
self.vgg_features = self.__make_vgg_layers(DETECTOR_CONFIG['vgg_cfg'])
self.norm4 = L2Norm2d(20)
max_size = max(self.img_size)
if max_size < 448:
self.feature1 = None
self.feature2 = None
self.feature3 = None
self.feature4 = None
self.feature5 = None
self.feature_mode = 'small'
elif 448 < max_size < 896:
self.feature1 = None
self.feature2 = None
self.feature3 = None
self.feature4 = None
self.feature5 = None
self.feature6 = None
self.feature_mode = 'large'
else:
self.feature1 = None
self.feature2 = None
self.feature3 = None
self.feature4 = None
self.feature5 = None
self.feature6 = None
self.feature7 = None
self.feature_mode = 'huge'
self.__make_extra_layers(mode=self.feature_mode)
self.multibox_layer = MultiBoxLayer(configer)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight.data)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
@staticmethod
def __make_vgg_layers(cfg, batch_norm=True):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def __make_extra_layers(self, mode='small'):
self.feature1 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2d(512, self.num_features[0], kernel_size=3, padding=6, dilation=6),
nn.ReLU(),
nn.Conv2d(self.num_features[0], self.num_features[1], kernel_size=1),
nn.ReLU(),
)
# 'num_features': [512, 1024, 512, 256, 256, 256].
# 'num_centrals': [256, 128, 128, 128],
# 'num_strides': [2, 2, 1, 1],
# 'num_padding': [1, 1, 0, 0],
self.feature2 = self.__extra_layer(num_in=self.num_features[1], num_out=self.num_features[2],
num_c=self.num_centrals[0], stride=self.num_strides[0],
pad=self.num_paddings[0])
self.feature3 = self.__extra_layer(num_in=self.num_features[2], num_out=self.num_features[3],
num_c=self.num_centrals[1], stride=self.num_strides[1],
pad=self.num_paddings[1])
self.feature4 = self.__extra_layer(num_in=self.num_features[3], num_out=self.num_features[4],
num_c=self.num_centrals[2], stride=self.num_strides[2],
pad=self.num_paddings[2])
self.feature5 = self.__extra_layer(num_in=self.num_features[4], num_out=self.num_features[5],
num_c=self.num_centrals[3], stride=self.num_strides[3],
pad=self.num_paddings[3])
if mode == 'large':
self.feature6 = self.__extra_layer(num_in=self.num_features[5], num_out=self.num_features[6],
num_c=self.num_centrals[4], stride=self.num_strides[4],
pad=self.num_paddings[4])
elif mode == 'huge':
self.feature6 = self.__extra_layer(num_in=self.num_features[5], num_out=self.num_features[6],
num_c=self.num_centrals[4], stride=self.num_strides[4],
pad=self.num_padding[4])
self.feature7 = self.__extra_layer(num_in=self.num_features[6], num_out=self.num_features[7],
num_c=self.num_centrals[5], stride=self.num_strides[5],
pad=self.num_padding[5])
@staticmethod
def __extra_layer(num_in, num_out, num_c, stride, pad):
layer = nn.Sequential(
nn.Conv2d(num_in, num_c, kernel_size=1),
nn.ReLU(),
nn.Conv2d(num_c, num_out, kernel_size=3, stride=stride, padding=pad),
nn.ReLU(),
)
return layer
def forward(self, _input):
det_feature = []
feature = self.vgg_features(_input)
det_feature.append(self.norm4(feature))
feature = F.max_pool2d(feature, kernel_size=2, stride=2, ceil_mode=True)
feature = self.feature1(feature)
det_feature.append(feature)
feature = self.feature2(feature)
det_feature.append(feature)
feature = self.feature3(feature)
det_feature.append(feature)
feature = self.feature4(feature)
det_feature.append(feature)
feature = self.feature5(feature)
det_feature.append(feature)
if self.feature_mode == 'large':
feature = self.feature6(feature)
det_feature.append(feature)
elif self.feature_mode == 'huge':
feature = self.feature6(feature)
det_feature.append(feature)
feature = self.feature7(feature)
det_feature.append(feature)
loc_preds, conf_preds = self.multibox_layer(det_feature)
return loc_preds, conf_preds
def load_pretrained_weight(self, net):
blocks = self.vgg_features
layer_count = 0
for l1, l2 in zip(net.features, blocks):
if isinstance(l1, nn.Conv2d) and isinstance(l2, nn.Conv2d):
assert l1.weight.size() == l2.weight.size()
l2.weight.data.copy_(l1.weight.data)
layer_count += 1
elif isinstance(l1, nn.BatchNorm2d) and isinstance(l2, nn.BatchNorm2d):
assert l1.weight.size() == l2.weight.size()
l2.weight.data.copy_(l1.weight.data)
print("total %d layers loaded" % layer_count)
class L2Norm2d(nn.Module):
"""L2Norm layer across all channels."""
def __init__(self, scale):
super(L2Norm2d, self).__init__()
self.scale = scale
def forward(self, x, dim=1):
"""out = scale * x / sqrt(\sum x_i^2)"""
_sum = x.pow(2).sum(dim).clamp(min=1e-12).rsqrt()
out = self.scale * x * _sum.unsqueeze(1).expand_as(x)
return out
if __name__ == "__main__":
pass
| [
"youansheng@gmail.com"
] | youansheng@gmail.com |
4dff10447702b74bca7a88c84a38dc58bf3e18c8 | 00a086a141acc551c9e3aa23356013cdc8d61b61 | /LeetCode/python/lc289.py | 9765dda3844433c83de34b7037469f098be009db | [] | no_license | ZwEin27/Coding-Training | f01cebbb041efda78bca4bf64e056133d7b7fad7 | 409109478f144791576ae6ca14e2756f8f2f5cb0 | refs/heads/master | 2021-01-18T12:25:06.081821 | 2016-09-04T17:43:44 | 2016-09-05T17:43:44 | 29,571,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,555 | py | class Solution(object):
def gameOfLife(self, board):
    """
    :type board: List[List[int]]
    :rtype: void Do not return anything, modify board in-place instead.
    """
    # Two-pass in-place update using an intermediate encoding:
    #   -1 : was live, dies        -2 : was dead, becomes live
    # A neighbor's *original* state is live iff its value is 1 or -1.
    rows = len(board)
    if not rows:
        return
    cols = len(board[0])
    # Pass 1: mark every transition.
    for r in range(rows):
        for c in range(cols):
            live_neighbors = 0
            for dr in (-1, 0, 1):
                for dc in (-1, 0, 1):
                    if dr == 0 and dc == 0:
                        continue
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < rows and 0 <= nc < cols and board[nr][nc] in (1, -1):
                        live_neighbors += 1
            cell = board[r][c]
            if cell == 0 and live_neighbors == 3:
                board[r][c] = -2  # reproduction
            elif cell == 1 and not (2 <= live_neighbors <= 3):
                board[r][c] = -1  # under- or over-population
    # Pass 2: collapse the encoding back to final 0/1 states.
    for r in range(rows):
        for c in range(cols):
            if board[r][c] == -1:
                board[r][c] = 0
            elif board[r][c] == -2:
                board[r][c] = 1
# Smoke test: a vertical blinker (three live cells in a column) becomes a
# horizontal blinker after one generation.  (Python 2 print statement.)
board = [[0,0,0,0,0],[0,0,1,0,0],[0,0,1,0,0],[0,0,1,0,0],[0,0,0,0,0]]
Solution().gameOfLife(board)
print board
| [
"zwein27@gmail.com"
] | zwein27@gmail.com |
d8cfd574b58fca80b9a6051ace0186f039807463 | 7357d367b0af4650ccc5b783b7a59090fdde47bb | /image_similarity/main.py | 6f971af1a30acd1391e4986d9755c9b5f90eb847 | [
"MIT"
] | permissive | BarracudaPff/code-golf-data-python | fb0cfc74d1777c4246d56a5db8525432bf37ab1a | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | refs/heads/main | 2023-05-29T05:52:22.856551 | 2020-05-23T22:12:48 | 2020-05-23T22:12:48 | 378,832,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | app = Flask(__name__)
api = Api(app)
index = RetrievalIndex()
class BuildIndex(Resource):
    """POST /build/ -- (re)build the similarity index for one user.

    Expects a JSON body with keys "user_id", "image_hashes" and
    "image_embeddings"; delegates to the module-level RetrievalIndex.
    """

    def post(self):
        # NOTE(review): the body is parsed manually and not validated, so a
        # missing key raises KeyError (HTTP 500) -- confirm that is intended.
        request_body = json.loads(request.data)
        user_id = request_body["user_id"]
        image_hashes = request_body["image_hashes"]
        image_embeddings = request_body["image_embeddings"]
        index.build_index_for_user(user_id, image_hashes, image_embeddings)
        # Report the element count (`ntotal`) of the user's backing index.
        return jsonify({"status": True, "index_size": index.indices[user_id].ntotal})
class SearchIndex(Resource):
    """POST /search/ -- return items most similar to a query embedding.

    JSON body: "user_id", "image_embedding", optional "n" (result count,
    default 100).  Responds with {"status": bool, "result": [...]}.
    """

    def post(self):
        try:
            request_body = json.loads(request.data)
            user_id = request_body["user_id"]
            image_embedding = request_body["image_embedding"]
            # Number of neighbors to return; default to 100 when absent.
            n = int(request_body["n"]) if "n" in request_body else 100
            res = index.search_similar(user_id, image_embedding, n)
            return jsonify({"status": True, "result": res})
        # Narrowed from BaseException: the old clause also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception as e:
            logger.error(str(e))
            # BUG FIX: jsonify() has no `status` keyword -- the original
            # `jsonify({...}, status=500)` raised TypeError while handling
            # the error.  Return a (response, status_code) tuple instead so
            # Flask sets HTTP 500.
            return jsonify({"status": False, "result": []}), 500
api.add_resource(BuildIndex, "/build/")
api.add_resource(SearchIndex, "/search/")
if __name__ == "__main__":
logger.info("starting server")
server = WSGIServer(("0.0.0.0", 8002), app)
server_thread = gevent.spawn(server.serve_forever)
gevent.joinall([server_thread]) | [
"sokolov.yas@gmail.com"
] | sokolov.yas@gmail.com |
79f5a64dca1ddc5ecb2d2711d0668cd0a6378b37 | 58bcbdbd6a99a8bdf02fff5bb011f3f8a9b8482e | /Level-2/trie_longest_prefix_matching.py | b30b200f49f4ea135cf1e5a9aa87f0c91f50fc12 | [] | no_license | hansrajdas/algorithms | 0e868bd2a53a77f1e37a9ac4ca3a4c9a8e1ebcca | 062c628f5364414b257b7ba67c97999726128237 | refs/heads/master | 2023-07-20T08:26:37.793097 | 2023-07-06T16:36:19 | 2023-07-06T16:36:19 | 111,433,269 | 80 | 51 | null | 2023-04-30T10:20:58 | 2017-11-20T16:11:02 | Python | UTF-8 | Python | false | false | 3,628 | py | #!/usr/bin/python
# Date: 2018-10-01
#
# Description:
# Given a dictionary of words and an input string, find the longest prefix of
# the string which is also a word in dictionary. For example, let the
# dictionary contains the following words:
# {are, area, base, cat, cater, children, basement}
#
# Below are some input/output examples:
# --------------------------------------
# Input String Output
# --------------------------------------
# caterer cater
# basemexy base
# child <Empty>
#
# Approach:
# Scan given word with matching characters in trie, if end of word is found,
# keep track of it.
# If there is mismatch between input string and trie, exit and return last
# longest matched word if any.
#
# Complexity:
# O(n), n = length of input string
ALPHABET_SIZE = 26
class TrieNode:
    """A single trie node: one child slot per letter plus an end-of-word flag."""

    def __init__(self):
        """Create a node with no children that terminates no word."""
        self.children = [None] * ALPHABET_SIZE
        self.is_end_of_word = False
class Trie:
  """Implements trie with all basic operations.

  Stores lowercase a-z words only: characters are mapped to child slots via
  ord(ch) - ord('a'), so any other character indexes out of range.
  """
  def __init__(self):
    """Initializes a new trie."""
    self.root = self.get_new_node();
  def get_new_node(self):
    """Initializes a new trie node."""
    return TrieNode()
  def _char_to_array_index(self, ch):
    """Converts given character to array index of size 26 - between 0 to 25."""
    return ord(ch) - ord('a')
  def _index_to_char(self, index):
    """Converts an array index(0 to 25) to character between a to z."""
    return chr(index + ord('a'))
  def insert(self, word):
    """Inserts a new word into trie if not already present."""
    ptr = self.root
    for ch in word:
      index = self._char_to_array_index(ch)
      # If node for required character not present then create a new node.
      if not ptr.children[index]:
        ptr.children[index] = self.get_new_node()
      ptr = ptr.children[index]
    # Mark the final node as terminating a complete word.
    ptr.is_end_of_word = True
  def longest_prefix_matched(self, string):
    """Returns longest prefix matched in trie for a given string.

    Walks the trie along `string`; records the length of the most recent
    complete word seen, and stops at the first character with no child.
    """
    ptr = self.root
    longest_word_len_matched = None
    matched_chars = 0
    for ch in string:
      index = self._char_to_array_index(ch)
      # A word shorter than string found in trie, update longest word len.
      if ptr.is_end_of_word:
        longest_word_len_matched = matched_chars
      if ptr.children[index]:
        matched_chars += 1
        ptr = ptr.children[index]
      else:
        break
    # Check if whole string was scanned and end of word for this node was set,
    # it means whole string passed is present in trie, return actual string.
    # Otherwise if longest word match has some value then some shorter string
    # was found in trie, return that
    # NOTE(review): the truthiness test below treats a recorded length of 0
    # (possible only if the empty word were inserted) the same as "no match".
    if ptr.is_end_of_word and len(string) == matched_chars:
      return string
    elif longest_word_len_matched:
      return string[0:longest_word_len_matched]
    else:
      return None  # No string found in trie having required prefix
def main():
  """Demo: build a small dictionary trie and query longest word prefixes."""
  trie = Trie()
  for entry in ('are', 'area', 'base', 'cat', 'cater', 'children', 'basement'):
    trie.insert(entry)
  # Expected: are, are, cater, base, None, basement, None
  queries = ['are', 'arex', 'caterer', 'basemexy', 'child', 'basement', 'xyz']
  for query in queries:
    print(trie.longest_prefix_matched(query))


if __name__ == '__main__':
  main()
| [
"raj.das.136@gmail.com"
] | raj.das.136@gmail.com |
849696f2c312e96672252c8cca06d92dd87ae8fb | 404a8596d3c4a55efe57e6fe5f2f19747a487e28 | /baekjoon/2751_sort_nums_2.py | aed4677996952ed8745149aff1d724edcdf1ee23 | [] | no_license | taehwan920/Algorithm | 370b72e48ba404ae1fb7a7786165b88a8daf090a | f837034d0c2f7cac370eb8cceacb8b3827ec62f9 | refs/heads/master | 2021-08-17T07:14:33.594428 | 2021-01-01T14:26:35 | 2021-01-01T14:26:35 | 237,892,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | import heapq
import sys
# Rebind input to the faster sys.stdin.readline for bulk reads.
input = sys.stdin.readline
n = int(input())  # number of values to sort
a = []
# Push each value onto a min-heap: O(log n) per insert.
for i in range(n):
    heapq.heappush(a, int(input()))
# Pop n times to emit the values in ascending order.
for i in range(n):
    print(heapq.heappop(a))
| [
"taehwan920@gmail.com"
] | taehwan920@gmail.com |
3192b95243ac852a4a5ad0bf21df516af0591b37 | b9ff79430c4fd4bb5bf6723d2868d97c9df5194a | /pyina/tools.py | e4bee33648eb80250c31aa4e10971bf4ef8c47a8 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | resurgo-genetics/pyina | e50a011a4647a5027cce508b4cf75b4406e2b4d3 | 5a15c10f184595acb2eab58a2fca1c1300bc6e69 | refs/heads/master | 2021-06-24T23:28:59.463735 | 2017-09-10T21:37:29 | 2017-09-10T21:37:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,082 | py | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2017 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/pyina/blob/master/LICENSE
"""
Various mpi python tools
Main function exported are::
- ensure_mpi: make sure the script is called by mpi-enabled python
- get_workload: get the workload the processor is responsible for
"""
def ensure_mpi(size = 1, doc = None):
    """
    ensure that mpi-enabled python is being called with the appropriate size

    inputs:
        - size: minimum required size of the MPI world  [default = 1]
        - doc: error string to print if the size restriction is violated
    """
    if doc == None:
        doc = "Error: Requires MPI-enabled python with size >= %s" % size
    from pyina.mpi import world
    mpisize = world.Get_size()
    mpirank = world.Get_rank()
    if mpisize < size:
        # Only the root rank prints the error; every rank exits.
        # (Python 2 print statement.)
        if mpirank == 0: print doc
        import sys
        sys.exit()
    return
def mpiprint(string="", end="\n", rank=0, comm=None):
    """print the given string to the given rank

    string: the text to print
    end: terminator appended to the string (default newline)
    rank: an int rank, or an iterable of ranks, that should print
    comm: the MPI communicator to use (defaults to pyina's world)
    """
    from pyina.mpi import world
    if comm is None: comm = world
    # Normalize a single rank to a 1-tuple so the membership test below works.
    if not hasattr(rank, '__len__'): rank = (rank,)
    if comm.rank in rank:
        # Python 2 print statement; the trailing comma suppresses the
        # automatic newline (`end` already carries the terminator).
        print string+end,
#XXX: has light load on *last* proc, heavy/equal on first proc
from math import ceil
def get_workload(index, nproc, popsize, skip=None):
    """returns the workload that this processor is responsible for

    index: int rank of node to calculate for
    nproc: int number of nodes
    popsize: int number of jobs
    skip: int rank of a node that receives no work (i.e. the master)
    returns (begin, end) index
    """
    this_is_skipped = False
    if skip is not None and skip < nproc:
        # Distribute over one fewer worker; remap ranks past the skipped one.
        nproc -= 1
        if index == skip:
            this_is_skipped = True
        elif index > skip:
            index -= 1
    workers_left = nproc
    jobs_left = popsize
    end = 0
    # Hand out ceil(jobs_left / workers_left) jobs per rank until we reach
    # the requested rank; chunk sizes shrink as the remainder is consumed.
    for rank in range(nproc):
        begin = end
        share = int(ceil(1.0 * jobs_left / workers_left))
        jobs_left -= share
        workers_left -= 1
        end += share
        if rank == index:
            break
    if this_is_skipped:
        # The skipped rank owns an empty range.
        return (begin, begin) if (index < nproc) else (end, end)
    return (begin, end)  # (begin, end) index for a single element
#FIXME: has light load on *last* proc, heavy/equal on master proc
import numpy as np
def balance_workload(nproc, popsize, *index, **kwds):
    """divide popsize elements on 'nproc' chunks

    nproc: int number of nodes
    popsize: int number of jobs
    index: int rank of node(s) to calculate for (using slice notation)
    skip: int rank of node upon which to not calculate (i.e. the master)
    returns (begin, end) index vectors"""
    _skip = False
    skip = kwds.get('skip', None)
    if skip is not None and skip < nproc:
        nproc = nproc - 1
        _skip = True
    # Base chunk size must use floor division: the remainder `diff` below has
    # to be non-negative.  The original np.round(popsize/nproc) relied on
    # Python 2 integer '/'; under Python 3 true division could round up and
    # corrupt the distribution.  '//' is identical for ints on both versions.
    count = popsize // nproc
    # dtype=int: the np.int alias was removed in numpy >= 1.24.
    counts = count * np.ones(nproc, dtype=int)
    diff = popsize - count * nproc
    counts[:diff] += 1  # spread the remainder over the first `diff` chunks
    begin = np.concatenate(([0], np.cumsum(counts)[:-1]))
    if _skip:
        # Re-insert the skipped node as an empty (zero-length) chunk.
        if skip == nproc:  # remember: nproc has been reduced
            begin = np.append(begin, begin[-1] + counts[-1])
            counts = np.append(counts, 0)
        else:
            begin = np.insert(begin, skip, begin[skip])
            counts = np.insert(counts, skip, 0)
    if not index:
        return begin, begin + counts  # (begin, end) index for all elements
    return lookup((begin, begin + counts), *index)  # index a single element
def lookup(inputs, *index):
    """get tuple of inputs corresponding to the given index"""
    # A single index selects one element; two indices form slice(start, stop).
    selector = index[0] if len(index) == 1 else slice(*index)
    return tuple(item[selector] for item in inputs)
def isoseconds(time):
    """calculate number of seconds from a given isoformat timestring"""
    if isinstance(time, int):
        return time  #XXX: allow this?
    import datetime
    days = 0
    try:
        # bare seconds, up to 59  #XXX: allow 60+ ?
        clock = datetime.datetime.strptime(time, "%S").time()
    except ValueError:
        pieces = str(time).count(":") or 2  # ValueError above when no ":"
        if pieces == 1:
            clock = datetime.datetime.strptime(time, "%H:%M").time()
        elif pieces == 3:
            # day count included (up to 31)
            stamp = datetime.datetime.strptime(time, "%d:%H:%M:%S")
            days, clock = stamp.day, stamp.time()
        else:
            # maxtime is '23:59:59'  #XXX: allow 24+ hours instead of days?
            clock = datetime.datetime.strptime(time, "%H:%M:%S").time()
    return clock.second + 60*clock.minute + 3600*clock.hour + days*86400
def isoformat(seconds):
    """generate an isoformat timestring for the given time in seconds

    seconds: int total seconds (may span several days, up to 31)
    returns "HH:MM:SS", prefixed with "D:" when the whole-day count D is nonzero
    raises ValueError when the time spans more than 31 days
    """
    import datetime
    # Floor division throughout: the original '/' was integer division under
    # Python 2 but float division under Python 3, where datetime.time() then
    # rejects the float arguments.  '//' is identical for ints on both.
    d = seconds // 86400
    if d > 31: datetime.date(1900, 1, d)  # deliberately raises ValueError (day > 31)
    h = (seconds - d*86400) // 3600
    m = (seconds - d*86400 - h*3600) // 60
    s = seconds - d*86400 - h*3600 - m*60
    t = datetime.time(h, m, s).strftime("%H:%M:%S")
    return ("%s:" % d) + t if d else t  #XXX: better convert days to hours?
def which_mpirun(mpich=None, fullpath=False):
    """try to autodetect an available mpi launcher

    mpich: if True only look for an MPICH launcher; if False only OpenMPI;
           if None (the default) accept either family
    fullpath: if False (default) return just the program name, else the path
    returns the launcher name/path, or None when none is found
    """
    import os
    from pox import which
    progs = ['mpiexec', 'mpirun', 'mpiexec-mpich-mp', 'mpiexec-openmpi-mp', 'mpirun-mpich-mp', 'mpirun-openmpi-mp']
    # `pop` names the family to *exclude* from the candidate list.
    if mpich == True: pop = 'openmpi'
    elif mpich == False: pop = 'mpich'
    else: pop = 'THIS IS NOT THE MPI YOU ARE LOOKING FOR'  # excludes nothing
    progs = (i for i in progs if pop not in i)
    mpi = None
    for prog in progs:
        # first candidate found on the execution path wins
        mpi = which(prog, ignore_errors=True)
        if mpi: break
    if mpi and not fullpath:
        mpi = os.path.split(mpi)[-1]  # strip the directory, keep the name
    return mpi
def which_strategy(scatter=True, lazy=False, fullpath=True):
    """try to autodetect an available strategy (scatter or pool)

    scatter: if True look for ezscatter.py, otherwise ezpool.py
    lazy: if True defer resolution to the shell with a backtick `which ...`
          expression (forced off on Windows, which lacks `which`)
    fullpath: if True resolve to the absolute path on the execution path
    returns the strategy script (name, path, or shell expression), or None
    """
    target = 'ezscatter.py' if scatter else 'ezpool.py'
    import sys
    if (sys.platform[:3] == 'win'): lazy=False  # no `which` in cmd.exe
    if lazy: target = "`which %s`" % target
    # lookup full path
    elif not lazy and fullpath:
        from pox import which
        target = which(target, ignore_errors=True)
        if not target: target = None #XXX: better None or "" ?
    return target
def which_python(lazy=False, fullpath=True):
    "get an invocation for this python on the execution path"
    # NOTE: the local import deliberately shadows this function's own name.
    from pox import which_python
    # check if the versioned python (e.g. python3.9) is on the path
    py = which_python(lazy=False, version=True, fullpath=True)
    if not lazy and fullpath and py: return py
    import sys
    if (sys.platform[:3] == 'win'): lazy=False  # no `which` in cmd.exe
    # if on the path, apply user's options
    return which_python(lazy=lazy, version=bool(py), fullpath=fullpath)
# backward compatability
from pox import wait_for
if __name__=='__main__':
    # Self-test: the scalar get_workload and the vectorized balance_workload
    # must produce identical (begin, end) partitions, with and without `skip`.
    # NOTE(review): Python 2 only -- it compares zip(...) (a list under py2)
    # against list literals and relies on integer '/' division (n/2).
    n = 7 #12
    pop = 12 #7
    #XXX: note the two ways to calculate
    assert get_workload(0, n, pop) == balance_workload(n, pop, 0)
    assert [get_workload(i, n, pop) for i in range(n)] == \
           zip(*balance_workload(n, pop))
    assert [get_workload(i, n, pop) for i in range(0,n/2)] == \
           zip(*balance_workload(n, pop, 0, n/2))
    assert zip(*balance_workload(n,pop,0,n)) == zip(*balance_workload(n,pop))
    assert zip(*balance_workload(n,pop,0,1)) == [balance_workload(n,pop,0)]
    assert get_workload(0,n,pop,skip=0) == balance_workload(n,pop,0,skip=0)
    assert get_workload(0,n,pop,skip=n) == balance_workload(n,pop,0,skip=n)
    assert get_workload(0,n,pop,skip=n+1) == balance_workload(n,pop,0,skip=n+1)
    assert [get_workload(i, n, pop, skip=0) for i in range(n)] == \
           zip(*balance_workload(n, pop, skip=0))
    assert [get_workload(i, n, pop, skip=n) for i in range(n)] == \
           zip(*balance_workload(n, pop, skip=n))

# End of file
| [
"mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df"
] | mmckerns@8bfda07e-5b16-0410-ab1d-fd04ec2748df |
db91578931443d7f2791801190c7bdc58d38034f | 4f3a4c194451eae32f1ff7cf3b0db947e3892365 | /415/main.py | 24e16793ddd87bd89c0591f41b92f5430b4e3786 | [] | no_license | szhongren/leetcode | 84dd848edbfd728b344927f4f3c376b89b6a81f4 | 8cda0518440488992d7e2c70cb8555ec7b34083f | refs/heads/master | 2021-12-01T01:34:54.639508 | 2021-11-30T05:54:45 | 2021-11-30T05:54:45 | 83,624,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | """
Given two non-negative numbers num1 and num2 represented as string, return the sum of num1 and num2.
Note:
The length of both num1 and num2 is < 5100.
Both num1 and num2 contains only digits 0-9.
Both num1 and num2 does not contain any leading zero.
You must not use any built-in BigInteger library or convert the inputs to integer directly.
"""
def digit_val(ch):
    """Map a decimal digit character to its integer value ('7' -> 7)."""
    return ord(ch) - ord('0')
def digit_ch(v):
    """Map an integer digit value to its character (7 -> '7')."""
    return chr(ord('0') + v)
class Solution(object):
    def addStrings(self, num1, num2):
        """Add two non-negative decimal numbers given as strings.

        Digits are combined manually (no big-integer conversion, per the
        problem constraints).

        :type num1: str
        :type num2: str
        :rtype: str
        """
        # Pad the shorter operand with leading zeros so the digits align.
        # (zfill + divmod replace the hand-rolled padding and the module-level
        # digit_val/digit_ch helpers, making the method self-contained.)
        width = max(len(num1), len(num2))
        num1, num2 = num1.zfill(width), num2.zfill(width)
        carry = 0
        pieces = []
        # Walk the digits right-to-left, doing grade-school addition.
        for a, b in zip(reversed(num1), reversed(num2)):
            carry, digit = divmod(ord(a) - 48 + ord(b) - 48 + carry, 10)
            pieces.append(chr(digit + 48))
        if carry == 1:
            pieces.append("1")
        return "".join(reversed(pieces))
| [
"shao.zhongren@gmail.com"
] | shao.zhongren@gmail.com |
55cde4ec66954dc7a6e8a6a6b0e71276593a00a3 | 390801ebef56b11e71c012a8a73b8b37a2ab0dc2 | /master/src/redsmaster/server.py | 4a55a3349876e131ac51e21815eedd600e15279d | [
"Apache-2.0"
] | permissive | tpoeppke/reds | 0ced79729c297619ee881109903ef539eca4cf4b | 103aba7dccddc10bc445e8c8170336e44436e3c9 | refs/heads/master | 2021-01-15T16:16:04.650137 | 2014-10-01T18:58:23 | 2014-10-01T18:58:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,652 | py | # coding=utf-8
"""
This module provides the interface and the base class for the servermanager
component.
This file is part of redsmaster, which was developed as part of a
bachelor thesis at the Karlsruhe Institute of Technology, Germany and
is hereby released under the following license terms.
Copyright 2013 Tobias Pöppke
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import signal
import os
import time
import atexit
import logging
from logging.handlers import RotatingFileHandler
from cement.core import interface, handler, backend
from redsmaster import config, exc, status, log
from redsmaster.daemonize import daemonize
LOG = log.get_logger("serverbase")
SERVERLOG = log.SERVERLOG
def server_validator(class_, obj):
    """cement validator: assert that handler `obj` implements IServerManager.

    The framework calls this with the interface class and a candidate
    handler; interface.validate raises when a required member is missing.
    """
    # Members every servermanager handler must provide.
    members = [
        "_setup",
        "start",
        "stop",
        "_run",
        "exit",
        ]
    interface.validate(IServerManager, obj, members)
class IServerManager(interface.Interface):
    """cement interface definition for server manager handlers."""

    class IMeta:
        # Handlers register under this label; server_validator enforces the API.
        label = "servermanager"
        validator = server_validator

    def _setup(self):
        """ Make sure the server is ready to use."""
        pass

    def start(self):
        """ Start the server to receive requests from clients."""
        pass

    @staticmethod
    def stop(pid):
        """ Stop the server with the process id pid."""
        pass

    def _run(self):
        """ Runs the main server loop, takes requests and processes them.

        This method must be implemented by subclasses of BaseServerManager.
        """
        pass

    def exit(self):
        """ Perform all necessary steps to exit the server.

        This method must be implemented by subclasses of BaseServerManager.
        """
        pass
class BaseServerManager(handler.CementBaseHandler):
    """Base implementation of the IServerManager interface.

    Handles server logging, daemonizing, status registration and shutdown;
    subclasses implement _pre_daemonize()/_run() with the actual server loop.
    """

    class Meta:
        # Subclasses must supply a concrete handler label.
        label = None
        interface = IServerManager

    def __init__(self, **kw):
        super(BaseServerManager, self).__init__(**kw)
        self.server_logfile = None  # path of the rotating server log file
        self.status = None          # RedsMasterStatus of the running server

    def _setup(self, app_obj):
        super(BaseServerManager, self)._setup(app_obj)

    def _setup_serverlog(self):
        """Attach a rotating file handler for the server-side log."""
        self.server_logfile = self.app.configmanager.get_option('serverlog')
        filehandler = RotatingFileHandler(self.server_logfile,
                                          backupCount=5)
        filehandler.setLevel(logging.DEBUG)
        filehandler.setFormatter(log.formatter)
        SERVERLOG.addHandler(filehandler)
        mountpoint = self.app.configmanager.get_option('mountpoint', self.app)
        storage_url = self.app.configmanager.get_option('storage-url', self.app)
        SERVERLOG.info("Starting log for storage '%s' at mountpoint '%s'.",
                       storage_url, mountpoint)

    def _update_current_status(self):
        """Record mountpoint/storage/log/pid of this process in self.status."""
        mountpoint = self.app.configmanager.get_option('mountpoint')
        storage_url = self.app.configmanager.get_option('storage-url')
        serverlog = self.app.configmanager.get_option('serverlog')
        self.status = status.RedsMasterStatus(mountpoint=mountpoint,
                                              storage_url=storage_url,
                                              serverlog=serverlog,
                                              pid=os.getpid())

    def _pre_daemonize(self):
        """
        Here the subclass can implement what has to be done before
        the daemon is run.
        """
        pass

    def _run(self):
        """
        Template method for the subclass to overwrite. Here the subclass
        can implement what has to be done to run the server.
        """

    def start(self):
        """Prepare logging, daemonize (unless --fg), register and run."""
        LOG.debug("Setup serverlog")
        self._setup_serverlog()
        LOG.debug("Execute pre-daemonize")
        self._pre_daemonize()
        if not self.app.pargs.fg:
            LOG.debug("Make the daemon")
            daemonize()
        SERVERLOG.debug("Updating status")
        self._update_current_status()
        # Guarantee cleanup/unregistration on interpreter exit.
        atexit.register(self.exit)
        SERVERLOG.debug("Register server back to statusmanager")
        self.app.statusmanager.register_server(self.status)
        SERVERLOG.debug("Actually run the server")
        self._run()

    @staticmethod
    def stop(pid):
        """Stop the server process `pid` by sending SIGTERM until it is gone.

        The loop exits via OSError ("No such process") once the process has
        terminated; any other OSError is wrapped in exc.ServerError.
        """
        try:
            LOG.info("Killing process with pid: %s" % pid)
            while 1:
                os.kill(pid, signal.SIGTERM)
                time.sleep(1)
        except OSError as err:
            err = str(err)
            if not err.find("No such process") > 0:
                raise exc.ServerError("Could not stop the server!\n%s" % err)

    def exit(self):
        """Unmount the storage and unregister this server (atexit hook)."""
        # We have to unregister here
        SERVERLOG.info("Unmounting storage...")
        self.app.statusmanager.unmount()
        self.app.statusmanager.unregister_server()
        SERVERLOG.info("Exiting daemon!")
# Register the global --fg option so the server can be kept in the
# foreground instead of daemonizing (read via self.app.pargs.fg above).
config.register_option(name="fg", section="general", action="store_true",
                       help=("Don't daemonize the server and stay in "
                             "foreground."))
| [
"none@none"
] | none@none |
0addd10dbc38520d4cdd0a2ae9394f4066d84e56 | 215bff5814f1f004d993625875c9754041c55b7f | /polyfile/polyfile.py | 64c1137a3f198968001eca8fe2f424e00e65ca60 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | polymath-is/polyfile | d641f3ce8928b29e67f4550fbfe49e18cea7193b | 5c35f703c89cebc208c45adf140ec05e42fcc4c2 | refs/heads/master | 2023-04-30T07:16:15.972382 | 2021-05-19T23:27:59 | 2021-05-19T23:27:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,749 | py | import base64
from json import dumps
from pathlib import Path
import pkg_resources
from typing import Any, Dict, IO, Iterator, List, Optional, Set, Tuple, Type, Union
from .fileutils import FileStream
from . import logger
from .magic import MagicMatcher, MatchContext
__version__: str = pkg_resources.require("polyfile")[0].version
CUSTOM_MATCHERS: Dict[str, Type["Match"]] = {}
log = logger.getStatusLogger("polyfile")
class submatcher:
    """Class decorator registering a custom matcher for one or more filetypes.

    Usage: ``@submatcher("application/pdf", ...)``.  The decorated class must
    provide a ``submatch`` method; it is recorded in CUSTOM_MATCHERS under
    every listed filetype and returned unchanged.
    """

    def __init__(self, *filetypes):
        self.filetypes = filetypes

    def __call__(self, MatcherClass: Type["Match"]):
        if not hasattr(MatcherClass, 'submatch'):
            raise ValueError(f"Matcher class {MatcherClass} must implement the `submatch` function")
        CUSTOM_MATCHERS.update((filetype, MatcherClass) for filetype in self.filetypes)
        return MatcherClass
class InvalidMatch(ValueError):
    """Raised by a submatcher to signal that a candidate match is not valid."""
    pass
class Match:
    """A (possibly nested) match of some filetype within a file.

    A Match needs either a `parent` (from which it inherits its matcher and
    into whose children it registers itself) or an explicit `matcher`.
    Offsets are relative to the parent; `offset` resolves the global value.
    """
    def __init__(self,
                 name: str,
                 match_obj: Any,
                 relative_offset: int = 0,
                 length: Optional[int] = None,
                 parent: Optional["Match"] = None,
                 matcher: Optional["Matcher"] = None,
                 display_name: Optional[str] = None,
                 img_data: Optional[str] = None,
                 decoded: Optional[bytes] = None
    ):
        if parent is not None:
            if not isinstance(parent, Match):
                raise ValueError("The parent must be an instance of a Match")
            # Register this match as a child of its parent.
            parent._children.append(self)
            if matcher is None:
                matcher = parent.matcher
        if matcher is None:
            raise(ValueError("A Match must be initialized with `parent` and/or `matcher` not being None"))
        self.matcher: Optional[Matcher] = matcher
        self.name: str = name
        if display_name is None:
            self.display_name: str = name
        else:
            self.display_name = display_name
        self.match = match_obj
        self.img_data: Optional[str] = img_data
        self.decoded: Optional[bytes] = decoded
        self._offset: int = relative_offset
        self._length: Optional[int] = length
        self._parent: Optional[Match] = parent
        self._children: List[Match] = []

    @property
    def children(self) -> Tuple["Match", ...]:
        # Immutable snapshot of the child list.
        return tuple(self._children)

    def __len__(self):
        return len(self._children)

    def __iter__(self) -> Iterator["Match"]:
        return iter(self._children)

    def __getitem__(self, index: int) -> "Match":
        return self._children[index]

    @property
    def parent(self) -> Optional["Match"]:
        return self._parent

    @property
    def offset(self) -> int:
        """The global offset of this match with respect to the original file"""
        if self.parent is not None:
            return self.parent.offset + self.relative_offset
        else:
            return self.relative_offset

    @property
    def root(self) -> "Match":
        # Walk the parent chain up to the topmost match.
        if self.parent is None:
            return self
        else:
            return self.parent.root

    @property
    def root_offset(self) -> int:
        # Offset of this match relative to the root match.
        return self.offset - self.root.offset

    @property
    def relative_offset(self) -> int:
        """The offset of this match relative to its parent"""
        return self._offset

    @property
    def length(self) -> int:
        """The number of bytes in the match"""
        return self._length

    def to_obj(self):
        """Return a JSON-serializable dict of this match and all descendants."""
        ret = {
            'relative_offset': self.relative_offset,
            'offset': self.offset,
            'size': self.length,
            'type': self.name,
            'name': self.display_name,
            'value': str(self.match),
            'subEls': [c.to_obj() for c in self]
        }
        if self.img_data is not None:
            ret['img_data'] = self.img_data
        if self.decoded is not None:
            # Raw bytes are base64-encoded so the dict stays JSON-safe.
            ret['decoded'] = base64.b64encode(self.decoded).decode('utf-8')
        return ret

    def json(self) -> str:
        """Serialize this match tree to a JSON string."""
        return dumps(self.to_obj())

    def __repr__(self):
        return f"{self.__class__.__name__}(match={self.match!r}, relative_offset={self._offset}, parent={self._parent!r})"

    def __str__(self):
        return f"Match<{self.match}>@{self._offset}"
class Submatch(Match):
    """Marker subclass of Match used for nested (sub)matches; same behavior."""
    pass
class Matcher:
    """Runs libmagic-style matching over a file and yields Match objects,
    optionally delegating to registered custom submatchers."""

    def __init__(self, try_all_offsets: bool = False, submatch: bool = True, matcher: Optional[MagicMatcher] = None):
        if matcher is None:
            # Fall back to the shared default magic matcher instance.
            self.magic_matcher: MagicMatcher = MagicMatcher.DEFAULT_INSTANCE
        else:
            self.magic_matcher = matcher
        self.try_all_offsets: bool = try_all_offsets
        self.submatch: bool = submatch

    def handle_mimetype(self, mimetype: str, match_obj: Any, data: bytes, file_stream: Union[str, Path, IO, FileStream],
                        parent: Optional[Match] = None) -> Iterator[Match]:
        """Yield a Match for `mimetype`, running its custom submatcher if any.

        When a custom matcher is registered and submatching is enabled, the
        match is only yielded after its `submatch` generator has been probed,
        so an InvalidMatch raised there suppresses the match entirely.
        """
        if self.submatch and mimetype in CUSTOM_MATCHERS:
            m = CUSTOM_MATCHERS[mimetype](
                mimetype,
                match_obj,
                0,
                length=len(data),
                parent=parent,
                matcher=self
            )
            # Don't yield this custom match until we've tried its submatch function
            # (which may throw an InvalidMatch, meaning that this match is invalid)
            try:
                with FileStream(file_stream) as fs:
                    submatch_iter = m.submatch(fs)
                    try:
                        # Pull one submatch eagerly to trigger validation.
                        first_submatch = next(submatch_iter)
                        has_first = True
                    except StopIteration:
                        has_first = False
                    yield m
                    if has_first:
                        yield first_submatch
                        yield from submatch_iter
            except InvalidMatch:
                pass
        else:
            yield Match(
                mimetype,
                match_obj,
                0,
                length=len(data),
                parent=parent,
                matcher=self
            )

    def match(self, file_stream: Union[str, Path, IO, FileStream], parent: Optional[Match] = None) -> Iterator[Match]:
        """Yield matches for the given file, one per distinct mimetype."""
        with FileStream(file_stream) as f:
            matched_mimetypes: Set[str] = set()
            context = MatchContext.load(f, only_match_mime=True)
            for magic_match in self.magic_matcher.match(context):
                for mimetype in magic_match.mimetypes:
                    # Each mimetype is handled at most once per file.
                    if mimetype in matched_mimetypes:
                        continue
                    matched_mimetypes.add(mimetype)
                    yield from self.handle_mimetype(mimetype, magic_match, context.data, file_stream, parent)
| [
"evan.sultanik@trailofbits.com"
] | evan.sultanik@trailofbits.com |
e81b17c71c1cb0913b2f8e14c46017fe82f0e1cb | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L7H/7H-3D_MD_NVT_rerun/set_5.py | 7f57a150d70e13e49817e3791e8e39dac33e1288 | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import os
# Root of this TI (thermodynamic integration) run tree.
# NOTE(review): `dir` shadows the builtin of the same name.
dir = '/mnt/scratch/songlin3/run/bace/L7H/MD_NVT_rerun/ti_one-step/7H_3D/'
filesdir = dir + 'files/'
# Template inputs: production MD input file and PBS submission script.
temp_prodin = filesdir + 'temp_prod_5.in'
temp_pbs = filesdir + 'temp_5.pbs'
# Lambda values; one working subdirectory per value, named "%6.5f".
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    #prodin: copy the template and substitute the XXX placeholder with lambda
    prodin = workdir + "%6.5f_prod_5.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    #PBS: same copy-and-substitute for the scheduler script
    pbs = workdir + "%6.5f_5.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #submit pbs (left disabled; uncomment to queue the jobs)
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
a92f9fd99fc7d819745baec75940dd2ea2b01017 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /ouWxFayrk3ySG6jsg_14.py | 7b3411d094999fbd0fc346bae879f0df0acdf731 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | """
Create a function that takes a Tic-tac-toe board and returns `"X"` if the X's
are placed in a way that there are three X's in a row or returns `"O"` if
there is three O's in a row.
### Examples
who_won([
["O", "X", "O"],
["X", "X", "O"],
["O", "X", "X"]
]) ➞ "X"
who_won([
["O", "O", "X"],
["X", "O", "X"],
["O", "X", "O"]
]) ➞ "O"
### Notes
* There are no Ties.
* All places on the board will have either "X" or "O".
* Check **Resources** for more info.
"""
def who_won(board):
    """Return "X" or "O" for whichever player has three in a row.

    Per the problem notes there are no ties and every cell holds "X" or "O";
    if no line is found anyway, "Draw" is printed and None returned.
    """
    # Main diagonal, then anti-diagonal.
    if board[0][0] == board[1][1] == board[2][2]:
        return board[0][0]
    if board[0][2] == board[1][1] == board[2][0]:
        return board[0][2]
    # Each row, then the matching column.
    for i in range(len(board)):
        if board[i][0] == board[i][1] == board[i][2]:
            return board[i][0]
        if board[0][i] == board[1][i] == board[2][i]:
            return board[0][i]
    print("Draw")
    return None
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
1843cc115832f296547195c3eea022d4a3149e73 | 8aa0d1d407bb1c66d01261f7e2c4e9832e856a2d | /experiments/experiments_toy/convergence_average/nmf_vb.py | b1db51c8ac94db9d6a3934198d7d2840f56e367b | [] | no_license | garedaba/BNMTF_ARD | 59e3ec1dbfd2a9ab9f4ec61368ec06e3783c3ee4 | 0a89e4b4971ff66c25010bd53ee2622aeaf69ae9 | refs/heads/master | 2022-01-16T06:57:12.581285 | 2018-06-10T10:22:12 | 2018-06-10T10:22:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,501 | py | """
Recover the toy dataset using VB.
Measure the convergence over iterations and time.
We run the algorithm 20 times and average the training error and time stamps.
We only store the MSE.
"""
# NOTE(review): Python 2 script (print statements below).
import sys, os
project_location = os.path.dirname(__file__)+"/../../../../"
sys.path.append(project_location)  # make the BNMTF_ARD package importable
from BNMTF_ARD.code.models.bnmf_vb import bnmf_vb
import numpy
import scipy
import random
import matplotlib.pyplot as plt
''' Location of toy data, and where to store the performances. '''
input_folder = project_location+"BNMTF_ARD/data/toy/bnmf/"
output_folder = project_location+"BNMTF_ARD/experiments/experiments_toy/convergence_average/results/"
output_file_performances = output_folder+'nmf_vb_all_performances.txt'
output_file_times = output_folder+'nmf_vb_all_times.txt'
''' Model settings. '''
iterations = 500
init_UV = 'random'
I, J, K = 100, 80, 10  # matrix dimensions and number of factors
ARD = False
repeats = 20  # independent runs to average over
lambdaU = 0.1
lambdaV = 0.1
alphatau, betatau = 1., 1.
alpha0, beta0 = 1., 1.
hyperparams = { 'alphatau':alphatau, 'betatau':betatau, 'alpha0':alpha0, 'beta0':beta0, 'lambdaU':lambdaU, 'lambdaV':lambdaV }
''' Load in data. '''
R = numpy.loadtxt(input_folder+"R.txt")
M = numpy.ones((I,J))  # fully-observed mask
''' Run the algorithm, :repeats times, and average the timestamps. '''
times_repeats = []
performances_repeats = []
for i in range(0,repeats):
    # Set all the seeds
    numpy.random.seed(i), random.seed(i), scipy.random.seed(i)
    # Run the classifier
    BNMF = bnmf_vb(R,M,K,ARD,hyperparams)
    BNMF.initialise(init_UV)
    BNMF.run(iterations)
    # Extract the performances and timestamps across all iterations
    times_repeats.append(BNMF.all_times)
    performances_repeats.append(BNMF.all_performances['MSE'])
''' Print out the performances, and the average times, and store them in a file. '''
all_times_average = list(numpy.average(times_repeats, axis=0))
all_performances_average = list(numpy.average(performances_repeats, axis=0))
print "all_times_average = %s" % all_times_average
print "all_performances_average = %s" % all_performances_average
open(output_file_times,'w').write("%s" % all_times_average)
open(output_file_performances,'w').write("%s" % all_performances_average)
''' Plot the average time plot, and performance vs iterations. '''
plt.figure()
plt.title("Performance against average time")
plt.plot(all_times_average, all_performances_average)
plt.ylim(0,10)
plt.figure()
plt.title("Performance against iteration")
plt.plot(all_performances_average)
plt.ylim(0,10) | [
"tab43@cam.ac.uk"
] | tab43@cam.ac.uk |
d3b6b9011bd034a5756ca0acced2c681b0e821ae | a04e9aa34f9c1d03186c57035b27c1d62d1b3a5e | /Fundamentals/mathPractice.py | 9e8b679e7d1d496840bf72e9aaecf023c25186ab | [] | no_license | dusty-g/PythonAssignments | eb7eaac6a78e85aad851c2e87dde9ee2a63ef09f | 2f4750fe71cee6205ee9c70d04dbf697174cc6ea | refs/heads/master | 2021-01-19T00:10:00.614254 | 2017-04-08T03:58:48 | 2017-04-08T03:58:48 | 87,146,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | def multiples():
for i in range(1000):
print i + 1
# multiples()
def multiples2():
    """Print every multiple of 5 from 5 through 1,000,000, one per line."""
    for i in range(5, 1000001, 5):
        # Parenthesised call form: `print i` was Python-2-only syntax;
        # `print(i)` behaves identically on Python 2 and runs on Python 3.
        print(i)
# multiples2()
def sumList(list1):
    """Return the sum of the numbers in list1 (0 for an empty list)."""
    # The built-in replaces the original manual accumulator loop.
    return sum(list1)
# sumList([1, 2, 5, 10, 255, 3])
def avgList(list1):
    """Print the arithmetic mean of the numbers in list1.

    Raises ZeroDivisionError for an empty list (unchanged behaviour).
    """
    # `print ...` was a Python 2 statement; the call form works on 2 and 3.
    # The built-in sum() replaces the sumList() helper with identical output.
    print(sum(list1) / float(len(list1)))
avgList([1, 2, 5, 10, 255, 3])
| [
"dustygalindo@gmail.com"
] | dustygalindo@gmail.com |
befd707fa5cab2ca392f9e5cfc4bdfa53e2c0bce | 70fa6468c768d4ec9b4b14fc94fa785da557f1b5 | /lib/surface/iam/service_accounts/keys/list.py | 664217fd66ec34a97c87b78406194809f5d2130e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | kylewuolle/google-cloud-sdk | d43286ef646aec053ecd7eb58566ab2075e04e76 | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | refs/heads/master | 2020-04-20T22:10:41.774132 | 2019-01-26T09:29:26 | 2019-01-26T09:29:26 | 169,131,028 | 0 | 0 | NOASSERTION | 2019-02-04T19:04:40 | 2019-02-04T18:58:36 | Python | UTF-8 | Python | false | false | 2,879 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for listing service account keys."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.api_lib.iam import util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iam import iam_util
from googlecloudsdk.core.util import times
class List(base.ListCommand):
  """List the keys for a service account."""

  detailed_help = {
      'EXAMPLES': textwrap.dedent("""\
          To list all user-managed keys created before noon on July 19th, 2015
          (to perform key rotation, for example), run:

            $ {command} --iam-account my-iam-account@somedomain.com --managed-by user --created-before 2015-07-19T12:00:00Z
          """),
  }

  @staticmethod
  def Args(parser):
    """Register the command's flags on *parser*."""
    parser.add_argument('--managed-by',
                        choices=['user', 'system', 'any'],
                        default='any',
                        help='The types of keys to list.')
    parser.add_argument(
        '--created-before',
        type=arg_parsers.Datetime.Parse,
        help=('Return only keys created before the specified time. '
              'Common time formats are accepted. This is equivalent to '
              '--filter="validAfterTime<DATE_TIME". See '
              '$ gcloud topic datetimes for information on time formats.'))
    # Fixed help text: the previous string ('A textual name to display for
    # the account.') was copied from a display-name flag and did not describe
    # this argument, which takes the service account's email address.
    parser.add_argument('--iam-account',
                        required=True,
                        type=iam_util.GetIamAccountFormatValidator(),
                        help='The service account whose keys to list.')
    parser.display_info.AddFormat(iam_util.SERVICE_ACCOUNT_KEY_FORMAT)

  def Run(self, args):
    """Fetch the account's keys, optionally filtered by creation time.

    Returns:
      The list of key messages for the account, restricted to keys created
      before --created-before when that flag is given.
    """
    client, messages = util.GetClientAndMessages()
    result = client.projects_serviceAccounts_keys.List(
        messages.IamProjectsServiceAccountsKeysListRequest(
            name=iam_util.EmailToAccountResourceName(args.iam_account),
            keyTypes=iam_util.ManagedByFromString(args.managed_by)))
    keys = result.keys
    if not args.created_before:
      return keys
    # The List API has no server-side time filter, so filter client-side.
    cutoff = args.created_before
    return [key for key in keys
            if times.ParseDateTime(key.validAfterTime) < cutoff]
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
697f3f61c9c7e2df13f7b2058b7bdff717354b4e | 137b64063baf8dea981abe9e649f6846744240f7 | /curvebeats.py | 79633d40bb579965ea482076ce6a1f2f9552bfb4 | [] | no_license | hecanjog/sketches | c46b55cecb7a4d5f26a5233344a2e274d69380e4 | f533114a42f23e405e973fe4822f2aefea18a1e0 | refs/heads/master | 2021-05-15T01:29:32.178054 | 2017-10-09T09:33:45 | 2017-10-09T09:33:45 | 37,076,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,378 | py | from pippi import dsp, tune
from hcj import keys, fx, drums
# Load the one-shot drum samples (pippi returns raw audio byte strings).
kick = dsp.read('snds/kick.wav').data
hat = dsp.read('snds/hat.wav').data
snare = dsp.read('snds/snare.wav').data

bpm = 100
numbars = 8 * 4 * 4         # total bars to render
barlength = 6               # NOTE(review): unused in the visible code
beat = dsp.bpm2frames(bpm)  # frames per beat at this tempo

# rhodes chord sequence
numchords = numbars
out = ''                    # master output buffer (Python 2 byte string)

# start with ran chord
# each time mutate by moving up or down the scale by a step
# each time between one and two voices move
chord = [ dsp.randint(1, 15) for _ in range(4) ]
def nextChord(last):
    """Mutate *last* (a list of scale degrees) in place and return it.

    One or two randomly chosen voices are shifted up or down by one scale
    step, clamped so no voice falls below degree 1.  There is deliberately
    (see the commented lines) no upper clamp, so degrees can drift above 15.

    NOTE: `last.index(deg)` finds the *first* occurrence of the degree, so
    when the chord contains duplicate degrees a different voice than the one
    drawn may be the one that moves.
    """
    # Shuffle the voices and take 1-2 of them to mutate.
    degs = dsp.randshuffle(last)[:dsp.randint(1,2)]
    for deg in degs:
        # Move the voice one step up or down.
        newdeg = deg + dsp.randchoose([-1,1])
        if newdeg == 0:
            newdeg = 1  # never fall below the first scale degree
        #elif newdeg > 15:
        #    newdeg = 15
        last[last.index(deg)] = newdeg
    return last
kickp = 'x '
snarep = ' x '
hatp = 'xx'
def makeHat(length, i, amp):
    """Pad the hi-hat sample out to *length* frames (i and amp are unused)."""
    return dsp.fill(hat, length, silence=True)
def makeKick(length, i, amp):
    """Pad the kick sample out to *length* frames (i and amp are unused)."""
    return dsp.fill(kick, length, silence=True)
def makeSnare(length, i, amp):
    """Build one snare hit: random-length cut, aliased, tapered, padded, boosted.

    The order of the dsp.rand*/randint calls matches the original so the
    pseudo-random stream is consumed identically.
    """
    snd = dsp.cut(snare, 0, dsp.randint(dsp.mstf(40), dsp.flen(snare)))
    snd = dsp.alias(snd, dsp.randint(4, 12))
    snd = dsp.taper(snd)
    snd = dsp.fill(snd, length, silence=True)
    return dsp.amp(snd, dsp.rand(2, 4))
# Pick the sustained "common tone" for the drone voice.
# NOTE(review): the first assignment is immediately overwritten by the
# second, so only the dsp.randint(1, 9) value is ever used — confirm intent.
commontone = dsp.randchoose(tune.fromdegrees(chord, octave=1, root='c'))
commontone = dsp.randint(1, 9)

# Main render loop: one bar per iteration, appended to the master buffer.
# (Python 2 code: audio buffers are byte strings and `/` is integer division.)
for b in range(numbars):
    # Re-roll the whole chord every 4 bars.
    if b % 4 == 0:
        chord = [ dsp.randint(1, 15) for _ in range(4) ]
    layers = []
    length = beat * dsp.randchoose([2, 3, 4, 6])
    # One rhodes layer per chord voice, each with random amplitude and pan.
    for freq in tune.fromdegrees(chord, octave=2, root='c'):
        #freq = freq * 2**dsp.randint(0,3)
        amp = dsp.rand(0.25, 0.75)
        layer = keys.rhodes(length, freq, amp)
        layer = dsp.pan(layer, dsp.rand())
        layers += [ layer ]
    layers = dsp.mix(layers)
    # Drone: four pulsar voices on the common tone, lasting the whole bar.
    ctf = tune.fromdegrees([ commontone ], octave=2, root='c')[0]
    drone = dsp.mix([ keys.pulsar(ctf, dsp.flen(layers), amp=0.3) for _ in range(4) ])
    # Mutate the harmony for the next bar; drift the common tone every 2 bars.
    chord = nextChord(chord)
    if b % 2 == 0:
        commontone = commontone + dsp.randchoose([-1,1])
        if commontone == 0:
            commontone = 1
    # Shuffle the bar in beat-third slices (and the drone in whole beats).
    layers = dsp.split(layers, beat / 3)
    layers = dsp.randshuffle(layers)
    layers = ''.join(layers)
    drone = dsp.split(drone, beat)
    drone = dsp.randshuffle(drone)
    drone = ''.join(drone)
    #hats = dsp.fill(dsp.fill(hat, beat / 4), dsp.flen(layers))
    #kicks = dsp.fill(kick, dsp.flen(layers), silence=True)
    hats = drums.parsebeat(hatp, 8, beat, dsp.flen(layers), makeHat, 12)
    kicks = drums.parsebeat(kickp, 4, beat, dsp.flen(layers), makeKick, 0)
    snares = drums.parsebeat(snarep, 8, beat, dsp.flen(layers), makeSnare, 0)
    # NOTE(review): `hats` is rendered but left out of the mix below — the
    # commented line shows it was dropped at some point; confirm intent.
    #dr = dsp.mix([ hats, kicks, snares ])
    dr = dsp.mix([ kicks, snares ])
    # Chop the drum bar into eighth-beat grains, shuffle and random-repeat them.
    d = dsp.split(dr, beat / 8)
    d = dsp.randshuffle(d)
    #print len(d)
    #d = dsp.packet_shuffle(d, dsp.randint(2, 4))
    #print len(d)
    d = [ dd * dsp.randint(1, 2) for dd in d ]
    d = ''.join(d)
    d = dsp.fill(dsp.mix([d, dr]), dsp.flen(layers))
    d = dsp.amp(d, 3)
    layers = dsp.mix([ layers, d, drone ])
    # Aliased rhodes ostinato on middle C, repeated to cover the bar.
    ost = keys.rhodes(beat, tune.ntf('c', octave=4), 0.6)
    ost = dsp.env(ost, 'phasor')
    numosts = dsp.flen(layers) / dsp.flen(ost)
    ost = ''.join([ dsp.alias(ost) for _ in range(numosts) ])
    layers = dsp.mix([ layers, ost ])
    out += layers

# Render the whole piece to disk.
dsp.write(out, 'out')
| [
"erik@hecanjog.com"
] | erik@hecanjog.com |
86647ff3247732543dbf966521db608f81e11eb5 | 4e2e57a8b91a7a77c512a60cfe8263812c2c21e6 | /zolia_server/vibration/migrations/0001_initial.py | 08afe0afab4b5b58ded79f340ec3e14ea8d741e8 | [] | no_license | AceOfGSM/Zolpia-Server | 89b31813033626b3f646bf30ed3c99a582bc0a86 | 34f21a4bc541117bc67d6fcccd24d47f78081ca2 | refs/heads/develop | 2023-01-23T23:00:25.237916 | 2020-12-06T12:09:16 | 2020-12-06T12:09:16 | 317,856,809 | 0 | 0 | null | 2020-12-05T05:48:40 | 2020-12-02T12:35:56 | Python | UTF-8 | Python | false | false | 1,341 | py | # Generated by Django 3.1.4 on 2020-12-05 00:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the vibration app.

    Creates VibrationPattern (a named pattern string) and VibrationSetting
    (a per-user alarm configuration referencing a pattern).
    """

    # First migration of this app.
    initial = True

    dependencies = [
        # The user model must exist before VibrationSetting can reference it.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='VibrationPattern',
            fields=[
                # The pattern's name doubles as its primary key.
                ('name', models.CharField(max_length=128, primary_key=True, serialize=False)),
                ('pattern', models.CharField(default='', max_length=256)),
            ],
        ),
        migrations.CreateModel(
            name='VibrationSetting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=128)),
                ('isAlarm', models.BooleanField(default=False)),
                # Stored as text rather than a time field; nullable.
                ('alarmTimeTo', models.CharField(max_length=128, null=True)),
                # Settings are deleted together with their owning user.
                ('userID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                # Optional link to a VibrationPattern by its name key.
                ('vibrationPatternName', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='vibration.vibrationpattern')),
            ],
        ),
    ]
| [
"hanbin8269@gmail.com"
] | hanbin8269@gmail.com |
c9df6ce57932310797cb9fdbc3615f188102aa3e | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/1601. Maximum Number of Achievable Transfer Requests.py | e1bf97fed3d283cd36d5b85c14f6821b87a50d91 | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from typing import List
import collections
class Solution:
    def maximumRequests(self, n: int, requests: List[List[int]]) -> int:
        """Return the maximum number of requests that can all be granted.

        A set of requests is achievable iff the net occupancy change of every
        building is zero.  With at most 16 requests, trying subsets from the
        largest size downward and returning on the first balanced one is both
        simpler and far cheaper than the original approach of storing every
        reachable occupancy tuple (whose state sets can grow exponentially).

        :param n: number of buildings (0 .. n-1)
        :param requests: list of [from_building, to_building] transfers
        :return: the size of the largest achievable subset of requests
        """
        from itertools import combinations

        m = len(requests)
        for size in range(m, 0, -1):
            for chosen in combinations(range(m), size):
                balance = [0] * n
                for idx in chosen:
                    src, dst = requests[idx]
                    balance[src] -= 1
                    balance[dst] += 1
                # All buildings net to zero -> every request in `chosen` works.
                if not any(balance):
                    return size
        return 0
| [
"19241008o"
] | 19241008o |
ac5b27554b844c44be1535d23861da6cc414251e | eb4f61315e8f0b139d0af3a95c59a2907845ebfd | /7-8(am)/all folders/practice/PYTHON/regular expressions/regular5.py | a8cb37d70cc5c524673146e1fa676ae6acb3d6a1 | [] | no_license | vamsikrishna6668/python-core | c1d368792fa6f89bf51ae690d20b45cb5ae0fb98 | b66ad0f6ad0deffbc350d5a7996f23220940e187 | refs/heads/master | 2020-04-02T10:00:39.641928 | 2018-10-23T12:19:14 | 2018-10-23T12:19:14 | 154,320,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | import re
# Copy the 1st and 4th word of every non-empty record in 'emp' (presumably
# the student/employee id and the mobile number — TODO confirm against the
# input format) into prasad2.txt, tab separated, under a 'sid mobile' header.
# `with` guarantees both files are closed even if a line raises (the original
# leaked both handles on error).
with open('emp', 'r') as fobj1, open('prasad2.txt', 'w') as fobj2:
    fobj2.write('sid mobile')
    fobj2.write('\n')
    for line in fobj1:
        lst = re.findall(r'\w{1,}', line)
        if lst:  # skip blank lines; lines with <4 tokens still raise IndexError
            fobj2.write(lst[0] + "\t" + lst[3])
            fobj2.write('\n')
"vamsikrishna6668@gmail.com"
] | vamsikrishna6668@gmail.com |
b56cb428597908dc471af9ef6cbde791e048d423 | 1509d32ef8854845428961c3ead89fff26c0dd9d | /automate_boringStuff.py | a0d70d3c6835b83a124da1373794198f6971be0d | [] | no_license | antonioam82/ejercicios-python | d623324e2e59ecddcf03400064a8aa3591bfd7e5 | 2dfe23494b9b34a9a0abe9379dcb69af9b1e9d73 | refs/heads/master | 2023-09-04T02:16:55.584179 | 2023-09-03T19:50:05 | 2023-09-03T19:50:05 | 137,039,851 | 35 | 41 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import pyautogui
import time
from VALID import OKI, ns
#DELETING FILES FROM "Python" FOLDER.
while True:
    # Ask how many files to delete (OKI presumably validates the integer —
    # TODO confirm in VALID).  NOTE(review): the +1 makes the loop below run
    # num+1 times for an input of num; confirm whether that is intended.
    num =(OKI(input("Número de archivos: ")))+1
    for i in range(0,num):
        # Select the first file in the folder view...
        pyautogui.moveTo(241,132)
        time.sleep(0.3)
        pyautogui.click()
        time.sleep(0.5)
        # ...open its context menu...
        pyautogui.click(button='right')
        time.sleep(0.6)
        # ...and click the delete entry (coordinates are screen-specific).
        pyautogui.moveTo(274,699)
        time.sleep(0.3)
        pyautogui.click()
    # Ask whether to do another batch; ns presumably normalises to 'n'/'s'.
    conti = ns(input("¿Continuar?: "))
    if conti == "n":
        break
| [
"noreply@github.com"
] | antonioam82.noreply@github.com |
30c52904bb299428f5bc9084a45ccae38b2cfe9b | cc0a0b1237d5977f5d088839bddc06f4be5f71df | /pairingchef1.py | 986aa7ae6e330835dc132885fe5a5ec9f990c997 | [] | no_license | blitu12345/code-codechef | ec5cb677189802163cca8028e12acc2226cb8d61 | e2acc37dfaa191fe82f76529b8ed09799e576853 | refs/heads/master | 2020-06-14T23:37:52.560907 | 2017-12-15T17:50:40 | 2017-12-15T17:50:40 | 75,399,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py |
# CodeChef pairing solution (Python 2: raw_input and `print j,`).
# For each test case: m pairs (a[i], b[i]); starting from the last pair,
# repeatedly add any pair whose two values are completely disjoint from
# some already-selected pair, until no more can be added; print the
# selected indices.
t = input()
while(t>0):
    t-=1
    n,m=map(int,raw_input().split())
    a=[0]*m
    b=[0]*m
    for i in range(m):
        a[i],b[i]=map(int,raw_input().split())
    # A holds the indices selected so far; seed it with the last pair.
    A=[];A.append(m-1)
    count=1
    #print "A"
    #print A
    # Repeat until a full pass adds nothing (count stays 0).
    while(count!=0):
        count=0
        for i in range(m):
            #print "i"
            #print i
            # Restart the outer scan after each successful addition.
            if(count==1):
                break
            for j in A:
                #print "i,j"
                #print ("%d %d")%(i,j)
                # NOTE(review): i is accepted as soon as it is disjoint from
                # ANY one pair already in A (not from all of them), and
                # duplicates of i can be appended on later passes — confirm
                # this matches the problem statement.
                if( i!=j and a[i]!=a[j] and b[i]!=b[j] and a[i]!=b[j] and b[i]!=a[j] ):
                    #print "for=for-if"
                    count+=1
                    A.append(i)
                    break
    # Python 2 trailing-comma print: indices space-separated on one line.
    for j in A:
        print j,
| [
"ashishbkarel@gmail.com"
] | ashishbkarel@gmail.com |
52c87f77e2ad7a4d7dd5bdb94ddb8b2da3f6cb29 | 06a7dc7cc93d019e4a9cbcf672b23a0bbacf8e8b | /2016_schizConnect/2018_analysis_2ndpart_clinic/discriminative_clusters_correlations/Freesurfer/00_create_clusters_scores.py | e614c8039e13552076a89f2343bb74d6c1e72c6a | [] | no_license | neurospin/scripts | 6c06cd218a5f32de9c3c2b7d1d8bda3f3d107458 | f14a2c9cf2cd7f5fbea767b017c3faf36d170bdb | refs/heads/master | 2021-07-11T22:55:46.567791 | 2021-07-02T13:08:02 | 2021-07-02T13:08:02 | 10,549,286 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,634 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 21 12:08:33 2017
@author: ad247405
"""
import os
import subprocess
import json
import numpy as np
from sklearn.cross_validation import StratifiedKFold
import nibabel
from sklearn import svm
from sklearn.metrics import precision_recall_fscore_support
from sklearn.feature_selection import SelectKBest
import brainomics.image_atlas
import brainomics.array_utils
from scipy.stats import binom_test
from collections import OrderedDict
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score, recall_score
import pandas as pd
from collections import OrderedDict
import nilearn
from nilearn import plotting
from nilearn import image
import seaborn as sns
import matplotlib.pylab as plt
import shutil
import sys
sys.path.insert(0,'/home/ed203246/git/scripts/brainomics')
import array_utils, mesh_processing
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
# --- Load the fitted enet-TV weight map and the Freesurfer dataset. ---
# NOTE(review): `np` and `pop_all` are not bound by the imports visible in
# this file (only `import numpy` appears); the script was presumably run in
# an interactive session where they already existed — confirm before running
# it top-to-bottom.
WD = "/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/Freesurfer/\
all_subjects/results/enetall_all+VIP_all/5cv/refit/refit/enettv_0.1_0.1_0.8"
beta = np.load(os.path.join(WD,"beta.npz"))['arr_0'][:]

# Split the population table by diagnosis (1 = SCZ, 0 = control).
pop_all_scz = pop_all[pop_all['dx_num']==1]
pop_all_con = pop_all[pop_all['dx_num']==0]

y_all = np.load("/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/Freesurfer/all_subjects/data/y.npy")
X_all = np.load("/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/Freesurfer/all_subjects/data/X.npy")
assert X_all.shape == (567, 299865)

X_all_scz = X_all[y_all==1,:]
X_all_con = X_all[y_all==0,:]
assert X_all_scz.shape == (253, 299865)

N_scz = X_all_scz.shape[0]
N_con = X_all_con.shape[0]
N = X_all.shape[0]

mask_mesh = np.load("/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/Freesurfer/all_subjects/data/mask.npy")
# Keep the smallest set of weights holding 99% of the squared norm.
beta, t = array_utils.arr_threshold_from_norm2_ratio(beta, .99)
beta = beta.ravel()
assert X_all_scz.shape[1] == beta.shape[0]
# NOTE(review): N is reassigned here from the full-sample count to the SCZ
# count, shadowing the value set a few lines above — confirm intent.
N = X_all_scz.shape[0]
print(pd.Series(beta.ravel()).describe(), t)
# Write morph data
# Project the thresholded weights onto the cortical surfaces and plot one
# hemisphere.  NOTE(review): mesh_l, mesh_r, sulc_l, sulc_r and WD_CLUST are
# not defined in the visible part of this file, and beta_to_mesh_lr is only
# defined further DOWN the file — run as a straight script this section would
# raise NameError; it was presumably executed interactively.
from nibabel import gifti
[coords_l, faces_l], beta_mesh_l, [coords_r, faces_r], beta_mesh_r, stat = \
    beta_to_mesh_lr(beta, mask_mesh, mesh_l, mesh_r, threshold=1.)

# Select which hemisphere/view to render.
hemi, view = 'right', 'medial'
if hemi == 'left':
    coords_x, faces_x, beta_mesh_x, sulc_x =\
        coords_l, faces_l, beta_mesh_l, sulc_l
elif hemi == 'right':
    coords_x, faces_x, beta_mesh_x, sulc_x =\
        coords_r, faces_r, beta_mesh_r, sulc_r

# NOTE(review): the first vmax_beta is immediately overwritten by the second,
# and neither is actually passed to the plot call below (vmax is commented).
vmax_beta = np.max(np.abs(beta)) / 10
vmax_beta = np.max(np.abs(beta_mesh_x) * 1000) / 10
plotting.plot_surf_stat_map([coords_x, faces_x], stat_map=1000 * beta_mesh_x,
                            hemi=hemi, view=view,
                            bg_map=sulc_x, #bg_on_data=True,
                            #vmax = vmax_beta,#stat[2] / 10,#vmax=vmax_beta,
                            darkness=.5,
                            cmap=plt.cm.seismic,
                            #symmetric_cbar=True,
                            #output_file=output_filename
                            )

# Summary statistics of the non-zero weights on each hemisphere.
print(pd.Series((beta_mesh_l[beta_mesh_l != 0]).ravel()).describe())
print(pd.Series((beta_mesh_r[beta_mesh_r != 0]).ravel()).describe())

# Save the per-vertex weight maps as GIfTI textures.
mesh_processing.save_texture(os.path.join(WD_CLUST, "beta_lh.gii"),
                             beta_mesh_l)
mesh_processing.save_texture(os.path.join(WD_CLUST, "beta_rh.gii"),
                             beta_mesh_r)
K_interest = [18,14,33,20,4,25,23,22,15,41]
scores_all_scz = np.zeros((N_scz, len(K_interest)))
scores_all_con = np.zeros((N_con, len(K_interest)))
scores_all = np.zeros((N, len(K_interest)))
i=0
for k in range (len(K_interest)):
mask = labels_flt == k
print("Cluster:",k, "size:", mask.sum())
scores_all_scz[:, i] = np.dot(X_all_scz[:, mask], beta[mask]).ravel()
scores_all_con[:, i] = np.dot(X_all_con[:, mask], beta[mask]).ravel()
scores_all[:, i] = np.dot(X_all[:, mask], beta[mask]).ravel()
i= i+1
pop_all_scz["cluster1_cingulate_gyrus"] = scores_all_scz[:, 0]
pop_all_scz["cluster2_right_caudate_putamen"] = scores_all_scz[:,1]
pop_all_scz["cluster3_precentral_postcentral_gyrus"] = scores_all_scz[:, 2]
pop_all_scz["cluster4_frontal_pole"] = scores_all_scz[:, 3]
pop_all_scz["cluster5_temporal_pole"] = scores_all_scz[:, 4]
pop_all_scz["cluster6_left_hippocampus_amygdala"] = scores_all_scz[:, 5]
pop_all_scz["cluster7_left_caudate_putamen"] = scores_all_scz[:, 6]
pop_all_scz["cluster8_left_thalamus"] = scores_all_scz[:, 7]
pop_all_scz["cluster9_right_thalamus"] = scores_all_scz[:, 8]
pop_all_scz["cluster10_middle_temporal_gyrus"] = scores_all_scz[:, 9]
output = "/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/data"
pop_all_scz.to_csv(os.path.join(output,"pop_all_scz.csv") , index=False)
pop_cobre_scz = pop_all_scz[pop_all_scz["site_num"]==1]
pop_cobre_scz.to_csv(os.path.join(output,"pop_cobre_scz.csv") , index=False)
pop_nmorph_scz = pop_all_scz[pop_all_scz["site_num"]==2]
pop_nmorph_scz.to_csv(os.path.join(output,"pop_nmorph_scz.csv") , index=False)
pop_nudast_scz = pop_all_scz[pop_all_scz["site_num"]==3]
pop_nudast_scz.to_csv(os.path.join(output,"pop_nudast_scz.csv") , index=False)
pop_vip_scz = pop_all_scz[pop_all_scz["site_num"]==4]
pop_vip_scz.to_csv(os.path.join(output,"pop_vip_scz.csv") , index=False)
#Test discriminative power of each cluster with a paired t test
##############################################################################
# NOTE(review): despite the comment above, ttest_ind below is an INDEPENDENT
# two-sample t-test (SCZ vs control are different subjects), and `scipy` is
# not bound by the visible imports (only `from scipy.stats import binom_test`)
# — confirm the environment provided it.
output = "/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/results/supervised_clusters_results/clusters_ttest"
for i in range(10):
    plt.figure()
    df = pd.DataFrame()
    df["score"] = scores_all[:,i]
    df["dx"] = y_all
    # Compare SCZ vs control score distributions for cluster i.
    T, p = scipy.stats.ttest_ind(scores_all_scz[:, i],scores_all_con[:, i])
    print("Cluster %s: T = %s and p = %s" %(i,T,p))
    sns.set_style("whitegrid")
    sns.set(font_scale=1.3)
    # Violin plot of the score split by diagnosis, annotated with the t-test.
    ax = sns.violinplot(x="dx", y="score", hue="dx", data=df,linewidth = 3)
    plt.tight_layout()
    plt.legend(loc='lower center',ncol=2)
    plt.ylabel("Score on component %r"%(i+1))
    plt.title(("T : %s and pvalue = %r"%(np.around(T,decimals=3),p)))
    plt.savefig(os.path.join(output,"cluster%s"%((i+1))))
def beta_to_mesh_lr(beta, mask_mesh, mesh_l, mesh_r, threshold=.99):
    """Map a masked weight vector back onto the left/right cortical meshes.

    The weights are first thresholded to keep the smallest set holding
    `threshold` of the squared norm, then scattered into a full-mesh array
    (left-hemisphere vertices occupy the first half of the mask) and split
    per hemisphere.

    Fix: the original body used the name `np`, which is never bound by this
    file's imports (`import numpy` only) and raised NameError; it also
    contained two dead `np.zeros(coords.shape)` assignments that were
    immediately overwritten.

    Returns:
        [coords_l, faces_l], beta_mesh_l, [coords_r, faces_r], beta_mesh_r,
        and [n_nonzero_before, n_nonzero_after, max_abs_after] stats.
    """
    assert beta.shape[0] == mask_mesh.sum()
    beta_t, t = array_utils.arr_threshold_from_norm2_ratio(beta, threshold)
    beta_mesh = numpy.zeros(mask_mesh.shape)
    beta_mesh[mask_mesh] = beta_t.ravel()
    # Load both hemisphere meshes; they must have the same vertex count.
    mesh_l = nilearn.plotting.surf_plotting.load_surf_mesh(mesh_l)
    coords_l, faces_l = mesh_l[0], mesh_l[1]
    mesh_r = nilearn.plotting.surf_plotting.load_surf_mesh(mesh_r)
    coords_r, faces_r = mesh_r[0], mesh_r[1]
    assert coords_l.shape[0] == coords_r.shape[0] == beta_mesh.shape[0] / 2
    # Left vertices come first, right vertices second.
    beta_mesh_l = beta_mesh[:coords_l.shape[0]]
    beta_mesh_r = beta_mesh[coords_l.shape[0]:]
    stats = [numpy.sum(beta != 0), numpy.sum(beta_t != 0),
             numpy.max(numpy.abs(beta_t))]
    return [coords_l, faces_l], beta_mesh_l, [coords_r, faces_r], beta_mesh_r, stats
def mesh_lr_to_beta(beta_mesh_l, beta_mesh_r, mask_mesh):
    """Inverse of beta_to_mesh_lr: concatenate the two hemisphere maps and
    re-apply the boolean mask to recover the compact weight vector.

    Fix: the original used `np`, which is never bound by this file's imports
    (`import numpy` only) and raised NameError at call time.
    """
    beta_mesh = numpy.zeros(mask_mesh.shape)
    # Left-hemisphere vertices occupy the first half of the full-mesh array.
    idx_r = beta_mesh.shape[0] // 2
    beta_mesh[:idx_r] = beta_mesh_l
    beta_mesh[idx_r:] = beta_mesh_r
    return beta_mesh[mask_mesh]
| [
"ad247405@is222241.intra.cea.fr"
] | ad247405@is222241.intra.cea.fr |
5ed94beafb22b30e681c06efc08b50f5a29ecf8e | c107c05bc16b53cd057508e18a0dbe9854343a13 | /gridix/views/helpers.py | 22fc09be7515026cea268b35221473a96f6130b8 | [] | no_license | hkmshb/gridix.web | d80b7561aade5f77bcc43257742d8e56a628bf2e | bda0adf5465a085b0337a8f749c87a21b73b7741 | refs/heads/master | 2021-09-07T08:36:25.089579 | 2017-10-02T09:49:52 | 2017-10-02T09:49:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | from pyramid.httpexceptions import HTTPRedirection, HTTPFound
from pyramid.view import notfound_view_config, forbidden_view_config
@notfound_view_config(renderer='404.html')
def notfound_view(request):
    """Render 404.html with a 404 status for any unmatched route."""
    request.response.status = 404
    # The empty dict is the (unused) template context for 404.html.
    return {}
@forbidden_view_config()
def forbidden_view(request):
    """Redirect unauthorised requests to the login page, carrying the
    originally requested URL in the `next` query parameter."""
    request.response.status = 403
    login_url = request.application_url + '/login?next=' + request.url
    return HTTPFound(location=login_url)
| [
"hkmshb@gmail.com"
] | hkmshb@gmail.com |
e48be328a1e3f65413f15b61216170e5fc4ba758 | e09a6cebfcc9f104b154c1800dc5d768824351c3 | /src/emuvim/api/openstack/helper.py | 5f9d5730642d1be2be6d944f30a8d30e2e0b077c | [
"Apache-2.0"
] | permissive | containernet/vim-emu | e03b0d09eee0d7ab09a7159bc13baa1897dbe1db | af63353116f346dc728588638fbd4888d98243da | refs/heads/master | 2021-06-28T06:43:02.077977 | 2021-02-14T17:37:01 | 2021-02-14T17:37:01 | 217,866,279 | 19 | 5 | Apache-2.0 | 2020-01-30T14:42:58 | 2019-10-27T14:23:04 | Python | UTF-8 | Python | false | false | 1,500 | py | # Copyright (c) 2015 SONATA-NFV and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import logging
LOG = logging.getLogger("api.openstack.helper")
def get_host(r):
    """Return the hostname part of the request's base URL.

    Falls back to "0.0.0.0" (and logs an error) when the URL cannot be
    parsed or the request has no usable ``base_url``.
    """
    try:
        return urlparse(r.base_url).hostname
    except Exception:
        # Was `except BaseException`, which also swallowed KeyboardInterrupt
        # and SystemExit; Exception keeps the best-effort fallback without
        # trapping interpreter-exit signals.
        LOG.error("Could not get host part of request URL.")
        return "0.0.0.0"
| [
"manuel.peuster@uni-paderborn.de"
] | manuel.peuster@uni-paderborn.de |
23edb498e2bc0ae6b8eabfc29b1a32fa539adcab | ae67b9d90db114c1e15ce63ee0d27942d999a83b | /ask-smapi-model/ask_smapi_model/v1/audit_logs/audit_logs_request.py | 120c02de81d1b4a567c9ac0dc62ecefcf8d98a16 | [
"Apache-2.0"
] | permissive | Birds-Awesome-Org/alexa-apis-for-python | ecb2e351b5cb1b341dda5c3ebc38927fa6d66a93 | d22c1712cb53a442b72f830f53d97ef66075750b | refs/heads/master | 2022-12-30T04:37:51.214040 | 2020-10-09T21:41:03 | 2020-10-09T21:41:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,699 | py | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
from ask_smapi_model.v1.audit_logs.sort_direction import SortDirection as AuditLogs_SortDirectionV1
from ask_smapi_model.v1.audit_logs.request_filters import RequestFilters as AuditLogs_RequestFiltersV1
from ask_smapi_model.v1.audit_logs.sort_field import SortField as AuditLogs_SortFieldV1
from ask_smapi_model.v1.audit_logs.request_pagination_context import RequestPaginationContext as AuditLogs_RequestPaginationContextV1
class AuditLogsRequest(object):
    """Request body for querying a vendor's audit logs.

    :param vendor_id: Vendor Id. See developer.amazon.com/mycid.html.
    :type vendor_id: (optional) str
    :param request_filters: 
    :type request_filters: (optional) ask_smapi_model.v1.audit_logs.request_filters.RequestFilters
    :param sort_direction: 
    :type sort_direction: (optional) ask_smapi_model.v1.audit_logs.sort_direction.SortDirection
    :param sort_field: 
    :type sort_field: (optional) ask_smapi_model.v1.audit_logs.sort_field.SortField
    :param pagination_context: 
    :type pagination_context: (optional) ask_smapi_model.v1.audit_logs.request_pagination_context.RequestPaginationContext

    """
    # Attribute name -> type string used by the SDK deserializer.
    deserialized_types = {
        'vendor_id': 'str',
        'request_filters': 'ask_smapi_model.v1.audit_logs.request_filters.RequestFilters',
        'sort_direction': 'ask_smapi_model.v1.audit_logs.sort_direction.SortDirection',
        'sort_field': 'ask_smapi_model.v1.audit_logs.sort_field.SortField',
        'pagination_context': 'ask_smapi_model.v1.audit_logs.request_pagination_context.RequestPaginationContext'
    }  # type: Dict

    # Python attribute name -> JSON wire name.
    attribute_map = {
        'vendor_id': 'vendorId',
        'request_filters': 'requestFilters',
        'sort_direction': 'sortDirection',
        'sort_field': 'sortField',
        'pagination_context': 'paginationContext'
    }  # type: Dict
    supports_multiple_types = False

    def __init__(self, vendor_id=None, request_filters=None, sort_direction=None, sort_field=None, pagination_context=None):
        # type: (Optional[str], Optional[AuditLogs_RequestFiltersV1], Optional[AuditLogs_SortDirectionV1], Optional[AuditLogs_SortFieldV1], Optional[AuditLogs_RequestPaginationContextV1]) -> None
        """Request body for querying a vendor's audit logs.

        :param vendor_id: Vendor Id. See developer.amazon.com/mycid.html.
        :type vendor_id: (optional) str
        :param request_filters: 
        :type request_filters: (optional) ask_smapi_model.v1.audit_logs.request_filters.RequestFilters
        :param sort_direction: 
        :type sort_direction: (optional) ask_smapi_model.v1.audit_logs.sort_direction.SortDirection
        :param sort_field: 
        :type sort_field: (optional) ask_smapi_model.v1.audit_logs.sort_field.SortField
        :param pagination_context: 
        :type pagination_context: (optional) ask_smapi_model.v1.audit_logs.request_pagination_context.RequestPaginationContext
        """
        self.__discriminator_value = None  # type: str

        self.vendor_id = vendor_id
        self.request_filters = request_filters
        self.sort_direction = sort_direction
        self.sort_field = sort_field
        self.pagination_context = pagination_context

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict.

        Nested models (anything with a ``to_dict``) and Enum members are
        serialized recursively; lists and dicts are converted element-wise.
        Rewritten without the ``six.iteritems``/lambda boilerplate — plain
        ``dict.items()`` and comprehensions behave identically on Python 2
        and 3.
        """
        result = {}  # type: Dict
        for attr in self.deserialized_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict")
                    else x.value if isinstance(x, Enum)
                    else x
                    for x in value
                ]
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict")
                        else v.value if isinstance(v, Enum)
                        else v)
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal."""
        if not isinstance(other, AuditLogsRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal."""
        return not self == other
| [
"ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com"
] | ask-pyth@dev-dsk-ask-sdk-python-2b-85d79f62.us-west-2.amazon.com |
90290b8a0ae23c44b6a5fe65a3bff3ca11786546 | 96a34a048c783a75736bf0ec775df22142f9ee53 | /packages/service-library/tests/test_pools.py | 13c62ad0a3a49cec6d66308464ce5a842253de56 | [
"MIT"
] | permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 1,823 | py | from asyncio import BaseEventLoop
from concurrent.futures import ProcessPoolExecutor
from servicelib.pools import (
non_blocking_process_pool_executor,
non_blocking_thread_pool_executor,
)
def return_int_one() -> int:
    """Trivial picklable callable used as the executor workload in the tests
    below (it must be a module-level function so process pools can pickle it)."""
    return 1
async def test_default_thread_pool_executor(event_loop: BaseEventLoop) -> None:
    """The loop's default (thread pool) executor runs the callable."""
    result = await event_loop.run_in_executor(None, return_int_one)
    assert result == 1
async def test_blocking_process_pool_executor(event_loop: BaseEventLoop) -> None:
    """A plain ProcessPoolExecutor also runs the callable through the loop."""
    result = await event_loop.run_in_executor(ProcessPoolExecutor(), return_int_one)
    assert result == 1
async def test_non_blocking_process_pool_executor(event_loop: BaseEventLoop) -> None:
    """The servicelib process pool works as a run_in_executor target."""
    with non_blocking_process_pool_executor() as pool:
        outcome = await event_loop.run_in_executor(pool, return_int_one)
        assert outcome == 1
async def test_same_pool_instances() -> None:
    """Two default-configured process-pool context managers share one executor."""
    with non_blocking_process_pool_executor() as pool_a:
        with non_blocking_process_pool_executor() as pool_b:
            assert pool_a == pool_b
async def test_different_pool_instances() -> None:
    """A non-default configuration yields a separate process pool."""
    with non_blocking_process_pool_executor(max_workers=1) as custom_pool:
        with non_blocking_process_pool_executor() as default_pool:
            assert custom_pool != default_pool
async def test_non_blocking_thread_pool_executor(event_loop: BaseEventLoop) -> None:
    """The servicelib thread pool works as a run_in_executor target."""
    with non_blocking_thread_pool_executor() as pool:
        outcome = await event_loop.run_in_executor(pool, return_int_one)
        assert outcome == 1
async def test_same_thread_pool_instances() -> None:
    """Two default-configured thread-pool context managers share one executor."""
    with non_blocking_thread_pool_executor() as pool_a:
        with non_blocking_thread_pool_executor() as pool_b:
            assert pool_a == pool_b
async def test_different_thread_pool_instances() -> None:
    """A non-default configuration yields a separate thread pool."""
    with non_blocking_thread_pool_executor(max_workers=1) as custom_pool:
        with non_blocking_thread_pool_executor() as default_pool:
            assert custom_pool != default_pool
| [
"noreply@github.com"
] | ITISFoundation.noreply@github.com |
b1d41c2a061445cdfaefb2fb3eea35a381cb1532 | 3ef70fe63acaa665e2b163f30f1abd0a592231c1 | /stackoverflow/venv/lib/python3.6/site-packages/twisted/application/__init__.py | a462fa6b24a6220fac90807dab0691a4d41bd7a3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | wistbean/learn_python3_spider | 14914b63691ac032955ba1adc29ad64976d80e15 | 40861791ec4ed3bbd14b07875af25cc740f76920 | refs/heads/master | 2023-08-16T05:42:27.208302 | 2023-03-30T17:03:58 | 2023-03-30T17:03:58 | 179,152,420 | 14,403 | 3,556 | MIT | 2022-05-20T14:08:34 | 2019-04-02T20:19:54 | Python | UTF-8 | Python | false | false | 129 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Configuration objects for Twisted Applications.
"""
| [
"354142480@qq.com"
] | 354142480@qq.com |
a204bfa59a920814e91d248b12987f740ff833e4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03775/s140334144.py | 808b79385c9d8a685bb4df5c9b7089e1243a768c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | def calc_digits(num):
ans = 1
while True:
if num // 10 == 0:
break
num //= 10
ans += 1
return ans
def main():
    """Read N and print the smallest possible max(digits(A), digits(B))
    over all factor pairs A * B == N.
    """
    N = int(input())
    ans = 10 ** 10  # sentinel larger than any digit count
    # Only scan A up to sqrt(N); the cofactor B covers the larger factors.
    for A in range(1, int(N ** 0.5) + 1):
        if N % A != 0:
            continue
        # Exact integer cofactor.  The original used int(N / A), which goes
        # through float division and can be off by one for large N.
        B = N // A
        ans = min(ans, max(calc_digits(A), calc_digits(B)))
    print(ans)
if __name__ == "__main__":
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
47eb757a1d1856704188947c9a4c599466ad074f | 387cf5f72ed6679a4d9e04bddd16998a190c4caf | /problems/programmers/lv3/pgs-70130.py | 565a815e56cc4f7a03cb10126243e422562f6780 | [] | no_license | CodyBuilder-dev/Algorithm-Coding-Test | db4ee1e7565fbcef3140192225167eff42ad5c02 | cca5c4ba8bc31679ab00aceccfd8d9d39c232f72 | refs/heads/master | 2021-07-24T00:34:41.888289 | 2021-07-21T14:29:00 | 2021-07-21T14:29:00 | 219,123,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | """
아이디어 : 각 원소의 인덱스를 저장하고, 이로부터 스타 수열의 길이를 찾을 수 있다
"""
from pgs70130tc import tc_in,tc_out
def find_starlen(elem,idx_list,array):
    """Greedily compute the star-sequence length achievable around one value.

    `idx_list` holds the (sorted, ascending) positions of that value in
    `array`; each pivot occurrence that can be paired with a neighbour adds 2
    to the length.  `prev_position` records whether the previous pivot
    consumed the slot to its left (1) or to its right (0).
    Note: `elem` itself is never read here; it is kept for call-site symmetry.
    """
    prev_idx = idx_list[0]
    star_len = 0
    if prev_idx > 0:
        # A neighbour exists to the left of the first occurrence.
        prev_position = 1
        star_len += 2
    elif prev_idx == 0 and len(idx_list) > 1 and idx_list[1] >= 2:
        # First occurrence sits at index 0: pair it with the element on its right.
        prev_position = 0
        star_len += 2
    else:
        prev_position = 1
    for idx in idx_list[1:]:
        if prev_position == 0: # previous pivot paired to its right: [prev_idx, ?]
            if idx == prev_idx + 2 and idx + 1 < len(array):
                prev_position = 0
                star_len +=2
            elif idx >= prev_idx +3:
                prev_position = 1
                star_len += 2
        else: # previous pivot paired to its left: [?, prev_idx]
            if idx == prev_idx + 1 and idx + 1 < len(array) :
                prev_position = 0
                star_len += 2
            if idx >= prev_idx +2:
                prev_position = 1
                star_len += 2
        prev_idx = idx
    return star_len
def solution(a):
    """Return the maximum star-sequence length achievable in `a`."""
    if len(a) == 1:
        return 0
    # Group the positions of each distinct value, preserving order.
    positions = {}
    for i, value in enumerate(a):
        positions.setdefault(value, []).append(i)
    # Take the best greedy result over every candidate pivot value.
    best = -987654321
    for value, idx_list in positions.items():
        best = max(best, find_starlen(value, idx_list, a))
    return best
# print(solution([0]),0)
# print(solution([5,2,3,3,5,3]),4)
print(solution([0,3,3,0,7,2,0,2,2,0]),8)
print(solution([1, 2, 2, 1, 3]),4)
print(solution([0,0,0,2,3,4,3,5,3,1]),6)
print(solution([4, 0, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0, 3]),6)
# 빅데이터 테케
print(solution(tc_in),tc_out) | [
"imspecial1@u.sogang.ac.kr"
] | imspecial1@u.sogang.ac.kr |
0cd28581ddd4c00fb63e1b355411284e5474fafe | 668188f5368680567be8c4af55a731e45a2380ba | /util/elections_data/import_elections_data.py | 549adcaa113ec96aaa727c111da1cc587c236faf | [] | no_license | wrishel/Ballot_tally | ec6ff128e61d6ebfe91574c9b55e083849665502 | 3e4ed8c4fe0503ead9b55fac77d3cfcd97c73c41 | refs/heads/master | 2023-06-24T17:35:51.537643 | 2021-07-28T18:38:32 | 2021-07-28T18:38:32 | 387,261,878 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | """Import data from a specific Verity report that might be called
something like "Detailed vote totals"."""
import csv
from pathlib import Path
import pandas as pd
import dbase
import GLB_globs
import io
# Wire up project globals and connect to the election-tally database
# (the 'testing' schema, not production).
GLB = GLB_globs.GLB_globs()
db = dbase.ETPdb()
db.connect('testing')
# Verity "Detailed vote totals" export to ingest, located next to this script.
import_fn = 'Detailed vote totals-12-2-2020 11-07-38 AM (1).CSV'
import_path = Path(__file__).parent / import_fn
with open(import_path, 'r') as inf:
    text = inf.read()
# hack for precincts like "1E-26" showing up as "1.00E-26"
text = text.replace('.00E', 'E')
# Parse the repaired CSV text in memory and load every row into the database.
elec_df = pd.read_csv(io.StringIO(text))
db.add_all_elec_results(elec_df)
| [
"wrishel@gmail.com"
] | wrishel@gmail.com |
18e7065e030d68e04728adde091f0254051ef38c | 4b5ee91dabf402522685cea452ba51c10dbb834e | /server/bin/download/terkel.py | 9021d384a19c682b44f2e25661248ae7f3761db8 | [] | no_license | unscrollinc/unscroll | 8fb175d6cf9c2f91bdfc7a97a8da71beca7e702d | 88168af51abf8a0bfa06dcc22bd0ec11b671d989 | refs/heads/master | 2023-01-10T03:15:17.737493 | 2021-01-28T20:57:57 | 2021-01-28T20:57:57 | 196,251,483 | 7 | 0 | null | 2023-01-09T22:06:57 | 2019-07-10T17:52:37 | JavaScript | UTF-8 | Python | false | false | 3,116 | py | import re
import bs4
# import requests_cache
import requests
from pprint import pprint
from tableextractor import Extractor
from unscroll import UnscrollClient
from unscrolldate import UnscrollDate
from edtf import parse_edtf, text_to_edtf, struct_time_to_datetime
from datetime import date
from datetime import datetime
import time
import shelve
shelf = shelve.open('shelf')
import spacy
nlp = spacy.load('en')
def get_url_as_soup(url):
    """Fetch `url`, memoising the raw response bytes in the module-level
    `shelf`, and return the page parsed as a BeautifulSoup tree (lxml).
    """
    if url not in shelf:
        # Cache miss: download once and persist the raw bytes.
        s = requests.Session()
        r = s.get(url)
        shelf[url] = r.content
    bs = bs4.BeautifulSoup(shelf[url], "lxml")
    # Flush the cache to disk but keep it open.  The original called
    # shelf.close() here, which closed the shared shelf after the first
    # call and made every subsequent lookup raise ValueError.
    shelf.sync()
    return bs
def filter_key(k):
    """Normalise `k` into a lowercase key: runs of whitespace become single
    underscores and every other non-alphabetic character is dropped."""
    lowered = k.lower()
    underscored = re.sub(r'\s+', '_', lowered)
    return re.sub('[^a-z_]', '', underscored)
def get_person(title):
    """Return the first multi-word PERSON entity found in `title` that is not
    Studs Terkel himself, or None when no such entity exists."""
    for entity in nlp(title).ents:
        if entity.label_ != 'PERSON':
            continue
        if len(entity.text.split()) > 1 and 'Studs Terkel' not in entity.text:
            return entity.text
def post_shows(api, scroll):
    """Scrape the WFMT explore page and create one unscroll event per show."""
    shows = []
    url = 'https://studsterkel.wfmt.com/explore#t=date'
    soup = get_url_as_soup(url)
    ps = soup.find_all('p')
    for p in ps:
        show = {}
        a = p.find('a')
        if a is not None:
            date = a.find('span')
            if date is not None:
                # Evil python mutates `a` object
                [s.extract() for s in a('span')]
                # Parse the human-readable date into an EDTF structure.
                _edtf = parse_edtf(text_to_edtf(date.text))
                title = a.text.strip()
                person = get_person(title)
                thumb = None
                show = {
                    'when_happened': struct_time_to_datetime(_edtf.upper_strict()),
                    'resolution': len(str(_edtf)),
                    'when_original': date.text,
                    'content_url': 'https://studsterkel.wfmt.com{}'.format(a.get('href')),
                    'title': a.text.strip(),
                    'text': '',
                    # NOTE(review): 'with_thumbnail' appears twice in this
                    # literal; the later cache_wiki_thumbnail(person) value
                    # silently wins and `thumb` is effectively dead.
                    'with_thumbnail':thumb,
                    'media_type':'audio/mpeg',
                    'content_type':'Oral histories',
                    'source_url': 'https://studsterkel.wfmt.com/',
                    'with_thumbnail': api.cache_wiki_thumbnail(person)
                }
                resp = api.create_event(show, scroll)
                pprint(resp.json())
def __main__():
    """Rebuild the 'Studs Terkel Interviews' scroll from scratch."""
    scroll_thumb = "https://upload.wikimedia.org/wikipedia/commons/0/0b/Studs_Terkel_-_1979-1.jpg"
    api = UnscrollClient()
    title = "Studs Terkel Interviews"
    # Cache the cover image first so the scroll can reference its URL.
    favthumb = api.cache_thumbnail(scroll_thumb)
    with_thumbnail = favthumb.get('url')
    # Drop any previous copy of the scroll, then recreate it and fill it.
    api.delete_scroll_with_title(title)
    scroll = api.create_or_retrieve_scroll(
        title,
        description='<b>Via the Studs Terkel Radio Archive at WFMT</b>: '
        'In his 45 years on WFMT radio, Studs Terkel talked to the 20th '
        'century’s most interesting people.',
        link='https://studsterkel.wfmt.com/',
        with_thumbnail=with_thumbnail,
        subtitle='Collection via WFMT',)
    post_shows(api, scroll)
__main__()
| [
"ford@ftrain.com"
] | ford@ftrain.com |
b752584723eb119dfedd4df2836597322efccab9 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/Autodesk/Revit/DB/__init___parts/FilterNumericGreaterOrEqual.py | 3e887f1795e5b745c4a120e6d1d8abefc5db08fb | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | class FilterNumericGreaterOrEqual(FilterNumericRuleEvaluator,IDisposable):
"""
Tests whether numeric values from the document are greater than or equal to a certain value
FilterNumericGreaterOrEqual()
"""
 # Auto-generated .NET API stubs: bodies are empty, the real implementation
 # lives in the Revit assembly; each docstring carries the .NET signature.
 def Dispose(self):
  """ Dispose(self: FilterNumericRuleEvaluator,A_0: bool) """
  pass
 def ReleaseUnmanagedResources(self,*args):
  """ ReleaseUnmanagedResources(self: FilterNumericRuleEvaluator,disposing: bool) """
  pass
 def __enter__(self,*args):
  """ __enter__(self: IDisposable) -> object """
  pass
 def __exit__(self,*args):
  """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
4963d09b963ff6e3efa42cbc57da349c97f102eb | 2da2552656f4470b8f857e9db21473d341f1add2 | /tests/contrib/django/test_middleware.py | 2dafd8a234823e97b2743f524d349325d411f6f1 | [] | no_license | Scofields/dd-trace-py | dab06a8b461211186d18bb6702d9e24a46ef6adb | 0e8ed5282774f5e5b172f93d4868826b4b4ae56b | refs/heads/master | 2021-06-18T04:09:32.616941 | 2017-06-07T21:26:53 | 2017-06-07T21:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,672 | py | # 3rd party
from nose.tools import eq_
from django.test import modify_settings
from django.core.urlresolvers import reverse
# project
from ddtrace.contrib.django.conf import settings
from ddtrace.contrib.django import TraceMiddleware
# testing
from .utils import DjangoTraceTestCase
class DjangoMiddlewareTest(DjangoTraceTestCase):
"""
Ensures that the middleware traces all Django internals
"""
def test_middleware_trace_request(self):
# ensures that the internals are properly traced
url = reverse('users-list')
response = self.client.get(url)
eq_(response.status_code, 200)
# check for spans
spans = self.tracer.writer.pop()
eq_(len(spans), 3)
sp_request = spans[0]
sp_template = spans[1]
sp_database = spans[2]
eq_(sp_database.get_tag('django.db.vendor'), 'sqlite')
eq_(sp_template.get_tag('django.template_name'), 'users_list.html')
eq_(sp_request.get_tag('http.status_code'), '200')
eq_(sp_request.get_tag('http.url'), '/users/')
eq_(sp_request.get_tag('django.user.is_authenticated'), 'False')
eq_(sp_request.get_tag('http.method'), 'GET')
def test_middleware_trace_errors(self):
# ensures that the internals are properly traced
url = reverse('forbidden-view')
response = self.client.get(url)
eq_(response.status_code, 403)
# check for spans
spans = self.tracer.writer.pop()
eq_(len(spans), 1)
span = spans[0]
eq_(span.get_tag('http.status_code'), '403')
eq_(span.get_tag('http.url'), '/fail-view/')
eq_(span.resource, 'tests.contrib.django.app.views.ForbiddenView')
def test_middleware_trace_function_based_view(self):
# ensures that the internals are properly traced when using a function views
url = reverse('fn-view')
response = self.client.get(url)
eq_(response.status_code, 200)
# check for spans
spans = self.tracer.writer.pop()
eq_(len(spans), 1)
span = spans[0]
eq_(span.get_tag('http.status_code'), '200')
eq_(span.get_tag('http.url'), '/fn-view/')
eq_(span.resource, 'tests.contrib.django.app.views.function_view')
def test_middleware_trace_callable_view(self):
# ensures that the internals are properly traced when using callable views
url = reverse('feed-view')
response = self.client.get(url)
eq_(response.status_code, 200)
# check for spans
spans = self.tracer.writer.pop()
eq_(len(spans), 1)
span = spans[0]
eq_(span.get_tag('http.status_code'), '200')
eq_(span.get_tag('http.url'), '/feed-view/')
eq_(span.resource, 'tests.contrib.django.app.views.FeedView')
@modify_settings(
MIDDLEWARE={
'remove': 'django.contrib.auth.middleware.AuthenticationMiddleware',
},
MIDDLEWARE_CLASSES={
'remove': 'django.contrib.auth.middleware.AuthenticationMiddleware',
},
)
def test_middleware_without_user(self):
# remove the AuthenticationMiddleware so that the ``request``
# object doesn't have the ``user`` field
url = reverse('users-list')
response = self.client.get(url)
eq_(response.status_code, 200)
# check for spans
spans = self.tracer.writer.pop()
eq_(len(spans), 3)
sp_request = spans[0]
sp_template = spans[1]
sp_database = spans[2]
eq_(sp_request.get_tag('http.status_code'), '200')
eq_(sp_request.get_tag('django.user.is_authenticated'), None)
| [
"emanuele.palazzetti@datadoghq.com"
] | emanuele.palazzetti@datadoghq.com |
7ff386e3fbbfbfa51c32bf91d004cd1a8d786025 | fcb04d0a3deb909dae2113a3db2fb73f7d01f959 | /GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/linalg/setup.py | fff4ca980b136391a75eba82a8b2e43237693c07 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | houseind/robothon | 9e90dc7353aea411f8b322c7fbdb4f8e456ca171 | b483ca3f6c6c92366b383c9f0df89d1541697723 | refs/heads/master | 2021-03-12T19:38:29.897078 | 2012-11-01T11:10:41 | 2012-11-01T11:10:41 | 3,603,847 | 9 | 1 | null | 2012-11-01T11:10:42 | 2012-03-02T16:27:43 | Python | UTF-8 | Python | false | false | 1,339 | py |
import sys
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
config = Configuration('linalg',parent_package,top_path)
config.add_data_dir('tests')
# Configure lapack_lite
lapack_info = get_info('lapack_opt',0) # and {}
def get_lapack_lite_sources(ext, build_dir):
if not lapack_info:
print "### Warning: Using unoptimized lapack ###"
return ext.depends[:-1]
else:
if sys.platform=='win32':
print "### Warning: python_xerbla.c is disabled ###"
return ext.depends[:1]
return ext.depends[:2]
config.add_extension('lapack_lite',
sources = [get_lapack_lite_sources],
depends= ['lapack_litemodule.c',
'python_xerbla.c',
'zlapack_lite.c', 'dlapack_lite.c',
'blas_lite.c', 'dlamch.c',
'f2c_lite.c','f2c.h'],
extra_info = lapack_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| [
"ben@benkiel.com"
] | ben@benkiel.com |
644e059be2b5865dbd65a12165ad5f3cb505fae2 | 5ef455fd26f8ef443e12d44c5572ad96245abdec | /codes/caglar/core/operators.py | 3cd1dea090714e4392861a56bccd957976a6eac5 | [] | no_license | afcarl/PentominoExps | 3bb1bd48851e98f577b3500d54430b963c6f0e9b | 0aa5fbc487f04617015764faccfa3937b0b686e5 | refs/heads/master | 2020-03-17T02:36:44.698092 | 2016-10-30T00:29:59 | 2016-10-30T00:29:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,745 | py | import numpy as np
import theano
import theano.tensor as TT
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import scipy
import scipy.linalg
from scipy.linalg import circulant
from caglar.core.commons import EPS, Sigmoid, Tanh, floatX
from caglar.core.utils import concatenate, get_key_byname_from_dict, sharedX, \
as_floatX, block_gradient
class Operator(object):
    """Base class for layer operators; stores the numerical-stability epsilon."""
    def __init__(self, eps=1e-8):
        # Fall back to the project-wide EPS only when eps is explicitly None.
        self.eps = EPS if eps is None else eps
class Dropout(Operator):
    """
    Perform the dropout on the layer.
    """
    def __init__(self, dropout_prob=0.5, rng=None):
        # dropout_prob is the probability of DROPPING a unit.
        self.rng = RandomStreams(1) if rng is None else rng
        self.dropout_prob = dropout_prob
    def __call__(self, input, deterministic=False):
        if input is None:
            raise ValueError("input for the %s should not be empty." % __class__.__name__)
        p = self.dropout_prob
        if deterministic:
            # Inference path: inverted dropout needs no rescaling here.
            return input
        else:
            # "Inverted" dropout: scale activations up at train time so the
            # expectation matches the deterministic path.  Note this mutates
            # the symbolic `input` variable in place before masking.
            retain_p = 1 - p
            input /= retain_p
            return input * self.rng.binomial(input.shape,
                                             p=retain_p,
                                             dtype=floatX)
class GaussianNoise(Operator):
    """Abstract base holding the mean/std of a Gaussian noise source."""
    def __init__(self, avg=0, std=0.01, rng=None):
        self.rng = RandomStreams(1) if rng is None else rng
        self.avg = avg
        self.std = std
    def __call__(self):
        # Subclasses decide how the sampled noise combines with the input.
        raise NotImplementedError("call function is not implemented!")
class AdditiveGaussianNoise(GaussianNoise):
    """
    Add Gaussian noise to the input: input + N(avg, std).

    Noise is only injected during training; with deterministic=True the
    input is returned untouched.
    """
    def __call__(self, input, deterministic=False):
        if input is None:
            raise ValueError("input for the %s should not be empty." % __class__.__name__)
        # Bug fix: the original read `p = self.dropout_prob` (copy-pasted
        # from Dropout), an attribute GaussianNoise never defines, so every
        # call raised AttributeError.  Additive noise needs no probability.
        if deterministic:
            return input
        else:
            return input + self.rng.normal(input.shape,
                                           avg = self.avg,
                                           std = self.std,
                                           dtype=floatX)
class MultiplicativeGaussianNoise(GaussianNoise):
    """
    Scale the input elementwise by Gaussian noise: input * N(avg, std).

    With deterministic=True the input is returned unchanged.
    """
    def __call__(self, input, deterministic=False):
        if input is None:
            raise ValueError("input for the %s should not be empty." % __class__.__name__)
        if deterministic:
            return input
        else:
            # The trailing unreachable `return result` (with `result` never
            # defined) has been removed.
            return input * self.rng.normal(input.shape,
                                           avg = self.avg,
                                           std = self.std,
                                           dtype=floatX)
| [
"ca9lar@gmail.com"
] | ca9lar@gmail.com |
6b4ef8dbb5138461971f7e950467cd9317894416 | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/81190_Oneline_2D_Array_Transposition/recipe-81190.py | e8ae050cb0a860384757c61fc8dc618318d1a217 | [
"MIT",
"Python-2.0"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 215 | py | arr = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]]
# The subject should be regular, with all rows the same length
print [[r[col] for r in arr] for col in range(len(arr[0]))]
[[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
| [
"betty@qburst.com"
] | betty@qburst.com |
7f9cb77ca421d89f684ee1c45a555db4e02c7553 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_292/ch44_2020_04_07_16_09_27_380174.py | 25b2393cb3c7442ec4e8493b9298b55961eff5d8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | dn ={'JANEIRO':'1','FEVEREIRO':'2','MARCO':'3','ABRIL':'4','MAIO':'5','JUNHO':'6','JULHO':'7','AGOSTO':'8','SETEMBRO':'9','OUTUBRO':'10','NOVEMBRO':'11','DEZEMBRO':'12'}
# Look up the month number for the (uppercase) month name typed by the user.
a = input('escreva uma mes em letra maiuscula: ')
x = dn[a]
print(x)
| [
"you@example.com"
] | you@example.com |
8632c31a6ed6cc6325863ad03fea294ea393bb19 | 232c2738dff4b89ca63d7d4ec3c812570e3860c3 | /ch02/ppmi.py | 279fff8158ab54e6e42e2883a733ace6458a127c | [] | no_license | Soh1121/DeepLearningFromScratch2 | 0c115fcdf15c7b0cfd5d1ce7c6c32873354839d7 | f2294156c6394fd105a6534801ff42a078b0a0af | refs/heads/main | 2023-02-19T15:58:58.779465 | 2021-01-20T02:06:07 | 2021-01-20T02:06:07 | 319,550,802 | 0 | 0 | null | 2021-01-19T06:30:58 | 2020-12-08T06:45:41 | Python | UTF-8 | Python | false | false | 438 | py | import sys
# Make the book's shared `common` package importable from the chapter dir.
sys.path.append('..')
import numpy as np
from common.util import preprocess, create_co_matrix, cos_similarity, ppmi
# Build the co-occurrence matrix for a toy corpus, then its PPMI transform.
text = 'You say goodbye and I say hello.'
corpus, word_to_id, id_to_word = preprocess(text)
vocab_size = len(word_to_id)
C = create_co_matrix(corpus, vocab_size)
W = ppmi(C)
np.set_printoptions(precision=3) # show 3 significant digits
print('covariance matrix')
print(C)
print('-'*50)
print('PPMI')
print(W)
| [
"satou.shg@gmail.com"
] | satou.shg@gmail.com |
b8664333395dab3b46c91caeabb4de654f14f0f8 | 1f3920ef61f409fd6e512765afebabbbb264e4c0 | /L4P5/histogramfun.py | a84098c900fce92c32b2a6363d09189bf09d9463 | [
"Giftware"
] | permissive | 007lva/6.00.2x | 6d96c22a2c7ec43d13fb87ab517bf2f567d92272 | 632c81c0f614d8beaeb9003d77eab3d8d7738f9b | refs/heads/master | 2016-09-06T15:31:22.603672 | 2014-12-03T00:11:33 | 2014-12-03T00:11:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | import pylab
# You may have to change this path
WORDLIST_FILENAME = "words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of uppercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print "Loading word list from file..."
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r', 0)
# wordList: list of strings
wordList = []
for line in inFile:
wordList.append(line.strip().lower())
print " ", len(wordList), "words loaded."
return wordList
def plotVowelProportionHistogram(wordList, numBins=15):
    """
    Plots a histogram of the proportion of vowels in each word in wordList
    using the specified number of bins in numBins
    """
    vowels = 'aeiou'
    proportions = []
    for word in wordList:
        # Fraction of characters in the word that are vowels.
        vowel_count = sum(1 for ch in word if ch in vowels)
        proportions.append(vowel_count * 1.0 / len(word))
    pylab.hist(proportions, numBins)
    pylab.show()
if __name__ == '__main__':
wordList = loadWords()
plotVowelProportionHistogram(wordList, 10)
| [
"="
] | = |
748e7448f41785b4f6c322514f1e2807dfe44a4f | ba45727a064c991ad712137855e15c9f155ff3f6 | /config.py | 4230ce984225db5c4dc3b9e0691057df35842e66 | [
"MIT"
] | permissive | Kadas36/Pitch-App | 4bf50d4cf11088608f072e4710a80a610c8ae70b | 65e6fd006068e77daaaf95790f0d39e03ad810ef | refs/heads/master | 2023-01-23T00:20:57.217098 | 2020-12-08T18:56:55 | 2020-12-08T18:56:55 | 318,538,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | import os
class Config:
    '''
    General configuration parent class
    '''
    # NOTE(review): real database credentials are hard-coded in source --
    # move this URI into an environment variable before publishing the repo.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:kadas36@localhost/pitches'
    SECRET_KEY = os.environ.get('SECRET_KEY')
    # email configurations
    # MAIL_SERVER = 'smtp.googlemail.com'
    # MAIL_PORT = 587
    # MAIL_USE_TLS = True
    # MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    # MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
class ProdConfig(Config):
    # Production reads its connection string from the environment (Heroku-style).
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class DevConfig(Config):
    # Development talks to the local Postgres instance and enables debug mode.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:kadas36@localhost/pitches'
    DEBUG = True
config_options = {
'development':DevConfig,
'production':ProdConfig
} | [
"oduorthomas14@gmail.com"
] | oduorthomas14@gmail.com |
abc9d25cf0ac8a2f4f28fb508c772961bf5dd4e4 | 81f790f86b4027f63ff8bba8fd8096769b73b3a8 | /profiles/cromwell-sa/manual_steps.py | 35e72102e537d40410c04fa8604bcafe0078ad37 | [] | no_license | broadinstitute/terraform-terra | 590722b9d13e630e053af9630d170f9846cf2edb | 8beddb01ae6a5da54a388573efec69d113765de5 | refs/heads/master | 2022-05-25T20:09:06.652890 | 2022-05-12T22:55:11 | 2022-05-12T22:55:11 | 180,219,844 | 4 | 1 | null | 2022-05-16T17:25:10 | 2019-04-08T19:35:16 | HCL | UTF-8 | Python | false | false | 858 | py | import sys
def wait_for_enter():
    # Block until the operator confirms the manual step (Python 2 raw_input).
    raw_input("Press Enter to continue: ")
class AddToGroup(object):
    """Manual step: tell the operator to add the app's service account to the
    perf project-editors Google group."""
    def run(self, context):
        app = context["app"]
        project = context["project_name"]
        # Service-account email (note: trailing space preserved from original output).
        sa_line = "  {0}-{1}@{2}.iam.gserviceaccount.com ".format(project, app, project)
        print("\nIn the GSuite admin console (https://admin.google.com) for 'test.firecloud.org', go to:")
        print("  Groups -> Search for firecloud-project-editors-perf@test.firecloud.org -> Add members:")
        print("Add {0} SA:".format(app))
        print(sa_line)
        wait_for_enter()
if __name__ == "__main__":
    # The target GCP project name is the single CLI argument.
    context = {
        "app": "cromwell",
        "project_name": sys.argv[1]
    }
    # Ordered list of manual steps the operator must walk through.
    procedure = [
        AddToGroup()
    ]
    for step in procedure:
        step.run(context)
    print("\nAll Done!")
| [
"noreply@github.com"
] | broadinstitute.noreply@github.com |
87f8fc46e977cdf0a70a8db5ec1d90d4b055ca41 | a54007706a09b387690f79fd7ffd889decad42f1 | /day10/code/10_私有属性和私有方法.py | de76362927e72a1c663e1aac04fd6c9636e1c0f5 | [] | no_license | lvah/201903python | d425534544a1f91e5b80b5ff0de5ca34037fe6e9 | 1415fcb7697dfa2884d94dcd8963477e12fe0624 | refs/heads/master | 2020-07-06T16:45:37.882819 | 2019-09-08T10:13:07 | 2019-09-08T10:13:07 | 203,082,401 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py |
class Student(object):
    """Teaching demo of private (name-mangled) attributes and methods."""
    # Class-level private attribute (mangled to _Student__country).
    __country = 'china'
    def __init__(self, name, age, score):
        self.name = name
        # Age and score are private attributes
        self.__age = age
        self.__score = score
    # Private method: can only be invoked from inside the class;
    def __modify_score(self, scores):
        self.__score = scores
        print(self.__score)
    def set_age(self, age):
        # Validate before assigning: only ages strictly inside (0, 150) pass.
        if 0<age <150:
            self.__age = age
            print("当前年龄:", self.__age)
        else:
            raise Exception("年龄错误")
    def set_score(self, scores):
        # A score list must contain exactly three subject grades.
        if len(scores) == 3:
            self.__score = scores
        else:
            raise Exception("成绩异常")
class MathStudent(Student):
    # Empty subclass used to show mangled names are class-specific.
    pass
student = Student("粉条", 10, [100, 100, 100])
# Private attributes cannot be accessed directly from outside the class;
print(student.name)
student.set_age(15)
# Why can't private attributes be accessed?  When the interpreter sees an
# attribute or method whose name starts with a double underscore, it renames
# it: externally, __name becomes _ClassName__name (so __age is _Student__age).
# print(student._Student__age)
# student1 = MathStudent("粉条", 10, [100, 100, 100])
| [
"root@foundation0.ilt.example.com"
] | root@foundation0.ilt.example.com |
526a9ef301211f72aa2d874eed340a698d1a353c | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4145/codes/1758_762.py | 45ee8b72e53e62a98a60cc08b2de1dc2545913b6 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | from numpy import *
# Vector containing the names of the months of the year
vet_mes = array(['janeiro', 'fevereiro', 'marco', 'abril', 'maio', 'junho', 'julho', 'agosto', 'setembro', 'outubro', 'novembro', 'dezembro'])
st=input("data desejada: ")
# The date arrives as DDMMYYYY: slice out day, month and year.
d=st[0:2]
me=st[2:4]
a=st[4:8]
# Month numbers are 1-based, the array is 0-based.
m=vet_mes[int(me)-1]
print(d," de ",m," de ",a) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
6f1485277db2113e50acc4f877cfb18709af575c | caaf9046de59559bb92641c46bb8ab00f731cb46 | /PhysicsTools/Heppy/python/analyzers/objects/TauAnalyzer.py | 106849e976130be62ee1815717b9a26cf1102afc | [] | no_license | neumeist/cmssw | 7e26ad4a8f96c907c7373291eb8df205055f47f0 | a7061201efe9bc5fa3a69069db037d572eb3f235 | refs/heads/CMSSW_7_4_X | 2020-05-01T06:10:08.692078 | 2015-01-11T22:57:32 | 2015-01-11T22:57:32 | 29,109,257 | 1 | 1 | null | 2015-01-11T22:56:51 | 2015-01-11T22:56:49 | null | UTF-8 | Python | false | false | 4,565 | py |
import operator
import itertools
import copy
import types
from ROOT import TLorentzVector
from PhysicsTools.Heppy.analyzers.core.Analyzer import Analyzer
from PhysicsTools.HeppyCore.framework.event import Event
from PhysicsTools.HeppyCore.statistics.counter import Counter, Counters
from PhysicsTools.Heppy.analyzers.core.AutoHandle import AutoHandle
from PhysicsTools.Heppy.physicsobjects.Lepton import Lepton
from PhysicsTools.Heppy.physicsobjects.Tau import Tau
from PhysicsTools.HeppyCore.utils.deltar import deltaR, deltaPhi, bestMatch
import PhysicsTools.HeppyCore.framework.config as cfg
class TauAnalyzer( Analyzer ):
    """Heppy analyzer that builds selected/loose/inclusive tau lists per event."""
    def __init__(self, cfg_ana, cfg_comp, looperName ):
        super(TauAnalyzer,self).__init__(cfg_ana,cfg_comp,looperName)
    #----------------------------------------
    # DECLARATION OF HANDLES OF LEPTONS STUFF
    #----------------------------------------
    def declareHandles(self):
        super(TauAnalyzer, self).declareHandles()
        self.handles['taus'] = AutoHandle( ('slimmedTaus',''),'std::vector<pat::Tau>')
    def beginLoop(self):
        # Register the bookkeeping counters once per loop.
        super(TauAnalyzer,self).beginLoop()
        self.counters.addCounter('events')
        count = self.counters.counter('events')
        count.register('all events')
        count.register('has >=1 tau at preselection')
        count.register('has >=1 selected taus')
        count.register('has >=1 loose taus')
        count.register('has >=1 inclusive taus')
    #------------------
    # MAKE LEPTON LISTS
    #------------------
    def makeTaus(self, event):
        """Fill event.selectedTaus / looseTaus / inclusiveTaus (pt-sorted)."""
        event.selectedTaus = []
        event.looseTaus = []
        event.inclusiveTaus = []
        #get all
        alltaus = map( Tau, self.handles['taus'].product() )
        foundTau = False
        for tau in alltaus:
            tau.associatedVertex = event.goodVertices[0]
            tau.lepVeto = False
            # Optional veto: drop taus too close to an already-selected lepton.
            if self.cfg_ana.vetoLeptons:
                for lep in event.selectedLeptons:
                    if deltaR(lep.eta(), lep.phi(), tau.eta(), tau.phi()) < self.cfg_ana.leptonVetoDR:
                        tau.lepVeto = True
                if tau.lepVeto: continue
            # Optional POG-style veto via the anti-muon / anti-electron IDs.
            if self.cfg_ana.vetoLeptonsPOG:
                if not tau.tauID("againstMuonTight"):
                    tau.lepVeto = True
                if not tau.tauID("againstElectronLoose"):
                    tau.lepVeto = True
                if tau.lepVeto: continue
            # Kinematic preselection.
            if tau.pt() < self.cfg_ana.ptMin: continue
            if abs(tau.eta()) > self.cfg_ana.etaMax: continue
            ### tau.dxy and tau.dz are zero
            ### if abs(tau.dxy()) > self.cfg_ana.dxyMax or abs(tau.dz()) > self.cfg_ana.dzMax: continue
            foundTau = True
            def id3(tau,X):
                """Create an integer equal to 1-2-3 for (loose,medium,tight)"""
                return tau.tauID(X%"Loose") + tau.tauID(X%"Medium") + tau.tauID(X%"Tight")
            #tau.idMVA2 = id3(tau, "by%sIsolationMVA2")
            tau.idCI3hit = id3(tau, "by%sCombinedIsolationDeltaBetaCorr3Hits")
            #print "Tau pt %5.1f: idMVA2 %d, idCI3hit %d, %s, %s" % (tau.pt(), tau.idMVA2, tau.idCI3hit, tau.tauID(self.cfg_ana.tauID), tau.tauID(self.cfg_ana.tauLooseID))
            # Tight ID -> selected; otherwise loose ID -> loose; both inclusive.
            if tau.tauID(self.cfg_ana.tauID):
                event.selectedTaus.append(tau)
                event.inclusiveTaus.append(tau)
            elif tau.tauID(self.cfg_ana.tauLooseID):
                event.looseTaus.append(tau)
                event.inclusiveTaus.append(tau)
        event.selectedTaus.sort(key = lambda l : l.pt(), reverse = True)
        event.looseTaus.sort(key = lambda l : l.pt(), reverse = True)
        self.counters.counter('events').inc('all events')
        if foundTau: self.counters.counter('events').inc('has >=1 tau at preselection')
        if len(event.selectedTaus): self.counters.counter('events').inc('has >=1 selected taus')
        if len(event.looseTaus): self.counters.counter('events').inc('has >=1 loose taus')
        if len(event.inclusiveTaus): self.counters.counter('events').inc('has >=1 inclusive taus')
    def process(self, event):
        self.readCollections( event.input )
        self.makeTaus(event)
        return True
# Default analyzer configuration attached to the class; individual jobs can
# clone and override these cuts/IDs.
setattr(TauAnalyzer,"defaultConfig",cfg.Analyzer(
    class_object=TauAnalyzer,
    ptMin = 20,
    etaMax = 9999,
    dxyMax = 0.5,
    dzMax = 1.0,
    vetoLeptons = True,
    leptonVetoDR = 0.4,
    vetoLeptonsPOG = False,
    tauID = "byLooseCombinedIsolationDeltaBetaCorr3Hits",
    tauLooseID = "decayModeFinding",
    )
)
| [
"colin.bernet@cern.ch"
] | colin.bernet@cern.ch |
d9db8b4b25df35f0c01430a7f4dc0ded3603c882 | c3392d4fcd8853be00ab43376c1dd8ae8e155887 | /train_ars.py | 18257502c08e62e69f788f4d80b0b91df2414304 | [] | no_license | smilu97/ROBOTIS-RL | 786e342d626b65581eafab394134e35305556633 | 481f15ea5bfcc6d0fffd3222041338e795effee9 | refs/heads/master | 2023-08-01T08:12:30.992436 | 2021-09-21T07:00:45 | 2021-09-21T07:00:45 | 365,978,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,868 | py | #!/usr/bin/env python
#!/usr/bin/env python
import gym
import numpy as np
from ars.policy import Policy
from ars.config import ARSConfig
from ars.env_ars import EnvARS
from env import OP3Env
class LinearPolicy(Policy):
    """Affine policy: action = clip(input @ W + b, -1, 1).

    Parameters are stored flat: the first input_size * output_size entries
    are W (row-major), the remaining output_size entries are the bias b.
    """
    def __init__(self, input_size: int, output_size: int, discrete_output=False):
        self.discrete_output = discrete_output
        self.input_size = input_size
        self.output_size = output_size
        self.shape = (input_size, output_size)
        self.c_W = input_size * output_size  # number of weight entries
        self.param_size = self.c_W + output_size
        super().__init__(self.param_size)
    def call(self, params, input):
        weights = params[:self.c_W].reshape(self.shape)
        bias = params[self.c_W:]
        out = np.matmul(input, weights) + bias
        if self.discrete_output:
            # Threshold the first output into a binary action.
            out = 1 if out[0] > 0.5 else 0
        out = np.minimum(1.0, np.maximum(-1.0, out))
        return out
    def init_params(self):
        # Start from the all-zero policy.
        return np.zeros(self.param_size, dtype=np.float64)
def main():
    """Train a linear ARS policy on the OP3 environment.

    Runs until the 5-episode mean evaluation score reaches 500, saving the
    parameters to 'result.npz' every 10 training iterations.
    """
    def env_creator():
        # Fresh environment instance per worker process.
        return OP3Env(use_bias=True, human_bias=True)
    input_size = 24
    output_size = 12
    policy = LinearPolicy(input_size, output_size, discrete_output=False)
    config = ARSConfig(
        step_size=0.1,
        num_directions=100,
        num_top_directions=10,
        exploration_noise=0.01
    )
    ars = EnvARS(env_creator, policy, config, num_cpus=8)
    i = 0
    while True:
        # Average several rollouts to reduce evaluation variance.
        score = np.mean([ars.evaluate() for _ in range(5)])
        print('score:', score)
        if score >= 500.0:
            break
        ars.train()
        i += 1
        if i % 10 == 0:
            print('save model')
            # Bug fix: np.save writes binary data, so the file must be opened
            # in 'wb' mode -- text mode ('w') raises TypeError on Python 3.
            # The context manager also guarantees the handle is closed.
            with open('result.npz', 'wb') as fd:
                np.save(fd, ars.params)
            print('params:', ars.params)
if __name__ == '__main__':
main()
| [
"smilup2244@gmail.com"
] | smilup2244@gmail.com |
51af99662026e17ae85aea2aefaa927ad47d590d | a1851d76efd0a94af2dc89b92c293cdce664b9e4 | /blog/tests.py | 9aa38923c82e7689d09c7384eed4369bb1013362 | [] | no_license | ephremworkeye/drfproject | 21cea415fbecf49dbc0b26122726a13049e8aae1 | 73aa59bf01af40be382e99b659dc18064b891463 | refs/heads/master | 2023-06-25T04:06:36.042518 | 2021-07-30T02:20:41 | 2021-07-30T02:20:41 | 390,543,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | from django.test import TestCase
from django.contrib.auth.models import User
from .models import Category, Post
# Create your tests here.
class Test_Create_Post(TestCase):
    """Exercises Post/Category creation and the models' string behaviour."""
    @classmethod
    def setUpTestData(cls):
        # Fixtures created once for the whole TestCase; the tests re-fetch
        # them by primary key rather than through these locals.
        test_category = Category.objects.create(name='django')
        testuser1 = User.objects.create_user(
            username='test_user1', password='123456789'
        )
        test_post = Post.objects.create(
            category_id=1, title='Post Title',
            excerpt='Post Excerpt', content='Post Content',
            slug='post-title', author_id=1, status='published'
        )
    def test_blog_content(self):
        # Look the fixtures back up through the model managers.
        post = Post.postobjects.get(id=1)
        cat = Category.objects.get(id=1)
        author = f'{post.author}'
        excerpt = f'{post.excerpt}'  # NOTE(review): computed but never asserted
        title = f'{post.title}'
        content = f'{post.content}'
        status = f'{post.status}'
        self.assertEqual(author, 'test_user1')
        self.assertEqual(title, 'Post Title')
        self.assertEqual(content, 'Post Content')
        self.assertEqual(status, 'published')
        self.assertEqual(str(post), 'Post Title')
        self.assertEqual(str(cat), 'django')
| [
"ephremworkeye@gmail.com"
] | ephremworkeye@gmail.com |
817fe72f710ece9107211c7c5fc2b31965471a1b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02600/s771105734.py | 11b354cdd0f87325884f1f93bb1cc299c55d6ec5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | X = int(input())
if X in range(400,600):
print(8)
elif X in range(600,800):
print(7)
elif X in range(800,1000):
print(6)
elif X in range(1000,1200):
print(5)
elif X in range(1200,1400):
print(4)
elif X in range(1400,1600):
print(3)
elif X in range(1600,1800):
print(2)
else:
print(1) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
18583f18bcbddf3b2dff5a23e6fe0f224f570052 | e9b18b8fc91747c729c46310d462f0d4c3afa5c3 | /keymaster.py | 1d8d992a1767ce1c08161604fd1025b1322dab27 | [] | no_license | artxgj/hd-signup | e989dbc000399c917eba2c7358129f03a159ab4a | 7989857b18ddba760abf7e2fc1af7878b437ebca | refs/heads/master | 2021-01-16T18:53:48.326243 | 2010-11-11T00:42:08 | 2010-11-11T00:43:38 | 1,069,803 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | from google.appengine.api import urlfetch, memcache, users
from google.appengine.ext import webapp, db
from google.appengine.ext.webapp import util
import os
try:
  from Crypto.Cipher import ARC4
except ImportError:
  # Just pass through in dev mode
  # PyCrypto is unavailable on the dev server; this stub mimics the
  # ARC4 API (new/encrypt/decrypt) but leaves the data unmodified.
  class ARC4:
    new = classmethod(lambda k,x: ARC4)
    encrypt = classmethod(lambda k,x: x)
    decrypt = classmethod(lambda k,x: x)
class Keymaster(db.Model):
  """Datastore model holding secrets obfuscated with RC4, keyed on the
  application id."""
  # RC4-obfuscated secret bytes.
  secret = db.BlobProperty(required=True)
  @classmethod
  def encrypt(cls, key_name, secret):
    """Store *secret* (RC4-obfuscated) under *key_name*; returns the
    datastore key from put()."""
    secret = ARC4.new(os.environ['APPLICATION_ID']).encrypt(secret)
    k = cls.get_by_key_name(key_name)
    if k:
      k.secret = str(secret)
    else:
      k = cls(key_name=str(key_name), secret=str(secret))
    return k.put()
  @classmethod
  def decrypt(cls, key_name):
    """Return the plaintext secret stored under *key_name*; raises
    Exception when no entry exists."""
    k = cls.get_by_key_name(str(key_name))
    if not k:
      raise Exception("Keymaster has no secret for %s" % key_name)
    # RC4 is symmetric: encrypting again with the same key recovers
    # the plaintext, so 'encrypt' here performs the decryption.
    return ARC4.new(os.environ['APPLICATION_ID']).encrypt(k.secret)
def get(key):
  """Convenience wrapper: return the decrypted secret stored under *key*."""
  return Keymaster.decrypt(key)
class KeymasterHandler(webapp.RequestHandler):
  """Admin-only web UI for storing secrets in the Keymaster."""
  @util.login_required
  def get(self):
    # Render the key/secret entry form for app admins only.
    if users.is_current_user_admin():
      self.response.out.write("""<html><body><form method="post">
      <input type="text" name="key" /><input type="text" name="secret" /><input type="submit" /></form></body></html>""")
    else:
      self.redirect('/')
  def post(self):
    # Save the submitted secret, then echo it back (decrypted) to
    # confirm the round trip succeeded.
    if users.is_current_user_admin():
      Keymaster.encrypt(self.request.get('key'), self.request.get('secret'))
      self.response.out.write("Saved: %s" % Keymaster.decrypt(self.request.get('key')))
    else:
      self.redirect('/')
def main():
  """WSGI entry point: expose the keymaster form at /_km/key."""
  application = webapp.WSGIApplication([
    ('/_km/key', KeymasterHandler),
  ],debug=True)
  util.run_wsgi_app(application)
if __name__ == '__main__':
  main()
| [
"progrium@gmail.com"
] | progrium@gmail.com |
9d704db1a598e07043f4aa5d7b75b15787bd4fce | 5d0edf31b17c5375faf6126c1a7be8e79bfe2ab8 | /buildout-cache/eggs/plone.app.dexterity-2.0.18-py2.7.egg/plone/app/dexterity/upgrades/to2000.py | 08356228142a2d02010f845ccd3c479793018e41 | [] | no_license | renansfs/Plone_SP | 27cba32ebd9fc03dae3941ec23cf1bf0a7b6667a | 8a7bdbdb98c3f9fc1073c6061cd2d3a0ec80caf5 | refs/heads/master | 2021-01-15T15:32:43.138965 | 2016-08-24T15:30:19 | 2016-08-24T15:30:19 | 65,313,812 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from Products.CMFCore.utils import getToolByName
def remove_overlays_css(context):
    """Upgrade step: unregister the obsolete Dexterity overlays
    stylesheet from the portal_css registry."""
    portal_css = getToolByName(context, 'portal_css')
    portal_css.unregisterResource(
        '++resource++plone.app.dexterity.overlays.css')
| [
"renansfs@gmail.com"
] | renansfs@gmail.com |
eb9fd5786b975a38a8d18efa6ab081bcefd75756 | f483545d7765c25d1b315027726dbd74bc77b98a | /02.교육/Python-응용SW개발/Quiz/20190423_Sayhi.py | e652c41df894e341182f2857bcdb06554ff939f5 | [] | no_license | niceman5/pythonProject | e51b44a50776100a63443d7da850ba4b8b00f5eb | 3589fd200b56f68b856d2b4d2031c2a1135168a0 | refs/heads/master | 2023-07-10T16:12:57.756944 | 2023-06-27T08:13:54 | 2023-06-27T08:13:54 | 135,047,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py |
def say_hi(name: str, age: int) -> str:
    """Build a short self-introduction for *name* aged *age*."""
    return f"Hi. My name is {name} and I'm {age} years old"
##
# Main process handling (script entry point)
#
if __name__ == '__main__':
    print(say_hi('Alex',32))
    print(say_hi('Frank', 68))
    # Sanity checks on the expected greeting format.
    assert say_hi("Alex", 32) == "Hi. My name is Alex and I'm 32 years old"
    assert say_hi("Frank", 68) == "Hi. My name is Frank and I'm 68 years old"
print(say_hi('Pepe', 8)) | [
"niceman555@gmail.com"
] | niceman555@gmail.com |
9d42599a85c1e4b4812e356c6575432b94e84088 | 0f05f69c1c71597d861320244760ca5d6e843c69 | /archive/models.py | f9537a3632109c69f580c27f31bfed88043b1e50 | [
"MIT"
] | permissive | emawind84/rrwebtv | 6ddb3d2a9da6acbcd3ce267b5099edabca2b55de | ae22cd39ea430aed0de2b852e40c309465a7237b | refs/heads/master | 2021-06-21T19:33:46.091579 | 2019-12-03T06:29:52 | 2019-12-03T06:29:52 | 177,141,697 | 0 | 0 | MIT | 2021-06-10T21:18:44 | 2019-03-22T13:06:00 | Python | UTF-8 | Python | false | false | 1,035 | py | from django.db import models
from uploads.core.models import Replay
class Performance(models.Model):
    """A recorded rally stage performance, optionally linked to a replay
    file, listed with featured entries first and newest first."""
    note = models.CharField(max_length=255, blank=True)
    pilot = models.CharField(max_length=200, blank=True)
    team = models.CharField(max_length=200, blank=True)
    rally = models.CharField(max_length=200, blank=True)
    # Track name is the only required descriptive field.
    track = models.CharField(max_length=200, blank=False)
    stage_number = models.CharField(max_length=200, blank=True)
    car = models.CharField(max_length=200, blank=True)
    category = models.CharField(max_length=200, blank=True)
    time = models.CharField(max_length=200, blank=True)
    # Required: the id of the YouTube video showing the run.
    youtube_id = models.CharField(max_length=200, blank=False)
    uploaded_at = models.DateTimeField(auto_now_add=True)
    featured = models.BooleanField(default=False)
    # Optional one-to-one link to the uploaded replay file.
    replay = models.OneToOneField(Replay, on_delete=models.CASCADE, blank=True, null=True)
    def __str__(self):
        """Human-readable label: upload timestamp plus track name."""
        return '{} - {}'.format(self.uploaded_at, self.track)
    class Meta:
        # Featured runs first, then newest uploads first.
        ordering = ('-featured', '-uploaded_at',)
"emawind84@gmail.com"
] | emawind84@gmail.com |
8fdc98ad20285d96a941c9956594918ed1c27619 | bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6 | /AtCoder/arc/074d_2.py | 7e4f8cc5c8f778dee383fbd00cadd4fc6501d7f7 | [] | no_license | y-oksaku/Competitive-Programming | 3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db | a3ff52f538329bed034d3008e051f30442aaadae | refs/heads/master | 2021-06-11T16:14:12.635947 | 2021-05-04T08:18:35 | 2021-05-04T08:18:35 | 188,639,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | from heapq import heappush, heappop, heapify
def solve(n, a):
    """Return the maximum of (sum of the first n kept elements) minus
    (sum of the last n kept elements) after deleting exactly n of the
    3n numbers in *a* while preserving their order.

    For every split point, the best left half is the largest-sum n
    elements of a prefix and the best right half is the smallest-sum n
    elements of a suffix; both are maintained incrementally with heaps
    in O(n log n).
    """
    # left[i]: maximum sum of n elements chosen from a[:n + i].
    left = [0] * (n + 1)
    low = a[:n]  # min-heap of the currently kept (largest) n elements
    heapify(low)
    acc = sum(low)
    left[0] = acc
    for i, v in enumerate(a[n:2 * n], start=1):
        acc += v
        heappush(low, v)
        acc -= heappop(low)  # evict the smallest kept element
        left[i] = acc
    # right[i]: -(minimum sum of n elements chosen from a[n + i:]),
    # computed over the reversed tail with a max-heap via negation.
    rev = a[::-1]
    right = [0] * (n + 1)
    high = [-v for v in rev[:n]]
    heapify(high)
    acc = sum(high)
    right[0] = acc
    for i, v in enumerate(rev[n:2 * n], start=1):
        acc -= v
        heappush(high, -v)
        acc -= heappop(high)  # evict the largest kept element
        right[i] = acc
    right.reverse()
    return max(l + r for l, r in zip(left, right))


if __name__ == '__main__':
    n = int(input())
    a = list(map(int, input().split()))
    print(solve(n, a))
| [
"y.oksaku@stu.kanazawa-u.ac.jp"
] | y.oksaku@stu.kanazawa-u.ac.jp |
7998dea65bc13e38995fb13c85d0359adfeda2b3 | 7ad3099e45e9c501c79213e9bc687d7834ac38e3 | /dlint/linters/bad_popen2_use.py | 0f8e7ca8e0dee169a4222d9ff10209756d25a778 | [
"BSD-3-Clause"
] | permissive | DrewDennison/dlint | 40f76694adb747a868c1f0eaa0ebd8edbc20604d | a2ed49e68638036aa99ea4ba6f095473b5dddd88 | refs/heads/master | 2023-03-01T22:38:57.577847 | 2021-02-11T04:21:00 | 2021-02-11T04:21:00 | 273,586,125 | 0 | 0 | BSD-3-Clause | 2020-06-19T21:03:41 | 2020-06-19T21:03:40 | null | UTF-8 | Python | false | false | 581 | py | #!/usr/bin/env python
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from .helpers import bad_module_use
class BadPopen2UseLinter(bad_module_use.BadModuleUseLinter):
    """This linter looks for use of the "popen2" module. This module executes
    shell commands, which often leads to arbitrary code execution bugs.
    """
    # Enabled by default (not an opt-in check).
    off_by_default = False
    # Flake8 error code and message template emitted for violations.
    _code = 'DUO126'
    _error_tmpl = 'DUO126 avoid "popen2" module use'
    @property
    def illegal_modules(self):
        """Module names whose use should be flagged."""
        return [
            "popen2",
        ]
| [
"schwag09@gmail.com"
] | schwag09@gmail.com |
cc41826367a8eab1283b6c9bc053da8592ea2b45 | f227254ce39d1513e3c0f550fb02785989d69f83 | /pycon/slide/templates/codes/good_use_mock.py | 390516428895badd4d1e2af16bd265fa48aec07d | [] | no_license | podhmo/podhmo.github.com | b947159fb8a600331bab06cf3e4319baaab9cf20 | b4114a64299fb21d77e2dee01279f45da8610bbb | refs/heads/master | 2021-01-10T21:37:29.154250 | 2013-09-14T14:13:39 | 2013-09-14T14:13:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | # -*- coding:utf-8 -*-
from __future__ import print_function
import unittest
import mock
class Tests(unittest.TestCase):
    ## tearDown cannot handle exceptions raised during setUp;
    ## cleanups registered with addCleanup still run in that case.
    def setUp(self):
        self.addCleanup(lambda : print("cleanup"))
        self.addCleanup(mock.stopall)
        do_something() #raise Exception?
    def tearDown(self):
        print("teardown")
    def test_it(self):
        do_something2() #raise Exception?
| [
"podhmo+altair@beproud.jp"
] | podhmo+altair@beproud.jp |
a256dc957721025e1cdb1d095df778f56009ffa0 | 9c58ea644777ed99025038a06b69dde1793b4445 | /logging_format/violations.py | 9cbe1aa1ee026546082b5206b982b02672f91eaa | [
"Apache-2.0"
] | permissive | sfstpala/flake8-logging-format | 973f4cf0ab7c7119a7b29dbc22c8c20ddfd67923 | 1057dc290bee5b475426addbd2e1670d959fc249 | refs/heads/develop | 2021-01-15T08:18:48.210651 | 2017-06-16T19:44:59 | 2017-06-16T19:44:59 | 99,563,776 | 0 | 0 | null | 2017-08-07T09:57:35 | 2017-08-07T09:57:35 | null | UTF-8 | Python | false | false | 396 | py | """
Defined violations
"""
# Eager message formatting in logging calls: the message should instead be
# built lazily by the logging framework via %-style arguments.
STRING_FORMAT_VIOLATION = "G001 Logging statement uses string.format()"
STRING_CONCAT_VIOLATION = "G003 Logging statement uses '+'"
PERCENT_FORMAT_VIOLATION = "G002 Logging statement uses '%'"
# 'warn' is a deprecated alias of 'warning'.
WARN_VIOLATION = "G010 Logging statement uses 'warn' instead of 'warning'"
# Extra keyword arguments must come from the configured whitelist;
# the {} placeholder is filled with the offending keyword name.
WHITELIST_VIOLATION = "G100 Logging statement uses non-whitelisted extra keyword argument: {}"
| [
"jesse.myers@globality.com"
] | jesse.myers@globality.com |
8b28cfa24bc518baeda60b60e64076dbdf5e0a3a | 307d157e21997bf5af0e84905af0ddc4780f7ed1 | /kansha/services/components_repository.py | 06d22c0cec91e40c66e4d91963c7c394214d9560 | [] | no_license | maxbyz/kansha | ce650f60d42abd477d297aa821c5e2941ba8e260 | 85b5816da126b1c7098707c98f217d8b2e524ff2 | refs/heads/master | 2021-10-01T15:38:28.560448 | 2018-11-27T11:19:48 | 2018-11-27T11:19:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # -*- coding:utf-8 -*-
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
from nagare.services import plugins
class CardExtensions(plugins.Plugins):
    """Registry of card extension plugins discovered via setuptools
    entry points."""
    # Entry point group the extensions are loaded from.
    ENTRY_POINTS = 'kansha.card.extensions'
    # Configuration file section read for these plugins.
    CONFIG_SECTION = 'card_extensions'
    # Per-extension configurator objects, keyed by extension name.
    CONFIGURATORS = {}
    def set_configurators(self, configurators):
        """
        Return a copy of self with
        CONFIGURATORS set.
        """
        repository = self.copy()
        repository.CONFIGURATORS = configurators
        return repository
    def instantiate_items(self, card, action_log, services_service):
        """
        Return items as CardExtension instances for given card.
        """
        # Each extension receives its own configurator (or None when
        # none was registered under its name).
        return [
            (name, services_service(klass, card, action_log, self.CONFIGURATORS.get(name)))
            for name, klass in self.items()
        ]
| [
"romuald.texier-marcade@net-ng.com"
] | romuald.texier-marcade@net-ng.com |
533c59d1b1ddee5ea9a12dc0fe5572af83727f87 | 64106aef328eea5e893a685873d91988c50af74a | /src/com/oppojifen/test_MyOPPO.py | 37873c6ed7143eb2e3a67ae65a73e9c891c1358c | [] | no_license | Fangziqiang/AppiumTesting | 77fd6517bcb96261496583ac8707362529aea54f | e224df6aaaa8262aceb4a8162f1d877064c0d333 | refs/heads/master | 2021-07-25T22:14:08.045554 | 2021-07-06T10:47:55 | 2021-07-06T10:47:55 | 129,370,176 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | # -*- coding: utf-8 -*-
import sys
import os
import unittest
from time import sleep
from appium import webdriver
import subprocess
from swipeMethod import swipe_up
from swipeMethod import swipe_left
from swipeMethod import swipe_right
# https://cloud.tencent.com/developer/article/1467203 安装定位工具
class testUsercenter(unittest.TestCase):
    """Appium UI test for the OPPO user-center app: performs the daily
    check-in on the VIP main page."""

    def setUp(self):
        """Open an Appium session against the target device and activity."""
        desired_caps = {}
        desired_caps['platformName'] = 'Android'
        desired_caps['platformVersion'] = '11'
        desired_caps['deviceName'] = '763d6ade'
        desired_caps['appPackage'] = 'com.oppo.usercenter'
        desired_caps['appActivity'] = 'com.oppo.usercenter.vip.UCVIPMainActivity'
        # Keep app data between runs so the login state survives.
        desired_caps['noReset'] = 'True'
        # Connect to the local Appium server.
        self.driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", desired_caps)

    def test_getintegral(self):
        """Tap the check-in button; treat any failure as 'already
        checked in today' (best effort, as before)."""
        try:
            self.driver.implicitly_wait(10)
            signin = self.driver.find_element_by_xpath('//*[@text="签到"]')
            signin.click()
            sleep(3)
            print("签到成功")
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            print("已签到")

    def tearDown(self):
        """Close the Appium session."""
        self.driver.quit()
# Build the test suite containing the single check-in test.
suit = unittest.TestSuite()
suit.addTest(testUsercenter("test_getintegral"))
if __name__ == '__main__':
    # unittest.main() would discover and run every test instead;
    # here a plain text runner executes only the suite above.
    # unittest.main()
    run = unittest.TextTestRunner()
run.run(suit) | [
"286330540@qq.com"
] | 286330540@qq.com |
c59c35dfe207243dc29aebe9966469339feb3bc2 | b0eca8365ea8c779ad49f16792cae6dcf28cdf4e | /rebecca/app/admin/tests/test_schema.py | 6a2269cce8773b64bcaeaf5b6f8503c9fcf03b44 | [] | no_license | rebeccaframework/rebecca.app.admin | fefc9eb5d783317e130d6be3821e7c560c8e185a | 0fca8df46bbf03dd5fd98ce9c09b1b305e97db99 | refs/heads/master | 2021-01-19T06:26:02.573251 | 2013-12-22T16:15:16 | 2013-12-22T16:15:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | import unittest
from testfixtures import ShouldRaise
class TestRelation(unittest.TestCase):
    """Tests for the SQLAlchemy-backed colander ``Relation`` type."""
    def setUp(self):
        # Fresh in-memory session via the package's test helpers.
        from ..testing import _setup
        self.session = _setup()
    def tearDown(self):
        from ..testing import _teardown
        _teardown()
    def _getTarget(self):
        """Return the class under test (imported lazily)."""
        from ..schema import Relation
        return Relation
    def _makeOne(self, *args, **kwargs):
        """Instantiate the class under test."""
        return self._getTarget()(*args, **kwargs)
    def test_serialize(self):
        """An instance serializes to its primary key as a string."""
        from ..testing import DummySQLAModel
        dummy = DummySQLAModel(value=100)
        self.session.add(dummy)
        self.session.flush()
        target = self._makeOne(DummySQLAModel, self.session)
        result = target.serialize(None, dummy)
        self.assertEqual(result, str(dummy.id))
    def test_deserialize(self):
        """A primary-key string deserializes back to the instance."""
        from ..testing import DummySQLAModel
        dummy = DummySQLAModel(value=100)
        self.session.add(dummy)
        self.session.flush()
        target = self._makeOne(DummySQLAModel, self.session)
        result = target.deserialize(None, str(dummy.id))
        self.assertEqual(result, dummy)
    def test_deserialize_no_instance(self):
        """An unknown key raises colander.Invalid with a templated message."""
        import colander
        from ..testing import DummySQLAModel
        target = self._makeOne(DummySQLAModel, self.session)
        msg = '${cstruct} is not found from ${model}'
        with ShouldRaise(colander.Invalid(None, msg)):
            target.deserialize(None, "a")
    def test_deserialize_empty(self):
        """An empty string deserializes to None."""
        from ..testing import DummySQLAModel
        target = self._makeOne(DummySQLAModel, self.session)
        result = target.deserialize(None, "")
        self.assertIsNone(result)
| [
"aodagx@gmail.com"
] | aodagx@gmail.com |
5daf54d1ccad555238ec40a2592b3ce2055f6eed | b462a1eb41cdd6ae92eb8717e829b8e09f3152b2 | /cvxcluster/clusterpath.py | 0d7e5ebe5dd304f1e78fa88f33c074a5fa3aedcd | [] | no_license | mnarayan/cvxcluster | 3e49dc7a040ed2f53795dabd3e98ca3ae567d2a8 | 2d32f6409751f2c9e79eded4776a172df0e54454 | refs/heads/master | 2021-01-12T22:35:25.874393 | 2014-05-29T09:51:05 | 2014-05-29T09:51:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | import logging; log = logging.getLogger(__name__)
import numpy as np
from .conditions import RelativeTolerance
from .profile import profile
from .solve import solve
__all__ = [
'clusterpath',
]
@profile
def clusterpath(problem0, solver, conditions, gamma0=1e-3, step=1.2):
  """
  Calculate the "clusterpath" -- the sequence of gamma values that cause the
  number of clusters to change -- by guess-and-check.

  Yields (gamma, n_clusters) each time the cluster count drops, until a
  single cluster remains.

  Parameters
  ----------
  problem0 : Problem
    a convex clustering problem
  solver : Solver
    a solver for convex clustering
  conditions : iterable
    stopping conditions passed to ``solve``; each is ``reset()`` after
    every gamma value
  gamma0 : float > 0
    initial gamma value
  step : float > 1
    gamma <- gamma * step at each iteration
  """
  gamma = gamma0
  n_clusters = problem0.n_samples
  lmbd0 = None
  while n_clusters > 1:
    # Rebuild the problem at the current gamma, reusing data and weights.
    problem = problem0.__class__(problem0.X, gamma, problem0.w)
    # Warm-start from the previous solution's dual variables (lmbd0).
    solution, _ = solve(problem, solver, conditions, lmbd0=lmbd0)
    n_clusters_ = 1 + np.max(solution.clusters)
    log.info( '{:4.8f} | {:4d}'.format(gamma, n_clusters_) )
    if n_clusters > n_clusters_:
      n_clusters = n_clusters_
      yield gamma, n_clusters_
    gamma *= step
    lmbd0 = solution.lmbd
    for condition in conditions:
      condition.reset()
| [
"duckworthd@gmail.com"
] | duckworthd@gmail.com |
6fe8d965fb31a10689982ae269fb3f9a2aff413a | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/candidates_user.py | 0a7719d7df2acd573906fc162e5c24b0d5d57a59 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,536 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CandidatesUser:
    """Auto-generated SDK model representing a candidate user (name + id).

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # Attribute names whose values must be masked in to_dict().
    sensitive_list = []
    openapi_types = {
        'name': 'str',
        'id': 'str'
    }
    attribute_map = {
        'name': 'name',
        'id': 'id'
    }
    def __init__(self, name=None, id=None):
        """CandidatesUser

        The model defined in huaweicloud sdk

        :param name: User name
        :type name: str
        :param id: User ID
        :type id: str
        """
        self._name = None
        self._id = None
        self.discriminator = None
        if name is not None:
            self.name = name
        if id is not None:
            self.id = id
    @property
    def name(self):
        """Gets the name of this CandidatesUser.

        User name

        :return: The name of this CandidatesUser.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this CandidatesUser.

        User name

        :param name: The name of this CandidatesUser.
        :type name: str
        """
        self._name = name
    @property
    def id(self):
        """Gets the id of this CandidatesUser.

        User ID

        :return: The id of this CandidatesUser.
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this CandidatesUser.

        User ID

        :param id: The id of this CandidatesUser.
        :type id: str
        """
        self._id = id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes declared sensitive instead of leaking them.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force utf-8 default encoding before dumping.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CandidatesUser):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
58eb196519fa8ee91d341cd2c11ff9e17f6fe82b | 8f67f4966c5ccfc3556cc05f7a47c2c664c2b115 | /django_push/subscriber/utils.py | 4aa716aad9ab1c47204a790d5cc4fc05284f1fdf | [
"BSD-2-Clause"
] | permissive | wd5/django-push | 8fe15b44eaaef4a54d4920d8e3c8689066e23038 | 7cd203b53e6e344c01c812a6ce1e0f68044fd13d | refs/heads/master | 2020-12-25T13:33:32.281561 | 2012-11-30T18:43:35 | 2012-11-30T18:43:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | import feedparser
from django.conf import settings
from django.utils.importlib import import_module
def get_hub(topic):
    """Fetch and parse *topic* as a feed and return the href of the
    first link advertised with rel="hub".

    Raises TypeError when the feed advertises no hub.
    """
    feed = feedparser.parse(topic).feed
    hub_links = (link['href'] for link in feed.links
                 if link['rel'] == 'hub')
    for href in hub_links:
        return href
    raise TypeError("Hub not found")
def hub_credentials(hub_url):
    """A callback that returns no credentials, for anonymous
    subscriptions. Meant to be overridden if developers need to
    authenticate with certain hubs."""
    # Default behaviour: no credentials (returns None implicitly).
    return
def get_hub_credentials(hub_url):
    """Resolve the credentials callback named by the PUSH_CREDENTIALS
    setting (a dotted path) and invoke it with *hub_url*."""
    dotted_path = getattr(settings, 'PUSH_CREDENTIALS',
                          'django_push.subscriber.utils.hub_credentials')
    module_path, func_name = dotted_path.rsplit('.', 1)
    module = import_module(module_path)
    callback = getattr(module, func_name)
    return callback(hub_url)
| [
"buburno@gmail.com"
] | buburno@gmail.com |
bee3172b4084d00d9c99d956728d8307cc451c8e | 60f3761284aa01d309446850bf9b87e0bf32ad4c | /tests/unit/merchant_account/test_address_details.py | 091f7d7069f1e369920aa32ffdcfeb1e643b1bfc | [
"MIT"
] | permissive | braintree/braintree_python | e8cc05016e65a79ad6b40c3fef557a196dbfce44 | 673f70a60d1db03d633f0758b5b2d40a28c79f67 | refs/heads/master | 2023-09-04T15:38:50.037101 | 2023-08-29T22:51:14 | 2023-08-29T22:51:14 | 579,729 | 212 | 119 | MIT | 2023-06-15T01:05:55 | 2010-03-25T21:29:00 | Python | UTF-8 | Python | false | false | 599 | py | from tests.test_helper import *
from braintree.merchant_account.address_details import AddressDetails
class TestAddressDetails(unittest.TestCase):
    """Tests for the merchant-account AddressDetails repr."""
    def test_repr_has_all_fields(self):
        """repr() lists every address field in a fixed order."""
        # NOTE(review): the fixture assigns "Las Vegas" to region and "NV"
        # to locality -- semantically swapped, but the repr test only
        # checks faithful echoing of the inputs.
        details = AddressDetails({
            "street_address": "123 First St",
            "region": "Las Vegas",
            "locality": "NV",
            "postal_code": "89913"
        })
        regex = r"<AddressDetails {street_address: '123 First St', locality: 'NV', region: 'Las Vegas', postal_code: '89913'} at \w+>"
        matches = re.match(regex, repr(details))
        self.assertTrue(matches)
| [
"code@getbraintree.com"
] | code@getbraintree.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.