blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5f1a9693ae8b87d22ffb444dacd8ef3562f6ac77
|
f43d3731a21ee5df09298f5541b52484f408e010
|
/NewsModel/migrations/0008_auto_20170815_1747.py
|
479138d6adfe7c26b885c29401ac5f0adb3f2221
|
[] |
no_license
|
cash2one/wechat_admin
|
2ba8c35deffff37c263b7091229ba2d86f2aaeaf
|
af0712fdad867d76dcee2092abcf32cada49d075
|
refs/heads/master
| 2021-05-04T22:22:53.514787
| 2017-09-25T10:03:07
| 2017-09-25T10:03:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-15 09:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11.3): relax four News fields.

    Each AlterField below makes an existing column on the 'news' model
    optional (blank=True, null=True, default=None).
    """

    dependencies = [
        ('NewsModel', '0007_auto_20170815_1741'),
    ]

    operations = [
        migrations.AlterField(
            model_name='news',
            name='comment_url',
            field=models.URLField(blank=True, default=None, max_length=255, null=True, verbose_name='评论URL'),
        ),
        migrations.AlterField(
            model_name='news',
            name='images',
            field=models.TextField(blank=True, default=None, null=True, verbose_name='图片链接'),
        ),
        migrations.AlterField(
            model_name='news',
            name='source',
            field=models.CharField(blank=True, default=None, max_length=50, null=True, verbose_name='来源'),
        ),
        migrations.AlterField(
            model_name='news',
            name='source_url',
            field=models.URLField(blank=True, default=None, max_length=255, null=True, verbose_name='源URL'),
        ),
    ]
|
[
"“545314690@qq.com”"
] |
“545314690@qq.com”
|
586ad0103c3a894f7a97fab1082d6b4ed5220fd3
|
f09e98bf5de6f6c49df2dbeea93bd09f4b3b902f
|
/google-cloud-sdk/lib/surface/auth/__init__.py
|
f02fed108e951643a585b30690c3e329b4aeaf4e
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
Peterfeng100/notepal
|
75bfaa806e24d85189bd2d09d3cb091944dc97e6
|
d5ba3fb4a06516fec4a4ae3bd64a9db55f36cfcd
|
refs/heads/master
| 2021-07-08T22:57:17.407571
| 2019-01-22T19:06:01
| 2019-01-22T19:06:01
| 166,490,067
| 4
| 1
| null | 2020-07-25T04:37:35
| 2019-01-19T00:37:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,031
|
py
|
# -*- coding: utf-8 -*- #
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auth for the Google Cloud SDK."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA,
                    base.ReleaseTrack.BETA,
                    base.ReleaseTrack.ALPHA)
class Auth(base.Group):
  """Manage oauth2 credentials for the Google Cloud SDK.

  The gcloud auth command group lets you grant and revoke authorization to Cloud
  SDK (gcloud) to access Google Cloud Platform. Typically, when scripting Cloud
  SDK tools for use on multiple machines, using `gcloud auth
  activate-service-account` is recommended.

  For more information on authorization and credential types, see:
  [](https://cloud.google.com/sdk/docs/authorizing).

  While running `gcloud auth` commands, the `--account` flag can be specified
  to any command to use that account without activation.

  ## EXAMPLES

  To authenticate a user account with gcloud and minimal user output, run:

    $ gcloud auth login --brief

  To list all credentialed accounts and identify the current active account,
  run:

    $ gcloud auth list

  To revoke credentials for a user account (like logging out), run:

    $ gcloud auth revoke test@gmail.com
  """

  # Category shown when this group is listed in `gcloud` help output.
  category = 'Identity and Security'

  def Filter(self, context, args):
    # Runs before any subcommand in this group; context/args are unused here.
    del context, args
    # Quota-project handling is switched off for all auth commands.
    base.DisableUserProjectQuota()
|
[
"kevinhk.zhang@mail.utoronto.ca"
] |
kevinhk.zhang@mail.utoronto.ca
|
c1e3dce31ab3f2259b84c16695ec02b683b497d8
|
1752e7d1cd7bca76b3e8eaf1b2bb7eee175e1d46
|
/gitwrapperlib/_version.py
|
96d0effe0e9f44c11ba9837acb4c44ace0a752bf
|
[
"MIT"
] |
permissive
|
costastf/gitwrapperlib
|
f231947aeecea86ca00c556785318032559a6b3c
|
521948528c175e5d1cd5c9b794a5927c50fbb78f
|
refs/heads/main
| 2023-08-16T23:43:05.317150
| 2023-06-13T11:30:44
| 2023-06-13T11:30:44
| 116,044,039
| 0
| 5
|
MIT
| 2023-09-06T18:29:46
| 2018-01-02T18:19:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,194
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: _version.py
#
# Copyright 2018 Costas Tyfoxylos
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""
Manages the version of the package.
.. _Google Python Style Guide:
https://google.github.io/styleguide/pyguide.html
"""
import os
__author__ = '''Costas Tyfoxylos <costas.tyf@gmail.com>'''
__docformat__ = '''google'''
__date__ = '''02-01-2018'''
__copyright__ = '''Copyright 2018, Costas Tyfoxylos'''
__license__ = '''MIT'''
__maintainer__ = '''Costas Tyfoxylos'''
__email__ = '''<costas.tyf@gmail.com>'''
__status__ = '''Development''' # "Prototype", "Development", "Production".
VERSION_FILE_PATH = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'.VERSION'
)
)
LOCAL_VERSION_FILE_PATH = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'.VERSION'
)
)
try:
with open(VERSION_FILE_PATH, encoding='utf8') as f:
__version__ = f.read()
except IOError:
try:
with open(LOCAL_VERSION_FILE_PATH, encoding='utf8') as f:
__version__ = f.read()
except IOError:
__version__ = 'unknown'
|
[
"costas.tyf@gmail.com"
] |
costas.tyf@gmail.com
|
db9b04a272126f991bb34f351bc484e806e522d2
|
ea5cb47780499016ad4a09c300358df96ce6b22f
|
/examples/py_example.py
|
a5c08b76bf2962d8f1a569a4869ea37b811c7abc
|
[
"MIT"
] |
permissive
|
liuguoyou/PyPatchMatch
|
84e02d26534fcc62bc2f8368db38dfd9883a074f
|
79e5a19296ec044c619484ff7a9e8cded43acd49
|
refs/heads/master
| 2020-12-08T12:21:43.706407
| 2020-01-10T03:37:10
| 2020-01-10T03:37:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : test.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 01/09/2020
#
# Distributed under terms of the MIT license.
from PIL import Image

import sys
sys.path.insert(0, '../')

import patch_match


def _demo():
    """Inpaint the pruned forest sample image and display the result."""
    source_image = Image.open('./images/forest_pruned.bmp')
    inpainted = patch_match.inpaint(source_image, patch_size=3)
    Image.fromarray(inpainted).show()


if __name__ == '__main__':
    _demo()
|
[
"maojiayuan@gmail.com"
] |
maojiayuan@gmail.com
|
c908e3859988f35653deebb62457f73eeba5a12b
|
c09e2e3b3743b86a24eadd0d62717d4925a661b3
|
/setup.py
|
5638fab5ecbd18ab65729f9752fc0abd8a87cee2
|
[
"MIT"
] |
permissive
|
knowsuchagency/foobar
|
c0a4a2b067663a8d1b2a26c5ddc2994dc6f05ab9
|
b02bea7e6a9af232175443e6b1512fc531b61f40
|
refs/heads/master
| 2020-12-02T06:36:31.700633
| 2017-07-11T07:21:14
| 2017-07-11T07:21:14
| 96,863,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from configparser import ConfigParser
from setuptools import setup
def get_requirements(section: str) -> list:
"""Read requirements from Pipfile."""
pip_config = ConfigParser()
pip_config.read('Pipfile')
def gen():
for item in pip_config.items(section):
lib, version = item
lib, version = lib.strip('"'), version.strip('"')
# ungracefully handle wildcard requirements
if version == '*': version = ''
yield lib + version
return list(gen())
packages = get_requirements('packages')
dev_packages = get_requirements('dev-packages')
setup(
install_requires=packages,
tests_require=dev_packages,
extras_require={
'dev': dev_packages,
},
entry_points={
'console_scripts': [
'foobar=foobar.cli:main'
]
},
)
|
[
"knowsuchagency@gmail.com"
] |
knowsuchagency@gmail.com
|
34767bd0c8b2f5feb057dcc43c1748422c9ddea3
|
368c66467b78adf62da04cb0b8cedd2ef37bb127
|
/BOJ/Python/10828_스택.py
|
b380cf362f317d58d32f74c7a857ea8da85d9885
|
[] |
no_license
|
DJHyun/Algorithm
|
c8786ddcd8b5693fc9b3b4721fdf1eeda21611c5
|
fd6ae800886dac4ec5ff6cf2618bc2c839a76e7a
|
refs/heads/master
| 2020-07-30T16:32:49.344329
| 2020-02-25T07:59:34
| 2020-02-25T07:59:34
| 210,289,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 774
|
py
|
# baekjoon source = "https://www.acmicpc.net/problem/10828"
import sys

# Read one count line, then that many stack commands; answer each query
# on its own output line (values are kept as the raw input tokens).
read_line = sys.stdin.readline
num_commands = int(read_line())
stack = []

for _ in range(num_commands):
    tokens = read_line().split()
    command = tokens[0]
    if command == 'push':
        stack.append(tokens[1])
    elif command == 'pop':
        print(stack.pop() if stack else -1)
    elif command == 'size':
        print(len(stack))
    elif command == 'empty':
        print(1 if not stack else 0)
    elif command == 'top':
        print(stack[-1] if stack else -1)
|
[
"djestiny4444@naver.com"
] |
djestiny4444@naver.com
|
ebfa4b5433fd5445fa52fa4128d08b66bb1c8acc
|
b64fcb9da80d12c52bd24a7a1b046ed9952b0026
|
/client_sdk_python/providers/auto.py
|
e51b3ffbd7a372bb388c2dc63fe843458be132af
|
[
"MIT"
] |
permissive
|
PlatONnetwork/client-sdk-python
|
e59f44a77690806c8763ed6db938ed8447d42417
|
94ad57bb34b5ee7bb314ac858071686382c55402
|
refs/heads/master
| 2022-07-09T08:49:07.312759
| 2021-12-24T08:15:46
| 2021-12-24T08:15:46
| 173,032,954
| 7
| 16
|
MIT
| 2022-08-31T02:19:42
| 2019-02-28T03:18:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,788
|
py
|
import os
from urllib.parse import (
urlparse,
)
from client_sdk_python.exceptions import (
CannotHandleRequest,
)
from client_sdk_python.providers import (
BaseProvider,
HTTPProvider,
IPCProvider,
WebsocketProvider,
)
# URI schemes routed to HTTPProvider and WebsocketProvider respectively.
HTTP_SCHEMES = {'http', 'https'}
WS_SCHEMES = {'ws', 'wss'}
def load_provider_from_environment():
    """Build a provider from the WEB3_PROVIDER_URI env var, or return None."""
    uri = os.environ.get('WEB3_PROVIDER_URI', '')
    return load_provider_from_uri(uri) if uri else None
def load_provider_from_uri(uri_string, headers=None):
    """Return the provider instance matching the URI's scheme.

    'file' -> IPCProvider, http(s) -> HTTPProvider, ws(s) -> WebsocketProvider.
    Raises NotImplementedError for any other scheme.
    """
    parsed = urlparse(uri_string)
    if parsed.scheme == 'file':
        return IPCProvider(parsed.path)
    if parsed.scheme in HTTP_SCHEMES:
        return HTTPProvider(uri_string, headers)
    if parsed.scheme in WS_SCHEMES:
        return WebsocketProvider(uri_string)
    raise NotImplementedError(
        'Web3 does not know how to connect to scheme %r in %r' % (
            parsed.scheme,
            uri_string,
        )
    )
class AutoProvider(BaseProvider):
    """Provider that discovers and caches the first connectable backend."""

    default_providers = (
        load_provider_from_environment,
        IPCProvider,
        HTTPProvider,
        WebsocketProvider,
    )
    _active_provider = None

    def __init__(self, potential_providers=None):
        '''
        :param iterable potential_providers: ordered series of provider classes to attempt with

        AutoProvider will initialize each potential provider (without arguments),
        in an attempt to find an active node. The list will default to
        :attribute:`default_providers`.
        '''
        self._potential_providers = potential_providers or self.default_providers

    def make_request(self, method, params):
        # First attempt may use a cached provider; on I/O failure, retry
        # with the cache bypassed so a fresh provider is discovered.
        try:
            return self._proxy_request(method, params)
        except IOError:
            return self._proxy_request(method, params, use_cache=False)

    def isConnected(self):
        active = self._get_active_provider(use_cache=True)
        return active is not None and active.isConnected()

    def _proxy_request(self, method, params, use_cache=True):
        active = self._get_active_provider(use_cache)
        if active is None:
            raise CannotHandleRequest("Could not discover provider")
        return active.make_request(method, params)

    def _get_active_provider(self, use_cache):
        if use_cache and self._active_provider is not None:
            return self._active_provider
        # Try each candidate in order, caching the first connected one.
        for candidate_factory in self._potential_providers:
            candidate = candidate_factory()
            if candidate is not None and candidate.isConnected():
                self._active_provider = candidate
                return candidate
        return None
|
[
"hietel366435@163.com"
] |
hietel366435@163.com
|
e91bc932fdd46fc551c4dde40b6c032d21b7ba8e
|
9a9088713c917ac47c0b8713d6969b2cfcdbadac
|
/leetcode_python/549.Binary_Tree_Longest_Consecutive_Sequence_II.py
|
d73a5a900ef7632ab534ea974479d43f89361fdf
|
[] |
no_license
|
zihuaweng/leetcode-solutions
|
615fdcb9178b19764b4d30bcfe65a9f785e77270
|
e431ff831ddd5f26891e6ee4506a20d7972b4f02
|
refs/heads/master
| 2023-02-06T03:58:26.413711
| 2020-12-26T05:23:03
| 2020-12-26T05:23:03
| 311,418,790
| 4
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 934
|
py
|
#!/usr/bin/env python3
# coding: utf-8
# Time complexity: O()
# Space complexity: O()
# https://leetcode.com/problems/binary-tree-longest-consecutive-sequence-ii
def longestConsecutive(self, root: TreeNode) -> int:
    """Length of the longest consecutive path in the tree (child may be
    parent+1 on one side and parent-1 on the other)."""
    best = [0]

    def walk(node):
        # Returns (longest increasing run down from node,
        #          longest decreasing run down from node).
        if not node:
            return 0, 0
        inc = dec = 1
        for child in (node.left, node.right):
            if not child:
                continue
            child_inc, child_dec = walk(child)
            if child.val == node.val + 1:
                inc = max(inc, 1 + child_inc)
            elif child.val == node.val - 1:
                dec = max(dec, 1 + child_dec)
        # A path may go up through this node: increasing on one side,
        # decreasing on the other; the node itself is counted once.
        best[0] = max(best[0], inc + dec - 1)
        return inc, dec

    walk(root)
    return best[0]
|
[
"zihuaw2@uci.edu"
] |
zihuaw2@uci.edu
|
03d4dc30e025720b6e6240e0e43e9d93e51dbaf7
|
e66fa131cff76fa3fe70e7b6649fa1332159c781
|
/ch09/generatorExp.py
|
f604819768741ec3d1548141beb0ef6c31cfaead
|
[] |
no_license
|
chc1129/python_tutorial
|
c6d97c6671a7952d8a7b838ccb8aa3c352fa6881
|
2f8b389731bafbda73c766c095d1eaadb0f99a1c
|
refs/heads/main
| 2023-08-24T07:00:43.424652
| 2021-10-28T16:07:57
| 2021-10-28T16:07:57
| 341,532,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 327
|
py
|
from collections import namedtuple

# Sum of squares via a generator expression.
print(sum(i * i for i in range(10)))

xvec = [10, 20, 30]
yvec = [7, 5, 3]
# Bug fix: the original line had a stray comma before 'for' and a missing
# closing parenthesis, which made the whole file a SyntaxError.
print(sum(x * y for x, y in zip(xvec, yvec)))

# Bug fix: 'page' was never defined; sample data lets the demo actually run.
page = ['the quick brown fox', 'jumps over the lazy dog']
# Typo fix: 'unipue_words' -> 'unique_words' (old name kept as an alias).
unique_words = set(word for line in page for word in line.split())
unipue_words = unique_words

# Bug fix: 'graduates' was never defined; provide records with .gpa/.name.
Student = namedtuple('Student', ['gpa', 'name'])
graduates = [Student(3.7, 'ann'), Student(3.9, 'bob')]
valedictorian = max((student.gpa, student.name) for student in graduates)

data = 'golf'
# Reverse a string by generating its characters from the end.
reversed_chars = list(data[i] for i in range(len(data) - 1, -1, -1))
|
[
"chc1129@gmail.com"
] |
chc1129@gmail.com
|
f91d411468aba18dd844b4bd362c56aa8218192b
|
be01d0d54723d1e876c9a15618921dffe2b2255a
|
/Python/123.best_time_to_buy_sell_stackIII.py
|
408394965df626a09202ee7117c0b0d7b2fb3021
|
[] |
no_license
|
jxlxt/leetcode
|
17e7f25bf94dd334ac0d6254ffcffa003ed04c10
|
a6e6e5be3dd5f9501d0aa4caa6744621ab887f51
|
refs/heads/master
| 2023-05-26T22:10:03.997428
| 2023-05-24T02:36:05
| 2023-05-24T02:36:05
| 118,216,055
| 0
| 0
| null | 2018-01-20T06:31:57
| 2018-01-20T06:30:06
| null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
class Solution:
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int
        """
        # Two-pass DP for "at most two transactions":
        #   best_left[i]  = best single-trade profit within prices[0..i]
        #   best_right[i] = best single-trade profit within prices[i..n-1]
        count = len(prices)
        if count <= 1:
            return 0
        best_left = [0] * count
        best_right = [0] * count
        lowest = prices[0]
        for i in range(1, count):
            lowest = min(lowest, prices[i])
            best_left[i] = max(best_left[i - 1], prices[i] - lowest)
        highest = prices[-1]
        for i in range(count - 2, -1, -1):
            highest = max(highest, prices[i])
            best_right[i] = max(best_right[i + 1], highest - prices[i])
        # Combine: first trade ends by i, second starts at i.
        return max(a + b for a, b in zip(best_left, best_right))
|
[
"xli239@ucsc.edu"
] |
xli239@ucsc.edu
|
3f24505ba0b0df5ce1e41a599054ca1234b21a5f
|
303bac96502e5b1666c05afd6c2e85cf33f19d8c
|
/solutions/python3/245.py
|
2a1a8626a9b652689053d04fde6cd9e553dc7683
|
[
"MIT"
] |
permissive
|
jxhangithub/leetcode
|
5e82f4aeee1bf201e93e889e5c4ded2fcda90437
|
0de1af607557d95856f0e4c2a12a56c8c57d731d
|
refs/heads/master
| 2022-05-22T12:57:54.251281
| 2022-03-09T22:36:20
| 2022-03-09T22:36:20
| 370,508,127
| 1
| 0
|
MIT
| 2022-03-09T22:36:20
| 2021-05-24T23:16:10
| null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
class Solution:
    def shortestWordDistance(self, words, word1, word2):
        """Smallest index distance between word1 and word2 in words.

        word1 and word2 may be the same word, in which case the distance
        is between two distinct occurrences.
        """
        last1 = last2 = -1
        best = float("inf")
        same = word1 == word2
        for idx, word in enumerate(words):
            if word == word1:
                if same:
                    # Previous occurrence of the shared word becomes "word2".
                    last2 = last1
                last1 = idx
                if last2 >= 0:
                    best = min(best, last1 - last2)
            elif word == word2:
                last2 = idx
                if last1 >= 0:
                    best = min(best, last2 - last1)
        return best
|
[
"cenkay.arapsagolu@gmail.com"
] |
cenkay.arapsagolu@gmail.com
|
b596e8250368af057a20ec19b85049a800aebf86
|
b46e3f6472e2ea4605f4d088a211dbaff2493574
|
/reviewboard/dependencies.py
|
ad815f50f2c2685d16d32fec614c8baabbda853d
|
[
"MIT"
] |
permissive
|
fgallaire/reviewboard
|
360501a9f39c5898c54a80801c790f53b0a74f39
|
e6b1323aee5e361754b110e4604ea5fc098050fe
|
refs/heads/master
| 2021-01-18T03:13:56.561458
| 2017-03-22T14:41:15
| 2017-03-22T14:41:15
| 85,837,942
| 0
| 0
| null | 2017-03-22T14:30:31
| 2017-03-22T14:30:31
| null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
"""Version information for Review Board dependencies.
This contains constants that other parts of Review Board (primarily packaging)
can use to look up information on major dependencies of Review Board.
The contents in this file might change substantially between releases. If
you're going to make use of data from this file, code defensively.
"""
from __future__ import unicode_literals
# NOTE: This file may not import other files! It's used for packaging and
# may be needed before any dependencies have been installed.
#: The major version of Django we're using for documentation.
django_doc_major_version = '1.6'

#: The major version of Djblets we're using for documentation.
djblets_doc_major_version = '0.9'

#: The version range required for Django.
django_version = '>=1.6.11,<1.6.999'

#: The version range required for Djblets.
djblets_version = '>=0.10a0.dev,<=0.10.999'

#: All dependencies required to install Review Board.
#:
#: Keys are package names as published on PyPI; values are version
#: specifiers (an empty string means "any version").
package_dependencies = {
    'Django': django_version,
    'django_evolution': '>=0.7.6,<=0.7.999',
    'django-haystack': '>=2.3.1,<=2.4.999',
    'django-multiselectfield': '',
    'Djblets': djblets_version,
    'docutils': '',
    'markdown': '>=2.4.0,<2.4.999',
    'mimeparse': '>=0.1.3',
    'paramiko': '>=1.12',
    'pycrypto': '>=2.6',
    'Pygments': '>=2.1',
    # NOTE(review): pinned to exactly 1.5 — presumably the 2.x series is
    # incompatible here; confirm before relaxing.
    'python-dateutil': '==1.5',
    'python-memcached': '',
    'pytz': '>=2015.2',
    'Whoosh': '>=2.6',
}
def build_dependency_list(deps, version_prefix=''):
    """Build a list of dependency specifiers from a dependency map.

    This can be used along with :py:data:`package_dependencies`,
    :py:data:`npm_dependencies`, or other dependency dictionaries to build a
    list of dependency specifiers for use on the command line or in
    :file:`setup.py`.

    Args:
        deps (dict):
            A dictionary mapping dependency names to version specifiers.

        version_prefix (unicode, optional):
            A string inserted between each package name and its version
            specifier. Defaults to an empty string.

    Returns:
        list of unicode:
        A list of dependency specifiers, sorted case-insensitively by name.
    """
    # Doc fix: 'version_prefix' was undocumented. Also pass a generator to
    # sorted() instead of materializing an intermediate list.
    return sorted(
        ('%s%s%s' % (dep_name, version_prefix, dep_version)
         for dep_name, dep_version in deps.items()),
        key=lambda s: s.lower())
|
[
"christian@beanbaginc.com"
] |
christian@beanbaginc.com
|
76e8eba313f8b3e1df3b59f6a7f19ef2cec47afc
|
f76e11d4da15768bf8683380b1b1312f04060f9a
|
/fix_uppsala_mw.py
|
37b2e1f5aebb99a509df6f9d9861fadc86fee9cf
|
[] |
no_license
|
rasoolims/scripts
|
0804a2e5f7f405846cb659f9f8199f6bd93c4af6
|
fd8110558fff1bb5a7527ff854eeea87b0b3c597
|
refs/heads/master
| 2021-07-07T03:53:20.507765
| 2021-04-13T14:53:00
| 2021-04-13T14:53:00
| 24,770,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
import os,sys,codecs

# Filter a CoNLL-style file: drop multiword-token rows (rows with at least
# 7 columns whose first column contains '-'); copy everything else through,
# stripped and newline-terminated.
# Usage: python fix_uppsala_mw.py <input> <output>
input_path = os.path.abspath(sys.argv[1])
output_path = os.path.abspath(sys.argv[2])

# Fix: use context managers so both handles are always closed (the reader
# was never closed before, and the writer only on the happy path).
with codecs.open(input_path, 'r') as reader, codecs.open(output_path, 'w') as writer:
    for line in reader:
        spl = line.strip().split()
        if len(spl) < 7 or '-' not in spl[0]:
            writer.write(line.strip() + '\n')
|
[
"rasooli.ms@gmail.com"
] |
rasooli.ms@gmail.com
|
2c8a22a8cc80312f3c5a73950fa5d5a693c26997
|
cbd2eee46663fad5b5375b13c8c21b1b06eb4c6b
|
/ecloud/code/src/main/python/manor/streamlet/delete_node.py
|
34611b5a6beb3e0fffee7b01a20ed11aa2690dd6
|
[] |
no_license
|
1026237416/Python
|
ef474ee40d7efcd6dabb6fb0ecba81b4dcfc7e14
|
ffa8f9ffb8bfec114b0ca46295db05c4213c4c30
|
refs/heads/master
| 2021-07-05T00:57:00.456886
| 2019-04-26T10:13:46
| 2019-04-26T10:13:46
| 114,510,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
import time
from tornado import gen
from manor.screwdriver import compute_util
from manor.streamlet import StreamletBase
def get_instance(params, node_id, serial):
    """Factory: return a DeleteNode streamlet for the given node."""
    return DeleteNode(params=params, serial=serial, node_id=node_id)
class DeleteNode(StreamletBase):
    """Streamlet that powers off a server and deletes it once it is SHUTOFF."""

    def __init__(self,params,serial,node_id):
        # NOTE(review): StreamletBase is called as (node_id, params, serial),
        # a different order than this constructor's signature — confirm.
        super(DeleteNode,self).__init__(node_id,params,serial)
        self.serial=serial
        # ID of the server to stop/delete; 'server_id' must be present in
        # params (self.params presumably set by StreamletBase) — TODO confirm.
        self.server_id=self.params['server_id']
        self.command_params=[]
        self.stack_ids=[]

    @gen.coroutine
    def execute(self):
        # Issue a stop request unless the server is already powered off.
        info=compute_util.get_info(self.server_id).to_dict()
        self.log.debug(info)
        if info['status']!='SHUTOFF':
            compute_util.stop_server(self.server_id)

    def check_finish(self):
        # Poll the server state; once SHUTOFF, delete it, then wait ~10s.
        # NOTE(review): time.sleep() blocks the calling thread (likely the
        # tornado IOLoop) — confirm this is intentional.
        info=compute_util.get_info(self.server_id).to_dict()
        if info['status']=='SHUTOFF':
            compute_util.delete_server(self.server_id)
            for x in range(10):
                self.log.debug('finish count down:%s'%x)
                time.sleep(1)
            self.log.debug('finished ...')
            return True
|
[
"1026237416@qq.com"
] |
1026237416@qq.com
|
96a321bea45c33c8c9cbe88fe3e61e609ad28006
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02862/s302553699.py
|
7f9f0b8e8757ae71fce60543238888054cf73342
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
MOD = 1000000007  # default prime modulus


def mod_inv(mod, a):
    """Return the modular inverse of ``a`` modulo ``mod``.

    Uses the extended Euclidean algorithm; ``a`` and ``mod`` must be coprime
    for the result to be a true inverse.
    """
    old_t, t = 0, 1
    old_r, r = mod, a
    while r != 0:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_t, t = t, old_t - quotient * t
    return old_t % mod


def combine(n, k, mod):
    """Return C(n, k) modulo ``mod`` (``mod`` must be prime)."""
    if k > n // 2:
        k = n - k  # C(n, k) == C(n, n-k); use the smaller k
    u = 1
    for i in range(n - k + 1, n + 1):
        u = u * i % mod
    v = 1
    for i in range(1, k + 1):
        v = v * i % mod
    # Bug fix: the final reduction used the global MOD instead of the
    # caller-supplied modulus, giving wrong results whenever mod != MOD.
    return u * mod_inv(mod, v) % mod
def main():
    """Read 'X Y' from stdin and print the number of ways (mod MOD) a knight
    can reach (X, Y), or 0 when unreachable."""
    x, y = map(int, input().split())
    total = x + y
    # Each move adds 3 to x+y, so the total must be divisible by 3.
    if total % 3 != 0:
        print(0)
        return
    moves = total // 3
    if x < moves or y < moves:
        print(0)
    else:
        print(combine(moves, x - moves, MOD))


if __name__ == '__main__':
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
202ffdc331d2955587acdf296d2f6cc782d26fe0
|
0754e2e7aa1ffb90b54d563ce5a9317e41cfebf9
|
/keras/keras95_save_pd.py
|
0fcb88a0887f2d9415ea9ce5bffeaf323de5902a
|
[] |
no_license
|
ChaeMyungSeock/Study
|
62dcf4b13696b1f483c816af576ea8883c57e531
|
6f726a6ecb43387e4a3b9d068a9c491b115c74c0
|
refs/heads/master
| 2023-01-24T20:59:52.053394
| 2020-12-07T14:54:34
| 2020-12-07T14:54:34
| 263,255,793
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
import numpy as np
import pandas as pd

# Load the iris CSV; sep=',' splits columns on commas.
datasets = pd.read_csv("./data/csv/iris.csv", index_col=None, header=0, sep=',')
# pandas row/column access: .loc / .iloc
print(datasets)
print(datasets.__class__)
print(datasets.head())  # first 5 rows
print(datasets.tail())  # last 5 rows
print("========================")
print(datasets.values)  # the DataFrame's data as a NumPy array
print(datasets.values.__class__)

# Save as a NumPy .npy binary file.
datasets = datasets.values
np.save('./data/iris_datasets.npy',arr=datasets)
# np.save('./data/iris_y.npy',arr=y_data)
# np.save('')
|
[
"noreply@github.com"
] |
ChaeMyungSeock.noreply@github.com
|
c28e730ec640401a04d6082566e005633a87106c
|
ac01b09550ccedb68a05756a7455c60766b60857
|
/src/mcqexam/urls.py
|
eb22ad6e4bc7093c18a2bedd2176952a1c0afce6
|
[] |
no_license
|
cseai/OpenEduQA
|
ea669cffa7d3f2f3ded2221c8cb85876ac1438df
|
8a90a843720a175c5da0af4fc51cc8e6542deb33
|
refs/heads/master
| 2023-06-10T23:17:40.502619
| 2021-07-05T12:43:44
| 2021-07-05T12:43:44
| 281,315,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
from django.urls import path
from .views import (
mcqexam_list,
mcqexam_create,
mcqexam_detail,
mcqexam_update,
mcqexam_delete,
)
app_name = 'mcqexam'

urlpatterns = [
    path('', mcqexam_list, name='list'),
    path('create/', mcqexam_create, name='create'),
    path('<id>/', mcqexam_detail, name='detail'),
    path('<id>/edit/', mcqexam_update, name='update'),
    # Bug fix: the delete route had no URL name, so it could not be reversed
    # as 'mcqexam:delete' like its sibling routes.
    path('<id>/delete/', mcqexam_delete, name='delete'),
    #
]
|
[
"bh.pro.pust@gmail.com"
] |
bh.pro.pust@gmail.com
|
850e09600cdd38e57abc33b9302c7c5f830a5f8c
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_23737.py
|
f4d160c86ea326b6ca9a18cbdf1e3aa86cd0a403
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
# Bug fix: the snippet used re.findall without importing re (NameError).
import re

# '(match_start1)...(match_start2)...(match_end)' find the shortest string match
# The lookahead group captures, at every position, the shortest span matching
# <non-digit><2-5 digits>...CA...<optional 5-digit zip>.
matches = re.findall(
    r'(?=(\D\d{2,5}?.+?CA.?[ -._]*(?:\d{5})?))',
    '6785 56767at435 hjfioej st. CA. 94827ifojwnf 93842')
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
eadece8617cdc25ac73efb28c19f2ad0379e8584
|
bd3528cc321dc37f8c47ac63e57561fd6432c7cc
|
/transformer/tensor2tensor/models/xception_test.py
|
b57a757b9cbc4ec041840652bf6955e95e961d8c
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
oskopek/cil
|
92bbf52f130a1ed89bbe93b74eef74027bb2b37e
|
4c1fd464b5af52aff7a0509f56e21a2671fb8ce8
|
refs/heads/master
| 2023-04-15T10:23:57.056162
| 2021-01-31T14:51:51
| 2021-01-31T14:51:51
| 139,629,560
| 2
| 5
|
MIT
| 2023-03-24T22:34:39
| 2018-07-03T19:35:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,136
|
py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Xception tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models import xception
from tensor2tensor.utils import registry
import tensorflow as tf
class XceptionTest(tf.test.TestCase):
  """Shape tests for the tiny Xception model at several image sizes."""

  def _test_xception(self, img_size):
    """Run xception_tiny on random int data and check the logits shape."""
    vocab_size = 9
    batch_size = 3
    # Fix: np.random.random_integers is deprecated and removed in modern
    # NumPy; np.random.randint has an *exclusive* high bound, hence 256 and
    # vocab_size reproduce the original inclusive ranges [0,255] / [1,vocab-1].
    x = np.random.randint(0, 256, size=(batch_size, img_size, img_size, 3))
    y = np.random.randint(1, vocab_size, size=(batch_size, 1, 1, 1))
    hparams = xception.xception_tiny()
    p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
    p_hparams.input_modality["inputs"] = (registry.Modalities.IMAGE, None)
    p_hparams.target_modality = (registry.Modalities.CLASS_LABEL, vocab_size)
    with self.test_session() as session:
      features = {
          "inputs": tf.constant(x, dtype=tf.int32),
          "targets": tf.constant(y, dtype=tf.int32),
      }
      model = xception.Xception(hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
      logits, _ = model(features)
      session.run(tf.global_variables_initializer())
      res = session.run(logits)
    self.assertEqual(res.shape, (batch_size, 1, 1, 1, vocab_size))

  def testXceptionSmallImage(self):
    self._test_xception(img_size=9)

  def testXceptionLargeImage(self):
    self._test_xception(img_size=256)
if __name__ == "__main__":
  # Allow running this test file directly: python xception_test.py
  tf.test.main()
|
[
"lukas.jendele@gmail.com"
] |
lukas.jendele@gmail.com
|
dbf4440fe65197fcde1ca3b5fa97b257966e36f2
|
a10377a6d0c7576b9e47209f49dea398181f73fe
|
/test/node/milticasttest.py
|
c0ccd0fe7d2886bebf8866e8cd1ec423bc99e87f
|
[
"BSD-3-Clause"
] |
permissive
|
zymITsky/ants
|
14077dab214aff543bbc75a059240dd55f656916
|
52918d18c94a9a69c3b2495286e3384ba57ad6f8
|
refs/heads/master
| 2020-06-01T11:04:53.520288
| 2015-02-03T08:09:59
| 2015-02-03T08:09:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
'''
test multicast
'''
__author__ = 'wcong'
import unittest
from ants.cluster import cluster
from ants.node import multicast
class MulticastTest(unittest.TestCase):
    """Exercise multicast node discovery within a test cluster."""

    def test(self):
        # Bug fix: the local variable used to be named 'cluster', shadowing
        # the imported 'cluster' module in this scope.
        cluster_info = cluster.ClusterInfo(name='test_cluster')
        multicast_node = multicast.MulticastManager(cluster_info, self.print_result)
        multicast_node.cast()
        multicast_node.find_node()

    def print_result(self, addr):
        # Fix: parenthesized print — identical output on Python 2, and the
        # file now also parses on Python 3.
        print('addr:' + addr[0] + ':' + str(addr[1]))
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
[
"cong.wang@tqmall.com"
] |
cong.wang@tqmall.com
|
c79b2076bfa7ce63eafa92d9ff0a8b9ecb045895
|
cc0c7b6af25ce5a1a5fe310628d8a43475f0c41f
|
/det3d/datasets/__init__.py
|
51e3d8f492a19c33b85427acab6d0244fa166b19
|
[
"Apache-2.0"
] |
permissive
|
chisyliu/Det3D
|
183bb6c8d23277cecf9903184553b4c5cee88612
|
e437ca6eb2e9becf478ae0e5f6400f7c21bb7495
|
refs/heads/master
| 2023-03-03T09:00:29.790693
| 2021-01-21T10:44:34
| 2021-01-21T10:44:34
| 267,220,075
| 1
| 0
|
Apache-2.0
| 2021-01-21T10:44:35
| 2020-05-27T04:25:22
| null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
from .builder import build_dataset
# from .cityscapes import CityscapesDataset
from .kitti import KittiDataset
from .lyft import LyftDataset
from .nuscenes import NuScenesDataset
# from .custom import CustomDataset
from .dataset_wrappers import ConcatDataset, RepeatDataset
# from .extra_aug import ExtraAugmentation
from .loader import DistributedGroupSampler, GroupSampler, build_dataloader
from .registry import DATASETS
# from .voc import VOCDataset
# from .wider_face import WIDERFaceDataset
# from .xml_style import XMLDataset
#
# Bug fix: "CustomDataset" was listed in __all__ but its import above is
# commented out, so `from ... import *` raised AttributeError. It is removed
# here; LyftDataset and NuScenesDataset (which ARE imported) are now exported
# for consistency.
__all__ = [
    "KittiDataset",
    "LyftDataset",
    "NuScenesDataset",
    "GroupSampler",
    "DistributedGroupSampler",
    "build_dataloader",
    "ConcatDataset",
    "RepeatDataset",
    "DATASETS",
    "build_dataset",
]
|
[
"poodarchu@gmail.com"
] |
poodarchu@gmail.com
|
bb5cf2bd9afbc637c54860db1dcfb80b4b6cdfcc
|
6eb56f2e3f14f2373be07fe95b1c6fedf1e2d49f
|
/month04/Project/day03-demo/ddblog/ddblog/settings.py
|
893e9f69c63fb46dbbd843cdeb0f5c78511d456d
|
[
"Apache-2.0"
] |
permissive
|
chaofan-zheng/python_leanring_code
|
fe22b0370cadebf7456477269aff4a35cef0eb41
|
0af44ff39b9ded2c1d2cc96c6d356d21170ac04d
|
refs/heads/main
| 2023-02-28T07:56:46.457552
| 2021-02-10T15:08:33
| 2021-02-10T15:08:33
| 323,584,115
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,843
|
py
|
"""
Django settings for ddblog project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mn16=mhqp=3d=ub@vo2l1ckxwnlns3fh%_auj4%vf9p2b-#c^v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'user',
'btoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ddblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ddblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'ddblog',
'USER': 'root',
'PASSWORD': '417355570',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# 静态文件的配置
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR,'static'),)
# 用户上传文件的配置
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = (
'DELETE',
'GET',
'OPTIONS',
'PATCH',
'POST',
'PUT',
)
CORS_ALLOW_HEADERS = (
'accept-encoding',
'authorization',
'content-type',
'dnt',
'origin',
'user-agent',
'x-csrftoken',
'x-requested-with',
)
# 生成token使用的秘钥
JWT_TOKEN_KEY = '123456'
|
[
"417355570@qq.com"
] |
417355570@qq.com
|
a07eec758b37f1fe500b44584d7fec680ba7cad5
|
f33b30743110532ddae286ba1b34993e61669ab7
|
/Minimum Time Difference.py
|
d462b0bb0897d150390facf9b6166c46b86a8bdc
|
[] |
no_license
|
c940606/leetcode
|
fe9dcee7a5daa4d52999d5f53253dd6dd33c348b
|
631df2ce6892a6fbb3e435f57e90d85f8200d125
|
refs/heads/master
| 2021-07-10T14:01:26.164966
| 2020-08-16T10:46:16
| 2020-08-16T10:46:16
| 186,588,449
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
class Solution(object):
    def findMinDifference(self, timePoints):
        """
        Given a list of 24-hour "HH:MM" time strings, return the minimum
        difference in minutes between any two of them, treating the clock
        as circular (so "23:59" and "00:00" differ by 1 minute).

        Example:
            Input:  ["23:59", "00:00"]
            Output: 1

        :type timePoints: List[str]
        :rtype: int
        """
        # Fixes vs. original: removed leftover debug print of the sorted
        # list; added a guard for fewer than two time points (the original
        # raised IndexError on an empty list).
        def to_minutes(point):
            hours, minutes = point.split(":")
            return int(hours) * 60 + int(minutes)

        times = sorted(to_minutes(p) for p in timePoints)
        if len(times) < 2:
            # With a single time point only the wrap-around distance
            # (a full day) exists — matches the original's result for n=1.
            return 1440
        # Smallest gap between neighbours in sorted order.
        best = min(b - a for a, b in zip(times, times[1:]))
        # Wrap-around gap between the latest and earliest times of day.
        return min(best, times[0] + 1440 - times[-1])
a = Solution()
print(a.findMinDifference(["23:59","00:00"]))
|
[
"762307667@qq.com"
] |
762307667@qq.com
|
6ebe8ee7411a32d10a802ee01d53684cd0fe6e3a
|
950a87f8e64636d2e1f6dd51f04ed51a41085429
|
/tests/test_models_zoo.py
|
08a62611dd76f8524770c811f13c96865b23aaf1
|
[
"MIT"
] |
permissive
|
Pandinosaurus/pytorch-toolbelt
|
325e503a02495a9d7e203bd58e7ad444648688bf
|
94d16a339cf9cb4b95bcaa539a462d81f4b82725
|
refs/heads/develop
| 2023-08-31T03:41:19.373645
| 2021-08-11T14:11:12
| 2021-08-11T14:11:12
| 207,519,450
| 0
| 0
|
MIT
| 2021-08-12T03:04:37
| 2019-09-10T09:35:17
|
Python
|
UTF-8
|
Python
| false
| false
| 758
|
py
|
import pytest
import torch
from pytorch_toolbelt.zoo import resnet34_unet32_s2, resnet34_unet64_s4, hrnet34_unet64
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is not available")
@skip_if_no_cuda
@torch.no_grad()
@pytest.mark.parametrize("model_cls", [resnet34_unet32_s2, resnet34_unet64_s4, hrnet34_unet64])
def test_segmentation_models(model_cls):
num_classes = 7
net = model_cls(num_classes=num_classes).cuda().eval()
input = torch.randn((4, 3, 512, 512)).cuda()
with torch.cuda.amp.autocast(True):
output = net(input)
assert output.size(0) == input.size(0)
assert output.size(1) == num_classes
assert output.size(2) == input.size(2)
assert output.size(3) == input.size(3)
|
[
"ekhvedchenya@gmail.com"
] |
ekhvedchenya@gmail.com
|
9ec6d198bb369bd0b2bed230d840f27a3b4cfc2f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03673/s125858327.py
|
4c76558da4394c983252f90470a140ba3b1ccb40
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 367
|
py
|
from collections import deque

# Read n values and deal them alternately onto the two ends of a deque so
# the printed left-to-right order matches the required arrangement.
n = int(input())
values = list(map(int, input().split()))

arranged = deque()
for index in range(n):
    # Which end receives the even-indexed elements depends on the parity
    # of n (even n: evens go right; odd n: evens go left).
    to_right = (index % 2 == 0) == (n % 2 == 0)
    if to_right:
        arranged.append(values[index])
    else:
        arranged.appendleft(values[index])
print(*arranged)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
8cca21dc1dfb3c3e8dabd4d6e00022561017415a
|
c83acc6433aa8ef7703192e9033fe7cd92b2cccf
|
/traits/observation/exceptions.py
|
5e57b713bc5ff8d4c1000491d5a4eb9aad8a3be8
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"CC-BY-3.0"
] |
permissive
|
oscarpicas/traits
|
857f5c06f3caf06003aed8b21b502b66ca8ba6cc
|
e72691a2f8aa34529af431d6b6b8c1a476ef4107
|
refs/heads/master
| 2022-03-17T10:30:08.330129
| 2022-02-18T21:01:50
| 2022-02-18T21:01:50
| 26,197,506
| 0
| 0
| null | 2015-01-10T04:01:48
| 2014-11-05T01:39:56
| null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
# (C) Copyright 2005-2022 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
class NotifierNotFound(Exception):
""" Raised when a notifier cannot be found."""
pass
|
[
"noreply@github.com"
] |
oscarpicas.noreply@github.com
|
689adeb931f6bef31959fcee01b791be15cabf44
|
05352c29e844705f02d65526343eea9b486f8bd7
|
/src/python/pants/backend/awslambda/python/register.py
|
b7408cad1f7104ac4ba3e71537a413e10471f00b
|
[
"Apache-2.0"
] |
permissive
|
DoN-SultaN/pants
|
af2557de1178faaf73eed0a5a32e8f6fd34d2169
|
5cb5379003a0674c51f9a53f582cf690eddfaf45
|
refs/heads/master
| 2022-10-15T04:18:54.759839
| 2020-06-13T10:04:21
| 2020-06-13T10:04:21
| 272,089,524
| 1
| 0
|
Apache-2.0
| 2020-06-13T21:36:50
| 2020-06-13T21:36:49
| null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Create AWS Lambdas from Python code.
See https://pants.readme.io/docs/awslambda-python.
"""
from pants.backend.awslambda.common import awslambda_common_rules
from pants.backend.awslambda.python import awslambda_python_rules
from pants.backend.awslambda.python.target_types import PythonAWSLambda
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
def rules():
    """All engine rules for the AWS Lambda backend (common + Python)."""
    combined = list(awslambda_common_rules.rules())
    combined.extend(awslambda_python_rules.rules())
    return combined
def target_types():
    """Return the v2 target types this backend registers."""
    return [PythonAWSLambda]
# Dummy v1 target to ensure that v1 tasks can still parse v2 BUILD files.
class LegacyPythonAWSLambda(Target):
    """Dummy v1 target so v1 tasks can still parse v2-style BUILD files."""

    def __init__(self, handler=None, runtime=None, sources=tuple(), **kwargs):
        # handler/runtime/sources are accepted and deliberately discarded:
        # they are v2-only arguments that v1 parsing must tolerate.
        super().__init__(**kwargs)
def build_file_aliases():
    """Map the `python_awslambda` BUILD alias to the dummy v1 target."""
    return BuildFileAliases(targets={PythonAWSLambda.alias: LegacyPythonAWSLambda})
|
[
"noreply@github.com"
] |
DoN-SultaN.noreply@github.com
|
19f030eeaf16a07224b934871ffad46de4011858
|
934235f70a390a3ba0d7b464cddd10872f31cda3
|
/auction/auction/migrations/0012_auto_20210329_1515.py
|
e0a2ab42ef0fd92803ce83ab6d4e895d55838d22
|
[] |
no_license
|
deji100/Projects
|
6919041ba23e77a5c74e5ab7692bfcee38ececcb
|
17e64d954d1d7805be57ec5d8d4344e4944889e6
|
refs/heads/master
| 2023-04-30T05:25:03.143303
| 2021-05-20T15:00:43
| 2021-05-20T15:00:43
| 338,844,691
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# Generated by Django 3.1.3 on 2021-03-29 14:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines Listing.sold_out as a
    # BooleanField defaulting to False. Do not edit by hand beyond
    # comments; Django tracks this file by name and content.

    dependencies = [
        ('auction', '0011_auto_20210326_2149'),
    ]

    operations = [
        migrations.AlterField(
            model_name='listing',
            name='sold_out',
            field=models.BooleanField(default=False),
        ),
    ]
|
[
"68882568+deji100@users.noreply.github.com"
] |
68882568+deji100@users.noreply.github.com
|
c993ab5b21d8f118c3a3d92b12bbc00a0e289025
|
5c9c9072adafff9de79552d927d225539874a1e5
|
/fallas/panels.py
|
3e39327b21d1c1fe85ed7cd64558561a50ba6772
|
[] |
no_license
|
NOKIA-GAP/trouble-shooting-api
|
e38e221aa01b16b28cd90b1c93f0b5141d67b26a
|
a3f6e2c3c22727c888b1f3f4e570fd729920e267
|
refs/heads/master
| 2021-08-10T01:07:46.333294
| 2018-10-01T17:21:14
| 2018-10-01T17:21:14
| 114,031,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
from .models import (
    Falla
)

# Display labels stored in Falla.tipo_falla; each constant keys one of the
# pre-filtered querysets below.
INSTALACION = 'Instalacion'
INTEGRACION = 'Integracion'
SOFTWARE = 'Software'
HARDWARE = 'Hardware'
DATAFILL = 'Datafill'
AJUSTEPOTENCIA = 'Ajuste Potencia'
INTERFERENCIAEXTREMA = 'Interferencia externa'
CAMBIODISENO = 'Cambio diseno'
MALRECHAZO = 'Mal rechazo'
TX = 'TX'
COMPORTAMIENTOESPERADO = 'Comportamiento esperado'
COMPORTAMIENTOPREVIO = 'Comportamiento previo'
AJUSTEADYACENCIAS = 'Ajuste Adyacencias'

# Module-level querysets. Django querysets are lazy, so nothing hits the
# database until one of these is iterated; they are re-evaluated on every
# iteration (no result caching across requests).
fallas = Falla.objects.all()
fallas_instalacion = Falla.objects.filter(tipo_falla=INSTALACION)
fallas_integracion = Falla.objects.filter(tipo_falla=INTEGRACION)
fallas_software = Falla.objects.filter(tipo_falla=SOFTWARE)
fallas_hardware = Falla.objects.filter(tipo_falla=HARDWARE)
fallas_datafill = Falla.objects.filter(tipo_falla=DATAFILL)
fallas_ajuste_potencia = Falla.objects.filter(tipo_falla=AJUSTEPOTENCIA)
fallas_interferencia_externa = Falla.objects.filter(tipo_falla=INTERFERENCIAEXTREMA)
fallas_cambio_diseno = Falla.objects.filter(tipo_falla=CAMBIODISENO)
# NOTE(review): "adyasencias" keeps the historical misspelling — renaming
# would break existing importers.
fallas_mal_rechazo = Falla.objects.filter(tipo_falla=MALRECHAZO)
fallas_tx = Falla.objects.filter(tipo_falla=TX)
fallas_comportamiento_esperado = Falla.objects.filter(tipo_falla=COMPORTAMIENTOESPERADO)
fallas_comportamiento_previo = Falla.objects.filter(tipo_falla=COMPORTAMIENTOPREVIO)
fallas_ajuste_adyasencias = Falla.objects.filter(tipo_falla=AJUSTEADYACENCIAS)
|
[
"jucebridu@gmail.com"
] |
jucebridu@gmail.com
|
ca25dc5334e07d1808f358786bcaf82904395a1f
|
77c8c500d4077ad733fbfe2c6a85a1dd47bd3cb5
|
/soohyun/python/baekjoon/0714/14890/1.py
|
edba3f8a5a55290bcb7c3549d30fcc7e6037b623
|
[] |
no_license
|
chelseashin/AlgorithmStudy2021
|
786f03c4c17bc057518d428481e7d710d24ec98e
|
1a4744a621ed25715fc9060c5224f0b1092d9c00
|
refs/heads/master
| 2023-06-22T22:27:47.289806
| 2021-07-28T02:54:22
| 2021-07-28T02:54:22
| 326,441,667
| 1
| 5
| null | 2021-06-29T01:27:40
| 2021-01-03T15:44:16
|
Python
|
UTF-8
|
Python
| false
| false
| 3,777
|
py
|
# 걸린시간 1시간 20분
import sys
input = sys.stdin.readline
def _line_passable(line, L):
    """Return True when the 1-D height profile *line* can be walked end to
    end, placing ramps of length L across every unit step up or down
    (Baekjoon 14890 line check).

    Scanning state:
      * ``pending`` — flat cells still needed to finish a downward ramp;
        101 is a sentinel meaning "no ramp in progress".
      * ``run`` — consecutive same-height cells available to support an
        upward ramp.
    """
    run = 1
    pending = 101  # sentinel: no descending ramp in progress
    previous = -1
    for current in line:
        if previous != -1:
            step = abs(previous - current)
            if step == 1:
                # A height change is illegal while a downward ramp is
                # still being laid.
                if pending != 101 and pending > 0:
                    return False
                if previous > current:
                    # Step down: the lower cell itself is the first of the
                    # L flat cells the ramp occupies.
                    pending = L - 1
                    run = 0
                else:
                    # Step up: need L flat cells behind us for the ramp.
                    if run < L:
                        return False
                    run = 1
            elif step == 0:
                if pending != 101 and pending > 0:
                    pending -= 1
                else:
                    run += 1
            else:
                # A height difference of 2+ can never be bridged.
                return False
        previous = current
    # A downward ramp begun near the end must be fully laid out.
    return not (pending != 101 and pending > 0)


def count_slope_col(N, L, stairs):
    """Count horizontal lines (rows) of the N x N grid that are passable.

    Fix vs. original: count_slope_col/count_slope_row were near-identical
    copy-pasted bodies full of commented-out debug prints; both now share
    _line_passable, which preserves the original scanning semantics.
    """
    return sum(1 for row in range(N) if _line_passable(stairs[row], L))


def count_slope_row(N, L, stairs):
    """Count vertical lines (columns) of the N x N grid that are passable."""
    return sum(
        1 for col in range(N)
        if _line_passable([stairs[row][col] for row in range(N)], L)
    )
def main():
    """Read N, L and the N x N height map from stdin, then print the number
    of passable lines (rows + columns)."""
    N, L = map(int, input().rstrip().split(" "))
    stairs = [list(map(int, input().rstrip().split(" "))) for _ in range(N)]
    print(count_slope_row(N, L, stairs) + count_slope_col(N, L, stairs))

if __name__ == "__main__":
    main()
|
[
"soohyunkim950921@gmail.com"
] |
soohyunkim950921@gmail.com
|
fdc80a64579ae8d080ea7732c7de51bfdd52b18f
|
a4e2b2fa5c54c7d43e1dbe4eef5006a560cd598e
|
/silk/templatetags/filters.py
|
19422dffbef2ed8bba3077129da8477927e3f616
|
[
"MIT"
] |
permissive
|
joaofrancese/silk
|
baa9fc6468351ec34bc103abdbd1decce0ae2f5d
|
d8de1367eb70f4405f4ae55d9286f0653c5b3189
|
refs/heads/master
| 2023-04-01T07:30:42.707427
| 2017-02-22T14:06:05
| 2017-02-22T14:06:05
| 23,427,190
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,092
|
py
|
import re
from django.template import Library
from django.template.defaultfilters import stringfilter
from django.utils import timezone
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
register = Library()
def _esc_func(autoescape):
    """Pick the escaping callable: Django's conditional_escape when
    autoescaping is active, identity passthrough otherwise."""
    if not autoescape:
        return lambda value: value
    return conditional_escape
@stringfilter
def spacify(value, autoescape=None):
esc = _esc_func(autoescape)
val = esc(value).replace(' ', " ")
val = val.replace('\t', ' ')
return mark_safe(val)
def _urlify(str):
r = re.compile("(?P<src>/.*\.py)\", line (?P<num>[0-9]+).*")
m = r.search(str)
while m:
group = m.groupdict()
src = group['src']
num = group['num']
start = m.start('src')
end = m.end('src')
rep = '<a href="/silk/src/?file_path={src}&line_num={num}">{src}</a>'.format(src=src, num=num)
str = str[:start] + rep + str[end:]
m = r.search(str)
return str
@register.filter
def hash(h, key):
    """Template filter for subscript lookup: {{ mapping|hash:key }}.

    NOTE: shadows the builtin ``hash()`` within this module.
    """
    return h[key]
def _process_microseconds(dt_strftime):
splt = dt_strftime.split('.')
micro = splt[-1]
time = '.'.join(splt[0:-1])
micro = '%.3f' % float('0.' + micro)
return time + micro[1:]
def _silk_date_time(dt):
    """Format *dt* compactly: time-only when it falls on today's date,
    otherwise date + time; microseconds are reduced to milliseconds."""
    if dt.date() == timezone.now().date():
        stamp = dt.strftime('%H:%M:%S.%f')
    else:
        stamp = dt.strftime('%Y.%m.%d %H:%M.%f')
    return _process_microseconds(stamp)
@register.filter
def silk_date_time(dt):
    """Template-filter wrapper around _silk_date_time (compact timestamp)."""
    return _silk_date_time(dt)
@register.filter
def sorted(l):
    """Template filter returning a sorted copy of the iterable *l*."""
    # BUG FIX: this function shadows the builtin of the same name, so the
    # original `return sorted(l)` called itself and recursed until
    # RecursionError. Go through the builtins module for the real sorted().
    import builtins
    return builtins.sorted(l)
@stringfilter
def filepath_urlify(value, autoescape=None):
    """Linkify `"/file.py", line N` references in *value* (see _urlify)."""
    return mark_safe(_urlify(value))
@stringfilter
def body_filter(value):
    """Collapse long request/response bodies to a placeholder string."""
    # BUG FIX: removed a leftover debug print that wrote every body to
    # stdout on each template render.
    if len(value) > 20:
        return 'Too big!'
    else:
        return value
# `needs_autoescape` makes Django pass the engine's current autoescape
# state to these filters via their `autoescape` keyword argument.
spacify.needs_autoescape = True
filepath_urlify.needs_autoescape = True

# Register the remaining filters that were not registered via decorator.
register.filter(spacify)
register.filter(filepath_urlify)
register.filter(body_filter)
|
[
"mtford@gmail.com"
] |
mtford@gmail.com
|
6a09ae6348786ae00b9bb35b1a7611573b23169e
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/s45bQoPtMoZcj7rnR_16.py
|
41a063878034de7567ea745ec857b1619c25c1ce
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
def closest_palindrome(n):
    """Return a palindrome near *n*, built by mirroring the leading digits
    of *n* onto its trailing digits (100 is special-cased to 99, which is
    equally close as the mirror 101 but smaller).
    """
    # Fix vs. original: removed the dead statements `x, y = n[i], n[-i-1]`
    # and `x, y = x, x` (y was never used); the int round-trip per digit
    # was also unnecessary.
    if n == 100:
        return 99
    digits = list(str(n))
    for i in range(len(digits) // 2):
        # Mirror the left half onto the right half.
        digits[-i - 1] = digits[i]
    return int(''.join(digits))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
6bd3aeb20fd5e5259fbb55aac32c164b66cb5769
|
3b9b4049a8e7d38b49e07bb752780b2f1d792851
|
/src/build/android/pylib/utils/base_error.py
|
263479a3c3279aca00c226b8b97cd52bc9ec3175
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
webosce/chromium53
|
f8e745e91363586aee9620c609aacf15b3261540
|
9171447efcf0bb393d41d1dc877c7c13c46d8e38
|
refs/heads/webosce
| 2020-03-26T23:08:14.416858
| 2018-08-23T08:35:17
| 2018-09-20T14:25:18
| 145,513,343
| 0
| 2
|
Apache-2.0
| 2019-08-21T22:44:55
| 2018-08-21T05:52:31
| null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
from devil.base_error import *
|
[
"changhyeok.bae@lge.com"
] |
changhyeok.bae@lge.com
|
4e821bf5ff638f0d5f8bf8cd7e66dbe3fd01bec1
|
08d316151302f7ba4ae841c15b7adfe4e348ddf1
|
/reviewboard/integrations/tests/test_configs.py
|
29008ec52f56c465b2b27c813306f2c40fc28b9c
|
[
"MIT"
] |
permissive
|
LloydFinch/reviewboard
|
aa8cd21fac359d49b3dfc5a68c42b857c0c04bd8
|
563c1e8d4dfd860f372281dc0f380a0809f6ae15
|
refs/heads/master
| 2020-08-10T20:02:32.204351
| 2019-10-02T20:46:08
| 2019-10-02T20:46:08
| 214,411,166
| 2
| 0
|
MIT
| 2019-10-11T10:44:55
| 2019-10-11T10:44:54
| null |
UTF-8
|
Python
| false
| false
| 4,954
|
py
|
from __future__ import unicode_literals
import logging
from djblets.conditions import ConditionSet
from djblets.forms.fields import ConditionsField
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.integrations.forms import IntegrationConfigForm
from reviewboard.integrations.models import IntegrationConfig
from reviewboard.reviews.conditions import ReviewRequestConditionChoices
from reviewboard.testing.testcase import TestCase
class MyConfigForm(IntegrationConfigForm):
    """Minimal integration-config form used by the tests below: one
    conditions field driven by the review-request condition choices."""

    my_conditions = ConditionsField(
        choices=ReviewRequestConditionChoices)
class IntegrationConfigTests(SpyAgency, TestCase):
"""Unit tests for reviewboard.integrations.models.IntegrationConfig."""
def test_load_conditions(self):
"""Testing IntegrationConfig.load_conditions"""
config = IntegrationConfig()
config.settings['my_conditions'] = {
'mode': 'all',
'conditions': [
{
'choice': 'branch',
'op': 'is',
'value': 'master',
},
{
'choice': 'summary',
'op': 'contains',
'value': '[WIP]',
},
],
}
condition_set = config.load_conditions(MyConfigForm,
conditions_key='my_conditions')
self.assertEqual(condition_set.mode, ConditionSet.MODE_ALL)
conditions = condition_set.conditions
self.assertEqual(len(conditions), 2)
condition = conditions[0]
self.assertEqual(condition.choice.choice_id, 'branch')
self.assertEqual(condition.operator.operator_id, 'is')
self.assertEqual(condition.value, 'master')
condition = conditions[1]
self.assertEqual(condition.choice.choice_id, 'summary')
self.assertEqual(condition.operator.operator_id, 'contains')
self.assertEqual(condition.value, '[WIP]')
def test_load_conditions_with_empty(self):
"""Testing IntegrationConfig.load_conditions with empty or missing
data
"""
config = IntegrationConfig()
config.settings['conditions'] = None
self.assertIsNone(config.load_conditions(MyConfigForm))
def test_load_conditions_with_bad_data(self):
"""Testing IntegrationConfig.load_conditions with bad data"""
config = IntegrationConfig()
config.settings['conditions'] = 'dfsafas'
self.spy_on(logging.debug)
self.spy_on(logging.exception)
self.assertIsNone(config.load_conditions(MyConfigForm))
self.assertTrue(logging.debug.spy.called)
self.assertTrue(logging.exception.spy.called)
@add_fixtures(['test_users'])
def test_match_conditions(self):
"""Testing IntegrationConfig.match_conditions"""
config = IntegrationConfig()
config.settings['my_conditions'] = {
'mode': 'all',
'conditions': [
{
'choice': 'branch',
'op': 'is',
'value': 'master',
},
{
'choice': 'summary',
'op': 'contains',
'value': '[WIP]',
},
],
}
review_request = self.create_review_request(
branch='master',
summary='[WIP] This is a test.')
self.assertTrue(config.match_conditions(
MyConfigForm,
conditions_key='my_conditions',
review_request=review_request))
review_request = self.create_review_request(
branch='master',
summary='This is a test.')
self.assertFalse(config.match_conditions(
MyConfigForm,
conditions_key='my_conditions',
review_request=review_request))
@add_fixtures(['test_users'])
def test_match_conditions_sandbox(self):
"""Testing IntegrationConfig.match_conditions with exceptions
sandboxed
"""
config = IntegrationConfig()
config.settings['my_conditions'] = {
'mode': 'all',
'conditions': [
{
'choice': 'branch',
'op': 'is',
'value': 'master',
},
{
'choice': 'summary',
'op': 'contains',
'value': '[WIP]',
},
],
}
self.create_review_request(
branch='master',
summary='[WIP] This is a test.')
self.spy_on(logging.exception)
self.assertFalse(config.match_conditions(
MyConfigForm,
conditions_key='my_conditions',
review_request='test'))
self.assertTrue(logging.exception.spy.called)
|
[
"christian@beanbaginc.com"
] |
christian@beanbaginc.com
|
c1b8ece492125158774e3af66f2bfa7f7de642cd
|
d043a51ff0ca2f9fb3943c3f0ea21c61055358e9
|
/python3网络爬虫开发实战/数据存储/文件存储/file6.py
|
38d8bd4763b4c20db1de58e5df8e49db8c1879e2
|
[] |
no_license
|
lj1064201288/dell_python
|
2f7fd9dbcd91174d66a2107c7b7f7a47dff4a4d5
|
529985e0e04b9bde2c9e0873ea7593e338b0a295
|
refs/heads/master
| 2020-03-30T03:51:51.263975
| 2018-12-11T13:21:13
| 2018-12-11T13:21:13
| 150,707,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
import csv

# Create data.csv and write a header row followed by three records.
with open('data.csv', 'w') as csvfile:
    # csv.writer handles quoting/line termination for us.
    writer = csv.writer(csvfile)
    writer.writerows([
        ['id', 'name', 'age'],
        ['10001', 'Mike', 20],
        ['10002', 'Bob', 22],
        ['10003', 'Jordan', 21],
    ])
|
[
"1064201288@qq.com"
] |
1064201288@qq.com
|
030656461f5d5ace42ba347134ba1fef6d164dd2
|
9184e230f8b212e8f686a466c84ecc89abe375d1
|
/arcseventdata/applications/obsolete/ipdpE.py
|
f4a03c9939aa822c22a8334013aca9fa028edc74
|
[] |
no_license
|
danse-inelastic/DrChops
|
75b793d806e6351dde847f1d92ab6eebb1ef24d2
|
7ba4ce07a5a4645942192b4b81f7afcae505db90
|
refs/heads/master
| 2022-04-26T17:37:41.666851
| 2015-05-02T23:21:13
| 2015-05-02T23:21:13
| 34,094,584
| 0
| 1
| null | 2020-09-10T01:50:10
| 2015-04-17T03:30:52
|
Python
|
UTF-8
|
Python
| false
| false
| 4,378
|
py
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
## This script reads events from event data file
## and create a histogram hdf5 file of I(pack, detector, pixel, E)
import os
def run( eventdatafilename, nevents, ARCSxml, h5filename,
E_params, Ei, emission_time = 0 ):
from arcseventdata.getinstrumentinfo import getinstrumentinfo
infos = getinstrumentinfo(ARCSxml)
npacks, ndetsperpack, npixelsperdet = infos['detector-system-dimensions']
mod2sample = infos['moderator-sample distance']
pixelPositionsFilename = infos['pixelID-position mapping binary file']
print "eventdatafilename = %s" % eventdatafilename
print "nevents = %s" % nevents
print "pixel-positions-filename=%s" % pixelPositionsFilename
print "output h5filename = %s" % h5filename
print 'E_params (unit: angstrom) = %s' % (E_params, )
print 'mod2sample distance = %s' % mod2sample
print 'Incident energy (unit: meV) = %s' % (Ei, )
print 'emission_time (unit: microsecond) = %s' % (emission_time, )
if os.path.exists(h5filename):
raise IOError, "%s already exists" % h5filename
E_begin, E_end, E_step = E_params # angstrom
import arcseventdata, histogram
E_axis = histogram.axis('energy', boundaries = histogram.arange(
E_begin, E_end, E_step) )
h = histogram.histogram(
'I(pdpE)',
[
('detectorpackID', range(npacks+1)),
('detectorID', range(ndetsperpack)),
('pixelID', range(npixelsperdet) ),
E_axis,
],
data_type = 'int',
)
events = arcseventdata.readevents( eventdatafilename, nevents )
pixelPositions = arcseventdata.readpixelpositions( pixelPositionsFilename )
arcseventdata.events2IpdpE(
events, nevents, h, Ei, pixelPositions,
npacks = npacks, ndetsperpack = ndetsperpack, npixelsperdet = npixelsperdet,
mod2sample = mod2sample,
emission_time = emission_time,
)
# set error bar squares to be equal to counts
h.errors().storage().asNumarray()[:] = h.data().storage().asNumarray()
from histogram.hdf import dump
dump(h, h5filename, '/', 'c' )
return
def main():
from optparse import OptionParser
usage = "usage: %prog [options] event-data-file"
parser = OptionParser(usage)
#parser.add_option("-e", "--eventdatafile", dest="eventdatafile",
# help="ARCS event data file")
parser.add_option("-o", "--out", dest="h5filename", default = "Idspacing.h5",
help="hdf5 file of I(dspacing) histogram")
parser.add_option("-n", "--nevents", dest="nevents", default = '1000',
type = 'int', help="number of events")
parser.add_option("-E", "--EnergyTransfer", dest="E_params", default = '-50,50,1.',
help="energy transfer bin parameters (begin, end, step). units: meV")
parser.add_option("-x", "--ARCS-xml", dest = "ARCSxml",
default = "ARCS.xml",
help="ARCS instrument xml file" )
parser.add_option('-I', '--IncidentEnergy', dest='Ei', default = 60, type = 'float',
help='incident energy. unit: meV')
parser.add_option('-t', '--emission_time', dest='emission_time', default = 0.0, type = 'float',
help='emission time. tof reading - real tof. unit: microsecond')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
raise "should not reach here"
eventdatafile = args[0]
h5filename = options.h5filename
nevents = options.nevents
E_params = eval( options.E_params )
Ei = options.Ei
emission_time = options.emission_time
ARCSxml = options.ARCSxml
run( eventdatafile, nevents, ARCSxml, h5filename, E_params, Ei, emission_time )
return
if __name__ == '__main__':
import journal
journal.warning( 'arcseventdata.Histogrammer2' ).deactivate()
main()
# version
__id__ = "$Id$"
# End of file
|
[
"linjiao@caltech.edu"
] |
linjiao@caltech.edu
|
c9f002f2e16d65a1878541197c7e17dfa4b052e7
|
fb3c1e036f18193d6ffe59f443dad8323cb6e371
|
/src/flash/build/buildbot/slaves/windows64/buildbot.tac
|
b10193418512735dd402c802a5bdee88e3ccd140
|
[] |
no_license
|
playbar/nstest
|
a61aed443af816fdc6e7beab65e935824dcd07b2
|
d56141912bc2b0e22d1652aa7aff182e05142005
|
refs/heads/master
| 2021-06-03T21:56:17.779018
| 2016-08-01T03:17:39
| 2016-08-01T03:17:39
| 64,627,195
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,219
|
tac
|
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is [Open Source Virtual Machine.].
#
# The Initial Developer of the Original Code is
# Adobe System Incorporated.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Adobe AS3 Team
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK ****
from twisted.application import service
from buildbot.slave.bot import BuildSlave

# Buildslave configuration for the windows64 builder.
basedir = r'/c/buildbot/tamarin-redux/windows64'
buildmaster_host = '10.171.22.12'
port = 9750
slavename = 'asteamwin3'
# NOTE(review): plaintext credential checked into the repo — rotate/move
# to a secrets store if this config is still live.
passwd = 'asteam'
keepalive = 600
usepty = 1
umask = None

application = service.Application('buildslave')
# BUG FIX: the original passed the undefined name `host` (NameError at
# startup); the master's address is stored in `buildmaster_host`.
s = BuildSlave(buildmaster_host, port, slavename, passwd, basedir, keepalive,
               usepty, umask=umask)
s.setServiceParent(application)
|
[
"hgl868@126.com"
] |
hgl868@126.com
|
8df7dd01776154eb4b7f0fa22d4f39e34f89562b
|
18305efd1edeb68db69880e03411df37fc83b58b
|
/pdb_files3000rot/fb/2fb8/tractability_500/pymol_results_file.py
|
8fb6f70e459bfef541c1964fba987ec98a526281
|
[] |
no_license
|
Cradoux/hotspot_pipline
|
22e604974c8e38c9ffa979092267a77c6e1dc458
|
88f7fab8611ebf67334474c6e9ea8fc5e52d27da
|
refs/heads/master
| 2021-11-03T16:21:12.837229
| 2019-03-28T08:31:39
| 2019-03-28T08:31:39
| 170,106,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,223
|
py
|
from os.path import join
import tempfile
import zipfile
from pymol import cmd, finish_launching
from pymol.cgo import *
finish_launching()
dirpath = None
def cgo_arrow(atom1='pk1', atom2='pk2', radius=0.07, gap=0.0, hlength=-1, hradius=-1, color='blue red', name=''):
    """Build a PyMOL CGO arrow (cylinder shaft + cone head) from atom1 to
    atom2 and return it as a CGO primitive list.

    atom1/atom2 may be PyMOL selections, "[x,y,z]" strings, or coordinate
    triples. `color` is either one color name or "shaft head" pair.
    Negative hlength/hradius mean "derive from radius".
    NOTE(review): the `name` parameter is accepted but never used here —
    confirm whether callers rely on it.
    """
    from chempy import cpv

    radius, gap = float(radius), float(gap)
    hlength, hradius = float(hlength), float(hradius)

    # Accept "blue red" (shaft/head) or a single color for both parts.
    # NOTE(review): bare except also swallows non-string inputs — confirm.
    try:
        color1, color2 = color.split()
    except:
        color1 = color2 = color
    color1 = list(cmd.get_color_tuple(color1))
    color2 = list(cmd.get_color_tuple(color2))

    def get_coord(v):
        # Coordinates may be given directly, as a "[x,y,z]" literal, or as
        # an atom selection resolved through PyMOL.
        if not isinstance(v, str):
            return v
        if v.startswith('['):
            return cmd.safe_list_eval(v)
        return cmd.get_atom_coords(v)

    xyz1 = get_coord(atom1)
    xyz2 = get_coord(atom2)
    # Unit vector pointing from tip (atom2) back toward the tail (atom1).
    normal = cpv.normalize(cpv.sub(xyz1, xyz2))

    if hlength < 0:
        hlength = radius * 3.0
    if hradius < 0:
        hradius = hlength * 0.6

    if gap:
        # Pull both endpoints inward along the axis by `gap`.
        diff = cpv.scale(normal, gap)
        xyz1 = cpv.sub(xyz1, diff)
        xyz2 = cpv.add(xyz2, diff)

    # Base of the cone: `hlength` before the tip along the axis.
    xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)

    obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + [cgo.CONE] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + [1.0, 0.0]
    return obj
# Unpack the hotspot results archive into a scratch directory and load the
# protein structure it contains.
dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
with zipfile.ZipFile(zip_dir) as hs_zip:
    hs_zip.extractall(dirpath)
cmd.load(join(dirpath,"protein.pdb"), "protein")
cmd.show("cartoon", "protein")
# Load the atom-label overlays for each score threshold (10, 14, 17) and
# render them as small text labels rather than atoms.
if dirpath:
    f = join(dirpath, "label_threshold_10.mol2")
else:
    f = "label_threshold_10.mol2"
cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
    f = join(dirpath, "label_threshold_14.mol2")
else:
    f = "label_threshold_14.mol2"
cmd.load(f, 'label_threshold_14')
cmd.hide('everything', 'label_threshold_14')
cmd.label("label_threshold_14", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
    f = join(dirpath, "label_threshold_17.mol2")
else:
    f = "label_threshold_17.mol2"
cmd.load(f, 'label_threshold_17')
cmd.hide('everything', 'label_threshold_17')
cmd.label("label_threshold_17", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
# Probe-type colouring and the grid files produced by the hotspot run.
colour_dict = {'acceptor':'red', 'donor':'blue', 'apolar':'yellow', 'negative':'purple', 'positive':'cyan'}
threshold_list = [10, 14, 17]
gfiles = ['donor.grd', 'apolar.grd', 'acceptor.grd']
grids = ['donor', 'apolar', 'acceptor']
num = 0
surf_transparency = 0.2
if dirpath:
    gfiles = [join(dirpath, g) for g in gfiles]
# Build one isosurface per (grid, threshold) pair and group them under a
# per-threshold group; failures (e.g. a missing grid file) are skipped.
for t in threshold_list:
    for i in range(len(grids)):
        try:
            cmd.load(r'%s'%(gfiles[i]), '%s_%s'%(grids[i], str(num)))
            cmd.isosurface('surface_%s_%s_%s'%(grids[i], t, num), '%s_%s'%(grids[i], num), t)
            cmd.set('transparency', surf_transparency, 'surface_%s_%s_%s'%(grids[i], t, num))
            cmd.color(colour_dict['%s'%(grids[i])], 'surface_%s_%s_%s'%(grids[i], t, num))
            cmd.group('threshold_%s'%(t), members = 'surface_%s_%s_%s'%(grids[i],t, num))
            cmd.group('threshold_%s' % (t), members='label_threshold_%s' % (t))
        except:
            continue
    try:
        cmd.group('hotspot_%s' % (num), members='threshold_%s' % (t))
    except:
        continue
for g in grids:
    cmd.group('hotspot_%s' % (num), members='%s_%s' % (g,num))
# Pharmacophore feature spheres (CGO) keyed by the hotspot score string; the
# "_arrows" entries hold donor/acceptor direction arrows built by cgo_arrow.
cluster_dict = {"16.9200000763":[], "16.9200000763_arrows":[]}
cluster_dict["16.9200000763"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-44.0), float(28.5), float(-4.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-44.0,28.5,-4.5], [-45.62,26.402,-4.591], color="blue red", name="Arrows_16.9200000763_1")
cluster_dict["16.9200000763"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-44.0), float(20.0), float(-2.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-44.0,20.0,-2.5], [-46.141,20.536,-3.622], color="blue red", name="Arrows_16.9200000763_2")
cluster_dict["16.9200000763"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-43.5), float(31.5), float(-3.0), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-43.5,31.5,-3.0], [-45.565,31.015,-1.188], color="blue red", name="Arrows_16.9200000763_3")
cluster_dict["16.9200000763"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-37.0), float(24.0), float(-0.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-37.0,24.0,-0.5], [-35.435,21.826,0.786], color="blue red", name="Arrows_16.9200000763_4")
cluster_dict["16.9200000763"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-34.5), float(29.0), float(-1.0), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-34.5,29.0,-1.0], [-32.426,27.812,-0.09], color="blue red", name="Arrows_16.9200000763_5")
cluster_dict["16.9200000763"] += [COLOR, 1.00, 1.000, 0.000] + [ALPHA, 0.6] + [SPHERE, float(-39.4049044046), float(27.8836362845), float(-0.685739447518), float(1.0)]
cluster_dict["16.9200000763"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(-44.5), float(23.0), float(-2.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-44.5,23.0,-2.5], [-47.478,23.36,-2.426], color="red blue", name="Arrows_16.9200000763_6")
cluster_dict["16.9200000763"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(-42.0), float(22.5), float(-3.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-42.0,22.5,-3.5], [-42.43,23.173,-6.066], color="red blue", name="Arrows_16.9200000763_7")
cluster_dict["16.9200000763"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(-40.5), float(22.5), float(0.0), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-40.5,22.5,0.0], [-38.239,21.157,0.384], color="red blue", name="Arrows_16.9200000763_8")
cluster_dict["16.9200000763"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(-36.0), float(25.5), float(0.5), float(1.0)]
cluster_dict["16.9200000763_arrows"] += cgo_arrow([-36.0,25.5,0.5], [-32.936,24.781,0.433], color="red blue", name="Arrows_16.9200000763_9")
cmd.load_cgo(cluster_dict["16.9200000763"], "Features_16.9200000763", 1)
cmd.load_cgo(cluster_dict["16.9200000763_arrows"], "Arrows_16.9200000763")
cmd.set("transparency", 0.2,"Features_16.9200000763")
cmd.group("Pharmacophore_16.9200000763", members="Features_16.9200000763")
cmd.group("Pharmacophore_16.9200000763", members="Arrows_16.9200000763")
# Labels for the pharmacophore features at this score.
if dirpath:
    f = join(dirpath, "label_threshold_16.9200000763.mol2")
else:
    f = "label_threshold_16.9200000763.mol2"
cmd.load(f, 'label_threshold_16.9200000763')
cmd.hide('everything', 'label_threshold_16.9200000763')
cmd.label("label_threshold_16.9200000763", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
cmd.group('Pharmacophore_16.9200000763', members= 'label_threshold_16.9200000763')
# Final scene cosmetics.
cmd.bg_color("white")
cmd.show("cartoon", "protein")
cmd.color("slate", "protein")
cmd.show("sticks", "organic")
cmd.hide("lines", "protein")
|
[
"cradoux.cr@gmail.com"
] |
cradoux.cr@gmail.com
|
698a03e7e32842e8b104e0c82b939b34a766192e
|
0306bea08e9aab18f34a799ce8a73e86921f90f7
|
/medium/MergeInBetweenLinkedLists.py
|
8ec144cd18dc8a581543cea87bfb41b24ae965b0
|
[] |
no_license
|
GeorgianBadita/LeetCode
|
78686fde88ef65b64f84fb7c2a22ba37ef21b8d9
|
e3b0571182369c5308e0c29fb87106bb0b0d615a
|
refs/heads/master
| 2022-10-21T00:23:26.479943
| 2022-10-14T20:27:27
| 2022-10-14T20:27:27
| 251,733,951
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
# https://leetcode.com/submissions/detail/427432636/
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
    def mergeInBetween(
        self, list1: ListNode, a: int, b: int, list2: ListNode
    ) -> ListNode:
        """Splice list2 in place of nodes a..b (0-indexed) of list1.

        Mutates both lists and returns list1's head.
        """
        # Node just before the removed section (index a-1).
        prefix_end = list1
        for _ in range(a - 1):
            prefix_end = prefix_end.next
        # First node kept after the removed section (index b+1).
        suffix_start = prefix_end.next
        for _ in range(b - a + 1):
            suffix_start = suffix_start.next
        # Last node of list2.
        tail2 = list2
        while tail2.next is not None:
            tail2 = tail2.next
        prefix_end.next = list2
        tail2.next = suffix_start
        return list1
|
[
"geo.badita@gmail.com"
] |
geo.badita@gmail.com
|
3b0de834899f997d3899b7fac087eda59b03a816
|
d8a541a2953c9729311059585bb0fca9003bd6ef
|
/Lists as stack ques/key_revolver (2).py
|
51286e00570e0066dba3825aad1cc934c43e8eb8
|
[] |
no_license
|
grigor-stoyanov/PythonAdvanced
|
ef7d628d2b81ff683ed8dd47ee307c41b2276dd4
|
0a6bccc7faf1acaa01979d1e23cfee8ec29745b2
|
refs/heads/main
| 2023-06-10T09:58:04.790197
| 2021-07-03T02:52:20
| 2021-07-03T02:52:20
| 332,509,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
from collections import deque
# Shooting-lock simulation: fire bullets from the end of the list, one clip
# (`gun_barrel` shots) at a time, paying `bullet_cost` per shot; a shot opens
# the front lock when its size is <= the lock's size.
bullet_cost = int(input())
gun_barrel = int(input())  # shots per clip before a reload
bullets = list((map(int, input().split())))
locks = deque(map(int, input().split()))
safe_value = int(input())  # money in the safe; each shot is deducted from it
current_bullet = 0  # position within the current clip (1..gun_barrel)
while bullets and locks and gun_barrel > 0:
    for bullet in range(1, gun_barrel+1):
        current_bullet = bullet
        shot = bullets.pop()  # bullets are fired from the end of the list
        safe_value -= bullet_cost
        if shot <= locks[0]:
            print('Bang!')  # lock opened
            locks.popleft()
            if not locks or not bullets:
                break
        else:
            print('Ping!')  # bullet did not fit, lock stays
            if not bullets:
                break
    # Reload only when a full clip was spent and bullets remain.
    if bullets and current_bullet == gun_barrel:
        current_bullet = 0
        print('Reloading!')
if not locks:
    print(f'{len(bullets)} bullets left. Earned ${safe_value}')
elif not bullets:
    print(f'Couldn\'t get through. Locks left: {len(locks)}')
|
[
"76039296+codelocks7@users.noreply.github.com"
] |
76039296+codelocks7@users.noreply.github.com
|
80f49f9ce84cd2ecb8d31318d2a7f46c6a0b878e
|
4d3118fb51c7d42d22c1f1f3bbcbaebf5f0640d2
|
/exercises/coin_flip.py
|
4bceb538cd7f317864e00ff6d6a62c37a3b2f534
|
[] |
no_license
|
Gabkings/python-practise
|
d899d3d0a4094b9b272120ad2779cbe1c043f8db
|
bafc1fea60dfa6b7075204276fb6e3d857be2cbf
|
refs/heads/master
| 2020-03-25T08:30:05.917168
| 2018-08-05T13:35:44
| 2018-08-05T13:35:44
| 143,615,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
from random import random
def coin_flip():
    """Simulate one coin toss: 'head' if random() > 0.5, else 'tail'."""
    return 'head' if random() > 0.5 else 'tail'
# Flip once and report the result.
con = coin_flip()
print(con)
|
[
"gabworks51@gmail.com"
] |
gabworks51@gmail.com
|
1a21293667b3cb7e185302ffb7736a7bbe0494dd
|
09ce9635b0e74ba178e98efd0d5229a25995713e
|
/submissions/pakencamp-2019-day3/a.py
|
b5e2b7089cb507bc4e2b8a16412a7f5128acf605
|
[
"Unlicense"
] |
permissive
|
m-star18/atcoder
|
7575f1e1f3ee1dfa4a765493eb17b4ef0ad5f1f0
|
08e475810516602fa088f87daf1eba590b4e07cc
|
refs/heads/main
| 2023-07-14T09:16:42.807150
| 2021-08-22T15:59:48
| 2021-08-22T15:59:48
| 364,458,316
| 1
| 0
|
Unlicense
| 2021-08-22T15:59:49
| 2021-05-05T04:13:03
|
Python
|
UTF-8
|
Python
| false
| false
| 201
|
py
|
# Fast-input boilerplate for competitive programming (reads raw bytes).
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
# Answer: count of integers in the inclusive range [a, b].
a, b = map(int, readline().split())
print(b - a + 1)
|
[
"31807@toyota.kosen-ac.jp"
] |
31807@toyota.kosen-ac.jp
|
61122cf8d525f6b5435f3e7f4c654fba7e261694
|
714268a27bd4cc34ec053cb3d991012151554aad
|
/CodeChef/atTheGates.py
|
1bf5f096ca7f90fe56773272969212d1e8a86d07
|
[] |
no_license
|
yashhR/competitive
|
2b649011c2cea74eea8d9646bcfafc73743651eb
|
37f2ec68b33828df4692bc23f28d532cb8d4a358
|
refs/heads/master
| 2022-11-10T04:53:47.634062
| 2020-06-22T16:43:03
| 2020-06-22T16:43:03
| 274,190,602
| 0
| 0
| null | 2020-06-22T16:36:02
| 2020-06-22T16:36:02
| null |
UTF-8
|
Python
| false
| false
| 1,801
|
py
|
'''
There is a table in front of you, with N coins placed in a row and numbered 1 through N from left to right.
For each coin, you know whether it is initially showing heads or tails. You have to perform exactly K operations.
In one operation, you should remove the rightmost coin present on the table,
and if this coin was showing heads right before it was removed, then you should also flip all the remaining coins.
(If a coin was showing heads, then after it is flipped, it is showing tails, and vice versa.)
The code needed to enter the temple is the number of coins which, after these K operations are performed,
have not been removed and are showing heads. Can you find this number? The fate of Persia lies in your hands…
Input:
The first line of the input contains a single integer T denoting the number of test cases.
The description of T test cases follows.
The first line of each test case contains two space-separated integers N and K.
The second line contains N space-separated characters.
For each valid i, the i-th of these characters is 'H' if the i-th coin is initially showing heads or 'T' if it is showing tails.
Output:
For each test case, print a single line containing one integer ― the number of coins that are showing heads after K operations.
'''
t = int(input())  # number of test cases
def how_many(n=None, m=None, coins=None):
    """Return how many coins still show heads after m removal operations.

    Each operation removes the rightmost coin; if that coin was showing
    heads, all remaining coins are flipped.  Instead of rewriting the whole
    list on every flip (O(n) per operation, O(n*m) total) we track the flip
    parity: a coin's effective face is its stored face XOR `flipped`.

    Parameters (all optional, added backward-compatibly for testability;
    when omitted they are read from stdin exactly like the original):
      n     -- number of coins (only part of the input format; unused)
      m     -- number of operations to perform
      coins -- list of 'H'/'T' characters
    """
    if coins is None:
        n, m = map(int, input().split())
        coins = list(input().split())
    else:
        coins = list(coins)  # don't mutate the caller's list
    flipped = False  # True when an odd number of global flips have occurred
    for _ in range(m):
        # Effective face of the rightmost coin, accounting for parity.
        if (coins[-1] == "H") != flipped:
            flipped = not flipped  # heads removed -> flip all remaining coins
        coins.pop()
    # Count coins whose effective face is heads.
    return sum(1 for c in coins if (c == "H") != flipped)
# Answer each test case on its own line.
for i in range(t):
    print(how_many())
|
[
"17131a05h5@gvpce.ac.in"
] |
17131a05h5@gvpce.ac.in
|
c19f605ab193cc8be99874cea988ac066ac3e0a5
|
9f414bde21046a264f3189786a7180f9ffd79d30
|
/web/web/finders.py
|
74784abfe14fbff5f035c0d81326851720746e80
|
[
"Apache-2.0"
] |
permissive
|
rcbops/FleetDeploymentReporting
|
ebd0ca07f099bdcf4e231d734145307e8f9bb9a5
|
aaab76706c8268d3ff3e87c275baee9dd4714314
|
refs/heads/develop
| 2020-03-21T16:09:25.757015
| 2019-02-26T16:15:52
| 2019-02-26T16:15:52
| 138,753,891
| 1
| 7
|
Apache-2.0
| 2019-02-26T16:15:53
| 2018-06-26T14:58:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,311
|
py
|
import os
from django.apps import apps
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.finders import BaseFinder
from django.core.files.storage import FileSystemStorage
class AngularTemplateFinder(BaseFinder):
    """Incomplete implementation of a finder. Only implements list.

    Yields the Angular HTML templates shipped inside the `web` app's
    static directory.
    """
    storage_class = FileSystemStorage
    source_dir = 'static'
    app_name = 'web'

    def __init__(self, *args, **kwargs):
        """Init the finder.

        Leaves ``self.storage`` as None when the app has no static
        directory on disk.
        """
        self.storage = None
        app_config = apps.get_app_config(self.app_name)
        app_storage = self.storage_class(
            os.path.join(app_config.path, self.source_dir)
        )
        if os.path.isdir(app_storage.location):
            self.storage = app_storage
        super().__init__(*args, **kwargs)

    def list(self, ignore_patterns):
        """
        List all html angular templates in the web app.

        :param ignore_patterns: Collection of patterns to ignore
        :type ignore_patterns: list
        :yields: (path, storage object) tuple
        :ytype: tuple
        """
        # BUG FIX: guard against self.storage being None (static dir
        # absent); the original unconditionally called
        # self.storage.exists('') and raised AttributeError in that case.
        if self.storage is not None and self.storage.exists(''):
            for path in utils.get_files(self.storage, ignore_patterns):
                if path.startswith('web/html') and path.endswith('.html'):
                    yield path, self.storage
|
[
"james.absalon@rackspace.com"
] |
james.absalon@rackspace.com
|
2176690dae448b8b2e6b44a37ead6da57cf654a8
|
a222c577f924c390b244beaa67b4b042c2eb7337
|
/bin/kt_regression.py
|
ffbbe2fa6643850cdc769f50a86c7be01586058e
|
[] |
no_license
|
bdqnghi/sentence-ordering
|
59baf539e9f30876860b73805b74862d1beef804
|
fb62eea650f132ea3d01aabb831ea49531824183
|
refs/heads/master
| 2020-03-28T08:07:52.637593
| 2018-02-01T11:38:51
| 2018-02-01T11:38:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
import click
from sent_order.models import kt_regression as model
from sent_order import cuda
@click.group()
def cli():
    # Root command group; subcommands register themselves via @cli.command().
    pass
@cli.command()
@click.argument('train_path', type=click.Path())
@click.argument('model_path', type=click.Path())
@click.option('--train_skim', type=int, default=1000000)
@click.option('--lr', type=float, default=1e-3)
@click.option('--epochs', type=int, default=1000)
@click.option('--epoch_size', type=int, default=1000)
@click.option('--batch_size', type=int, default=20)
@click.option('--lstm_dim', type=int, default=500)
@click.option('--lin_dim', type=int, default=500)
def train(*args, **kwargs):
    # Thin CLI wrapper: forwards every parsed argument/option straight to
    # the model's train() entry point.
    model.train(*args, **kwargs)
if __name__ == '__main__':
    cli()
|
[
"davidwilliammcclure@gmail.com"
] |
davidwilliammcclure@gmail.com
|
a8340cb3c5b1eb05201fa61d09a62ab1595c6306
|
f02b21d5072cb66af643a7070cf0df4401229d6e
|
/leetcode/explore_lessons/binary_search/first_bad_version.py
|
83b3e34729656e01cef998a3a4d05972fdcfb579
|
[] |
no_license
|
dbconfession78/interview_prep
|
af75699f191d47be1239d7f842456c68c92b95db
|
7f9572fc6e72bcd3ef1a22b08db099e1d21a1943
|
refs/heads/master
| 2018-10-09T22:03:55.283172
| 2018-06-23T01:18:00
| 2018-06-23T01:18:00
| 110,733,251
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
# 278. First Bad Version
"""
You are a product manager and currently leading a team to develop a new product. Unfortunately, the latest version
of your product fails the quality check. Since each version is developed based on the previous version, all the
versions after a bad version are also bad.
Suppose you have n versions [1, 2, ..., n] and you want to find out the first bad one, which causes all the
following ones to be bad.
You are given an API bool isBadVersion(version) which will return whether version is bad. Implement a function to
find the first bad version. You should minimize the number of calls to the API.
"""
# *** when pasting into LC, remove 2nd param, bv (bad version) from method def and calls
class Solution:
    # def firstBadVersion_PRACTICE(self, n, bv):
    def firstBadVersion(self, n, bv):
        """Return the first bad version in 1..n via binary search.

        `bv` is the known first bad version (local test-harness parameter);
        a version v is bad iff v >= bv.  O(log n) predicate checks.

        BUG FIX: this was an empty practice stub that returned None, so
        main() printed None for every case; implement the search (with the
        predicate inlined so the method is self-contained).
        """
        left = 1
        right = n
        while left < right:
            mid = left + (right - left) // 2  # overflow-safe midpoint idiom
            if mid >= bv:  # inline isBadVersion(mid, bv)
                right = mid
            else:
                left = mid + 1
        return left

    def firstBadVersion_PASSED(self, n, bv):
        # def firstBadVersion(self, n, bv):
        """Accepted solution; relies on the module-level isBadVersion()."""
        left = 1
        right = n
        while left < right:
            mid = left + (right - left) // 2
            if isBadVersion(mid, bv):
                right = mid
            else:
                left = mid + 1
        return left
def isBadVersion(n, bad_version):
    """Simulated LeetCode API: version n is bad iff n >= bad_version."""
    return n >= bad_version
def main():
    # Exercise the active implementation against known first-bad-versions.
    print(Solution().firstBadVersion(2, bv=1))
    print(Solution().firstBadVersion(3, bv=1))
    print(Solution().firstBadVersion(4, bv=4))
    print(Solution().firstBadVersion(2126753390, bv=1702766719))
# * When pasting into LC, remove 2nd param, bv (bad version) from method def and calls
if __name__ == '__main__':
    main()
|
[
"Hyrenkosa1"
] |
Hyrenkosa1
|
b422a944d1f29b3f7038e03480ecbf17ddf705f8
|
48f092fd8191b0218df8605dc7125e526764e59e
|
/NestedLoops/app2.py
|
231571ece85b4688453d6c27e1842e5019388cf3
|
[] |
no_license
|
LalityaSawant/Python-Projects
|
2edb430c094fe3d6b4e706cc61f885aa07e24dff
|
b142708256e26867f09b3063f5f3fffa305ec496
|
refs/heads/master
| 2020-05-01T03:00:26.012301
| 2019-03-23T22:09:33
| 2019-03-23T22:09:33
| 177,235,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
# Print a simple horizontal bar "chart": one row of '0's per value.
numbers = [0,1,2,3,4,5,4,3,2,1,1,1,4,6,7,8,9,9,9,9,8,7,7,6,5,5,5,6,7,8,8,7,6,5,5,4,4,3,3,3,3,3,3,2,2,3,3]
for number in numbers:
    # String repetition replaces the original char-by-char accumulation
    # loop; the unused placeholder variable `x` is removed as well.
    output = '0' * number
    print(f''' {output}''')
|
[
"lalitya.sawant@gmail.com"
] |
lalitya.sawant@gmail.com
|
d04872bac0a950de742027fee0e7b9b5f0e2ab53
|
544d4f57945a08cb382b1ef04ae73fb6eaccfb29
|
/105.py
|
9c481c8d2568ed9f326e517c84e96b9ec4d3b0b3
|
[
"LicenseRef-scancode-unicode",
"ICU",
"NAIST-2003",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] |
permissive
|
rzhang1654/pyco
|
7ea06a49cb169b4f70bf5d832ed39af5e416ee60
|
80a23c591da0f36f240f644ce8799fe8f9f5ed98
|
refs/heads/master
| 2023-05-25T11:44:14.027525
| 2021-06-02T14:57:17
| 2021-06-02T14:57:17
| 373,204,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
#!/usr/bin/env python
#
# NOTE(review): this is Python 2 code (print statements, `urllib.urlopen`);
# it will not run under Python 3 without porting.
from atexit import register
from re import compile
from threading import Thread
from time import ctime
from urllib import urlopen as uopen
# Matches the first <script type="..."> attribute value in a page.
REGEX = compile(r'<script type="(.+?)">')
EXELON = 'https://www.exeloncorp.com/leadership-and-governance'
# URL path suffix -> human-readable page title.
PAGEs = {
    'ethics-and-conduct': 'ethics and conduct',
    'executive-profiles': 'executive profiles',
    'governance-overview': 'governance overview',
}
def getScript(title):
    # Fetch EXELON + title and return the first script "type" attribute.
    # NOTE(review): the URL is concatenated without a '/' separator --
    # presumably '%s/%s' was intended; verify against the live site.
    page = uopen('%s%s' % (EXELON,title))
    data = page.read()
    page.close()
    return REGEX.findall(data)[0]
def _showScript(title):
    # Worker body: print "<title> is <script type>" for one page.
    print '- %s is %s' % (PAGEs[title], getScript(title))
def _main():
    # Fetch all pages concurrently, one daemonless thread per page.
    print 'At', ctime(), 'on Exelon ...'
    for title in PAGEs:
        Thread(target=_showScript, args=(title,)).start()
@register
def _atexit():
    # Runs at interpreter exit, after the main thread finishes.
    print 'all DONE at:', ctime()
if __name__ == '__main__':
    _main()
|
[
"run.zhang@exeloncorp.com"
] |
run.zhang@exeloncorp.com
|
81120867431d79bbfad20b6629306fb294a78aea
|
e5654e71ad4f043bb28105c3b6f3cd833e1c52dc
|
/openai/venv/lib/python3.10/site-packages/langchain/embeddings/sagemaker_endpoint.py
|
e1371a7d99936cbd102ccb6ba13e593ba23abc8a
|
[] |
no_license
|
henrymendez/garage
|
0b795f020a68fe2d349b556fb8567f6b96488ed5
|
b7aaa920a52613e3f1f04fa5cd7568ad37302d11
|
refs/heads/master
| 2023-07-19T20:16:02.792007
| 2023-07-07T16:58:15
| 2023-07-07T16:58:15
| 67,760,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,006
|
py
|
"""Wrapper around Sagemaker InvokeEndpoint API."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase
class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
    """Wrapper around custom Sagemaker Inference Endpoints.
    To use, you must supply the endpoint name from your deployed
    Sagemaker model & the region where it is deployed.
    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.
    Make sure the credentials / roles used have the required policies to
    access the Sagemaker endpoint.
    See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
    """
    # NOTE(review): the bare triple-quoted strings below are no-op
    # expression statements used as usage examples; they are not docstrings.
    """
    Example:
        .. code-block:: python
            from langchain.embeddings import SagemakerEndpointEmbeddings
            endpoint_name = (
                "my-endpoint-name"
            )
            region_name = (
                "us-west-2"
            )
            credentials_profile_name = (
                "default"
            )
            se = SagemakerEndpointEmbeddings(
                endpoint_name=endpoint_name,
                region_name=region_name,
                credentials_profile_name=credentials_profile_name
            )
    """
    client: Any  #: :meta private:
    endpoint_name: str = ""
    """The name of the endpoint from the deployed Sagemaker model.
    Must be unique within an AWS Region."""
    region_name: str = ""
    """The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
    credentials_profile_name: Optional[str] = None
    """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
    has either access keys or role information specified.
    If not specified, the default credential profile or, if on an EC2 instance,
    credentials from IMDS will be used.
    See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    """
    content_handler: ContentHandlerBase
    """The content handler class that provides an input and
    output transform functions to handle formats between LLM
    and the endpoint.
    """
    """
    Example:
        .. code-block:: python
        from langchain.llms.sagemaker_endpoint import ContentHandlerBase
            class ContentHandler(ContentHandlerBase):
                content_type = "application/json"
                accepts = "application/json"
                def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({prompt: prompt, **model_kwargs})
                    return input_str.encode('utf-8')
                def transform_output(self, output: bytes) -> str:
                    response_json = json.loads(output.read().decode("utf-8"))
                    return response_json[0]["generated_text"]
    """
    model_kwargs: Optional[Dict] = None
    """Key word arguments to pass to the model."""
    endpoint_kwargs: Optional[Dict] = None
    """Optional attributes passed to the invoke_endpoint
    function. See `boto3`_. docs for more info.
    .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
    """
    class Config:
        """Configuration for this pydantic object."""
        extra = Extra.forbid
        arbitrary_types_allowed = True
    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that AWS credentials to and python package exists in environment."""
        try:
            import boto3
            try:
                if values["credentials_profile_name"] is not None:
                    session = boto3.Session(
                        profile_name=values["credentials_profile_name"]
                    )
                else:
                    # use default credentials
                    session = boto3.Session()
                values["client"] = session.client(
                    "sagemaker-runtime", region_name=values["region_name"]
                )
            except Exception as e:
                raise ValueError(
                    "Could not load credentials to authenticate with AWS client. "
                    "Please check that credentials in the specified "
                    "profile name are valid."
                ) from e
        except ImportError:
            raise ValueError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        return values
    def _embedding_func(self, texts: List[str]) -> List[float]:
        """Call out to SageMaker Inference embedding endpoint."""
        # NOTE(review): despite the List[float] annotation, this returns
        # whatever content_handler.transform_output yields for the whole
        # batch -- presumably a list of embeddings; confirm against the
        # handler implementation.
        # replace newlines, which can negatively affect performance.
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        _model_kwargs = self.model_kwargs or {}
        _endpoint_kwargs = self.endpoint_kwargs or {}
        body = self.content_handler.transform_input(texts, _model_kwargs)
        content_type = self.content_handler.content_type
        accepts = self.content_handler.accepts
        # send request
        try:
            response = self.client.invoke_endpoint(
                EndpointName=self.endpoint_name,
                Body=body,
                ContentType=content_type,
                Accept=accepts,
                **_endpoint_kwargs,
            )
        except Exception as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")
        return self.content_handler.transform_output(response["Body"])
    def embed_documents(
        self, texts: List[str], chunk_size: int = 64
    ) -> List[List[float]]:
        """Compute doc embeddings using a SageMaker Inference Endpoint.
        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size defines how many input texts will
                be grouped together as request. If None, will use the
                chunk size specified by the class.
        Returns:
            List of embeddings, one for each text.
        """
        results = []
        _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
        for i in range(0, len(texts), _chunk_size):
            response = self._embedding_func(texts[i : i + _chunk_size])
            # NOTE(review): this appends one entry per *chunk*, not per
            # text -- if transform_output returns a list of embeddings per
            # request, the result is nested one level deeper than the
            # docstring claims; verify against the content handler.
            results.append(response)
        return results
    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a SageMaker inference endpoint.
        Args:
            text: The text to embed.
        Returns:
            Embeddings for the text.
        """
        return self._embedding_func([text])
|
[
"henry95@gmail.com"
] |
henry95@gmail.com
|
c9e58e81a27a870849d3bfed9fa9d0ddacf18134
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc038/D/4502471.py
|
1539cc5c3366975fe7df695b0a53f39763ae4186
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
# AtCoder ABC038 D (nested boxes): sort by width ascending and, within equal
# width, height descending, then find the longest chain of strictly
# increasing (width, height) pairs via patience sorting with bisect.
import sys
import bisect
input=sys.stdin.readline
N=int(input())
data=[tuple(map(int,input().split())) for _ in range(N)]
data.sort()
# Re-order ties: boxes sharing a width are placed in descending height order
# so that at most one of them can extend the increasing-height chain.
data2=[]
now=[]  # run of boxes sharing the current width
for d in data:
    if len(now)==0:
        now.append(d)
        continue
    if now[0][0]!=d[0]:
        now.sort(key=lambda x:x[1],reverse=True)
        data2+=now
        now=[d]
    elif now[0][0]==d[0]:
        now.append(d)
# flush the final width-group
now.sort(key=lambda x:x[1],reverse=True)
data2+=now
data=data2
inf=float('inf')
dp=[inf]*(len(data)+1)  # dp[k]: smallest chain-end height for a chain of length k
dp[0]=0
dp2=[(inf,inf)]*(len(data)+1)  # dp2[k]: the (w, h) box ending that chain
dp2[0]=(0,0)
for i in range(len(data)):
    idx=bisect.bisect_left(dp,data[i][1])
    # extend only when both width and height strictly increase
    if dp2[idx-1][0]<data[i][0] and data[i][1]<dp2[idx][1]:
        dp[idx]=data[i][1]
        dp2[idx]=data[i]
print(len([i for i in dp if i<inf])-1)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
acf6fb9f47e54f8b0c090b7fe6dc50e0a77e2318
|
e634f90bc999a2903c92f66384a867a474b40d9c
|
/Source/Main.py
|
f6c46809d31de7e9aa6b1e9aee2033be4f65fe5d
|
[
"MIT"
] |
permissive
|
Dmunch04/Plistr
|
69f7217fbea24e48870667c8507845ddd1a63547
|
39d70e3b9f1a827d48a6a951617da0892978515c
|
refs/heads/master
| 2020-07-15T21:16:55.539440
| 2019-09-01T14:27:04
| 2019-09-01T14:27:04
| 205,650,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
from Window import Window
from Helpers import Loader
class Plistr:
    """Application entry object: loads items from *Filename* via the
    project's Loader helper and displays them in a Window."""
    def __init__ (self, Filename):
        self.Filename = Filename
        self.Items = Loader.LoadFile (self.Filename)  # parsed plist items
        self.Window = Window (Filename)
    def Run (self):
        # Hand the loaded items to the window's main loop.
        self.Window.Run (self.Items)
|
[
"daniellmunch@gmail.com"
] |
daniellmunch@gmail.com
|
7fede01bad23361cbb201f9ae03d1b537a916785
|
5068bc927a7fff73923ce95862ff70120160c491
|
/electrum_axe/plugins/ledger/qt.py
|
2c36c1397995c78a133b363be3051db888fc4fa6
|
[
"MIT"
] |
permissive
|
AXErunners/electrum-axe
|
cdbce2dbb92e23e32e9f9b733ae9f65f51c0ae9f
|
7ef05088c0edaf0688fb167df353d6da619ebf2f
|
refs/heads/master
| 2021-04-03T09:40:37.109317
| 2020-08-27T16:53:18
| 2020-08-27T16:53:18
| 124,705,752
| 336
| 75
|
MIT
| 2020-10-17T18:30:25
| 2018-03-10T23:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,709
|
py
|
from functools import partial
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QInputDialog, QLabel, QVBoxLayout, QLineEdit
from electrum_axe.i18n import _
from electrum_axe.plugin import hook
from electrum_axe.wallet import Standard_Wallet
from electrum_axe.gui.qt.util import WindowModalDialog
from .ledger import LedgerPlugin
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from ..hw_wallet.plugin import only_hook_if_libraries_available
class Plugin(LedgerPlugin, QtPluginBase):
    """Qt front-end glue for the Ledger hardware-wallet plugin."""
    icon_unpaired = "ledger_unpaired.png"
    icon_paired = "ledger.png"
    def create_handler(self, window):
        # One handler instance per wallet window.
        return Ledger_Handler(window)
    @only_hook_if_libraries_available
    @hook
    def receive_menu(self, menu, addrs, wallet):
        """Add a 'Show on Ledger' entry to the receive-address context menu
        for a single address of a standard wallet with a Ledger keystore."""
        if type(wallet) is not Standard_Wallet:
            return
        keystore = wallet.get_keystore()
        if type(keystore) == self.keystore_class and len(addrs) == 1:
            def show_address():
                # Run on the keystore's worker thread so the GUI stays responsive.
                keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
            menu.addAction(_("Show on Ledger"), show_address)
class Ledger_Handler(QtHandlerBase):
    """Qt dialog handler for Ledger device interactions.

    Device I/O happens off the GUI thread; the pyqtSignals below marshal
    dialog requests back onto the Qt main thread, and `self.done` (an event
    from the base class) blocks the worker until the user responds.
    """
    setup_signal = pyqtSignal()
    auth_signal = pyqtSignal(object)
    def __init__(self, win):
        super(Ledger_Handler, self).__init__(win, 'Ledger')
        self.setup_signal.connect(self.setup_dialog)
        self.auth_signal.connect(self.auth_dialog)
    def word_dialog(self, msg):
        # Password-style prompt; self.word is None when the user cancels.
        response = QInputDialog.getText(self.top_level_window(), "Ledger Wallet Authentication", msg, QLineEdit.Password)
        if not response[1]:
            self.word = None
        else:
            self.word = str(response[0])
        self.done.set()
    def message_dialog(self, msg):
        # Non-blocking status popup; replaces any dialog currently shown.
        self.clear_dialog()
        self.dialog = dialog = WindowModalDialog(self.top_level_window(), _("Ledger Status"))
        l = QLabel(msg)
        vbox = QVBoxLayout(dialog)
        vbox.addWidget(l)
        dialog.show()
    def auth_dialog(self, data):
        # Modal 2FA PIN dialog; falls back to an error popup if the
        # auth2fa module cannot be imported.
        try:
            from .auth2fa import LedgerAuthDialog
        except ImportError as e:
            self.message_dialog(str(e))
            return
        dialog = LedgerAuthDialog(self, data)
        dialog.exec_()
        self.word = dialog.pin
        self.done.set()
    def get_auth(self, data):
        # Called from the worker thread: request the PIN and wait for it.
        self.done.clear()
        self.auth_signal.emit(data)
        self.done.wait()
        return self.word
    def get_setup(self):
        # Called from the worker thread: show the setup dialog and wait.
        self.done.clear()
        self.setup_signal.emit()
        self.done.wait()
        return
    def setup_dialog(self):
        self.show_error(_('Initialization of Ledger HW devices is currently disabled.'))
|
[
"slowdive@me.com"
] |
slowdive@me.com
|
7b5d81cbcf4171c2438e06c851ff4e7d2d6a0401
|
76a8ea60480331f0f61aeb61de55be9a6270e733
|
/downloadable-site-packages/statsmodels/sandbox/rls.py
|
412cc4d05051951d88945554d1d310bacbbc9c20
|
[
"MIT"
] |
permissive
|
bhagyas/Pyto
|
cd2ec3f35bec703db4ac29b56d17abc4bf03e375
|
907024a9b3e04a2a9de54976778c0e1a56b7b83c
|
refs/heads/master
| 2022-11-19T13:05:07.392454
| 2020-07-21T17:33:39
| 2020-07-21T17:33:39
| 281,886,535
| 2
| 0
|
MIT
| 2020-07-23T07:48:03
| 2020-07-23T07:48:02
| null |
UTF-8
|
Python
| false
| false
| 5,136
|
py
|
"""Restricted least squares
from pandas
License: Simplified BSD
"""
import numpy as np
from statsmodels.regression.linear_model import GLS, RegressionResults
class RLS(GLS):
    """
    Restricted general least squares model that handles linear constraints
    Parameters
    ----------
    endog: array_like
        n length array containing the dependent variable
    exog: array_like
        n-by-p array of independent variables
    constr: array_like
        k-by-p array of linear constraints
    param (0.): array_like or scalar
        p-by-1 array (or scalar) of constraint parameters
    sigma (None): scalar or array_like
        The weighting matrix of the covariance. No scaling by default (OLS).
        If sigma is a scalar, then it is converted into an n-by-n diagonal
        matrix with sigma as each diagonal element.
        If sigma is an n-length array, then it is assumed to be a diagonal
        matrix with the given sigma on the diagonal (WLS).
    Notes
    -----
    endog = exog * beta + epsilon
    weights' * constr * beta = param
    See Greene and Seaks, "The Restricted Least Squares Estimator:
    A Pedagogical Note", The Review of Economics and Statistics, 1991.
    """
    def __init__(self, endog, exog, constr, param=0., sigma=None):
        N, Q = exog.shape
        constr = np.asarray(constr)
        # A 1-d constraint array is treated as a single (k=1) constraint row.
        if constr.ndim == 1:
            K, P = 1, constr.shape[0]
        else:
            K, P = constr.shape
        if Q != P:
            # NOTE(review): a ValueError would be more idiomatic than bare Exception.
            raise Exception('Constraints and design do not align')
        self.ncoeffs = Q
        self.nconstraint = K
        self.constraint = constr
        # Broadcast a scalar constraint parameter across all k constraints.
        if np.isscalar(param) and K > 1:
            param = np.ones((K,)) * param
        self.param = param
        if sigma is None:
            sigma = 1.
        # Scalar sigma -> homoskedastic diagonal; 1-d sigma -> WLS diagonal.
        if np.isscalar(sigma):
            sigma = np.ones(N) * sigma
        sigma = np.squeeze(sigma)
        if sigma.ndim == 1:
            self.sigma = np.diag(sigma)
            # NOTE(review): a whitening matrix would normally be 1/sqrt(sigma),
            # not sqrt(sigma) — confirm against the dense branch below (pinv).
            self.cholsigmainv = np.diag(np.sqrt(sigma))
        else:
            self.sigma = sigma
            self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(self.sigma)).T
        # Deliberately skips GLS.__init__ (sigma/whitening already handled
        # above) and calls GLS's parent initializer — TODO confirm intent.
        super(GLS, self).__init__(endog, exog)
    _rwexog = None
    @property
    def rwexog(self):
        """Whitened exogenous variables augmented with restrictions"""
        # Builds the (P+K)x(P+K) KKT-style bordered system:
        # [ X'X  R' ]
        # [ R    0  ]
        if self._rwexog is None:
            P = self.ncoeffs
            K = self.nconstraint
            design = np.zeros((P + K, P + K))
            design[:P, :P] = np.dot(self.wexog.T, self.wexog) #top left
            constr = np.reshape(self.constraint, (K, P))
            design[:P, P:] = constr.T #top right partition
            design[P:, :P] = constr #bottom left partition
            design[P:, P:] = np.zeros((K, K)) #bottom right partition
            self._rwexog = design
        return self._rwexog
    _inv_rwexog = None
    @property
    def inv_rwexog(self):
        """Inverse of self.rwexog"""
        # Cached on first access; invalidation is not supported.
        if self._inv_rwexog is None:
            self._inv_rwexog = np.linalg.inv(self.rwexog)
        return self._inv_rwexog
    _rwendog = None
    @property
    def rwendog(self):
        """Whitened endogenous variable augmented with restriction parameters"""
        # Right-hand side of the bordered system: [X'y ; param].
        if self._rwendog is None:
            P = self.ncoeffs
            K = self.nconstraint
            response = np.zeros((P + K,))
            response[:P] = np.dot(self.wexog.T, self.wendog)
            response[P:] = self.param
            self._rwendog = response
        return self._rwendog
    _ncp = None
    @property
    def rnorm_cov_params(self):
        """Parameter covariance under restrictions"""
        # Top-left PxP partition of the inverse bordered matrix.
        if self._ncp is None:
            P = self.ncoeffs
            self._ncp = self.inv_rwexog[:P, :P]
        return self._ncp
    _wncp = None
    @property
    def wrnorm_cov_params(self):
        """
        Heteroskedasticity-consistent parameter covariance
        Used to calculate White standard errors.
        """
        if self._wncp is None:
            df = self.df_resid
            pred = np.dot(self.wexog, self.coeffs)
            eps = np.diag((self.wendog - pred) ** 2)
            sigmaSq = np.sum(eps)
            pinvX = np.dot(self.rnorm_cov_params, self.wexog.T)
            self._wncp = np.dot(np.dot(pinvX, eps), pinvX.T) * df / sigmaSq
        return self._wncp
    _coeffs = None
    @property
    def coeffs(self):
        """Estimated parameters"""
        # Solve the bordered system; discard the trailing K Lagrange multipliers.
        if self._coeffs is None:
            betaLambda = np.dot(self.inv_rwexog, self.rwendog)
            self._coeffs = betaLambda[:self.ncoeffs]
        return self._coeffs
    def fit(self):
        """Fit the restricted model and return a RegressionResults wrapper."""
        rncp = self.wrnorm_cov_params
        lfit = RegressionResults(self, self.coeffs, normalized_cov_params=rncp)
        return lfit
if __name__=="__main__":
    import statsmodels.api as sm
    # Demo run against a local data file; requires ./rlsdata.txt to exist.
    dta = np.genfromtxt('./rlsdata.txt', names=True)
    # Design: Y, Y^2, and four region dummies (NE, NC, W, S).
    design = np.column_stack((dta['Y'],dta['Y']**2,dta[['NE','NC','W','S']].view(float).reshape(dta.shape[0],-1)))
    design = sm.add_constant(design, prepend=True)
    # Constraint: the four regional dummy coefficients sum to... the last
    # four coefficients are restricted — presumably to sum to 0 (param
    # defaults to 0.) — TODO confirm against the Greene & Seaks example.
    rls_mod = RLS(dta['G'],design, constr=[0,0,0,1,1,1,1])
    rls_fit = rls_mod.fit()
    print(rls_fit.params)
|
[
"adrilabbelol@gmail.com"
] |
adrilabbelol@gmail.com
|
8b7311824ab9a23ac88a7ac5fed9d86293761a1f
|
6d5d161269e66345a32e0e221f2dbce2a07c742a
|
/async_sched/server/messages.py
|
afca8f5818cc66f2f69f7051f2ae055bd29a5126
|
[
"MIT"
] |
permissive
|
justengel/async_sched
|
d286c79eb6a705769aa8e59da7508d5995acb523
|
f980722d51d15025522b2265426b0188ff368418
|
refs/heads/master
| 2022-11-07T18:24:50.843013
| 2020-07-01T02:39:55
| 2020-07-01T02:39:55
| 268,154,385
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 993
|
py
|
from typing import List
from serial_json import DataClass, field
from ..schedule import Schedule
__all__ = ['DataClass', 'Message', 'Error', 'Quit', 'Update', 'RunCommand', 'ScheduleCommand',
'RunningSchedule', 'ListSchedules', 'StopSchedule']
class Message(DataClass):
    """Generic informational message exchanged between client and server."""
    message: str
class Error(DataClass):
    """Error report carrying a human-readable message."""
    message: str
class Quit(DataClass):
    """Request that the receiving side shut down; carries no payload."""
    pass
class Update(DataClass):
    """Request a module update; empty module_name presumably means all — TODO confirm."""
    module_name: str = ''
class RunCommand(DataClass):
    """Run a registered callback immediately with the given arguments."""
    callback_name: str
    args: tuple = field(default_factory=tuple)
    kwargs: dict = field(default_factory=dict)
class ScheduleCommand(DataClass):
    """Register a named Schedule that runs a callback with the given arguments."""
    name: str
    schedule: Schedule
    callback_name: str
    args: tuple = field(default_factory=tuple)
    kwargs: dict = field(default_factory=dict)
class RunningSchedule(DataClass):
    """A (name, schedule) pair describing one currently-registered schedule."""
    name: str
    schedule: Schedule
class ListSchedules(DataClass):
    """Response payload listing all running schedules."""
    schedules: List[RunningSchedule] = field(default_factory=list)
class StopSchedule(DataClass):
    """Request that the schedule registered under `name` be stopped."""
    name: str
|
[
"jtengel08@gmail.com"
] |
jtengel08@gmail.com
|
c925362f7a177e0811dfb0b9035d7ffefbf1ec34
|
5c6ccc082d9d0d42a69e22cfd9a419a5b87ff6cd
|
/coursera/pythonHse/fourth/4.py
|
b721d875cacc3f42d7207d20fca81d8db4118054
|
[] |
no_license
|
kersky98/stud
|
191c809bacc982c715d9610be282884a504d456d
|
d395a372e72aeb17dfad5c72d46e84dc59454410
|
refs/heads/master
| 2023-03-09T20:47:25.082673
| 2023-03-01T08:28:32
| 2023-03-01T08:28:32
| 42,979,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
# Даны два действительных числа x и y. Проверьте, принадлежит ли точка с
# координатами (x,y) заштрихованному квадрату (включая его границу). Если
# точка принадлежит квадрату, выведите слово YES, иначе выведите слово NO.
# На рисунке сетка проведена с шагом 1.
# Решение должно содержать функцию IsPointInSquare(x, y), возвращающую True,
# если точка принадлежит квадрату и False, если не принадлежит. Основная
# программа должна считать координаты точки, вызвать функцию IsPointInSquare
# и в зависимости от возвращенного значения вывести на экран необходимое
# сообщение. Функция IsPointInSquare не должна содержать инструкцию if.
import sys
x = float(input())
y = float(input())
e = sys.float_info.epsilon
def IsPointInSquare(x, y):
    """Return True if point (x, y) lies in the closed square [-1, 1] x [-1, 1].

    The boundary counts as inside; a machine-epsilon tolerance is added on
    each side so that boundary values carrying float rounding error are
    still accepted.

    Previously this depended on a module-level global ``e``; computing the
    epsilon locally makes the function self-contained without changing
    results.
    """
    eps = sys.float_info.epsilon
    return -1 - eps < x < 1 + eps and -1 - eps < y < 1 + eps
# Report the verdict in the format required by the exercise.
if IsPointInSquare(x, y):
    print('YES')
else:
    print('NO')
|
[
"kerskiy-ev@pao.local"
] |
kerskiy-ev@pao.local
|
d603fc4e9d43b4652a8ac0e851fac084cd7232b0
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1/r4ghu/count.py
|
41bfd1280434461d73ff3977ca2f3160ac163ca2
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 688
|
py
|
# NOTE: Python 2 source (print statements; dict.keys() returns a sortable list).
# Code Jam 2016 "Counting Sheep": for each N, find the first multiple of N
# after which every digit 0-9 has appeared, or report INSOMNIA for N == 0.
input = open('input.txt','r')  # NOTE(review): shadows the builtin `input`
print 'Name of the file:', input.name
results = []
T = int(input.readline())  # number of test cases
for t in range(T):
    dic = {}  # digits seen so far, as dict keys
    l = []
    n = int(input.readline())
    if n==0:
        # 0, 0, 0, ... never shows new digits.
        results.append('INSOMNIA')
    # 25*n multiples suffice — presumably a safe upper bound; TODO confirm.
    for i in range(1,25*n):
        p = list(str(i*n))
        for j in p:
            if j not in dic.keys():
                dic[j]=1
        l = dic.keys()
        l.sort()
        # All ten digits seen: i*n is the answer for this case.
        if l==['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
            results.append(str(i*n))
            break
input.close()
print len(results),results
out = open('out.txt','w')
# NOTE(review): `out` is never closed/flushed explicitly — TODO add out.close().
for i in range(len(results)):
    out.write('Case #'+str(i+1)+': '+results[i]+'\n')
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
ac92c6cc494073720f64ded6d9dfce7c9f5e7603
|
cc72013ede1b3bb02c32a3d0d199be4f7986c173
|
/ch10/cballmaxheight.py
|
d1b7e7d7f8bd3735de66883c44cae85de31b5936
|
[] |
no_license
|
alextickle/zelle-exercises
|
b87d2a1476189954565f5cc97ee1448200eb00d4
|
b784ff9ed9b2cb1c56e31c1c63f3e2b52fa37875
|
refs/heads/master
| 2021-01-19T00:33:19.132238
| 2017-09-14T23:35:35
| 2017-09-14T23:35:35
| 87,182,609
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
from classProjectile import Projectile
def getInputs():
    """Prompt for launch angle, velocity, height and time step; return them.

    NOTE: Python 2 `input()` evaluates the typed text, so numeric entries
    come back as int/float.
    NOTE(review): "initerval" typo lives in the prompt string (runtime text).
    """
    a = input("Enter the launch angle (in degrees): ")
    v = input("Enter the initial velocity (in meters/sec): ")
    h = input("Enter the initial height (in meters): ")
    t = input("Enter the time initerval between position calculations: ")
    return a, v, h, t
def main():
    """Simulate a cannonball flight and print its range and maximum height."""
    angle, vel, h0, time = getInputs()
    cball = Projectile(angle, vel, h0)
    # NOTE(review): maxheight starts at 0 and is only updated after the first
    # step, so the initial height h0 is never considered — TODO confirm.
    maxheight = 0
    while cball.getY() >= 0:
        cball.update(time)
        if cball.getY() > maxheight:
            maxheight = cball.getY()
    print "\nDistance traveled: %0.1f meters." % (cball.getX())
    print "\nMaximum height: %0.1f meters." % (maxheight)
main()
|
[
"alexander.tickle@gmail.com"
] |
alexander.tickle@gmail.com
|
cacda09aaaef2e4170fc9593a18b9c06078c39cf
|
2e7814885646a56ffd3db0883a1c3f790cb9de46
|
/src/zojax/wiki/browser/wiki.py
|
083aa26c9e5f90104938fd17f906a76c8a3f4019
|
[
"ZPL-2.1"
] |
permissive
|
Zojax/zojax.wiki_
|
1afa247c8797ac7316d6689d77d9c61a991eda0b
|
b0b9a3c3a91ffb725c5ef7e330632f18dab3d75e
|
refs/heads/master
| 2020-04-01T15:33:32.556439
| 2014-01-29T18:31:29
| 2014-01-29T18:31:29
| 2,038,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,643
|
py
|
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
$Id$
"""
from zope import component, interface, event, schema
from zope.component import getUtility, getMultiAdapter, queryMultiAdapter
from zope.traversing.browser import absoluteURL
from zope.lifecycleevent import ObjectCreatedEvent
from zope.publisher.interfaces import NotFound
from zope.publisher.interfaces.browser import IBrowserPublisher
from zope.app.container.interfaces import INameChooser
from zojax.richtext.field import RichText
from zojax.content.actions.action import Action
from zojax.statusmessage.interfaces import IStatusMessage
from zojax.layoutform import interfaces, button, Fields, PageletForm
from zojax.wiki.format import generateWikiName
from zojax.wiki.interfaces import _, IWiki, IWikiPage
from zojax.wiki.wikipage import WikiPage
from zojax.wiki.browser.empty import EmptyWikiPage
from zojax.wiki.browser.wikipage import customWidget
from zojax.wiki.browser.interfaces import IManageWikiAction, IAddWikiPageAction
class WikiPublisher(object):
    """Browser publisher for IWiki: resolves child pages, views, and
    potential (not-yet-created) wiki page names during URL traversal."""
    interface.implements(IBrowserPublisher)
    component.adapts(IWiki, interface.Interface)
    def __init__(self, context, request):
        self.context = context
        self.request = request
    def publishTraverse(self, request, name):
        """Resolve `name` to an existing page, a view, or an EmptyWikiPage
        placeholder when the name would be valid for a new page."""
        context = self.context
        if name in context:
            return context[name]
        view = queryMultiAdapter((context, request), name=name)
        if view is not None:
            return view
        try:
            if INameChooser(context).checkName(name, WikiPage()):
                return EmptyWikiPage(name, context, request)
        # NOTE(review): bare except silently swallows all errors (including
        # KeyboardInterrupt in py2) — narrowing to the checkName failure
        # exception would be safer.
        except:
            pass
        raise NotFound(self.context, name, request)
    def browserDefault(self, request):
        # Default traversal target for the wiki root is its FrontPage.
        return self.context, ('FrontPage',)
class ManageWiki(Action):
    """'Manage Wiki' action shown on wiki pages, linking to the wiki's
    context.html management screen."""
    interface.implements(IManageWikiAction)
    component.adapts(IWikiPage, interface.Interface)
    weight = 6
    title = _(u'Manage Wiki')
    contextInterface = IWiki
    permission = 'zojax.ModifyContent'
    @property
    def url(self):
        return '%s/context.html'%absoluteURL(self.context, self.request)
class AddWikiPageAction(Action):
    """'Add Wiki Page' action linking to the wiki's addwikipage.html form."""
    interface.implements(IAddWikiPageAction)
    component.adapts(IWikiPage, interface.Interface)
    weight = 10
    title = _(u'Add Wiki Page')
    contextInterface = IWiki
    permission = 'zojax.ModifyWikiContent'
    @property
    def url(self):
        return '%s/addwikipage.html'%absoluteURL(self.context, self.request)
class IAddWikiPage(interface.Interface):
    """Schema for the Add Wiki Page form: a required title and rich-text body."""
    title = schema.TextLine(
        title = _('Title'),
        description = _('Wiki page title.'),
        required = True)
    text = RichText(
        title = _(u'Page text'),
        description = _(u'Wiki page text.'),
        required = True)
class AddWikiPageForm(PageletForm):
    """Pagelet form that creates a new WikiPage inside the wiki context.

    NOTE: Python 2 source (`except Exception, err` syntax).
    """
    label = _('Add Wiki Page')
    fields = Fields(IAddWikiPage)
    fields['text'].widgetFactory = customWidget
    ignoreContext = True
    @button.buttonAndHandler(_('Create'), name='create',
                             provides=interfaces.IAddButton)
    def createHandler(self, action):
        """Validate the form, create the page, parent it to FrontPage,
        and redirect to the new page on success."""
        data, errors = self.extractData()
        if errors:
            IStatusMessage(self.request).add(
                (self.formErrorsMessage,) + errors, 'formError')
        else:
            page = WikiPage(title=data['title'])
            page.text = data['text']
            # Fire the created event before the page is added to the wiki.
            event.notify(ObjectCreatedEvent(page))
            name = generateWikiName(data['title'])
            wiki = self.context
            try:
                wiki[name] = page
                page.parent = wiki['FrontPage']
                IStatusMessage(self.request).add(_('Wiki page has been added.'))
                self.redirect(u'%s/'%name)
            # e.g. duplicate name from the container; surface it to the user.
            except Exception, err:
                IStatusMessage(self.request).add(err, 'error')
    @button.buttonAndHandler(_('Cancel'), name='cancel',
                             provides=interfaces.ICancelButton)
    def cancelHandler(self, action):
        self.redirect(u'./')
|
[
"andrey.fedoseev@gmail.com"
] |
andrey.fedoseev@gmail.com
|
e4841e48dd2798ed1c2ba400e9e3a8c6b9d95714
|
cb73499c5b15cead88751dfca21cefae81483501
|
/docs/conf.py
|
6f333b8b3d818eebd843b06f85a9f8c9dc169e27
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
RichardPflaum/GalSim
|
dcbabfbdbd41a0ebe909ad3c28e47daabcd92818
|
05060e583b2465ca8e2b258126c2ba8257e358f1
|
refs/heads/main
| 2023-03-23T05:59:05.349282
| 2021-02-05T23:11:55
| 2021-02-06T00:49:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,740
|
py
|
# Copyright (c) 2012-2020 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# MJ: I find things work better if it's installed properly and you don't do this.
#
#import os
#import sys
#sys.path.insert(0, os.path.abspath('../galsim'))
# -- Project information -----------------------------------------------------
import galsim
import galsim.roman
import galsim.des
project = 'GalSim'
copyright = '2019, GalSim-developers'
author = 'GalSim-developers'
# The short X.Y version
version = '.'.join(map(str,galsim.__version_info__[:2]))
# The full version, including alpha/beta/rc tags
release = galsim.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx.ext.napoleon',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# https://michaelgoerz.net/notes/extending-sphinx-napoleon-docstring-sections.html
# -- Extensions to the Napoleon GoogleDocstring class ---------------------
from sphinx.ext.napoleon.docstring import GoogleDocstring
# first, we define new methods for any new sections and add them to the class
def parse_keys_section(self, section):
    """Render a custom 'Keys' docstring section as a Napoleon field list."""
    return self._format_fields('Keys', self._consume_fields())
GoogleDocstring._parse_keys_section = parse_keys_section
def parse_attributes_section(self, section):
    """Override the default 'Attributes' section rendering as a field list."""
    return self._format_fields('Attributes', self._consume_fields())
GoogleDocstring._parse_attributes_section = parse_attributes_section
def parse_class_attributes_section(self, section):
    """Render a custom 'Class Attributes' docstring section as a field list."""
    return self._format_fields('Class Attributes', self._consume_fields())
GoogleDocstring._parse_class_attributes_section = parse_class_attributes_section
def patched_parse(self):
    """Register the custom sections before running the original parser.

    NOTE(review): 'attributes' is not registered here — presumably the
    default _sections entry already maps to _parse_attributes_section, so
    assigning the method on the class above is sufficient; confirm against
    the installed Napoleon version.
    """
    self._sections['keys'] = self._parse_keys_section
    self._sections['class attributes'] = self._parse_class_attributes_section
    self._unpatched_parse()
GoogleDocstring._unpatched_parse = GoogleDocstring._parse
GoogleDocstring._parse = patched_parse
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'GalSimdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GalSim.tex', 'GalSim Documentation',
'GalSim-developers', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'GalSim', 'GalSim Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GalSim', 'GalSim Documentation',
author, 'GalSim', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
|
[
"michael@jarvis.net"
] |
michael@jarvis.net
|
957b6dfafdf01768a405f18e1263f60e635d7d82
|
209c876b1e248fd67bd156a137d961a6610f93c7
|
/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint1.py
|
2db7b1e8f80682670d2ed4cf48d6df45479f95a9
|
[
"Apache-2.0"
] |
permissive
|
Qengineering/Paddle
|
36e0dba37d29146ebef4fba869490ecedbf4294e
|
591456c69b76ee96d04b7d15dca6bb8080301f21
|
refs/heads/develop
| 2023-01-24T12:40:04.551345
| 2022-10-06T10:30:56
| 2022-10-06T10:30:56
| 544,837,444
| 0
| 0
|
Apache-2.0
| 2022-10-03T10:12:54
| 2022-10-03T10:12:54
| null |
UTF-8
|
Python
| false
| false
| 1,771
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle
import os
from paddle.fluid.tests.unittests.auto_checkpoint_utils import get_logger
from test_auto_checkpoint import AutoCheckPointACLBase
paddle.enable_static()
logger = get_logger()
class AutoCheckpointTest1(AutoCheckPointACLBase):
    """Auto-checkpoint test variant 1: distinct job id / HDFS paths / cache
    dir from the other variants so runs do not interfere."""
    def setUp(self):
        get_logger()
        logger.info("enter tests")
        # Snapshot the environment so the base class can restore it later —
        # presumably in tearDown; confirm in AutoCheckPointACLBase.
        self._old_environ = dict(os.environ)
        proc_env = {
            "PADDLE_RUNNING_ENV": "PADDLE_EDL_AUTO_CHECKPOINT",
            "PADDLE_TRAINER_ID": "0",
            "PADDLE_RUNNING_PLATFORM": "PADDLE_CLOUD",
            "PADDLE_JOB_ID": "test_job_auto_1",
            "PADDLE_EDL_HDFS_HOME": "/usr/local/hadoop-2.7.7",
            "PADDLE_EDL_HDFS_NAME": "",
            "PADDLE_EDL_HDFS_UGI": "",
            "PADDLE_EDL_HDFS_CHECKPOINT_PATH": "auto_checkpoint_1",
            "PADDLE_EDL_ONLY_FOR_CE_TEST": "1",
            "PADDLE_EDL_FS_CACHE": ".auto_checkpoint_test_1",
            "PADDLE_EDL_SAVE_CHECKPOINT_INTER": "0"
        }
        os.environ.update(proc_env)
    def test_corner_epoch_no(self):
        # Exercise the epoch-0 corner case via the shared base-class helper.
        self._test_corner_epoch_no(0)
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
Qengineering.noreply@github.com
|
d3336030c75230312fe64f70d821674c7b8b0832
|
744096e063ffb4cdb017f60e6dfae410a51c789a
|
/ml/m08_wine2_keras.py
|
06ebe62cf59103482d4d188d56d1958c5811a866
|
[] |
no_license
|
elf0508/Study-bit
|
59ddab507b02c13a45913c05a4799ff946e63f95
|
a773d7643cbb1c0008e7ea01c32615c9e6e3678c
|
refs/heads/master
| 2022-12-31T11:53:44.344693
| 2020-10-16T09:04:01
| 2020-10-16T09:04:01
| 270,950,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,792
|
py
|
# keras 로 만들기
# 다중분류
import numpy as np
import pandas as pd
from keras.models import Sequential, Input
from keras.layers import Dense, Dropout
from keras.callbacks import EarlyStopping
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.preprocessing import MaxAbsScaler, RobustScaler
from sklearn.decomposition import PCA
from keras.utils import np_utils
ss = StandardScaler()
mms = MinMaxScaler()
mas = MaxAbsScaler()
rs = RobustScaler()
es = EarlyStopping(monitor = 'loss', mode = 'min', patience = 10)
pca = PCA(n_components = 10)
### 1. 데이터
wine = pd.read_csv('./data/csv/winequality-white.csv',
header = 0, index_col = None,
sep = ';', encoding = 'cp949')
print(wine.head())
print(wine.tail())
print(wine.shape) # (4898, 12)
## 1-1. 데이터 전처리
# 1-1-1. 결측치 확인
print(wine.isna()) # 확인 ok
## 1-2. numpy 파일로 변환 후 저장
wine = wine.values
print(type(wine)) # <class 'numpy.ndarray'>
print(wine)
print(wine.shape) # (4898, 12)
np.save('./data/wine_np.npy', arr = wine)
## 1-3. numpy 파일 불러오기
np.load('./data/wine_np.npy')
print(wine.shape) # (4898, 12)
## 1-4. 데이터 나누기
x = wine[:, :11]
y = wine[:, -1:]
print(x.shape) # (4898, 11)
print(y.shape) # (4898, 1)
## 1-5. train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size = 0.25)
print(x_train.shape) # (3673, 11)
print(x_test.shape) # (1225, 11)
print(y_train.shape) # (3673, 1)
print(y_test.shape) # (1225, 1)
## 1-6. 원핫인코딩
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print(y_train.shape) # (3673, 10)
print(y_test.shape) # (1225, 10)
## 1-7. 데이터 Scaling
rs.fit(x_train)
x_train = rs.transform(x_train)
x_test = rs.transform(x_test)
print(x_train[0]) # [0.33653846 0.21621622 0.25903614 0.01687117 0.24315068 0.12543554
# 0.31888112 0.06499518 0.41666667 0.23255814 0.77419355]
print(x_test[1]) # [0.40384615 0.10810811 0.29518072 0.01840491 0.17808219 0.04878049
# 0.38041958 0.13635487 0.4537037 0.30232558 0.32258065]
## 1-8. PCA
pca.fit(x_train)
x_train = pca.transform(x_train)
x_test = pca.transform(x_test)
print(x_train.shape) # (3673, 8)
print(x_test.shape) # (1225, 8)
# 2. 모델링
model = Sequential()
model.add(Dense(10, input_shape = (10, ), activation = 'relu'))
model.add(Dense(10))
model.add(Dense(10, activation = 'softmax'))
model.summary()
# 3. 모델 훈련
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['acc'])
model.fit(x_train, y_train, epochs = 10, batch_size = 32)
# 4. Model evaluation
# Keras model.evaluate() returns [loss, metric] in the order passed to
# compile(); the original printed res[1] for both lines, mislabelling the
# accuracy value as the loss.
res = model.evaluate(x_test, y_test)
print("loss : ", res[0])
print("acc : ", res[1])
'''
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
wine = pd.read_csv('winequality-white.csv',sep=';')
x = np.array(wine.iloc[:,0:-1])
y = np.array(wine.iloc[:,-1])
scaler = StandardScaler()
scaler.fit(x)
x = scaler.transform(x)
from sklearn.preprocessing import OneHotEncoder
y = y.reshape(-1,1)
aaa = OneHotEncoder()
aaa.fit(y)
y = aaa.transform(y).toarray()
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test, = train_test_split(
x, y, random_state=66, test_size=0.2 )
print(x_train.shape)
print(y_train.shape)
model = Sequential()
model.add(Dense(30, input_dim=11 ))
model.add(Dense(40))
model.add(Dense(120))
model.add(Dense(500,activation='relu'))
model.add(Dense(60))
model.add(Dense(32,activation='relu'))
model.add(Dense(7, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.fit(x_train, y_train, epochs=300, batch_size=10, validation_split=0.2)
loss, acc = model.evaluate(x_test,y_test)
print('keras의 acc는',acc)
# score = model.score(x_test,y_test) 이건 아마 mL model에 들어있는거니까 없다고 인식하게찌?
# print('score는',score)
# print(np.argmax(a, axis = 1)+1)
'''
|
[
"elf0508@naver.com"
] |
elf0508@naver.com
|
d15dd199e6c86808b473f526e605111671f36034
|
e11dff811ca981f428644fd70d10a7369c671bcb
|
/src/tools/ecos/cvxpy/cvxpy/problems/objective.py
|
13293559446a04fad84799c55d14674f9eada4e7
|
[
"GPL-3.0-only",
"GPL-3.0-or-later",
"MIT"
] |
permissive
|
riadnassiffe/Simulator
|
3c4a036b5635534929fdb04b0e9c96d64c0da71f
|
7d9ff09f26367d3714e3d10be3dd4a9817b8ed6b
|
refs/heads/master
| 2021-06-20T09:31:36.033427
| 2021-04-17T00:03:17
| 2021-04-17T00:03:17
| 16,033,879
| 0
| 0
|
MIT
| 2021-03-22T23:20:34
| 2014-01-18T20:58:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,758
|
py
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import cvxpy.utilities as u
from cvxpy.expressions.expression import Expression
import cvxpy.lin_ops.lin_utils as lu
class Minimize(u.Canonical):
    """An optimization objective for minimization.

    Wraps a scalar CVXPY expression and delegates canonicalization,
    variable/parameter discovery, and DCP checking to it.
    """
    # Used in the scalar-check error message and in __str__.
    NAME = "minimize"
    def __init__(self, expr):
        self._expr = Expression.cast_to_const(expr)
        # Validate that the objective resolves to a scalar.
        if self._expr.size != (1, 1):
            # NOTE(review): a ValueError would be more idiomatic than bare Exception.
            raise Exception("The '%s' objective must resolve to a scalar."
                            % self.NAME)
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self._expr))
    def __str__(self):
        return ' '.join([self.NAME, self._expr.name()])
    def canonicalize(self):
        """Pass on the target expression's objective and constraints.
        """
        return self._expr.canonical_form
    def variables(self):
        """Returns the variables in the objective.
        """
        return self._expr.variables()
    def parameters(self):
        """Returns the parameters in the objective.
        """
        return self._expr.parameters()
    def is_dcp(self):
        """The objective must be convex.
        """
        return self._expr.is_convex()
    @property
    def value(self):
        """The value of the objective expression.
        """
        return self._expr.value
    @staticmethod
    def primal_to_result(result):
        """The value of the objective given the solver primal value.

        Identity for minimization; Maximize overrides this with negation.
        """
        return result
class Maximize(Minimize):
    """An optimization objective for maximization.

    Implemented as minimization of the negated expression: canonicalize()
    negates the objective and primal_to_result() negates the solver value
    back.
    """
    NAME = "maximize"
    def canonicalize(self):
        """Negates the target expression's objective.
        """
        obj, constraints = super(Maximize, self).canonicalize()
        return (lu.neg_expr(obj), constraints)
    def is_dcp(self):
        """The objective must be concave.
        """
        return self._expr.is_concave()
    @staticmethod
    def primal_to_result(result):
        """The value of the objective given the solver primal value.
        """
        return -result
|
[
"riad.nassiffe@gmail.com"
] |
riad.nassiffe@gmail.com
|
76faca35f33e12cea802b44068cb8aa14880293c
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/menus/models_20201030115229.py
|
fe2fb42e539b2ceeceff986fda84db20415ecd35
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415
| 2021-01-05T14:55:45
| 2021-01-05T14:55:45
| 303,961,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 670
|
py
|
from django.db import models
from django_extensions.db.fields import AutoSlugField
from modelcluster.models import ClusterableModel
from wagtail.core.models import Orderable
from wagtail.admin.edit_handlers import FieldPanel
class MenuItem(Orderable):
    """One entry in a navigation menu.

    The original editor-snapshot referenced undefined names
    (``external_link``, ``internal_link``, bare ``open_in_new_tab``) and
    passed the undefined name ``blank`` positionally to CharField, all of
    which would raise NameError at import time. The fields below complete
    the intended definitions following the standard Wagtail menu pattern:
    either an external URL or an internal page may be set.
    """
    # Text shown for the link; optional so a page's own title can be used.
    link_title = models.CharField(max_length=50, blank=True)
    # External URL target; optional when link_page is used instead.
    link_url = models.CharField(max_length=500, blank=True)
    # Internal Wagtail page target; optional when link_url is used instead.
    link_page = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        related_name='+',
        on_delete=models.CASCADE,
    )
    # Whether the rendered link should open in a new browser tab.
    open_in_new_tab = models.BooleanField(default=False, blank=True)
class Menu(ClusterableModel):
    """A named navigation menu, identified by an auto-generated slug."""
    title = models.CharField(max_length=100)
    # Slug auto-derived from title but editable in the admin.
    slug = AutoSlugField(
        populate_from='title',
        editable=True,
    )
    # Wagtail admin edit panels.
    panels = [
        FieldPanel('title'),
        FieldPanel('slug'),
    ]
    def __str__(self):
        return self.title
|
[
"rucinska.patrycja@gmail.com"
] |
rucinska.patrycja@gmail.com
|
a550d9b8ed43cb34d14d403265c426d95c868ae4
|
74091dce735f281188d38d2f00d1a68e1d38ff7a
|
/design_patterns/observer/with_observer/observer_abc/__init__.py
|
fc74947e39033cd126ee8f4e688bf13e5b204ab0
|
[] |
no_license
|
nbiadrytski-zz/python-training
|
96741aa0ef37bda32d049fde5938191025fe2924
|
559a64aae2db51e11812cea5ff602f25953e8070
|
refs/heads/master
| 2023-05-07T04:08:23.898161
| 2019-12-10T12:12:59
| 2019-12-10T12:12:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
from design_patterns.observer.with_observer.observer_abc.observer_abc import AbsObserver
from design_patterns.observer.with_observer.observer_abc.subject_abc import AbsSubject
|
[
"Mikalai_Biadrytski@epam.com"
] |
Mikalai_Biadrytski@epam.com
|
2fd0d81d5757eb5301956fc98513bcb6f034e338
|
7410903c6cd5ef35c592af00c934fb21c369cbf2
|
/00_Code/01_LeetCode/15_3Sum.py
|
48ca0cefdb8c2e3ea535801f329f89a117a056e8
|
[
"MIT"
] |
permissive
|
KartikKannapur/Algorithms
|
f4e4726170599db0622d18e8c06a382e9bce9e77
|
66e3c8112826aeffb78bd74d02be1a8d1e478de8
|
refs/heads/master
| 2020-12-25T18:32:41.086518
| 2020-10-19T02:59:47
| 2020-10-19T02:59:47
| 93,961,043
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,341
|
py
|
"""
Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
Note: The solution set must not contain duplicate triplets.
For example, given array S = [-1, 0, 1, 2, -1, -4],
A solution set is:
[
[-1, 0, 1],
[-1, -1, 2]
]
Your runtime beats 48.41 % of python submissions
"""
class Solution(object):
    def threeSum(self, nums):
        """Return all unique triplets in nums that sum to zero.

        :type nums: List[int]
        :rtype: List[List[int]]

        Fixes vs. the original: triplets are returned as lists (matching
        the declared rtype) instead of tuples, duplicates are skipped with
        two-pointer advancement instead of a final set() pass, and the
        zero-sum branch now advances both pointers.
        """
        if len(nums) < 3:
            return []
        nums.sort()
        res = []
        for i in range(len(nums) - 2):
            # Skip duplicate anchors to avoid repeated triplets.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            # Sorted input: a positive anchor can never reach zero.
            if nums[i] > 0:
                break
            low, high = i + 1, len(nums) - 1
            while low < high:
                total = nums[i] + nums[low] + nums[high]
                if total < 0:
                    low += 1
                elif total > 0:
                    high -= 1
                else:
                    res.append([nums[i], nums[low], nums[high]])
                    # Step past duplicates on both ends before continuing.
                    while low < high and nums[low] == nums[low + 1]:
                        low += 1
                    while low < high and nums[high] == nums[high - 1]:
                        high -= 1
                    low += 1
                    high -= 1
        return res
|
[
"kartikkannapur@gmail.com"
] |
kartikkannapur@gmail.com
|
28d3ad73c52557c945c1d6527b8e8b08169df786
|
7370b067695d6636273ee635b3e78b022be16a62
|
/fullstack/vagrant/forum/forumdb.py
|
403e89ac5b0de33e55b2add36c6f6753c544d7d6
|
[] |
no_license
|
jreiher2003/intro-to-relational-databases
|
56b2f66e5f7a23144b8a1f011d0bcedcf5a07da3
|
a589cb736757708635b7b6bb6688cd2d9d574a85
|
refs/heads/master
| 2020-12-24T13:44:50.200881
| 2015-04-14T14:45:54
| 2015-04-14T14:45:54
| 33,775,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
#
# Database access functions for the web forum.
#
import psycopg2
import time
import bleach
## Get posts from database.
def GetAllPosts():
    '''Get all the posts from the database, sorted with the newest first.

    Returns:
      A list of dictionaries, where each dictionary has a 'content' key
      pointing to the post content, and 'time' key pointing to the time
      it was posted.
    '''
    ## Database connection
    DB = psycopg2.connect("dbname=forum")
    try:
        c = DB.cursor()
        c.execute("SELECT time, content FROM posts ORDER BY time DESC")
        # Materialize a real list: the original used a generator expression,
        # which the docstring contradicts and which would only be consumed
        # after DB.close(). Content is bleach-cleaned against stored XSS.
        posts = [{'content': str(bleach.clean(row[1])), 'time': str(row[0])}
                 for row in c.fetchall()]
    finally:
        # Close the connection even if the query raises.
        DB.close()
    return posts
## Add a post to the database.
def AddPost(content):
    '''Add a new post to the database.

    Args:
      content: The text content of the new post.
    '''
    connection = psycopg2.connect("dbname=forum")
    cursor = connection.cursor()
    # Parameterized insert; the timestamp column defaults in the schema.
    cursor.execute("INSERT INTO posts (content) VALUES (%s)", (content,))
    connection.commit()
    connection.close()
|
[
"jeffreiher@gmail.com"
] |
jeffreiher@gmail.com
|
d9115b79d1e0ebf6bd315330b1bc516e8c40b72b
|
c224200e8d273b2d215e1b68c8bb7798fe0ca714
|
/python/ccard/luhn.py
|
4d988dfaa53f5803ebe99205be2b09fe9e6a145e
|
[] |
no_license
|
mpranj/mcandre
|
c9c6db22be95f71a350bf05e922eb03befa9c6b1
|
9bf5c3ab0ee24ab7041ef4732d0017e869ae683d
|
refs/heads/master
| 2021-01-18T12:46:37.096984
| 2014-06-26T03:46:56
| 2014-06-26T03:46:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
"""Luhn checksum"""
def luhn(n):
"""luhnsum(int) -> bool
Mod 10 checksum by Hans Peter Luhn (1896-1964)
"""
s = 0
while n:
r = n % 100
n /= 100
z = r % 10
r /= 10 * 2
s += r / 10 + r % 10 + z
return s % 10 == 0
|
[
"andrew.pennebaker@gmail.com"
] |
andrew.pennebaker@gmail.com
|
0dc249d0af3a80caa218ae4f6819b1a99c530f06
|
92be2d8c4a64d5f8c43341be7f1e36b81fce56ab
|
/src/azure-cli/azure/cli/command_modules/monitor/aaz/latest/monitor/private_link_scope/private_endpoint_connection/__cmd_group.py
|
51676cfe96f1d5df414faa020e70a8f2b2eda9b2
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
allanpedroni/azure-cli
|
b31d3347f377208b502231266d4839196e574c4b
|
4e21baa4ff126ada2bc232dff74d6027fd1323be
|
refs/heads/dev
| 2023-08-31T18:27:03.240944
| 2023-08-31T08:49:58
| 2023-08-31T08:49:58
| 204,767,533
| 0
| 0
|
MIT
| 2023-09-14T13:32:41
| 2019-08-27T18:41:15
|
Python
|
UTF-8
|
Python
| false
| false
| 718
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command_group(
    "monitor private-link-scope private-endpoint-connection",
    is_preview=True,  # surfaced to users as a preview command group
)
class __CMDGroup(AAZCommandGroup):
    """Manage private endpoint connection of a private link scope resource.
    """
    # Container group only: subcommands register themselves elsewhere.
    pass


__all__ = ["__CMDGroup"]
|
[
"noreply@github.com"
] |
allanpedroni.noreply@github.com
|
a6dec3fb3b780def85cc7985436fbc0609ee67c1
|
612b2dcd643ca7b36ac141a1d62c73b8e5f5d1aa
|
/06_operacje_na_plikach_2019-10-28/zad_3_cytaty_5_linii.py
|
8b8614a0d4f0a6e66e0f536e5657adf3eda1e388
|
[] |
no_license
|
MirekPz/PyCode
|
e41fecb3bec8b40e41efe9db1be036038b94da1b
|
95e1c349beb4fcd0ec1d8c36d000665f28ee794f
|
refs/heads/master
| 2020-08-07T10:54:37.488009
| 2020-02-10T08:41:17
| 2020-02-10T08:41:17
| 213,421,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
"""
Wyświetl tylko 5 pierwszych linii
"""
filename = "cytaty.txt"
with open(filename, encoding="UTF-8") as file:
for i in range(5):
list_of_quotations = file.readline()
print(list_of_quotations)
|
[
"mirek@info-tur.pl"
] |
mirek@info-tur.pl
|
2482f871dff5c01612e22eef2f7419c903a53bca
|
eb473c4b2ca6cfdfa11536a460b88f2aa6dff8c8
|
/lib/dataformat/blockreplica.py
|
d072623269f1bd55ed55bcb45d6214af1483197b
|
[] |
no_license
|
thannan6/dynamo
|
4ff13bd85c9e15c755e89e67e26eaa214e0a5b39
|
8db847b8d0094890110cfc805a34703fb89f564f
|
refs/heads/master
| 2021-05-10T00:01:52.252731
| 2018-01-22T02:35:27
| 2018-01-22T02:35:27
| 118,819,891
| 0
| 0
| null | 2018-01-24T20:45:31
| 2018-01-24T20:45:31
| null |
UTF-8
|
Python
| false
| false
| 4,797
|
py
|
from exceptions import ObjectError
class BlockReplica(object):
    """Block placement at a site. Holds an attribute 'group' which can be None.
    BlockReplica size can be different from that of the Block."""

    # __slots__ keeps per-instance memory low; inventories hold many replicas.
    __slots__ = ['_block', '_site', 'group', 'is_complete', 'is_custodial', 'size', 'last_update', 'files']

    @property
    def block(self):
        # Read-only handle to the owning Block.
        return self._block

    @property
    def site(self):
        # Read-only handle to the hosting Site.
        return self._site

    def __init__(self, block, site, group, is_complete = False, is_custodial = False, size = -1, last_update = 0):
        self._block = block
        self._site = site
        self.group = group
        self.is_complete = is_complete
        self.is_custodial = is_custodial
        # A negative size means "inherit the full block size".
        if size < 0:
            self.size = block.size
        else:
            self.size = size
        self.last_update = last_update

        # set of File objects for incomplete replicas (not implemented)
        self.files = None

    def __str__(self):
        # NOTE(review): dereferences self.group.name although the class doc
        # says group can be None — would raise AttributeError in that case.
        return 'BlockReplica %s:%s (group=%s, is_complete=%s, size=%d, last_update=%d)' % \
            (self._site.name, self._block.full_name(),
             self.group.name, self.is_complete, self.size, self.last_update)

    def __repr__(self):
        return 'BlockReplica(block=%s, site=%s, group=%s)' % (repr(self._block), repr(self._site), repr(self.group))

    def __eq__(self, other):
        # Identity fast path, then name-based field comparison (replicas
        # loaded separately are equal when all fields match by name).
        # NOTE(review): group.name raises AttributeError when group is None.
        return self is other or \
            (self._block.full_name() == other._block.full_name() and self._site.name == other._site.name and \
            self.group.name == other.group.name and \
            self.is_complete == other.is_complete and self.is_custodial == other.is_custodial and \
            self.size == other.size and self.last_update == other.last_update)

    def __ne__(self, other):
        return not self.__eq__(other)

    def copy(self, other):
        """Copy mutable attributes from *other*; block and site must match."""
        if self._block.full_name() != other._block.full_name():
            raise ObjectError('Cannot copy a replica of %s into a replica of %s', other._block.full_name(), self._block.full_name())
        if self._site.name != other._site.name:
            raise ObjectError('Cannot copy a replica at %s into a replica at %s', other._site.name, self._site.name)

        self.group = other.group
        self.is_complete = other.is_complete
        self.is_custodial = other.is_custodial
        self.size = other.size
        self.last_update = other.last_update

    def unlinked_clone(self):
        """Return a clone whose block, site and group are themselves clones."""
        block = self._block.unlinked_clone()
        site = self._site.unlinked_clone()
        # NOTE(review): crashes when self.group is None.
        group = self.group.unlinked_clone()
        return BlockReplica(block, site, group, self.is_complete, self.is_custodial, self.size, self.last_update)

    def embed_into(self, inventory, check = False):
        """Insert or update this replica inside *inventory*.

        Resolves dataset, block, site and group by name (raising
        ObjectError when any is unknown), then either creates and links a
        new replica, leaves an identical existing one untouched, or copies
        this replica's fields onto the existing one.

        Returns (replica, updated) when *check* is True, else the replica.
        """
        try:
            dataset = inventory.datasets[self._block.dataset.name]
        except KeyError:
            raise ObjectError('Unknown dataset %s', self._block.dataset.name)

        block = dataset.find_block(self._block.name, must_find = True)

        try:
            site = inventory.sites[self._site.name]
        except KeyError:
            raise ObjectError('Unknown site %s', self._site.name)

        try:
            group = inventory.groups[self.group.name]
        except KeyError:
            raise ObjectError('Unknown group %s', self.group.name)

        replica = block.find_replica(site)
        updated = False
        if replica is None:
            # First appearance at this site: link into the dataset replica,
            # the block's replica set and the site's indices.
            replica = BlockReplica(block, site, group, self.is_complete, self.is_custodial, self.size, self.last_update)
            dataset_replica = dataset.find_replica(site, must_find = True)
            dataset_replica.block_replicas.add(replica)
            block.replicas.add(replica)
            site.add_block_replica(replica)
            updated = True
        elif check and (replica is self or replica == self):
            # identical object -> return False if check is requested
            pass
        else:
            replica.copy(self)
            site.update_partitioning(replica)
            updated = True

        if check:
            return replica, updated
        else:
            return replica

    def delete_from(self, inventory):
        """Unlink this replica from every inventory index referencing it."""
        dataset = inventory.datasets[self._block.dataset.name]
        block = dataset.find_block(self._block.name, must_find = True)
        site = inventory.sites[self._site.name]
        dataset_replica = site.find_dataset_replica(dataset)

        replica = block.find_replica(site, must_find = True)
        site.remove_block_replica(replica)
        dataset_replica.block_replicas.remove(replica)
        block.replicas.remove(replica)

    def write_into(self, store, delete = False):
        """Persist (or delete, when *delete* is True) this replica in *store*."""
        if delete:
            store.delete_blockreplica(self)
        else:
            store.save_blockreplica(self)
|
[
"yiiyama@mit.edu"
] |
yiiyama@mit.edu
|
851052faa0844695302437d571d870c4409cc072
|
3ef70fe63acaa665e2b163f30f1abd0a592231c1
|
/stackoverflow/venv/lib/python3.6/site-packages/cryptography/hazmat/backends/openssl/x25519.py
|
9aab25b86adb28ad744e0cc3682073d4dfda55a1
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
wistbean/learn_python3_spider
|
14914b63691ac032955ba1adc29ad64976d80e15
|
40861791ec4ed3bbd14b07875af25cc740f76920
|
refs/heads/master
| 2023-08-16T05:42:27.208302
| 2023-03-30T17:03:58
| 2023-03-30T17:03:58
| 179,152,420
| 14,403
| 3,556
|
MIT
| 2022-05-20T14:08:34
| 2019-04-02T20:19:54
|
Python
|
UTF-8
|
Python
| false
| false
| 5,580
|
py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import warnings
from cryptography import utils
from cryptography.hazmat.backends.openssl.utils import _evp_pkey_derive
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.x25519 import (
X25519PrivateKey, X25519PublicKey
)
_X25519_KEY_SIZE = 32
@utils.register_interface(X25519PublicKey)
class _X25519PublicKey(object):
    """OpenSSL-backed X25519 public key."""

    def __init__(self, backend, evp_pkey):
        self._backend = backend
        self._evp_pkey = evp_pkey

    def public_bytes(self, encoding=None, format=None):
        # Deprecated no-argument form: defaults to raw bytes with a warning.
        if encoding is None or format is None:
            if encoding is not None or format is not None:
                raise ValueError("Both encoding and format are required")
            else:
                warnings.warn(
                    "public_bytes now requires encoding and format arguments. "
                    "Support for calling without arguments will be removed in "
                    "cryptography 2.7",
                    utils.DeprecatedIn25,
                )
                encoding = serialization.Encoding.Raw
                format = serialization.PublicFormat.Raw

        # Raw must be requested for both parameters together.
        if (
            encoding is serialization.Encoding.Raw or
            format is serialization.PublicFormat.Raw
        ):
            if (
                encoding is not serialization.Encoding.Raw or
                format is not serialization.PublicFormat.Raw
            ):
                raise ValueError(
                    "When using Raw both encoding and format must be Raw"
                )

            return self._raw_public_bytes()

        # PEM/DER encodings only support SubjectPublicKeyInfo.
        if (
            encoding in serialization._PEM_DER and
            format is not serialization.PublicFormat.SubjectPublicKeyInfo
        ):
            raise ValueError(
                "format must be SubjectPublicKeyInfo when encoding is PEM or "
                "DER"
            )

        return self._backend._public_key_bytes(
            encoding, format, self, self._evp_pkey, None
        )

    def _raw_public_bytes(self):
        # EVP_PKEY_get1_tls_encodedpoint yields the 32-byte raw public value.
        ucharpp = self._backend._ffi.new("unsigned char **")
        res = self._backend._lib.EVP_PKEY_get1_tls_encodedpoint(
            self._evp_pkey, ucharpp
        )
        self._backend.openssl_assert(res == 32)
        self._backend.openssl_assert(ucharpp[0] != self._backend._ffi.NULL)
        # Hand buffer ownership to the FFI GC so OPENSSL_free runs on release.
        data = self._backend._ffi.gc(
            ucharpp[0], self._backend._lib.OPENSSL_free
        )
        return self._backend._ffi.buffer(data, res)[:]
@utils.register_interface(X25519PrivateKey)
class _X25519PrivateKey(object):
    """OpenSSL-backed X25519 private key."""

    def __init__(self, backend, evp_pkey):
        self._backend = backend
        self._evp_pkey = evp_pkey

    def public_key(self):
        # Round-trip the public half through a DER SubjectPublicKeyInfo BIO
        # to obtain an independent EVP_PKEY holding only the public key.
        bio = self._backend._create_mem_bio_gc()
        res = self._backend._lib.i2d_PUBKEY_bio(bio, self._evp_pkey)
        self._backend.openssl_assert(res == 1)
        evp_pkey = self._backend._lib.d2i_PUBKEY_bio(
            bio, self._backend._ffi.NULL
        )
        self._backend.openssl_assert(evp_pkey != self._backend._ffi.NULL)
        evp_pkey = self._backend._ffi.gc(
            evp_pkey, self._backend._lib.EVP_PKEY_free
        )
        return _X25519PublicKey(self._backend, evp_pkey)

    def exchange(self, peer_public_key):
        # ECDH-style shared-secret derivation with the peer's public key.
        if not isinstance(peer_public_key, X25519PublicKey):
            raise TypeError("peer_public_key must be X25519PublicKey.")

        return _evp_pkey_derive(
            self._backend, self._evp_pkey, peer_public_key
        )

    def private_bytes(self, encoding, format, encryption_algorithm):
        # NOTE(review): the first test checks PublicFormat.Raw although this
        # is a *private* key; a PrivateFormat.Raw request only enters this
        # branch via the encoding check, after which the inner condition
        # enforces PrivateFormat.Raw + NoEncryption — confirm against the
        # upstream cryptography fix for this check.
        if (
            encoding is serialization.Encoding.Raw or
            format is serialization.PublicFormat.Raw
        ):
            if (
                format is not serialization.PrivateFormat.Raw or
                encoding is not serialization.Encoding.Raw or not
                isinstance(encryption_algorithm, serialization.NoEncryption)
            ):
                raise ValueError(
                    "When using Raw both encoding and format must be Raw "
                    "and encryption_algorithm must be NoEncryption()"
                )

            return self._raw_private_bytes()

        # PEM/DER encodings only support PKCS8 for private keys.
        if (
            encoding in serialization._PEM_DER and
            format is not serialization.PrivateFormat.PKCS8
        ):
            raise ValueError(
                "format must be PKCS8 when encoding is PEM or DER"
            )

        return self._backend._private_key_bytes(
            encoding, format, encryption_algorithm, self._evp_pkey, None
        )

    def _raw_private_bytes(self):
        # When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 we can
        # switch this to EVP_PKEY_new_raw_private_key
        # The trick we use here is serializing to a PKCS8 key and just
        # using the last 32 bytes, which is the key itself.
        bio = self._backend._create_mem_bio_gc()
        res = self._backend._lib.i2d_PKCS8PrivateKey_bio(
            bio, self._evp_pkey,
            self._backend._ffi.NULL, self._backend._ffi.NULL,
            0, self._backend._ffi.NULL, self._backend._ffi.NULL
        )
        self._backend.openssl_assert(res == 1)
        pkcs8 = self._backend._read_mem_bio(bio)
        self._backend.openssl_assert(len(pkcs8) == 48)
        return pkcs8[-_X25519_KEY_SIZE:]
|
[
"354142480@qq.com"
] |
354142480@qq.com
|
0c53c8c3135c11ac44c7ed5ad9f0094da5ce9c6a
|
839b26d2d837f256423c11908a2a3618ab8a23f3
|
/dashboard/dashboard/update_bug_with_results_test.py
|
7b4282e95a1380e535001797e6710cb237eb8f78
|
[
"BSD-3-Clause"
] |
permissive
|
Mdlglobal-atlassian-net/catapult
|
79be5d4ec5d681c1d2f37ae83534a02f4a4ec72a
|
e9a386951413e7cbf983abf968626b2e5097fc38
|
refs/heads/master
| 2022-02-27T15:18:45.524790
| 2020-06-01T07:12:27
| 2020-06-01T22:57:01
| 268,672,431
| 0
| 1
|
BSD-3-Clause
| 2020-06-02T01:31:05
| 2020-06-02T01:31:04
| null |
UTF-8
|
Python
| false
| false
| 2,403
|
py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import mock
from dashboard import update_bug_with_results
from dashboard.common import namespaced_stored_object
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
# In this class, we patch apiclient.discovery.build so as to not make network
# requests, which are normally made when the IssueTrackerService is initialized.
@mock.patch('apiclient.discovery.build', mock.MagicMock())
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch.object(utils, 'TickMonitoringCustomMetric', mock.MagicMock())
class UpdateBugWithResultsTest(testing_common.TestCase):
    """Tests for update_bug_with_results anomaly merging."""

    def setUp(self):
        super(UpdateBugWithResultsTest, self).setUp()
        self.SetCurrentUser('internal@chromium.org', is_admin=True)
        # Repository configuration consumed by the code under test.
        namespaced_stored_object.Set('repositories', {
            'chromium': {
                'repository_url': 'https://chromium.googlesource.com/chromium/src'
            },
        })

    def testMapAnomaliesToMergeIntoBug(self):
        # Add anomalies.
        test_keys = list(
            map(utils.TestKey, [
                'ChromiumGPU/linux-release/scrolling-benchmark/first_paint',
                'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time'
            ]))
        # One anomaly on the destination bug (12345), one on the source (54321).
        anomaly.Anomaly(
            start_revision=9990,
            end_revision=9997,
            test=test_keys[0],
            median_before_anomaly=100,
            median_after_anomaly=200,
            bug_id=12345).put()
        anomaly.Anomaly(
            start_revision=9990,
            end_revision=9996,
            test=test_keys[0],
            median_before_anomaly=100,
            median_after_anomaly=200,
            bug_id=54321).put()

        # Map anomalies to base(dest_bug_id) bug.
        update_bug_with_results._MapAnomaliesToMergeIntoBug(
            dest_issue=update_bug_with_results.IssueInfo('chromium', 12345),
            source_issue=update_bug_with_results.IssueInfo('chromium', 54321))

        # After the merge no anomaly should remain attached to bug 54321.
        anomalies = anomaly.Anomaly.query(
            anomaly.Anomaly.bug_id == int(54321),
            anomaly.Anomaly.project_id == 'chromium').fetch()
        self.assertEqual(0, len(anomalies))


if __name__ == '__main__':
    unittest.main()
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
8f926e08fc1dc61bbc483c5c73e906cf776d5658
|
6930a434c0506d44bf8a8e81cb86e95c219c3a77
|
/python/day19/code/bool.py
|
dbd9efd62a9608472ca8da6ae9bf5321b9911546
|
[] |
no_license
|
Conquerk/test
|
ed15d5603538340559556c9e0f20cc61ad3e4486
|
7ff42c99b8a2132c6dd1c73315ff95cfef63a8f6
|
refs/heads/master
| 2020-04-19T01:47:28.322929
| 2019-01-28T01:52:00
| 2019-01-28T01:52:00
| 167,882,236
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 256
|
py
|
# Demo of Python's truthiness protocol: without __bool__, bool(x) falls
# back to __len__; a non-zero length makes the instance truthy.
class a:
    # def __bool__(self):
    #     print("__bool__ was called")  # would take priority over __len__
    #     return False
    def __len__(self):
        print('len被调用')  # prints "len was called" (runtime string kept)
        return 5


x = a()
print(bool(x))  # True: __len__ returned 5 (non-zero)
if x:
    print("x 为真值")  # "x is truthy"
else:
    print("x 为假值")  # "x is falsy"
|
[
"tarena@tedu.cn"
] |
tarena@tedu.cn
|
4a66f501d21dc0e21b25cd557749a229c108d7bf
|
f85ce2baf753d65e8666bbda062acbdb0ccdb5ad
|
/leetcode/venv/lib/python2.7/site-packages/pyutil/common/multi_proxy.py
|
4464833378477d0125b57f50987802b986064008
|
[] |
no_license
|
KqSMea8/PycharmProjects
|
2a9d3fa59d08c77daf63be427da27695d4dea471
|
c592d879fd79da4e0816a4f909e5725e385b6160
|
refs/heads/master
| 2020-04-14T11:54:56.435247
| 2019-01-02T10:15:36
| 2019-01-02T10:15:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
# coding=utf-8
__author__ = 'qiukun'
class MultiProxy():
    """Fan any method call out to several objects.

    Typical usage::

        p = MultiProxy([a, b])

    A method invoked on ``p`` is invoked on ``a`` and then ``b``; the
    return value of the *last* object's method is returned, e.g.::

        buf = p.r()
        p.write()
    """

    def __init__(self, objs):
        """
        :param objs: an iterable of objects to be proxied.
        :return: the proxy
        """
        self.objs = objs

    def __getattr__(self, item):
        def dispatch(*args, **kwargs):
            # Call every proxied object in order, keeping the last result.
            last_result = None
            for target in self.objs:
                last_result = getattr(target, item)(*args, **kwargs)
            return last_result

        return dispatch
|
[
"zhangyifeng@bytedance.com"
] |
zhangyifeng@bytedance.com
|
ebca6a39db8c0a22574b611a0271ac7bc9a10d1d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02791/s398923350.py
|
525b6e69293b56cf550f10a02c0494389a8a6380
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)


def main():
    """Count prefix minima: positions i where P[i] is below all earlier values."""
    n = int(readline())
    perm = list(map(int, readline().split()))
    running_min = n + 1  # larger than any element of the permutation
    count = 0
    for value in perm:
        if value < running_min:
            count += 1
            running_min = value
    print(count)


if __name__ == '__main__':
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ccccc364d5e5480a14cd01e9facbb6cb9445987c
|
3e24611b7315b5ad588b2128570f1341b9c968e8
|
/pacbiolib/pacbio/pythonpkgs/pbtranscript/lib/python2.7/site-packages/pbtranscript/ice/make_input_fasta_fofn.py
|
77356562bf1686d47a5b7653eb24734ce3d41869
|
[
"BSD-2-Clause"
] |
permissive
|
bioCKO/lpp_Script
|
dc327be88c7d12243e25557f7da68d963917aa90
|
0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2
|
refs/heads/master
| 2022-02-27T12:35:05.979231
| 2019-08-27T05:56:33
| 2019-08-27T05:56:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,072
|
py
|
#! python
"""Given input.fofn, for each movie.bas|bax.h5 file in the fofn,
call pls2fasta to generate a movie.bax|bas.h5.fasta file in a
specified directory, and then trim both ends of each read in fasta
files. Finally, add all these fasta files to fasta_fofn
(e.g., input.fasta.fofn).
"""
import logging
import sys
import os.path as op
from pbtranscript.__init__ import get_version
from pbtranscript.ice.IceUtils import convert_fofn_to_fasta
def set_parser(parser):
    """Attach the three positional arguments this tool expects to *parser*."""
    arguments = (
        ("input_fofn", "Input bax.h5 fofn, e.g., input.fofn"),
        ("fasta_fofn", "Output fasta fofn, e.g., input.fasta.fofn"),
        ("fasta_out_dir", "Where to save generated fasta files"),
    )
    for name, help_text in arguments:
        parser.add_argument(name, help=help_text)
from pbcore.util.ToolRunner import PBToolRunner
class MakeFastaFofnRunner(PBToolRunner):
    """ice_make_input_fasta_fofn runner.

    Converts bas/bax.h5 files listed in a fofn to fasta files and writes
    a fofn listing the generated fasta files.
    """

    def __init__(self):
        desc = "Converting bas/bax.h5 files within a fofn to fasta " + \
               "files and create a fasta fofn."
        PBToolRunner.__init__(self, desc)
        set_parser(self.parser)

    def getVersion(self):
        """Return version string."""
        return get_version()

    def run(self):
        """Run the conversion; return 0 on success, 1 on failure."""
        logging.info("Running {f} v{v}.".format(f=op.basename(__file__),
                                                v=get_version()))
        args = self.args
        try:
            convert_fofn_to_fasta(fofn_filename=args.input_fofn,
                                  out_filename=args.fasta_fofn,
                                  fasta_out_dir=args.fasta_out_dir,
                                  force_overwrite=False)
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the logged-failure behavior.
        except Exception:
            logging.exception("Failed to convert fofn {f} to fasta.".
                              format(f=args.input_fofn))
            return 1
        return 0
def main():
    """Main function: delegate parsing and execution to the tool runner."""
    runner = MakeFastaFofnRunner()
    return runner.start()


if __name__ == "__main__":
    sys.exit(main())
|
[
"409511038@qq.com"
] |
409511038@qq.com
|
99b4337e7934e957fe1496cda64778117b102922
|
bc2a85e8dd9244f89e2f1801cc19d570a87c74ed
|
/Leetcode/Algorithms/Easy/Arrays/MeetingTime.py
|
4e389ed460c2d8ac9a19f0e4c01beffd544cb5ce
|
[] |
no_license
|
christian-miljkovic/interview
|
1cab113dbe0096e860a3ae1d402901a15e808e32
|
63baa1535b788bc3e924f3c24a799bade6a2eae3
|
refs/heads/master
| 2023-01-11T14:53:09.304307
| 2020-02-04T17:35:12
| 2020-02-04T17:35:12
| 193,549,798
| 0
| 0
| null | 2023-01-05T05:56:15
| 2019-06-24T17:28:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,944
|
py
|
"""
Time Planner
Implement a function meetingPlanner that given the availability, slotsA and slotsB, of two people and a meeting duration dur, returns the earliest time slot that works for both of them and is of duration dur. If there is no common time slot that satisfies the duration requirement, return an empty array.
Time is given in a Unix format called Epoch, which is a nonnegative integer holding the number of seconds that have elapsed since 00:00:00 UTC, Thursday, 1 January 1970.
Each person’s availability is represented by an array of pairs. Each pair is an epoch array of size two. The first epoch in a pair represents the start time of a slot. The second epoch is the end time of that slot. The input variable dur is a positive integer that represents the duration of a meeting in seconds. The output is also a pair represented by an epoch array of size two.
In your implementation assume that the time slots in a person’s availability are disjointed, i.e, time slots in a person’s availability don’t overlap. Further assume that the slots are sorted by slots’ start time.
Implement an efficient solution and analyze its time and space complexities.
Examples:
input: slotsA = [[10, 50], [60, 120], [140, 210]]
slotsB = [[0, 15], [60, 70]]
dur = 8
output: [60, 68]
input: slotsA = [[10, 50], [60, 120], [140, 210]]
slotsB = [[0, 15], [60, 70]]
dur = 12
output: [] # since there is no common slot whose duration is 12
"""
def meeting_planner(slotsA, slotsB, dur):
    """Return the earliest [start, start + dur] slot free in both calendars.

    Args:
        slotsA, slotsB: each person's availability as sorted, disjoint
            [start, end] epoch pairs.
        dur: required meeting duration in seconds (positive int).

    Returns:
        [start, end] of the earliest common slot of length `dur`, or []
        when none exists.

    Fixes vs. the original: the O(n*m) double loop kept appending *every*
    qualifying pair into a single flat list instead of returning the
    earliest slot; this two-pointer merge over the sorted inputs is
    O(n + m) and returns exactly one slot.
    """
    i = j = 0
    while i < len(slotsA) and j < len(slotsB):
        # Overlap of the two current slots.
        start = max(slotsA[i][0], slotsB[j][0])
        end = min(slotsA[i][1], slotsB[j][1])
        if end - start >= dur:
            return [start, start + dur]
        # Advance whichever slot ends first; it cannot overlap later slots.
        if slotsA[i][1] < slotsB[j][1]:
            i += 1
        else:
            j += 1
    return []
|
[
"cmm892@stern.nyu.edu"
] |
cmm892@stern.nyu.edu
|
116c8f86cfaa52e4e72d3024334d871d5eb5ebab
|
8311a0bcf3f2126d622f928483ce2ea9d6a7cb0d
|
/Code/Matthew/django/mysite/polls/migrations/0001_initial.py
|
967a82248a77534828deedc67a2aec779b4c3227
|
[] |
no_license
|
guam68/class_iguana
|
857247dca0ff732d11f7fb0d3dc761ec83846c94
|
e4359d32dfe60423a643c21df5636669016ad2c0
|
refs/heads/master
| 2020-05-01T06:33:22.611127
| 2019-03-13T23:07:41
| 2019-03-13T23:07:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
# Generated by Django 2.1.5 on 2019-02-07 18:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the polls app: Question and Choice tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        # Link each Choice to its Question; deleting a Question cascades.
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
        ),
    ]
|
[
"flux2341@gmail.com"
] |
flux2341@gmail.com
|
aa59fe19a4acba2c251b00e81b73e30f348144c8
|
9e20f7e71faa2853516f88ee7672e1323d12e8f7
|
/seq_lda/algorithms/__init__.py
|
d3ed380d3428aab45ed45e014fe016bca4131374
|
[] |
no_license
|
e2crawfo/seq_lda
|
3270a07781369d318c0cf2e75cdbfee96423c52d
|
ebe3caea127575f6e54884bf0d32ac6d4a876d4e
|
refs/heads/master
| 2020-09-17T18:55:19.562861
| 2017-03-28T16:10:52
| 2017-03-28T16:10:52
| 67,877,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
from .markov_lda import generate_markov_chains
from .lda import LDA
from .mssg import MSSG, SingleMSSG
from .baseline import OneByOne, Aggregate
|
[
"eric.crawford@mail.mcgill.ca"
] |
eric.crawford@mail.mcgill.ca
|
be6ac3323527ef02eb9d8a966231f7f830aaed04
|
43ff15a7989576712d0e51f0ed32e3a4510273c0
|
/tools/pocs/bugscan/exp_885.py
|
701317e53735eabbc5a8dcc364e6082aba740f46
|
[] |
no_license
|
v1cker/kekescan
|
f2b51d91a9d6496e2cdc767eb6a600171f513449
|
3daa1775648439ba9e0003a376f90b601820290e
|
refs/heads/master
| 2020-09-19T16:26:56.522453
| 2017-06-15T02:55:24
| 2017-06-15T02:55:24
| 94,495,007
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
# -*- coding: utf-8 -*-
from dummy import *
from miniCurl import Curl
curl = Curl()
#/usr/bin/python
#-*- coding: utf-8 -*-
#Refer http://www.wooyun.org/bugs/wooyun-2015-0110861
#__Author__ = 上善若水
#_PlugName_ = 08CMS_sql Plugin
#_FileName_ = 08CMS_sql.py
def assign(service, arg):
    """Claim the scan task only when the fingerprinted service is 08cms."""
    if service != "08cms":
        return None
    return True, arg
def audit(arg):
    """Probe *arg* (site base URL) for the 08CMS tblprefix SQL injection."""
    # Vulnerable endpoint: the table-prefix query parameter.
    url = arg + "info.php?fid=1&tblprefix=cms_msession"
    # Error-based injection via updatexml(); the selected value is the probe marker.
    payload = "/**/where/**/1/**/and/**/updatexml(1,concat(0x37,(select/**/md5(520)/**/limit/**/0,1)),1)%23"
    geturl = url + payload
    code, head, body, errcode, final_url = curl.curl2(geturl,cookie="umW_msid=rsLQWU")
    # Marker is presumably md5('520'); its echo in the response body
    # indicates the injected SELECT executed — TODO confirm the hash.
    if code == 200 and 'cf67355a3333e6e143439161adc2d82e' in body:
        security_hole(url)
if __name__ == '__main__':
    # Standalone smoke test against a known-affected host.
    from dummy import *
    audit(assign('08cms', 'http://www.pxmfw.com/')[1])
|
[
"liyueke@huobi.com"
] |
liyueke@huobi.com
|
c0c2166d1a614fb9c14325a21fda4c3df736ef1f
|
3b9338d99cf8090387418e32ca81617f072c39fb
|
/waflib/extras/dumbpreproc.py
|
bc4b2d04061ab96a7da92066adb4f81e8c95e0e8
|
[] |
no_license
|
sillsdevarchive/wsiwaf
|
8ca14c286bafceb9ee6fad740b64ad7131282dc3
|
2dcddafc3602a7220acbe995df4ba85abb06b767
|
refs/heads/master
| 2020-12-30T17:10:21.701380
| 2017-05-12T05:12:17
| 2017-05-12T05:12:17
| 91,052,898
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,490
|
py
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
Dumb C/C++ preprocessor for finding dependencies
It will look at all include files it can find after removing the comments, so the following
will always add the dependency on both "a.h" and "b.h"::
#include "a.h"
#ifdef B
#include "b.h"
#endif
int main() {
return 0;
}
To use::
def configure(conf):
conf.load('compiler_c')
conf.load('c_dumbpreproc')
"""
import re, sys, os, string, traceback
from waflib import Logs, Build, Utils, Errors
from waflib.Logs import debug, error
from waflib.Tools import c_preproc
# Matches `#include <...>` / `#include "..."` lines (and the `%:` digraph).
re_inc = re.compile(
    '^[ \t]*(#|%:)[ \t]*(include)[ \t]*[<"](.*)[>"]\r*$',
    re.IGNORECASE | re.MULTILINE)


def lines_includes(node):
    """Return (keyword, path) pairs for every #include found in *node*.

    Trigraphs, line continuations and comments are normalized away first;
    includes inside inactive #ifdef branches are still reported (hence
    the "dumb" preprocessor).
    """
    code = node.read()
    if c_preproc.use_trigraphs:
        for (a, b) in c_preproc.trig_def: code = code.split(a).join(b)
    code = c_preproc.re_nl.sub('', code)
    code = c_preproc.re_cpp.sub(c_preproc.repl, code)
    return [(m.group(2), m.group(3)) for m in re.finditer(re_inc, code)]
parser = c_preproc.c_parser


class dumb_parser(parser):
    """Dependency scanner that records every include, ignoring conditionals."""

    def addlines(self, node):
        # Skip nodes already on the stack to avoid include cycles.
        if node in self.nodes[:-1]:
            return
        self.currentnode_stack.append(node.parent)
        # The POPFILE sentinel restores the search directory afterwards.
        self.lines = lines_includes(node) + [(c_preproc.POPFILE, '')] + self.lines

    def start(self, node, env):
        self.addlines(node)
        while self.lines:
            (x, y) = self.lines.pop(0)
            if x == c_preproc.POPFILE:
                self.currentnode_stack.pop()
                continue
            self.tryfind(y)


# Replace waf's full preprocessor with the dumb variant.
c_preproc.c_parser = dumb_parser
|
[
"tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85"
] |
tnagy1024@f0382ac9-c320-0410-b3f0-b508d59f5a85
|
987fa66a52474194186e6d244ee565863549eaf6
|
53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61
|
/.history/EMR/EMRryzd_2_20190507094011.py
|
d2f51d07e0695ada05beddc4e3e39e063c14d700
|
[] |
no_license
|
cyc19950621/python
|
4add54894dc81187211aa8d45e5115903b69a182
|
d184b83e73334a37d413306d3694e14a19580cb0
|
refs/heads/master
| 2020-04-11T20:39:34.641303
| 2019-07-02T12:54:49
| 2019-07-02T12:54:49
| 162,078,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,283
|
py
|
#-*- coding: UTF-8 -*-
# Data-cleaning script: scan EMR admission-diagnosis text files for respiratory
# disease terms, collect matching lines, then mine association rules.
import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHRryzd')#collect the .txt files under the directory
hxjb = open(r'D:\python\EMR\hxjbml.txt',errors="ignore")#catalogue of respiratory disease names
hxjbdic = hxjb.readlines()#one disease term per line
ryzd=[]
for emrtxt in emrtxts:
    f = open(emrtxt,'r',errors="ignore")#errors= needed for Chinese text
    emrpath = os.path.basename(emrtxt)
    emrpath = os.path.splitext(emrpath)[0]
    line_out = []
    for line in f.readlines():
        line = re.sub('\n','',line)
        line = re.sub(r'(.+?)肺炎','肺炎',line)#normalise every "...pneumonia" variant to the bare term
        for hxjbc in hxjbdic:#check each disease term against the line
            hxjbc = re.sub('\n','',hxjbc)
            if line.find(hxjbc) >-1:
                line_out.append(line)
    line_output = EMRdef.delre(line_out)
    # NOTE(review): the deduplicated line_output is computed but line_out is
    # appended below — confirm whether line_output was intended here.
    ryzd.append(line_out)
    #line = '\n'.join(line_output)
    #EMRdef.text_create(r'D:\DeepLearning ER\EHRryzd2','.txt' ,emrpath,line)
import orangecontrib.associate.fpgrowth as oaf
often=dict(oaf.frequent_itemsets(ryzd, .01))#frequent itemsets at 1% support
print(often)
rules = oaf.association_rules(often, .5) #0.5 confidence threshold
rules = list(rules)
|
[
"1044801968@qq.com"
] |
1044801968@qq.com
|
4052771b1b4311c09a293921817f66b82411e14f
|
e095a91a3424ecc364c4532e8fc705b728a0d1b1
|
/CodeWars/补充知识/reduce函数.py
|
cfbe488542ecf69f30da70eb19144d14b6799a9e
|
[] |
no_license
|
Anakinliu/PythonProjects
|
caed257e71d2e52f691abc5095c4aca5c052feb2
|
2246794a88d06eaa381db1b3a72e9bc54a315dd7
|
refs/heads/master
| 2021-06-03T05:51:09.319613
| 2021-01-26T02:35:38
| 2021-01-26T02:35:38
| 101,546,309
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
from functools import reduce
from itertools import groupby
def sum(a, b):
    # NOTE: intentionally shadows the builtin sum() for this demo
    return a + b
def mul(a, b):
    return a * b
def fuck(v, _):
    # accumulator demo: ignores the element and appends 0 to the running list
    print('v is: ', v)
    v.append(0)
    return v
lst = range(2)
# reduce arguments: function, sequence[, initial value]
print(reduce(sum, lst))
# computing a factorial...
print(reduce(mul, range(1, 4)))
lst2 = [1, 3, 5, -1, 10, 0, 999, 100, -9, -12, -3, 1]
# finding the maximum...
print(reduce(lambda a, b : a if a > b else b, lst2))
# print(reduce(fuck,lst, [1,2,3]))
#BEGIN equivalent to:
# it = iter(lst) # build an iterator from any iterable
# value = [1,2,3]
# for i in it:
#     value = fuck(value, i)
# print(value)
#END#
"""
Step 1: take the first two elements of the sequence and compute the result.
Then apply the same function to that result and the element right after the
second one, storing the new result again.
Repeat until no elements remain in the container.
The final result is returned and printed on the console.
"""
|
[
"gugeliuyinquan@gmail.com"
] |
gugeliuyinquan@gmail.com
|
4d13f16a6de050457f0c137b74e3f6643612f28d
|
6b9b032a5516c8d7dbb26deeb1b189022f8f9411
|
/LeetCode/dp/91.解码方法.py
|
c999d3a7c5e8a176bfe03eb5318fd70a5954222c
|
[] |
no_license
|
mrmenand/Py_transaction
|
84db99a0010ae90f43fba6b737d7035e48af55fb
|
7e82422c84ad699805cc12568b8d3d969f66a419
|
refs/heads/master
| 2021-07-13T21:15:03.714689
| 2020-06-21T11:49:31
| 2020-06-21T11:49:31
| 176,281,954
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# 91. 解码方法
from functools import lru_cache
class Solution:
    """LeetCode 91: count the decodings of a digit string (1->A ... 26->Z)."""

    def numDecodings(self, s: str) -> int:
        """Return the number of ways *s* can be decoded.

        Iterative bottom-up DP replacing the original recursive version:
        @lru_cache on an instance method keeps every instance alive for the
        cache's lifetime (ruff B019), and the slicing recursion costs O(n^2)
        time and memory. The empty string counts as one decoding, matching
        the original behavior.
        """
        n = len(s)
        # ways[i] = number of decodings of the suffix s[i:]; empty suffix = 1
        ways = [0] * (n + 1)
        ways[n] = 1
        for i in range(n - 1, -1, -1):
            if s[i] == "0":
                continue  # a code cannot start with '0'
            ways[i] = ways[i + 1]
            # take two digits when they form a value 10..26
            if i + 1 < n and int(s[i:i + 2]) <= 26:
                ways[i] += ways[i + 2]
        return ways[0]
|
[
"1006024749@qq.com"
] |
1006024749@qq.com
|
8be4be017c6af7fce4bd719c6b78a351b1b14568
|
c9000e5e30825b29febbefa5ad00da1f57551f8e
|
/02/fandengyuan/homework.py
|
c6bce72ffa5f56803c26555e16520532b283fbab
|
[] |
no_license
|
xiaotian1991/actual-10-homework
|
81c58b24f58fc87e4890f1475ad83de8b66ee53b
|
0b379ca6189f843f121df4db5814c83262f9981a
|
refs/heads/master
| 2021-06-12T23:35:52.954510
| 2017-03-24T07:41:18
| 2017-03-24T07:41:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
# Insert the last element of the list into its sorted position by swapping it
# leftwards while it is smaller than its predecessor (one insertion-sort pass).
# NOTE(review): this does NOT fully sort the list — confirm whether a complete
# sort was intended. (Python 2 print syntax.)
arr = [3,1,5,9,2,11,7]
arr0 = []
for i in range(len(arr)):
    arr0.append(int(arr[i]))
m = len(arr0)-1
while m >= 1 and arr0[m] < arr0[m-1]:
    arr0[m],arr0[m-1] = arr0[m-1],arr0[m]
    m = m - 1
print arr0
|
[
"shengxinjing@addnewer.com"
] |
shengxinjing@addnewer.com
|
d7ea612d308d7b4f02c590f2d7d783457b7fee26
|
3b60e6f4bbc011003ac4929f01eb7409918deb79
|
/Analysis_v1/Simulation/Pythia/genfragments/ADDGravToGG_NegInt-0_LambdaT-6500_M-4000To6500_TuneCUEP8M1_13TeV-pythia8_cfi.py
|
8a74a89ef9ff1b0945512ccf71c71c4c03d5767a
|
[] |
no_license
|
uzzielperez/Analyses
|
d1a64a4e8730325c94e2bc8461544837be8a179d
|
1d66fa94763d7847011ea551ee872936c4c401be
|
refs/heads/master
| 2023-02-09T04:54:01.854209
| 2020-09-07T14:57:54
| 2020-09-07T14:57:54
| 120,850,137
| 0
| 0
| null | 2020-06-17T16:48:16
| 2018-02-09T03:14:04
|
C++
|
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# Pythia8 fragment: ADD graviton -> diphoton at 13 TeV, LambdaT = 6500 GeV,
# negative interference off, diphoton mass window 4000-6500 GeV.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    maxEventsToPrint = cms.untracked.int32(1),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    filterEfficiency = cms.untracked.double(1.0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    comEnergy = cms.double(13000.),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            'ExtraDimensionsLED:LambdaT = 6500.0',
            'ExtraDimensionsLED:n = 2',
            'ExtraDimensionsLED:ffbar2gammagamma = on',
            'ExtraDimensionsLED:gg2gammagamma = on',
            'ExtraDimensionsLED:CutOffmode = 2',
            'ExtraDimensionsLED:NegInt= 0',
            'PhaseSpace:pTHatMin = 70.0',
            # BUG FIX: min/max were swapped (mHatMin=6500 with mHatMax=4000 is
            # an empty window); the fragment name specifies M-4000To6500.
            'PhaseSpace:mHatMin = 4000.0',
            'PhaseSpace:mHatMax = 6500.0'
        ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CUEP8M1Settings',
                                    'processParameters',
                                    )
    )
)
|
[
"uzzie.perez@cern.ch"
] |
uzzie.perez@cern.ch
|
2ed4905e9b0d4556acdfe1ddf6a5e250b43f2f6f
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2273/60870/314985.py
|
5e53cfd0963d3bbed85c252351f8e04244c40b52
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,702
|
py
|
# Reads the judge's test cases from stdin, then prints hard-coded expected
# outputs for the known inputs (an online-judge workaround); unrecognised
# inputs just echo the parsed data for inspection.
num_test = int(input())
info_list = []
for i in range(num_test):
    info = input().split()
    info = [int(x) for x in info]
    # info[0] is the number of data rows that follow for this test case
    for j in range(info[0]):
        data = input().split()
        data = [int(x) for x in data]
        info_list.append(data)
# Each branch below matches one known judge input and prints its answers.
if info_list == [[0, 1, 1], [1, 1, 1], [1, 1, 3], [2, 1, 10], [3, 1, 4], [0, 1, 1], [1, 7, 2], [2, 5, 10], [1, 3, 1], [4, 3, 17], [4, 3, 18], [4, 4, 19], [1, 1, 1], [8, 1, 100]]:
    print(15)
    print(316)
elif info_list[0:31] == [[0, 214224, 4], [1, 300000, 75], [1, 291002, 29], [1, 300000, 64], [1, 300000, 49], [1, 233141, 41], [1, 300000, 64], [1, 141084, 99], [1, 168700, 82], [1, 300000, 73], [0, 15818, 36], [1, 63903, 41], [1, 38513, 14], [1, 26382, 53], [1, 42336, 90], [1, 45105, 52], [1, 17960, 27], [1, 18440, 75], [1, 64777, 36], [1, 40886, 78], [1, 33546, 97], [1, 7257, 40], [1, 15815, 10], [1, 37789, 74], [1, 47362, 63], [1, 39039, 73], [1, 1339, 24], [1, 37665, 40], [1, 9870, 20], [1, 12339, 99]]:
    print(26998514)
    print(9400115)
    print(5790773)
    print(2919180)
    print(1954284)
elif info_list == [[0, 21, 4], [1, 30, 7], [1, 29, 29], [1, 30, 6], [1, 30, 4], [1, 23, 4], [1, 30, 6], [1, 14, 9], [1, 16, 8], [1, 30, 7], [0, 4, 1], [1, 5, 1], [1, 1, 3], [0, 1, 1], [1, 7, 2], [2, 5, 10], [1, 3, 1], [4, 3, 17], [4, 3, 18], [4, 4, 19], [0, 1, 1], [1, 7, 1], [1, 9, 3], [2, 4, 10], [3, 2, 4]]:
    print(2171)
    print(5)
    print(245)
    print(22)
elif info_list == [[0, 1, 1], [1, 1, 1], [1, 1, 3], [0, 1, 1], [1, 7, 2], [2, 5, 10], [1, 3, 1], [4, 3, 17], [4, 3, 18], [4, 4, 19], [0, 1, 1], [1, 1, 1], [1, 1, 3], [2, 1, 10], [3, 1, 4]]:
    print(5)
    print(245)
    print(15)
else:
    print(info_list)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
94dc35c61b39b874b3b81884b9524967947c0b81
|
c3eda1a67e2be5b200e6c9f5a80f20fbcce75bcb
|
/persephone/builds/migrations/0001_initial.py
|
0b7a613e655fa3b74eb0ec33645b168124808fac
|
[
"MIT"
] |
permissive
|
karamanolev/persephone
|
b389a871f6fae58525eeedaec3739ec563c9b934
|
6d1887ae4e1d1941da3dbc416901e9de4764cbbb
|
refs/heads/master
| 2023-05-14T20:28:24.209056
| 2023-04-26T20:50:46
| 2023-04-27T10:32:25
| 86,364,458
| 14
| 2
|
MIT
| 2023-04-11T10:10:50
| 2017-03-27T17:29:12
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 4,720
|
py
|
# Generated by Django 3.0.5 on 2020-04-11 23:17
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration for the builds app: creates the
    GlobalSettings, Project, Build and Screenshot tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='GlobalSettings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('google_login_enabled', models.BooleanField(default=False)),
                ('google_whitelist', models.TextField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('public_endpoint', models.CharField(max_length=255)),
                ('github_repo_name', models.CharField(max_length=128)),
                ('github_api_key', models.CharField(max_length=128)),
                ('auto_archive_no_diff_builds', models.BooleanField(default=True)),
                ('auto_approve_master_builds', models.BooleanField(default=True)),
                ('max_master_builds_to_keep', models.IntegerField(default=20)),
                ('max_branch_builds_to_keep', models.IntegerField(default=20)),
                ('supersede_same_branch_builds', models.BooleanField(default=True)),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='Build',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('state', models.IntegerField(choices=[(0, 'Initializing'), (1, 'Running'), (2, 'Finishing'), (3, 'Pending Review'), (4, 'No Diff'), (5, 'Approved'), (6, 'Rejected'), (7, 'Failed'), (8, 'Superseded'), (9, 'Failing')], default=0)),
                ('original_build_number', models.CharField(blank=True, max_length=64, null=True)),
                ('original_build_url', models.CharField(blank=True, max_length=256, null=True)),
                ('date_started', models.DateTimeField(default=django.utils.timezone.now)),
                ('date_finished', models.DateTimeField(null=True)),
                ('date_approved', models.DateTimeField(null=True)),
                ('date_rejected', models.DateTimeField(null=True)),
                ('reviewed_by', models.CharField(blank=True, max_length=128, null=True)),
                ('branch_name', models.CharField(blank=True, db_index=True, max_length=128, null=True)),
                ('pull_request_id', models.CharField(blank=True, max_length=16, null=True)),
                ('commit_hash', models.CharField(blank=True, db_index=True, max_length=64, null=True)),
                ('archived', models.BooleanField(db_index=True, default=False)),
                ('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='builds.Build')),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='builds', to='builds.Project')),
            ],
            options={
                'ordering': ('-date_started',),
            },
        ),
        migrations.CreateModel(
            name='Screenshot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('state', models.IntegerField(choices=[(0, 'Pending'), (1, 'Matching'), (2, 'Different'), (3, 'New'), (4, 'Deleted')], default=0)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=255)),
                ('metadata_json', models.TextField(blank=True, null=True)),
                ('image', models.ImageField(upload_to='screenshots/')),
                ('image_diff', models.ImageField(null=True, upload_to='screenshot_diffs/')),
                ('image_diff_amount', models.FloatField(null=True)),
                ('archived', models.BooleanField(db_index=True, default=False)),
                ('build', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='screenshots', to='builds.Build')),
                ('parent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='children', to='builds.Screenshot')),
            ],
            options={
                'unique_together': {('build', 'name')},
            },
        ),
    ]
|
[
"ivailo@karamanolev.com"
] |
ivailo@karamanolev.com
|
5931b02613512b291a1c6c3ed56317fd9de7b0c7
|
4dd1d8fa59e20061e2c12e540fc52b1b305e575b
|
/source/sims-2/vapor-box/s7/mkmov.py
|
fc6d4a9fa9ec7401b461a45e20313be61bfd08fe
|
[
"MIT"
] |
permissive
|
ammarhakim/ammar-simjournal
|
f63521906a97d55ab290a5960d94758139944c89
|
5019f4723e20db80a20db6f2bd454c2fd3241412
|
refs/heads/master
| 2023-06-08T08:18:11.722779
| 2023-06-02T15:06:43
| 2023-06-02T15:06:43
| 204,050,516
| 3
| 3
| null | 2022-02-01T16:53:13
| 2019-08-23T18:28:44
|
Lua
|
UTF-8
|
Python
| false
| false
| 3,609
|
py
|
from pylab import *
import tables
import euler
import pylab
import tables
import math
import numpy
import pylab
import numpy
from matplotlib import rcParams
import matplotlib.pyplot as plt
# customization for figure
# Global matplotlib style applied to every frame rendered below.
rcParams['lines.linewidth'] = 2
rcParams['font.size'] = 18
rcParams['xtick.major.size'] = 8 # default is 4
rcParams['xtick.major.width'] = 3 # default is 0.5
rcParams['ytick.major.size'] = 8 # default is 4
rcParams['ytick.major.width'] = 3 # default is 0.5
rcParams['figure.facecolor'] = 'white'
#rcParams['figure.subplot.bottom'] = 0.125
#rcParams['figure.subplot.right'] = 0.85 # keep labels/ticks of colobar in figure
rcParams['image.interpolation'] = 'none'
rcParams['image.origin'] = 'lower'
rcParams['contour.negative_linestyle'] = 'solid'
rcParams['savefig.bbox'] = 'tight'
# Math/LaTex fonts:
# http://matplotlib.org/users/mathtext.html
# http://matplotlib.org/users/usetex.html
# Example: xlabel(r'$t \cdot l / V_{A,bc}$')
rcParams['mathtext.default'] = 'regular' # match the font used for regular text
def colorbar_adj(obj, mode=1, redraw=False, _fig_=None, _ax_=None, aspect=None):
    '''
    Add a colorbar adjacent to obj, with a matching height

    :param obj: mappable artist (e.g. the result of pcolormesh/imshow)
    :param mode: 1 = take figure/axes from obj, 2 = use the current figure/axes
    :param redraw: redraw the canvas after adding the colorbar
    :param _fig_, _ax_: explicit figure/axes; overwritten when mode is 1 or 2
    :param aspect: axes aspect, see
        http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.set_aspect ;
        E.g., to fill the rectangle, try "auto"
    :return: the created colorbar
    '''
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    if mode == 1:
        _fig_ = obj.figure
        _ax_ = obj.axes
    elif mode == 2:  # assume obj is in the current figure/axis instance
        _fig_ = plt.gcf()
        _ax_ = plt.gca()
    _divider_ = make_axes_locatable(_ax_)
    # reserve 5% of the axes width for the colorbar
    _cax_ = _divider_.append_axes("right", size="5%", pad=0.05)
    _cbar_ = _fig_.colorbar(obj, cax=_cax_)
    if aspect is not None:  # idiom fix: was "aspect != None"
        _ax_.set_aspect(aspect)
    if redraw:
        _fig_.canvas.draw()
    return _cbar_
# Physical constants and derived quantities (SI units).
gasGamma = 5.0/3.0  # adiabatic index (monatomic gas)
amu = 1.66053892e-27 # Kg
mLi = 6.941*amu # Kg (lithium atomic mass)
kb = 1.38065e-23 # J/K
Tinit = 800+273.14 # K (800 C; note the exact offset is 273.15)
cs0 = sqrt(kb*Tinit/mLi)  # thermal speed scale at Tinit, m/s
tEnd = 5*2.0/cs0  # end time: 5*(2.0/cs0); presumably five sound-crossing times of a 2 m length — confirm
def pressure(q):
    # thin wrapper: delegate pressure computation to the euler fluid helper
    return euler.fluidEx.getP(q)
def mach(q):
    # thin wrapper: delegate Mach-number computation to the euler fluid helper
    return euler.fluidEx.getMach(q)
def getMeshGrid(grid):
    """Build a cell-centered meshgrid from a Vs-schema structured-grid node.

    Reads vsLowerBounds/vsUpperBounds/vsNumCells from the node's attributes
    and returns the (X, Y) arrays of cell-center coordinates.
    """
    lower = grid._v_attrs.vsLowerBounds
    upper = grid._v_attrs.vsUpperBounds
    cells = grid._v_attrs.vsNumCells
    xl, yl = lower
    xu, yu = upper
    nx, ny = cells
    dx = (xu - xl) / nx
    dy = (yu - yl) / ny
    xs = linspace(xl + 0.5 * dx, xu - 0.5 * dx, nx)
    ys = linspace(yl + 0.5 * dy, yu - 0.5 * dy, ny)
    return meshgrid(xs, ys)
# Load the mask field (first component) used to blank out masked regions.
# NOTE(review): tables.openFile is the legacy PyTables 2.x API (open_file in 3.x).
fh = tables.openFile("s7-four-box-chain_inOut.h5")
maskField = fh.root.StructGridField[:,:,0]
def mkFig(fh, XX, YY, pdat, nm, tl):
    """Plot pdat on the XX/YY mesh, masked by the global maskField, with
    title tl, and save the figure to file nm."""
    tm = fh.root.timeData._v_attrs.vsTime
    Valf = 0.1  # NOTE(review): Valf and tmAlf are computed but unused here
    tmAlf = 5*tm/tEnd
    # hide cells where the mask is negative
    dat = numpy.ma.masked_where(maskField < 0.0, pdat)
    f = figure(1)
    im = pcolormesh(XX, YY, dat.transpose())
    title("%s" % tl)
    axis('image')
    colorbar_adj(im)
    savefig(nm)
    close()
# Render number density, pressure, temperature and Mach-number frames for the
# selected output files (currently only frame 10).
for i in range(10,11):
    print ("Working on %d .." % i)
    fh = tables.openFile("s7-four-box-chain_q_%d.h5" % i)
    q = fh.root.StructGridField
    X, Y = getMeshGrid(fh.root.StructGrid)
    numDensity = q[:,:,0]/mLi  # mass density -> number density
    mkFig(fh, X, Y, numDensity, 's7-four-box-chain_numDensity_%05d.png' % i, "Number Density")
    press = pressure(q)
    mkFig(fh, X, Y, press, 's7-four-box-chain_press_%05d.png' % i, "Pressure [Pa]")
    temp = press/(numDensity*kb)  # ideal-gas law: T = p/(n*kb)
    mkFig(fh, X, Y, temp-273.15, 's7-four-box-chain_temp_%05d.png' % i, "Temperature [C]")
    machN = mach(q)
    mkFig(fh, X, Y, machN, 's7-four-box-chain_mach_%05d.png' % i, "Mach Number")
    fh.close()
|
[
"11265732+ammarhakim@users.noreply.github.com"
] |
11265732+ammarhakim@users.noreply.github.com
|
5b2224f3c84090e21c38bb5254fff1fc2cc14a75
|
114c1f7ceff04e00591f46eeb0a2eb387ac65710
|
/g4g/DS/Trees/Binary_Trees/Construction_and_Conversion/2_construct_tree_from_inorder_and_level_order.py
|
be8b1575e1d72ff06ab39f696a67b1dc1a2ce5d4
|
[] |
no_license
|
sauravgsh16/DataStructures_Algorithms
|
0783a5e6dd00817ac0b6f2b856ad8d82339a767d
|
d3133f026f972f28bd038fcee9f65784f5d3ea8b
|
refs/heads/master
| 2020-04-23T03:00:29.713877
| 2019-11-25T10:52:33
| 2019-11-25T10:52:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
''' Construct Binary Tree from Inorder and Level Order Traversal '''
class Node(object):
    """A binary-tree node: a value plus left/right child links."""

    def __init__(self, val):
        self.val = val
        self.left = self.right = None
def construct_tree(inorder, levelorder):
    """Build the binary tree whose inorder and level-order traversals are given.

    The first level-order element present in `inorder` is the subtree root;
    its index splits `inorder` into the left and right subtrees. (Python 2.)
    """
    if not inorder:
        return
    for i in range(len(levelorder)):
        if levelorder[i] in inorder:
            node = Node(levelorder[i])
            ino_index = inorder.index(levelorder[i])
            break
    # debug trace showing how the inorder list is split at the root
    print inorder[:ino_index], inorder[ino_index+1:]
    node.left = construct_tree(inorder[:ino_index], levelorder)
    node.right = construct_tree(inorder[ino_index+1:], levelorder)
    return node
def inorder_traversal(root):
    """Print the inorder traversal (left, node, right) on a single line."""
    if not root:
        return
    inorder_traversal(root.left)
    print root.val,
    inorder_traversal(root.right)
# Example from the problem statement: build the tree and verify by printing
# its inorder traversal (should reproduce `inorder`).
levelorder = [20, 8, 22, 4, 12, 10, 14]
inorder = [4, 8, 10, 12, 14, 20, 22]
root = construct_tree(inorder, levelorder)
inorder_traversal(root)
|
[
"GhoshSaurav@JohnDeere.com"
] |
GhoshSaurav@JohnDeere.com
|
4e16fdbe2e92cfb9355b49f7f2b61231b5481fe7
|
e6bc1f55371786dad70313eb468a3ccf6000edaf
|
/Datasets/the-minion-game/Correct/082.py
|
95c75f16f90cb4f3654bfe9b2114f728c53db3fe
|
[] |
no_license
|
prateksha/Source-Code-Similarity-Measurement
|
9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0
|
fb371b837917794d260a219a1ca09c46a5b15962
|
refs/heads/master
| 2023-01-04T07:49:25.138827
| 2020-10-25T14:43:57
| 2020-10-25T14:43:57
| 285,744,963
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
# Minion game: Kevin scores all substrings starting with a vowel, Stuart all
# substrings starting with a consonant; a substring starting at position pos
# contributes (len - pos) occurrences.
string = input()
stuart = 0
kevin = 0
vowels = {"A", "E", "I", "O", "U"}
length = len(string)
for pos, ch in enumerate(string):
    substrings_here = length - pos
    if ch in vowels:
        kevin += substrings_here
    else:
        stuart += substrings_here
if kevin > stuart:
    print("Kevin", kevin)
elif stuart > kevin:
    print("Stuart", stuart)
else:
    print("Draw")
|
[
"pratekshau@gmail.com"
] |
pratekshau@gmail.com
|
442ca8bf52d89c3b46aa97ea92168ad89ffd55b1
|
243b7c1162264e381ab6575f493bd4fb97ced325
|
/src/comments/migrations/0005_comment_parent.py
|
02d0344397530913347a2b50e8802bfd0fe04aa7
|
[
"MIT"
] |
permissive
|
trivvet/djangoAdvance
|
a409dd3003ab5f60b1621e7677826b2002e0d8c8
|
28891893869c1c0c3cf67d7f496dda96322de18c
|
refs/heads/master
| 2020-04-05T22:05:14.272989
| 2018-12-09T11:00:58
| 2018-12-09T11:00:58
| 157,244,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-28 16:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the self-referential `parent` field to Comment (threaded replies)."""
    dependencies = [
        ('comments', '0004_auto_20181126_1850'),
    ]
    operations = [
        migrations.AddField(
            model_name='comment',
            name='parent',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='comments.Comment'),
        ),
    ]
|
[
"trivvet@gmail.com"
] |
trivvet@gmail.com
|
ec9d3ab0161bd0be2d1d9cc810b7baf064cb9258
|
0c110eb32f2eaea5c65d40bda846ddc05757ced6
|
/scripts/mastersort/scripts_dir/p7477_run1L6.py
|
fd95e6f425fc95d1d5f1740df9048f4676b66a4c
|
[] |
no_license
|
nyspisoccog/ks_scripts
|
792148a288d1a9d808e397c1d2e93deda2580ff4
|
744b5a9dfa0f958062fc66e0331613faaaee5419
|
refs/heads/master
| 2021-01-18T14:22:25.291331
| 2018-10-15T13:08:24
| 2018-10-15T13:08:24
| 46,814,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops
# Stage the raw scanner data for exam 7477 / run1L6 into the working tree,
# unpack the MRDC archive, rename the slice files to *.dcm, and submit the
# DICOM conversion job.
dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7477', 'run1L6']
uf.buildtree(dest_root, dst_path_lst)
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/2539/E2539_e2401779/s2411337_1904_1L6_s10', '/ifs/scratch/pimri/soccog/test_working/7477/run1L6')
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7477/run1L6','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7477/run1L6')
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7477/run1L6'):
    if 'MRDC' in f and 'gz' not in f:
        # give raw MRDC slice files a .dcm extension so DICOM tools accept them
        old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7477/run1L6', f)
        new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7477/run1L6', f + '.dcm')
        os.rename(old, new)
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7477/run1L6', '7477_run1L6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7477/run1L6', '7477_run1L6', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
|
[
"katherine@Katherines-MacBook-Pro.local"
] |
katherine@Katherines-MacBook-Pro.local
|
f0e45b1fb4b83b8df90170a17ff2e2c3e5ee84de
|
7d8b5220152b4ef4876c489d6648be56bc83c8e7
|
/exercises/development/beginner/exercise_12.py
|
78175e5a2f27eacb75544c488c37742e2dba2c08
|
[
"CC-BY-4.0",
"ISC",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
comp-think/comp-think.github.io
|
8f89518e7a463376b431f55fb7f495cb3019d4a5
|
e48a7ecf3b1799471271e01430e089e8f8e3c68d
|
refs/heads/master
| 2023-01-04T20:38:27.593237
| 2023-01-02T14:48:54
| 2023-01-02T14:48:54
| 157,171,226
| 52
| 22
|
NOASSERTION
| 2023-01-02T14:48:55
| 2018-11-12T07:11:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
# Test case for the function
def test_f(n1, n2, expected):
    """Return True iff f(n1, n2) equals *expected* (was an if/else returning
    True/False — replaced by the direct comparison)."""
    return f(n1, n2) == expected
# Code of the function
def f(n1, n2):
    """Return list(range(d)), where d is the absolute difference of n1 and n2."""
    return list(range(abs(n1 - n2)))
# Tests — each call should print True
print(test_f(3, 4, [0]))
print(test_f(4, 2, [0, 1]))
print(test_f(9, 0, [0, 1, 2, 3, 4, 5, 6, 7, 8]))
|
[
"essepuntato@gmail.com"
] |
essepuntato@gmail.com
|
ad38a8f2cb5608beb486c95d05efd28affd3e33f
|
2bccab3cea54fdf283533d91b4a88363847b565d
|
/triple-center-loss/triple_center_model.py
|
50bfb7111808affc301bed6a03c61cf867c47ae2
|
[] |
no_license
|
AmberzzZZ/classification_keras
|
647727597cc086cc72f532583ad80c6d88ecdce8
|
8e1886f130452b0f5ad30f7e9a32eb8388babb9a
|
refs/heads/master
| 2020-08-28T22:51:53.832386
| 2020-05-26T02:35:44
| 2020-05-26T02:35:44
| 217,844,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,274
|
py
|
from models import *
from keras.layers import Embedding, Lambda
import tensorflow as tf
from keras.utils import to_categorical
from keras.utils import plot_model
def TCL(y_true, y_pred):
    """Pass-through Keras loss: the model output already is the loss value."""
    return y_pred
def l2distance(args, n_classes):
    """For each sample return [squared distance to its own class center,
    minimum squared distance to any other class center].

    args: (embedding, center_standard, y_true), where center_standard holds
    one center row per class and y_true the integer class labels.
    """
    embedding, center_standard, y_true = args
    n_centers = center_standard.shape[0]
    lst = []
    for i in range(n_centers):
        lst.append(K.sum(K.square(embedding-center_standard[i,0,:]), 1, keepdims=True))
    distances = K.concatenate(lst, axis=1)  # (batch, n_classes) squared distances
    classes = K.arange(0, n_classes, dtype=tf.float32)
    y_true = K.repeat_elements(y_true, n_classes, axis=1)
    mask = K.cast(K.equal(y_true, classes), dtype=tf.float32)  # one-hot of y_true
    # distances to all *other* centers; the own class is masked to +inf
    inter_distances = tf.where(tf.equal(mask, 0.0), distances, np.inf*tf.ones_like(mask))
    min_inter_distance = tf.math.reduce_min(inter_distances, axis=1, keepdims=True)
    # distance to the *own* center; other classes are masked to +inf
    intra_distances = tf.where(tf.equal(mask, 1.0), distances, np.inf*tf.ones_like(mask))
    intra_distance = tf.math.reduce_min(intra_distances, axis=1, keepdims=True)
    return [intra_distance, min_inter_distance]
def sharedEmbedding(n_classes, embedding_size, x):
    # NOTE(review): despite the name, each call builds a *new* Embedding layer,
    # so weights are not shared between calls — confirm intent.
    return Embedding(n_classes, embedding_size)(x)
def triple_center_model(lr=3e-4, input_shape=(512,512,1), n_classes=10, m=4):
    """Build and compile a classifier with an auxiliary triplet-center loss.

    lr: learning rate; m: triplet margin. Returns a model with inputs
    [image, label, standard-labels] and outputs [softmax, triplet-center loss].
    """
    x_input = Input(shape=input_shape)
    basemodel = base_model(input_shape)
    embedding = basemodel(x_input) # (None,100)
    # cls branch
    softmax = Dense(n_classes, activation='softmax')(embedding) # dense3
    # center branch
    embedding_size = embedding.shape.as_list()[-1] # 100: the outdim of dense1
    y_input = Input((1,))
    # ##### past calculation of l2_loss, keep to compare ####
    # center = sharedEmbedding(n_classes, embedding_size, y_input)
    # l2_loss = Lambda(lambda x: K.sum(K.square(x[0] - x[1][:, 0]), 1, keepdims=True), name='l2_loss')([embedding, center])
    # #####
    labels = np.arange(n_classes).reshape([-1,1])
    y_standard_input = Input(tensor=K.constant(labels)) # (10,1) assume n_classes=10
    center_standard = sharedEmbedding(n_classes, embedding_size, y_standard_input) # (10, 1, 100)
    intra_distance, min_inter_distance = Lambda(l2distance, arguments={'n_classes': n_classes},
                                                name='l2distance')([embedding, center_standard, y_input])
    # hinge: max(intra + margin - min_inter, 0)
    triplet_center_loss = Lambda(lambda x: K.maximum(x[0]+m-x[1],0),
                                 name='triple_center_loss')([intra_distance, min_inter_distance])
    model = Model(inputs=[x_input, y_input, y_standard_input], outputs=[softmax, triplet_center_loss])
    sgd = SGD(lr, momentum=0.9, decay=1e-6, nesterov=True)  # NOTE(review): unused; adam is compiled below
    adam = Adam(lr, beta_1=0.9, beta_2=0.999)
    model.compile(optimizer=adam,
                  loss=['categorical_crossentropy', TCL],
                  metrics=['acc']) # loss_weights
    return model
if __name__ == '__main__':
    train_path = "data/train/"
    val_path = "data/val/"
    n_classes = 3
    target_size = 28
    batch_size = 128
    # Load training images, add a channel axis, one-hot encode the labels.
    x_train, y_train = loadData(train_path, target_size)
    x_train = np.expand_dims(x_train, axis=-1)
    y_train = to_categorical(y_train, num_classes=n_classes)
    print(x_train.shape, y_train.shape)
    model = triple_center_model(lr=3e-4, input_shape=(target_size,target_size,1), n_classes=n_classes)
    # plot_model(model, to_file='triple_center_model.png', show_shapes=True, show_layer_names=True)
    filepath = "./triple_center_model_{epoch:02d}_val_acc_{dense_2_acc:.3f}.h5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    # NOTE(review): y_dummy (all zeros) is fed both as the label input and as
    # the dummy target for the loss output — confirm the real integer labels
    # were not intended for the label input.
    y_dummy = np.zeros((x_train.shape[0], 1))
    model.fit(x=[x_train, y_dummy],
              y=[y_train, y_dummy],
              batch_size=batch_size,
              epochs=100, verbose=1,
              callbacks=[checkpoint],
              validation_split=0.2)
    # model.load_weights('triple_center_model_01_val_acc_0.981.h5', by_name=True)
    # img = cv2.imread("data/test/d2/d2_0002.png", 0)
    # img = cv2.resize(img, (target_size, target_size))
    # tmp = np.reshape(img, (1, target_size, target_size, 1))
    # dummy = np.array([1])
    # preds = model.predict([tmp, dummy])[0]
    # print(preds)
    # label = np.argmax(preds)
    # print(label)
|
[
"774524217@qq.com"
] |
774524217@qq.com
|
2f810088c1d7c89839e81cd9c11e22c2d9e2f920
|
1185c629b091e09366aec9830d09ecd1b51dddda
|
/eval.py
|
7fee4cd1b8819c5d70a410ec118be451f76e6231
|
[] |
no_license
|
billy-inn/refe
|
9ed79b8cfed83225cbc81a8637d0bafc24e0e494
|
deeaa1934ea7011e22dc7d3d98eedd6144212c7e
|
refs/heads/master
| 2021-07-03T20:26:47.329020
| 2017-09-24T02:33:16
| 2017-09-24T02:33:16
| 104,605,333
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 946
|
py
|
import tensorflow as tf
import numpy as np
import pandas as pd
import config
from optparse import OptionParser
from task import Task
import logging
from model_param_space import param_space_dict
def train(model_name, data_name, params_dict, logger):
    """Refit the named model on the named dataset with the given parameters."""
    task = Task(model_name, data_name, 1, params_dict, logger)
    task.refit()
def parse_args(parser):
    """Register the CLI options on *parser* and return (options, args)."""
    parser.add_option("-m", "--model", dest="model_name", type="string",
                      default="best_TransE_L2")
    parser.add_option("-d", "--data", dest="data_name", type="string",
                      default="wn18")
    return parser.parse_args()
def main(options):
    """Configure root logging and launch training with the preset best params."""
    logger = logging.getLogger()
    logging.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s',level=logging.INFO)
    train(options.model_name, options.data_name, params_dict=param_space_dict[options.model_name], logger=logger)
if __name__ == "__main__":
    # Parse the CLI options and run training.
    parser = OptionParser()
    options, args = parse_args(parser)
    main(options)
|
[
"bly930725@gmail.com"
] |
bly930725@gmail.com
|
71e678a0332eb9b0dddc74e7ee7677d3d3e5f0be
|
d10c5d3603e027a8fd37115be05e62634ec0f0a5
|
/10_Supervised-Learning-with-scikit-learn/10_ex_2-10.py
|
684d17126431f630f2a9e550a279918f00947165
|
[] |
no_license
|
stacygo/2021-01_UCD-SCinDAE-EXS
|
820049125b18b38ada49ffc2036eab33431d5740
|
027dc2d2878314fc8c9b2796f0c2e4c781c6668d
|
refs/heads/master
| 2023-04-29T01:44:36.942448
| 2021-05-23T15:29:28
| 2021-05-23T15:29:28
| 335,356,448
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 555
|
py
|
# Exercise 2-10: K-Fold CV comparison
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
# Predict life expectancy from the Gapminder features ('Region' dropped as
# non-numeric) and compare the mean CV score for 3 vs 10 folds.
df = pd.read_csv('input/gm_2008_region.csv')
y = df['life'].values
X = df.drop(['life', 'Region'], axis=1).values
# Create a linear regression object: reg
reg = LinearRegression()
# Perform 3-fold CV
cvscores_3 = cross_val_score(reg, X, y, cv=3)
print(np.mean(cvscores_3))
# Perform 10-fold CV
cvscores_10 = cross_val_score(reg, X, y, cv=10)
print(np.mean(cvscores_10))
|
[
"stacy.gorbunova@gmail.com"
] |
stacy.gorbunova@gmail.com
|
a357f99eeb2586079e4f9f2f7d69c6f57ffcb713
|
8522034ed44d22a50b45f36e7dea057b1ca9c9bd
|
/core/views.py
|
49d2d84d2b27fb7ee828f7685fc2c656a7e45160
|
[] |
no_license
|
JayjeetAtGithub/oth2
|
71c769150d132a253ce9a64da83442ed3409a592
|
d8be9b8e69d23c137045ce8485b27e08300db809
|
refs/heads/master
| 2020-03-22T14:04:51.838326
| 2018-07-16T18:11:18
| 2018-07-16T18:11:18
| 140,152,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,524
|
py
|
from django.shortcuts import render
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .models import Player , Level , TotalLevel
from .serializers import PlayerSerializer , LevelSerializer , TotalLevelSerializer , LeaderboardSerializer , UserSerializer
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from datetime import datetime
class Leaderboard(APIView):
    """Recompute player rankings (best score first, earlier timestamp breaking
    ties) and return the serialized leaderboard."""

    def get(self, request, format=None):
        user = request.user
        if not user.is_authenticated:
            return Response("Unauthenticated")
        ranked = Player.objects.order_by('-score', 'timestamp')
        for position, entry in enumerate(ranked, start=1):
            entry.rank = position
            entry.save()
        serializer = LeaderboardSerializer(Player.objects.all(), many=True)
        return Response(serializer.data)
class Rules(APIView):
    """Return the game rules page marker for authenticated users."""

    def get(self, request, format=None):
        if request.user.is_authenticated:
            return Response('Rules')
        return Response("Unauthenticated")
class RegisterUser(APIView):
    def post(self,request,format=None):
        """Create a user and a linked Player from the posted profile, or return
        the existing user's data (HTTP 200) when the username is taken."""
        current_user = User.objects.filter(username=request.data['username']).first()
        if current_user is not None:
            serializer = UserSerializer(current_user)
            return Response(serializer.data)
        else:
            user = User.objects.create_user(request.data['username'])
            user.email = request.data['email']
            user.first_name = request.data['first_name']
            user.last_name = request.data['last_name']
            user.save()
            # every registered user gets a linked Player profile
            player = Player(user=user , player_name = user.first_name + " " + user.last_name)
            player.save()
            serializer = UserSerializer(user)
            return Response(serializer.data,status=status.HTTP_201_CREATED)
class Index(APIView):
    """Route the player to the right landing view: current level, win, finish,
    or the public index page for anonymous visitors."""

    def get(self, request, format=None):
        user = request.user
        if not user.is_authenticated:
            return Response("index")
        # Highest defined level number (stored on the TotalLevel row with id=1).
        last_level = TotalLevel.objects.filter(id=1).first().total_level
        player = Player.objects.filter(user=user).first()
        # BUG FIX: .filter() never raises Level.DoesNotExist (only .get() does),
        # so the original try/except could never reach the win/finish branches.
        level = Level.objects.filter(level_number=player.current_level).first()
        if level is not None:
            return Response("level")
        if player.current_level > last_level:
            return Response("win")
        return Response("finish")
class Answer(APIView):
    """Grade a submitted answer for the player's current level; on a correct
    answer advance the player, award 10 points, and update level accuracy."""

    def post(self, request, format=None):
        user = request.user
        if not user.is_authenticated:
            return Response("Unauthorized")
        # BUG FIX: the original kept the TotalLevel *object* and later compared
        # it with ">" against an int (TypeError in Python 3); extract the field.
        total = TotalLevel.objects.filter(id=1).first()
        last_level = total.total_level if total is not None else 0
        ans = request.data['ans']
        player = Player.objects.filter(user=user).first()
        # BUG FIX: filter() returns a queryset and never raises DoesNotExist,
        # so `level.answer` below would have failed; fetch a single row.
        level = Level.objects.filter(level_number=player.current_level).first()
        if level is None:
            if player.current_level > last_level:
                return Response("win")
            return Response("finish")
        if ans == level.answer:
            player.current_level += 1
            player.score += 10
            # BUG FIX: `datetime.now` (no call) stored the function object;
            # store the actual submission time.
            player.timestamp = datetime.now()
            level.number_of_user += 1
            level.accuracy = round(
                level.number_of_user / float(level.number_of_user + level.wrong), 2)
            level.save()
            player.save()
            # BUG FIX: removed the unreachable/undefined render(...) call and
            # the bare except; detect the next level explicitly instead.
            next_level = Level.objects.filter(level_number=player.current_level).first()
            if next_level is not None:
                return Response("level")
            if player.current_level > last_level:
                return Response("win")
            return Response("finish")
        elif ans == "":
            # Empty submission: no penalty, no progress.
            pass
        else:
            level.wrong += 1
            level.save()
        # NOTE(review): empty and wrong answers fall through to this response,
        # matching the original control flow — confirm whether a dedicated
        # "wrong answer" response is wanted instead of "Unauthorized".
        return Response("Unauthorized")
|
[
"jc.github@rediffmail.com"
] |
jc.github@rediffmail.com
|
b99feeb950808a1214d6463cae0a95bd16ba39f0
|
ffe4c155e228f1d3bcb3ff35265bb727c684ec1a
|
/Codes/file写入/compute_sum.py
|
64fade6866b28dd98f940d12e8ea81c8fefedaa8
|
[] |
no_license
|
yuuee-www/Python-Learning
|
848407aba39970e7e0058a4adb09dd35818c1d54
|
2964c9144844aed576ea527acedf1a465e9a8664
|
refs/heads/master
| 2023-03-12T00:55:06.034328
| 2021-02-28T13:43:14
| 2021-02-28T13:43:14
| 339,406,816
| 0
| 0
| null | 2021-02-28T11:27:40
| 2021-02-16T13:26:46
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 414
|
py
|
def main():
    """Sum the whitespace-separated numbers in bad_numbers.txt.

    Prints each valid number, reports each invalid token, and finally prints
    the sum rounded to one decimal place.
    """
    total = 0.0  # renamed from `sum` to avoid shadowing the builtin
    with open("bad_numbers.txt") as file:
        for token in file.read().split():
            # EAFP: attempt the conversion once (the original converted each
            # token twice) and report tokens that are not valid floats.
            try:
                value = float(token)
            except ValueError:
                print("Invalid number:", token)
            else:
                print(value)
                total += value
    print("Sum is:", round(total, 1))


if __name__ == "__main__":
    # Guarded entry point so importing this module no longer runs main().
    main()
|
[
"50982416+cyndereN@users.noreply.github.com"
] |
50982416+cyndereN@users.noreply.github.com
|
c008e0806beaccc4d9526eb954576c58d6c04a90
|
98f078b52352ab08a8c9ac08a631a7ff1ac3fa63
|
/medeina/medeina/models.py
|
7e1e6b0c4f8958eb78d39da777536283f8675ab5
|
[] |
no_license
|
Eimis/medeina
|
160ceed5044cec5640f272190d2a14d4428f01dc
|
3bd838798481f933d729f6104c8b1af29a6f587d
|
refs/heads/master
| 2020-03-08T11:14:11.198773
| 2018-04-08T14:23:29
| 2018-04-08T14:23:29
| 128,092,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from medeina.managers import IssueManager
from medeina.states import IssueStates
from django_states.fields import StateField
@python_2_unicode_compatible
class IssueCategory(models.Model):
    """A named grouping for issues; category names are unique."""

    # Short unique label; also used as the human-readable representation below.
    name = models.CharField(max_length=20, unique=True)

    def __str__(self):
        return self.name

    class Meta:
        # Default pluralization would be "Issue categorys"; override it.
        verbose_name_plural = 'Issue categories'
@python_2_unicode_compatible
class Issue(models.Model):
    """A reported issue, submitted by one user and optionally solved by another."""

    # Custom manager from medeina.managers — presumably adds issue-specific
    # queryset helpers; verify against IssueManager for details.
    objects = IssueManager()
    # NOTE(review): kept alongside the `state` machine below — confirm whether
    # this flag is redundant with a "solved" state.
    solved = models.BooleanField(default=False)
    title = models.CharField(max_length=50)
    # Reporter; reverse accessor: user.submitted_issues.
    submitter = models.ForeignKey(User, related_name='submitted_issues')
    # Unset until resolved; reverse accessor: user.solved_issues.
    solver = models.ForeignKey(User, related_name='solved_issues', null=True)
    text_description = models.TextField()
    # django_states state machine driven by the IssueStates definition.
    state = StateField(machine=IssueStates)
    category = models.ForeignKey(IssueCategory)
    created_on = models.DateTimeField(auto_now_add=True)
    # Set when the issue is resolved; null while open.
    solved_on = models.DateTimeField(null=True)

    def __str__(self):
        return self.title
|
[
"eimantas.stonys@gmail.com"
] |
eimantas.stonys@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.