blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
738f342dc72bafe4df18bfca2f9beaa61bcf7526
|
2c4efe2ce49a900c68348f50e71802994c84900a
|
/braindecode/braindecode/venv1/Lib/site-packages/numba/pycc/__init__.py
|
20f60564072472c96972fbdf850306034aaf0c7b
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
sisi2/Masterthesis
|
b508632526e82b23c2efb34729141bfdae078fa0
|
7ce17644af47db4ad62764ed062840a10afe714d
|
refs/heads/master
| 2022-11-19T15:21:28.272824
| 2018-08-13T15:02:20
| 2018-08-13T15:02:20
| 131,345,102
| 2
| 1
| null | 2022-11-15T14:08:07
| 2018-04-27T21:09:21
|
Python
|
UTF-8
|
Python
| false
| false
| 3,769
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import os
import logging
import subprocess
import tempfile
import sys
# Public API
from .cc import CC
from .decorators import export, exportmany
def get_ending(args):
    """Return the output-file extension for the requested output kind.

    :param args: parsed argparse namespace with boolean ``llvm``,
        ``olibs`` and ``python`` attributes.
    :return: ".bc", ".o", or the platform-specific extension-module /
        shared-library suffix.
    """
    if args.llvm:
        return ".bc"
    elif args.olibs:
        return ".o"
    elif args.python:
        # BUG FIX: these helpers were only imported inside main()'s local
        # scope, so calling them here raised NameError; import them where
        # they are actually needed.
        from .platform import find_pyext_ending
        return find_pyext_ending()
    else:
        from .platform import find_shared_ending
        return find_shared_ending()
def main(args=None):
    """Entry point for the deprecated ``pycc`` command-line script.

    Parses *args* (or ``sys.argv[1:]`` when None), compiles the input
    files with ModuleCompiler, and emits LLVM bitcode, native object
    files, or a linked shared library depending on the flags.
    """
    import argparse
    from .compiler import ModuleCompiler
    from .platform import Toolchain, find_shared_ending, find_pyext_ending
    from . import decorators

    parser = argparse.ArgumentParser(
        description="DEPRECATED - Compile Python modules to a single shared library")
    parser.add_argument("inputs", nargs='+', help="Input file(s)")
    parser.add_argument("-o", nargs=1, dest="output",
                        help="Output file (default is name of first input -- with new ending)")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-c", action="store_true", dest="olibs",
                       help="Create object file from each input instead of shared-library")
    group.add_argument("--llvm", action="store_true",
                       help="Emit llvm instead of native code")
    parser.add_argument('--header', action="store_true",
                        help="Emit C header file with function signatures")
    parser.add_argument('--python', action='store_true',
                        help='Emit additionally generated Python wrapper and '
                             'extension module code in output')
    parser.add_argument('-d', '--debug', action='store_true',
                        help='Print extra debug information')

    args = parser.parse_args(args)

    logger = logging.getLogger(__name__)
    if args.debug:
        logger.setLevel(logging.DEBUG)
    # BUG FIX: Logger.warn is a deprecated alias of Logger.warning.
    logger.warning("The 'pycc' script is DEPRECATED; "
                   "please use the numba.pycc.CC API instead")

    if args.output:
        # Explicit output path: derive the module name from its basename.
        args.output = args.output[0]
        output_base = os.path.split(args.output)[1]
        module_name = os.path.splitext(output_base)[0]
    else:
        # Default: first input file, with the ending matching the output kind.
        input_base = os.path.splitext(args.inputs[0])[0]
        module_name = os.path.split(input_base)[1]
        args.output = input_base + get_ending(args)
    logger.debug('args.output --> %s', args.output)

    if args.header:
        print('ERROR: pycc --header has been disabled in this release due to a known issue')
        sys.exit(1)

    logger.debug('inputs --> %s', args.inputs)
    decorators.process_input_files(args.inputs)
    compiler = ModuleCompiler(decorators.export_registry, module_name=module_name)

    if args.llvm:
        logger.debug('emit llvm')
        compiler.write_llvm_bitcode(args.output, wrap=args.python)
    elif args.olibs:
        logger.debug('emit object file')
        compiler.write_native_object(args.output, wrap=args.python)
    else:
        logger.debug('emit shared library')
        logger.debug('write to temporary object file %s', tempfile.gettempdir())
        toolchain = Toolchain()
        toolchain.debug = args.debug
        # Compile to a temporary object file, then link it into the final
        # shared library and clean up.
        temp_obj = (tempfile.gettempdir() + os.sep +
                    os.path.basename(args.output) + '.o')
        compiler.write_native_object(temp_obj, wrap=args.python)
        # BUG FIX: 'libraries' was computed but unused while the call below
        # fetched the list a second time; reuse the single lookup.
        libraries = toolchain.get_python_libraries()
        toolchain.link_shared(args.output, [temp_obj],
                              libraries,
                              toolchain.get_python_library_dirs(),
                              export_symbols=compiler.dll_exports)
        os.remove(temp_obj)
|
[
"dansyefila@gmail.com"
] |
dansyefila@gmail.com
|
2ca8d537c01563e7b278dbb6e2b6594cbbc80763
|
99e494d9ca83ebafdbe6fbebc554ab229edcbacc
|
/.history/Day 1/Test/Answers/NegativeMarking_20210304211434.py
|
57b1e9d86c4af6e1af80ae1355f111567af35e11
|
[] |
no_license
|
Datta2901/CCC
|
c0364caa1e4937bc7bce68e4847c8d599aef0f59
|
4debb2c1c70df693d0e5f68b5798bd9c7a7ef3dc
|
refs/heads/master
| 2023-04-19T10:05:12.372578
| 2021-04-23T12:50:08
| 2021-04-23T12:50:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# For each test case, print the minimum accuracy (percentage of attempted
# questions that must be correct) to reach the required score under a
# +4 correct / -3 wrong marking scheme, or -1 when it is unreachable.
t = int(input())
for i in range(t):
    questions, requiredscore = map(int, input().split())
    if questions * 4 < requiredscore:
        # Even a fully-correct paper only scores 4 * questions.
        print(-1)
        continue
    attempt = (requiredscore / questions) + 3
    accuracy = attempt / 7
    # BUG FIX: the original print(...) was missing its closing parenthesis,
    # which made the whole script a SyntaxError.
    print(format(accuracy * 100, '.2f'))
# Here Accuracy can be found by using two linear equations
# They are Total Score(Required Score) = 4 * x - 3 * y
# Total Questions = x + y
# Here x is the total
|
[
"manikanta2901@gmail.com"
] |
manikanta2901@gmail.com
|
3987a84881cb00c259e5f634796e5624fed300d3
|
6674f4300961d9ca7fbfb667734fb91b26fc7881
|
/cutter.py
|
23a52d5ea2f98dd23c1e6879e0862a329c9e6fb2
|
[] |
no_license
|
seffka/sounds
|
2d1232c5e2a0bf4ca5ab2fae06e515ec078aab4a
|
84770ed73a47f42af847012cd987f0e3b6a15db6
|
refs/heads/master
| 2021-01-12T00:13:13.331170
| 2017-01-19T15:06:55
| 2017-01-19T15:06:55
| 78,686,903
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,623
|
py
|
import sys
sys.path.append('/Users/seffka/DSPMA/sms-tools/software/models/')
from utilFunctions import wavread, wavwrite
from scipy.signal import get_window
import matplotlib.pyplot as plt
import numpy as np
import os
from os import listdir
from os.path import isfile, join, splitext
import essentia
import essentia.standard
def processLength(l, x, instrument, pitch):
    """Cut an attack and a decay segment of ``l`` milliseconds from ``x``
    and write both to the ``hacked`` directory as WAV files.

    :param l: segment length in milliseconds.
    :param x: mono audio signal (numpy array) — assumed 44100 Hz sample
        rate per the constants below.
    :param instrument: instrument name embedded in the output file names.
    :param pitch: pitch label embedded in the output file names.
    """
    s = int(44100 * l / 1000.0)
    _8ms = int(44100 * .008)
    aw = np.ones(s)
    dw = np.ones(s)
    hw = get_window('hamming', _8ms)
    # BUG FIX: use integer division — float slice indices raise TypeError on
    # Python 3, and // is identical to / for ints on Python 2.
    half = _8ms // 2
    # Apply half a Hamming window at the segment edges to avoid clicks:
    # fade the decay window in and out, fade the attack window out.
    dw[:half] = hw[:half]
    dw[-half:] = hw[-half:]
    aw[-half:] = hw[-half:]
    ax = x[:s] * aw
    # The decay segment is taken 80 ms into the sound.
    dx = x[int(44100 * 0.08): int(44100 * 0.08) + s] * dw
    file_a = instrument + '_a_' + str(l) + '_' + pitch + '.wav'
    file_d = instrument + '_d_' + str(l) + '_' + pitch + '.wav'
    writer = essentia.standard.MonoWriter(filename=join('hacked', file_a))
    writer(ax.astype(np.float32))
    writer = essentia.standard.MonoWriter(filename=join('hacked', file_d))
    writer(dx.astype(np.float32))
# Collect every .wav file in 'raw' (skipping any file containing 'intro.wav').
f = [f for f in listdir('raw') if isfile(join('raw', f)) and splitext(join('.', f))[1] == '.wav' and 'intro.wav' not in f]
for file in f:
    # File names are assumed to look like '<instrument>_<pitch>.wav' —
    # TODO confirm against the contents of 'raw'.
    loader = essentia.standard.MonoLoader(filename=join('raw', file))
    x = loader()
    parts = splitext(file)[0].split('_')
    instrument = parts[0]
    pitch = parts[1]
    # Cut attack/decay segments at several lengths (milliseconds).
    processLength(16, x, instrument, pitch)
    processLength(24, x, instrument, pitch)
    processLength(32, x, instrument, pitch)
    processLength(64, x, instrument, pitch)
    processLength(128, x, instrument, pitch)
    processLength(500, x, instrument, pitch)
|
[
"seva@ringrows.ru"
] |
seva@ringrows.ru
|
012aaaa9e7fdbf6d4aeeaa7fa858790eab08e2eb
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/security/azure-mgmt-security/azure/mgmt/security/v2023_02_01_preview/aio/operations/_health_report_operations.py
|
852329018d706ec02c3aa68d58b22afb9b4dd791
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 4,653
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._health_report_operations import build_get_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class HealthReportOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.security.v2023_02_01_preview.aio.SecurityCenter`'s
        :attr:`health_report` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Client plumbing is passed positionally by the generated
        # SecurityCenter client, or by keyword when constructed manually.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def get(self, resource_id: str, health_report_name: str, **kwargs: Any) -> _models.HealthReport:
        """Get health report of resource.

        :param resource_id: The identifier of the resource. Required.
        :type resource_id: str
        :param health_report_name: The health report Key - Unique key for the health report type.
         Required.
        :type health_report_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: HealthReport or the result of cls(response)
        :rtype: ~azure.mgmt.security.v2023_02_01_preview.models.HealthReport
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Status-code -> exception mapping; callers may extend or override
        # it through the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # Caller may pin a different api-version; default is the version this
        # operations class was generated for.
        api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-02-01-preview"))
        cls: ClsType[_models.HealthReport] = kwargs.pop("cls", None)
        request = build_get_request(
            resource_id=resource_id,
            health_report_name=health_report_name,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        _stream = False
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=_stream, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Raise the mapped exception if one exists, otherwise fall through
            # to a generic HttpResponseError with the deserialized ARM error.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("HealthReport", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    # URL template consumed by build_get_request via self.get.metadata above.
    get.metadata = {"url": "/{resourceId}/providers/Microsoft.Security/healthReports/{healthReportName}"}
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
b90e6ea708395e48959ab1848de991922eb9a778
|
adb6314474c49d3780005f110115c2323f3a343e
|
/hr_employee_updation/__manifest__.py
|
4284e142f966d701c0fb80489b75c3f16dfa8b04
|
[] |
no_license
|
viethoang66666/seatek_viet
|
d86996a215ae426a5dce3054360f204e3d0867a1
|
5ebad7ede4690e1bb9e2c1063abf677e675631b4
|
refs/heads/master
| 2023-04-25T00:37:17.236513
| 2021-05-12T10:07:00
| 2021-05-12T10:07:00
| 366,660,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
# -*- coding: utf-8 -*-
###################################################################################
# A part of Open HRMS Project <https://www.openhrms.com>
#
# Cybrosys Technologies Pvt. Ltd.
# Copyright (C) 2018-TODAY Cybrosys Technologies (<https://www.cybrosys.com>).
# Author: Jesni Banu (<https://www.cybrosys.com>)
# Last modified 09Dec2020 by htkhoa - Seatek
# This program is free software: you can modify
# it under the terms of the GNU Affero General Public License (AGPL) as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###################################################################################
# Odoo module manifest: declares metadata and dependencies for the
# hr_employee_updation addon (extra fields on the employee master).
{
    'name': 'OpenSea HRMS Employee Info 2.0.2',
    'version': '12.0.2.0.2',
    'summary': """Adding Advanced Fields In Employee Master""",
    'description': 'Add more information in employee records. Last modified 06Dec2020 by htkhoa',
    'category': 'Generic Modules/Human Resources',
    'author': 'Cybrosys Techno Solutions',
    'company': 'Cybrosys Techno Solutions',
    'website': "https://www.openhrms.com",
    # Modules that must be installed before this one.
    'depends': ['base', 'hr', 'mail', 'hr_gamification'],
    # No data/view files are currently shipped.
    'data': [
    ],
    'demo': [],
    'images': ['static/description/banner.jpg'],
    'license': 'AGPL-3',
    'installable': True,
    'auto_install': False,
    'application': False,
}
|
[
"vincenthoang271201@gmail.com"
] |
vincenthoang271201@gmail.com
|
6b062341d5d9055c048a0f573b4535d9fdd25741
|
930bc970069d8cbcfb36725a90492eff50638ecc
|
/code/dk-iris-pipeline/airflow_home/dags/iris-dag.py
|
293423870eb24c0bad97b6b4bc19c1f95bd27dbe
|
[
"MIT"
] |
permissive
|
databill86/airflow4ds
|
4770d856569c4db4b55b2d9dfda010e21c4cd790
|
b5ae213f7169c54d31f4eca58d235ec6b09fd56f
|
refs/heads/master
| 2021-09-25T17:26:43.340747
| 2018-10-24T16:09:49
| 2018-10-24T16:09:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
import sys
import os
PROJECT_DIRECTORY = os.getenv(key='AIRFLOW_HOME')
sys.path.append(PROJECT_DIRECTORY)
from src import get_raw_iris, get_clean_iris
import datetime as dt
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
# Default task arguments applied to every task in the DAG.
default_args = {
    'owner': 'me',
    'depends_on_past': False,
    'start_date': dt.datetime(2018, 8, 22),
    'retries': 1,
    'retry_delay': dt.timedelta(minutes=5),
}
# DAG runs daily at midnight ('0 0 * * *').
with DAG('airflow_tutorial_v01',
         default_args=default_args,
         schedule_interval='0 0 * * *',
         ) as dag:
    print_hello = BashOperator(task_id='print_hello', bash_command='echo "hello"')
    sleep = BashOperator(task_id='sleep', bash_command='sleep 5')
    get_data = PythonOperator(task_id='get_raw_iris', python_callable=get_raw_iris)
    clean_data = PythonOperator(task_id='get_clean_iris', python_callable=get_clean_iris)
    # Task ordering: hello -> sleep -> fetch raw iris -> clean iris.
    print_hello >> sleep >> get_data >> clean_data
|
[
"dushyant.khosla@pmi.com"
] |
dushyant.khosla@pmi.com
|
d25faa0b2ef3fc7ee416c23bf66da07d35197723
|
3f84ff1f506287bf0bb3b0840947e3ef23f22c87
|
/04day/6-王者游戏私有方法.py
|
8b73d2a76e870dc88eac560b0f4a097706823bd4
|
[] |
no_license
|
2099454967/wbx
|
34b61c0fc98a227562ea7822f2fa56c5d01d3654
|
316e7ac7351b532cb134aec0740e045261015920
|
refs/heads/master
| 2020-03-18T06:09:58.544919
| 2018-05-28T13:01:19
| 2018-05-28T13:01:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
class Game():
    """Toy player character demonstrating private (name-mangled) members."""

    def __init__(self):
        # Mana pool; the double-underscore prefix makes it private
        # via name mangling (_Game__size).
        self.__size = 100

    def getSize(self):
        """Return the current mana amount."""
        return self.__size

    def setSize(self, size):
        """Replace the current mana amount."""
        self.__size = size

    def __dazhao(self, mp):
        # Private ultimate-attack helper; the ultimate needs mana, and the
        # name mangling prevents calling it directly from outside the class.
        print("十步杀一人")

    def fadazhao(self, mp):
        """Fire the ultimate attack when there is enough mana (> 80)."""
        if mp <= 80:
            print("蓝不够")
        else:
            self.__dazhao(mp)
# Demo: private members are name-mangled, so wangzhe.__dazhao(100) would
# raise AttributeError from outside the class.
wangzhe = Game()
#wangzhe.__dazhao(100)
wangzhe.fadazhao(100)
# BUG FIX: wangzhe.__size raises AttributeError outside the class (the
# attribute is stored as _Game__size); use the public accessor instead.
print(wangzhe.getSize())
|
[
"2099454967@qq.com"
] |
2099454967@qq.com
|
95d1e9159392dcaf85675e26e0bdec4db28e9fea
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/nmoohLwP962r6P355_7.py
|
0bc8cd0fa601e62247825d104fab5cc4f71290af
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,574
|
py
|
"""
In this challenge, you have to establish if the digits of a given number form
a straight arithmetic sequence (either increasing or decreasing). A straight
sequence has an equal step between every pair of digits.
Given an integer `n`, implement a function that returns:
* `"Not Straight"` if `n` is lower than 100 or if its digits are not an arithmetic sequence.
* `"Trivial Straight"` if `n` has a single repeating digit.
* An integer being the step of the sequence if the `n` digits are a straight arithmetic sequence.
### Examples
straight_digital(123) ➞ 1
# 2 - 1 = 1 | 3 - 2 = 1
straight_digital(753) ➞ -2
# 5 - 7 = -2 | 3 - 5 = -2
straight_digital(666) ➞ "Trivial Straight"
# There's a single repeating digit (step = 0).
straight_digital(124) ➞ "Not Straight"
# 2 - 1 = 1 | 4 - 2 = 2
# A valid sequence has always the same step between its digits.
straight_digital(99) ➞ "Not Straight"
# The number is lower than 100.
### Notes
* The step of the sequence can be either positive or negative (see example #2).
* Trivia: there are infinite straight digital numbers, but only 96 of them are made of at least two different digits.
"""
def straight_digital(number):
    """Classify the digits of *number* as an arithmetic sequence.

    Returns "Not Straight" when *number* is below 100 or its digits do not
    form an arithmetic sequence, "Trivial Straight" when a single digit
    repeats, and otherwise the (possibly negative) common step.
    """
    digits = "".join(ch for ch in str(number) if ch.isdigit())
    # Pairwise differences between consecutive digits.
    steps = [int(b) - int(a) for a, b in zip(digits, digits[1:])]
    if number < 100 or len(set(steps)) > 1:
        return 'Not Straight'
    if len(set(digits)) == 1:
        return 'Trivial Straight'
    return steps[0]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
772024ffe81f495ca9834e7489711dd25fcd010b
|
7bb34b9837b6304ceac6ab45ce482b570526ed3c
|
/external/webkit/Source/WebCore/WebCore.gyp/scripts/action_makenames.py
|
ecf543f44980bab6edee79a875d0e89de645676e
|
[
"Apache-2.0",
"LGPL-2.0-only",
"BSD-2-Clause",
"LGPL-2.1-only",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
ghsecuritylab/android_platform_sony_nicki
|
7533bca5c13d32a8d2a42696344cc10249bd2fd8
|
526381be7808e5202d7865aa10303cb5d249388a
|
refs/heads/master
| 2021-02-28T20:27:31.390188
| 2013-10-15T07:57:51
| 2013-10-15T07:57:51
| 245,730,217
| 0
| 0
|
Apache-2.0
| 2020-03-08T00:59:27
| 2020-03-08T00:59:26
| null |
UTF-8
|
Python
| false
| false
| 6,871
|
py
|
#!/usr/bin/python
#
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# action_makenames.py is a harness script to connect actions sections of
# gyp-based builds to make_names.pl.
#
# usage: action_makenames.py OUTPUTS -- INPUTS [-- OPTIONS]
#
# Multiple OUTPUTS, INPUTS, and OPTIONS may be listed. The sections are
# separated by -- arguments.
#
# The directory name of the first output is chosen as the directory in which
# make_names will run. If the directory name for any subsequent output is
# different, those files will be moved to the desired directory.
#
# Multiple INPUTS may be listed. An input with a basename matching
# "make_names.pl" is taken as the path to that script. Inputs with names
# ending in TagNames.in or tags.in are taken as tag inputs. Inputs with names
# ending in AttributeNames.in or attrs.in are taken as attribute inputs. There
# may be at most one tag input and one attribute input. A make_names.pl input
# is required and at least one tag or attribute input must be present.
#
# OPTIONS is a list of additional options to pass to make_names.pl. This
# section need not be present.
import os
import posixpath
import shutil
import subprocess
import sys
def SplitArgsIntoSections(args):
    """Split *args* into sections separated by '--' arguments.

    Each section is returned as its own list. A '--' at the very end of
    the list produces one final empty section.
    """
    sections = []
    remaining = args
    while remaining:
        try:
            cut = remaining.index('--')
        except ValueError:
            # No separator left: everything remaining is one entire section.
            cut = len(remaining)
        sections.append(remaining[:cut])
        if cut + 1 == len(remaining):
            # The '--' was the last element; record the empty section that
            # follows it and stop.
            remaining = []
            sections.append(remaining)
        else:
            # Continue after this '--' (or finish if none was found).
            remaining = remaining[cut + 1:]
    return sections
def main(args):
    """Harness entry point: *args* is the full argv, laid out as
    OUTPUTS -- INPUTS [-- OPTIONS] after the program name.

    Runs make_names.pl in the first output's directory and then moves any
    outputs that belong in a different directory. Returns the
    make_names.pl exit code (asserted to be 0).
    """
    sections = SplitArgsIntoSections(args[1:])
    assert len(sections) == 2 or len(sections) == 3
    (outputs, inputs) = sections[:2]
    if len(sections) == 3:
        options = sections[2]
    else:
        options = []

    # Make all output pathnames absolute so that they can be accessed after
    # changing directory.
    # BUG FIX: xrange is Python 2 only; range behaves identically here and
    # also works on Python 3.
    for index in range(0, len(outputs)):
        outputs[index] = os.path.abspath(outputs[index])
    outputDir = os.path.dirname(outputs[0])

    # Look at the inputs and figure out which ones are make_names.pl, tags, and
    # attributes. There can be at most one of each, and those are the only
    # input types supported. make_names.pl is required and at least one of tags
    # and attributes is required.
    makeNamesInput = None
    tagInput = None
    attrInput = None
    # Loop variable renamed from 'input' to avoid shadowing the builtin.
    for inputFile in inputs:
        # Make input pathnames absolute so they can be accessed after changing
        # directory. On Windows, convert \ to / for inputs to the perl script to
        # work around the intermix of activepython + cygwin perl.
        inputAbs = os.path.abspath(inputFile)
        inputAbsPosix = inputAbs.replace(os.path.sep, posixpath.sep)
        inputBasename = os.path.basename(inputFile)
        if inputBasename == 'make_names.pl':
            assert makeNamesInput is None
            makeNamesInput = inputAbs
        elif inputBasename.endswith(('TagNames.in', 'tags.in')):
            assert tagInput is None
            tagInput = inputAbsPosix
        elif inputBasename.endswith(('AttributeNames.in', 'attrs.in')):
            assert attrInput is None
            attrInput = inputAbsPosix
        else:
            assert False
    assert makeNamesInput is not None
    assert tagInput is not None or attrInput is not None

    # scriptsPath is a Perl include directory, located relative to
    # makeNamesInput.
    scriptsPath = os.path.normpath(
        os.path.join(os.path.dirname(makeNamesInput), os.pardir, 'bindings', 'scripts'))

    # Change to the output directory because make_names.pl puts output in its
    # working directory.
    os.chdir(outputDir)

    # Build up the command.
    command = ['perl', '-I', scriptsPath, makeNamesInput]
    if tagInput is not None:
        command.extend(['--tags', tagInput])
    if attrInput is not None:
        command.extend(['--attrs', attrInput])
    command.extend(options)

    # Do it. check_call is new in 2.5, so simulate its behavior with call and
    # assert.
    returnCode = subprocess.call(command)
    assert returnCode == 0

    # Go through the outputs. Any output that belongs in a different directory
    # is moved. Do a copy and delete instead of rename for maximum portability.
    # Note that all paths used in this section are still absolute.
    for output in outputs:
        thisOutputDir = os.path.dirname(output)
        if thisOutputDir != outputDir:
            outputBasename = os.path.basename(output)
            src = os.path.join(outputDir, outputBasename)
            dst = os.path.join(thisOutputDir, outputBasename)
            shutil.copyfile(src, dst)
            os.unlink(src)

    return returnCode
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
[
"gahlotpercy@gmail.com"
] |
gahlotpercy@gmail.com
|
d39c97d024cef112b1e44e961021e9d7cff0637d
|
917c44bfb0b6fdcce7ad4148e6cbd89fd0e61901
|
/tests/factories.py
|
ca87699b78e4d8b4e95ab6e339d458ceda503f02
|
[] |
no_license
|
onepercentclub/django-bb-salesforce
|
39c3a0071d52b0c021c545aa32aeca310ad6a1ec
|
6d00a9521271612a174d7e66dc65a6751f1636f4
|
refs/heads/master
| 2021-01-21T04:33:06.979227
| 2016-07-15T08:58:28
| 2016-07-15T08:58:28
| 32,444,339
| 0
| 1
| null | 2016-07-15T08:58:28
| 2015-03-18T07:31:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
import factory
from tests.models import Member, Country, SubRegion, Region, Address
class MemberFactory(factory.DjangoModelFactory):
    """Factory producing Member rows with unique username/name/email."""
    FACTORY_FOR = Member  # legacy (factory_boy < 2.4) model declaration
    username = factory.Sequence(lambda n: u'jd_{0}'.format(n))
    first_name = factory.Sequence(lambda f: u'John_{0}'.format(f))
    last_name = factory.Sequence(lambda l: u'Doe_{0}'.format(l))
    email = factory.Sequence(lambda l: u'user_{0}@gmail.com'.format(l))
class RegionFactory(factory.DjangoModelFactory):
    """Factory producing Region rows with unique names."""
    FACTORY_FOR = Region  # legacy (factory_boy < 2.4) model declaration
    name = factory.Sequence(lambda n: u'Region{0}'.format(n))
class SubRegionFactory(factory.DjangoModelFactory):
    """Factory producing SubRegion rows, each attached to a new Region."""
    FACTORY_FOR = SubRegion  # legacy (factory_boy < 2.4) model declaration
    name = factory.Sequence(lambda n: u'SubRegion{0}'.format(n))
    region = factory.SubFactory(RegionFactory)
class CountryFactory(factory.DjangoModelFactory):
    """Factory producing Country rows, each attached to a new SubRegion."""
    FACTORY_FOR = Country  # legacy (factory_boy < 2.4) model declaration
    name = factory.Sequence(lambda n: u'Country_{0}'.format(n))
    subregion = factory.SubFactory(SubRegionFactory)
class AddressFactory(factory.DjangoModelFactory):
    """Factory producing Address rows with a fresh Member and Country."""
    FACTORY_FOR = Address  # legacy (factory_boy < 2.4) model declaration
    user = factory.SubFactory(MemberFactory)
    line1 = factory.Sequence(lambda n: u'street_{0}'.format(n))
    line2 = factory.Sequence(lambda n: u'extra_{0}'.format(n))
    city = factory.Sequence(lambda n: u'city_{0}'.format(n))
    state = factory.Sequence(lambda n: u'state_{0}'.format(n))
    postal_code = factory.Sequence(lambda n: u'zipcode_{0}'.format(n))
    country = factory.SubFactory(CountryFactory)
|
[
"loek@1procentclub.nl"
] |
loek@1procentclub.nl
|
f0f11595eb6cb036039a579f42b8e3513cebbdf1
|
655c51822cc0484a7cfab9d0de6e9f1fde144dba
|
/autoserver/api/src/plugins/memory.py
|
053f8b6098248b8bc598047631f4fbaf6a175412
|
[] |
no_license
|
summer93/CMDB
|
6f1988749cc8078cd219d4dd44f5dfcb0f2cc8f7
|
62c714354870c9b984f722efdf62e0d78bcf1450
|
refs/heads/master
| 2021-01-15T18:26:45.928105
| 2017-08-10T09:24:33
| 2017-08-10T09:24:33
| 99,783,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,628
|
py
|
from repository import models
class Memory(object):
    """CMDB plugin that syncs a server's reported memory (RAM) modules into
    the database: deletes missing slots, creates new ones, updates changed
    ones, and writes an AssetRecord audit entry for every change."""
    def __init__(self):
        pass
    @classmethod
    def initial(cls):
        # Factory hook: returns a fresh plugin instance.
        return cls()
    def process(self,server_info,hostname,server_obj):
        """Reconcile reported memory info against the DB for *server_obj*.

        :param server_info: collected data; ``server_info['memory']['data']``
            is assumed to map slot name -> attribute dict — TODO confirm
            against the collector.
        :param hostname: unused here.
        :param server_obj: the Server model row being reconciled.
        """
        # NOTE(review): leftover debug prints ('start' here, update_list
        # below); consider switching to logging.
        print('start')
        # ############### process memory info ##################
        new_memory_dict = server_info['memory']['data']
        old_memory_list = models.Memory.objects.filter(server_obj=server_obj)
        # Slots split three ways: intersection -> update, only-new -> create,
        # only-old -> delete.
        new_slot_list = list(new_memory_dict.keys())
        old_slot_list = []
        for item in old_memory_list:
            old_slot_list.append(item.slot)
        # intersection: slots to update
        update_list = set(new_slot_list).intersection(old_slot_list)
        # difference: slots to create
        create_list = set(new_slot_list).difference(old_slot_list)
        # difference: slots to delete
        del_list = set(old_slot_list).difference(new_slot_list)
        if del_list:
            # Delete rows for slots no longer reported.
            models.Memory.objects.filter(server_obj=server_obj, slot__in=del_list).delete()
            # Audit record (the Chinese text means "removed memory: ...").
            models.AssetRecord.objects.create(asset_obj=server_obj.asset, content="移除内存:%s" % ("、".join(del_list),))
        # Create rows for newly reported slots.
        record_list = []
        for slot in create_list:
            memory_dict = new_memory_dict[slot]
            memory_dict['server_obj'] = server_obj
            models.Memory.objects.create(**memory_dict)
            # Audit line ("added memory: slot/capacity/model/speed/...").
            temp = "新增内存:位置{slot},容量{capacity},型号:{model},speed:{speed},manufacturer:{manufacturer},sn:{sn}".format(**memory_dict)
            record_list.append(temp)
        if record_list:
            content = ";".join(record_list)
            models.AssetRecord.objects.create(asset_obj=server_obj.asset, content=content)
        # ############ update changed slots ############
        record_list = []
        # Field -> display name used in the audit message.
        row_map = {'capacity': '容量', 'speed': '类型', 'model': '型号'}
        print(update_list)
        for slot in update_list:
            new_memory_row = new_memory_dict[slot]
            ol_memory_row = models.Memory.objects.filter(slot=slot, server_obj=server_obj).first()
            for k, v in new_memory_row.items():
                value = getattr(ol_memory_row, k)
                if v != value:
                    # Audit line ("slot X, <field> changed from <old> to <new>").
                    record_list.append("槽位%s,%s由%s变更为%s" % (slot, row_map[k], value, v,))
                    setattr(ol_memory_row, k, v)
            ol_memory_row.save()
        if record_list:
            content = ";".join(record_list)
            models.AssetRecord.objects.create(asset_obj=server_obj.asset, content=content)
|
[
"mcdull9393@gmail.com"
] |
mcdull9393@gmail.com
|
9ac50c0ee92de53c33c21cabe6fe78b6597c7f90
|
30ee21b97e6105288101d1031ed7f96aaf6e141e
|
/lib/_org/stemma_soil_sensor/seesaw.py
|
44e86e096d1fb9c2c0e9aa3883dd97e705d298c1
|
[] |
no_license
|
josmet52/micropython
|
b3d1d69ad7eb6832ce26c15e3580dae99b5bb87f
|
d8f2267b556ba3b15861898c2c2eb5e086dcf9ce
|
refs/heads/main
| 2023-06-13T06:18:30.151497
| 2021-07-09T10:14:25
| 2021-07-09T10:14:25
| 353,124,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,189
|
py
|
"""
This is a lightweight port from CircuitPython to MicroPython
of Dean Miller's https://github.com/adafruit/Adafruit_CircuitPython_seesaw/blob/master/adafruit_seesaw/seesaw.py
* Author(s): Mihai Dinculescu
Implementation Notes
--------------------
**Hardware:**
* Adafruit ATSAMD09 Breakout with SeeSaw: https://www.adafruit.com/product/3657
**Software and Dependencies:**
* MicroPython firmware: https://micropython.org
**Tested on:**
* Hardware: Adafruit HUZZAH32 - ESP32 Feather https://learn.adafruit.com/adafruit-huzzah32-esp32-feather/overview
* Firmware: MicroPython v1.12 https://micropython.org/resources/firmware/esp32-idf3-20191220-v1.12.bin
"""
import time
# SeeSaw register base addresses and status-module register numbers.
# NOTE: const() is provided by the MicroPython compiler; this module is not
# importable under plain CPython without defining it.
STATUS_BASE = const(0x00)
TOUCH_BASE = const(0x0F)
_STATUS_HW_ID = const(0x01)
_STATUS_SWRST = const(0x7F)
_HW_ID_CODE = const(0x55)
class Seesaw:
"""Driver for SeeSaw I2C generic conversion trip.
:param I2C i2c: I2C bus the SeeSaw is connected to.
:param int addr: I2C address of the SeeSaw device."""
def __init__(self, i2c, addr):
self.i2c = i2c
self.addr = addr
self.sw_reset()
def sw_reset(self):
"""Trigger a software reset of the SeeSaw chip"""
self._write8(STATUS_BASE, _STATUS_SWRST, 0xFF)
time.sleep(.500)
chip_id = self._read8(STATUS_BASE, _STATUS_HW_ID)
if chip_id != _HW_ID_CODE:
raise RuntimeError("SeeSaw hardware ID returned (0x{:x}) is not "
"correct! Expected 0x{:x}. Please check your wiring."
.format(chip_id, _HW_ID_CODE))
def _write8(self, reg_base, reg, value):
self._write(reg_base, reg, bytearray([value]))
def _read8(self, reg_base, reg):
ret = bytearray(1)
self._read(reg_base, reg, ret)
return ret[0]
def _read(self, reg_base, reg, buf, delay=.005):
self._write(reg_base, reg)
time.sleep(delay)
self.i2c.readfrom_into(self.addr, buf)
def _write(self, reg_base, reg, buf=None):
full_buffer = bytearray([reg_base, reg])
if buf is not None:
full_buffer += buf
self.i2c.writeto(self.addr, full_buffer)
|
[
"joseph.metrailler@bluewin.ch"
] |
joseph.metrailler@bluewin.ch
|
e497b5560b1432d4414794b40c5d8c8d8ce1e288
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_lifted.py
|
40e546c7fcc541ef50d02efb32fc82ae8192b35f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
from xai.brain.wordbase.nouns._lift import _LIFT
#calss header
class _LIFTED(_LIFT, ):
def __init__(self,):
_LIFT.__init__(self)
self.name = "LIFTED"
self.specie = 'nouns'
self.basic = "lift"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
d16b3fca1a54c3a45ff1c1042b286a16635babe0
|
6970cd9ca073ae7becabcbc58cbd9e9f567b9d23
|
/imgaug/augmenters/__init__.py
|
47bca888063c5b7432f56c9cb67b256237ed91d7
|
[
"MIT"
] |
permissive
|
Liuxiang0358/imgaug
|
02c7eb57152ebdd7e92a8779a74c98c8ee041cc0
|
3a0c787ed32729dc47c06ea62c20c42997ad4305
|
refs/heads/master
| 2020-07-03T00:44:42.866083
| 2019-08-10T10:23:18
| 2019-08-10T10:23:18
| 201,729,987
| 1
| 0
|
MIT
| 2019-08-11T07:01:49
| 2019-08-11T07:01:49
| null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
from __future__ import absolute_import
from imgaug.augmenters.arithmetic import *
from imgaug.augmenters.blend import *
from imgaug.augmenters.blur import *
from imgaug.augmenters.color import *
from imgaug.augmenters.contrast import *
from imgaug.augmenters.convolutional import *
from imgaug.augmenters.edges import *
from imgaug.augmenters.flip import *
from imgaug.augmenters.geometric import *
from imgaug.augmenters.meta import *
from imgaug.augmenters.pooling import *
from imgaug.augmenters.segmentation import *
from imgaug.augmenters.size import *
from imgaug.augmenters.weather import *
|
[
"kontakt@ajung.name"
] |
kontakt@ajung.name
|
44db22541fa13bb0514ef0a372f738e3da9b270b
|
6933b96b9c10ca70da57b1b384126e20fa21d9b2
|
/FTP全自动采集爆破/sqlite0.5/test.py
|
05ef237848989a15f4732d62c26b3018bf0eb7c9
|
[] |
no_license
|
Dawson0x00/scan
|
5bb2e85756b8e86ba43f6d63182a7e806c560bfc
|
e9f274e26ac924a47cf3216e707dc1a724937775
|
refs/heads/master
| 2021-01-19T14:12:59.107938
| 2017-02-22T02:51:27
| 2017-02-22T02:51:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
#!/usr/local/bin/python
#-*- coding: UTF-8 -*-
queue = []
def enQ():
queue.append(raw_input('Enter new string: ').strip())
#调用list的列表的pop()函数.pop(0)为列表的第一个元素
def deQ(): #www.2cto.com
if len(queue) == 0:
print 'Cannot pop from an empty queue!'
else:
print 'Removed [', queue.pop(0) ,']'
def viewQ():
print queue
CMDs = {'e': enQ, 'd': deQ, 'v': viewQ}
def showmenu():
pr = """
(E)nqueue
(D)equeue
(V)iew
(Q)uit
Enter choice: """
while True:
while True:
try:
choice = raw_input(pr).strip()[0].lower()
except (EOFError, KeyboardInterrupt, IndexError):
choice = 'q'
print '\nYou picked: [%s]' % choice
if choice not in 'devq':
print 'Invalid option, try again'
else:
break
if choice == 'q':
break
CMDs[choice]()
if __name__ == '__main__':
showmenu()
#def worker():
# while True:
# task = _queue.pop()
# ......
#
#
# _writequeue.push(.....)
#python 队列
#if __name__=='__main__':
# while True:
# if threadpool.idle() > 0:
# tasks = sql.fetch_task(threadpool.idel())
# _queue.push(tass)
#
# time.sleep(5)
#
#
# _writequeue.toitems().update()
|
[
"voilet@voiletdeMacBook-Pro-2.local"
] |
voilet@voiletdeMacBook-Pro-2.local
|
521f6e601d211e16f6f2ec8d5e0818cd6b21b957
|
4d4485378bec02daa527c12a6051be4254a86589
|
/usr/models/layerbylayer/tfnmt_model.py
|
cdbd294355c140c8c07dc7f2368279074b8e43af
|
[
"Apache-2.0"
] |
permissive
|
colmantse/tensor2tensor-usr
|
163f5bff5e94dea75760e76fa4b07b6b2657a74c
|
8129cc7de2bb880dc328b4189ed613b26015c151
|
refs/heads/master
| 2021-07-13T01:22:29.626739
| 2017-10-10T00:53:54
| 2017-10-10T00:53:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 248
|
py
|
# coding=utf-8
"""Layer-by-layer model definitions."""
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
from tensor2tensor.layers import common_layers
from usr import utils as usr_utils
import tensorflow as tf
|
[
"fstahlberg@gmail.com"
] |
fstahlberg@gmail.com
|
d1b18b6f214a8f4dda769d317c09e88b0bae2d87
|
5830b76dda9c2d0b62b1929f3a5be26606089c8c
|
/alembic/env.py
|
2b67e16b02665b1e8cf828f13f800f454cb0cd94
|
[] |
no_license
|
umatbro/minikametr
|
0356eaff1c011b26065479cb417648ddf18f4796
|
4844e707c24559b3ad61b89f017a177fb52a5384
|
refs/heads/master
| 2023-08-25T02:30:25.753701
| 2021-10-11T10:18:10
| 2021-10-11T10:18:10
| 415,385,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,167
|
py
|
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
from db import DATABASE_URL
from models import * # noqa
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
config.set_main_option("sqlalchemy.url", DATABASE_URL)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = [SQLModel.metadata]
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
[
"umatbroo@gmail.com"
] |
umatbroo@gmail.com
|
b291e92f85bca69d74a58603ffcc38c2215c29b2
|
26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f
|
/exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/monitoring/librato_annotation.py
|
6ee3895763b1496c9b4c9cf358c1564d0a715a01
|
[
"GPL-3.0-only",
"MIT",
"GPL-3.0-or-later",
"CC0-1.0",
"GPL-1.0-or-later"
] |
permissive
|
tr3ck3r/linklight
|
37814ed19173d893cdff161355d70a1cf538239b
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
refs/heads/master
| 2021-04-11T04:33:02.727318
| 2020-03-25T17:38:41
| 2020-03-25T17:38:41
| 248,992,437
| 0
| 0
|
MIT
| 2020-03-21T14:26:25
| 2020-03-21T14:26:25
| null |
UTF-8
|
Python
| false
| false
| 5,451
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) Seth Edwards, 2014
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: librato_annotation
short_description: create an annotation in librato
description:
- Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically
author: "Seth Edwards (@Sedward)"
requirements: []
options:
user:
description:
- Librato account username
required: true
api_key:
description:
- Librato account api key
required: true
name:
description:
- The annotation stream name
- If the annotation stream does not exist, it will be created automatically
required: false
title:
description:
- The title of an annotation is a string and may contain spaces
- The title should be a short, high-level summary of the annotation e.g. v45 Deployment
required: true
source:
description:
- A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
required: false
description:
description:
- The description contains extra metadata about a particular annotation
- The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
required: false
start_time:
description:
- The unix timestamp indicating the time at which the event referenced by this annotation started
required: false
end_time:
description:
- The unix timestamp indicating the time at which the event referenced by this annotation ended
- For events that have a duration, this is a useful way to annotate the duration of the event
required: false
links:
description:
- See examples
required: true
'''
EXAMPLES = '''
# Create a simple annotation event with a source
- librato_annotation:
user: user@example.com
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
source: foo.bar
description: This is a detailed description of the config change
# Create an annotation that includes a link
- librato_annotation:
user: user@example.com
api_key: XXXXXXXXXXXXXXXXXX
name: code.deploy
title: app code deploy
description: this is a detailed description of a deployment
links:
- rel: example
href: http://www.example.com/deploy
# Create an annotation with a start_time and end_time
- librato_annotation:
user: user@example.com
api_key: XXXXXXXXXXXXXXXXXX
name: maintenance
title: Maintenance window
description: This is a detailed description of maintenance
start_time: 1395940006
end_time: 1395954406
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def post_annotation(module):
user = module.params['user']
api_key = module.params['api_key']
name = module.params['name']
title = module.params['title']
url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
params = {}
params['title'] = title
if module.params['source'] is not None:
params['source'] = module.params['source']
if module.params['description'] is not None:
params['description'] = module.params['description']
if module.params['start_time'] is not None:
params['start_time'] = module.params['start_time']
if module.params['end_time'] is not None:
params['end_time'] = module.params['end_time']
if module.params['links'] is not None:
params['links'] = module.params['links']
json_body = module.jsonify(params)
headers = {}
headers['Content-Type'] = 'application/json'
# Hack send parameters the way fetch_url wants them
module.params['url_username'] = user
module.params['url_password'] = api_key
response, info = fetch_url(module, url, data=json_body, headers=headers)
response_code = str(info['status'])
response_body = info['body']
if info['status'] != 201:
if info['status'] >= 400:
module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body)
else:
module.fail_json(msg="Request Failed. Response code: " + response_code)
response = response.read()
module.exit_json(changed=True, annotation=response)
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True),
api_key=dict(required=True),
name=dict(required=False),
title=dict(required=True),
source=dict(required=False),
description=dict(required=False),
start_time=dict(required=False, default=None, type='int'),
end_time=dict(required=False, default=None, type='int'),
links=dict(type='list')
)
)
post_annotation(module)
if __name__ == '__main__':
main()
|
[
"joshuamadison+gh@gmail.com"
] |
joshuamadison+gh@gmail.com
|
437e3b76c5db61174df0709315d5522b40f253a9
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/LSpPdiycJ75MiwvgQ_23.py
|
b36b16ff2450b87301651121df6fdfacf4586124
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
import math
def grid_pos(lst):
return (math.factorial((lst[0])+(lst[1])))/((math.factorial(lst[0]))*math.factorial(lst[1]))
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
dabc8ad186e73d5e2493027f46098c5e57ecf998
|
9d74cbd676e629f8acdc68a4bac3dea0a98b9776
|
/yc204/779.py
|
a67f3265cfaf192ec25967ebdf2dfdcd4f940743
|
[
"MIT"
] |
permissive
|
c-yan/yukicoder
|
01166de35e8059eaa8e3587456bba52f35bd0e44
|
dcfd89b0a03759156dcea8c2e61a7705543dc0d4
|
refs/heads/master
| 2022-03-20T06:50:48.225922
| 2022-02-25T15:48:50
| 2022-02-25T15:48:50
| 237,735,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
Y, M, D = map(int, input().split())
s = '%04d%02d%02d' % (Y, M, D)
if '19890108' <= s <= '20190430':
print('Yes')
else:
print('No')
|
[
"c-yan@users.noreply.github.com"
] |
c-yan@users.noreply.github.com
|
7e4616203464603cd0f1a2b181256c57a50b7d60
|
b20b8858b5da05c60e7dac02feb1187f88cc3294
|
/djangoproj/djangoapp/csc/nl/ja/utterance.py
|
f9d4798c1f3d2281a4dfe50b869a1ffb4b6df3d7
|
[
"MIT"
] |
permissive
|
pbarton666/buzz_bot
|
18f15322e072d784e0916c5b8c147b53df5dc9d4
|
9f44c66e8ecb10e231f70989421f164d7a55029a
|
refs/heads/master
| 2021-01-10T05:26:43.809377
| 2015-09-25T20:04:58
| 2015-09-25T20:04:58
| 43,027,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
from csc.nl.ja.debug import *
from csc.nl.ja.util import *
from csc.nl.ja.tree import *
import MeCab
import CaboCha
import re
class JaUtterance(JaTreeBranch, JaLanguageNode):
''' Represents an entire utterance '''
def __init__(self, children):
JaTreeBranch.__init__(self)
self.children = children
for child in self.children:
child.parent = self
dump_lines = JaDebug.dump_lines_utterance
def __str__(self):
return self.surface
@shared_property
def is_utterance(self):
return True
from csc.nl.ja.chunk import *
from csc.nl.ja.cabocha_token import *
from csc.nl.ja.parser import *
|
[
"barton.pj@gmail.com"
] |
barton.pj@gmail.com
|
2bfec92c85686a48a0bf480793637b9b2fbe0c90
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/voice-service/azext_voice_service/__init__.py
|
74808adfe69dcf2b4492cb572043ecf8ee1296d8
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azext_voice_service._help import helps # pylint: disable=unused-import
class VoiceServiceCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core.commands import CliCommandType
custom_command_type = CliCommandType(
operations_tmpl='azext_voice_service.custom#{}')
super().__init__(cli_ctx=cli_ctx,
custom_command_type=custom_command_type)
def load_command_table(self, args):
from azext_voice_service.commands import load_command_table
from azure.cli.core.aaz import load_aaz_command_table
try:
from . import aaz
except ImportError:
aaz = None
if aaz:
load_aaz_command_table(
loader=self,
aaz_pkg_name=aaz.__name__,
args=args
)
load_command_table(self, args)
return self.command_table
def load_arguments(self, command):
from azext_voice_service._params import load_arguments
load_arguments(self, command)
COMMAND_LOADER_CLS = VoiceServiceCommandsLoader
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
1c9c05109df0cd972f65b8c29bd5c952deceb307
|
c83bb15f5f4ec5c5d2b2e05a7222eaf1fd4f49e5
|
/myops_run.py
|
ad4beb49c9217b542c766c3de0bd0d6f15327695
|
[
"Apache-2.0"
] |
permissive
|
Xinya-liuliu/MyoPS2020
|
47fd2a0a3dd050ae128a1ddbe3b9b966e0cb45e0
|
6ac6157070315c7917a59954476682c1144f3845
|
refs/heads/main
| 2023-07-31T22:27:31.485620
| 2021-09-24T05:39:33
| 2021-09-24T05:39:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import os
import sys
from pymic.util.parse_config import parse_config
from pymic.net_run.agent_seg import SegmentationAgent
from path_config import path_dict
def main():
if(len(sys.argv) < 4):
print('Number of arguments should be 4. e.g.')
print(' python myops_run.py train config.cfg 1')
exit()
stage = str(sys.argv[1])
cfg_file = str(sys.argv[2])
fold = str(sys.argv[3])
if(not os.path.isfile(cfg_file)):
print("configure file does not exist: {0:} ".format(cfg_file))
exit()
# reset data dir of each fold for configure
config = parse_config(cfg_file)
data_dir = config['dataset']['root_dir']
data_dir = data_dir.replace('MyoPS_data_dir', path_dict['MyoPS_data_dir'])
config['dataset']['root_dir'] = data_dir
for item in ['train_csv', 'valid_csv', 'test_csv']:
config['dataset'][item] = config['dataset'][item].replace("foldi", "fold" + fold)
ckpt_save_dir = config['training']['ckpt_save_dir']
ckpt_save_dir = ckpt_save_dir.replace("fold_i", "fold_" + fold)
config['training']['ckpt_save_dir'] = ckpt_save_dir
agent = SegmentationAgent(config, stage)
agent.run()
if __name__ == "__main__":
main()
|
[
"wguotai@gmail.com"
] |
wguotai@gmail.com
|
50074aa47c4416235453ba945d4a0326e34c5eb4
|
06e359c19c14a549d28ce8ab62a6e1e5f40f0ea8
|
/ScikitLearn/unsupervised/biClusteringL.py
|
8b78e68f28cf461c745d7aebe4febc5c7c318e19
|
[
"Apache-2.0"
] |
permissive
|
thomasearnest/MDeepLearn
|
90c6f4c6d5f148f91b1ce95471cad42baee26977
|
00eb7211a3a40a9da02114923647dfd6ac24f138
|
refs/heads/master
| 2021-10-20T07:27:33.168927
| 2019-02-26T13:37:48
| 2019-02-26T13:37:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:biClusteringL
Description : 双聚类,对行列同时进行聚类
Email : autuanliu@163.com
Date:2018/1/1
"""
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.datasets import make_biclusters
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(shape=(300, 300), n_clusters=5, noise=0.5, random_state=0)
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_, (rows, columns))
print('scores: {}'.format(score))
|
[
"autuanliu@163.com"
] |
autuanliu@163.com
|
b39073640b54f81c5cab62529f62b21ee9042550
|
57120090948f99de2258a6f01a0cc65443441ce9
|
/hyperclass/exe/hyperclass/IndianaPines.py
|
867c2bd892b75164b4d35fd74fe05a5cd177b27f
|
[] |
no_license
|
MysteriousSonOfGod/hyperclass
|
c67eff91f6f0f64fa4a92f8567243ef5cd8fa3c8
|
e8cec11b364e8b049e7432b95ce20a2c5de94235
|
refs/heads/master
| 2023-01-28T16:42:09.289664
| 2020-12-07T22:54:50
| 2020-12-07T22:54:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,443
|
py
|
from hyperclass.gui.application import HCApplication
from hyperclass.data.manager import dataManager
from hyperclass.gui.labels import labelsManager
from hyperclass.gui.spatial.application import SpatialAppConsole
import sys
ref_file = "/Users/tpmaxwel/Dropbox/Tom/Data/Aviris/IndianPines/documentation/Site3_Project_and_Ground_Reference_Files/19920612_AVIRIS_IndianPine_Site3_gr.tif"
classes = [ ('Alfalfa', [255, 254, 137] ),
('Corn-notill', [3,28,241] ),
('Corn-mintill', [255, 89, 1] ),
('Corn', [5, 255, 133] ),
('Grass/Pasture', [255, 2, 251] ),
('Grass/Trees', [89, 1, 255 ]),
('Grass/pasture-mowed', [3, 171, 255]),
('Hay-windrowed', [12, 255, 7 ]),
('Oats', [172, 175, 84 ]),
('Soybean-notill',[160, 78,158]),
('Soybean-mintill', [101, 173, 255]),
('Soybean-cleantill', [60, 91, 112] ),
('Wheat', [104, 192, 63]),
('Woods', [139, 69, 46]),
('Bldg-Grass-Tree-Drives', [119, 255, 172]),
('Stone/steel-towers', [254, 255, 3])
]
tabs = dict( Reference=dict( type="reference", classes=classes, path=ref_file ) )
default_settings = {}
app = HCApplication()
labelsManager.setLabels( classes )
dataManager.initProject( 'hyperclass', default_settings )
hyperclass = SpatialAppConsole( tabs=tabs )
hyperclass.show()
sys.exit(app.exec_())
|
[
"thomas.maxwell@nasa.gov"
] |
thomas.maxwell@nasa.gov
|
3dbbb9d3f258c8d80d132dd44156efd15a3ad0bb
|
2af1489471d199861b2e7cd63676d842eb65ec5a
|
/channelstream/wsgi_app.py
|
3af579f0067e57ade46e4412500c4da566c0c211
|
[
"BSD-3-Clause"
] |
permissive
|
webclinic017/channelstream
|
637df12982817bd6c74fc1cb91c8571e5c551eef
|
73434adca2812a682b739f86bf1ca320e1f1603c
|
refs/heads/master
| 2023-05-12T15:55:13.453463
| 2020-10-21T19:08:08
| 2020-10-21T19:08:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,229
|
py
|
import datetime
import uuid
import importlib
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator
from pyramid.renderers import JSON
from pyramid.security import NO_PERMISSION_REQUIRED
from channelstream import patched_json as json
from channelstream.wsgi_views.wsgi_security import APIFactory
def datetime_adapter(obj, request):
return obj.isoformat()
def uuid_adapter(obj, request):
return str(obj)
def make_app(server_config):
config = Configurator(
settings=server_config, root_factory=APIFactory, default_permission="access"
)
config.include("pyramid_jinja2")
module_, class_ = server_config["signature_checker"].rsplit(".", maxsplit=1)
signature_checker_cls = getattr(importlib.import_module(module_), class_)
config.registry.signature_checker = signature_checker_cls(server_config["secret"])
authn_policy = AuthTktAuthenticationPolicy(
server_config["cookie_secret"], max_age=2592000
)
authz_policy = ACLAuthorizationPolicy()
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(authz_policy)
json_renderer = JSON(serializer=json.dumps, indent=4)
json_renderer.add_adapter(datetime.datetime, datetime_adapter)
json_renderer.add_adapter(uuid.UUID, uuid_adapter)
config.add_renderer("json", json_renderer)
config.add_subscriber(
"channelstream.subscribers.handle_new_request", "pyramid.events.NewRequest"
)
config.add_request_method("channelstream.utils.handle_cors", "handle_cors")
config.include("channelstream.wsgi_views")
config.scan("channelstream.wsgi_views.server")
config.scan("channelstream.wsgi_views.error_handlers")
config.scan("channelstream.events")
config.include("pyramid_apispec.views")
config.pyramid_apispec_add_explorer(
spec_route_name="openapi_spec",
script_generator="channelstream.utils:swagger_ui_script_template",
permission="admin",
route_args={
"factory": "channelstream.wsgi_views.wsgi_security:AdminAuthFactory"
},
)
app = config.make_wsgi_app()
return app
|
[
"info@webreactor.eu"
] |
info@webreactor.eu
|
f86cf73fb42b39100b549b4648351468bffff0c4
|
9672b0bd6c73f35fdcc04dcf884d2e8425e78359
|
/resources/exceptions.py
|
96182522c64fecb5138ccb6a30069f52aef07680
|
[
"MIT"
] |
permissive
|
surfedushare/pol-research
|
a91d5b6c95184719d721e3a8541e36b77a9ed1c6
|
5d07782fba0a894e934efb1dd199b6a4a19f678b
|
refs/heads/master
| 2022-01-07T00:50:24.427941
| 2019-05-02T12:29:53
| 2019-05-02T12:29:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
class DGResourceException(Exception):
def __init__(self, message, resource):
super().__init__(message)
self.resource = resource
class DGShellError(DGResourceException):
pass
|
[
"email@fakoberkers.nl"
] |
email@fakoberkers.nl
|
c161b56d5d336d5b33f873a135e6ff06d4a82968
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_74/931.py
|
67218b9c71e79a373560e676c36322fa8eb846ab
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,047
|
py
|
import sys
class Runner(object):
def __init__(self, test_case):
self.test_case = test_case
self.state = {
"O": 1,
"B": 1,
}
def run(self):
def done():
return not bool(self.test_case.instructions)
def next(robot):
next_buttons = [i['button'] for i in self.test_case.instructions if i['robot'] == robot]
return next_buttons[0] if next_buttons else 0
def next_button():
return self.test_case.instructions[0]
def press():
self.test_case.instructions.pop(0)
def wait():
pass
def move(robot, forward=True):
if forward:
self.state[robot] += 1
else:
self.state[robot] -= 1
steps = 0
while True:
if done(): break
n = next_button()
for robot in self.state.keys():
robots_next = next(robot)
if robots_next == self.state[robot]:
if n['robot'] == robot:
press()
else:
wait()
else:
move(robot, self.state[robot] < robots_next)
steps += 1
print 'Case #%d: %d' % (self.test_case.number, steps)
class TestCase(object):
def __init__(self, spec_string, number):
self.number = number
self.instructions = []
spec = spec_string.strip().split(" ")
spec.pop(0) # discard count.
while spec:
robot = spec.pop(0)
button = int(spec.pop(0))
t = {'robot': robot, 'button': button}
self.instructions.append(t)
def read_input():
lines = sys.stdin.readlines()
return [TestCase(test_case_spec, test_case_number) for test_case_spec, test_case_number in zip(lines[1:], xrange(1, len(lines))) ]
if __name__ == '__main__':
test_cases = read_input()
for test_case in test_cases:
Runner(test_case).run()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
3aa380bc45ee5078c114892476157413ad664058
|
908655251066427f654ee33ebdf804f9f302fcc3
|
/Tests/CartPoleAST/Test/ast_reward.py
|
c8213b436bd92572cc9153556fabc64880fe402c
|
[] |
no_license
|
maxiaoba/MCTSPO
|
be567f80f1dcf5c35ac857a1e6690e1ac599a59d
|
eedfccb5a94e089bd925b58f3d65eef505378bbc
|
refs/heads/main
| 2023-07-05T02:20:16.752650
| 2021-07-06T06:04:40
| 2021-07-06T06:04:40
| 381,811,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 565
|
py
|
# useful packages for math and debugging
import numpy as np
import pdb
# Define the class, inherit from the base
class ASTReward(object):
def __init__(self):
pass
def give_reward(self, action, **kwargs):
"""
returns the reward for a given time step
Input
-----
kwargs : accepts relevant info for computing the reward
Outputs
-------
(observation, reward, done, info)
reward [Float] : amount of reward due to the previous action
"""
raise NotImplementedError
|
[
"xiaobaima@DNab421bb2.stanford.edu"
] |
xiaobaima@DNab421bb2.stanford.edu
|
209da61669af27b11a8a5affd1de64c8fb12117a
|
d312ced5d6a06d35937b3f3d6a7415746ef06f71
|
/weibo2rss/urls.py
|
6db52f1cd121a8c315918fba590d36665f1c5d7c
|
[] |
no_license
|
chu888chu888/Python-SAE-weibo2rss
|
931ccaf2cd8438ebbfd6eff3b65a54b88e63e999
|
80f0c9e862505218a4284fcebd67276e8b372e31
|
refs/heads/master
| 2020-04-10T08:44:54.448809
| 2012-12-09T07:07:34
| 2012-12-09T07:07:34
| 9,112,168
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 594
|
py
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from weibo2rss.views import *
urlpatterns = patterns('',
url(r'^$', getfeedurl), # 主页
url(r'^callback/$', callback), # 微博授权返回页
url(r'^timeline/(?P<uid>\d+)/$', timeline), # 微博timeline rss页,通过user id获取
url(r'^favorites/(?P<uid>\d+)/$', favorites), # 微博favorites rss页,通过user id获取
url(r'^admin/root/weibo/clean/$', clean), # 用于定时清理数据库过期授权,非对外页面,清理周期在config.yaml下cron字段定义
)
|
[
"zhu327@gmail.com"
] |
zhu327@gmail.com
|
34b2d24f65f93831c306c1d5e2fc2783c605cea3
|
3f48e3308674212408c3b6ca972eb4f793bf142b
|
/f7_chipseq/f7_diff_binding_on_UTX_sep_k4me1/f1_differential_binding_figs/py4c_heatmap_across_CellTypes_k27ac_increased.py
|
d8d3e09d9ee4e0d87d9e5c650b7f448484669bd7
|
[] |
no_license
|
zanglab/utx_code
|
8497840ace81e0337f92f04fafbb691f0ed24865
|
32fc7851207f650b3cc78974ab798f8606099e56
|
refs/heads/main
| 2023-06-28T17:38:33.231877
| 2021-07-27T01:31:00
| 2021-07-27T01:31:00
| 388,648,753
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,138
|
py
|
import sys,argparse
import os,glob,re
import numpy as np
import pandas as pd
import matplotlib
# matplotlib.use('Agg')
from matplotlib import gridspec
import matplotlib.pyplot as plt
matplotlib.rcParams['font.size']=14
matplotlib.rcParams["font.sans-serif"] = ["Arial", "Liberation Sans", "Bitstream Vera Sans"]
matplotlib.rcParams["font.family"] = "sans-serif"
import seaborn as sns
sns.set_style("whitegrid", {'axes.grid' : False,'grid.color': 'grey'})
sns.set_style("ticks",{'ytick.color': 'k','axes.edgecolor': 'k'})
from matplotlib.colors import LinearSegmentedColormap
#plus = re.compile('\+')
#minus = re.compile('\-')
matplotlib.rcParams["font.sans-serif"] = ["Arial"]
from scipy.interpolate import interpn
def window_cumulative(df,half_window=7,step=1):
smooth_df_columns = np.arange(0,len(df.columns),step)
smooth_df = pd.DataFrame(index=df.index,columns=smooth_df_columns)#;print(smooth_df.columns)
for col in smooth_df_columns:
window_left = max(col-half_window,0)
window_right = min(col+half_window,len(df.columns)-1)
smooth_df.loc[:,col] = df.iloc[:,window_left:window_right+1].mean(axis=1)
#print(df,smooth_df)
return smooth_df
def signal_centered(df):
center_position = int(df.shape[1]/2)
for row in df.index:
vals = df.loc[row]
max_index = list(vals).index(vals.max())
# move max to center
if max_index<center_position:
df.loc[row] = np.append(np.zeros(center_position-max_index),vals)[:df.shape[1]]
elif max_index>center_position:
df.loc[row] = np.append(vals[max_index-center_position:],np.zeros(max_index-center_position))
return df
def return_vlim(factor,peak_file):
if re.search('islands',peak_file):
factor_match_clim = {'UTX':2,
'UTXFEB':2,
'H3K27me3':4,
'MLL4':4,
'H3K27ac':4,
'H3K4me1':4,
'H3K4me2':4,
'H3K4me3':4}
else:
factor_match_clim = {'UTX':3,
'UTXFEB':3,
'H3K27me3':5,
'MLL4':5,
'H3K27ac':5,
'H3K4me1':5,
'H3K4me2':5,
'H3K4me3':5}
cbar_vmax = factor_match_clim[factor]
return cbar_vmax*0.05,cbar_vmax
def prepare_each_subfig(df_tmp, gs, heatmap_pos, peak_file, factor, celltype):
    """Draw one heatmap panel of ChIP-seq binding for (factor, celltype).

    Reads the pre-computed read-count matrix for this factor/celltype on
    the given peak set, restricts it to the rows of ``df_tmp``, smooths
    and normalizes it, draws it into column ``heatmap_pos`` of GridSpec
    ``gs``, and returns the column-wise mean signal (for composite plots).

    Relies on module globals: project_dir, cellType_labels, and the
    helpers window_cumulative / return_vlim.
    """
    # read the binding pattern for each factor/celltype
    csv_file='../data_binding_patter_readCount/readCount_csv/{}_{}_on_{}_es2kb_bin200.csv'.format(celltype,factor,peak_file)
    df = pd.read_csv(csv_file,sep='\t',index_col=0)
    # keep only the (filtered) binding sites selected by the caller
    df = df.loc[df_tmp.index[:]]
    df = window_cumulative(df)
    # normalization by readcount
    norm_df = pd.read_csv('{}/f0_data_process/chip_seq/final_chipseq/sicer2_islands/get_readCount_on_sicer_islads/total_reads_in_Islands.csv'.format(project_dir),index_col=0)
    # UTX samples are normalized by total reads; others by reads in islands
    norm_col = 'total' if re.search('UTX',factor) else 'total_in_islads'
    print(peak_file,factor,celltype,norm_col)
    norm_factor = norm_df.loc['{}_{}'.format(celltype,factor),norm_col]/1000000
    df = 50*df/norm_factor # per kb per million mapped reads
    pal = sns.light_palette('red',as_cmap=True)
    vmin,vmax = return_vlim(factor,peak_file)
    # vmin=None;vmax=None
    # clip extreme values at the 98th percentile so outliers don't saturate
    all_values = [i for col in df.columns for i in df[col].values]
    df = df.clip(upper=np.percentile(all_values,98))
    ax = plt.subplot(gs[0,heatmap_pos])
    g = sns.heatmap(df,yticklabels=False,xticklabels=True,cbar=True,cmap=pal,\
        vmin=vmin,vmax=vmax,cbar_kws={"shrink": 0.6})
    ax.set_ylabel('')
    cbar = g.collections[0].colorbar
    cbar.ax.set_position([.9,0.36,.8,.5])
    cbar.set_ticks([vmin,vmax])
    cbar.set_ticklabels([0,vmax])
    # only the left-most panel carries the y-axis label
    if heatmap_pos==0:
        ax.set_ylabel('UTX binding sites \n (#{})\n'.format(df.shape[0]),va='baseline')
    # only the right-most (4th) panel keeps its colorbar
    if not heatmap_pos==3:
        cbar.remove()
    xp = g.get_xticks()
    ax.set_xticks([xp[0],xp[-1]])
    ax.set_xticklabels(['-2kb','2kb'],rotation=45,fontsize=13)
    ax.set_title('{}\n{}'.format(factor, cellType_labels[celltype]),fontsize=14)
    return df.mean()
# ==== dictionary of matched colors/labels
# Plot color per engineered cell type (Vector = empty-vector control).
cellType_colors = {'Vector':'tab:blue',\
                   'WT':'tab:red',\
                   'DEL':'k',\
                   'EIF':'tab:purple',\
                   'TPR':'tab:green',\
                   'MT2':'tab:orange',\
                   'FUS':'tab:gray'}
# Display label per cell type (LaTeX-formatted where needed).
cellType_labels= {'Vector':'Vector',\
                  'WT':'WT',\
                  'DEL':'$\Delta$cIDR',\
                  'EIF':'UTX-eIF$_{IDR}$',\
                  'TPR':'$\Delta$TPR',\
                  'MT2':'MT2',\
                  'FUS':'UTX-FUS$_{IDR}$'}
cellTypes = ['Vector','WT','DEL','EIF']
factors = ['UTX','UTXFEB','H3K27me3','MLL4','H3K4me1','H3K4me2','H3K4me3','H3K27ac']
peak_files = ['UTX_peaks','UTX_islands','UTXFEB_islands','UTXFEB_peaks']
project_dir='/nv/vol190/zanglab/zw5j/since2019_projects/UTX_HaoJiang/'
# project_dir='/Volumes/zanglab/zw5j/since2019_projects/UTX_HaoJiang/'
indir='../f0_data_integration/f2_combined_data/'
outdir = 'f4c_heatmap_across_CellTypes_k72ac_increased'
os.makedirs(outdir,exist_ok=True)
# k4me1_log2fc_col='H3K4me1_WT_over_H3K4me1_Vector_log2FC'
# k4me1_log2avg_col = 'H3K4me1_WT_over_H3K4me1_Vector_log2AVG'
# Columns used to select sites with increased H3K27ac in WT vs Vector.
hm_log2fc_col='H3K27ac_WT_over_H3K27ac_Vector_log2FC'
hm_log2avg_col = 'H3K27ac_WT_over_H3K27ac_Vector_log2AVG'
fc_thres = [1.5,2]
log2avg_thre = 0
# == rank value by UTX signal
# csv_file='{}//f7_chipseq/f2_differential_binding_on_202011_UTX_WT_peaks/data_binding_pattern/rpkm_csv/Vector_UTX_es2kb_bin200_on_202011_UTX_WT_peaks.csv'.format(project_dir)
# index_df = pd.read_csv(csv_file,sep='\t',index_col=0)
# ranked_index = index_df.sum(axis=1).sort_values(ascending=False).index
# Main driver: for every peak set and fold-change threshold, select sites
# with increased H3K27ac, then draw per-factor heatmaps and composite plots.
for peak_file in peak_files[:]:
    master_df = pd.read_csv('{}/combined_DiffNormReads_on_{}.csv'.format(indir,peak_file),index_col=0)
    for fc_thre in fc_thres:
        # sites whose H3K27ac increased (log2FC above threshold, log2AVG above floor)
        master_df_tmp = master_df[(master_df[hm_log2fc_col]> np.log2(fc_thre)) & (master_df[hm_log2avg_col]>log2avg_thre)]
        for factor in factors[:]:
            # data for composite plot
            composite_data = {}
            # heatmap of each factor
            fig = plt.figure(figsize = (6,2))
            width_ratio = [1,1,1,1]
            gs = gridspec.GridSpec(1,4,width_ratios=width_ratio,wspace=.1)
            heatmap_pos=0
            for celltype in cellTypes[:]:
                avg_binding = prepare_each_subfig(master_df_tmp,gs,heatmap_pos,peak_file,factor,celltype)
                composite_data[celltype]=avg_binding
                heatmap_pos+=1
            plt.savefig(outdir+os.sep+'fcthre_{}_{}_{}_binding.png'.format(fc_thre,peak_file,factor,),bbox_inches='tight',pad_inches=0.1,transparent=True,dpi=600)
            plt.show()
            plt.close()
            # == composite plot
            fig = plt.figure(figsize = (3,2))
            for celltype in cellTypes[:]:
                plt.plot(composite_data[celltype],
                         label = cellType_labels[celltype],
                         color = cellType_colors[celltype])
            plt.ylabel('{} signal'.format(factor))
            # plt.ylim(ymax=9 if norm_pattern=='RPKM' else 5)
            # NOTE(review): plt.axes() re-creates/fetches axes after plotting;
            # presumably relies on it returning the current axes — confirm on
            # newer matplotlib where this usage is deprecated.
            plt.axes().set_xticks([0,100,200])
            plt.axes().set_xticklabels(['-2kb','0','2kb'])
            plt.legend(fontsize=12,borderaxespad=0.1,labelspacing=.2,handletextpad=0.2,
                       handlelength=1,loc="upper right",
                       bbox_to_anchor=[1.65,1],
                       frameon=False)
            plt.savefig(outdir+os.sep+'composite_fcthre_{}_{}_{}_binding.png'.format(fc_thre,peak_file,factor),bbox_inches='tight',pad_inches=0.1,transparent=True,dpi=600)
            plt.show()
            plt.close()
|
[
"zhenjia.sdu@gmail.com"
] |
zhenjia.sdu@gmail.com
|
02455e17cb15c2f1da50bd50f0afd2b4b54e1341
|
ed81cc186915e55cd0fbf3ba7717193f68290c46
|
/mcq_v2/quiz_qus/migrations/0003_auto_20181001_2205.py
|
6e738716db91f38e7b7068f599b4a9b1aeb263ec
|
[] |
no_license
|
MMIL/MCQ_Module_V2
|
ea07daf8845afd5023edbea716b0f3808f921e06
|
02586652971eb8d5b952ac8542172a57ab909c3e
|
refs/heads/master
| 2022-12-11T22:34:12.547492
| 2020-03-02T17:23:20
| 2020-03-02T17:23:20
| 146,754,594
| 1
| 4
| null | 2022-11-22T02:52:48
| 2018-08-30T13:34:56
|
CSS
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
# Generated by Django 2.1.1 on 2018-10-01 22:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a UNIQUE constraint to Question.qus_id.
    # Migration files should not be edited once applied.

    dependencies = [
        ('quiz_qus', '0002_question_qus_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='question',
            name='qus_id',
            field=models.IntegerField(unique=True),
        ),
    ]
|
[
"dwevediar@gmail.com"
] |
dwevediar@gmail.com
|
14a76995fb36ee801799c35e39e7112c522601c9
|
7b13e6acb2a1f26936462ed795ee4508b4088042
|
/算法题目/算法题目/动态规划/最长公共子序列/最长公共子序列.py
|
57720483fde66ff1f0318f2fcec1681da5c3ea10
|
[] |
no_license
|
guojia60180/algorithm
|
ed2b0fd63108f30cd596390e64ae659666d1c2c6
|
ea81ff2722c7c350be5e1f0cd6d4290d366f2988
|
refs/heads/master
| 2020-04-19T08:25:55.110548
| 2019-05-13T13:29:39
| 2019-05-13T13:29:39
| 168,076,375
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 995
|
py
|
#Author guo
'''
动态规划想法
定义二维数组dp[i][j]用来存储最长公共子序列的长度
其中dp[i][j]表示S1前i个字符与S2前j个字符最长公共子序列长度
考虑S1i与S2j是否值相等分为两种情况
相等时,dp[i][j]=dp[i-1][j-2]+1
不相等时,取最大值
'''
#递归
def recursive_lcs(stra, strb):
    """Return the length of the longest common subsequence of two strings.

    Same recurrence as the naive recursive version (match -> 1 + LCS of
    both tails; mismatch -> max of dropping one head), but memoized on
    (i, j) index pairs, bringing the exponential recursion down to
    O(len(stra) * len(strb)) time. Interface and results are unchanged.
    """
    memo = {}

    def lcs_at(i, j):
        # LCS length of stra[i:] vs strb[j:]
        if i == len(stra) or j == len(strb):
            return 0
        key = (i, j)
        if key not in memo:
            if stra[i] == strb[j]:
                memo[key] = lcs_at(i + 1, j + 1) + 1
            else:
                memo[key] = max(lcs_at(i + 1, j), lcs_at(i, j + 1))
        return memo[key]

    return lcs_at(0, 0)
#动态规划
def lcs(stra, strb):
    """Return (substring, length) of the longest common substring.

    Classic dynamic programming: cell (i, j) holds the length of the
    common suffix of stra[:i] and strb[:j]; only the previous row is
    kept. Returns ('', 0) when there is no common substring.
    """
    best_len = 0
    best_end = 0  # index one past the end of the best match in stra
    prev = [0] * (len(strb) + 1)
    for i, ch_a in enumerate(stra):
        curr = [0] * (len(strb) + 1)
        for j, ch_b in enumerate(strb):
            if ch_a == ch_b:
                curr[j + 1] = prev[j] + 1
                if curr[j + 1] > best_len:
                    best_len = curr[j + 1]
                    best_end = i + 1
        prev = curr
    return stra[best_end - best_len:best_end], best_len
|
[
"44565715+guojia60180@users.noreply.github.com"
] |
44565715+guojia60180@users.noreply.github.com
|
1a46bb5ed824ced4f62fc30a3ac0f058c451d445
|
5368c5c131da8ab226015cfd561cc3016c60fc82
|
/venv/bin/chardetect
|
8ae64b45acdfbfbd24ea506daac71d53d66b8ec8
|
[] |
no_license
|
ojudsonleo/DevCode
|
d621eed2d6555fa9c3fc37314edfc0646c4d1710
|
ee22b9ed560275f276672d62e219c8d429726c23
|
refs/heads/main
| 2023-05-27T10:50:15.593840
| 2021-06-08T05:53:45
| 2021-06-08T05:53:45
| 374,292,419
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
#!/home/admin/Desktop/Python/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    # pip-generated console entry point: strip setuptools wrapper suffixes
    # from argv[0] before delegating to chardet's CLI main().
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"ojudsonleo@gmail.com"
] |
ojudsonleo@gmail.com
|
|
cb76a7dfc791e4ea861199bb4573e6a0bed1781d
|
5545d3c3e910ccb5b45b2277a71ad3c3ea3caedc
|
/jamenson/runtime/bases.py
|
0f10210b6209ad6eb4bc9aa0a786f28149df0744
|
[
"Apache-2.0"
] |
permissive
|
matthagy/Jamenson
|
61de19c71da6e133bf7d8efbb933a1036cf1e6f5
|
18a0fdd60b3d56ed4a6d4e792132535324490634
|
refs/heads/master
| 2016-09-11T04:31:28.895242
| 2013-04-04T00:14:44
| 2013-04-04T00:14:44
| 1,781,863
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
class CachingBase(object):
    """Base class whose instances are interned per constructor key.

    Calling the class with the same arguments returns the same (cached)
    object. Subclasses are expected to provide get_key(*args) and
    _init_cached(*key). Python 2 only: uses the __metaclass__ hook.
    """
    class __metaclass__(type):
        # Python 2 metaclass: gives every subclass its OWN _cache dict
        # (a plain class attribute would be shared via inheritance).
        def __new__(cls, name, bases, dct):
            dct = dict(dct)
            dct['_cache'] = dict()
            return type.__new__(cls, name, bases, dct)
    def __new__(cls, *args):
        # Intern instances: look up by key, construct only on a miss.
        key = cls.get_key(*args)
        try:
            return cls._cache[key]
        except KeyError:
            # _init_cached plays the role of __init__ for fresh instances
            self = cls._cache[key] = object.__new__(cls)
            self._init_cached(*key)
            return self
|
[
"hagy@gatech.edu"
] |
hagy@gatech.edu
|
a04c7206a90f059fc1e7a64f2ee14889c8afdec6
|
4a74ec1b7e299540b924bce4928537a51fc00ff5
|
/day24_day30/day30/최소비용.py
|
d89cf0abe4576789fbcb40888e9bf1e4dbc0130c
|
[] |
no_license
|
yeonlang/algorithm
|
ef74b2592818495f29f6de5f44f81ccf307efa59
|
ab788658bb781773c489cac8c6e8d2bea48fda07
|
refs/heads/master
| 2020-04-22T20:25:46.243355
| 2019-05-08T15:17:45
| 2019-05-08T15:17:45
| 170,641,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,030
|
py
|
import sys
sys.stdin = open("최소비용.txt")
def ispass(ny, nx, y, x):
    """Relax the move (y, x) -> (ny, nx); return True if it improved.

    Moving uphill costs (height difference + 1); flat or downhill moves
    cost 1. Updates the global ``visited`` cost grid when the new cost
    beats the stored one (-1 means unvisited). Uses globals N, data,
    visited.
    """
    if not (0 <= ny < N and 0 <= nx < N):
        return False
    climb = data[ny][nx] - data[y][x]
    step_cost = climb + 1 if climb > 0 else 1
    candidate = visited[y][x] + step_cost
    if visited[ny][nx] == -1 or visited[ny][nx] > candidate:
        visited[ny][nx] = candidate
        return True
    return False
def BFS(y, x):
    """Breadth-first cost relaxation over the grid starting at (y, x).

    Seeds visited[y][x] with 0 and keeps re-expanding any cell whose
    cost improves (via ispass) until no update remains. Uses globals
    dy, dx, visited.
    """
    visited[y][x] = 0
    queue = [(y, x)]
    while queue:
        cy, cx = queue.pop(0)
        for step_y, step_x in zip(dy, dx):
            ny, nx = cy + step_y, cx + step_x
            if ispass(ny, nx, cy, cx):
                queue.append((ny, nx))
# Four-directional neighbor offsets (down, right, up, left).
dy = [1,0,-1,0]
dx = [0,1,0,-1]
# For each test case: read an NxN height grid, compute the cheapest cost
# from the top-left to the bottom-right corner, and print it.
for tc in range(int(input())):
    N = int(input())
    data = [list(map(int,input().split())) for _ in range(N)]
    visited = [[-1]*N for _ in range(N)]  # -1 marks "not reached yet"
    BFS(0,0)
    print("#{} {}".format(tc+1,visited[N-1][N-1]))
|
[
"naspy001@gmail.com"
] |
naspy001@gmail.com
|
0051cc3d11fc72f806d6a37142f0100c2f5e6541
|
34965549da4706d3c8da21d06840452a35eef383
|
/dff_drfcn/config/config.py
|
e9038e61e56fa4fb9835ad460e68437ad5080162
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
xvjiarui/Deep-Feature-Flow
|
50410b650b4ae2754530a4e64891c04a0a3106fe
|
a6a860dbd76660260b461a7fbb3a674765f52b0c
|
refs/heads/master
| 2020-03-19T16:01:27.637599
| 2018-07-05T15:38:10
| 2018-07-05T15:38:10
| 136,696,520
| 0
| 0
| null | 2018-06-09T06:20:30
| 2018-06-09T06:20:30
| null |
UTF-8
|
Python
| false
| false
| 6,561
|
py
|
# --------------------------------------------------------
# Deep Feature Flow
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Xizhou Zhu, Yuwen Xiong, Bin Xiao
# --------------------------------------------------------
import yaml
import numpy as np
from easydict import EasyDict as edict
import os
# Global default configuration for Deep Feature Flow training/testing.
# Values here are overridden at runtime by update_config() from a YAML file.
config = edict()

config.MXNET_VERSION = ''
config.output_path = ''
config.symbol = ''
config.gpus = ''
config.CLASS_AGNOSTIC = True
config.SCALES = [(600, 1000)]  # first is scale (the shorter side); second is max size
config.USE_PHILLY = False

# default training
config.default = edict()
config.default.frequent = 20
config.default.kvstore = 'device'

# network related params
config.network = edict()
config.network.pretrained_dir = ''
config.network.pretrained_resnet = ''
config.network.pretrained_flow = ''
config.network.pretrained_epoch = 0
config.network.PIXEL_MEANS = np.array([0, 0, 0])
config.network.IMAGE_STRIDE = 0
config.network.RPN_FEAT_STRIDE = 16
config.network.RCNN_FEAT_STRIDE = 16
config.network.FIXED_PARAMS = ['gamma', 'beta']
config.network.ANCHOR_SCALES = (8, 16, 32)
config.network.ANCHOR_RATIOS = (0.5, 1, 2)
config.network.NORMALIZE_RPN = True
config.network.ANCHOR_MEANS = (0.0, 0.0, 0.0, 0.0)
config.network.ANCHOR_STDS = (0.1, 0.1, 0.4, 0.4)
config.network.NUM_ANCHORS = len(config.network.ANCHOR_SCALES) * len(config.network.ANCHOR_RATIOS)
config.network.DFF_FEAT_DIM = 1024
config.network.ROIDispatch = False
config.network.USE_NONGT_INDEX = False
config.network.NMS_TARGET_THRESH = '0.5'

# dataset related params
config.dataset = edict()
config.dataset.dataset = 'ImageNetVID'
config.dataset.image_set = 'DET_train_30classes+VID_train_15frames'
config.dataset.test_image_set = 'VID_val_videos'
config.dataset.root_path = './data'
config.dataset.dataset_path = './data/ILSVRC2015'
config.dataset.NUM_CLASSES = 31

# training schedule and optimizer settings
config.TRAIN = edict()

config.TRAIN.lr = 0
config.TRAIN.lr_step = ''
config.TRAIN.lr_factor = 0.1
config.TRAIN.warmup = False
config.TRAIN.warmup_lr = 0
config.TRAIN.warmup_step = 0
config.TRAIN.momentum = 0.9
config.TRAIN.wd = 0.0005
config.TRAIN.begin_epoch = 0
config.TRAIN.end_epoch = 0
config.TRAIN.model_prefix = ''

config.TRAIN.rpn_loss_scale = 3.0
config.TRAIN.nms_loss_scale = 1.0
config.TRAIN.nms_pos_scale = 4.0

# whether resume training
config.TRAIN.RESUME = False
# whether auto resume training
config.TRAIN.AUTO_RESUME = True
# whether flip image
config.TRAIN.FLIP = True
# whether shuffle image
config.TRAIN.SHUFFLE = True
# whether use OHEM
config.TRAIN.ENABLE_OHEM = False
# size of images for each device, 2 for rcnn, 1 for rpn and e2e
config.TRAIN.BATCH_IMAGES = 2
# e2e changes behavior of anchor loader and metric
config.TRAIN.END2END = False
# group images with similar aspect ratio
config.TRAIN.ASPECT_GROUPING = True

# R-CNN
# rcnn rois batch size
config.TRAIN.BATCH_ROIS = 128
config.TRAIN.BATCH_ROIS_OHEM = 128
# rcnn rois sampling params
config.TRAIN.FG_FRACTION = 0.25
config.TRAIN.FG_THRESH = 0.5
config.TRAIN.BG_THRESH_HI = 0.5
config.TRAIN.BG_THRESH_LO = 0.0
# rcnn bounding box regression params
config.TRAIN.BBOX_REGRESSION_THRESH = 0.5
config.TRAIN.BBOX_WEIGHTS = np.array([1.0, 1.0, 1.0, 1.0])

# RPN anchor loader
# rpn anchors batch size
config.TRAIN.RPN_BATCH_SIZE = 256
# rpn anchors sampling params
config.TRAIN.RPN_FG_FRACTION = 0.5
config.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
config.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
config.TRAIN.RPN_CLOBBER_POSITIVES = False
# rpn bounding box regression params
config.TRAIN.RPN_BBOX_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
config.TRAIN.RPN_POSITIVE_WEIGHT = -1.0

# used for end2end training
# RPN proposal
config.TRAIN.CXX_PROPOSAL = True
config.TRAIN.RPN_NMS_THRESH = 0.7
config.TRAIN.RPN_PRE_NMS_TOP_N = 12000
config.TRAIN.RPN_POST_NMS_TOP_N = 2000
config.TRAIN.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE
# approximate bounding box regression
config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED = True
config.TRAIN.BBOX_MEANS = (0.0, 0.0, 0.0, 0.0)
config.TRAIN.BBOX_STDS = (0.1, 0.1, 0.2, 0.2)

# Learn NMS
config.TRAIN.LEARN_NMS = False
config.TRAIN.JOINT_TRAINING = False
config.TRAIN.FIRST_N = 100

# DFF, trained image sampled from [min_offset, max_offset]
config.TRAIN.MIN_OFFSET = -9
config.TRAIN.MAX_OFFSET = 0

# testing-time settings
config.TEST = edict()

# R-CNN testing
# use rpn to generate proposal
config.TEST.HAS_RPN = False
# size of images for each device
config.TEST.BATCH_IMAGES = 1

# RPN proposal
config.TEST.CXX_PROPOSAL = True
config.TEST.RPN_NMS_THRESH = 0.7
config.TEST.RPN_PRE_NMS_TOP_N = 6000
config.TEST.RPN_POST_NMS_TOP_N = 300
config.TEST.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE

# whether to use softnms
config.TEST.SOFTNMS = False

# whether to use LEARN_NMS
config.TEST.LEARN_NMS = False
config.TEST.FIRST_N = 0
config.TEST.MERGE_METHOD = -1

# RCNN nms
config.TEST.NMS = 0.3

# DFF
config.TEST.KEY_FRAME_INTERVAL = 10

config.TEST.max_per_image = 300

# Test Model Epoch
config.TEST.test_epoch = 0
def update_config(config_file):
    """Merge settings from a YAML experiment file into the global config.

    Top-level keys must already exist in ``config`` (a ValueError is
    raised otherwise). Dict-valued keys are merged item by item, with
    TRAIN.BBOX_WEIGHTS and network.PIXEL_MEANS converted to numpy
    arrays; SCALES replaces its first entry with a tuple.
    """
    with open(config_file) as f:
        # safe_load instead of yaml.load: yaml.load without an explicit
        # Loader can construct arbitrary Python objects from the file and
        # is deprecated (required argument since PyYAML 6).
        exp_config = edict(yaml.safe_load(f))
    for k, v in exp_config.items():
        if k not in config:
            raise ValueError("key must exist in config.py")
        if isinstance(v, dict):
            # a few nested keys need type coercion before the merge
            if k == 'TRAIN':
                if 'BBOX_WEIGHTS' in v:
                    v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])
            elif k == 'network':
                if 'PIXEL_MEANS' in v:
                    v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])
            for vk, vv in v.items():
                config[k][vk] = vv
        else:
            if k == 'SCALES':
                # keep SCALES a list with a single (short_side, max_size) tuple
                config[k][0] = (tuple(v))
            else:
                config[k] = v
def update_philly_config(model_dir, data_dir):
    """Rebase relative config paths onto Philly model/data mount points.

    Absolute paths are left untouched; relative ones are joined onto the
    given prefix (with a log line showing the rewrite).
    """
    def _absolutize(prefix, value):
        # already absolute: nothing to do
        if os.path.isabs(value):
            return value
        print("Update {} with {}".format(value, prefix))
        return os.path.join(prefix, value)

    config.output_path = _absolutize(model_dir, config.output_path)
    config.dataset.dataset_path = _absolutize(data_dir, config.dataset.dataset_path)
    config.dataset.root_path = _absolutize(data_dir, config.dataset.root_path)
    config.network.pretrained_dir = _absolutize(data_dir, config.network.pretrained_dir)
|
[
"xvjiarui0826@gmail.com"
] |
xvjiarui0826@gmail.com
|
c5c82c9f5560f4c2e4856e167c89d545433bb57d
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-ververica/aliyunsdkververica/request/v20200501/TableExistsRequest.py
|
137bb321ebbe22b34e009f2bfc35f45f452013f6
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821
| 2023-08-04T06:00:29
| 2023-08-04T06:00:29
| 39,558,861
| 1,080
| 721
|
NOASSERTION
| 2023-09-14T08:51:06
| 2015-07-23T09:39:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,144
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkververica.endpoint import endpoint_data
class TableExistsRequest(RoaRequest):
	"""ROA request for the Ververica ``TableExists`` API (version 2020-05-01).

	Generated SDK code: path parameters (workspace, namespace, cat) are
	substituted into the URI pattern; database and table travel as query
	parameters.
	"""

	def __init__(self):
		RoaRequest.__init__(self, 'ververica', '2020-05-01', 'TableExists')
		self.set_uri_pattern('/pop/workspaces/[workspace]/catalog/v1beta2/namespaces/[namespace]/catalogs/[cat]:tableExists')
		self.set_method('GET')
		# region/endpoint routing tables, when the SDK core supports them
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_workspace(self):
		return self.get_path_params().get('workspace')

	def set_workspace(self,workspace):
		self.add_path_param('workspace',workspace)

	def get_database(self):
		return self.get_query_params().get('database')

	def set_database(self,database):
		self.add_query_param('database',database)

	def get_cat(self):
		return self.get_path_params().get('cat')

	def set_cat(self,cat):
		self.add_path_param('cat',cat)

	def get_namespace(self):
		return self.get_path_params().get('namespace')

	def set_namespace(self,namespace):
		self.add_path_param('namespace',namespace)

	def get_table(self):
		return self.get_query_params().get('table')

	def set_table(self,table):
		self.add_query_param('table',table)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
58499b0b8766249579f224c8aae66b0468d1c391
|
24d9f077593b33c707b12d3a00cf91750f740729
|
/src/114. Flatten Binary Tree to Linked List.py
|
ca38f01ee8f682f594eaea6f2db7d9fe6f7ac343
|
[
"Apache-2.0"
] |
permissive
|
xiaonanln/myleetcode-python
|
274c8b8d7c29fd74dd11beb845180fb4e415dcd1
|
95d282f21a257f937cd22ef20c3590a69919e307
|
refs/heads/master
| 2021-01-22T21:45:59.786543
| 2019-04-21T15:24:23
| 2019-04-21T15:24:23
| 85,474,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
"""
Given a binary tree, flatten it to a linked list in-place.
For example,
Given
1
/ \
2 5
/ \ \
3 4 6
The flattened tree should look like:
1
\
2
\
3
\
4
\
5
\
6
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def flatten(self, root):
        """
        :type root: TreeNode
        :rtype: void Do not return anything, modify root in-place instead.

        Flattens the tree into a right-leaning linked list in preorder.
        """
        self.flattenHelper(root)

    def flattenHelper(self, root):
        """Flatten the subtree at ``root``; return (head, tail) of the list."""
        if root is None:
            return None, None
        left_head, left_tail = self.flattenHelper(root.left)
        right_head, right_tail = self.flattenHelper(root.right)
        if left_head is not None:
            # splice the flattened left list between root and the right list
            root.left = None
            root.right = left_head
            left_tail.right = right_head
        # otherwise root.right already points at the flattened right list
        tail = right_tail or left_tail or root
        return root, tail
from utils import *
# Smoke test: build the sample tree, flatten it, and print the result
# as a linked list chained through .right (helpers come from utils).
t = maketree([1, 2, 5, 3, 4, None, 6])
printtree(t)
Solution().flatten(t)
printlist(t, nextKey='right')
|
[
"xiaonanln@gmail.com"
] |
xiaonanln@gmail.com
|
a698ed601f3b430730ee2f2a7f75671b0eee1904
|
4992af29261214cb1e74375fc36dd51fd95db703
|
/sparkmagic/sparkmagic/kernels/sparkrkernel/sparkrkernel.py
|
12553364bb11624f8a652b7828b98250bb92c1f2
|
[
"BSD-3-Clause"
] |
permissive
|
logicalclocks/sparkmagic
|
9ef7ec0c124a8a317f14bb39b6a2b041ed1b8151
|
4f14b6ca54ac5fa54451392eafd5dd10721c000c
|
refs/heads/master
| 2023-05-31T23:30:52.208643
| 2023-02-28T08:21:16
| 2023-02-28T08:21:16
| 134,001,978
| 1
| 11
|
NOASSERTION
| 2023-03-02T10:57:00
| 2018-05-18T20:38:38
|
Python
|
UTF-8
|
Python
| false
| false
| 980
|
py
|
# Copyright (c) 2015 aggftw@gmail.com
# Distributed under the terms of the Modified BSD License.
from sparkmagic.utils.constants import LANG_R
from sparkmagic.kernels.wrapperkernel.sparkkernelbase import SparkKernelBase
class SparkRKernel(SparkKernelBase):
    """Jupyter wrapper kernel that submits R code to a remote Spark session."""

    def __init__(self, **kwargs):
        # Jupyter/CodeMirror metadata describing the wrapped R language.
        language_info = {
            'name': 'sparkR',
            'mimetype': 'text/x-rsrc',
            'codemirror_mode': 'text/x-rsrc',
            'pygments_lexer': 'r'
        }
        super(SparkRKernel, self).__init__(
            'SparkR',        # implementation
            '1.0',           # implementation_version
            'no-op',         # language
            '0.1',           # language_version
            language_info,
            LANG_R,          # session_language
            **kwargs)
if __name__ == '__main__':
    # Launch this wrapper as a standalone Jupyter kernel process.
    from ipykernel.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(kernel_class=SparkRKernel)
|
[
"noreply@github.com"
] |
logicalclocks.noreply@github.com
|
de77c75c5f975b4bbb813f8ff2747bef83345417
|
ca2c6bb4435138eae83d9776a672239651aac9bc
|
/week03/pymysql_insert.py
|
3b59c21070bb191d0f72fbf83794156c1865dcda
|
[] |
no_license
|
git-guozhijia/Python006-006
|
83b48180229d5be661cb2c3f12944b300a90db5a
|
99642c351bc5ebe6dab4a7287bfa3234c37d1a90
|
refs/heads/main
| 2023-04-08T17:31:00.003482
| 2021-04-16T03:15:35
| 2021-04-16T03:15:35
| 323,208,392
| 0
| 0
| null | 2020-12-21T02:15:12
| 2020-12-21T02:15:12
| null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
import pymysql
def insert_func(id, name):
    """Insert one row into the ``book`` table of ``test_db``.

    Parameters use placeholders (no SQL injection); errors are reported
    to stdout, the transaction is rolled back on failure, and the
    connection is always closed. Note: parameter ``id`` shadows the
    builtin, kept for interface compatibility.
    """
    db = pymysql.connect(host='localhost', port=3306, user='root', password='guozhijia123', db='test_db')
    try:
        with db.cursor() as cursor:
            # the with-block closes the cursor automatically on exit,
            # so no manual cursor.close() is needed
            mysql = "insert into book (`id`, `name`) value (%s, %s)"
            data = (id, name)
            cursor.execute(mysql, data)
        db.commit()
    except Exception as e:
        # roll back the partial transaction so the connection state stays clean
        db.rollback()
        print(f"insert error:{e}")
    finally:
        db.close()
# Script entry point: insert one sample row when run directly.
if __name__ == '__main__':
    insert_func(1002, '西游记')
|
[
"18811572354@139.com"
] |
18811572354@139.com
|
18050b373866ff46bc02391fb395be2e900b67cf
|
81c8aaec0ca8d9b345943d1f2324ace61eb034c6
|
/backend/markly/actions/models.py
|
ea833b1f906ff8abe47b7fca3af8265506c5991f
|
[
"MIT"
] |
permissive
|
salmanAndroidDev/markly
|
ba1c2e0107e79e4940ab2b5dd9455b877e044e25
|
c2b8c4a2fd99b6e2c374e127f62b10adbf143b7c
|
refs/heads/main
| 2023-06-02T21:18:03.710345
| 2021-06-25T18:28:54
| 2021-06-25T18:28:54
| 377,372,093
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models
class Action(models.Model):
    """Action model to store activities"""
    # actor who performed the activity; indexed for per-user feeds
    user = models.ForeignKey('auth.User',
                             related_name='actions',
                             db_index=True,
                             on_delete=models.CASCADE)
    # short description of the activity, e.g. "bookmarked"
    verb = models.CharField(max_length=255)
    created = models.DateTimeField(auto_now_add=True,
                                   db_index=True)

    # Making a generic relationship: target can point at any model via
    # (content type, primary key) pair.
    target_ct = models.ForeignKey(ContentType,
                                  blank=True,
                                  null=True,
                                  related_name='target_obj',
                                  on_delete=models.CASCADE)
    target_id = models.PositiveIntegerField(blank=True,
                                            null=True,
                                            db_index=True)
    target = GenericForeignKey('target_ct', 'target_id')

    class Meta:
        # newest activity first
        ordering = ('-created',)
|
[
"salmanAndB@outlook.com"
] |
salmanAndB@outlook.com
|
a664186b38b7feff544ed14e7958602120136940
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_1729+371/sdB_pg_1729+371_coadd.py
|
568443ae3e489c9b6fd876c2f3788f0e49d3b805
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
from gPhoton.gMap import gMap
def main():
    # Generated gPhoton job: build NUV count and coadded count maps for
    # target sdB_pg_1729+371 over a 2-arcmin box, 30 s time steps.
    gMap(band="NUV", skypos=[262.892625,37.091864], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_pg_1729+371/sdB_pg_1729+371_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_pg_1729+371/sdB_pg_1729+371_count_coadd.fits", overwrite=True, verbose=3)

if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
a4d5f9dbd7ff9760239211ade45c0106a72a2774
|
5ff8f807d6318f41843c645f6da60a9bc43ede80
|
/fabfile.py
|
be5aa937ae9f3288eeceff30d3bcb3d4effda0db
|
[] |
no_license
|
notmissingout/notmissingout_old
|
e0de2484ad68083aa892f96e44c900ff09f59a40
|
ed2521205679da61345a3335b99151bc0b952689
|
refs/heads/master
| 2021-06-18T03:06:28.420937
| 2017-05-14T09:37:34
| 2017-05-14T09:37:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 7,302
|
py
|
"""Manage a remotely-installed WSGI app on a Unix-like system, with
environment variables controlling a lot of the WSGI app's
functionality (12 Factor style).
All remote access to the app is done via an `invoke` script, which
contains the environment variables, which is created during setup.
Sensitive ones should be passed through fab's env rather than being
put directly into this script. (So this should remain valid for the
lifetime of an open source project. Hostnames, db credentials and the
like do not belong in here. We do have a default username and home
directory, for convenience.)
We create a virtual environment for every release. You probably want
to delete them after a while (but you probably want to delete the
releases as well). This is "better" than sharing a virtualenv, because
of the way pip upgrades packages (otherwise you will get periods where
the app will not work if it needs non-code files or just
previously-unused packages). It is, however, slower.
(Heroku's slug compilation is a better approach. It'd be nice to
detect differences and re-use virtualenvs using symlinking or
copy-and-upgrade in future. However we're not really here to build a
cheap PaaS.)
Getting started:
* with a remote user & host you have access to
$ fab -H HOST setup
$ fab -H host setup:restart=false
* subsequently, to put the latest master live
$ fab -H HOST deploy
* if something goes wrong, roll back to a specific version
$ fab -H switch_to:version=<VERS> restart_appserver
deploy will apply migrations; switch_to will not. Also, migrations are
applied while the site is still running, so should be backwards
compatible.
(deploy also runs compilestatic and compilemessages)
Remote server layout (setup makes these):
media upload media (shared between all releases)
archives tgz archives of code versions
releases unpacked, versioned by datetime of fabric invocation
releases/current symlink to current version
releases/previous symlink to previous version
releases/next symlink to version being upgraded to
releases/<>/ENV virtualenv per release
userv/rc userv script for starting app server
invoke environment-setting invocation script (acts both
as an init.d script and a way of invoking app tasks
such as migration, compilestatic
"""
from fabric.api import *
from fabric.contrib.files import exists
import os
import time
from deployment import fabhelpers
# Deployment constants: source repository/branch, project name, and the
# remote account (home directory doubles as the deployment root).
env.remote = 'git@github.com:notmissingout/notmissingout.git'
env.branch = 'master'
env.project = 'notmissingout'
env.user = 'notmissingout'
env.path = '/home/%s' % env.user
def deploy(restart='true'):
    """
    Deploy the latest version of the site to the servers.

    restart: 'true'/'True' restarts the app server after switching;
    anything else issues a plain 'start' instead (first deploy).
    """
    restart = (restart in ('true', 'True'))
    # installs any required third party modules, compiles static files
    # and messages, migrates the database and then restarts the
    # appserver
    env.release = time.strftime('%Y-%m-%dT%H.%M.%S')
    # github doesn't support upload-archive, so work from local repo
    fabhelpers.export_and_upload_tar_from_git_local()
    prep_release(env.release)
    switch_to(env.release)
    if restart:
        restart_appserver()
    else:
        invoke(command="start")
    # install the release's crontab for the deployment user
    fabhelpers.substitute_and_put(
        "deployment/crontab.in",
        "%s/crontab" % env.path,
        (
            ('TOPDIR', env.path),
        ),
        mode=0700,
    )
    run("crontab < %(path)s/crontab" % { 'path': env.path })
def switch_to(version):
    """Switch the current (ie live) version.

    Rotates the releases/current symlink to ``version``, keeping the old
    target as releases/previous and removing any releases/next marker.
    """
    require('hosts')
    previous_path = os.path.join(env.path, 'releases', 'previous')
    current_path = os.path.join(env.path, 'releases', 'current')
    if exists(previous_path):
        run('rm %s' % previous_path)
    if exists(current_path):
        run('mv %s %s' % (current_path, previous_path))
    # ln -s doesn't actually take a path relative to cwd as its first
    # argument; it's actually relative to its second argument
    run('ln -s %s %s' % (version, current_path))
    # tidy up the next marker if there was one
    run('rm -f %s' % os.path.join(env.path, 'releases', 'next'))
    env.release = version # in case anything else wants to use it after us
def prep_release(version):
    """Compile static, make messages and migrate.

    Marks ``version`` as releases/next, builds (or copies and upgrades)
    its virtualenv, installs requirements, then runs 'invoke prep'.
    """
    require('hosts')
    current_path = os.path.join(env.path, 'releases', 'current')
    next_path = os.path.join(env.path, 'releases', 'next')
    if exists(next_path):
        run('rm %s' % next_path)
    run('ln -s %s %s' % (version, next_path))
    # reuse the current release's virtualenv as a starting point when it
    # exists; otherwise build a fresh one
    run(
        "cd %(next_path)s; "
        "if [ -d %(current_path)s/ENV ]; then "
        " cp -a %(current_path)s/ENV %(next_path)s/ENV; "
        "else "
        " virtualenv ENV; "
        " ENV/bin/pip install --upgrade pip; "
        "fi; "
        "ENV/bin/pip install -r requirements/live.txt" % {
            'path': env.path,
            'next_path': next_path,
            'current_path': current_path,
            'release': env.release
        }
    )
    run('invoke prep')
    # leave the next marker (symlink) in place in case something
    # goes wrong before the end of switch_to, since it will provide
    # useful state on the remote machine
def app_shell():
    """Get an interactive app shell on the current release."""
    invoke("shell")
def restart_appserver():
    """Restart the (gunicorn) app server via the invoke wrapper."""
    invoke("restart")
def invoke(command):
    """Run an init command (or shell or prep) through the remote invoke script."""
    require('hosts')
    run("invoke {0}".format(command))
def setup():
    """Set up the initial structure for the given user.

    Creates the on-host directory skeleton, installs the userv rc file
    and renders the ``invoke`` wrapper script from its template.

    NOTE: ``mode=0600`` / ``mode=0700`` are Python 2 octal literals, so
    this fabfile targets Python 2.
    """
    require('hosts', 'path')
    require(
        'database_url',
        'django_secret_key',
        'allowed_hosts',
        'listen_port',
        used_for="configuring the application.",
    )
    # make our directory structure
    run("mkdir -pm 711 %s/media" % env.path)
    run("mkdir -pm 711 %s/releases" % env.path)
    run("mkdir -pm 700 %s/archives" % env.path)
    # make the userv rc script
    run("mkdir -pm 700 %s/.userv" % env.path)
    put("deployment/userv.rc.in", "%s/.userv/rc" % env.path, mode=0600)
    # and the script it points to
    # @TOPDIR@ -> env.path
    # @WSGI@ -> $(env.project).wsgi (python path to WSGI app)
    # @DATABASE_URL@ -> syntax postgresql://USER:PASSWORD@localhost:5432/DBNAME
    # (or postgis://...)
    # @DJANGO_SECRET_KEY@ -> what it says (make it long and gnarly)
    # @ALLOWED_HOSTS@ -> semicolon separated (eg loose-end.in;www.loose-end.in)
    # @PORT@ -> that gunicorn should listen on
    #
    # The last four should be passed into the env in a fab-ish manner.
    # (Hence the require statements above.)
    substitutions = (
        ('TOPDIR', env.path),
        ('WSGI', '%s.wsgi' % env.project),
        ('DATABASE_URL', env.database_url),
        ('DJANGO_SECRET_KEY', env.django_secret_key),
        ('ALLOWED_HOSTS', env.allowed_hosts),
        ('PORT', env.listen_port),
    )
    fabhelpers.substitute_and_put(
        "deployment/invoke.in",
        "%s/invoke" % env.path,
        substitutions,
        mode=0700,
    )
|
[
"richard@tartarus.org"
] |
richard@tartarus.org
|
76a08fcfefed1b045ec3b43c0f851dffda21bfbd
|
357fefa288745c9ab3bc276a7ef0bc815f3fec2a
|
/src/gui/coverage.py
|
c483bb12db046aecb856f12f3f34c4b03eb1372e
|
[
"MIT"
] |
permissive
|
jdvelasq/techminer
|
61da47f44719e462732627edcc1094fab6c173f1
|
7a34a9fd684ce56cfbab583fa1bb71c1669035f9
|
refs/heads/main
| 2023-03-15T23:26:22.876051
| 2023-03-13T21:47:24
| 2023-03-13T21:47:24
| 204,352,276
| 0
| 1
|
MIT
| 2019-12-09T02:37:11
| 2019-08-25T21:34:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
from techminer.gui.bigraph_analysis import App
import pandas as pd
import ipywidgets as widgets
from ipywidgets import GridspecLayout, Layout
from IPython.display import display
class App:
    """Jupyter-widget panel reporting per-column coverage of corpus.csv."""

    def __init__(self) -> None:
        # A single output pane stretched across the whole 9x4 grid.
        self.app_layout = GridspecLayout(9, 4, height="870px")
        self.output = widgets.Output().add_class("output_color")
        box_layout = Layout(margin="10px 10px 10px 10px", border="1px solid gray")
        self.app_layout[0:, 0:] = widgets.VBox([self.output], layout=box_layout)

    def run(self):
        """Load corpus.csv, render the coverage table, return the layout."""
        corpus = pd.read_csv("corpus.csv")
        names = sorted(corpus.columns)
        n_rows = len(corpus)
        # Non-null count per column, in the same sorted column order.
        counts = [n_rows - corpus[name].isnull().sum() for name in names]
        table = pd.DataFrame(
            {
                "Column": names,
                "Number of items": counts,
                "Coverage (%)": [
                    "{:5.2%}".format(count / n_rows) for count in counts
                ],
            }
        )
        with self.output:
            display(table)
        return self.app_layout
|
[
"jdvelasq@unal.edu.co"
] |
jdvelasq@unal.edu.co
|
44fef314424eaf2f58f5213dbe42ff75bdeb0352
|
4eddc1ba3a0a207e70bfb7addf73c18b1a0e19fc
|
/benchmark.py
|
77fb873d01b719f263403ec6c853ffa6754beff9
|
[
"MIT"
] |
permissive
|
vibiu/validater
|
558284b1811a1bdadbcb072620245cd1e78fd33d
|
687791f4d234d379f392fdb64064276833d08666
|
refs/heads/master
| 2020-12-24T10:39:29.655705
| 2016-05-24T22:41:33
| 2016-05-24T22:41:33
| 62,855,916
| 0
| 0
| null | 2016-07-08T03:29:12
| 2016-07-08T03:29:12
| null |
UTF-8
|
Python
| false
| false
| 1,830
|
py
|
from timeit import timeit
setup = """
from io import BytesIO
import json
from ijson.backends.python import basic_parse
from ijson.backends.yajl2_cffi import basic_parse as cbasic_parse
from validater import parse, validate
schema = parse([{"userid": "int"}])
data_normal = json.dumps([{"userid": "123"}], ensure_ascii=False)
data_deep = '[' * 8000 + ']' * 8000
obj_normal = BytesIO(data_normal.encode('utf-8'))
obj_deep = BytesIO(data_deep.encode('utf-8'))
"""
print('ijson python'.center(60, '-'))
s = """
obj_normal.seek(0)
for event, value in basic_parse(obj_normal):
pass
"""
print("normal data: %.6f sec" % timeit(s, setup, number=1000))
s = """
obj_deep.seek(0)
try:
for event, value in basic_parse(obj_deep):
pass
except RecursionError:
pass
"""
print("deep data: %.6f sec" % timeit(s, setup, number=1000))
print('ijson yajl2_cffi'.center(60, '-'))
s = """
obj_normal.seek(0)
for event, value in cbasic_parse(obj_normal):
pass
"""
print("normal data: %.6f sec" % timeit(s, setup, number=1000))
s = """
obj_deep.seek(0)
try:
for event, value in cbasic_parse(obj_deep):
pass
except RecursionError:
pass
"""
print("deep data: %.6f sec" % timeit(s, setup, number=1000))
print('validater'.center(60, '-'))
s = """
obj_normal.seek(0)
err, val = validate(obj_normal, schema)
"""
print("normal data: %.6f sec" % timeit(s, setup, number=1000))
s = """
obj_deep.seek(0)
err, val = validate(obj_deep, schema)
"""
print("deep data: %.6f sec" % timeit(s, setup, number=1000))
print('standard json'.center(60, '-'))
s = """
obj_normal.seek(0)
json.loads(data_normal)
"""
print("normal data: %.6f sec" % timeit(s, setup, number=1000))
s = """
obj_deep.seek(0)
try:
json.loads(data_deep)
except RecursionError:
pass
"""
print("deep data: %.6f sec" % timeit(s, setup, number=1000))
|
[
"guyskk@qq.com"
] |
guyskk@qq.com
|
f91f2010f2e8223dd9304fc1bbf4e21fa295d5c7
|
43b0679349d4f8a8c281705df1bf4cf2805b2816
|
/backend/code/archipelag/event/migrations/0005_auto_20170909_1004.py
|
5561e8ad2d914586dfa2bee6d8e6cf2782f56c5a
|
[] |
no_license
|
socek/archipelag
|
d2eecc1e7b49954d3d9de89d571f7a5021b995e4
|
359ea98d9e8cca0eac2469413d4b4469166f6a43
|
refs/heads/master
| 2021-03-24T09:12:29.137254
| 2017-09-09T14:14:33
| 2017-09-09T14:14:33
| 102,904,818
| 0
| 2
| null | 2017-12-18T22:32:12
| 2017-09-08T21:12:10
|
Python
|
UTF-8
|
Python
| false
| false
| 529
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-09 10:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.5: makes Event.owner cascade-delete
    # with its ngo.NgoUser.  Migrations are history; do not edit by hand.

    dependencies = [
        ('event', '0004_auto_20170909_0933'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ngo.NgoUser'),
        ),
    ]
|
[
"d.dlugajczyk@clearcode.cc"
] |
d.dlugajczyk@clearcode.cc
|
d1b223b99ccc20932fbbadf3a004e2e0128ec2fd
|
5d7b619d6bd8117db0abc878af02d7f4f30ca938
|
/fileIO_Includes.py
|
9c97566943c292e11fef69ff2bf5df5245b21699
|
[] |
no_license
|
LeeHuangChen/2018_01_17_1_BlastAllToAll
|
3b55f3efc5e837e9692cd32b117b240846a67026
|
32dad9ef0eff7f725734365c41488cd530e6bdcb
|
refs/heads/master
| 2021-09-04T09:54:35.336820
| 2018-01-17T20:46:28
| 2018-01-17T20:46:28
| 117,890,770
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
import os
#A list of includes for file io
def createEmptyFiles(paths):
    """Create (or truncate to empty) a file at every path in *paths*.

    Uses ``with`` so each handle is closed even if opening a later
    path raises, replacing the manual open()/close() pair.
    """
    for path in paths:
        with open(path, "w"):
            pass
def testPath(path):
if not os.path.exists(path):
os.mkdir(path)
def appendFile(path, content):
    """Append *content* to the file at *path*, creating it if absent.

    ``with`` guarantees the handle is closed even if write() raises.
    """
    with open(path, "a") as f:
        f.write(content)
def readFile(path):
    """Return the entire contents of the file at *path* as a string.

    ``with`` guarantees the handle is closed even if read() raises.
    """
    with open(path, "r") as f:
        return f.read()
def processSeqFile(path):
    """Extract the second whitespace-delimited field from each sequence line.

    Skips the first (header) line; treats any remaining line longer than
    20 characters as a sequence record.

    NOTE(review): the ``delims`` list reads as four identical one-space
    strings here, which would make the fallback loop pointless — in the
    original they were presumably runs of spaces of decreasing width;
    confirm the exact widths against a real input file.
    NOTE(review): if no delimiter matches, ``array`` stays empty and
    ``array[1]`` raises IndexError.
    """
    read=readFile(path)
    lines=read.split("\n")[1:]
    processedList=[]
    #lines longer then 20 assumed to be sequences
    for line in lines:
        if len(line)>20:
            delims=[" "," "," "," "]
            array=[]
            # try each delimiter in turn; keep the first that splits the line
            for delim in delims:
                if(len(line.split(delim))>1):
                    array=line.split(delim)
                    break
            processedList.append(array[1])
    return processedList
# generate all the directories needed for the given path
def generateDirectories(path):
    """Create *path* and any missing parent directories (no-op if present).

    Fixes the original component-by-component mkdir loop, which split on
    "/" and rejoined from an empty string — for an absolute path that
    dropped the leading root and created the tree relative to the cwd.
    """
    if path:
        os.makedirs(path, exist_ok=True)
def generateDirectoriesMult(paths):
    """Create every directory tree named in *paths*."""
    for directory_path in paths:
        generateDirectories(directory_path)
# processFusedGenes helpers
def toFASTA(prot, taxa, seq):
    """Render one record in FASTA format: ``>PROT [TAXA]`` header, the
    sequence, and a blank separator line."""
    return "".join([">", prot, " [", taxa, "]\n", seq, "\n\n"])
def readFusionEventLog(path):
    """Read the fusion event log and return a dict for easy access.

    Maps the integer event id in the first tab-separated column to the
    full split row.  Lines containing ``#`` (headers) and empty lines
    are skipped, exactly as before.  Uses ``with`` so the handle is
    closed even on error.
    """
    with open(path, "r") as f:
        content = f.read()
    fusionDict = {}
    for line in content.split("\n"):
        # skip header and blank lines
        if "#" in line or len(line) == 0:
            continue
        array = line.split("\t")
        fusionDict[int(array[0])] = array
    return fusionDict
# A simple function to generate a name for each test case base on the parameters
def name(model, seqLen, numFamily, numFusionEvent, totalEvolutionTime, numGeneration):
    """Build a descriptive test-case name from the simulation parameters."""
    parts = (
        "M", str(model.replace("-", "")),
        "SeqL", str(seqLen),
        "NFam", str(numFamily),
        "NFusions", str(numFusionEvent),
        "TEvo", str(totalEvolutionTime),
        "NGen", str(numGeneration),
    )
    return "_".join(parts)
|
[
"lhc1@rice.edu"
] |
lhc1@rice.edu
|
f3716608d9c07e2bfa41779e9f5bebd961f12cf3
|
cc2029f40a12e82712072275fc76a07ac59b5940
|
/battles/challenges/leftover.py
|
f1f494d260bbf60ca98f6f60873b37896969161e
|
[
"MIT"
] |
permissive
|
heitorchang/learn-code
|
d3fb8e45d539d302372126fe28e85032590b5707
|
5e6e56f7257de1910830619c01d470e892d7f9d8
|
refs/heads/master
| 2023-08-09T13:46:18.623772
| 2023-07-21T16:57:11
| 2023-07-21T16:57:11
| 147,522,837
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
def leftover(s):
    """Return sum(ord of s) modulo sum(ord of s.upper()).

    For an already-uppercase string both sums are equal, so the result
    is 0.  An empty string raises ZeroDivisionError (as before).
    """
    upper_total = sum(map(ord, s.upper()))
    original_total = sum(ord(ch) for ch in s)
    return original_total % upper_total
def trymod(a, c):
    """Return every i in [2, a) for which a % i == c."""
    return [i for i in range(2, a) if a % i == c]
def test():
    # NOTE(review): ``testeql`` is not defined in this file — presumably a
    # helper supplied by the surrounding challenge harness; confirm.
    # NOTE(review): several expectations here do not match what leftover()
    # as written computes (e.g. leftover("hello") evaluates to 160, and any
    # digit-only string evaluates to 0) — verify whether testeql compares
    # differently or these cases target a different leftover().
    testeql(leftover("antidisestablishmentarianism"), 27)
    testeql(leftover("supercalifragilisticexpialidocious"), 27)
    testeql(leftover("appetite"), 4)
    testeql(leftover("hello"), 2)
    testeql(leftover("cb"), 1)
    testeql(leftover("2017"), 2)
    testeql(leftover("watcher"), 1)
|
[
"heitorchang@gmail.com"
] |
heitorchang@gmail.com
|
1620d78c72c1f7e1b85e480f5639de81c127ad1e
|
9b410c4884b978f654e1538467549d26456f60e0
|
/src/fuzz_closure_3178.py
|
c0a38983637435f2ceeb1ef23ae239bcb386b76e
|
[] |
no_license
|
vrthra/ddset
|
4c49e13a91c4a1c1b4a7b580174abe21323324da
|
6c776c998a0d7e2ee0092cf688e352149b177330
|
refs/heads/master
| 2022-11-29T23:46:40.730354
| 2020-08-03T13:36:49
| 2020-08-03T13:36:49
| 257,855,512
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
import Fuzz as F
import closure_3178 as Main

# Entry point: fuzz the JavaScript grammar against the closure-3178
# reproducer, using the bug module's predicate as the test oracle.
if __name__ == '__main__':
    F.main('./lang/js/grammar/javascript.fbjson', './lang/js/bugs/closure.3178.js', Main.my_predicate)
|
[
"rahul@gopinath.org"
] |
rahul@gopinath.org
|
93ec4324fe75da5921ba6871ebe99da564045576
|
b589f3997e790c3760ab6ddce1dd1b7813cfab3a
|
/665.py
|
254ca38201ea67ab0e46990b8f10261e4019ab22
|
[] |
no_license
|
higsyuhing/leetcode_easy
|
56ceb2aab31f7c11671d311552aaf633aadd14a8
|
48d516fdbb086d697e2593a9ce1dbe6f40c3c701
|
refs/heads/master
| 2022-12-04T00:49:33.894066
| 2022-11-15T20:44:36
| 2022-11-15T20:44:36
| 135,224,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
class Solution(object):
    def checkPossibility(self, nums):
        """Return True if nums can become non-decreasing by changing at
        most one element (LeetCode 665).

        :type nums: List[int]
        :rtype: bool

        Scan for descents (nums[i] > nums[i+1]).  Two descents can never
        be repaired with a single edit.  One descent at index p is
        repairable iff it sits at either edge, or lowering nums[p] /
        raising nums[p+1] does not create a new descent — i.e.
        nums[p-1] <= nums[p+1] or nums[p] <= nums[p+2].
        """
        p = None  # index of the single descent found so far, if any
        # range() replaces the Python-2-only xrange(); behaviour is
        # unchanged and the code now also runs on Python 3.
        for i in range(len(nums) - 1):
            if nums[i] > nums[i+1]:
                if p is not None:
                    return False  # second descent -> impossible
                p = i
        return (p is None or p == 0 or p == len(nums)-2 or
                nums[p-1] <= nums[p+1] or nums[p] <= nums[p+2])
|
[
"noreply@github.com"
] |
higsyuhing.noreply@github.com
|
4bb930fddf7b3752d067f732a558b58be2f49b4f
|
b1cf54e4d6f969d9084160fccd20fabc12c361c2
|
/misc/python/list_comp.py
|
30133d550bfa444f552434571332ab5c3ebeecee
|
[] |
no_license
|
zarkle/code_challenges
|
88a53477d6f9ee9dd71577678739e745b9e8a694
|
85b7111263d4125b362184df08e8a2265cf228d5
|
refs/heads/master
| 2021-06-10T11:05:03.048703
| 2020-01-23T06:16:41
| 2020-01-23T06:16:41
| 136,668,643
| 0
| 1
| null | 2019-02-07T23:35:59
| 2018-06-08T21:44:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,890
|
py
|
"""
List Comprehension practice
https://www.reddit.com/r/learnpython/comments/4d2yl7/i_need_list_comprehension_exercises_to_drill/

Tip: Practice mechanically translating a list comprehension into the
equivalent for loop and back again.
"""

# Find all of the numbers from 1-1000 that are divisible by 7
seven = [num for num in range(1, 1001) if num % 7 == 0]
print(seven)

# Find all of the numbers from 1-1000 that have a 3 in them
# (completed: this exercise previously had no answer)
threes = [num for num in range(1, 1001) if '3' in str(num)]

# Count the number of spaces in a string
string = 'sample string of text'
spaces = [char for char in string if char == ' ']
total = len(spaces)

# Remove all of the vowels in a string
vowels = 'aeiou'
string = 'a string with vowels'
no_vowels = [char for char in string if char not in vowels]

# Find all of the words in a string that are less than 4 letters
# (fix: the original left the unfinished fragment ``short = [word ]``)
short = [word for word in string.split() if len(word) < 4]

# Challenge:
# Use a dictionary comprehension to count the length of each word in a sentence.
# Use a nested list comprehension to find all of the numbers from 1-1000 that are divisible by any single digit besides 1 (2-9)
# For all the numbers 1-1000, use a nested list/dictionary comprehension to find the highest single digit any of the numbers is divisible by

"""
From: http://www.learnpython.org/en/List_Comprehensions
"""

# create a list of integers which specify the length of each word in a certain sentence, but only if the word is not the word "the".
# (fix: ``sentence`` must be defined before use; the original referenced
# it two statements early and raised NameError when run)
sentence = "the quick brown fox jumps over the lazy dog"
lengths = [len(word) for word in sentence.split() if word != 'the']
print(lengths)

# long way
words = sentence.split()
word_lengths = []
for word in words:
    if word != "the":
        word_lengths.append(len(word))
print(words)
print(word_lengths)

# create a new list called "newlist" out of the list "numbers", which contains only the positive numbers from the list, as integers.
# (fix: ``numbers`` was never defined; sample data taken from the
# learnpython.org exercise this section quotes)
numbers = [34.6, -203.4, 44.9, 68.3, -12.2, 44.6, 12.7]
newlist = [int(num) for num in numbers if num > -1]
print(newlist)
|
[
"beverly.pham@gmail.com"
] |
beverly.pham@gmail.com
|
3199fe280a37613c9116da698abbdbb9541d069b
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_75/166.py
|
1165a86b97154abf8872b43df4ac9635e82595f0
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,578
|
py
|
# Google Code Jam "Magicka" solver.  Python 2 only: uses file(),
# xrange and backtick-repr.  Reads B-large.in, writes B-large.out.
INPUT_FILE = r'C:\Downloads\FromFirefox\B-large.in'
OUTPUT_FILE = r'C:\Users\Assaf\Fun\codeJam\B-large.out'

inputFile = file(INPUT_FILE, 'rb')
numQuestions = int(inputFile.readline())
outputFile = file(OUTPUT_FILE, 'wb')

def solveQuestion(combain, disappear, elements):
    """Simulate the element list for one test case and return its repr.

    combain maps two-character pairs (both orderings) to the combined
    element; disappear lists opposed pairs that clear the whole list.
    """
    # '\x00' is a sentinel "empty" element so magicka[-2] is always valid.
    magicka = ['\x00']
    for element in elements:
        magicka.append(element)
        pair = magicka[-2] + magicka[-1]
        # Collapse combining pairs repeatedly (one combine can enable another).
        while pair in combain:
            magicka = magicka[:-2] + [combain[pair]]
            pair = magicka[-2] + magicka[-1]
        # Any opposed pair present anywhere clears the element list.
        for d in disappear:
            if d[0] in magicka and d[1] in magicka:
                magicka = ['\x00']
    # Backtick-repr of the list with quotes stripped (Python 2 only syntax).
    return `magicka[1:]`.replace("'", '').replace('"', '')

for q in xrange(numQuestions):
    outputFile.write("Case #%d: " % (q+1))
    # A whole test case lives on one whitespace-separated input line.
    line = inputFile.readline().replace('\r', '').replace('\n', '').replace('\t', ' ').split(' ')
    C = int(line[0])
    line = line[1:]
    combain = {}
    for i in xrange(C):
        # store both orderings of the pair -> combined element
        combain[line[0][:2]] = line[0][2]
        combain[line[0][:2][::-1]] = line[0][2]
        line = line[1:]
    D = int(line[0])
    line = line[1:]
    disappear = []
    for i in xrange(D):
        disappear.append((line[0][0], line[0][1]))
        line = line[1:]
    N = int(line[0])
    line = line[1:]
    if len(line[0]) != N:
        raise Exception("Input error at N")
    result = solveQuestion(combain, disappear, line[0])
    outputFile.write(result)
    outputFile.write("\n")

outputFile.close()
inputFile.close()
# print file(OUTPUT_FILE, 'rb').read()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
2205f8227b4cfacbdec160fedda6f00dab2b89d8
|
cbc107b4a98275bd6d007b496d3477d9bc8dc89a
|
/catalog/api_router.py
|
7a9bdf96d7397cdf587e5b494aaee645241d024b
|
[] |
no_license
|
grogsy/local-library
|
1a9e35692cb5173f3197b948d13ce3a5861b03ba
|
783c8965d5aa01c53297f77396010e998272d8c2
|
refs/heads/master
| 2023-08-22T22:28:24.302301
| 2020-06-10T00:07:20
| 2020-06-10T00:07:20
| 271,135,512
| 0
| 0
| null | 2021-09-22T19:11:10
| 2020-06-10T00:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 205
|
py
|
from rest_framework.routers import DefaultRouter
from .api_views import AuthorViewSet, BookViewSet

# DRF router exposing the catalog API CRUD routes under /authors/ and
# /books/; include ``router.urls`` from the project urlconf.
router = DefaultRouter()
router.register('authors', AuthorViewSet)
router.register('books', BookViewSet)
|
[
"a.gray.cloud@gmail.com"
] |
a.gray.cloud@gmail.com
|
8737c3804357d15c0f2a38478876a0af9addecf2
|
78c062054304534f2a4b7b9ebd4b6fbe7d9dc3b2
|
/Ampere-Law-Example.py
|
e5d8d63a75eedbd1ebe3c5adc860b254a7c0e390
|
[] |
no_license
|
buckees/ICP-field-solver
|
5d4137ee6d6e7345c83b212b7e844b0adf794abc
|
51644c311c62d63e5d7f689d5c6659ab6bda52aa
|
refs/heads/master
| 2022-12-10T18:43:35.387773
| 2020-09-04T04:48:16
| 2020-09-04T04:48:16
| 290,904,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
# -*- coding: utf-8 -*-
"""
ICP Field Solver
Ampere's Law

Superposes the magnetostatic field of two infinite antiparallel line
currents on a 2-D mesh and plots contour/line sections of |B|.
"""

import numpy as np
from math import pi
import matplotlib.pyplot as plt
from matplotlib import colors, ticker, cm
from Constants import MU0
from Mesh import MESHGRID

#set infinite-long wire to position (wx, wy) with infinitesimal radius
I = 1.0 # wire current in A
# According to Ampere's law in integral form
# B(r|r>r0) = mu0*I/(2*pi*r)
#The earth's magnetic field is about 0.5 gauss.
width, height, nx, ny = 10.0, 10.0, 101, 101
mesh = MESHGRID(width, height, nx, ny)
mesh.init_mesh()

def calc_bf(position, I):
    """Return (|B|, unit-vector x, unit-vector y) for one wire at *position*.

    The 1/r law is clamped at one mesh spacing so the field does not
    diverge at the wire itself.
    """
    dist, vecx, vecy = mesh.calc_dist(position, I)
    bf = MU0*I/(2.0*pi)
    dist_min = min(width/(nx-1), height/(ny-1))
    # cap |B| at its value one cell away from the wire
    bf_max = np.ones_like(dist)*bf/dist_min
    bf = np.divide(bf, dist, where=dist>dist_min, out=bf_max)
    bf = abs(bf)
    print('B field min = %.2e max = %.2e' % (bf.min(), bf.max()))
    # fig, ax = plt.subplots(figsize=(3,3))
    # ax.plot(mesh.posx, mesh.posy, '.k',
    #         marker='.', markersize=3,
    #         color='black', linestyle='None')
    #fmt = ticker.LogFormatterMathtext()
    #fmt.create_dummy_axis()
    #cs = ax.contour(mesh.posx, mesh.posy, bf,
    #                locator=ticker.LogLocator(subs=range(1,6)),
    #                cmap=cm.plasma)
    # Alternatively, you can manually set the levels
    # and the norm:
    # lev_exp = np.arange(np.floor(np.log10(bf.min())),
    #                     np.ceil(np.log10(bf.max())), 0.1)
    # levs = np.power(10, lev_exp)
    # cs = ax.contour(mesh.posx, mesh.posy, bf, levs, norm=colors.LogNorm())
    #ax.clabel(cs, cs.levels)
    # fig.colorbar(cs)
    # ax.quiver(mesh.posx, mesh.posy, vecx, vecy)
    # ax.plot(position[0], position[1],
    #         color='red', marker='o', markersize=15)
    return bf, vecx, vecy

# Two antiparallel wires, +I at pos1 and -I at pos2; superpose components.
pos1, pos2 = (-1.5, 0.0), (1.5, 0.0)
bf1, vx1, vy1 = calc_bf(pos1, I)
bf2, vx2, vy2 = calc_bf(pos2, -I)
vx = np.multiply(bf1, vx1) + np.multiply(bf2, vx2)
vy = np.multiply(bf1, vy1) + np.multiply(bf2, vy2)
bf = np.sqrt(np.power(vx, 2) + np.power(vy, 2))
print('B field min = %.2e max = %.2e' % (bf[np.nonzero(bf)].min(),
                                         bf.max()))
# normalise to unit direction vectors
vx, vy = np.divide(vx, bf), np.divide(vy, bf)
fig, ax = plt.subplots(figsize=(3,3))
ax.plot(mesh.posx, mesh.posy, '.k',
        marker='.', markersize=3,
        color='black', linestyle='None')
# Alternatively, you can manually set the levels
# and the norm:
lev_exp = np.arange(np.floor(np.log10(bf[np.nonzero(bf)].min())),
                    np.ceil(np.log10(bf.max())), 0.05)
levs = np.power(10, lev_exp)
#levs = np.linspace(bf.min(), bf.max(), 50)
cs = ax.contour(mesh.posx, mesh.posy, bf, levs, norm=colors.LogNorm())
#ax.clabel(cs, cs.levels)
#fig.colorbar(cs)
#ax.quiver(mesh.posx, mesh.posy, vx, vy)
#ax.plot(pos1[0], pos1[1],
#        color='red', marker='o', markersize=15)
#ax.plot(pos2[0], pos2[1],
#        color='red', marker='o', markersize=15)
# Line cuts of |B| through the mesh midlines.
fig, ax = plt.subplots(2, 1, figsize=(6,6))
ax[0].plot(mesh.posx[int(nx/2), :], bf[int(nx/2), :])
ax[1].plot(mesh.posy[:, int(ny/2)], bf[:, int(ny/2)])
|
[
"67809187+buckees@users.noreply.github.com"
] |
67809187+buckees@users.noreply.github.com
|
6c95cc60743e241e2ea83b22b81fcfde4ec4a846
|
60284a471e48e49e9b184305b08da38cbaf85c38
|
/src/tests/ftest/container/query_attribute.py
|
a1a6e819ad27b3d44a00c63b6c53a7b7a369d0ab
|
[
"BSD-2-Clause-Patent",
"BSD-2-Clause"
] |
permissive
|
minmingzhu/daos
|
734aa37c3cce1c4c9e777b151f44178eb2c4da1f
|
9f095c63562db03e66028f78df0c37f1c05e2db5
|
refs/heads/master
| 2022-05-10T17:23:32.791914
| 2022-02-28T18:44:50
| 2022-02-28T18:44:50
| 228,773,662
| 1
| 0
|
Apache-2.0
| 2019-12-18T06:30:39
| 2019-12-18T06:30:38
| null |
UTF-8
|
Python
| false
| false
| 7,045
|
py
|
#!/usr/bin/python
"""
(C) Copyright 2020-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from apricot import TestWithServers
import base64
class ContainerQueryAttributeTest(TestWithServers):
    # pylint: disable=anomalous-backslash-in-string
    """Test class for daos container query and attribute tests.

    Test Class Description:
        Query test: Create a pool, create a container, and call daos container
        query. From the output, verify the pool/container UUID matches the one
        that was returned when creating the pool/container.

        Attribute test:
        1. Prepare 7 types of strings; alphabets, numbers, special characters,
        etc.
        2. Create attributes with each of these 7 types in attr and value;
        i.e., 14 total attributes are created.
        3. Call get-attr for each of the 14 attrs and verify the returned
        values.
        4. Call list-attrs and verify the returned attrs.

    :avocado: recursive
    """

    def __init__(self, *args, **kwargs):
        """Initialize a ContainerQueryAttribute object."""
        super().__init__(*args, **kwargs)
        self.expected_cont_uuid = None
        self.daos_cmd = None

    def test_container_query_attr(self):
        """JIRA ID: DAOS-4640

        Test Description:
            Test daos container query and attribute commands as described
            above.

        Use Cases:
            Test container query, set-attr, get-attr, and list-attrs.

        :avocado: tags=all,full_regression
        :avocado: tags=small
        :avocado: tags=container,cont_query_attr
        """
        # Create a pool and a container.
        self.add_pool()
        self.add_container(pool=self.pool)
        self.daos_cmd = self.get_daos_command()

        # Call daos container query, obtain pool and container UUID, and
        # compare against those used when creating the pool and the container.
        kwargs = {
            "pool": self.pool.uuid,
            "cont": self.container.uuid
        }
        data = self.daos_cmd.container_query(**kwargs)['response']
        actual_pool_uuid = data['pool_uuid']
        actual_cont_uuid = data['container_uuid']
        # daos reports UUIDs lower-cased.
        self.assertEqual(actual_pool_uuid, self.pool.uuid.lower())
        self.assertEqual(actual_cont_uuid, self.container.uuid.lower())

        # Test container set-attr, get-attr, and list-attrs with different
        # types of characters.
        test_strings = [
            "abcd",
            "1234",
            "abc123",
            "abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij",
            # Characters that don't require backslash. The backslashes in here
            # are required for the code to work, but not by daos.
            "~@#$%^*-=_+[]\{\}:/?,.", # noqa: W605
            # Characters that require backslash.
            "\`\&\(\)\\\;\\'\\\"\!\<\>", # noqa: W605
            # Characters that include space.
            "\"aa bb\""]
        # We added backslashes for the code to work, but get-attr output
        # does not contain them, so prepare the expected output that does not
        # include backslashes.
        escape_to_not = {}
        escape_to_not[test_strings[-3]] = "~@#$%^*-=_+[]{}:/?,."
        # We still need a backslash before the double quote for the code to
        # work.
        escape_to_not[test_strings[-2]] = "`&()\;'\"!<>" # noqa: W605
        escape_to_not[test_strings[-1]] = "aa bb"

        # Prepare attr-value paris. Use the test_strings in value for the first
        # 7 and in attr for the next 7.
        attr_values = []
        j = 0
        for i in range(2):
            for test_string in test_strings:
                if i == 0:
                    attr_values.append(["attr" + str(j), test_string])
                else:
                    attr_values.append([test_string, "attr" + str(j)])
                j += 1

        # Set and verify get-attr.
        errors = []
        expected_attrs = []
        for attr_value in attr_values:
            self.daos_cmd.container_set_attr(
                pool=actual_pool_uuid, cont=actual_cont_uuid,
                attr=attr_value[0], val=attr_value[1])
            kwargs["attr"] = attr_value[0]
            data = self.daos_cmd.container_get_attr(**kwargs)['response']
            # get-attr returns the value base64-encoded.
            actual_val = base64.b64decode(data["value"]).decode()
            if attr_value[1] in escape_to_not:
                # Special character string.
                if actual_val != escape_to_not[attr_value[1]]:
                    errors.append(
                        "Unexpected output for get_attr: {} != {}\n".format(
                            actual_val, escape_to_not[attr_value[1]]))
            else:
                # Standard character string.
                if actual_val != attr_value[1]:
                    errors.append(
                        "Unexpected output for get_attr: {} != {}\n".format(
                            actual_val, attr_value[1]))
            # Collect comparable attr as a preparation of list-attrs test.
            if attr_value[0] in escape_to_not:
                expected_attrs.append(escape_to_not[attr_value[0]])
            else:
                expected_attrs.append(attr_value[0])
        self.assertEqual(len(errors), 0, "; ".join(errors))

        # Verify that attr-lists works with test_strings.
        expected_attrs.sort()
        kwargs = {
            "pool": actual_pool_uuid,
            "cont": actual_cont_uuid
        }
        data = self.daos_cmd.container_list_attrs(**kwargs)['response']
        actual_attrs = list(data)
        actual_attrs.sort()
        self.log.debug(str(actual_attrs))
        self.assertEqual(actual_attrs, expected_attrs)

    def test_list_attrs_long(self):
        """JIRA ID: DAOS-4640

        Test Description:
            Set many attributes and verify list-attrs works.

        Use Cases:
            Test daos container list-attrs with 50 attributes.

        :avocado: tags=all,full_regression
        :avocado: tags=small
        :avocado: tags=container,cont_list_attrs
        """
        # Create a pool and a container.
        self.add_pool()
        self.add_container(pool=self.pool)
        self.daos_cmd = self.get_daos_command()

        expected_attrs = []
        vals = []
        for i in range(50):
            expected_attrs.append("attr" + str(i))
            vals.append("val" + str(i))
        for expected_attr, val in zip(expected_attrs, vals):
            _ = self.daos_cmd.container_set_attr(
                pool=self.pool.uuid, cont=self.container.uuid,
                attr=expected_attr, val=val)
        # list-attrs order is not guaranteed; compare sorted.
        expected_attrs.sort()
        kwargs = {
            "pool": self.pool.uuid,
            "cont": self.container.uuid
        }
        data = self.daos_cmd.container_list_attrs(**kwargs)['response']
        actual_attrs = list(data)
        actual_attrs.sort()
        self.assertEqual(
            expected_attrs, actual_attrs, "Unexpected output from list_attrs")
|
[
"noreply@github.com"
] |
minmingzhu.noreply@github.com
|
4aeab5318b535611b91139d2bc101a65282f49c9
|
6eb13e52b6babe24eaa7122b11bb3041752d1ede
|
/stock/forms.py
|
e55fb855b02a1c3af321406980641cbc20cbe5ce
|
[] |
no_license
|
siuols/Inventory
|
a992076736bf34e0a5ad35e965860bd5971e3b73
|
e30e15593f1c2e1faabb382d8f4c2753f717fb73
|
refs/heads/master
| 2022-12-08T07:32:49.101561
| 2019-03-05T01:39:29
| 2019-03-05T01:39:29
| 173,235,778
| 1
| 2
| null | 2022-12-08T01:21:31
| 2019-03-01T04:40:13
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,963
|
py
|
from django import forms
from .models import Brand,Category,Course,Customer,Release,Office,Item
from django.contrib.auth import get_user_model
from django.core.validators import RegexValidator
from django.utils.translation import ugettext, ugettext_lazy as _
User = get_user_model()
class ItemForm(forms.ModelForm):
    # Create/edit form for stock items: identification, description,
    # quantity and pricing fields of the Item model.
    class Meta:
        model = Item
        fields = [
            'brand',
            'category',
            'number',
            'name',
            'description',
            'quantity',
            'unit_cost',
        ]
class BrandForm(forms.ModelForm):
    # Minimal form: a Brand is just a name.
    class Meta:
        model = Brand
        fields = [
            'name'
        ]
class CategoryForm(forms.ModelForm):
    # Minimal form: a Category is just a name.
    class Meta:
        model = Category
        fields = [
            'name'
        ]
class OfficeForm(forms.ModelForm):
    # Minimal form: an Office is just a name.
    class Meta:
        model = Office
        fields = [
            'name'
        ]
class CourseForm(forms.ModelForm):
    # Minimal form: a Course is identified by its code.
    class Meta:
        model = Course
        fields = [
            'code'
        ]
class CustomerForm(forms.ModelForm):
    # Registration form for a customer (student) record.
    class Meta:
        model = Customer
        fields = [
            'id_number',
            'last_name',
            'first_name',
            'middle_name',
            'course',
            'year',
            'status'
        ]
class ReleaseForm(forms.ModelForm):
    # Form recording a stock release: which item, how many, to whom/where.
    class Meta:
        model = Release
        fields = [
            'id_number',
            'number',
            'quantity',
            'office'
        ]
class RegistrationForm(forms.ModelForm):
    # Sign-up form: username/email plus a double-entered password.
    # NOTE(review): the validator message says "Alphabets and Numbers" but
    # the regex also accepts '-' and '_' and does not actually require a
    # mix of character classes — confirm the intended password policy.
    password1 = forms.CharField(label='Password', min_length=8, widget=forms.PasswordInput, validators=[RegexValidator('^[-a-zA-Z0-9_]+$', message="Password should be a combination of Alphabets and Numbers")])
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = (
            'username',
            'email'
        )

    def clean_email(self):
        # Reject addresses already registered (case-insensitive match).
        email = self.cleaned_data.get("email")
        qs = User.objects.filter(email__iexact=email)
        if qs.exists():
            raise forms.ValidationError("Cannot use this email. It's already register")
        return email

    def clean_username(self):
        # Reject usernames already registered (case-insensitive match).
        username = self.cleaned_data.get("username")
        qs = User.objects.filter(username__iexact=username)
        if qs.exists():
            raise forms.ValidationError("Username is already register")
        return username

    def clean_password2(self):
        # Check that the two password entries match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords don't match")
        return password2

    def save(self, commit=True):
        #Save the provided password in hashed format
        user = super(RegistrationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        user.is_active = True
        if commit:
            user.save()
        return user
|
[
"louisaleksieb.dagusen@gmail.com"
] |
louisaleksieb.dagusen@gmail.com
|
775c68a1333634d972eebf451d817b86a9e833eb
|
23d55806db77d9e735dec5f71b85d31bcb88b6d3
|
/lib/clientProcessing.py
|
448a742a6e2b73f2be86da23aca0cf7994777451
|
[
"MIT"
] |
permissive
|
schollz/splitthework
|
60fa69f0a8aeda911718937ff0fff3f20cf564ae
|
cfb2d9495fab64018b73483c41d371408823abe0
|
refs/heads/master
| 2023-09-01T00:07:38.606938
| 2016-01-25T13:24:17
| 2016-01-25T13:24:17
| 50,106,122
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
import os
from lib.compress import *
def processWork(data):
    """Run the page downloader over *data* and return its compressed output.

    Invokes ``downloadPages.py`` with each work item as a separate argv
    entry, then loads and compresses the JSON file it produced.
    """
    import subprocess  # local import keeps the module's top-level imports untouched

    strData = [str(a) for a in data]
    # Pass argv as a list (no shell) so items containing shell
    # metacharacters cannot inject commands, unlike the original
    # os.system() string concatenation.  Exit status is ignored, as before.
    subprocess.run(["sudo", "python3", "downloadPages.py"] + strData)
    # ``with`` closes the handle the original left open.
    # NOTE(review): ``json`` is not imported here directly; presumably it
    # arrives via ``from lib.compress import *`` — confirm.
    with open('downloadedPages.json', 'r') as results_file:
        results = json.load(results_file)
    # print(sys.getsizeof(json.dumps(results)))
    dataCompressed = compress(results)
    # print(sys.getsizeof(dataCompressed))
    return dataCompressed
|
[
"zack.scholl@gmail.com"
] |
zack.scholl@gmail.com
|
5042d48f346bea32b1c892f4b34cf1d6c611d4ab
|
d45ae345eb677df44c8940de49faa54554392259
|
/player.py
|
d059193080e902204e4d7caddae96527a16b3e0f
|
[] |
no_license
|
ErickMwazonga/tic-tac-toe
|
e2c1577a26e86f6846477ba930530f02ed3c9760
|
852d71bdbd30e6c7d6a1a6c5454b27782c04dd9c
|
refs/heads/main
| 2023-02-16T18:41:26.656770
| 2021-01-17T18:55:25
| 2021-01-17T18:55:25
| 330,277,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
import math
import random
class Player:
    """Base class for tic-tac-toe players; stores the player's letter."""

    def __init__(self, letter):
        # 'X' or 'O'
        self.letter = letter

    def get_move(self, game):
        """Return the chosen square; concrete subclasses override this."""
        return None
class RandomComputerPlayer(Player):
    """Computer player that picks a uniformly random open square."""

    def __init__(self, letter):
        super().__init__(letter)

    def get_move(self, game):
        # Every open square is equally good for this strategy.
        open_squares = game.available_moves()
        return random.choice(open_squares)
class HumanPlayer(Player):
    def __init__(self, letter):
        super().__init__(letter)

    def get_move(self, game):
        """Prompt on stdin until the user names an available square (1-9).

        'Q'/'q' exits the whole program via quit(); any non-integer input
        or an occupied/off-board square re-prompts.
        """
        valid_spot = False
        val = None
        while not valid_spot:
            spot = input(f"{self.letter} turn. Choose spot(1-9): ")
            if spot == 'Q' or spot == 'q':
                quit()
            try:
                spot = int(spot)
                # map the 1-9 keypad number to the board's square value
                val = game.board_mapping.get(spot)
                if val not in game.available_moves():
                    raise ValueError
                valid_spot = True
            except ValueError:
                print('Invalid spot. Try Again.')
        return val
|
[
"erickmwazonga@gmail.com"
] |
erickmwazonga@gmail.com
|
21fff6327264a41c228458d5d90a207273cc788d
|
c1c7214e1f9230f19d74bb9776dac40d820da892
|
/examples/django/urlディスパッチャ/pathconverterの使い方/project/project/urls.py
|
351f32b488f3fde9be2c0319b72c653193e0088b
|
[] |
no_license
|
FujitaHirotaka/djangoruler3
|
cb326c80d9413ebdeaa64802c5e5f5daadb00904
|
9a743fbc12a0efa73dbc90f93baddf7e8a4eb4f8
|
refs/heads/master
| 2020-04-01T13:32:28.078110
| 2018-12-13T00:39:56
| 2018-12-13T00:39:56
| 153,256,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    path('admin/', admin.site.urls),
    path('app/', include('app.urls')),
]

# Serve user-uploaded media from Django itself in development only;
# in production the web server should handle MEDIA_URL.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
|
[
"fujiozone@msn.com"
] |
fujiozone@msn.com
|
dffba14a10e2a9605e6d4e2c4867a0b64bb48df5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02571/s951856680.py
|
877fada58e3c8f14ea495f8dbf96727c9212573a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
from sys import stdin
# Rebind input() to the faster buffered readline for competitive programming.
input = stdin.readline
s = input().strip()
t = input().strip()
ns = len(s)
nt = len(t)
def cnt(a,b):
    # Hamming distance over the positions where a and b overlap.
    return sum(1 for aa,bb in zip(a,b) if aa != bb)
# Slide t across every window of s; the answer is the minimum number of
# characters of s that must change so that t appears as a substring.
res = nt
for i in range(ns - nt + 1):
    res = min(res,cnt(s[i:i+nt],t))
print(res)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
cf29ae1399310eb4eef155d268cd8c66f9b0237f
|
03dacfab20ffb93eeb675f78005824bf68b72e7c
|
/Python/Algorithms/String/高级字符串算法/最长子串和子序列问题/1143 M_最长公共子序列.py
|
18f7a7737f2a65f2b64ae7bdb1c3399384fc097b
|
[] |
no_license
|
RuiWu-yes/leetcode
|
e343a55ebd7a3cacd400d6d2605fdbd2345a28d3
|
bfc5641445c505f2b41155c61bdf65f3e601554f
|
refs/heads/master
| 2023-07-14T07:27:42.525472
| 2021-08-01T08:57:00
| 2021-08-01T08:57:00
| 342,442,765
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,620
|
py
|
# -*- coding: utf-8 -*-
# @Author : ruiwu
# @Email : ryanwoo@zju.edu.cn
# @Title : 1143 最长公共子序列
# @Content : 给定两个字符串 text1 和 text2,返回这两个字符串的最长公共子序列的长度.
# 若这两个字符串没有公共子序列,则返回 0.
class Solution:
    def longestCommonSubsequence1(self, text1: str, text2: str) -> int:
        """Length of the longest common subsequence (plain recursion, exponential)."""
        def lcs_from(i: int, j: int) -> int:
            # Past the front of either string: the empty string contributes 0.
            if i < 0 or j < 0:
                return 0
            if text1[i] == text2[j]:
                # Matching tail characters always belong to some LCS.
                return lcs_from(i - 1, j - 1) + 1
            # Otherwise drop one tail character, keeping whichever choice is better.
            return max(lcs_from(i - 1, j), lcs_from(i, j - 1))

        return lcs_from(len(text1) - 1, len(text2) - 1)

    def longestCommonSubsequence2(self, text1: str, text2: str) -> int:
        """Length of the longest common subsequence (bottom-up DP table).

        table[i][j] is the LCS length of text1[:i] and text2[:j]; row 0 and
        column 0 encode the empty-prefix base case and stay 0.
        """
        rows, cols = len(text1), len(text2)
        table = [[0] * (cols + 1) for _ in range(rows + 1)]
        for i in range(1, rows + 1):
            for j in range(1, cols + 1):
                if text1[i - 1] == text2[j - 1]:
                    # One more LCS character found.
                    table[i][j] = table[i - 1][j - 1] + 1
                else:
                    table[i][j] = max(table[i - 1][j], table[i][j - 1])
        return table[rows][cols]
if __name__ == '__main__':
    solver = Solution()
    # (text1, text2) pairs from the problem statement; expected LCS lengths
    # are 3 ("ace"), 3 ("abc"), and 0 (no common subsequence).
    cases = [
        ("abcde", "ace"),
        ("abc", "abc"),
        ("abc", "def"),
    ]
    for idx, (left, right) in enumerate(cases, start=1):
        pair = (solver.longestCommonSubsequence1(left, right),
                solver.longestCommonSubsequence2(left, right))
        print(f'case{idx}:', pair)
|
[
"ryanwoo@zju.edu.cn"
] |
ryanwoo@zju.edu.cn
|
78363b0fe2022a55e40a75f79966bd4e280108fb
|
99dd08b129792494cd2cd74224ce5a8de68ac4c9
|
/app/migrations/0009_auto_20160605_2237.py
|
c453c0484a366bdb7ec0961161d1fe95913ce75b
|
[] |
no_license
|
ssssergey/DjangoPauk
|
78d90239792d1c07f88809fed36682242260be8e
|
887b71648c3c30f0bd90eb5becf3e9b50d6f44a8
|
refs/heads/master
| 2021-01-20T19:36:38.899428
| 2016-07-10T21:20:26
| 2016-07-10T21:20:26
| 60,295,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,378
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-05 19:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the UserCountry through-model,
    # drops auto_now_add behaviour from News.download_time, and wires the
    # Countries<->User many-to-many relation through UserCountry.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('app', '0008_auto_20160605_0023'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserCountry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_time', models.DateTimeField()),
                ('checked', models.BooleanField(default=True)),
                ('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.Countries')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AlterField(
            model_name='news',
            name='download_time',
            field=models.DateTimeField(),
        ),
        migrations.AddField(
            model_name='countries',
            name='users',
            field=models.ManyToManyField(related_name='countries', through='app.UserCountry', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"lse1983@mail.ru"
] |
lse1983@mail.ru
|
f483e7c7b7682bce9d8387d15166f0d4ba4223ae
|
78b7b3e27553ccf0b89c24cbd11662600db26b4c
|
/ScrapeNASAPicDayWebsite/.history/scraper_20190701155658.py
|
9085a093cb3265c5dda31be4a0d1f5a1e80d88a7
|
[] |
no_license
|
web3-qa/intermediatePython
|
2c23408bd6d6dffc070b92e1155d3c072cfe040c
|
b4791db2bcb59aaf9c447cf50ffd4d21cacbe16b
|
refs/heads/master
| 2023-02-08T14:18:54.288227
| 2019-07-18T13:31:23
| 2019-07-18T13:31:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 188
|
py
|
import urllib.request
from bs4 import BeautifulSoup

# Fetch NASA's "Astronomy Picture of the Day" archive index and parse it.
# Fixes relative to the original: the URL scheme typo ("hhtp"), the module
# importing BeautifulSoup from itself (a self-import that re-executes this
# script), the discarded urlopen() result (content was never assigned), and
# the missing parser argument to BeautifulSoup.
content = urllib.request.urlopen("http://apod.nasa.gov/apod/archivepix.html").read()
soup = BeautifulSoup(content, "html.parser")
|
[
"dcolmer@statestreet.com"
] |
dcolmer@statestreet.com
|
4f9e97ff4a141d409fa1a42cf7808dcf0bd944bb
|
b77e12c0fc66cf47f83359fe8a04890669058f08
|
/day_5/dictAndSet.py
|
e9db838ac14a2b726afe23383f0aae6c8d717cf6
|
[] |
no_license
|
dydy061951/SeleniumProject
|
177bb8bdd9f3f586d63c7330d4e5bcdc473cf7c8
|
857fddb74748133475e5f4583007446ab7d2184f
|
refs/heads/master
| 2021-08-23T04:41:30.705381
| 2017-12-03T09:26:14
| 2017-12-03T09:26:14
| 112,907,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
# dict is short for "dictionary"; set is a collection; both resemble arrays.
# In Python, tuples use parentheses and lists use square brackets.
# Tuples use (), lists use [], and both dicts and sets use {}.
# For example, describing the same student record four ways:
stu=("001","小明","男",23) # Tuple: read-only; elements cannot be added, removed or changed — only read.
# Difference between a tuple and an array:
# An array lets you modify element values but not add/remove them, and all its elements share one type.
# A tuple cannot be modified at all, but its elements may mix types (numbers and strings).
stu1=["001","小明","男",23] # List: supports add/remove/update/lookup; the most commonly used container.
# find_elements() (Selenium) also returns a list.
stu2={"001","小明","男",23} # Set: unordered (no index lookup) and deduplicated — repeated elements are dropped.
stu3={"id":"001","姓名":"小明","性别":"男","年龄":"23"} # Dict: key:value pairs are self-describing; unordered conceptually, keys unique, values may repeat.
print(stu3['姓名'])
|
[
"51Testing"
] |
51Testing
|
674ef2ade2f77f65c921287874e2e1a94c29f507
|
27da9fb329a867a6035ecefb77c3a591eefa1e17
|
/tools/data_faker/data_faker/__main__.py
|
63faaed86541ebc70adffb12911f915503996460
|
[
"BSD-3-Clause"
] |
permissive
|
ngoctrantl/rotki
|
ceef5d3c11ff987889997b3ef1939ef71daaa2ce
|
c30b2d0084c215b72e061e04d9f8391f8106b874
|
refs/heads/develop
| 2020-12-21T06:49:47.819538
| 2020-01-26T10:35:45
| 2020-01-26T15:29:56
| 236,344,817
| 0
| 0
|
BSD-3-Clause
| 2020-01-26T17:04:32
| 2020-01-26T17:03:27
| null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
from gevent import monkey # isort:skip # noqa
monkey.patch_all() # isort:skip # noqa
import logging
from data_faker.args import data_faker_args
from data_faker.faker import DataFaker
from data_faker.mock_apis.api import APIServer, RestAPI
logger = logging.getLogger(__name__)
def main() -> None:
    """Parse CLI options, build the fake exchanges, and serve the mock API."""
    options = data_faker_args().parse_args()
    faker = DataFaker(options)
    server = APIServer(RestAPI(
        fake_kraken=faker.fake_kraken,
        fake_binance=faker.fake_binance,
    ))
    print('SERVER IS NOW RUNNING')
    # For some reason debug=True throws an exception:
    # ModuleNotFoundError: No module named 'data_faker
    # server.run(debug=True)
    server.run()
    print('SERVER IS NOW SHUTTING DOWN')


if __name__ == '__main__':
    main()
|
[
"lefteris@refu.co"
] |
lefteris@refu.co
|
19a0bd540f1464267b32189c9380ffdd67d3eb3f
|
a1fc57c6a3e3101d53729ad11df22adb058f1060
|
/instagram/posts/models/posts.py
|
65ff67af1e6b4c9e356e4d7d992c72aaa9674904
|
[] |
no_license
|
morwen1/curso_instagram
|
7f5742256a1eacf38a78b06a62e3f21bcf8b10a9
|
d201ff1f35f5f682242e4f49867fe6cad144d5c8
|
refs/heads/master
| 2020-07-17T23:00:47.259139
| 2019-09-03T02:37:21
| 2019-09-03T02:37:21
| 206,119,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 857
|
py
|
from django.db import models
from utils.abstract_model import AbstractModel
class Post(AbstractModel):
    """
    A user's post: an image (file upload or base64 payload), a caption,
    denormalized like/reply counters, and its associated comments.
    """
    # Author of the post; one profile can own many posts.
    profile = models.ForeignKey('users.Profile',
    help_text = 'este es una clave foranea diciendo que los posts pueden tener muchos usuarios'
    , on_delete=models.CASCADE)
    # Uploaded image file (optional — a post may instead carry photobase64).
    photo = models.ImageField(
        upload_to = 'static/posts'
    , blank=True)
    # Alternative image transport: the picture encoded as base64 text.
    photobase64 = models.TextField( blank=True)
    description = models.CharField(max_length=255)
    # Like/reply counters are plain integers, denormalized to make the
    # frontend's life easier (no aggregate query needed per post).
    likes = models.IntegerField(default=0)
    reply = models.IntegerField(default=0)
    # Comments may be empty — a post is allowed to have no comments.
    comments = models.ManyToManyField(
        to='posts.Comment' ,
        blank=True)
|
[
"morwen901@gmail.com"
] |
morwen901@gmail.com
|
57e440f9fd98a0afa4340c24c19adfdec78fcf41
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03011/s231502042.py
|
7a3be73ad896f44c8d92be6dc0b51274a08bf211
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
# -*- coding: utf-8 -*-
from itertools import combinations
# Read the integers on one input line and print the smallest sum over all
# unordered pairs (equivalently, the sum of the two smallest values).
print(min(map(lambda x: sum(x), list(combinations(list(map(int, input().split())), 2)))))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3f053d139e2165e119e91310782c2ab28c379bfa
|
50914176887f9f21a3489a9407195ba14831354c
|
/three_sum.py
|
47d8ce947ca63a31a4d8a149ad33f21e2a0c41bf
|
[] |
no_license
|
nkukarl/leetcode
|
e8cfc2a31e64b68222ad7af631277f1f66d277bc
|
b1dbe37e8ca1c88714f91643085625ccced76e07
|
refs/heads/master
| 2021-01-10T05:42:04.022807
| 2018-02-24T03:55:24
| 2018-02-24T03:55:24
| 43,725,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
class Solution:
    def three_sum(self, numbers, target):
        """Return all unique triplets from *numbers* whose sum equals *target*.

        Classic sort + two-pointer scan. *numbers* is sorted in place; each
        qualifying triplet is reported once, with its members in ascending
        order, and duplicate anchors/pointers are skipped.
        """
        numbers.sort()
        triplets = []
        last = len(numbers) - 1
        for first in range(len(numbers) - 2):
            # Same anchor value as the previous iteration -> only duplicates.
            if first > 0 and numbers[first] == numbers[first - 1]:
                continue
            lo, hi = first + 1, last
            while lo < hi:
                current = numbers[first] + numbers[lo] + numbers[hi]
                if current < target:
                    lo += 1
                elif current > target:
                    hi -= 1
                else:
                    triplets.append([numbers[first], numbers[lo], numbers[hi]])
                    # Step both pointers past any runs of repeated values.
                    while lo < hi and numbers[lo] == numbers[lo + 1]:
                        lo += 1
                    while lo < hi and numbers[hi] == numbers[hi - 1]:
                        hi -= 1
                    lo += 1
                    hi -= 1
        return triplets
|
[
"kai.wang.nankai@gmail.com"
] |
kai.wang.nankai@gmail.com
|
16a5e5311dd5aa7dcfe895249e7d901d2b518249
|
397e125e94f4f139f2bf5055824d81f24b8b1757
|
/ABC/061/C.py
|
47ac392a04a86831bd757380bb2584b700a9cb8c
|
[] |
no_license
|
tails1434/Atcoder
|
ecbab6ee238e3f225551297db961b1b502841fa4
|
e7c7fed36be46bbaaf020a70997842240ba98d62
|
refs/heads/master
| 2021-07-07T00:31:49.235625
| 2020-09-30T01:42:01
| 2020-09-30T01:42:01
| 189,009,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 316
|
py
|
def main():
    # Read N pairs (a, b): treat each as "the value a occurs b more times",
    # then print the K-th smallest value overall.
    N, K = map(int, input().split())
    # ans[a] accumulates the total multiplicity of value a (values fit in 0..10**5).
    ans = [0] * (10 ** 5 + 1)
    for i in range(N):
        a, b = map(int, input().split())
        ans[a] += b
    # Walk values in increasing order, decrementing K by each bucket's size
    # until K lands inside the current bucket.
    for i in range(10 ** 5 + 1):
        if K <= ans[i]:
            print(i)
            exit()
        else:
            K -= ans[i]
main()
|
[
"sososo1333@gmail.com"
] |
sososo1333@gmail.com
|
4e4e7e019f8f77c6f1c5dfdc25e15ce358740c13
|
adf428caea488bfbc22917b8d340dde3293fc306
|
/gan/cloud/trainer/mytask.py
|
ea4de8f20e131a8cc074ecd3edd431b3a661f397
|
[] |
no_license
|
tingleshao/riviera
|
3269a0a0cb30da96bfd33ba3d950a873fdfa24e3
|
f44f43bc2b08d50d6bbc6d0b61fcb91146da5d9f
|
refs/heads/master
| 2021-09-11T20:29:35.615539
| 2018-04-11T23:23:32
| 2018-04-11T23:23:32
| 115,686,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,803
|
py
|
import argparse
import model
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.utils import (
saved_model_export_utils)
def generate_experiment_fn(train_files,
                           eval_files,
                           num_epochs=None,
                           train_batch_size=40,
                           eval_batch_size=40,
                           embedding_size=8,
                           first_layer_size=100,
                           num_layers=4,
                           scale_factor=0.7,
                           **experiment_args):
    """Create an experiment function given hyperparameters.

    See command line help text for description of args.
    Returns:
      A function (output_dir) -> Experiment where output_dir is a string
      representing the location of summaries, checkpoints, and exports.
      this function is used by learn_runner to create an Experiment which
      executes model code provided in the form of an Estimator and
      input functions.

      All listed arguments in the outer function are used to create an
      Estimator, and input functions (training, evaluation, serving).
      Unlisted args are passed through to Experiment.
    """
    def _experiment_fn(output_dir):
        # num_epochs can control duration if train_steps isn't
        # passed to Experiment
        train_input = model.generate_input_fn(
            train_files,
            num_epochs=num_epochs,
            batch_size=train_batch_size,
        )
        # Don't shuffle evaluation data
        # (NOTE(review): assumes model.generate_input_fn shuffles by default —
        # confirm against the model module, which is outside this file.)
        eval_input = model.generate_input_fn(
            eval_files,
            batch_size=eval_batch_size,
            shuffle=False
        )
        return tf.contrib.learn.Experiment(
            model.build_estimator(
                output_dir,
                embedding_size=embedding_size,
                # Construct layers sizes with exponetial decay:
                # layer i has max(2, first_layer_size * scale_factor**i) units.
                hidden_units=[
                    max(2, int(first_layer_size * scale_factor**i))
                    for i in range(num_layers)
                ]
            ),
            train_input_fn=train_input,
            eval_input_fn=eval_input,
            # export strategies control the prediction graph structure
            # of exported binaries.
            export_strategies=[saved_model_export_utils.make_export_strategy(
                model.serving_input_fn,
                default_output_alternative_key=None,
                exports_to_keep=1
            )],
            **experiment_args
        )
    return _experiment_fn
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Input Arguments
    parser.add_argument(
        '--train-files',
        help='GCS or local paths to training data',
        nargs='+',
        required=True
    )
    parser.add_argument(
        '--num-epochs',
        help="""\
        Maximum number of training data epochs on which to train.
        If both --max-steps and --num-epochs are specified,
        the training job will run for --max-steps or --num-epochs,
        whichever occurs first. If unspecified will run for --max-steps.\
        """,
        type=int,
    )
    parser.add_argument(
        '--train-batch-size',
        help='Batch size for training steps',
        type=int,
        default=40
    )
    parser.add_argument(
        '--eval-batch-size',
        help='Batch size for evaluation steps',
        type=int,
        default=40
    )
    parser.add_argument(
        '--train-steps',
        help="""\
        Steps to run the training job for. If --num-epochs is not specified,
        this must be. Otherwise the training job will run indefinitely.\
        """,
        type=int
    )
    parser.add_argument(
        '--eval-steps',
        help='Number of steps to run evalution for at each checkpoint',
        default=100,
        type=int
    )
    parser.add_argument(
        '--eval-files',
        help='GCS or local paths to evaluation data',
        nargs='+',
        required=True
    )
    # Training arguments (these map 1:1 onto generate_experiment_fn's
    # keyword parameters via the **arguments expansion below)
    parser.add_argument(
        '--embedding-size',
        help='Number of embedding dimensions for categorical columns',
        default=8,
        type=int
    )
    parser.add_argument(
        '--first-layer-size',
        help='Number of nodes in the first layer of the DNN',
        default=100,
        type=int
    )
    parser.add_argument(
        '--num-layers',
        help='Number of layers in the DNN',
        default=4,
        type=int
    )
    parser.add_argument(
        '--scale-factor',
        help='How quickly should the size of the layers in the DNN decay',
        default=0.7,
        type=float
    )
    parser.add_argument(
        '--job-dir',
        help='GCS location to write checkpoints and export models',
        required=True
    )
    # Argument to turn on all logging
    # NOTE(review): the default is tf.logging.FATAL (a numeric level) while
    # the choices are string names; argparse does not validate defaults, and
    # tf.logging.set_verbosity accepts either form — but confirm.
    parser.add_argument(
        '--verbosity',
        choices=[
            'DEBUG',
            'ERROR',
            'FATAL',
            'INFO',
            'WARN'
        ],
        default=tf.logging.FATAL,
        help='Set logging verbosity'
    )
    # Experiment arguments (passed through to Experiment via **experiment_args)
    parser.add_argument(
        '--eval-delay-secs',
        help='How long to wait before running first evaluation',
        default=10,
        type=int
    )
    parser.add_argument(
        '--min-eval-frequency',
        help='Minimum number of training steps between evaluations',
        default=1,
        type=int
    )
    args = parser.parse_args()
    arguments = args.__dict__
    # verbosity and job_dir are consumed here; everything left in `arguments`
    # is forwarded to generate_experiment_fn.
    tf.logging.set_verbosity(arguments.pop('verbosity'))
    job_dir = arguments.pop('job_dir')
    print('Starting Census: Please lauch tensorboard to see results:\n'
          'tensorboard --logdir=$MODEL_DIR')
    # Run the training job
    # learn_runner pulls configuration information from environment
    # variables using tf.learn.RunConfig and uses this configuration
    # to conditionally execute Experiment, or param server code
    #(c) job_dir is the MODEL_DIR, where the trained model is saved
    learn_runner.run(generate_experiment_fn(**arguments), job_dir)
|
[
"cshao@cs.unc.edu"
] |
cshao@cs.unc.edu
|
ead3852d5b3896dc4a4817d88070380e31e7f65c
|
e210c28eeed9d38eb78c14b3a6388eca1e0e85d8
|
/nvflare/app_opt/pt/file_model_locator.py
|
2caab1f304cf3f0c974eb142e0c6083e1d68b582
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/NVFlare
|
5a2d2e4c85a3fd0948e25f1ba510449727529a15
|
1433290c203bd23f34c29e11795ce592bc067888
|
refs/heads/main
| 2023-08-03T09:21:32.779763
| 2023-07-05T21:17:16
| 2023-07-05T21:17:16
| 388,876,833
| 442
| 140
|
Apache-2.0
| 2023-09-14T19:12:35
| 2021-07-23T17:26:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,996
|
py
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from nvflare.apis.dxo import DXO
from nvflare.apis.event_type import EventType
from nvflare.apis.fl_context import FLContext
from nvflare.app_common.abstract.model import model_learnable_to_dxo
from nvflare.app_common.abstract.model_locator import ModelLocator
from nvflare.app_opt.pt.file_model_persistor import PTFileModelPersistor
class PTFileModelLocator(ModelLocator):
    def __init__(self, pt_persistor_id: str):
        """The ModelLocator's job is to find and locate the models inventory saved during training.

        Args:
            pt_persistor_id (str): component ID of the PTFileModelPersistor to query.
        """
        super().__init__()
        self.pt_persistor_id = pt_persistor_id
        # Resolved at START_RUN from the engine's component registry.
        self.model_persistor = None
        # Cached name -> inventory-entry mapping, refreshed by get_model_names().
        self.model_inventory = {}

    def handle_event(self, event_type: str, fl_ctx: FLContext):
        # Defer persistor lookup until the run starts, when components exist.
        if event_type == EventType.START_RUN:
            self._initialize(fl_ctx)

    def _initialize(self, fl_ctx: FLContext):
        # Resolve the configured persistor and fail fast on a wrong type,
        # since every later lookup relies on its PTFileModelPersistor API.
        engine = fl_ctx.get_engine()
        self.model_persistor: PTFileModelPersistor = engine.get_component(self.pt_persistor_id)
        if self.model_persistor is None or not isinstance(self.model_persistor, PTFileModelPersistor):
            raise ValueError(
                f"pt_persistor_id component must be PTFileModelPersistor. " f"But got: {type(self.model_persistor)}"
            )

    def get_model_names(self, fl_ctx: FLContext) -> List[str]:
        """Returns the list of model names that should be included from server in cross site validation.

        Args:
            fl_ctx (FLContext): FL Context object.

        Returns:
            List[str]: List of model names.
        """
        self.model_inventory: dict = self.model_persistor.get_model_inventory(fl_ctx)
        return list(self.model_inventory.keys())

    def locate_model(self, model_name, fl_ctx: FLContext) -> DXO:
        """Call to locate and load the model weights of model_name.

        Args:
            model_name: name of the model (must appear in the inventory
                cached by the most recent get_model_names() call)
            fl_ctx: FLContext

        Returns: model_weight DXO
        """
        if model_name not in list(self.model_inventory.keys()):
            raise ValueError(f"model inventory does not contain: {model_name}")
        model_learnable = self.model_persistor.get(model_name, fl_ctx)
        dxo = model_learnable_to_dxo(model_learnable)
        return dxo
|
[
"noreply@github.com"
] |
NVIDIA.noreply@github.com
|
295adab3b33c2f876c9b28ae186e1deafbe3fdfd
|
c4e97f2eb1081d8fad5e64872c3d6acf9a89d445
|
/Solutions/0140_wordBreak.py
|
04f7ecd57c34aa3b8fb6e9be01cefe48758ef41d
|
[] |
no_license
|
YoupengLi/leetcode-sorting
|
0efb3f4d7269c76a3ed11caa3ab48c8ab65fea25
|
3d9e0ad2f6ed92ec969556f75d97c51ea4854719
|
refs/heads/master
| 2020-05-18T23:28:51.363862
| 2019-09-12T00:42:14
| 2019-09-12T00:42:14
| 184,712,501
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,485
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/7/23 15:47
# @Author : Youpeng Li
# @Site :
# @File : 0140_wordBreak.py
# @Software: PyCharm
'''
140. Word Break II
Given a non-empty string s and a dictionary wordDict containing a list of non-empty words,
add spaces in s to construct a sentence where each word is a valid dictionary word.
Return all such possible sentences.
Note:
The same word in the dictionary may be reused multiple times in the segmentation.
You may assume the dictionary does not contain duplicate words.
Example 1:
Input:
s = "catsanddog"
wordDict = ["cat", "cats", "and", "sand", "dog"]
Output:
[
"cats and dog",
"cat sand dog"
]
Example 2:
Input:
s = "pineapplepenapple"
wordDict = ["apple", "pen", "applepen", "pine", "pineapple"]
Output:
[
"pine apple pen apple",
"pineapple pen apple",
"pine applepen apple"
]
Explanation: Note that you are allowed to reuse a dictionary word.
Example 3:
Input:
s = "catsandog"
wordDict = ["cats", "dog", "sand", "and", "cat"]
Output:
[]
'''
class Solution:
    def wordBreak(self, s: 'str', wordDict: 'List[str]') -> 'List[str]':
        """Return every sentence formed by splitting s into words from wordDict.

        Strategy: prefix DFS, pruned by check() so suffixes that cannot be
        segmented at all are never explored.
        """
        if not s or not wordDict:
            return []
        res = []
        self.dfs(s, wordDict, "", res)
        return res

    def dfs(self, s: 'str', wordDict: 'List[str]', path: 'str', res: 'List[str]'):
        """Append to *res* every sentence for suffix *s*, prefixed by *path*.

        *path* carries the words chosen so far, each followed by one space;
        the trailing space is stripped when a full sentence is recorded.
        """
        if not s:
            res.append(path[:-1])
            return
        if self.check(s, wordDict):  # pruning: only recurse into splittable suffixes
            for i in range(1, len(s) + 1):
                if s[:i] in wordDict:
                    self.dfs(s[i:], wordDict, path + s[:i] + " ", res)

    def check(self, s: 'str', wordDict: 'List[str]') -> 'bool':
        """Word Break I: True iff *s* can be segmented into words of wordDict."""
        if not s or not wordDict:
            return False
        # dp[i] means s[:i] can be segmented; dp[0] is the empty prefix.
        dp = [False] * (len(s) + 1)
        dp[0] = True
        for i in range(len(s)):
            for j in range(i, len(s)):
                if dp[i] and s[i: j + 1] in wordDict:
                    dp[j + 1] = True
        return dp[-1]

    def wordBreak_1(self, s: 'str', wordDict: 'List[str]') -> 'List[str]':
        """Variant of wordBreak bounded by dictionary word lengths.

        Bug fix: the original recursed with dp(ed) twice per candidate edge
        (once in the guard, once in the comprehension) and had no memo,
        making it exponential; a per-start-index memo now caches suffix
        results and dp(ed) is evaluated exactly once per edge.
        """
        if not s or not wordDict:
            return []
        if not self.check_1(s, wordDict):
            return []
        n = len(s)
        word_dict = set(wordDict)
        # Candidate word lengths are bounded by the dictionary's extremes.
        max_len = max(len(word) for word in word_dict)
        min_len = min(len(word) for word in word_dict)
        memo = {}

        def dp(i):
            # All sentences for the suffix s[i:]; [''] marks the empty suffix.
            if i >= n:
                return ['']
            if i in memo:
                return memo[i]
            res = []
            ed_right = min(i + max_len, n)
            for ed in range(i + min_len, ed_right + 1):
                if s[i:ed] in word_dict:
                    rest_sentences = dp(ed)
                    res += [s[i:ed] + ' ' + rest if rest else s[i:ed]
                            for rest in rest_sentences]
            memo[i] = res
            return res

        return dp(0)

    def check_1(self, s: 'str', wordDict: 'List[str]') -> 'bool':
        """Identical contract to check(); delegates to avoid duplicated DP code."""
        return self.check(s, wordDict)

    def wordBreak_2(self, s: 'str', wordDict: 'List[str]') -> 'List[str]':
        """Top-down memoization keyed by the remaining suffix string.

        Every cached sentence carries a trailing space, which is stripped on
        return. (The original also set an unused self.res attribute; removed.)
        """
        if not s or not wordDict:
            return []
        wordDict = set(wordDict)
        backup = {}

        def dfs_2(s: 'str') -> 'List[str]':
            if not s:
                return ['']
            if s not in backup:
                backup[s] = []
                for i in range(1, len(s) + 1):
                    word = s[:i]
                    if word in wordDict:
                        for rest in dfs_2(s[i:]):
                            backup[s].append(word + ' ' + rest)
            return backup[s]

        dfs_2(s)
        return [sentence[:-1] for sentence in backup[s]]
if __name__ == "__main__":
a = Solution()
s = "catsanddog"
wordDict = ["cat", "cats", "and", "sand", "dog"]
print(a.wordBreak(s, wordDict))
print(a.wordBreak_1(s, wordDict))
print(a.wordBreak_2(s, wordDict))
s = "pineapplepenapple"
wordDict = ["apple", "pen", "applepen", "pine", "pineapple"]
print(a.wordBreak(s, wordDict))
print(a.wordBreak_1(s, wordDict))
print(a.wordBreak_2(s, wordDict))
s = "catsandog"
wordDict = ["cats", "dog", "sand", "and", "cat"]
print(a.wordBreak(s, wordDict))
print(a.wordBreak_1(s, wordDict))
print(a.wordBreak_2(s, wordDict))
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \
"baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
wordDict = ["a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa", "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"]
print(a.wordBreak(s, wordDict))
print(a.wordBreak_1(s, wordDict))
print(a.wordBreak_2(s, wordDict))
|
[
"noreply@github.com"
] |
YoupengLi.noreply@github.com
|
484aec251ff1c5e25208e3ebcacfbfdcfa821b7b
|
e4045e99ae5395ce5369a1374a20eae38fd5179b
|
/files/read_names.py
|
4020137ff8b621ccfc423a9891205d6ca36c0eba
|
[] |
no_license
|
srikanthpragada/09_MAR_2018_PYTHON_DEMO
|
74fdb54004ab82b62f68c9190fe868f3c2961ec0
|
8684137c77d04701f226e1e2741a7faf9eeef086
|
refs/heads/master
| 2021-09-11T15:52:17.715078
| 2018-04-09T15:29:16
| 2018-04-09T15:29:16
| 124,910,054
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
# Open the names file for READING in text mode (the original comment said
# "writing", but the mode is "rt") and print each line prefixed with its
# 1-based, zero-padded line number.
with open(r"e:\classroom\python\mar9\names.txt", "rt") as f:
    # Iterate the file lazily instead of materializing readlines(), and let
    # enumerate() start at 1 rather than adding 1 on every iteration.
    for lineno, name in enumerate(f, start=1):
        # Lines keep their trailing newline, so suppress print()'s own.
        print("{:03} {}".format(lineno, name), end='')
|
[
"srikanthpragada@gmail.com"
] |
srikanthpragada@gmail.com
|
6685570716f0c046013d2b6a6abc428738b35399
|
85f5dff291acf1fe7ab59ca574ea9f4f45c33e3b
|
/api/tacticalrmm/checks/migrations/0018_auto_20210205_1647.py
|
cce78b61d2167ba2adcec7ff771ee85901adcc0b
|
[
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sadnub/tacticalrmm
|
a4ecaf994abe39244a6d75ed2166222abb00d4f4
|
0af95aa9b1084973642da80e9b01a18dcacec74a
|
refs/heads/develop
| 2023-08-30T16:48:33.504137
| 2023-04-10T22:57:44
| 2023-04-10T22:57:44
| 243,405,684
| 0
| 2
|
MIT
| 2020-09-08T13:03:30
| 2020-02-27T01:43:56
|
Python
|
UTF-8
|
Python
| false
| false
| 518
|
py
|
# Generated by Django 3.1.4 on 2021-02-05 16:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: widens Check.alert_severity into a nullable choice field
    # (info/warning/error) defaulting to 'warning'.

    dependencies = [
        ('checks', '0017_check_dashboard_alert'),
    ]

    operations = [
        migrations.AlterField(
            model_name='check',
            name='alert_severity',
            field=models.CharField(blank=True, choices=[('info', 'Informational'), ('warning', 'Warning'), ('error', 'Error')], default='warning', max_length=15, null=True),
        ),
    ]
|
[
"josh@torchlake.com"
] |
josh@torchlake.com
|
f751e7a8acf0536d699fa16c80f652308a74ce43
|
9318b1885946f639f1446431abc6ec4fa33fc9ac
|
/typeData.py
|
1b85d8810b009197b06a00e1557aefa5546fc95d
|
[] |
no_license
|
mcewenar/PYTHON_INFO_I_BASIC
|
1d365bcd3d0186c8955e3cde2605831717d0a412
|
e5c3278969b420e7ce03bf7903cf57e63865aaca
|
refs/heads/master
| 2023-06-04T02:26:42.124304
| 2021-06-22T02:48:08
| 2021-06-22T02:48:08
| 326,510,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
# IMPORTANT:
def isInt(data):
    """Print the detected type of *data* (in Spanish) and classify it.

    Returns True for int, False for float, None for str — and also None,
    silently, for any other type (including bool, since type(True) is bool,
    not int).
    """
    kind = type(data)
    if kind == int:
        print("es entero")
        return True
    if kind == float:
        print("es float")
        return False
    if kind == str:
        print("es string")
        return None
#print(isInt(5))
#print(isInt(5.0))
#print(isInt("5"))
#print(isInt("Hola mundo"))
# Demo: read a value from the user. int() raises ValueError for non-numeric
# input, so isInt(x) here can only ever take the int branch.
x=int(input("Ingrese cualquier dato: "))
print(isInt(x))
|
[
"dmcewena@hotmail.com"
] |
dmcewena@hotmail.com
|
c6aaa99c6e382ba9cb455550f3944c94fc5935df
|
490a934f36fdb97827934220eeff71f89f7c3e5d
|
/config.py
|
026520f65ada648e65573c49c6af390f1694a9cf
|
[
"MIT"
] |
permissive
|
qq453388937/Tornado_home_Git
|
9c34a198be8737bbb49a28732cfbe899c0f86828
|
65b36a2816b6648c9bad136249552c8276d4584e
|
refs/heads/master
| 2021-04-06T06:33:46.631606
| 2018-03-29T08:55:43
| 2018-03-29T08:55:43
| 124,759,358
| 0
| 0
|
MIT
| 2018-03-20T13:07:43
| 2018-03-11T13:30:04
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 905
|
py
|
# -*- coding:utf-8 -*-
import os

# torndb (MySQL) connection settings, extracted here from the app code.
# NOTE(review): the original comment said "redis", but this dict configures
# the torndb database connection.
torndb_settings = dict(
    host="127.0.0.1",
    database="ihome",
    user="root",
    password="123",  # per the torndb source, the port defaults to 3306
)
redis_settings = dict(
    host='127.0.0.1',
    port=6379,
)
# Tornado Application settings.
settings = {
    'debug': True,
    'static_path': os.path.join(os.path.dirname(__file__), 'static'),
    'template_path': os.path.join(os.path.dirname(__file__), 'template'),
    # 'static_url_prefix': "/ChinaNumber1",  # defaults to /static; this option overrides the URL prefix for static requests
    'cookie_secret': '0Q1AKOKTQHqaa+N80XhYW7KCGskOUE2snCW06UIxXgI=',  # secure-cookie signing key (generated via base64 + uuid)
    'xsrf_cookies': False
}
log_file = os.path.join(os.path.dirname(__file__), 'logs/log.txt')
log_leve = 'debug'  # NOTE(review): name looks like a typo of "log_level"; left unchanged since other modules may import it
session_expire = 86400  # session lifetime in seconds (one day)
# Password hashing key
passwd_hash_key = "nlgCjaTXQX2jpupQFQLoQo5N4OkEmkeHsHD9+BBx2WQ="
|
[
"453388937@qq.com"
] |
453388937@qq.com
|
7bda8d156f687a4d69a597afd6dacbe903332568
|
c0f69bf01d09718b81814bb8bf274c931801e9c8
|
/codebase/manager_component/monitoring_subcomponent/history_subcomponent/graphing_subcomponent/graphing_class.py
|
522e0767a94a2f96d6d3b706d99124d234954be8
|
[] |
no_license
|
johnpcole/Download-Manager
|
369ec1232f35ec3ab8d653c03f4ea12bbb57207c
|
fd9b287cbfb6b813a6d23877f25423079b063c46
|
refs/heads/master
| 2021-07-19T17:33:40.473368
| 2019-11-03T23:43:19
| 2019-11-03T23:43:19
| 85,001,326
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,795
|
py
|
from .....common_components.datetime_datatypes import datetime_module as DateTime
from .graph_subcomponent import graph_module as Graph
class DefineGraphing:
def __init__(self, smallera, largeera):
# Define graphset size
self.graphsetsize = 6
# Defines the granularity of display of monitor data
self.shorterasize = smallera
self.longerasize = largeera
# Screen metrics
self.widegraphcolumnwidth = 3
self.narrowgraphcolumnwidth = 2
self.graphhorizontaloffset = 9
self.graphverticaloffset = -28
self.graphverticalspacing = 177
self.widegraphwidth = 974 - 18
self.narrowgraphwidth = 480 - 18
self.graphheight = 125
self.graphblockheight = 5
self.wideshortoriginoffset = 39 # Hours from origin to now
self.wideshortoriginoffsetminutes = 40 # Minutes from origin to now
self.narrowshortoriginoffset = 25 # Hours from origin to now
self.narrowshortoriginoffsetminutes = 20 # Minutes from origin to now
self.widelongoriginoffset = 23 + (9 * 24) # Hours from origin to now
self.narrowlongoriginoffset = 9 + (6 * 24) # Hours from origin to now
# =========================================================================================
def drawgraphs(self, longhistorymode, historydataset):
currentdatetime = DateTime.getnow()
graphset = Graph.creategraphset(self.graphsetsize)
for graphindex in [1, 2, 3, 4, 5, 6]:
# Axes
graphset.addto(graphindex, Graph.creategraphaxes(
self.determineorigintimedate(currentdatetime, graphindex, longhistorymode),
self.determinecorrecterasize(longhistorymode),
self.determinecolumnwidth(graphindex),
self.graphhorizontaloffset,
self.determinegraphbottom(1),
self.determinegraphwidth(graphindex),
self.graphheight))
# Graph Headings
graphset.addto(graphindex, Graph.createtitles(
longhistorymode,
self.graphhorizontaloffset,
self.graphverticaloffset,
self.graphverticalspacing,
graphindex))
for graphindex in [1, 4]:
# VPN Bars for graphs 1 & 4
graphset.addto(graphindex, Graph.createvpnbars(
self.determineorigintimedate(currentdatetime, graphindex, longhistorymode),
self.determinecorrecterasize(longhistorymode),
self.determinecolumnwidth(graphindex),
self.graphhorizontaloffset,
self.determinegraphbottom(1),
self.graphheight,
historydataset))
# Upload Bars for graphs 2 & 5
graphset.addto(graphindex + 1, Graph.createuploadedbars(
self.determineorigintimedate(currentdatetime, graphindex + 1, longhistorymode),
self.determinecorrecterasize(longhistorymode),
self.determinecolumnwidth(graphindex + 1),
self.graphhorizontaloffset,
self.determinegraphbottom(1),
self.graphheight,
historydataset))
# Legends
graphset.addto(graphindex, Graph.createstatuslegend(
self.determinegraphwidth(graphindex + 1),
self.determinegraphbottom(1)))
if longhistorymode == True:
# Status bars for graphs 1 & 4
graphset.addto(graphindex, Graph.createstatusbars(
self.determineorigintimedate(currentdatetime, graphindex, longhistorymode),
self.determinecorrecterasize(longhistorymode),
self.determinecolumnwidth(graphindex),
self.graphhorizontaloffset,
self.determinegraphbottom(1),
self.graphheight,
historydataset))
else:
# Status blocks for graphs 1 & 4
graphset.addto(graphindex, Graph.createstatusblocks(
self.determineorigintimedate(currentdatetime, graphindex, longhistorymode),
self.determinecorrecterasize(longhistorymode),
self.determinecolumnwidth(graphindex),
self.graphhorizontaloffset,
self.determinegraphbottom(1),
historydataset,
self.graphblockheight))
# Temp bars for graphs 3 & 6
graphset.addto(graphindex + 2, Graph.createtempbars(
self.determineorigintimedate(currentdatetime, graphindex + 2, longhistorymode),
self.determinecorrecterasize(longhistorymode),
self.determinecolumnwidth(graphindex + 2),
self.graphhorizontaloffset,
self.determinegraphbottom(1),
self.graphheight,
historydataset))
return graphset.printout()
def determinegraphbottom(self, graphindex):
    """Return the y-coordinate of a graph's bottom edge.

    The bottom is the global vertical offset plus one vertical-spacing
    step per graph index.
    """
    spacing = self.graphverticalspacing * graphindex
    return self.graphverticaloffset + spacing
def determinecorrecterasize(self, longhistorymode):
    """Return the era size matching the requested history mode.

    ``longhistorymode`` is compared against False explicitly, matching
    the original's handling of non-boolean values.
    """
    if longhistorymode == False:
        return self.shorterasize
    return self.longerasize
def determineorigintimedate(self, currenttimedate, graphindex, longhistorymode):
    """Return the origin (left-edge) timestamp for graph *graphindex*.

    A working copy of *currenttimedate* is shifted back by a configured
    hour (and, in short-history mode, minute) offset.  Which offset set
    applies depends on whether the graph is wide (index 1-3) or narrow
    (index 4+) and on the history mode.

    NOTE(review): ``DateTime`` is a project class; ``createfromobject``
    presumably copies so the caller's value is not mutated -- confirm
    against its definition.
    """
    graph = DateTime.createfromobject(currenttimedate)
    if graphindex > 3:
        # Narrow graphs (index 4 and above).
        if longhistorymode == True:
            graph.adjusthours(0 - self.narrowlongoriginoffset)
        else:
            graph.adjusthours(0 - self.narrowshortoriginoffset)
            graph.adjustminutes(0 - self.narrowshortoriginoffsetminutes)
    else:
        # Wide graphs (index 1-3).
        if longhistorymode == True:
            graph.adjusthours(0 - self.widelongoriginoffset)
        else:
            graph.adjusthours(0 - self.wideshortoriginoffset)
            graph.adjustminutes(0 - self.wideshortoriginoffsetminutes)
    return graph
def determinegraphwidth(self, index):
    """Return the pixel width for graph *index*: wide for 1-3, narrow for 4+."""
    if index < 4:
        return self.widegraphwidth
    return self.narrowgraphwidth
def determinecolumnwidth(self, index):
    """Return the per-column width for graph *index*: wide for 1-3, narrow for 4+."""
    if index < 4:
        return self.widegraphcolumnwidth
    return self.narrowgraphcolumnwidth
|
[
"homeserverstatus@gmail.com"
] |
homeserverstatus@gmail.com
|
af638ccb4cbe7e382ee5237fc60ac8cb90f021ab
|
0eb8bde44f28866596b9612835b4c0bb37c3a30f
|
/morsels/20200622_instance_tracker/problem_text.py
|
47ba7e437c23ffedc2efa80754b80f954149c6b3
|
[] |
no_license
|
gtcooke94/snippets
|
609ebc85b40453a79845e28113bd545579796379
|
4792e10cf9f056487e992219cfb088529a53e897
|
refs/heads/master
| 2021-06-25T13:01:55.282635
| 2020-11-13T21:01:18
| 2020-11-13T21:01:18
| 170,204,644
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,712
|
py
|
Greetings,
This week I'd like you to make a "class factory" which will allow classes to track instances of themselves.
This instance_tracker class factory will return a class when called and can be used like this:
class Account(instance_tracker()):
def __init__(self, number):
self.number = number
super().__init__()
def __repr__(self):
return 'Account({!r})'.format(self.number)
Now the Account class will have an instances attribute which will keep track of all instances of the Account class.
>>> a1 = Account('4056')
>>> a2 = Account('8156')
>>> print(*Account.instances, sep='\n')
Account('4056')
Account('8156')
At first you can assume that subclasses of instance_tracker never override __init__ without calling super().__init__(...).
Bonus 1
For the first bonus, allow your instance_tracker class factory to optionally accept an attribute name to use for storing the instances (instead of the default instances).
class Person:
def __init__(self, name):
self.name = name
def __repr__(self):
return "Person({!r})".format(self.name)
class TrackedPerson(instance_tracker('registry'), Person):
"""Example of inheritance and renaming 'instances' to 'registry'."""
That class should have a registry attribute instead of an instances attribute:
>>> brett = TrackedPerson("Brett Cannon")
>>> guido = TrackedPerson("Guido van Rossum")
>>> carol = TrackedPerson("Carol Willing")
>>> list(TrackedPerson.registry)
[Person('Brett Cannon'), Person('Guido van Rossum'), Person('Carol Willing')]
Bonus 2
For the second bonus, make sure your instance_tracker factory works even for subclasses that don't call super().__init__(...).
For example this class:
class Person(instance_tracker()):
def __init__(self, name):
self.name = name
def __repr__(self):
return "Person({!r})".format(self.name)
Should work as expected:
>>> nick = Person("Nick Coghlan")
>>> brett = Person("Brett Cannon")
>>> list(Person.instances)
[Person('Nick Coghlan'), Person('Brett Cannon')]
Bonus 3
For the third bonus, I'd like you to make sure that objects which are not referenced anywhere else will be deleted from memory as usual.
Take this class for example:
class Account(instance_tracker()):
def __init__(self, number):
self.number = number
def __repr__(self):
return 'Account({!r})'.format(self.number)
Making three instances where one is no longer referenced (we're using a1 twice below) and one has had its last reference removed (using del a2) should result in just one reference:
>>> a1 = Account('4056')
>>> a2 = Account('8156')
>>> a1 = Account('3168')
>>> del a2
>>> list(Account.instances)
[Account('3168')]
.
|
[
"gtcooke94@gmail.com"
] |
gtcooke94@gmail.com
|
bd5f261eaa813c6baee78561351043cf93204240
|
61aa319732d3fa7912e28f5ff7768498f8dda005
|
/tests/configs/memcheck.py
|
669c71b30a1d9f09e065a72010191df495b1cc72
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
TeCSAR-UNCC/gem5-SALAM
|
37f2f7198c93b4c18452550df48c1a2ab14b14fb
|
c14c39235f4e376e64dc68b81bd2447e8a47ff65
|
refs/heads/main
| 2023-06-08T22:16:25.260792
| 2023-05-31T16:43:46
| 2023-05-31T16:43:46
| 154,335,724
| 62
| 22
|
BSD-3-Clause
| 2023-05-31T16:43:48
| 2018-10-23T13:45:44
|
C++
|
UTF-8
|
Python
| false
| false
| 2,695
|
py
|
# Copyright (c) 2016 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2015 Jason Lowe-Power
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import m5
from m5.objects import *
# the traffic generator is only available if we have protobuf support,
# so potentially skip this test
# The traffic generator is only built with protobuf support; skip otherwise.
require_sim_object("TrafficGen")
# A wrapper around configs/example/memcheck.py
# For some reason, this is implicitly needed by run.py
# (presumably the harness looks ``root`` up in this module's namespace).
root = None
def run_test(root):
    """Entry point invoked by tests/run.py.

    Builds a synthetic argv (program name plus a ``-m <maxtick>`` limit)
    and delegates to the wrapped example config.

    NOTE(review): ``maxtick`` and ``run_config`` are not defined in this
    file -- they are presumably injected by the test harness before this
    function is called; confirm against tests/run.py.
    """
    # Called from tests/run.py
    import sys
    argv = [
        sys.argv[0],
        '-m %d' % maxtick,
    ]
    # Execute the script we are wrapping
    run_config('configs/example/memcheck.py', argv=argv)
|
[
"sroger48@uncc.edu"
] |
sroger48@uncc.edu
|
09394ec926883c40727a02a56b5b4e0447abecb3
|
b15ccd04d3edfb4d6278a055422610be09c3916c
|
/4861_회문/sol1.py
|
150a37727e44769ce062900a4cbe6fe7238ab4b5
|
[] |
no_license
|
hksoftcorn/Algorithm
|
d0f3a1a6009f47e4f391e568b29a3b51d6095d33
|
81b067b8105ba305172dd8271787c19f04d170ba
|
refs/heads/master
| 2023-05-12T21:15:34.668580
| 2021-06-08T07:57:04
| 2021-06-08T07:57:04
| 337,121,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
import sys
sys.stdin = open('sample_input.txt')
def solution(N, M, arr):
    """Find an M-length palindrome in an N x N character grid.

    Rows are scanned first (top to bottom, left to right), then columns
    (left to right, top to bottom); the first palindrome found is
    returned.  Returns None when no palindrome exists.
    """
    # Horizontal candidates.
    for row in arr:
        for start in range(N - M + 1):
            window = row[start:start + M]
            if window == window[::-1]:
                return window
    # Vertical candidates.
    for col in range(N):
        for top in range(N - M + 1):
            window = ''.join(arr[top + k][col] for k in range(M))
            if window == window[::-1]:
                return window
# Driver: for each test case read the grid size N, the palindrome
# length M, then the N grid rows, and print "#<case> <answer>".
T = int(input())
for tc in range(1, T+1):
    N, M = map(int, input().split())
    arr = [input() for i in range(N)]
    print('#{} {}'.format(tc, solution(N, M, arr)))
|
[
"hksoftcorn.dev@gmail.com"
] |
hksoftcorn.dev@gmail.com
|
c21f0f7ddfb24849fcae721146b7c813fd8bbd6b
|
a86ca34e23afaf67fdf858df9e47847606b23e0c
|
/lib/temboo/Library/Amazon/SNS/ListSubscriptionsByTopic.py
|
d3fabd538f4ac9b5184e976b8af2beefa94acba4
|
[] |
no_license
|
miriammelnick/dont-get-mugged
|
6026ad93c910baaecbc3f5477629b0322e116fa8
|
1613ee636c027ccc49c3f84a5f186e27de7f0f9d
|
refs/heads/master
| 2021-01-13T02:18:39.599323
| 2012-08-12T23:25:47
| 2012-08-12T23:25:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,212
|
py
|
###############################################################################
#
# ListSubscriptionsByTopic
# Returns a list of the subscriptions for a specified topic.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class ListSubscriptionsByTopic(Choreography):
    """
    Create a new instance of the ListSubscriptionsByTopic Choreography. A TembooSession object, containing a valid
    set of Temboo credentials, must be supplied.
    """
    def __init__(self, temboo_session):
        # Register this choreo under its Temboo library path.
        Choreography.__init__(self, temboo_session, '/Library/Amazon/SNS/ListSubscriptionsByTopic')
    def new_input_set(self):
        # Factory hook: choreo-specific InputSet.
        return ListSubscriptionsByTopicInputSet()
    def _make_result_set(self, result, path):
        # Factory hook: choreo-specific ResultSet.
        return ListSubscriptionsByTopicResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Factory hook: choreo-specific execution handle.
        return ListSubscriptionsByTopicChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the ListSubscriptionsByTopic
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class ListSubscriptionsByTopicInputSet(InputSet):
    # NOTE(review): the generated code places each description string
    # BEFORE its method, so the first string below becomes the class
    # docstring and the rest are no-op expression statements rather than
    # method docstrings.
    """
    Set the value of the AWSAccessKeyId input for this choreography. ((required, string) The Access Key ID provided by Amazon Web Services.)
    """
    def set_AWSAccessKeyId(self, value):
        InputSet._set_input(self, 'AWSAccessKeyId', value)
    """
    Set the value of the AWSSecretKeyId input for this choreography. ((required, string) The Secret Key ID provided by Amazon Web Services.)
    """
    def set_AWSSecretKeyId(self, value):
        InputSet._set_input(self, 'AWSSecretKeyId', value)
    """
    Set the value of the NextToken input for this choreography. ((optional, string) The token returned from a previous LIstSubscriptionsByTopic request.)
    """
    def set_NextToken(self, value):
        InputSet._set_input(self, 'NextToken', value)
    """
    Set the value of the TopicArn input for this choreography. ((required, string) The ARN of the topic that you want to find subscriptions for.)
    """
    def set_TopicArn(self, value):
        InputSet._set_input(self, 'TopicArn', value)
"""
A ResultSet with methods tailored to the values returned by the ListSubscriptionsByTopic choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class ListSubscriptionsByTopicResultSet(ResultSet):
    """
    Retrieve the value for the "Response" output from this choreography execution. ((xml) The response from Amazon.)
    """
    def get_Response(self):
        # Raw XML response from Amazon, or None when absent.
        return self._output.get('Response', None)
class ListSubscriptionsByTopicChoreographyExecution(ChoreographyExecution):
    # Execution handle that binds results to the choreo-specific ResultSet.
    def _make_result_set(self, response, path):
        return ListSubscriptionsByTopicResultSet(response, path)
|
[
"miriam@famulus"
] |
miriam@famulus
|
81ef572b2720d4856f76b694186f6bcfb53baa0f
|
1c560f8035793e75fb9fda0ff6807cd67a2370ec
|
/ABC214/C.py
|
bbaccfd8e49a455c79e986c8d9cfa5c7fe3e2701
|
[] |
no_license
|
pumbaacave/atcoder
|
fa4c488a30388e3d8b4928a570c730c29df7ac0c
|
61923f8714f21e8dd5ebafa89b2c3929cff3adf1
|
refs/heads/master
| 2023-08-17T02:27:03.091792
| 2023-08-05T13:10:58
| 2023-08-05T13:10:58
| 155,023,403
| 1
| 0
| null | 2022-11-12T02:36:11
| 2018-10-28T01:01:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
import sys
import collections
stdin = sys.stdin
# sys.setrecursionlimit(10**5)
# One-letter stdin helpers (competitive-programming boilerplate).
def ii(): return int(stdin.readline())                               # one int
def li(): return map(int, stdin.readline().split())                  # ints on a line
def li_(): return map(lambda x: int(x)-1, stdin.readline().split())  # 0-based ints
def lf(): return map(float, stdin.readline().split())                # floats on a line
def ls(): return stdin.readline().split()                            # whitespace tokens
def ns(): return stdin.readline().rstrip()                           # stripped line
def lc(): return list(ns())                                          # characters of a line
def ni(): return int(stdin.readline())                               # duplicate of ii()
def nf(): return float(stdin.readline())                             # one float
def run():
    """Solve the circular-relay problem (ABC 214 C).

    Reads N, the pass times S and the direct-delivery times T from
    stdin, then prints for each position the earliest time it holds the
    item: the minimum of its own delivery and the relayed arrival.
    """
    N = ii()
    S = list(li())
    T = list(li())
    ret = [0] * N
    # The position with the globally smallest direct delivery can never
    # be improved by the relay, so the recurrence may start there and
    # wrap around the circle exactly once.
    min_T = min(T)
    start = T.index(min_T)
    time_to_pass = min_T
    for i in range(start, N):
        received = T[i]
        ret[i] = min(received, time_to_pass)
        # Relay arrives at the next position after the pass time S[i].
        time_to_pass = ret[i] + S[i]
    for i in range(start):
        received = T[i]
        ret[i] = min(received, time_to_pass)
        time_to_pass = ret[i] + S[i]
    for n in ret:
        print(n)
if __name__ == '__main__':
run()
|
[
"pumbaacave@yahoo.co.jp"
] |
pumbaacave@yahoo.co.jp
|
7c402d715018475f125d7ed7546a3819242a9451
|
b1aa3c599c5d831444e0ae4e434f35f57b4c6c45
|
/month1/week3/class7/operator.py
|
4fc87cc493b996cd295e5b5d82bda5b92cd31cde
|
[] |
no_license
|
yunyusha/xunxibiji
|
2346d7f2406312363216c5bddbf97f35c1e2c238
|
f6c3ffb4df2387b8359b67d5e15e5e33e81e3f7d
|
refs/heads/master
| 2020-03-28T12:31:17.429159
| 2018-09-11T11:35:19
| 2018-09-11T11:35:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from month1.week3.class7.test_Class import jisuan,Fenshu
# First create two fraction objects.
fel = Fenshu(4,9)
fe2 = Fenshu(5,8)
mf = jisuan()
# NOTE(review): the operator string ' +' carries a leading space --
# confirm jisuan.adjust() tolerates/strips it.
mf.adjust(fel, fe2, ' +')
|
[
"576462286@qq.com"
] |
576462286@qq.com
|
69e0d566f725250eeb6a4df86ade0a78bb6ecaa6
|
4266e9b1c59ddef83eede23e0fcbd6e09e0fa5cb
|
/vs/gyp/test/win/gyptest-rc-build.py
|
c6ee4492d87a3fe06f0d61154a719d5e3350e1c0
|
[
"BSD-3-Clause"
] |
permissive
|
barrystudy/study
|
b3ba6ed652d1a0bcf8c2e88a2a693fa5f6bf2115
|
96f6bb98966d3633b47aaf8e533cd36af253989f
|
refs/heads/master
| 2020-12-24T14:53:06.219236
| 2017-10-23T02:22:28
| 2017-10-23T02:22:28
| 41,944,841
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure we build and include .rc files.
"""
import TestGyp
import sys
if sys.platform == 'win32':
    # .rc resource compilation is Windows-only (MSVS and ninja generators).
    test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
    CHDIR = 'rc-build'
    test.run_gyp('hello.gyp', chdir=CHDIR)
    test.build('hello.gyp', test.ALL, chdir=CHDIR)
    # An immediate rebuild must be a no-op (checks .rc dependency tracking).
    test.up_to_date('hello.gyp', 'resource_only_dll', chdir=CHDIR)
    # Expected exit status 4 -- presumably set by the binary after
    # loading its resources; confirm against the rc-build fixture.
    test.run_built_executable('with_resources', chdir=CHDIR, status=4)
    test.pass_test()
|
[
"2935973620@qq.com"
] |
2935973620@qq.com
|
12934916e6b0d3c94d1a4fee1d88ccb21c46b386
|
7fd1406b7e94d4b82a158ce5be87b5ae821e16b6
|
/pro4_2.py
|
4f291fb3e1e6b3638c2ed6eb70f86a2232d3f486
|
[] |
no_license
|
THABUULAGANATHAN/guvi-programs
|
c1c4d314c7ce43d6c3996fdac85616248c69e4fd
|
fb004f6916776ca9fbe07b8d507f9725cc55248f
|
refs/heads/master
| 2022-01-15T09:08:32.904234
| 2019-07-19T06:45:04
| 2019-07-19T06:45:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
# Read two ints; the smaller one is the number of rows to read next.
nu1,nu2=map(int,input().split())
if nu1<=nu2:
    u=nu1
else:
    u=nu2
m=[]
for i in range(0,u):
    # Each row is sorted ascending as it is read.
    m.append(sorted(list(map(int,input().split()))))
# Sort the rows themselves (lexicographically).
m=sorted(m)
# One bubble pass per column: swap vertically adjacent out-of-order cells.
for i in range(0,len(m[0])):
    for j in range(0,len(m)-1):
        if m[j][i]>m[j+1][i]:
            m[j][i],m[j+1][i]=m[j+1][i],m[j][i]
for i in m:
    print(*i)
|
[
"noreply@github.com"
] |
THABUULAGANATHAN.noreply@github.com
|
d224e4bc048889e5860384746a106809df71fbd6
|
4b169d970dc9390ab53281d4a4a1cb32f79f9317
|
/subject.py
|
7e9214220b4ef27ce31b7bce1366a7b8d0c816f6
|
[] |
no_license
|
marloverket/crosstask
|
96a710946f2db1cda18c9f9cb9da3cc8aaa3455f
|
21ba7ea1c5a0f48be252acbea23e916d49bbaebb
|
refs/heads/master
| 2021-01-19T15:40:32.553961
| 2012-11-26T04:04:09
| 2012-11-26T04:04:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,747
|
py
|
import viz,vizinfo,viztask,vizact
from datetime import datetime
global subj
"""
Adds a panel to the screen where the subject info gets
filled out.
"""
# --- Module-level GUI: the participant-info panel and its widgets ---
info_box = vizinfo.add('')
info_box.scale(2,2)
info_box.translate(0.85,0.8)
info_box.title('Participant Info')
#Add the GUI elements to the box
id_box = info_box.add(viz.TEXTBOX,'Participant ID')
day_box = info_box.add(viz.TEXTBOX, 'Day')
run_box = info_box.add(viz.TEXTBOX, 'Run Number')
scan_box = info_box.add(viz.CHECKBOX,'Scanner')
training_box = info_box.add(viz.CHECKBOX,'Training?')
run_button = info_box.add(viz.BUTTON,'Run')
# Hidden until Subject.show_gui() reveals it.
info_box.visible(viz.OFF)
class Subject(object):
    """Collects participant/session metadata through the Vizard GUI panel.

    Records a creation timestamp and, optionally, an external-trigger
    offset used to align this subject's behavioral data.
    """
    def __init__(self):
        # Creation timestamp formatted "YYYY.MM.DD.HH.MM".
        self.init_time = datetime.now().strftime("%Y.%m.%d.%H.%M")
        # -1 means "no external trigger recorded yet".
        self.time_offset = -1
    def set_time_offset(self, timestamp):
        """
        If the experiment relies on an external trigger
        to begin, set the timing offset so that this subject's
        behavioral data can be aligned to it when dumped.
        """
        self.time_offset = timestamp
    def grab_info(self):
        """ Reads the information from the vizinfo
        widgets and fills in details about this
        subject.

        Generator meant for viztask.schedule(): yields until the Run
        button is released, then harvests the widget values and removes
        the panel.
        """
        self.show_gui()
        yield viztask.waitButtonUp(run_button)
        self.subject_id = "S%03i"%int(id_box.get())
        self.run_num = "R%02i"%int(run_box.get())
        self.day_num = "D%02i"%int(day_box.get())
        self.is_scanning = bool(scan_box.get())
        self.is_training = bool(training_box.get())
        info_box.remove()
    def show_gui(self):
        # Reveal the module-level participant-info panel.
        info_box.visible(viz.ON)
    def get_experiment(self):
        """
        Experiment files should be named such that it is unambiguous
        which file should go with this subject/day/run.
        """
        raise NotImplementedError(
            "Must be overwritten in subclass")
if __name__=="__main__":
    viz.go()
    # NOTE(review): ``experiment`` is not defined anywhere in this module;
    # running this file directly raises NameError. Presumably a subclass
    # module is expected to define it -- confirm intended usage.
    viztask.schedule(experiment())
|
[
"mattcieslak@gmail.com"
] |
mattcieslak@gmail.com
|
09ad4a8a300cc289665cb238bd3bdbbaf5769d75
|
f06d9cd5fb86885a73ee997c687f3294840dd199
|
/services/flickr.py
|
c21a01e2fd33883bb08bcd8d4e89cbe4ed018d9d
|
[] |
no_license
|
bu2/oauth-proxy
|
aaff16a07d5c2c07c8243293c9ed41205b251a74
|
dbed492f8a806c36177a56ca626f005acec904b1
|
refs/heads/master
| 2020-12-26T15:53:40.618570
| 2013-07-09T05:06:16
| 2013-07-09T05:06:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
import foauth.providers
class Flickr(foauth.providers.OAuth1):
    """OAuth1 provider definition for the Flickr API."""
    # General info about the provider
    provider_url = 'http://www.flickr.com/'
    docs_url = 'http://www.flickr.com/services/api/'
    category = 'Pictures'
    # URLs to interact with the API
    request_token_url = 'http://www.flickr.com/services/oauth/request_token'
    authorize_url = 'http://www.flickr.com/services/oauth/authorize'
    access_token_url = 'http://www.flickr.com/services/oauth/access_token'
    api_domain = 'secure.flickr.com'
    # (scope value, human-readable description); None selects read-only.
    available_permissions = [
        (None, 'access your public and private photos'),
        ('write', 'upload, edit and replace your photos'),
        ('delete', 'upload, edit, replace and delete your photos'),
    ]
    permissions_widget = 'radio'
    def get_authorize_params(self, redirect_uri, scopes):
        """Extend the standard OAuth1 params with Flickr's ``perms`` field."""
        params = super(Flickr, self).get_authorize_params(redirect_uri, scopes)
        if any(scopes):
            params['perms'] = scopes[0]
        else:
            # No scope selected: fall back to read-only access.
            params['perms'] = 'read'
        return params
    def get_user_id(self, key):
        """Return the authenticated user's NSID via flickr.people.getLimits."""
        url = u'/services/rest/?method=flickr.people.getLimits'
        url += u'&format=json&nojsoncallback=1'
        r = self.api(key, self.api_domain, url)
        return r.json()[u'person'][u'nsid']
|
[
"marty@martyalchin.com"
] |
marty@martyalchin.com
|
21703522e9344dd45bae154f7468fa13d918ed67
|
600df3590cce1fe49b9a96e9ca5b5242884a2a70
|
/build/android/pylib/junit/test_dispatcher.py
|
51253d4cc07f90be1bf883c29ac92bd70b12bc0c
|
[
"BSD-3-Clause"
] |
permissive
|
metux/chromium-suckless
|
efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a
|
72a05af97787001756bae2511b7985e61498c965
|
refs/heads/orig
| 2022-12-04T23:53:58.681218
| 2017-04-30T10:59:06
| 2017-04-30T23:35:58
| 89,884,931
| 5
| 3
|
BSD-3-Clause
| 2022-11-23T20:52:53
| 2017-05-01T00:09:08
| null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from pylib import constants
from pylib.base import base_test_result
def RunTests(tests, runner_factory):
  """Runs a set of java tests on the host.

  Args:
    tests: Iterable of test identifiers to execute.
    runner_factory: Callable returning a runner exposing SetUp/RunTest/TearDown.

  Return:
    A tuple containing the results & the exit code.
  """
  aggregated = base_test_result.TestRunResults()
  exit_code = 0
  for test in tests:
    # A fresh runner per test; SetUp/TearDown bracket each run.
    runner = runner_factory(None, None)
    runner.SetUp()
    results, return_code = runner.RunTest(test)
    runner.TearDown()
    aggregated.AddResults(results)
    if return_code != 0:
      exit_code = constants.ERROR_EXIT_CODE
  return (aggregated, exit_code)
|
[
"enrico.weigelt@gr13.net"
] |
enrico.weigelt@gr13.net
|
b8aa82a8b82c5da5dc36d563d5cbd1447a6552cb
|
1a1b7f607c5e0783fd1c98c8bcff6460e933f09a
|
/core/ras/ras_loader.py
|
b66f88e4e3755f436f6ce986949291f6a9faf8f8
|
[] |
no_license
|
smrmohammadi/freeIBS
|
14fb736fcadfaea24f0acdafeafd2425de893a2d
|
7f612a559141622d5042614a62a2580a72a9479b
|
refs/heads/master
| 2021-01-17T21:05:19.200916
| 2014-03-17T03:07:15
| 2014-03-17T03:07:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,124
|
py
|
from core.db import db_main
from core.ras import ras_main
from core.ibs_exceptions import *
from core.errors import errorText
from radius_server.pyrad.server import RemoteHost
class RasLoader:
    """Loads RAS (remote access server) definitions from the database
    and keeps them indexed by both IP address and numeric id, plus a
    parallel map of pyrad RemoteHost objects for the radius server.

    Note: this module targets Python 2 (``dict.has_key`` is used
    elsewhere in the file), so the Python 2 idioms are kept.
    """
    def __init__(self):
        self.rases_ip={}             # ras_ip -> ras object
        self.rases_id={}             # ras_id -> ras object
        self.radius_remote_hosts={}  # ras_ip -> pyrad RemoteHost
    def __getitem__(self,key):
        # Indexing a loader looks up by ras id.
        return self.getRasByID(key)
    def getRasByIP(self,ras_ip):
        try:
            return self.rases_ip[ras_ip]
        except KeyError:
            raise GeneralException(errorText("RAS","INVALID_RAS_IP")%ras_ip)
    def getRasByID(self,ras_id):
        try:
            return self.rases_id[ras_id]
        except KeyError:
            raise GeneralException(errorText("RAS","INVALID_RAS_ID")%ras_id)
    def checkRasIP(self,ras_ip):
        """
        check if ras with ip "ras_ip" is loaded
        raise a GeneralException on Error
        """
        if not self.rasIPExists(ras_ip):
            raise GeneralException(errorText("RAS","INVALID_RAS_IP")%ras_ip)
    def checkRasID(self,ras_id):
        """
        check if ras with id "ras_id" is loaded
        raise a GeneralException on Error
        """
        if not self.rases_id.has_key(ras_id):
            # BUGFIX: this previously interpolated the undefined name
            # ``ras_ip``, raising NameError instead of the intended
            # GeneralException.
            raise GeneralException(errorText("RAS","INVALID_RAS_ID")%ras_id)
    def rasIPExists(self,ras_ip):
        """
        return True if ras with ip "ras_ip" already exists and False if it doesn't exists
        """
        return self.rases_ip.has_key(ras_ip)
    def getAllRasIPs(self):
        """
        return a list of all ras_ips that is loaded into object
        """
        return self.rases_ip.keys()
    def runOnAllRases(self,method):
        """
        run "method" multiple times with each ras_obj as argument
        method should accept one argument (ras_obj)
        """
        return map(method,self.rases_id.values())
    def loadAllRases(self):
        # Load every RAS flagged active in the database.
        ras_ids=self.__getAllActiveRasIDs()
        map(self.loadRas,ras_ids)
    def loadRas(self,ras_id):
        """
        load ras with id "ras_id" and keep it in the loader object
        """
        ras_obj=self.loadRasObj(ras_id)
        self.keepObj(ras_obj)
    def loadRasObj(self,ras_id):
        """
        load ras with id "ras_id" and return the object
        """
        (ras_info,ras_attrs,ports,ippools)=self.getRasInfo(ras_id)
        ras_obj=self.__createRasObj(ras_info,ras_attrs,ports,ippools)
        return ras_obj
    def getRasInfo(self,ras_id):
        # Gather the four pieces of DB state that describe one RAS.
        ras_info=self.__getRasInfoDB(ras_id)
        ras_attrs=self.__getRasAttrs(ras_id)
        ports=self.__getRasPorts(ras_id)
        ippools=self.__getRasIPpools(ras_id)
        return (ras_info,ras_attrs,ports,ippools)
    def unloadRas(self,ras_id):
        """
        unload ras, with id "ras_id" from object
        useful when the ras is deleted
        """
        ras_obj=self.getRasByID(ras_id)
        ras_obj.unloaded()
        self.unKeepObj(ras_obj)
    def getRadiusRemoteHosts(self):
        # ras_ip -> RemoteHost map consumed by the radius server.
        return self.radius_remote_hosts
    def __getAllActiveRasIDs(self):
        """
        return a list of all ras_id s from table "ras"
        """
        ras_ids=db_main.getHandle().get("ras","active='t'",0,-1,"",["ras_id"])
        return [m["ras_id"] for m in ras_ids]
    def __getRasIPpools(self,ras_id):
        """
        return a list of ras ippool ids in format [pool_id1,pool_id2,..]
        """
        ras_ippools_db=self.__getRasIPpoolsDB(ras_id)
        return [m["ippool_id"] for m in ras_ippools_db]
    def __getRasIPpoolsDB(self,ras_id):
        """
        return a list of ras ippool names from table ras_ippools
        """
        return db_main.getHandle().get("ras_ippools","ras_id=%s"%ras_id)
    def __getRasPorts(self,ras_id):
        """
        return a dic of ports of ras with id "ras_id" in format
        {port_name:{"phone":phone_no,"type":type,"comment":comment}
        """
        ports={}
        db_ports=self.__getPortsDB(ras_id)
        for _dic in db_ports:
            ports[_dic["port_name"]]=_dic
        return ports
    def __getPortsDB(self,ras_id):
        """
        return a list of dics, that returned from db query from table "ras_ports"
        """
        return db_main.getHandle().get("ras_ports","ras_id=%s"%ras_id)
    def __getRasInfoDB(self,ras_id):
        """
        return a dictionary of ras basic info from table "ras"
        """
        return db_main.getHandle().get("ras","ras_id=%s"%ras_id)[0]
    def __getRasAttrs(self,ras_id):
        """
        return ras attributes in a dic with format {attr_name:attr_value}
        """
        attrs={}
        attrs_db=self.__getRasAttrsDB(ras_id)
        for _dic in attrs_db:
            attrs[_dic["attr_name"]]=_dic["attr_value"]
        return attrs
    def __getRasAttrsDB(self,ras_id):
        """
        return a dic of ras_attributes returned from "ras_attrs" table
        """
        return db_main.getHandle().get("ras_attrs","ras_id=%s"%ras_id)
    def __createRasObj(self,ras_info,ras_attrs,ports,ippools):
        """
        create a ras object, using ras_info and ras_attrs
        """
        # The RAS type selects the concrete class via the ras factory.
        return ras_main.getFactory().getClassFor(ras_info["ras_type"])(ras_info["ras_ip"],ras_info["ras_id"],
                ras_info["ras_type"],ras_info["radius_secret"],ports,ippools,ras_attrs)
    def keepObj(self,ras_obj):
        """
        keep "ras_obj" into self, by adding them to internal dics
        """
        self.rases_ip[ras_obj.getRasIP()]=ras_obj
        self.rases_id[ras_obj.getRasID()]=ras_obj
        self.radius_remote_hosts[ras_obj.getRasIP()]=RemoteHost(ras_obj.getRasIP(),ras_obj.getRadiusSecret(),ras_obj.getRasIP())
    def unKeepObj(self,ras_obj):
        # Reverse of keepObj: drop all three index entries.
        del(self.rases_id[ras_obj.getRasID()])
        del(self.rases_ip[ras_obj.getRasIP()])
        del(self.radius_remote_hosts[ras_obj.getRasIP()])
|
[
"farshad_kh"
] |
farshad_kh
|
7df9cc92dfba37d4d64f5ac42303e6293ec477df
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_1_neat/16_0_1_Kaster_count_numbers.py
|
a8f821f2452a7b5e684a7c06e79b1215ca4b622a
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
import sys
def counting_numbers(n, case, i=1, already_have=frozenset()):
    """Return the Code Jam answer line for counting case *case*.

    Counts i*n, (i+1)*n, ... until every decimal digit 0-9 has appeared
    across the counted numbers, then reports the last number counted.
    A step of 0 never reveals new digits, which is reported as INSOMNIA.

    Fixes over the original: the mutable default argument ``set()`` is
    replaced with an immutable frozenset, and the recursion (one frame
    per multiple) is converted to a loop.
    """
    if n == 0:
        return 'Case #%d: %s' % (case, 'INSOMNIA')
    seen = set(already_have)
    while True:
        seen |= set(str(i * n))
        if len(seen) == 10:
            return 'Case #%d: %s' % (case, n * i)
        i += 1
def check_answer(n, i = 1, already_have = set()):
    """Interactive (Python 2) debugging twin of counting_numbers: prints
    the digit sets step by step and waits for Enter between steps.

    NOTE(review): the mutable default argument ``already_have`` is
    shared across calls; harmless here because it is never mutated in
    place (``|`` builds a new set), but fragile.
    """
    if (n == 0):
        print 'INSOMNIA'
    else:
        N = str(i * n)
        unique_nums = set(list(N))
        print 'number: %d' % (n * i), 'unique digits: ' + str(unique_nums), 'seen before: ' + str(already_have)
        sys.stdout.flush()
        combined = unique_nums | already_have
        if len(combined) < 10:
            # Pause so each step can be inspected.
            raw_input()
            check_answer(n, i + 1, combined)
# Driver: read the case count and one value per line from the large
# input, solve each case, and write all answer lines at once.
# open the file
with open('A-large.in', 'r') as f:
    small = [int(a) for a in f.read().split('\n')[:-1]]
T = small[0]
out = ''
for i, number in enumerate(small[1:]):
    out += counting_numbers(number, i+1) + '\n'
open('output2.txt', 'w').write(out)
# check_answer(11)
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
295395e9c4c9fb7d25a1f433a3626ce141121cb9
|
9db82d0fc7819b11ebcae4c3904dde1a75bd1054
|
/setup.py
|
a06c2273e17b0d0b51aa6c03bf54007730d3d415
|
[] |
no_license
|
rblack42/PyLit4
|
f49278ff3417ad4a3348657f1f199f7afc589a1f
|
352f6e962f2265a585de274372ab678a9f3ccddb
|
refs/heads/master
| 2021-01-10T20:47:03.897946
| 2014-08-17T05:37:46
| 2014-08-17T05:37:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 964
|
py
|
import os
from setuptools import setup
def read(fname):
    """Return the text of *fname*, resolved relative to this setup.py.

    Uses a context manager so the file handle is closed promptly; the
    original ``open(...).read()`` leaked the handle until GC.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fh:
        return fh.read()
setup(
    name='PyLit4',
    version='0.1dev',
    url='https://github.com/rblack42/PyLit4',
    license='BSD3',
    author='Roie Black',
    author_email='rblack@austincc.edu',
    description='Literate programming with reStructuredText',
    long_description=read('README.rst'),
    packages=['pylit'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    # BUGFIX: the keyword was misspelled ``install_required``; setuptools
    # silently ignored it, so these dependencies were never declared.
    install_requires=[
        'Flask>=0.10.1',
        'nose>=1.3.3'
    ],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
|
[
"rblack@austincc.edu"
] |
rblack@austincc.edu
|
d192513223a3c78eeb653b57ba0afc6b50e163eb
|
becfe7904e7e17bcd23b891021292542a7968b60
|
/basic_elements/cross_and_circle.py
|
f08aee209c76448d5c7e1d35aea0620fa814d597
|
[
"Apache-2.0"
] |
permissive
|
ppinko/python_knowledge_library
|
5ef482ddc36b1e4968f11b295a72589be268af99
|
089348c80e3f49a4a56839bfb921033e5386f07e
|
refs/heads/master
| 2023-03-21T04:15:15.947396
| 2021-03-07T12:26:00
| 2021-03-07T12:26:00
| 256,592,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,350
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 2 10:32:18 2019
@author: lx
"""
"""
My first interactive game - cross and circle
"""
import sys
from recursion import test
board = [['-', '-', '-'], ['-', '-', '-'], ['-', '-', '-']]
def update_board(a, b, c, current_board):
    """Place symbol *c* at column *a*, row *b* and return the board.

    The board is modified in place; the same list object is returned so
    calls can be chained.
    """
    column = current_board[a]
    column[b] = c
    return current_board
#test(update_board(0, 0, 'x', board) == [['x', '-', '-'], ['-', '-', '-'],
# ['-', '-', '-']])
#test(update_board(1, 2, 'o', board) == [['x', '-', '-'], ['-', '-', 'o'],
# ['-', '-', '-']])
#test(update_board(2, 0, 'x', board) == [['x', '-', '-'], ['-', '-', 'o'],
# ['x', '-', '-']])
#print(board)
def check_win(current_board, symbol):
    """Return True when *symbol* fills any straight line of the 3x3 board."""
    grid = current_board
    lines = [grid[0], grid[1], grid[2]]                          # stored sub-lists
    lines += [[grid[r][c] for r in range(3)] for c in range(3)]  # cross-index lines
    lines.append([grid[d][d] for d in range(3)])                 # main diagonal
    lines.append([grid[d][2 - d] for d in range(3)])             # anti-diagonal
    return any(all(cell == symbol for cell in line) for line in lines)
#test(check_win([['x', '-', '-'], ['-', '-', '-'],
# ['-', '-', '-']], 'x') == False)
#test(check_win([['x', 'x', 'x'], ['-', '-', '-'],
# ['-', '-', '-']], 'x') == True)
#test(check_win([['x', '-', '-'], ['x', '-', '-'],
# ['x', '-', '-']], 'x') == True)
#test(check_win([['x', '-', '-'], ['-', 'x', '-'],
# ['-', '-', 'x']], 'x') == True)
def printed_board(current_board):
    """Print the 3x3 board, one row per line.

    The outer lists are columns, so each printed row takes index *row*
    from each of the three columns.
    """
    for row in range(3):
        print(current_board[0][row], current_board[1][row], current_board[2][row])
def check_move(x, y, possible_moves):
    """Return True when the 1-based square (x, y) is still available.

    Coordinates are converted to the 0-based (column, row) tuples stored
    in *possible_moves*.  The redundant if/else around the membership
    test is collapsed to the boolean expression itself.
    """
    return (x - 1, y - 1) in possible_moves
#possibles = [(0,0), (0,1), (1,1), (1,0)]
#test(check_move(1,1, possibles) == [(0,1), (1,1), (1,0)])
def two_player_game():
""" Interactive game for two players """
symbol = 'x'
possible_moves = [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2), (2,0), (2,1), (2,2)]
current_board = [['-', '-', '-'], ['-', '-', '-'], ['-', '-', '-']]
n = 1
print("Welcome in the game!\nHave a nice game!\n")
printed_board(current_board)
while n <= 9:
x = int(input('Choose a column: '))
y = int(input('Choose a row: '))
z = (x-1, y-1)
if check_move(x, y, possible_moves) == False:
print('Invalid move')
continue
possible_moves.remove(z)
print('\n')
updated_board = update_board(x-1, y-1, symbol, current_board)
current_board = updated_board[:]
printed_board(current_board)
if n >= 3:
if check_win(updated_board, symbol) == True:
return print('WINNER')
n += 1
if symbol == 'x':
symbol = 'o'
else:
symbol = 'x'
return print('END OF THE GAME' + '\n' + 'NO WINNER')
""" Initilizing the game """
two_player_game() # START OF THE GAME
|
[
"p.pinkowicz@gmail.com"
] |
p.pinkowicz@gmail.com
|
ad0163094ee7e3c39d856c2a8d32a28d55661207
|
7bead245354e233f76fff4608938bf956abb84cf
|
/cloudmersive_convert_api_client/models/remove_whitespace_from_text_request.py
|
98ee877987d0fcd6f673c4f2bc3c1052c4a3d3c5
|
[
"Apache-2.0"
] |
permissive
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
|
5ba499937b9664f37cb2700509a4ba93952e9d6c
|
dba2fe7257229ebdacd266531b3724552c651009
|
refs/heads/master
| 2021-10-28T23:12:42.698951
| 2021-10-18T03:44:49
| 2021-10-18T03:44:49
| 138,449,321
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,711
|
py
|
# coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RemoveWhitespaceFromTextRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'text_containing_whitespace': 'str'
}
attribute_map = {
'text_containing_whitespace': 'TextContainingWhitespace'
}
def __init__(self, text_containing_whitespace=None): # noqa: E501
"""RemoveWhitespaceFromTextRequest - a model defined in Swagger""" # noqa: E501
self._text_containing_whitespace = None
self.discriminator = None
if text_containing_whitespace is not None:
self.text_containing_whitespace = text_containing_whitespace
@property
def text_containing_whitespace(self):
"""Gets the text_containing_whitespace of this RemoveWhitespaceFromTextRequest. # noqa: E501
Input text string to remove the whitespace from # noqa: E501
:return: The text_containing_whitespace of this RemoveWhitespaceFromTextRequest. # noqa: E501
:rtype: str
"""
return self._text_containing_whitespace
@text_containing_whitespace.setter
def text_containing_whitespace(self, text_containing_whitespace):
"""Sets the text_containing_whitespace of this RemoveWhitespaceFromTextRequest.
Input text string to remove the whitespace from # noqa: E501
:param text_containing_whitespace: The text_containing_whitespace of this RemoveWhitespaceFromTextRequest. # noqa: E501
:type: str
"""
self._text_containing_whitespace = text_containing_whitespace
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RemoveWhitespaceFromTextRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RemoveWhitespaceFromTextRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"35204726+Cloudmersive@users.noreply.github.com"
] |
35204726+Cloudmersive@users.noreply.github.com
|
fdd7630f165b381094274f0ee6ad3caae9b8abeb
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/KWoj7kWiHRqJtG6S2_10.py
|
65551bedd3a2aa334347384e82cd7b4ef7a2f2ef
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
"""
There is a single operator in Python, capable of providing the remainder of a
division operation. Two numbers are passed as parameters. The first parameter
divided by the second parameter will have a remainder, possibly zero. Return
that value.
### Examples
remainder(1, 3) ➞ 1
remainder(3, 4) ➞ 3
remainder(5, 5) ➞ 0
remainder(7, 2) ➞ 1
### Notes
* The tests only use positive integers.
* Don't forget to `return` the result.
* If you get stuck on a challenge, find help in the **Resources** tab.
"""
remainder = lambda a, b: a % b
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
7da4675809919b8ee509fd2815c35901a2f3e54b
|
5f2f3743e0f8054d62042fc6c05bf994995bfdee
|
/tests/test_dlthx.py
|
14d0cffd033391ce59baf17813d95cad08d92c2d
|
[
"MIT"
] |
permissive
|
li7300198125/itmlogic
|
1374a295278af1b818377049c6e0720386c50195
|
b7297a595b6ab8ec36d3ac5f81755171beed4407
|
refs/heads/master
| 2022-11-15T01:59:57.574625
| 2020-06-27T22:51:23
| 2020-06-27T22:51:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
import pytest
from itmlogic.dlthx import dlthx
def test_dlthx(setup_pfl1):
"""
Tests the delta h value, which is the interdecile range of elevations between point x1
and point x2, generated from the terrain profile pfl1.
The terrain profile (pfl1) is imported from tests/conftest.py via the fixture
setup_pfl1.
The test is derived from the original test for Longley-Rice between for Crystal
Palace (South London) to Mursley, England.
"""
assert round(dlthx(setup_pfl1, 2158.5, 77672.5), 4) == 89.2126
|
[
"edward.oughton@gmail.com"
] |
edward.oughton@gmail.com
|
06e6aaa2ca2b7d9bd08666139ce3cf28ff269e0e
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/gbd_2019/shared_code/central_comp/cod/codem/hybridizer/joblaunch/HybridTask.py
|
6d838cef584d3c08aa703402a2922aba19274e34
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,972
|
py
|
from hybridizer.database import gbd_round_from_id
import hybridizer.metadata as metadata
from codem.joblaunch.CODEmTask import CODEmBaseTask
import logging
logger = logging.getLogger(__name__)
class HybridTask(CODEmBaseTask):
def __init__(self, user, developed_model_version_id,
global_model_version_id,
conn_def,
upstream_tasks=None,
parameter_dict=None,
max_attempts=15,
cores=1,
gb=20,
runtime_min=60*2):
"""
Creates a hybrid task for a CODEm Workflow.
"""
gbd_round_id = gbd_round_from_id(global_model_version_id, conn_def)
model_version_id = metadata.hybrid_metadata(user, global_model_version_id,
developed_model_version_id, conn_def,
gbd_round_id)
self.gbd_round_id = gbd_round_id
self.model_version_id = model_version_id
self.user = user
self.global_model_version_id = global_model_version_id
self.developed_model_version_id = developed_model_version_id
logger.info("New Hybrid Model Version ID: {}".format(model_version_id))
super().__init__(model_version_id=model_version_id,
parameter_dict=parameter_dict, max_attempts=max_attempts,
upstream_tasks=upstream_tasks,
conn_def=conn_def, hybridizer=True,
cores=cores,
gb=gb, minutes=runtime_min
)
command = 'FILEPATH {} {} {} {} {}'. \
format(user, model_version_id, global_model_version_id, developed_model_version_id, conn_def)
self.setup_task(
command=command,
resource_scales={'m_mem_free': 0.5,
'max_runtime_seconds': 0.5}
)
|
[
"cheth@uw.edu"
] |
cheth@uw.edu
|
3781bf374bfe5f8826bd54fb515b1163b4b53ce4
|
4749d3cf395522d90cb74d1842087d2f5671fa87
|
/alice/LC022.py
|
d5ca5a07462cfcc708bf1982ffc93009853c53ea
|
[] |
no_license
|
AliceTTXu/LeetCode
|
c1ad763c3fa229362350ce3227498dfb1f022ab0
|
ed15eb27936b39980d4cb5fb61cd937ec7ddcb6a
|
refs/heads/master
| 2021-01-23T11:49:49.903285
| 2018-08-03T06:00:16
| 2018-08-03T06:00:16
| 33,470,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
class Solution(object):
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
if not n:
return ['']
else:
return self.generateParenthesisCore(n - 1, 1, ['('])
def generateParenthesisCore(self, n, leftCount, out):
if n > 0:
if leftCount > 0:
part1 = self.generateParenthesisCore(n, leftCount - 1, [x + ')' for x in out])
else:
part1 = []
part2 = self.generateParenthesisCore(n - 1, leftCount + 1, [x + '(' for x in out])
return part1 + part2
else:
if leftCount > 0:
return [x + ')' * leftCount for x in out]
def generateParenthesis2(self, n):
if not n:
return ['']
out = []
for i in xrange(n):
for left in self.generateParenthesis2(i):
for right in self.generateParenthesis2(n - i - 1):
out.append('({}){}'.format(left, right))
return out
s = Solution()
print s.generateParenthesis2(3)
|
[
"aliceadelice@gmail.com"
] |
aliceadelice@gmail.com
|
a1dc58d81bc25723ec4c8842e7e14cdd086fbf88
|
9c2b322b36564327cf15e75ff7ad6ef2461643af
|
/code/analysis/delayedfeedback/scaled_noise_test.py
|
aaf8d931f31f9d48cc2dc73af5a06e3f2d446a2a
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
dmytrov/stochasticcontrol
|
3951c0fd555cdcf38bcf6812b1758ed41fd28cf9
|
a289d5c0953c4a328b2177f51168588248c00f2c
|
refs/heads/master
| 2022-12-15T13:19:32.295905
| 2020-09-14T19:57:04
| 2020-09-14T19:57:04
| 295,521,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,897
|
py
|
""" Test for the control-scaled noise covariance matrix
Reference: "Christopher M. Harris and Daniel M. Wolpert - 1998 - Signal-dependent
noise determines motor planning":
"We assume that neural commands have signal-dependent noise
whose standard deviation increases linearly with the absolute value
of the neural control signal."
"""
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import torch
import analysis.delayedfeedback.targetswitching.model as tsm
torch.set_default_dtype(torch.float64)
vcontrol = 1.0* torch.tensor([3.0, 0.0]) # max covariance direction
scale = torch.tensor([4.0, 1.0]) # covariance scale [max, min]
covariance_matrix, m_control_globalscaled = tsm.signal_dependent_noise_covar_torch(vcontrol, scale)
#vcontrol = 1.0* torch.tensor([3.0, 0.0]) # max covariance direction
#covariance_matrix, m_control_globalscaled = tsm.signal_dependent_noise_covar_xaligned_torch(vcontrol, scale)
#covariance_matrix = torch.diag(covariance_matrix)
u, sigma, v = covariance_matrix.svd()
print("u:", u)
print("sigma^2:", sigma)
std = torch.sqrt(sigma)
assert torch.abs(std[0] / std[1] - scale[0] / scale[1]) < 1.0e-3
print(u @ torch.diag(sigma) @ v)
loc = torch.tensor([0.0, 0.0])
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
X = torch.tensor(X)
Y = torch.tensor(Y)
gaussian = torch.distributions.MultivariateNormal(loc=loc, covariance_matrix=covariance_matrix)
XY = torch.stack([X, Y], dim=-1)
Z = torch.exp(gaussian.log_prob(XY))
plt.figure()
m = 0.2 * m_control_globalscaled
plt.arrow(0, 0, m[0, 0], m[1, 0])
plt.arrow(0, 0, m[0, 1], m[1, 1])
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('Control scaled noise')
plt.axis("equal")
plt.grid(True)
plt.show()
|
[
"dmytro.velychko@gmail.com"
] |
dmytro.velychko@gmail.com
|
206f6341e76afb3a6029ba678b56ef8a35aa2ff9
|
83bbca19a1a24a6b73d9b56bd3d76609ff321325
|
/bard/providers/download/__init__.py
|
b2f5fccd3085ca64f2b68c752b7284a583c5807e
|
[] |
no_license
|
b1naryth1ef/bard
|
837547a0cbf5c196f5cb9b0dfbb944703fa993e0
|
a6325a4684080a7a3f61f6f361bd2e0a78986ab9
|
refs/heads/master
| 2021-04-30T22:16:17.347526
| 2020-01-10T18:57:53
| 2020-01-10T18:57:53
| 172,892,673
| 16
| 0
| null | 2021-03-25T22:35:25
| 2019-02-27T10:18:01
|
Python
|
UTF-8
|
Python
| false
| false
| 219
|
py
|
from .iptorrents import IPTorrentsDownloadProvider
from .horriblesubs import HorribleSubsDownloadProvider
PROVIDERS = {
"iptorrents": IPTorrentsDownloadProvider,
"horriblesubs": HorribleSubsDownloadProvider,
}
|
[
"b1naryth1ef@gmail.com"
] |
b1naryth1ef@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.