Column schema (one row per source file; ranges are min..max):

| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3..288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0..112 |
| license_type | string | 2 classes |
| repo_name | string | length 5..115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 .. 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 .. 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 .. 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k .. 681M |
| star_events_count | int64 | 0 .. 209k |
| fork_events_count | int64 | 0 .. 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 .. 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 .. 2023-08-21 12:35:19 |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 .. 12.7k |
| extension | string | 142 classes |
| content | string | length 128 .. 8.19k |
| authors | list | length 1 |
| author_id | string | length 1..132 |
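A minimal consumption sketch for a dataset with this schema; the dataset name below is a placeholder, not this card's real identifier:

```python
# Hypothetical loader for a Hub dataset with the schema above; the dataset
# name is a placeholder invented for illustration.
from datasets import load_dataset

ds = load_dataset("user/python-source-files", split="train", streaming=True)
row = next(iter(ds))
print(row["repo_name"], row["path"], row["length_bytes"])
```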
---
blob_id: 0acfaee2a19dd42351019acacada350f2ece6f31 | directory_id: fbe2c3b1feb69a5ce019c805594a49dc11c7e463 | path: /astrality/tests/module/module_manager/test_persisting_created_files.py | content_id: c7cf4305b01e387d33b1c69a69fdc1345d8fd310
detected_licenses: ["MIT"] | license_type: permissive | repo_name: JakobGM/astrality
snapshot_id: 50630a26ef6428a0c1376269d71ddaa52912f374 | revision_id: 72935b616f9a6a2e9254e9cd9319b525c596e8f0 | branch_name: refs/heads/master
visit_date: 2023-01-07T20:26:05.925893 | revision_date: 2019-11-19T10:15:36 | committer_date: 2019-11-19T10:15:36
github_id: 117,895,437 | star_events_count: 114 | fork_events_count: 7
gha_license_id: MIT | gha_event_created_at: 2022-12-26T20:49:19 | gha_created_at: 2018-01-17T21:34:33 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,518 | extension: py
content:
"""Tests for ensuring that all files that are created are persisted."""
import pytest
from astrality.module import ModuleManager
from astrality.persistence import CreatedFiles
@pytest.mark.parametrize('method', ['compile', 'copy', 'symlink'])
def test_that_created_files_are_persisted(method, create_temp_files):
"""When modules create files, they should be persisted."""
(
template1,
template2,
template3,
target1,
target2,
target3,
) = create_temp_files(6)
# Delete targets to prevent backups from being restored
for target in (target1, target2, target3):
target.unlink()
modules = {
'A': {
method: [
{
'content': str(template1),
'target': str(target1),
},
{
'content': str(template2),
'target': str(target2),
},
],
},
'B': {
method: {
'content': str(template3),
'target': str(target3),
},
},
}
module_manager = ModuleManager(modules=modules)
module_manager.finish_tasks()
created_files = CreatedFiles()
assert created_files.by(module='A') == [target1, target2]
assert created_files.by(module='B') == [target3]
# Now we should be able to cleanup the created files
assert target1.exists()
assert target2.exists()
assert target3.exists()
# First let's see if dry run is respected
created_files.cleanup(module='A', dry_run=True)
assert target1.exists()
assert target2.exists()
assert target3.exists()
assert created_files.by(module='A') == [target1, target2]
assert created_files.by(module='B') == [target3]
# Now see if we can cleanup module A and let B stay intact
created_files.cleanup(module='A')
assert not target1.exists()
assert not target2.exists()
assert target3.exists()
assert created_files.by(module='A') == []
assert created_files.by(module='B') == [target3]
# Now all files should be cleaned
created_files.cleanup(module='B')
assert not target3.exists()
assert created_files.by(module='A') == []
assert created_files.by(module='B') == []
# Let's see if it has been properly persisted too
del created_files
created_files = CreatedFiles()
assert created_files.by(module='A') == []
assert created_files.by(module='B') == []
authors: ["jakobgm@gmail.com"] | author_id: jakobgm@gmail.com

---
blob_id: bd8120b22e58c6e63c7601d35545bfd5546febc3 | directory_id: f7c5e3f5834206a7b0d1dadd773d1de032f731e7 | path: /dmerce1/db2HTML.py | content_id: ae12d22aacbefa718a58e88f68f863685e00d0f8
detected_licenses: [] | license_type: no_license | repo_name: rbe/dmerce
snapshot_id: 93d601462c50dfbbf62b577803ae697d3abde333 | revision_id: 3cfcae894c165189cc3ff61e27ca284f09e87871 | branch_name: refs/heads/master
visit_date: 2021-01-01T17:06:27.872197 | revision_date: 2012-05-04T07:22:26 | committer_date: 2012-05-04T07:22:26
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,326 | extension: py
content:
#!/usr/bin/env python
##
#
# $Author: rb $
# $Revision: 1.2.7.1 $
#
# Revision 1.1 2000-07-13 17:05:50+02 rb
# Initial revision
#
##
#
# I M P O R T M O D U L E S
#
import sys
try:
    import aDataStruct
except:
    # sys is imported outside the try so this handler cannot itself
    # fail with a NameError when reporting the import error
    print '[db2HTML: ERROR LOADING MODULES: (%s, %s)]' % (sys.exc_info()[0], sys.exc_info()[1])
    sys.exit()
#####################################################################
#
# C L A S S E S
#
#####################################################################
#
# Process <select>-tags
#
class select(aDataStruct.aDS):
#
# Constructor
#
def __init__(self):
# Call constructor of aDS
aDataStruct.aDS.__init__(self)
#
# Generate <option>-fields out of a database table
#
def mkOptions(self, table = '', qSuperSearch = '', optionValue = '', optionName = '', selected = ''):
# Build SQL statement
stmt = 'SELECT %s, %s FROM %s WHERE %s' % (optionValue, optionName, table, self.convertqSuperSearch(qSuperSearch))
# Query database
rowCount = self.SQLAL.query(stmt)
# Check rowCount
if not rowCount:
# Return false
return 0
# Fetch result
result = self.SQLAL.fetchall()
# Generate <option>-fields
optionList = [] # Init list
for i in range(0, len(result)):
# Init <option>-tag
appStr = '<option value="%s"' % result[i][0]
# Check if value should be selected
if selected:
if result[i][0] == selected:
appStr = '%s selected' % appStr
appStr = '%s>%s</option>' % (appStr, result[i][1])
# Append to list
optionList.append(appStr)
# Return list of strings
return optionList
#
# Process boxes
#
class box(aDataStruct.aDS):
#
# Constructor
#
def __init__(self):
# Call constructor of aDS
aDataStruct.aDS.__init__(self)
#
# Return nothing or 'checked'
#
#def isChecked(self, var = 0):
# Check value of 'var'
#if var == 1 or var == 'on':
# Yes it is!
#return ' checked'
#else:
# Return nothing
#return ''
#
# Return '' or 'checked'
#
def isChecked(self, expr = ''):
# If true
if expr:
# Return string 'checked'
return 'checked'
# If false, return nothing
else:
return ''
authors: ["ralf@art-of-coding.eu"] | author_id: ralf@art-of-coding.eu

---
blob_id: c65381eabc7b6d43ed0b44838ef96d25e0eb2cd4 | directory_id: 0c5b9ebee22450c214576f18929436527b26a1b0 | path: /starfish/core/morphology/Merge/test/test_simple.py | content_id: f5e8ab5623aa77a99afea463b53830f01dce2371
detected_licenses: ["MIT"] | license_type: permissive | repo_name: spacetx/starfish
snapshot_id: 962b4e4a8c0c193acaa84c016a6edaef76c14769 | revision_id: 853f56c7c02b15397adb921db5e3bde02fdadb63 | branch_name: refs/heads/master
visit_date: 2023-03-09T13:51:30.772904 | revision_date: 2022-09-06T22:16:25 | committer_date: 2022-09-06T22:16:25
github_id: 92,539,237 | star_events_count: 211 | fork_events_count: 75
gha_license_id: MIT | gha_event_created_at: 2023-02-11T01:52:25 | gha_created_at: 2017-05-26T18:45:11 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,186 | extension: py
content:
import numpy as np
import pytest
from starfish.core.morphology.binary_mask import BinaryMaskCollection
from starfish.core.morphology.binary_mask.test.factories import (
binary_arrays_2d,
binary_mask_collection_2d,
binary_mask_collection_3d,
)
from starfish.core.morphology.util import _ticks_equal
from starfish.core.types import Axes, Coordinates
from ..simple import SimpleMerge
def test_success():
mask_collection_0 = binary_mask_collection_2d()
binary_arrays, physical_ticks = binary_arrays_2d()
binary_arrays_negated = [
np.bitwise_not(binary_array)
for binary_array in binary_arrays
]
mask_collection_1 = BinaryMaskCollection.from_binary_arrays_and_ticks(
binary_arrays_negated, None, physical_ticks, None)
merged = SimpleMerge().run([mask_collection_0, mask_collection_1])
assert _ticks_equal(merged._pixel_ticks, mask_collection_0._pixel_ticks)
assert _ticks_equal(merged._physical_ticks, mask_collection_0._physical_ticks)
assert len(mask_collection_0) + len(mask_collection_1) == len(merged)
    # go through all the original uncropped masks, and verify that they are somewhere in the merged
# set.
for mask_collection in (mask_collection_0, mask_collection_1):
for ix in range(len(mask_collection)):
uncropped_original_mask = mask_collection.uncropped_mask(ix)
for jx in range(len(merged)):
uncropped_copy_mask = merged.uncropped_mask(jx)
if uncropped_original_mask.equals(uncropped_copy_mask):
# found the copy, break
break
else:
pytest.fail("could not find mask in merged set.")
def test_pixel_tick_mismatch():
mask_collection_0 = binary_mask_collection_2d()
mask_collection_0._pixel_ticks[Axes.X.value] = np.asarray(
mask_collection_0._pixel_ticks[Axes.X.value]) + 1
binary_arrays, physical_ticks = binary_arrays_2d()
binary_arrays_negated = [
np.bitwise_not(binary_array)
for binary_array in binary_arrays
]
mask_collection_1 = BinaryMaskCollection.from_binary_arrays_and_ticks(
binary_arrays_negated, None, physical_ticks, None)
with pytest.raises(ValueError):
SimpleMerge().run([mask_collection_0, mask_collection_1])
def test_physical_tick_mismatch():
mask_collection_0 = binary_mask_collection_2d()
mask_collection_0._physical_ticks[Coordinates.X] = np.asarray(
mask_collection_0._physical_ticks[Coordinates.X]) + 1
binary_arrays, physical_ticks = binary_arrays_2d()
binary_arrays_negated = [
np.bitwise_not(binary_array)
for binary_array in binary_arrays
]
mask_collection_1 = BinaryMaskCollection.from_binary_arrays_and_ticks(
binary_arrays_negated, None, physical_ticks, None)
with pytest.raises(ValueError):
SimpleMerge().run([mask_collection_0, mask_collection_1])
def test_shape_mismatch():
mask_collection_0 = binary_mask_collection_2d()
mask_collection_1 = binary_mask_collection_3d()
with pytest.raises(ValueError):
SimpleMerge().run([mask_collection_0, mask_collection_1])
authors: ["noreply@github.com"] | author_id: spacetx.noreply@github.com

---
blob_id: 476ccba49a18e2d3356d794c26547cdac706b13b | directory_id: 163bbb4e0920dedd5941e3edfb2d8706ba75627d | path: /Code/CodeRecords/2344/60581/246336.py | content_id: 5e8a23f13ab4797d8c3277856fd1dc425e03eb8f
detected_licenses: [] | license_type: no_license | repo_name: AdamZhouSE/pythonHomework
snapshot_id: a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | revision_id: ffc5606817a666aa6241cfab27364326f5c066ff | branch_name: refs/heads/master
visit_date: 2022-11-24T08:05:22.122011 | revision_date: 2020-07-28T16:21:24 | committer_date: 2020-07-28T16:21:24
github_id: 259,576,640 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,584 | extension: py
content:
import sys
lst = []
for line in sys.stdin:
if line.strip()=="":
break
lst.append(line)
input = []
# Input processing: split each line into uppercase words and integers
for i in range(0,len(lst)):
theLine = []
j = 0
while j < len(lst[i]):
str = ''
judgeWord = False
judgeNumber = False
if lst[i][j]>='A' and lst[i][j]<='Z':
judgeWord = True
str += lst[i][j]
while judgeWord:
j += 1
if j == len(lst[i]):
theLine.append(str)
break
if lst[i][j]>='A' and lst[i][j]<='Z':
str += lst[i][j]
else:
judgeWord = False
theLine.append(str)
if lst[i][j]>='0' and lst[i][j]<='9':
judgeNumber = True
str += lst[i][j]
while judgeNumber:
j += 1
if j == len(lst[i]):
theLine.append(int(str))
break
if lst[i][j]>='0' and lst[i][j]<='9':
str += lst[i][j]
else:
judgeNumber = False
theLine.append(int(str))
j += 1
input.append(theLine)
testNumber = input[0][0]
start = 1
count = 0
while count < testNumber:
numbers = input[start][0]
numberList = input[start+1].copy()
switchNumber = input[start+2][0]
outPut1 = numberList[0:switchNumber]
outPut2 = numberList[switchNumber:]
for i in range(0,len(outPut1)):
outPut2.append(outPut1[i])
count += 1
start += 3
for i in range(0,len(outPut2)):
print(outPut2[i],end=" ")
authors: ["1069583789@qq.com"] | author_id: 1069583789@qq.com

---
blob_id: 4a4a23e8b49a1a9d7382bc7f3f23df345aef6af1 | directory_id: 2d5cc685fd861c16a44e6578dff659bc197d44f8 | path: /ioflo/aid/__init__.py | content_id: bc7963f4557e7b278b7b88d81bcbd4bb25ffcb27
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-warranty-disclaimer", "MIT", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | repo_name: dreamerparadise/ioflo
snapshot_id: b642e3325760d124c8c608cefd3fb23c408785ff | revision_id: 177ac656d7c4ff801aebb0d8b401db365a5248ce | branch_name: refs/heads/master
visit_date: 2023-04-03T04:05:24.934544 | revision_date: 2020-11-19T22:07:49 | committer_date: 2020-11-19T22:07:49
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 173 | extension: py
content:
"""
aid package
helper and utility modules
"""
# frequent imports
from .odicting import odict, lodict, modict
from .osetting import oset
from .consoling import getConsole
authors: ["smith.samuel.m@gmail.com"] | author_id: smith.samuel.m@gmail.com

---
blob_id: eb86106a321ead443d3fef7263cc6bf014b7c342 | directory_id: ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | path: /res/packages/scripts/scripts/client/gui/shared/utils/requesters/IntSettingsRequester.py | content_id: 5167f471034e02e31839cd698900e678c4a0e863
detected_licenses: [] | license_type: no_license | repo_name: webiumsk/WOT-0.9.20.0
snapshot_id: de3d7441c5d442f085c47a89fa58a83f1cd783f2 | revision_id: 811cb4e1bca271372a1d837a268b6e0e915368bc | branch_name: refs/heads/master
visit_date: 2021-01-20T22:11:45.505844 | revision_date: 2017-08-29T20:11:38 | committer_date: 2017-08-29T20:11:38
github_id: 101,803,045 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: WINDOWS-1250 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,446 | extension: py
content:
# 2017.08.29 21:50:18 Central Europe (summer time)
# Embedded file name: scripts/client/gui/shared/utils/requesters/IntSettingsRequester.py
import BigWorld
import copy
import constants
from adisp import async, process
from debug_utils import LOG_ERROR, LOG_WARNING
from gui.shared.utils import code2str
class IntSettingsRequester(object):
"""
Setting dictionary presenting int settings keys by section names.
    Don't forget to duplicate new values in common.constants.INT_USER_SETTINGS_KEYS
"""
SETTINGS = {'VERSION': 0,
'GAME': 1,
'GRAPHICS': 2,
'SOUND': 3,
'CONTROLS': 4,
'AIM_ARCADE_1': 43,
'AIM_ARCADE_2': 44,
'AIM_ARCADE_3': 45,
'AIM_SNIPER_1': 46,
'AIM_SNIPER_2': 47,
'AIM_SNIPER_3': 48,
'MARKERS_ENEMY': 49,
'MARKERS_DEAD': 50,
'MARKERS_ALLY': 51,
'GUI_START_BEHAVIOR': 52,
'FEEDBACK': 53,
'EULA_VERSION': constants.USER_SERVER_SETTINGS.EULA_VERSION,
'GAMEPLAY': 55,
'FORT': 56,
'USERS_STORAGE_REV': 57,
'CONTACTS': 58,
'GAME_EXTENDED': constants.USER_SERVER_SETTINGS.GAME_EXTENDED,
'FALLOUT': 60,
'TUTORIAL': 61,
'AIM_ARCADE_4': 63,
'AIM_SNIPER_4': 64,
'MARKS_ON_GUN': constants.USER_SERVER_SETTINGS.HIDE_MARKS_ON_GUN,
'ONCE_ONLY_HINTS': 70,
'CAROUSEL_FILTER_1': 73,
'CAROUSEL_FILTER_2': 74,
'FALLOUT_CAROUSEL_FILTER_1': 75,
'FALLOUT_CAROUSEL_FILTER_2': 76,
'ENCYCLOPEDIA_RECOMMENDATIONS_1': 77,
'ENCYCLOPEDIA_RECOMMENDATIONS_2': 78,
'ENCYCLOPEDIA_RECOMMENDATIONS_3': 79,
'RANKED_CAROUSEL_FILTER_1': 80,
'RANKED_CAROUSEL_FILTER_2': 81,
'FEEDBACK_DAMAGE_INDICATOR': 82,
'FEEDBACK_DAMAGE_LOG': 83,
'FEEDBACK_BATTLE_EVENTS': 84}
def __init__(self):
self.__cache = dict()
def _response(self, resID, value, callback):
"""
        Common server response method. Must always be called after
        the server operation completes.
        @param resID: request result id
        @param value: requested value
        @param callback: function to be called after the operation completes
"""
if resID < 0:
            LOG_ERROR('[class %s] There is an error while getting data from cache: %s[%d]' % (self.__class__.__name__, code2str(resID), resID))
return callback(dict())
callback(value)
@async
def _requestCache(self, callback = None):
"""
Request data from server
"""
player = BigWorld.player()
if player is not None and player.intUserSettings is not None:
player.intUserSettings.getCache(lambda resID, value: self._response(resID, value, callback))
else:
LOG_WARNING('Player or intUserSettings is not defined', player, player.intUserSettings if player is not None else None)
return
@async
@process
def request(self, callback = None):
"""
        Public request method. Validates that the player entity can issue
        the request and passes itself as the single callback argument.
"""
self.__cache = yield self._requestCache()
callback(self)
def getCacheValue(self, key, defaultValue = None):
"""
Public interface method to get value from cache.
@param key: value's key in cache
@param defaultValue: default value if key does not exist
@return: value
"""
return self.__cache.get(key, defaultValue)
@process
def setSetting(self, key, value):
yield self._addIntSettings({self.SETTINGS[key]: int(value)})
@process
def setSettings(self, settings):
intSettings = dict(map(lambda item: (self.SETTINGS[item[0]], int(item[1])), settings.iteritems()))
yield self._addIntSettings(intSettings)
def getSetting(self, key, defaultValue = None):
return self.getCacheValue(self.SETTINGS[key], defaultValue)
@async
def _addIntSettings(self, settings, callback = None):
import BattleReplay
if not BattleReplay.g_replayCtrl.isPlaying:
self.__cache.update(settings)
BigWorld.player().intUserSettings.addIntSettings(settings, callback)
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\shared\utils\requesters\IntSettingsRequester.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:50:18 Central Europe (summer time)
authors: ["info@webium.sk"] | author_id: info@webium.sk

---
blob_id: ec51a2c268481d48cc82a004658d99ace07708a4 | directory_id: 6610e8b7d715d28df179217b0222b6a74d7b01f3 | path: /Contents/Libraries/Shared/omdb/api.py | content_id: e7c5f60a9f295f608c0cf9f27a8d959315e9eca3
detected_licenses: [] | license_type: no_license | repo_name: BigmIkeX/FMoviesPlus.bundle
snapshot_id: f479b96dabbda8a70944a7c44717a9b7a6abc5c0 | revision_id: 7789e6137df8a86f6c9f9a78fc478ab89696e3d0 | branch_name: refs/heads/master
visit_date: 2020-05-22T09:39:12.049063 | revision_date: 2019-05-09T20:54:25 | committer_date: 2019-05-09T20:54:25
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,242 | extension: py
content:
"""Public interface for omdb module
Accessible via:
import omdb
"""
from .client import Client
# Internal client instance used for our requests.
_client = Client()
def set_default(key, default):
"""Proxy method to internal client instance that sets default params
values.
"""
_client.set_default(key, default)
def get(**params):
"""Generic request."""
return _client.get(**params)
def search(string, **params):
"""Search by string."""
return get(search=string, **params)
def search_movie(string, **params):
"""Search movies by string."""
params['media_type'] = 'movie'
return search(string, **params)
def search_episode(string, **params):
"""Search episodes by string."""
params['media_type'] = 'episode'
return search(string, **params)
def search_series(string, **params):
"""Search series by string."""
params['media_type'] = 'series'
return search(string, **params)
def imdbid(string, **params):
"""Get by IMDB ID."""
return get(imdbid=string, **params)
def title(string, **params):
"""Get by title."""
return get(title=string, **params)
def request(**params):
"""Lower-level request."""
return _client.request(**params)
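
A brief usage sketch for the proxy module above; the `apikey` default is an assumption about the underlying `Client`, not something this file defines:

```python
# Hypothetical usage of the omdb proxy module above.
import omdb

omdb.set_default('apikey', 'YOUR_KEY')        # assumed Client default param
movie = omdb.title('True Grit')               # lookup by title
episodes = omdb.search_episode('Ozymandias')  # search with media_type='episode'
```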
authors: ["coderalphaakv@gmail.com"] | author_id: coderalphaakv@gmail.com

---
blob_id: e80b2c632811677af15c91426fdb116e3238f867 | directory_id: 1b082458d2815b7671a5d77e809834184e0dabfa | path: /vcr/request.py | content_id: bbe950b3edd673d60637aff3f6c8f797cf48f426
detected_licenses: ["MIT", "LicenseRef-scancode-unknown-license-reference"] | license_type: permissive | repo_name: afthill/vcrpy
snapshot_id: 6e4cc1cb842ddec75b73e579a12cf4ae7fb7ca8a | revision_id: 5aa2fb017f8aad3455ff3291f2f749a5a45664fe | branch_name: refs/heads/master
visit_date: 2022-05-01T04:32:57.896556 | revision_date: 2014-04-27T22:05:19 | committer_date: 2014-04-27T22:05:19
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,456 | extension: py
content:
class Request(object):
def __init__(self, protocol, host, port, method, path, body, headers):
self.protocol = protocol
self.host = host
self.port = port
self.method = method
self.path = path
self.body = body
# make headers a frozenset so it will be hashable
self.headers = frozenset(headers.items())
def add_header(self, key, value):
tmp = dict(self.headers)
tmp[key] = value
self.headers = frozenset(tmp.iteritems())
@property
def url(self):
return "{0}://{1}{2}".format(self.protocol, self.host, self.path)
def __key(self):
return (
self.host,
self.port,
self.method,
self.path,
self.body,
self.headers
)
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
return hash(self) == hash(other)
def __str__(self):
return "<Request ({0}) {1}>".format(self.method, self.url)
def __repr__(self):
return self.__str__()
def _to_dict(self):
return {
'protocol': self.protocol,
'host': self.host,
'port': self.port,
'method': self.method,
'path': self.path,
'body': self.body,
            # convert back to a plain dict so _from_dict can call headers.items()
            'headers': dict(self.headers),
}
@classmethod
def _from_dict(cls, dct):
return Request(**dct)
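
A hypothetical round-trip sketch for the class above; with `_to_dict` returning a plain headers dict, `_from_dict` can rebuild an equal, equally-hashed request:

```python
# Hypothetical round-trip check for the Request class above.
req = Request('http', 'example.com', 80, 'GET', '/path', '', {'accept': 'text/html'})
clone = Request._from_dict(req._to_dict())
assert req == clone and hash(req) == hash(clone)
print(req)  # <Request (GET) http://example.com/path>
```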
authors: ["me@kevinmccarthy.org"] | author_id: me@kevinmccarthy.org

---
blob_id: 67b2a52ec7eb04fbc8eb8dce6312fd2b614f37a3 | directory_id: ef2018f6e2c8ccabf89cc5f0780cd8ded8db50c0 | path: /textreuse/fine_tuning/original_skipthought.py | content_id: f9dc21db43a4e8a69e8f8ba58f37585b6f07d8f2
detected_licenses: [] | license_type: no_license | repo_name: emanjavacas/text-reuse
snapshot_id: a8d14bfb105b30a3f7cf732f999027186ea899bd | revision_id: 96e39a2e6982677f7d1f6b0f8397977155481b33 | branch_name: refs/heads/master
visit_date: 2022-01-05T22:13:54.800591 | revision_date: 2019-06-04T19:51:12 | committer_date: 2019-06-04T19:51:12
github_id: 115,821,869 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,384 | extension: py
content:
import os
import numpy as np
import skipthoughts
from textreuse.datasets import default_pairs, opusparcus_pairs
from textreuse.datasets import PATHS, OPUSPARCUS_PATH
if __name__ == '__main__':
model = skipthoughts.load_model()
for dataset in PATHS:
for split in PATHS[dataset]:
print(dataset, split)
sents, scores = zip(*default_pairs(PATHS[dataset][split]))
scores = np.array([float(s) for s in scores])
s1, s2 = zip(*sents)
s1, s2 = [' '.join(s) for s in s1], [' '.join(s) for s in s2]
s1, s2 = skipthoughts.encode(model, s1), skipthoughts.encode(model, s2)
with open('{}.{}.npz'.format(dataset.lower(), split), 'wb') as f:
np.savez(f, s1=s1, s2=s2, scores=scores)
for split in ('train', 'test', 'dev'):
print("OPUS: ", split)
sents, scores = zip(*opusparcus_pairs(OPUSPARCUS_PATH, split, maxlines=10000))
s1, s2 = zip(*sents)
s1, s2 = [' '.join(s) for s in s1], [' '.join(s) for s in s2]
s1, s2 = skipthoughts.encode(model, s1), skipthoughts.encode(model, s2)
with open('{}.{}.npz'.format('opusparcus', split), 'wb') as f:
if split == 'train':
np.savez(f, s1=s1, s2=s2)
else:
                np.savez(f, s1=s1, s2=s2, scores=np.array([float(s) for s in scores]))
authors: ["enrique.manjavacas@gmail.com"] | author_id: enrique.manjavacas@gmail.com

---
blob_id: a797ad4aa0901df584d197d3311931220a7fa063 | directory_id: b38c1d72d7c5d688a5f0942895a5f762880196b6 | path: /blog/migrations/0001_initial.py | content_id: c589b4b16fa74505689fe1d50d1e5effa764d2fc
detected_licenses: [] | license_type: no_license | repo_name: zdimon/angular
snapshot_id: c8a9874b427ca5bb4899c51c0dc0ba0d96191039 | revision_id: ea625523761d6b3300c9d9fb3c0aa7070fb634da | branch_name: refs/heads/master
visit_date: 2020-05-17T01:28:35.187898 | revision_date: 2015-07-17T15:29:16 | committer_date: 2015-07-17T15:29:16
github_id: 38,496,687 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,115 | extension: py
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200, verbose_name=b'Title')),
('created_at', models.DateTimeField(auto_now_add=True)),
('content', models.TextField(verbose_name=b'Content', blank=True)),
],
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
],
),
migrations.AddField(
model_name='post',
name='topic',
field=models.ForeignKey(verbose_name=b'Topic', to='blog.Topic'),
),
]
authors: ["zdimon77@gmail.com"] | author_id: zdimon77@gmail.com

---
blob_id: 595c01bd43d08b6b8b738334caae83010da66da1 | directory_id: 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | path: /alipay/aop/api/domain/AlipayMarketingDataDashboardApplyModel.py | content_id: 25bfe04a2359d1e8304fbfc2eb32f13f51baf790
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: alipay/alipay-sdk-python-all
snapshot_id: 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | revision_id: 1fad300587c9e7e099747305ba9077d4cd7afde9 | branch_name: refs/heads/master
visit_date: 2023-08-27T21:35:01.778771 | revision_date: 2023-08-23T07:12:26 | committer_date: 2023-08-23T07:12:26
github_id: 133,338,689 | star_events_count: 247 | fork_events_count: 70
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-04-25T04:54:02 | gha_created_at: 2018-05-14T09:40:54 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,390 | extension: py
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMarketingDataDashboardApplyModel(object):
def __init__(self):
self._dashboard_ids = None
@property
def dashboard_ids(self):
return self._dashboard_ids
@dashboard_ids.setter
def dashboard_ids(self, value):
if isinstance(value, list):
self._dashboard_ids = list()
for i in value:
self._dashboard_ids.append(i)
def to_alipay_dict(self):
params = dict()
if self.dashboard_ids:
if isinstance(self.dashboard_ids, list):
for i in range(0, len(self.dashboard_ids)):
element = self.dashboard_ids[i]
if hasattr(element, 'to_alipay_dict'):
self.dashboard_ids[i] = element.to_alipay_dict()
if hasattr(self.dashboard_ids, 'to_alipay_dict'):
params['dashboard_ids'] = self.dashboard_ids.to_alipay_dict()
else:
params['dashboard_ids'] = self.dashboard_ids
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingDataDashboardApplyModel()
if 'dashboard_ids' in d:
o.dashboard_ids = d['dashboard_ids']
return o
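
A hypothetical round trip through the serialization helpers above (the IDs are invented for illustration):

```python
# Hypothetical usage of the domain model defined above.
model = AlipayMarketingDataDashboardApplyModel()
model.dashboard_ids = ['2088000000001', '2088000000002']
params = model.to_alipay_dict()      # {'dashboard_ids': [...]}
clone = AlipayMarketingDataDashboardApplyModel.from_alipay_dict(params)
assert clone.dashboard_ids == model.dashboard_ids
```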
authors: ["liuqun.lq@alibaba-inc.com"] | author_id: liuqun.lq@alibaba-inc.com

---
blob_id: c699bc32127b8584ebb9ae1852fe8baa996cea26 | directory_id: 0f7d186164cafdd90b3e0baae0b5573fead049b4 | path: /多任务/线程/07-线程共享全局变量.py | content_id: 5b263873df2b9925669df27515c6809dbc31f50f
detected_licenses: [] | license_type: no_license | repo_name: DominicRen/Python-learning
snapshot_id: 603b31ff173cbc0782b3cf3da12d5e0834a74402 | revision_id: a9e7ded5fc0e2a760cee9887d87700769a3b9b6d | branch_name: refs/heads/master
visit_date: 2022-12-03T07:43:01.052201 | revision_date: 2019-04-05T04:10:42 | committer_date: 2019-04-05T04:10:42
github_id: 179,615,987 | star_events_count: 1 | fork_events_count: 2
gha_license_id: null | gha_event_created_at: 2022-11-28T19:13:32 | gha_created_at: 2019-04-05T03:54:26 | gha_language: HTML
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 490 | extension: py
content:
import threading
import time
# Define a global variable
g_num = 100
def test1():
global g_num
g_num += 1
print("-----in test1 g_num = %d-----" % g_num)
def test2():
print("-----in test2 g_num = %d-----" % g_num)
def main():
t1 = threading.Thread(target=test1)
t2 = threading.Thread(target=test2)
t1.start()
time.sleep(1)
t2.start()
time.sleep(1)
print("-----in main Thread g_num = %d-----" % g_num)
if __name__ == "__main__":
main()
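
A follow-up sketch (not part of the original lesson file): unsynchronized `counter += 1` is a read-modify-write, so concurrent increments can be lost; a `Lock` makes the update atomic.

```python
# Sketch: protect a shared global with a Lock once threads mutate it.
import threading

counter = 0
lock = threading.Lock()

def worker():
    global counter
    for _ in range(100000):
        with lock:              # drop the lock to observe lost updates
            counter += 1

threads = [threading.Thread(target=worker) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter)  # always 200000 with the lock held
```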
authors: ["ren_9510@163.com"] | author_id: ren_9510@163.com

---
blob_id: 7cbfc835d5ee7dd706a39b957c365a77352048c5 | directory_id: f64069f0dc90aa41947d1deca317aa37ad9c9080 | path: /CounterCode2015/Campers.py | content_id: 1be7dba22916115142a5cf672826cd46dc065798
detected_licenses: [] | license_type: no_license | repo_name: raunaklakhwani/Algorithms
snapshot_id: 9fdd7c3f5234b4682690ce3bededa365e75df1e5 | revision_id: a0b36f4f068c100b17fd27b3ed28816e4f3b4e4f | branch_name: refs/heads/master
visit_date: 2021-01-10T19:39:24.120026 | revision_date: 2015-12-20T14:14:48 | committer_date: 2015-12-20T14:14:48
github_id: 27,044,549 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 371 | extension: py
content:
# URL : https://www.hackerrank.com/contests/countercode/challenges/campers
from math import ceil
N,K = 8,2
snipers = [2,6]
snipers.sort()
def getBetween(a,b):
#print a,b
n = b-a+1
return int(ceil(n/2.0))
pre = 1
s = 0
for i in snipers:
s += getBetween(pre, i - 2)
#print s
pre = i + 2
s += getBetween(pre, N)
print s + K
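
A parameterized Python 3 rendering of the same computation (assumed equivalent to the Python 2 submission above, which hard-codes N, K = 8, 2 and snipers = [2, 6] as local test values):

```python
# Sketch: the contest logic above, parameterized and guarded against
# negative-length segments between adjacent snipers.
from math import ceil

def max_campers(n_spots, sniper_positions):
    snipers = sorted(sniper_positions)
    def between(a, b):          # max non-adjacent campers in spots a..b
        length = b - a + 1
        return int(ceil(length / 2.0)) if length > 0 else 0
    total, prev = 0, 1
    for pos in snipers:
        total += between(prev, pos - 2)
        prev = pos + 2
    total += between(prev, n_spots)
    return total + len(snipers)

print(max_campers(8, [2, 6]))   # 4, matching the original N, K = 8, 2 run
```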
authors: ["rlakhwan@cisco.com"] | author_id: rlakhwan@cisco.com

---
blob_id: d6cac20aa12d749297a858a8968dcdd59024491d | directory_id: ad846a63f010b808a72568c00de016fbe86d6c35 | path: /algotradingenv/lib/python3.8/site-packages/IPython/sphinxext/ipython_console_highlighting.py | content_id: e07bcc007d0673032ff83deee60ef9272ebd015b
detected_licenses: [] | license_type: no_license | repo_name: krishansinghal29/algotrade
snapshot_id: 74ee8b1c9113812b1c7c00ded95d966791cf76f5 | revision_id: 756bc2e3909558e9ae8b2243bb4dabc530f12dde | branch_name: refs/heads/master
visit_date: 2023-06-02T01:53:24.924672 | revision_date: 2021-06-10T09:17:55 | committer_date: 2021-06-10T09:17:55
github_id: 375,641,074 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 972 | extension: py
content:
"""
reST directive for syntax-highlighting ipython interactive sessions.
"""
from sphinx import highlighting
from IPython.lib.lexers import IPyLexer
def setup(app):
"""Setup as a sphinx extension."""
# This is only a lexer, so adding it below to pygments appears sufficient.
# But if somebody knows what the right API usage should be to do that via
# sphinx, by all means fix it here. At least having this setup.py
# suppresses the sphinx warning we'd get without it.
metadata = {"parallel_read_safe": True, "parallel_write_safe": True}
return metadata
# Register the extension as a valid pygments lexer.
# Alternatively, we could register the lexer with pygments instead. This would
# require using setuptools entrypoints: http://pygments.org/docs/plugins
ipy2 = IPyLexer(python3=False)
ipy3 = IPyLexer(python3=True)
highlighting.lexers["ipython"] = ipy2
highlighting.lexers["ipython2"] = ipy2
highlighting.lexers["ipython3"] = ipy3
authors: ["krishansinghal29@gmail.com"] | author_id: krishansinghal29@gmail.com

---
blob_id: bf77e59520837cbd065542476fdad5c4d1c0a67f | directory_id: d3e723fe5eb20b868ed6bc7e3d228eba368f22ef | path: /feedly/feeds/redis.py | content_id: 03755ba7a7ced91466e476809ae1a508fc9d880f
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | repo_name: intellisense/Feedly
snapshot_id: f33fa42a0f41b73e8e728813ed311d06bd8fb668 | revision_id: 4c5fb74aee56e5ff382417301a4825b151c474b0 | branch_name: refs/heads/master
visit_date: 2020-05-29T11:36:24.157740 | revision_date: 2014-02-24T16:27:33 | committer_date: 2014-02-24T16:27:33
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 916 | extension: py
content:
from feedly.feeds.base import BaseFeed
from feedly.storage.redis.activity_storage import RedisActivityStorage
from feedly.storage.redis.timeline_storage import RedisTimelineStorage
from feedly.serializers.activity_serializer import ActivitySerializer
class RedisFeed(BaseFeed):
timeline_storage_class = RedisTimelineStorage
activity_storage_class = RedisActivityStorage
activity_serializer = ActivitySerializer
    # : allows you to point to a different redis server, as specified in
# : settings.FEEDLY_REDIS_CONFIG
redis_server = 'default'
@classmethod
def get_timeline_storage(cls):
timeline_storage_options = {
'redis_server': cls.redis_server,
}
timeline_storage = cls.timeline_storage_class(
**timeline_storage_options)
return timeline_storage
# : clarify that this feed supports filtering
filtering_supported = True
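
A sketch of the intended extension point (assumed usage; `key_format` is an attribute I am assuming `BaseFeed` defines in this project):

```python
# Hypothetical subclass pointing a feed at a non-default redis server,
# as the redis_server comment above suggests.
class TimelineFeed(RedisFeed):
    key_format = 'feed:timeline:%(user_id)s'  # assumed BaseFeed attribute
    redis_server = 'timeline'                 # must exist in FEEDLY_REDIS_CONFIG
```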
authors: ["thierryschellenbach@gmail.com"] | author_id: thierryschellenbach@gmail.com

---
blob_id: 9fcfda872969a04db6c4892cc52345f61d0a5a36 | directory_id: 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | path: /homeassistant/components/overkiz/climate_entities/atlantic_pass_apc_heating_zone.py | content_id: b6835d93ebb01978f226f745fb84a212a4de8ecd
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: konnected-io/home-assistant
snapshot_id: 037f12c87bb79e19220192eb918e49db1b1a8b3e | revision_id: 2e65b77b2b5c17919939481f327963abdfdc53f0 | branch_name: refs/heads/dev
visit_date: 2023-05-11T08:57:41.891518 | revision_date: 2023-05-07T20:03:37 | committer_date: 2023-05-07T20:03:37
github_id: 109,931,626 | star_events_count: 24 | fork_events_count: 10
gha_license_id: Apache-2.0 | gha_event_created_at: 2023-02-22T06:24:01 | gha_created_at: 2017-11-08T05:27:21 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,956 | extension: py
content:
"""Support for Atlantic Pass APC Heating Control."""
from __future__ import annotations
from typing import Any, cast
from pyoverkiz.enums import OverkizCommand, OverkizCommandParam, OverkizState
from homeassistant.components.climate import (
PRESET_AWAY,
PRESET_COMFORT,
PRESET_ECO,
PRESET_HOME,
PRESET_SLEEP,
ClimateEntity,
ClimateEntityFeature,
HVACMode,
)
from homeassistant.const import ATTR_TEMPERATURE, UnitOfTemperature
from ..const import DOMAIN
from ..coordinator import OverkizDataUpdateCoordinator
from ..entity import OverkizEntity
OVERKIZ_TO_HVAC_MODE: dict[str, str] = {
OverkizCommandParam.AUTO: HVACMode.AUTO,
OverkizCommandParam.ECO: HVACMode.AUTO,
OverkizCommandParam.MANU: HVACMode.HEAT,
OverkizCommandParam.HEATING: HVACMode.HEAT,
OverkizCommandParam.STOP: HVACMode.OFF,
OverkizCommandParam.EXTERNAL_SCHEDULING: HVACMode.AUTO,
OverkizCommandParam.INTERNAL_SCHEDULING: HVACMode.AUTO,
OverkizCommandParam.COMFORT: HVACMode.HEAT,
}
HVAC_MODE_TO_OVERKIZ = {v: k for k, v in OVERKIZ_TO_HVAC_MODE.items()}
PRESET_EXTERNAL = "external"
PRESET_FROST_PROTECTION = "frost_protection"
OVERKIZ_TO_PRESET_MODES: dict[str, str] = {
OverkizCommandParam.OFF: PRESET_ECO,
OverkizCommandParam.STOP: PRESET_ECO,
OverkizCommandParam.MANU: PRESET_COMFORT,
OverkizCommandParam.COMFORT: PRESET_COMFORT,
OverkizCommandParam.ABSENCE: PRESET_AWAY,
OverkizCommandParam.ECO: PRESET_ECO,
OverkizCommandParam.FROSTPROTECTION: PRESET_FROST_PROTECTION,
OverkizCommandParam.EXTERNAL_SCHEDULING: PRESET_EXTERNAL,
OverkizCommandParam.INTERNAL_SCHEDULING: PRESET_HOME,
}
PRESET_MODES_TO_OVERKIZ = {v: k for k, v in OVERKIZ_TO_PRESET_MODES.items()}
OVERKIZ_TO_PROFILE_MODES: dict[str, str] = {
OverkizCommandParam.OFF: PRESET_SLEEP,
OverkizCommandParam.STOP: PRESET_SLEEP,
OverkizCommandParam.ECO: PRESET_ECO,
OverkizCommandParam.ABSENCE: PRESET_AWAY,
OverkizCommandParam.MANU: PRESET_COMFORT,
OverkizCommandParam.DEROGATION: PRESET_COMFORT,
OverkizCommandParam.EXTERNAL_SETPOINT: PRESET_EXTERNAL,
OverkizCommandParam.FROSTPROTECTION: PRESET_FROST_PROTECTION,
OverkizCommandParam.COMFORT: PRESET_COMFORT,
}
OVERKIZ_TEMPERATURE_STATE_BY_PROFILE: dict[str, str] = {
OverkizCommandParam.ECO: OverkizState.CORE_ECO_HEATING_TARGET_TEMPERATURE,
OverkizCommandParam.COMFORT: OverkizState.CORE_COMFORT_HEATING_TARGET_TEMPERATURE,
OverkizCommandParam.DEROGATION: OverkizState.CORE_DEROGATED_TARGET_TEMPERATURE,
}
class AtlanticPassAPCHeatingZone(OverkizEntity, ClimateEntity):
"""Representation of Atlantic Pass APC Heating Zone Control."""
_attr_hvac_modes = [*HVAC_MODE_TO_OVERKIZ]
_attr_preset_modes = [*PRESET_MODES_TO_OVERKIZ]
_attr_supported_features = (
ClimateEntityFeature.TARGET_TEMPERATURE | ClimateEntityFeature.PRESET_MODE
)
_attr_temperature_unit = UnitOfTemperature.CELSIUS
_attr_translation_key = DOMAIN
def __init__(
self, device_url: str, coordinator: OverkizDataUpdateCoordinator
) -> None:
"""Init method."""
super().__init__(device_url, coordinator)
        # The temperature sensor uses the same base_device_url with the n+1 index
self.temperature_device = self.executor.linked_device(
int(self.index_device_url) + 1
)
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
if temperature := self.temperature_device.states[OverkizState.CORE_TEMPERATURE]:
return cast(float, temperature.value)
return None
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
return OVERKIZ_TO_HVAC_MODE[
cast(str, self.executor.select_state(OverkizState.IO_PASS_APC_HEATING_MODE))
]
@property
def current_heating_profile(self) -> str:
"""Return current heating profile."""
return cast(
str,
self.executor.select_state(OverkizState.IO_PASS_APC_HEATING_PROFILE),
)
async def async_set_heating_mode(self, mode: str) -> None:
"""Set new heating mode and refresh states."""
await self.executor.async_execute_command(
OverkizCommand.SET_PASS_APC_HEATING_MODE, mode
)
if self.current_heating_profile == OverkizCommandParam.DEROGATION:
# If current mode is in derogation, disable it
await self.executor.async_execute_command(
OverkizCommand.SET_DEROGATION_ON_OFF_STATE, OverkizCommandParam.OFF
)
        # We also need to execute these 2 commands to make it work correctly
await self.executor.async_execute_command(
OverkizCommand.REFRESH_PASS_APC_HEATING_MODE
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_PASS_APC_HEATING_PROFILE
)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
await self.async_set_heating_mode(HVAC_MODE_TO_OVERKIZ[hvac_mode])
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
await self.async_set_heating_mode(PRESET_MODES_TO_OVERKIZ[preset_mode])
@property
def preset_mode(self) -> str:
"""Return the current preset mode, e.g., home, away, temp."""
heating_mode = cast(
str, self.executor.select_state(OverkizState.IO_PASS_APC_HEATING_MODE)
)
if heating_mode == OverkizCommandParam.INTERNAL_SCHEDULING:
# In Internal scheduling, it could be comfort or eco
return OVERKIZ_TO_PROFILE_MODES[
cast(
str,
self.executor.select_state(
OverkizState.IO_PASS_APC_HEATING_PROFILE
),
)
]
return OVERKIZ_TO_PRESET_MODES[heating_mode]
@property
def target_temperature(self) -> float:
"""Return hvac target temperature."""
current_heating_profile = self.current_heating_profile
if current_heating_profile in OVERKIZ_TEMPERATURE_STATE_BY_PROFILE:
return cast(
float,
self.executor.select_state(
OVERKIZ_TEMPERATURE_STATE_BY_PROFILE[current_heating_profile]
),
)
return cast(
float, self.executor.select_state(OverkizState.CORE_TARGET_TEMPERATURE)
)
async def async_set_temperature(self, **kwargs: Any) -> None:
"""Set new temperature."""
temperature = kwargs[ATTR_TEMPERATURE]
if self.hvac_mode == HVACMode.AUTO:
await self.executor.async_execute_command(
OverkizCommand.SET_COMFORT_HEATING_TARGET_TEMPERATURE,
temperature,
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_COMFORT_HEATING_TARGET_TEMPERATURE
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_TARGET_TEMPERATURE
)
else:
await self.executor.async_execute_command(
OverkizCommand.SET_DEROGATED_TARGET_TEMPERATURE,
temperature,
)
await self.executor.async_execute_command(
OverkizCommand.SET_DEROGATION_ON_OFF_STATE,
OverkizCommandParam.ON,
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_TARGET_TEMPERATURE
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_PASS_APC_HEATING_MODE
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_PASS_APC_HEATING_PROFILE
)
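
One subtlety worth noting: `OVERKIZ_TO_HVAC_MODE` maps several Overkiz parameters to the same `HVACMode`, so the inverted `HVAC_MODE_TO_OVERKIZ` keeps whichever key appears last (here, `HVACMode.AUTO` resolves to `INTERNAL_SCHEDULING` and `HVACMode.HEAT` to `COMFORT`). A standalone sketch of that behavior:

```python
# Illustration (not from Home Assistant): inverting a many-to-one dict
# keeps only the last key seen for each value.
overkiz_to_hvac = {"auto": "AUTO", "eco": "AUTO", "manu": "HEAT", "comfort": "HEAT"}
hvac_to_overkiz = {v: k for k, v in overkiz_to_hvac.items()}
print(hvac_to_overkiz)  # {'AUTO': 'eco', 'HEAT': 'comfort'}: the last key wins
```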
authors: ["noreply@github.com"] | author_id: konnected-io.noreply@github.com

---
blob_id: 241ee1d4fb08eab48847636ac8aa19413cffb686 | directory_id: 846b11ccf549aba144c1824a24292a4850860ca7 | path: /2-EstruturaDeDecisao/5.py | content_id: ecaf59269d84595e978da0a147427a07e1c37567
detected_licenses: [] | license_type: no_license | repo_name: paulocesarcsdev/ExerciciosPython
snapshot_id: 6d1feff293e7efc4cd3fbc62eee0add93f76db99 | revision_id: 25bfaa6dc5cb294242e478a2b253a8ca5d9c7078 | branch_name: refs/heads/master
visit_date: 2023-05-15T00:53:22.151884 | revision_date: 2021-06-10T03:04:04 | committer_date: 2021-06-10T03:04:04
github_id: 337,847,875 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 602 | extension: py
content:
'''
Write a program that reads two partial grades for a student. The program must compute the student's average and print:
The message "Aprovado" (passed), if the average is greater than or equal to seven;
The message "Reprovado" (failed), if the average is below seven;
The message "Aprovado com Distinção" (passed with distinction), if the average is exactly ten.
'''
notaUm = int(input('Primeira nota: '))
notaDois = int(input('Segunda nota: '))
media = (notaUm + notaDois) / 2
# Check the distinction case first so an average of ten is not also reported as a plain pass
if(media == 10):
    print('Aprovado com Distinção')
elif(media >= 7):
    print('Aprovado')
else:
    print('Reprovado')
authors: ["paulocesarcs.dev@gmail.com"] | author_id: paulocesarcs.dev@gmail.com

---
blob_id: 5b8904fa1f4944088bfa67fd47c8959d3ab4ffb4 | directory_id: 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | path: /google/ads/googleads/v9/resources/types/bidding_data_exclusion.py | content_id: 9fc75f0ae1869a171b9aa89c12199bed820c8544
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: GerhardusM/google-ads-python
snapshot_id: 73b275a06e5401e6b951a6cd99af98c247e34aa3 | revision_id: 676ac5fcb5bec0d9b5897f4c950049dac5647555 | branch_name: refs/heads/master
visit_date: 2022-07-06T19:05:50.932553 | revision_date: 2022-06-17T20:41:17 | committer_date: 2022-06-17T20:41:17
github_id: 207,535,443 | star_events_count: 0 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2019-09-10T10:58:55 | gha_created_at: 2019-09-10T10:58:55 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 4,930 | extension: py
content:
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.enums.types import advertising_channel_type
from google.ads.googleads.v9.enums.types import device
from google.ads.googleads.v9.enums.types import seasonality_event_scope
from google.ads.googleads.v9.enums.types import seasonality_event_status
__protobuf__ = proto.module(
package="google.ads.googleads.v9.resources",
marshal="google.ads.googleads.v9",
manifest={"BiddingDataExclusion",},
)
class BiddingDataExclusion(proto.Message):
r"""Represents a bidding data exclusion.
See "About data exclusions" at
https://support.google.com/google-ads/answer/10370710.
Attributes:
resource_name (str):
Immutable. The resource name of the data exclusion. Data
exclusion resource names have the form:
``customers/{customer_id}/biddingDataExclusions/{data_exclusion_id}``
data_exclusion_id (int):
Output only. The ID of the data exclusion.
scope (google.ads.googleads.v9.enums.types.SeasonalityEventScopeEnum.SeasonalityEventScope):
The scope of the data exclusion.
status (google.ads.googleads.v9.enums.types.SeasonalityEventStatusEnum.SeasonalityEventStatus):
Output only. The status of the data
exclusion.
start_date_time (str):
Required. The inclusive start time of the
data exclusion in yyyy-MM-dd HH:mm:ss format.
A data exclusion is backward looking and should
be used for events that start in the past and
end either in the past or future.
end_date_time (str):
Required. The exclusive end time of the data exclusion in
yyyy-MM-dd HH:mm:ss format.
The length of [start_date_time, end_date_time) interval must
be within (0, 14 days].
name (str):
The name of the data exclusion. The name can
be at most 255 characters.
description (str):
The description of the data exclusion. The
description can be at most 2048 characters.
devices (Sequence[google.ads.googleads.v9.enums.types.DeviceEnum.Device]):
If not specified, all devices will be
included in this exclusion. Otherwise, only the
specified targeted devices will be included in
this exclusion.
campaigns (Sequence[str]):
The data exclusion will apply to the campaigns listed when
the scope of this exclusion is CAMPAIGN. The maximum number
of campaigns per event is 2000. Note: a data exclusion with
both advertising_channel_types and campaign_ids is not
supported.
advertising_channel_types (Sequence[google.ads.googleads.v9.enums.types.AdvertisingChannelTypeEnum.AdvertisingChannelType]):
The data_exclusion will apply to all the campaigns under the
listed channels retroactively as well as going forward when
the scope of this exclusion is CHANNEL. The supported
advertising channel types are DISPLAY, SEARCH and SHOPPING.
Note: a data exclusion with both advertising_channel_types
and campaign_ids is not supported.
"""
resource_name = proto.Field(proto.STRING, number=1,)
data_exclusion_id = proto.Field(proto.INT64, number=2,)
scope = proto.Field(
proto.ENUM,
number=3,
enum=seasonality_event_scope.SeasonalityEventScopeEnum.SeasonalityEventScope,
)
status = proto.Field(
proto.ENUM,
number=4,
enum=seasonality_event_status.SeasonalityEventStatusEnum.SeasonalityEventStatus,
)
start_date_time = proto.Field(proto.STRING, number=5,)
end_date_time = proto.Field(proto.STRING, number=6,)
name = proto.Field(proto.STRING, number=7,)
description = proto.Field(proto.STRING, number=8,)
devices = proto.RepeatedField(
proto.ENUM, number=9, enum=device.DeviceEnum.Device,
)
campaigns = proto.RepeatedField(proto.STRING, number=10,)
advertising_channel_types = proto.RepeatedField(
proto.ENUM,
number=11,
enum=advertising_channel_type.AdvertisingChannelTypeEnum.AdvertisingChannelType,
)
__all__ = tuple(sorted(__protobuf__.manifest))
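
A hypothetical construction sketch for the message above (all field values are invented; proto-plus messages accept plain keyword construction, attribute assignment, and repeated-field append):

```python
# Hypothetical usage of the BiddingDataExclusion proto-plus message.
exclusion = BiddingDataExclusion(
    name="Tracking outage",
    scope=seasonality_event_scope.SeasonalityEventScopeEnum.SeasonalityEventScope.CAMPAIGN,
    start_date_time="2022-06-01 00:00:00",
    end_date_time="2022-06-03 00:00:00",
)
exclusion.campaigns.append("customers/1234567890/campaigns/987654321")
```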
authors: ["noreply@github.com"] | author_id: GerhardusM.noreply@github.com

---
blob_id: f3704e6ffdfc299ccbe48106a9f84b3607dfd38d | directory_id: a0b7d64efae7804d82aac288f1ab36a98339c32a | path: /all_repos/manual.py | content_id: d8d1cad94614b5206dd88c6a14ebceb5fabfaa03
detected_licenses: ["MIT"] | license_type: permissive | repo_name: asottile/all-repos
snapshot_id: a4e71e2deabd4f82af8d8dda43289a22e9d22b28 | revision_id: 54d272b276e391828b74efad9445f5eafc96c998 | branch_name: refs/heads/main
visit_date: 2023-09-01T14:39:35.708565 | revision_date: 2023-08-29T15:13:51 | committer_date: 2023-08-29T15:13:51
github_id: 99,260,344 | star_events_count: 481 | fork_events_count: 81
gha_license_id: MIT | gha_event_created_at: 2023-09-05T11:38:16 | gha_created_at: 2017-08-03T17:51:41 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,302 | extension: py
content:
from __future__ import annotations
import argparse
from typing import Sequence
from all_repos import autofix_lib
from all_repos.config import Config
def find_repos(_: Config) -> list[str]:
raise AssertionError('--repos is required')
def main(argv: Sequence[str] | None = None) -> int:
parser = argparse.ArgumentParser(
description='Interactively apply a manual change across repos.',
usage='%(prog)s [options]',
)
autofix_lib.add_fixer_args(parser)
parser.add_argument(
'--branch-name', default='all-repos-manual',
help='override the autofixer branch name (default `%(default)s`).',
)
parser.add_argument(
'--commit-msg', '--commit-message', required=True,
help='set the autofixer commit message.',
)
args = parser.parse_args(argv)
# force interactive
args.interactive = True
repos, config, commit, autofix_settings = autofix_lib.from_cli(
args,
find_repos=find_repos,
msg=args.commit_msg,
branch_name=args.branch_name,
)
autofix_lib.fix(
repos,
apply_fix=autofix_lib.shell,
config=config,
commit=commit,
autofix_settings=autofix_settings,
)
return 0
if __name__ == '__main__':
raise SystemExit(main())
authors: ["asottile@umich.edu"] | author_id: asottile@umich.edu

---
blob_id: ec2eb4274f2ab5c547553a43a9e89cf4b30551d7 | directory_id: ad357cfbec64afb8f4cc4043b212996768f9755c | path: /api/barriers/serializers/mixins.py | content_id: 0bf4ea3db58f439746d9c561c10c87ca5eb2913b
detected_licenses: ["MIT"] | license_type: permissive | repo_name: uktrade/market-access-api
snapshot_id: 6b4680e6455eb5c25480ccd3e3d9445654269f36 | revision_id: 4da26d1be53843d22411577409d9489010bdda09 | branch_name: refs/heads/master
visit_date: 2023-08-30T14:47:10.373148 | revision_date: 2023-08-29T13:58:08 | committer_date: 2023-08-29T13:58:08
github_id: 131,856,014 | star_events_count: 2 | fork_events_count: 3
gha_license_id: MIT | gha_event_created_at: 2023-09-14T08:04:42 | gha_created_at: 2018-05-02T13:38:37 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,729 | extension: py
content:
from rest_framework import serializers
class LocationFieldMixin(metaclass=serializers.SerializerMetaclass):
# Use metaclass to get the fields registered at the serializer where the mixin is used
# The field will need to be listed on the serializer using this mixin
location = serializers.SerializerMethodField()
def get_location(self, obj):
try:
return obj.location or ""
except AttributeError:
return ""
class EconomicAssessmentRatingFieldMixin(metaclass=serializers.SerializerMetaclass):
economic_assessment_rating = serializers.SerializerMethodField()
def get_economic_assessment_rating(self, obj):
assessment = obj.current_economic_assessment
if assessment:
return assessment.get_rating_display()
class EconomicAssessmentExplanationFieldMixin(
metaclass=serializers.SerializerMetaclass
):
economic_assessment_explanation = serializers.SerializerMethodField()
def get_economic_assessment_explanation(self, obj):
assessment = obj.current_economic_assessment
if assessment:
return assessment.explanation
class ValueToEconomyFieldMixin(metaclass=serializers.SerializerMetaclass):
"""Value of UK exports of affected goods to partner country"""
value_to_economy = serializers.SerializerMethodField()
def get_value_to_economy(self, obj):
assessment = obj.current_economic_assessment
if assessment:
return assessment.export_potential.get("uk_exports_affected")
class ImportMarketSizeFieldMixin(metaclass=serializers.SerializerMetaclass):
import_market_size = serializers.SerializerMethodField()
def get_import_market_size(self, obj):
"""Size of import market for affected product(s)"""
assessment = obj.current_economic_assessment
if assessment:
return assessment.export_potential.get("import_market_size")
class ValuationAssessmentRatingFieldMixin(metaclass=serializers.SerializerMetaclass):
valuation_assessment_rating = serializers.SerializerMethodField()
def get_valuation_assessment_rating(self, obj):
latest_valuation_assessment = obj.current_valuation_assessment
if latest_valuation_assessment:
return latest_valuation_assessment.rating
class ValuationAssessmentMidpointFieldMixin(metaclass=serializers.SerializerMetaclass):
valuation_assessment_midpoint = serializers.SerializerMethodField()
valuation_assessment_midpoint_value = serializers.SerializerMethodField()
def get_valuation_assessment_midpoint(self, obj):
latest_valuation_assessment = obj.current_valuation_assessment
if latest_valuation_assessment:
return latest_valuation_assessment.midpoint
def get_valuation_assessment_midpoint_value(self, obj):
latest_valuation_assessment = obj.current_valuation_assessment
if latest_valuation_assessment:
return latest_valuation_assessment.midpoint_value
class ValuationAssessmentExplanationFieldMixin(
metaclass=serializers.SerializerMetaclass
):
valuation_assessment_explanation = serializers.SerializerMethodField()
def get_valuation_assessment_explanation(self, obj):
latest_valuation_assessment = obj.current_valuation_assessment
if latest_valuation_assessment:
return latest_valuation_assessment.explanation
class AssessmentFieldsMixin(
EconomicAssessmentRatingFieldMixin,
EconomicAssessmentExplanationFieldMixin,
ValueToEconomyFieldMixin,
ImportMarketSizeFieldMixin,
ValuationAssessmentRatingFieldMixin,
ValuationAssessmentMidpointFieldMixin,
ValuationAssessmentExplanationFieldMixin,
):
pass
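
A hypothetical serializer that consumes the combined mixin; the `Barrier` model and the field list are assumptions, not taken from this file. Per the comment on `LocationFieldMixin`, the mixin-declared fields still have to be listed on the consuming serializer:

```python
# Hypothetical consumer of AssessmentFieldsMixin (model name is assumed).
from rest_framework import serializers

class BarrierAssessmentSerializer(AssessmentFieldsMixin, serializers.ModelSerializer):
    class Meta:
        model = Barrier  # assumed model from the project
        fields = (
            "id",
            "economic_assessment_rating",
            "economic_assessment_explanation",
            "value_to_economy",
            "import_market_size",
            "valuation_assessment_rating",
            "valuation_assessment_midpoint",
            "valuation_assessment_explanation",
        )
```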
authors: ["noreply@github.com"] | author_id: uktrade.noreply@github.com

---
blob_id: 1c9687591a895a84bf4597f263816a8942879e51 | directory_id: ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | path: /feel_first_hand/work_different_thing_of_next_company.py | content_id: 1f68da0b0954abd51549c98382a5e50aac67ba34
detected_licenses: [] | license_type: no_license | repo_name: JingkaiTang/github-play
snapshot_id: 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | revision_id: 51b550425a91a97480714fe9bc63cb5112f6f729 | branch_name: refs/heads/master
visit_date: 2021-01-20T20:18:21.249162 | revision_date: 2016-08-19T07:20:12 | committer_date: 2016-08-19T07:20:12
github_id: 60,834,519 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 262 | extension: py
content:
#! /usr/bin/env python
def place_or_life(str_arg):
right_point_or_important_work(str_arg)
print('case')
def right_point_or_important_work(str_arg):
print(str_arg)
if __name__ == '__main__':
place_or_life('large_government_and_different_man')
authors: ["jingkaitang@gmail.com"] | author_id: jingkaitang@gmail.com

---
blob_id: f5b7ecf5498c79c23d17b6a98ceb9627c189511f | directory_id: 2612f336d667a087823234daf946f09b40d8ca3d | path: /python/lib/Lib/site-packages/django/core/management/commands/diffsettings.py | content_id: 98b53b405d24da9b10a77e7b0bbc578bc9603626
detected_licenses: ["Apache-2.0"] | license_type: permissive | repo_name: tnorbye/intellij-community
snapshot_id: df7f181861fc5c551c02c73df3b00b70ab2dd589 | revision_id: f01cf262fc196bf4dbb99e20cd937dee3705a7b6 | branch_name: refs/heads/master
visit_date: 2021-04-06T06:57:57.974599 | revision_date: 2018-03-13T17:37:00 | committer_date: 2018-03-13T17:37:00
github_id: 125,079,130 | star_events_count: 2 | fork_events_count: 0
gha_license_id: Apache-2.0 | gha_event_created_at: 2018-03-13T16:09:41 | gha_created_at: 2018-03-13T16:09:41 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,296 | extension: py
content:
from django.core.management.base import NoArgsCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
"Converts a module namespace to a Python dictionary. Used by get_settings_diff."
return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)])
class Command(NoArgsCommand):
help = """Displays differences between the current settings.py and Django's
default settings. Settings that don't appear in the defaults are
followed by "###"."""
requires_model_validation = False
def handle_noargs(self, **options):
# Inspired by Postfix's "postconf -n".
from django.conf import settings, global_settings
# Because settings are imported lazily, we need to explicitly load them.
settings._setup()
user_settings = module_to_dict(settings._wrapped)
default_settings = module_to_dict(global_settings)
output = []
keys = user_settings.keys()
keys.sort()
for key in keys:
if key not in default_settings:
output.append("%s = %s ###" % (key, user_settings[key]))
elif user_settings[key] != default_settings[key]:
output.append("%s = %s" % (key, user_settings[key]))
return '\n'.join(output)
authors: ["dmitry.trofimov@jetbrains.com"] | author_id: dmitry.trofimov@jetbrains.com

---
blob_id: 917995ac73a5bf09f647a885d5c631b5e4545361 | directory_id: 5f92dd1e312a22c84eb826035b859b5fbffb7f4d | path: /FoTStreamServer/conceptdriftOld/algorithms/cusum.py | content_id: 05f8ff161141730c3fdcd0d7a37383d8942f8561
detected_licenses: [] | license_type: no_license | repo_name: BrennoMello/FoT-Simulation-SDN-ML
snapshot_id: a5a967d31c8df99d2f77978efa36eb8ed8d09928 | revision_id: bfaa7439db3d3b4b2470e342c5fdbe41106ed700 | branch_name: refs/heads/master
visit_date: 2023-05-08T10:01:01.099838 | revision_date: 2021-06-03T02:33:52 | committer_date: 2021-06-03T02:33:52
github_id: 351,583,489 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,947 | extension: py
content:
"""
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
---
*** The Cumulative Sum (CUSUM) Method Implementation ***
Paper: Page, Ewan S. "Continuous inspection schemes."
Published in: Biometrika 41.1/2 (1954): 100-115.
URL: http://www.jstor.org/stable/2333009
"""
from .detector import SuperDetector
class CUSUM(SuperDetector):
"""The Cumulative Sum (CUSUM) drift detection method class."""
def __init__(self, min_instance=30, delta=0.005, lambda_=50):
super().__init__()
self.MINIMUM_NUM_INSTANCES = min_instance
self.m_n = 1
self.x_mean = 0
self.sum = 0
self.delta = delta
self.lambda_ = lambda_
def run(self, pr):
# pr = 1 if pr is False else 0
warning_status = False
drift_status = False
# 1. UPDATING STATS
self.x_mean = self.x_mean + (pr - self.x_mean) / self.m_n
self.sum = self.sum + pr - self.x_mean - self.delta
self.m_n += 1
# 2. UPDATING WARNING AND DRIFT STATUSES
if self.m_n >= self.MINIMUM_NUM_INSTANCES:
if abs(self.sum) > self.lambda_:
drift_status = True
#return warning_status, drift_status
return drift_status
def reset(self):
super().reset()
self.m_n = 1
self.x_mean = 0
self.sum = 0
def get_settings(self):
return [
str(self.MINIMUM_NUM_INSTANCES)
+ "."
+ str(self.delta)
+ "."
+ str(self.lambda_),
"$n_{min}$:"
+ str(self.MINIMUM_NUM_INSTANCES)
+ ", "
+ "$\delta$:"
+ str(self.delta).upper()
+ ", "
+ "$\lambda$:"
+ str(self.lambda_).upper(),
]
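
A hypothetical driver loop for the detector above (the synthetic error stream is invented; the dashed-off idea is a 0/1 error signal whose rate jumps partway through):

```python
# Sketch: feed a stream of prediction errors to CUSUM and watch for drift.
detector = CUSUM(min_instance=30, delta=0.005, lambda_=50)
stream = [0.0] * 400 + [1.0] * 200   # error signal jumps at instance 400
for t, pr in enumerate(stream):
    if detector.run(pr):
        print("drift detected at instance", t)
        detector.reset()
```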
authors: ["None"] | author_id: None

---
blob_id: 8ed210de7b773b2d858db28b0e98b28b8888fd8a | directory_id: b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | path: /examples/pwr_run/checkpointing/final/predict_error/job66.py | content_id: 764442a07ebe3d86ebd0814af1b025e37c398848
detected_licenses: ["MIT"] | license_type: permissive | repo_name: boringlee24/keras_old
snapshot_id: 3bf7e3ef455dd4262e41248f13c04c071039270e | revision_id: 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | branch_name: refs/heads/master
visit_date: 2021-11-21T03:03:13.656700 | revision_date: 2021-11-11T21:57:54 | committer_date: 2021-11-11T21:57:54
github_id: 198,494,579 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 7,630 | extension: py
content:
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 32
args_lr = 0.0005
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_predict_error/' + job_name + '*'
total_epochs = 14
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_predict_error/' + job_name + '_' + str(current_epoch) + '.h5')
print('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
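# A kill -15 (SIGTERM) to this pid exercises the checkpoint path above
# (hypothetical manual test; in normal runs the scheduler on args.node sends it).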
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
[
"baolin.li1994@gmail.com"
] |
baolin.li1994@gmail.com
|
4e3212529919c336eaa65fb17c0588a940065f09
|
8e1d25698853dd5bd1fe89327810afaf22aa7306
|
/companies/api/urls.py
|
d073232701aab54911a84cdb672e6d455d5c9203
|
[] |
no_license
|
briancaffey/demo
|
3351157c7628b58347dd5d3f749429c5159c8616
|
118b022f9a5558c9dacc0681e8c9593c462d36a4
|
refs/heads/master
| 2020-06-26T23:14:59.745396
| 2017-07-16T20:23:04
| 2017-07-16T20:23:04
| 97,034,442
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
from django.conf.urls import url
from .views import (
CompanyListAPIView,
)
urlpatterns = [
url(r'^api/$', CompanyListAPIView.as_view(), name='company_list'),
]
|
[
"briancaffey2010@gmail.com"
] |
briancaffey2010@gmail.com
|
037b4a377cefa4f9c913c9b919835caeb0f89ad1
|
65f378ce824afb65ff44a9e7634eb5e403a15250
|
/discussions/01-python_basics/tests/q32.py
|
758113e39d3b23c219799996407eb3ed034b979d
|
[] |
no_license
|
ucsd-ets/dsc10-2021-sp-public
|
c067f00a853f12510ac3a897a40296e00b8db247
|
d4af09fedd51f988de8136173ba40dc0d0e19c2d
|
refs/heads/master
| 2023-05-25T04:54:36.076251
| 2021-06-06T23:25:31
| 2021-06-06T23:25:31
| 351,519,451
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
test = { 'name': 'q32',
'points': 1,
'suites': [{'cases': [{'code': '>>> county_most_fires == "Los Angeles County"\nTrue', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
|
[
"nixrunner@localhost"
] |
nixrunner@localhost
|
28d69c649d104842a33a3b0faa6f551bdb3b6b1f
|
64c90b077af7feed265041c0e94cbeee4be92f01
|
/tools~/gen_docs.py
|
96f62efb1a882dda15b4f1aafa1de18cfee027cc
|
[
"Zlib"
] |
permissive
|
Fopoon/Fopoon.ScenesKit
|
5713dd7f420a22079fd9ba57b5984836ec7ca35d
|
acb45bc02ea6f091f7ec36e232a3bc232f528992
|
refs/heads/master
| 2022-11-26T19:24:40.789089
| 2020-08-10T04:20:51
| 2020-08-10T04:20:51
| 286,259,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,089
|
py
|
from json import loads
from pathlib import Path
from typing import Dict
from scriptgen import \
StringBuilder, \
diff_text, \
interpolate_text, \
write_text_file
def get_text(
template: str,
expressions: Dict[str, str],
template_name: str = None
) -> str:
sb = StringBuilder()
# Add a timestamp when the template is a markdown file.
if template_name and template_name.casefold().endswith(".tmd"):
from scriptgen.templates.markdown import markdown_autogen
sb.wb(markdown_autogen())
sb.nl()
# Replace placeholders.
# i.e. replace placeholders found in the text with values found in the expressions dictionary.
# ex: ${SOME_KEY} → ACTUAL_VALUE
interpolated_text = interpolate_text(template, expressions)
# Write the interpolated text into the builder.
sb.wl(interpolated_text)
return str(sb)
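# Minimal usage sketch (hypothetical template and key, not from VALUES.t.json):
#   get_text("Hello ${NAME}!", {"NAME": "world"}, template_name="greeting.txt")
# would return "Hello world!\n"; only names ending in ".tmd" gain the markdown header.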
if __name__ == "__main__":
fdp = Path(__file__).parent
templates_dir_name = "templates"
# tools~/templates/VALUES.t.json
json_path = fdp / templates_dir_name / "VALUES.t.json"
json_text = json_path.read_text()
json = loads(json_text)
templates = {
# tools~/templates/README.tmd → README.md
(fdp / templates_dir_name / "README.tmd"): (fdp.parent / "README.md"),
# tools~/templates/package.tjson → package.json
(fdp / templates_dir_name / "package.tjson"): (fdp.parent / "package.json")
}
for template_path, target_path in templates.items():
template_text = template_path.read_text()
text = get_text(
template_text,
json,
template_name=template_path.name)
write_text_file(
text,
target_path,
# checks for relevant changes between two texts to determine whether to skip writing into a file.
diff_func=diff_text,
# filters out lines when checking for differences.
filter_func=lambda line, idx: idx < 5 and line.startswith("[//]: # (Auto-generated"),
log_func=lambda message: print(message)
)
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
5124b6b8ae0f31df7809797678631fdaaf6b2c24
|
fd326562890d4f1987c384fc7c60374938231222
|
/PythonAdvanced/PongGame/game.py
|
3e9796356f4d3b20fa9b545010373e402f2496ff
|
[] |
no_license
|
miro-lp/SoftUni
|
cc3b0ff742218c9ceaf93f05c319ccfeed5bc8a4
|
283d9328537919de49f7f6a301e58593bae9ca2a
|
refs/heads/main
| 2023-08-23T21:22:07.856226
| 2021-08-25T15:10:18
| 2021-08-25T15:10:18
| 318,134,101
| 2
| 1
| null | 2021-08-10T12:51:54
| 2020-12-03T09:03:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,113
|
py
|
class Game:
def __init__(self):
self.width = 800
self.high = 400
self.__ball_pos = (0, 0)
self.__ball_delta_x = 1
self.__ball_delta_y = 1
self.paddle_a_pos = (-self.width / 2 + 50, 0)
self.paddle_b_pos = (self.width / 2 - 50, 0)
self.paddle_height = self.high / 4
self.paddle_width = 20
self.points_a = 0
self.points_b = 0
def tick(self):
self.__border_check()
self.__paddle_hit()
x, y = self.__ball_pos
self.__ball_pos = (x + self.__ball_delta_x, y + self.__ball_delta_y)
def __border_check(self):
x, y = self.__ball_pos
if abs(y) >= self.high / 2:
self.__ball_delta_y *= -1
if x <= -self.width / 2:
self.points_b += 1
self.__ball_pos = (0, 0)
if x >= self.width / 2:
self.points_a += 1
self.__ball_pos = (0, 0)
def ball_pos(self):
return self.__ball_pos
def paddle_a_up(self):
x, y = self.paddle_a_pos
if y <= self.high / 2 - self.paddle_height / 2:
self.paddle_a_pos = (x, y + 20)
def paddle_a_down(self):
x, y = self.paddle_a_pos
if y >= -self.high / 2 + self.paddle_height / 2:
self.paddle_a_pos = (x, y - 20)
def paddle_b_up(self):
x, y = self.paddle_b_pos
if y <= self.high / 2 - self.paddle_height / 2:
self.paddle_b_pos = (x, y + 20)
def paddle_b_down(self):
x, y = self.paddle_b_pos
if y >= -self.high / 2 + self.paddle_height / 2:
self.paddle_b_pos = (x, y - 20)
def __paddle_hit(self):
x, y = self.__ball_pos
a_x, a_y = self.paddle_a_pos
is_paddle_a_hit = (a_x+self.paddle_width == x and a_y - self.paddle_height / 2 <= y <= a_y + self.paddle_height / 2)
b_x, b_y = self.paddle_b_pos
is_paddle_b_hit = (b_x-self.paddle_width == x and b_y - self.paddle_height / 2 <= y <= b_y + self.paddle_height / 2)
if is_paddle_b_hit or is_paddle_a_hit:
self.__ball_delta_x *= -1
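# Minimal driver sketch (assumed usage; the original project presumably runs the
# game from a separate GUI loop):
if __name__ == '__main__':
    game = Game()
    for _ in range(500):
        game.tick()
    print(game.points_a, game.points_b, game.ball_pos())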
|
[
"miro_lp@abv.bg"
] |
miro_lp@abv.bg
|
3ebabab1cc0ec29075386911102d4c4d892eed04
|
16ba38ef11b82e93d3b581bbff2c21e099e014c4
|
/haohaninfo/即時範例/61.py
|
6e90415a943dd91a9a3bc4319e97582c7b9dabb8
|
[] |
no_license
|
penguinwang96825/Auto-Trading
|
cb7a5addfec71f611bdd82534b90e5219d0602dd
|
a031a921dbc036681c5054f2c035f94499b95d2e
|
refs/heads/master
| 2022-12-24T21:25:34.835436
| 2020-09-22T09:59:56
| 2020-09-22T09:59:56
| 292,052,986
| 2
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
# -*- coding: UTF-8 -*-
# Fetch quote information; see tip 51 for details
execfile('function.py')
# Get the five best bid/ask price and volume levels
for i in getUpDn5():
UpDn5Info=i.split(',')
UpDn5Time=UpDn5Info[0]
totalUpPrice=0
totalUpQty=0
totalDnPrice=0
totalDnQty=0
# Compute the weighted average price over the five levels on each side
for j in range(0,5):
totalDnPrice+=int(UpDn5Info[1+2*j])*int(UpDn5Info[2+2*j])
totalDnQty+=int(UpDn5Info[2+2*j])
totalUpPrice+=int(UpDn5Info[11+2*j])*int(UpDn5Info[12+2*j])
totalUpQty+=int(UpDn5Info[12+2*j])
print UpDn5Time,"avgUpPrice",float(totalUpPrice)/totalUpQty,"avgDnPrice",float(totalDnPrice)/totalDnQty
|
[
"penguinwang@smail.nchu.edu.tw"
] |
penguinwang@smail.nchu.edu.tw
|
85cee0f893635d531b95b5d312f8e209a6f535dc
|
fefb1e9b0b736da4e49d7754f8d1dbaf37f2fa6a
|
/.history/7_3_2_20210208211227.py
|
2021ca036ff147a699a385552cc614517dc2ef19
|
[] |
no_license
|
wh-debug/python
|
5a78a2227874ebc400d075197de0adab9f55d187
|
1467eeda670f170e6e2d7c0a0550f713f1ee9d75
|
refs/heads/master
| 2023-03-12T22:08:12.608882
| 2021-02-17T09:49:52
| 2021-02-17T09:49:52
| 334,032,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
'''
Author: your name
Date: 2021-02-08 21:11:05
LastEditTime: 2021-02-08 21:12:27
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \python\7_3_2.py
'''
'''
_oo0oo_
o8888888o
88" . "88
(| -_- |)
0\ = /0
___/`---'\___
.' \\| |// '.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' |_/ |
\ .-\__ '-' ___/-. /
___'. .' /--.--\ `. .'___
."" '< `.___\_<|>_/___.' >' "".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `_. \_ __\ /__ _/ .-` / /
=====`-.____`.___ \_____/___.-`___.-'=====
`=---='
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Buddha bless: never down, never a bug
'''
'''
Author: your name
Date: 2021-02-08 21:11:05
LastEditTime: 2021-02-08 21:11:05
LastEditors: your name
Description: In User Settings Edit
FilePath: \python\7_3_2.py
'''
'''Remove all list elements equal to a specific element'''
|
[
"1813763848@qq.com"
] |
1813763848@qq.com
|
9961f02f3cc87bd6bb0fd66daadb71f7fbd6f526
|
3a642fa1fc158d3289358b53770cdb39e5893711
|
/src/xlsxwriter/test/worksheet/test_write_sheet_views2.py
|
1ac53cd2d1fb5d98adfaf23b01058d89c365b7e0
|
[] |
no_license
|
andbar-ru/traceyourself.appspot.com
|
d461277a3e6f8c27a651a1435f3206d7b9307d9f
|
5f0af16ba2727faceb6b7e1b98073cd7d3c60d4c
|
refs/heads/master
| 2020-07-23T14:58:21.511328
| 2016-12-26T22:03:01
| 2016-12-26T22:03:01
| 73,806,841
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,464
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestWriteSheetViews(unittest.TestCase):
"""
Test the Worksheet _write_sheet_views() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_views1(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(1, 0)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="1" topLeftCell="A2" activePane="bottomLeft" state="frozen"/><selection pane="bottomLeft"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_views2(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(0, 1)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1" topLeftCell="B1" activePane="topRight" state="frozen"/><selection pane="topRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_views3(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(1, 1)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1" ySplit="1" topLeftCell="B2" activePane="bottomRight" state="frozen"/><selection pane="topRight" activeCell="B1" sqref="B1"/><selection pane="bottomLeft" activeCell="A2" sqref="A2"/><selection pane="bottomRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_views4(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes('G4')
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6" ySplit="3" topLeftCell="G4" activePane="bottomRight" state="frozen"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_views5(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(3, 6, 3, 6, 1)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6" ySplit="3" topLeftCell="G4" activePane="bottomRight" state="frozenSplit"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
|
[
"andrey@voktd-andbar.int.kronshtadt.ru"
] |
andrey@voktd-andbar.int.kronshtadt.ru
|
8222ad5dbb1fe9a2d9066677863f31844b5d765d
|
ef78bd58d61002f45778a40da7759ed0b1998cd3
|
/code/transforms/subspacegaussianization.py
|
121e6249045137472b93b36408bb02b129606325
|
[
"MIT"
] |
permissive
|
afcarl/isa
|
61e85c0c790c7cc357e0c29fc5bda948e9c77ce4
|
f0497c0cc7bd72e0de7f4f9a8da40e214c22abe9
|
refs/heads/master
| 2020-03-19T21:36:06.716167
| 2013-01-28T18:32:30
| 2013-01-28T18:32:30
| 136,944,562
| 1
| 0
| null | 2018-06-11T15:20:45
| 2018-06-11T15:20:44
| null |
UTF-8
|
Python
| false
| false
| 3,769
|
py
|
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
from transform import Transform
from radialgaussianization import RadialGaussianization
from numpy.linalg import inv, slogdet
from numpy import vstack, dot, zeros
from collections import Callable
class SubspaceGaussianization(Transform):
def __init__(self, isa):
"""
@type isa: L{ISA}
@param isa: ISA model used for Gaussianization
"""
self.isa = isa
def apply(self, data):
"""
Subspace Gaussianize data by first applying a linear transformation and then
radially Gaussianizing each subspace. If C{isa} is overcomplete, C{data} has
to be completed by the nullspace representation, that is, C{data} should
have the dimensionality of the hidden states.
@type data: array_like
@param data: data points stored in columns
"""
# complete basis
A = vstack([self.isa.A, self.isa.nullspace_basis()])
# linearly transform data
data = dot(inv(A), data)
data_rg = []
if isinstance(self.isa.subspaces, Callable):
subspaces = self.isa.subspaces()
else:
subspaces = self.isa.subspaces
length = len(str(len(subspaces)))
if Transform.VERBOSITY > 0:
print ('{0:>' + str(length) + '}/{1}').format(0, len(subspaces)),
# TODO: parallelize
for i, gsm in enumerate(subspaces):
# radially Gaussianize subspace
data_rg.append(
RadialGaussianization(gsm).apply(data[:gsm.dim]))
data = data[gsm.dim:]
if Transform.VERBOSITY > 0:
print (('\b' * (length * 2 + 2)) + '{0:>' + str(length) + '}/{1}').format(i + 1, len(subspaces)),
if Transform.VERBOSITY > 0:
print
return vstack(data_rg)
def inverse(self, data):
"""
Apply inverse subspace Gaussianization.
"""
data_irg = []
if isinstance(self.isa.subspaces, Callable):
subspaces = self.isa.subspaces()
else:
subspaces = self.isa.subspaces
length = len(str(len(subspaces)))
if Transform.VERBOSITY > 0:
print ('{0:>' + str(length) + '}/{1}').format(0, len(subspaces)),
# TODO: parallelize
for i, gsm in enumerate(subspaces):
# inverse radially Gaussianize subspace
data_irg.append(
RadialGaussianization(gsm).inverse(data[:gsm.dim]))
data = data[gsm.dim:]
if Transform.VERBOSITY > 0:
print (('\b' * (length * 2 + 2)) + '{0:>' + str(length) + '}/{1}').format(i + 1, len(subspaces)),
if Transform.VERBOSITY > 0:
print
data = vstack(data_irg)
# completed filter matrix
# A = vstack([self.isa.A, self.isa.nullspace_basis()])
# linearly transform data
return dot(self.isa.A, data)
def logjacobian(self, data):
"""
Returns the log-determinant of the Jacobian matrix evaluated at the given
data points.
@type data: array_like
@param data: data points stored in columns
@rtype: ndarray
@return: the logarithm of the Jacobian determinants
"""
# completed filter matrix
A = vstack([self.isa.A, self.isa.nullspace_basis()])
W = inv(A)
# determinant of linear transformation
logjacobian = zeros([1, data.shape[1]]) + slogdet(W)[1]
# linearly transform data
data = dot(W, data)
if isinstance(self.isa.subspaces, Callable):
subspaces = self.isa.subspaces()
else:
subspaces = self.isa.subspaces
length = len(str(len(subspaces)))
if Transform.VERBOSITY > 0:
print ('{0:>' + str(length) + '}/{1}').format(0, len(subspaces)),
# TODO: parallelize
for i, gsm in enumerate(subspaces):
logjacobian += RadialGaussianization(gsm).logjacobian(data[:gsm.dim])
data = data[gsm.dim:]
if Transform.VERBOSITY > 0:
print (('\b' * (length * 2 + 2)) + '{0:>' + str(length) + '}/{1}').format(i + 1, len(subspaces)),
if Transform.VERBOSITY > 0:
print
return logjacobian
|
[
"lucas@theis.io"
] |
lucas@theis.io
|
413fae5d3fbae535a19798130e74abb371a25cb9
|
020da726bb378ea9fe58af19caadad8e02bd4e27
|
/CODES_RCM/Calcul_Heat_Wave_RCM.py
|
31eb9bfd038cd5ce54ea25386c8f2ed2b0b10913
|
[] |
no_license
|
guimeto/Heat_Wave_Codes
|
06dc19f9547b2cc083db9fd7bd44d22fad072a37
|
85a1c6ae582818a5694aef9b17fc7f3578b3af16
|
refs/heads/master
| 2020-06-22T05:44:11.064070
| 2020-04-10T18:11:19
| 2020-04-10T18:11:19
| 197,648,190
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,289
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 10:31:05 2019
@author: guillaume
"""
import xarray as xr
import numpy as np
import pandas as pd
from netCDF4 import Dataset
model='CANRCM4_NAM-44_ll_CanESM2_historical'
yi = 2000
yf = 2000
tot=(yf-yi)+1
#########################################################
rep_min='K:/PROJETS/PROJET_CORDEX/CORDEX-NAM44/DONNEES/CANRCM4_CanESM2_historical/MONTH/tasmin/'
rep_max='K:/PROJETS/PROJET_CORDEX/CORDEX-NAM44/DONNEES/CANRCM4_CanESM2_historical/MONTH/tasmax/'
rep_hum='K:/PROJETS/PROJET_CORDEX/CORDEX-NAM44/DONNEES/CANRCM4_CanESM2_historical/MONTH/humidex/'
def HWDI(tmin, tmax, hum, ind1, ind2, ind3, seq):
actualCount = 0
sequence = 0
i = 0
while (i <= len(tmin)-1):
while (i+1 < len(tmin)) and (tmin[i] >= ind1) and (tmin[i+1] >= ind1) and (tmax[i] >= ind2) and (tmax[i+1] >= ind2) and (hum[i] >= ind3) and (hum[i+1] >= ind3):
i += 1
if actualCount == 0 :
actualCount += 2
else:
actualCount += 1
if actualCount == 0:
i += 1
actualCount = 0
elif (actualCount == 1) or (actualCount == 2) :
actualCount = 0
elif actualCount >= seq:
sequence += 1
actualCount = 0
return(sequence)
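# Sanity check (synthetic values): five straight days with tmin >= 20,
# tmax >= 33 and humidex >= 40 form one qualifying sequence (seq >= 3):
#   HWDI([21]*5, [34]*5, [41]*5, 20, 33, 40, 3) -> 1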
for year in range(yi,yf+1):
data = rep_max + model + '_tasmax_'+str(year) +'*.nc'
tmax = xr.open_mfdataset(data)
data = rep_min + model + '_tasmin_'+str(year) +'*.nc'
tmin = xr.open_mfdataset(data)
data = rep_hum + model + '_humidex_'+str(year) +'*.nc'
hum = xr.open_mfdataset(data)
DS = xr.merge([tmax,tmin, hum])
# get the datetime range
times = pd.date_range(str(year) + "-01-01", str(year) + "-12-31", name="time")
times = times[~((times.month == 2) & (times.day == 29))]
DS['time'] = times
DS_date_range = DS.sel(time=slice(str(year) + '-05-01', str(year) + '-09-30'))
DS_date_range.to_netcdf('./tmp.nc')
# Compute the index
nt=0
IND = np.zeros((tot,130,155),dtype=float)
###### open and read the NetCDF files
nc_Modc=Dataset('./tmp.nc','r')
lats=nc_Modc.variables['lat'][:]
lons=nc_Modc.variables['lon'][:]
tmax=nc_Modc.variables['tasmax'][:]
tmin=nc_Modc.variables['tasmin'][:]
humidex=nc_Modc.variables['humidex'][:]
###### loop over all grid points and compute the index
for ni in range(0, len(tmax[0])):
for nj in range(0, len(tmax[0][0])):
IND[nt,ni,nj]=HWDI(tmin[:,ni,nj],tmax[:,ni,nj],humidex[:,ni,nj], 20, 33, 40, 3 )
description='Heat Wave Index'
unite='days'
###### write the output NetCDF file
C = Dataset('./output/python/'+model+'_historical_HWDI_'+str(yi)+'-'+str(yf)+'_Mai_Septembre.nc', 'w')
C.description = 'Heat Wave Index'
C.conventions = 'CF-1.0'
C.model_id = model
C.institution = 'UQAM - ESCER Center, University of Quebec in Montreal'
C.contact = 'Guillaume Dueymes'
########################################
# Dimensions
C.createDimension('x', len(tmin[0][0]))
C.createDimension('y', len(tmin[0]))
C.createDimension('time', tot)
var=C.createVariable('HWDI', np.float32, ('time','y','x'))
var.long_name = str(description)
var.unit = str(unite)
lat=C.createVariable('lat', np.float32, ('y','x'))
lon=C.createVariable('lon', np.float32, ('y','x'))
time = C.createVariable('time', np.float64, ('time',))
time.long_name = 'time'
nc_Modr=Dataset(rep_min + model + '_tasmin_200001.nc','r')
lats=nc_Modr.variables['lat'][:]
lons=nc_Modr.variables['lon'][:]
for var in ['lon','lat','time']:
for att in nc_Modr.variables[var].ncattrs():
print(att)
setattr(C.variables[var],att,getattr(nc_Modr.variables[var],att))
time[:] = range(1, tot + 1)  # one entry per processed year
lat[:,:] = lats
lon[:,:] = lons
C.variables['HWDI'][:,:,:] = IND[::]
C.close()
|
[
"guillaume.dueymes@gmail.com"
] |
guillaume.dueymes@gmail.com
|
d3bb2016d415c47c7039c7cc1a1f2fadb8094a6d
|
3d4a3bebf614086cce8a22510d8c27c0bea52f92
|
/CDS_pro.py
|
6963688c12963d1e348fb2a00680deef640be7ea
|
[] |
no_license
|
caelus95/MantaOcean
|
dc031518051daac9b718b4c7664a057a956475f8
|
dbc5774f6ecd949a8d8f58c66d0101f816b90dc9
|
refs/heads/master
| 2023-06-18T22:00:26.353952
| 2021-06-29T13:25:48
| 2021-06-29T13:25:48
| 365,965,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,252
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 29 18:03:50 2021
@author: caelus
"""
PKG_path = '/home/caelus/dock_1/Working_hub/LGnDC_dep/python_cent/MantaPKG/'
import sys
sys.path.append(PKG_path)
from Manta_Signals.procc_index import sig_pro, linearRegress4Cube
from Manta_Signals.utility import nc2npy
import os
import numpy as np
import pandas as pd
import xarray as xr
from netCDF4 import Dataset
from tqdm import tqdm
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
r_path = '/home/caelus/dock_1/Working_hub/DATA_dep/CDS/'
data_name = 'Detrended_CDS_monthly_199301_201912.nc'
w_path = '/home/caelus/dock_1/Working_hub/DATA_dep/Kuroshio/Detrend/data/'
w_name = 'Detrended_CDS_NP_Total.nc'
minlon,maxlon = 112,280
minlat,maxlat = -10,70
# data_a_6M = data_a_6M.mean(dim='latitude')
def MantaCurl2D(u,v,dx=28400.0,dy=28400.0 ):
import numpy as np
'''
dx = 28400.0 # meters calculated from the 0.25 degree spatial gridding
dy = 28400.0 # meters calculated from the 0.25 degree spatial gridding
'''
u_T = u.transpose([1,0])
v_T = v.transpose([1,0])
du_dx, du_dy = np.gradient(u_T, dx,dy)
dv_dx, dv_dy = np.gradient(v_T, dx,dy)
curl = dv_dx - du_dy
return curl.transpose([1,0])
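# Sanity check (synthetic field, not CDS data): solid-body rotation u = -y, v = x
# has curl dv/dx - du/dy == 2 everywhere on a uniform grid.
_yy, _xx = np.meshgrid(np.arange(10) * 28400.0, np.arange(12) * 28400.0, indexing='ij')
assert np.allclose(MantaCurl2D(-_yy, _xx), 2.0)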
ADT_t = xr.open_dataset(r_path+data_name,decode_times=True)
ADT_t = ADT_t.loc[dict(latitude=slice(minlat,maxlat),longitude=slice(minlon,maxlon))]
# Calculating Vorticity (Curl)
tmp_ugos = ADT_t.ugos.values
tmp_vgos = ADT_t.vgos.values
t,at,on = tmp_ugos.shape
Curl = np.zeros_like(tmp_ugos)
for i in range(t):
Curl[i,:,:] = MantaCurl2D(tmp_ugos[i,:,:],tmp_vgos[i,:,:])
CURL = xr.Dataset(
{
'curl': (["time","latitude", "longitude"], Curl)#,
# "mask": (["y","x"],mask)
},
coords={
"longitude": (["longitude"], ADT_t.longitude),
"latitude": (["latitude"], ADT_t.latitude),
"time": (['time'], ADT_t.time),
# "reference_time": pd.Timestamp("2014-09-05"),
},)
# Calculating EKE
ADT_t['EKE'] = (ADT_t.ugos**2 + ADT_t.vgos**2)/2  # EKE = (u**2 + v**2) / 2
# Merge data
ADT_t = xr.merge([ADT_t,CURL])
ADT_t.to_netcdf(w_path+w_name,'w')
|
[
"caelus9536@gmail.com"
] |
caelus9536@gmail.com
|
808cf3c32ee1793a9b9e851d99062f6245f9dc9e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02879/s604770795.py
|
8a8a1f8c96a9e183e5e3de56c8219f8a884b4acc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
import math
import collections
import fractions
import itertools
import functools
import operator
def solve():
a, b = map(int, input().split())
if a < 10 and b < 10:
print(a*b)
else:
print(-1)
return 0
if __name__ == "__main__":
solve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
31fb114ed920fcfae01485c2a6e8fb4bcf9ab0ff
|
bbfa3b7ee2008617d33a7c5c7770d22e1aa8836b
|
/Optimization/dynamic_programming.py
|
45645d59bdddc6e1e046f26ab79ad1f62240638a
|
[
"MIT"
] |
permissive
|
luoshao23/ML_algorithm
|
1a0046ce9c3abed029cceffa35defe57fffa82b2
|
6e94fdd0718cd892118fd036c7c5851cf3e6d796
|
refs/heads/master
| 2021-08-07T08:38:16.102455
| 2020-03-18T06:49:43
| 2020-03-18T06:49:43
| 92,467,636
| 4
| 1
|
MIT
| 2018-01-16T05:01:29
| 2017-05-26T03:20:08
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,776
|
py
|
from random import random, randint, choice
from copy import deepcopy
from math import log
class fwrapper(object):
"""docstring for fwrapper"""
def __init__(self, function, childcount, name):
self.function = function
self.childcount = childcount
self.name = name
class node(object):
"""docstring for node"""
def __init__(self, fw, children):
self.function = fw.function
self.name = fw.name
self.children = children
def evaluate(self, inp):
results = [n.evaluate(inp) for n in self.children]
return self.function(results)
def display(self, indent=0):
print ' ' * indent + self.name
for c in self.children:
c.display(indent + 1)
class paramnode(object):
"""docstring for paramnode"""
def __init__(self, idx):
self.idx = idx
def evaluate(self, inp):
return inp[self.idx]
def display(self, indent=0):
print '%sp%d' % (' ' * indent, self.idx)
class constnode(object):
"""docstring for constnode"""
def __init__(self, v):
self.v = v
def evaluate(self, inp):
return self.v
def display(self, indent=0):
print '%s%d' % (' ' * indent, self.v)
addw = fwrapper(lambda l: l[0] + l[1], 2, 'add')
subw = fwrapper(lambda l: l[0] - l[1], 2, 'subtract')
mulw = fwrapper(lambda l: l[0] * l[1], 2, 'multiply')
def iffunc(l):
if l[0] > 0:
return l[1]
else:
return l[2]
ifw = fwrapper(iffunc, 3, 'if')
def isgreater(l):
if l[0] > l[1]:
return 1
else:
return 0
gtw = fwrapper(isgreater, 2, 'isgreater')
flist = [addw, mulw, ifw, gtw, subw]
def exampletree():
return node(ifw, [
node(gtw, [paramnode(0), constnode(3)]),
node(addw, [paramnode(1), constnode(5)]),
node(subw, [paramnode(1), constnode(2)]),
]
)
def makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):
if random() < fpr and maxdepth > 0:
f = choice(flist)
children = [makerandomtree(pc, maxdepth - 1, fpr, ppr)
for i in xrange(f.childcount)]
return node(f, children)
elif random() < ppr:
return paramnode(randint(0, pc - 1))
else:
return constnode(randint(0, 10))
def hiddenfunction(x, y):
return x**2 + 2 * y + 3 * x + 5
def buildhiddenset():
rows = []
for i in xrange(200):
x = randint(0, 40)
y = randint(0, 40)
rows.append([x, y, hiddenfunction(x, y)])
return rows
def scorefunction(tree, s):
dif = 0
for data in s:
v = tree.evaluate([data[0], data[1]])
dif += abs(v - data[2])
return dif
def mutate(t, pc, probchange=0.2):
if random() < probchange:
return makerandomtree(pc)
else:
result = deepcopy(t)
if isinstance(t, node):
result.children = [mutate(c, pc, probchange) for c in t.children]
return result
def crossover(t1, t2, probswap=0.7, top=1):
if random() < probswap and not top:
return deepcopy(t2)
else:
result = deepcopy(t1)
if hasattr(t1, 'children') and hasattr(t2, 'children'):
result.children = [crossover(c, choice(t2.children), probswap, 0)
for c in t1.children]
return result
def getrankfunction(dataset):
def rankfunction(population):
scores = [(scorefunction(t, dataset), t) for t in population]
scores.sort()
return scores
return rankfunction
def evolve(pc, popsize, rankfunction, maxgen=500,
mutationrate=0.1, breedingrate=0.4, pexp=0.7, pnew=0.05):
# Returns a random number, tending towards lower numbers. The lower pexp
# is, the more low numbers you will get
def selectindex():
return int(log(random()) / log(pexp))
# Create a random initial population
population = [makerandomtree(pc) for i in range(popsize)]
for i in range(maxgen):
scores = rankfunction(population)
print scores[0][0]
if scores[0][0] == 0:
break
# The two best always make it
newpop = [scores[0][1], scores[1][1]]
# Build the next generation
while len(newpop) < popsize:
if random() > pnew:
newpop.append(mutate(
crossover(scores[selectindex()][1],
scores[selectindex()][1],
probswap=breedingrate),
pc, probchange=mutationrate))
else:
# Add a random node to mix things up
newpop.append(makerandomtree(pc))
population = newpop
scores[0][1].display()
return scores[0][1]
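# Example run (expensive and stochastic, so left commented out):
# rf = getrankfunction(buildhiddenset())
# evolve(2, 500, rf, mutationrate=0.2, breedingrate=0.1, pexp=0.7, pnew=0.1)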
def gridgame(p):
# Board size
max = (3, 3)
# Remember the last move for each player
lastmove = [-1, -1]
# Remember the player's locations
location = [[randint(0, max[0]), randint(0, max[1])]]
# Put the second player a sufficient distance from the first
location.append([(location[0][0] + 2) % 4, (location[0][1] + 2) % 4])
# Maximum of 50 moves before a tie
for o in range(50):
# For each player
for i in range(2):
locs = location[i][:] + location[1 - i][:]
locs.append(lastmove[i])
move = p[i].evaluate(locs) % 4
# You lose if you move the same direction twice in a row
if lastmove[i] == move:
return 1 - i
lastmove[i] = move
if move == 0:
location[i][0] -= 1
# Board wraps
if location[i][0] < 0:
location[i][0] = 0
if move == 1:
location[i][0] += 1
if location[i][0] > max[0]:
location[i][0] = max[0]
if move == 2:
location[i][1] -= 1
if location[i][1] < 0:
location[i][1] = 0
if move == 3:
location[i][1] += 1
if location[i][1] > max[1]:
location[i][1] = max[1]
# If you have captured the other player, you win
if location[i] == location[1 - i]:
return i
def tournament(pl):
# Count losses
losses = [0 for p in pl]
# Every player plays every other player
for i in range(len(pl)):
for j in range(len(pl)):
if i == j:
continue
# Who is the winner?
winner = gridgame([pl[i], pl[j]])
# Two points for a loss, one point for a tie
if winner == 0:
losses[j] += 2
elif winner == 1:
losses[i] += 2
elif winner == -1:
losses[i] += 1
losses[j] += 1
# Sort and return the results
z = zip(losses, pl)
z.sort()
return z
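# Example tournament between random programs (expensive, left commented out);
# each grid player reads 5 inputs: own x/y, opponent x/y, last move:
# results = tournament([makerandomtree(5) for i in range(10)])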
class humanplayer:
def evaluate(self, board):
# Get my location and the location of other players
me = tuple(board[0:2])
others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]
# Display the board
for i in range(4):
for j in range(4):
if (i, j) == me:
print 'O',
elif (i, j) in others:
print 'X',
else:
print '.',
print
# Show moves, for reference
print 'Your last move was %d' % board[len(board) - 1]
print ' 0'
print '2 3'
print ' 1'
print 'Enter move: ',
# Return whatever the user enters
move = int(raw_input())
return move
|
[
"luoshao23@gmail.com"
] |
luoshao23@gmail.com
|
9f89cd804a35d8fb1df192bfec08abe49386513a
|
3784495ba55d26e22302a803861c4ba197fd82c7
|
/src/torch/legacy/nn/SpatialConvolutionMap.py
|
e901140a528bb3805879a4884c68b43e1afbd26d
|
[
"MIT"
] |
permissive
|
databill86/HyperFoods
|
cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789
|
9267937c8c70fd84017c0f153c241d2686a356dd
|
refs/heads/master
| 2021-01-06T17:08:48.736498
| 2020-02-11T05:02:18
| 2020-02-11T05:02:18
| 241,407,659
| 3
| 0
|
MIT
| 2020-02-18T16:15:48
| 2020-02-18T16:15:47
| null |
UTF-8
|
Python
| false
| false
| 4,384
|
py
|
import random
import math
import torch
from .Module import Module
# TODO fix THNN...
class SpatialConvolutionMap(Module):
class maps(object):
@staticmethod
def full(nin, nout):
ft = torch.Tensor(nin * nout, 2)
p = 0
for j in range(nout):
for i in range(nin):
ft[p][0] = i
ft[p][1] = j
p += 1
return ft
@staticmethod
def oneToOne(nfeat):
ft = torch.Tensor(nfeat, 2)
for i in range(nfeat):
ft[i][0] = i
ft[i][1] = i
return ft
@staticmethod
def random(nin, nout, nto):
nker = nto * nout
tbl = torch.Tensor(nker, 2)
fi = torch.randperm(nin)
frcntr = 0
nfi = math.floor(nin / nto) # number of distinct nto chunks
totbl = tbl.select(1, 1)
frtbl = tbl.select(1, 0)
fitbl = fi.narrow(0, 0, int(nfi * nto)) # part of fi that covers distinct chunks
ufrtbl = frtbl.unfold(0, nto, nto)
utotbl = totbl.unfold(0, nto, nto)
ufitbl = fitbl.unfold(0, nto, nto)
# start filling frtbl
for i in range(nout): # for each unit in target map
ufrtbl.select(0, i).copy_(ufitbl.select(0, frcntr))
frcntr += 1
if frcntr - 1 == nfi: # reset fi
fi.copy_(torch.randperm(nin))
frcntr = 1
for tocntr in range(utotbl.size(0)):
utotbl.select(0, tocntr).fill_(tocntr)
return tbl
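# A connection table pairs input planes (column 0) with output planes (column 1);
# e.g. maps.full(2, 3) returns a 6x2 table connecting every input to every output.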
def __init__(self, conMatrix, kW, kH, dW=1, dH=1):
super(SpatialConvolutionMap, self).__init__()
self.kW = kW
self.kH = kH
self.dW = dW
self.dH = dH
self.connTable = conMatrix
self.nInputPlane = int(self.connTable.select(1, 0).max()) + 1
self.nOutputPlane = int(self.connTable.select(1, 1).max()) + 1
self.weight = torch.Tensor(self.connTable.size(0), kH, kW)
self.bias = torch.Tensor(self.nOutputPlane)
self.gradWeight = torch.Tensor(self.connTable.size(0), kH, kW)
self.gradBias = torch.Tensor(self.nOutputPlane)
self.reset()
def reset(self, stdv=None):
if stdv is not None:
stdv = stdv * math.sqrt(3)
self.weight.uniform_(-stdv, stdv)
self.bias.uniform_(-stdv, stdv)
else:
ninp = torch.Tensor(self.nOutputPlane).zero_()
for i in range(self.connTable.size(0)):
idx = int(self.connTable[i, 1])
ninp[idx] += 1
for k in range(self.connTable.size(0)):
idx = int(self.connTable[k, 1])
stdv = 1. / math.sqrt(self.kW * self.kH * ninp[idx])
self.weight.select(0, k).uniform_(-stdv, stdv)
for k in range(self.bias.size(0)):
stdv = 1. / math.sqrt(self.kW * self.kH * ninp[k])
# TODO: torch.uniform
self.bias[k] = random.uniform(-stdv, stdv)
def updateOutput(self, input):
self._backend.SpatialConvolutionMap_updateOutput(
self._backend.library_state,
input,
self.output,
self.weight,
self.bias,
self.connTable,
self.nInputPlane,
self.nOutputPlane,
self.dW, self.dH
)
return self.output
def updateGradInput(self, input, gradOutput):
self._backend.SpatialConvolutionMap_updateGradInput(
self._backend.library_state,
input,
gradOutput,
self.gradInput,
self.weight,
self.bias,
self.connTable,
self.nInputPlane,
self.nOutputPlane,
self.dW, self.dH
)
return self.gradInput
def accGradParameters(self, input, gradOutput, scale=1):
self._backend.SpatialConvolutionMap_accGradParameters(
self._backend.library_state,
input,
gradOutput,
self.gradWeight,
self.gradBias,
self.connTable,
self.nInputPlane,
self.nOutputPlane,
self.dW, self.dH,
scale
)
|
[
"luis20dr@gmail.com"
] |
luis20dr@gmail.com
|
7fc5fc8829de1dcf123347fcd4404cc65ca1a795
|
2c68f9156087d6d338373f9737fee1a014e4546b
|
/src/privatedns/azext_privatedns/vendored_sdks/models/aaaa_record_py3.py
|
2cd662761ff9da8073fe44d4aa1f2ee418445ab3
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
anpaz/azure-cli-extensions
|
8b0d4071c49840da9883f13cb0fd1f4515246ee0
|
847fd487fe61e83f2a4163a9393edc9555267bc2
|
refs/heads/master
| 2023-04-23T17:22:53.427404
| 2021-01-29T17:48:28
| 2021-01-29T18:01:33
| 257,394,204
| 2
| 0
|
MIT
| 2021-01-28T10:31:07
| 2020-04-20T20:19:43
|
Python
|
UTF-8
|
Python
| false
| false
| 918
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AaaaRecord(Model):
"""An AAAA record.
:param ipv6_address: The IPv6 address of this AAAA record.
:type ipv6_address: str
"""
_attribute_map = {
'ipv6_address': {'key': 'ipv6Address', 'type': 'str'},
}
def __init__(self, *, ipv6_address: str=None, **kwargs) -> None:
super(AaaaRecord, self).__init__(**kwargs)
self.ipv6_address = ipv6_address
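# Usage sketch (illustrative documentation-range address):
#   record = AaaaRecord(ipv6_address='2001:db8::1')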
|
[
"wx44@cornell.edu"
] |
wx44@cornell.edu
|
1fafa6db558c6073910962a1fd4bfd7d3019592a
|
a00ec9691cd5e56db93d290cb7a7b1edfef6437f
|
/eco/eco/asgi.py
|
2059a74dc9da807b96848d839ceb20e1570e7a08
|
[] |
no_license
|
pronob1010/Ogani
|
7e6dfd424a830bd3355cf07f7a01eb521d8716d4
|
f6ff3dc8ed40ce46a993d1f83e122fbbc61836cf
|
refs/heads/master
| 2022-10-24T06:22:44.044064
| 2020-06-21T22:01:08
| 2020-06-21T22:01:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
"""
ASGI config for eco project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'eco.settings')
application = get_asgi_application()
|
[
"pronobmozumder.info@gmail.com"
] |
pronobmozumder.info@gmail.com
|
fce765142eaf423639434d0ee0255320be03ebd1
|
e0c8662a56d89730043146ddc340e9e0b9f7de72
|
/plugin/11e23c18-1596.py
|
3ca27f59f5378a6af4db567a8641f9832a2735e3
|
[] |
no_license
|
izj007/bugscan_poc
|
f2ef5903b30b15c230b292a1ff2dc6cea6836940
|
4490f3c36d4033bdef380577333722deed7bc758
|
refs/heads/master
| 2020-09-22T17:20:50.408078
| 2019-01-18T09:42:47
| 2019-01-18T09:42:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
#coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*-
import re
def assign(service, arg):
if service == 'tipask':
return True, arg
def audit(arg):
code, head, res, errcode, _ = curl.curl2(arg + '?dfferfdsfe')
if code == 404 and res:
m = re.search(r'file "(.*)" not found', res)
if m:
security_info('Path:' + ','.join(m.groups()))
if __name__=='__main__':
from dummy import *
audit(assign('tipask','http://ask.id028.cn/')[1])
audit(assign('tipask','http://ask.ccun.cn/')[1])
audit(assign('tipask','http://ask.paotuitu.cn/')[1])
audit(assign('tipask','http://wenda.fanmimi.com/')[1])
|
[
"yudekui@wsmtec.com"
] |
yudekui@wsmtec.com
|
12769f9a834726649374d31c469d956ec8091f46
|
5cb9dccbcccb8a2137368dd0615fe3e3c7761707
|
/simulations/kinova/build/chomp_motion_planner/catkin_generated/pkg.develspace.context.pc.py
|
3a3076474a22a84958b84ab31ec82194d9ffd63e
|
[] |
no_license
|
Simon-Steinmann/sim2real-modular-RL-project
|
b2467a393014e106043f6128a026f5eac934a83d
|
4027590ac94de2d5c914731c09efcf2f318b9ca3
|
refs/heads/master
| 2020-07-29T01:30:56.450919
| 2019-10-12T09:33:00
| 2019-10-12T09:33:00
| 209,605,548
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/acis/sim2real/simulations/kinova/src/moveit/moveit_planners/chomp/chomp_motion_planner/include".split(';') if "/home/acis/sim2real/simulations/kinova/src/moveit/moveit_planners/chomp/chomp_motion_planner/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lchomp_motion_planner".split(';') if "-lchomp_motion_planner" != "" else []
PROJECT_NAME = "chomp_motion_planner"
PROJECT_SPACE_DIR = "/home/acis/sim2real/simulations/kinova/devel/.private/chomp_motion_planner"
PROJECT_VERSION = "1.0.1"
|
[
"simon.steinmann91@gmail.com"
] |
simon.steinmann91@gmail.com
|
273a796eb3e0d568fc83cd05aff1e374c4393f21
|
b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4
|
/toontown/src/coghq/Stomper.py
|
f579b1ff2fd2e11404c53d4ef459346b72bf19ad
|
[] |
no_license
|
satire6/Anesidora
|
da3a44e2a49b85252b87b612b435fb4970469583
|
0e7bfc1fe29fd595df0b982e40f94c30befb1ec7
|
refs/heads/master
| 2022-12-16T20:05:13.167119
| 2020-09-11T16:58:04
| 2020-09-11T17:02:06
| 294,751,966
| 89
| 32
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,816
|
py
|
"""Stomper module: contains the Stomper class"""
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.showbase.PythonUtil import lerp
from direct.fsm import StateData
import math
class Stomper(StateData.StateData, NodePath):
SerialNum = 0
MotionLinear = 0
MotionSinus = 1
MotionHalfSinus = 2
DefaultStompSound = 'phase_5/audio/sfx/AA_drop_safe.mp3'
def __init__(self,
model,
range=5., # range of motion in feet along Z-axis
period=1., # duration of full cycle
phaseShift=0., # 0..1 phase shift
zOffset=0., # how close the stomper should get to Z=0
motionType=None,
shadow=None,
sound=None,
soundLen=None,
):
StateData.StateData.__init__(self, 'StomperDone')
self.SerialNum = Stomper.SerialNum
Stomper.SerialNum += 1
# get the stomp sound
self.sound = sound
self.soundLen = soundLen
if self.sound is not None:
self.sound = base.loadSfx(sound)
self.motionType = motionType
if self.motionType is None:
self.motionType = Stomper.MotionSinus
node = hidden.attachNewNode('Stomper%s' % self.SerialNum)
NodePath.__init__(self, node)
self.model = model.copyTo(self)
self.shadow = shadow
if shadow is not None:
self.shadow = shadow.copyTo(self)
self.shadow.setPos(0,0,.2)
self.TaskName = 'Stomper%sTask' % self.SerialNum
self.range = range
self.zOffset = zOffset
self.period = period
self.phaseShift = phaseShift
def destroy(self):
self.removeNode()
def enter(self, startTime):
# stomper should hit at t=0
if self.motionType is Stomper.MotionLinear:
motionIval = Sequence(
LerpPosInterval(self.model, self.period/2.,
Point3(0,0,self.zOffset+self.range),
startPos=Point3(0,0,self.zOffset)),
WaitInterval(self.period/4.),
LerpPosInterval(self.model, self.period/4.,
Point3(0,0,self.zOffset),
startPos=Point3(0,0,self.zOffset+self.range)),
)
elif self.motionType is Stomper.MotionSinus:
def sinusFunc(t, self=self):
# t: 0..1
# cos(pi) == -1 (hit/down)
# theta: pi..3*pi
theta = math.pi + (t * 2.*math.pi)
# c: -1..1
c = math.cos(theta)
# z: 0..self.range
self.model.setZ(self.zOffset +
((.5 + (c*.5)) * self.range))
motionIval = Sequence(
LerpFunctionInterval(sinusFunc, duration=self.period),
)
elif self.motionType is Stomper.MotionHalfSinus:
def halfSinusFunc(t, self=self):
# t: 0..1
self.model.setZ(self.zOffset +
(math.sin(t * math.pi) * self.range))
motionIval = Sequence(
LerpFunctionInterval(halfSinusFunc, duration=self.period),
)
# put the motion interval into a Parallel so that we can easily add
# concurrent ivals on (like sound, etc)
self.ival = Parallel(
motionIval,
name='Stomper%s' % self.SerialNum,
)
# 'stomp' sound
if self.sound is not None:
# make sure we don't play a sound that's too long; cap the
# sound length to the motion period
if self.soundLen is None:
sndDur = motionIval.getDuration()
else:
sndDur = min(self.soundLen, motionIval.getDuration())
self.ival.append(
SoundInterval(self.sound, duration=sndDur, node=self))
# shadow
if self.shadow is not None:
def adjustShadowScale(t, self=self):
# scale the shadow according to the position of the
# stomper
modelZ = self.model.getZ()
# a=0..1, 0=down, 1=up
a = modelZ/self.range
self.shadow.setScale(lerp(.7, 1., (1.-a)))
self.ival.append(
LerpFunctionInterval(adjustShadowScale, duration=self.period))
self.ival.loop()
self.ival.setT((globalClock.getFrameTime() - startTime) +
(self.period * self.phaseShift))
def exit(self):
self.ival.finish()
del self.ival
|
[
"66761962+satire6@users.noreply.github.com"
] |
66761962+satire6@users.noreply.github.com
|
81836a77393b703a7e69351661f816b5e0db1eb9
|
5f1c3a2930b20c3847496a249692dc8d98f87eee
|
/Pandas/Titanic_DataAnalysis/Question8.py
|
d6bea604e6b82e0b17af8e7a63fd757c6a04672e
|
[] |
no_license
|
AmbyMbayi/CODE_py
|
c572e10673ba437d06ec0f2ae16022d7cbe21d1c
|
5369abf21a8db1b54a5be6cbd49432c7d7775687
|
refs/heads/master
| 2020-04-24T05:01:46.277759
| 2019-02-22T08:26:04
| 2019-02-22T08:26:04
| 171,723,155
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
"""write a pandas program tocreate a pivot table and count survival by gender,
categories wise age of various classes
"""
import pandas as pd
import numpy as np
df = pd.read_csv('titanic.csv')
age = pd.cut(df['age'], [0,10,30,60,80])
result = df.pivot_table('survived', index=['sex', age], columns='pclass', aggfunc='count')
print(result)
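# Note: aggfunc='count' tallies non-null 'survived' entries, i.e. passengers
# per (sex, age-bucket) row and passenger-class column.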
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
8b00cb8d70c9d7a361ad920dddfbfdf75d237328
|
e59e0bd3cdfc706105ee697cc6683554d3cdb371
|
/utime/augmentation/elastic_deformation.py
|
9c267b66b0bdb1de51ca1870c185892338355305
|
[
"MIT"
] |
permissive
|
jennynanap/U-Time
|
02455307cd67abf975d659346d4a1f3acebd8eed
|
f7c8e3f1368f43226872a69b0fbb8c29990e4bd9
|
refs/heads/master
| 2023-07-10T16:26:23.648995
| 2021-08-24T12:09:30
| 2021-08-24T12:09:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,570
|
py
|
import numpy as np
from mpunet.interpolation import RegularGridInterpolator
from scipy.ndimage.filters import gaussian_filter
def elastic_transform(signal, labels, alpha, sigma, bg_value=0.0):
"""
Elastic deformation for 1D signals, modified from:
[Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
Modified from:
https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
Deforms both the signal and labels if len(labels) == len(signal)
Signal linearly interpolated
Labels nearest neighbour interpolated
"""
assert signal.ndim in (1, 2, 3)
org_sig_shape = signal.shape
org_lab_shape = labels.shape
if signal.ndim == 3:
signal = signal.reshape(-1, signal.shape[-1])
labels = labels.reshape(-1, 1)
elif signal.ndim == 1:
signal = np.expand_dims(signal, axis=-1)
seg_length = signal.shape[0]
channels = signal.shape[1]
dtype = signal.dtype
# Define coordinate system
coords = (np.arange(seg_length),)
# Initialize interpolators
intrps = []
for i in range(channels):
intrps.append(RegularGridInterpolator(coords, signal[:, i],
method="linear",
bounds_error=False,
fill_value=bg_value,
dtype=np.float32))
# Get random elastic deformations
dx = gaussian_filter((np.random.rand(seg_length) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
# Define sample points
indices = np.reshape(coords[0] + dx, (-1, 1))
# Interpolate all signal channels
signal = np.empty(shape=signal.shape, dtype=dtype)
for i, intrp in enumerate(intrps):
signal[:, i] = intrp(indices)
# Interpolate labels if passed, only if same shape as input
if labels is not None and len(labels) == len(signal):
lab_intrp = RegularGridInterpolator(coords, labels,
method="nearest",
bounds_error=False,
fill_value=0,
dtype=np.uint8)
labels = lab_intrp(indices).astype(labels.dtype)
return signal.reshape(org_sig_shape), labels.reshape(org_lab_shape)
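# Minimal usage sketch (synthetic 1D signal; shapes assumed from the docstring):
#   sig = np.random.randn(3000).astype(np.float32)
#   lab = np.zeros(3000, dtype=np.uint8)
#   warped_sig, warped_lab = elastic_transform(sig, lab, alpha=100, sigma=10)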
|
[
"mathias@perslev.com"
] |
mathias@perslev.com
|
88f954cd4c147c22b62406dd9e1848a41d7bfac9
|
675d1ad3ebb58d6bf177430568bb35a7319ce30b
|
/plotters/limits.py
|
0eab4cdcf3c08d0721d27c24e9679c39f23d5596
|
[] |
no_license
|
kdlong/InitialStateAnalysis
|
30a0382098014087982059908c2f74ccaff0c3d9
|
2a1f7275de6bcbcf53127ad2e949b20d2443e8bc
|
refs/heads/master
| 2021-01-18T07:02:12.114624
| 2015-04-14T16:27:52
| 2015-04-14T16:27:52
| 30,890,597
| 0
| 0
| null | 2015-02-16T22:02:24
| 2015-02-16T22:02:23
|
Python
|
UTF-8
|
Python
| false
| false
| 4,461
|
py
|
'''
Class for plotting limits.
'''
import sys
import os
import errno
import numpy as np
import CMS_lumi, tdrstyle
from plotUtils import _3L_MASSES, _4L_MASSES, python_mkdir
sys.argv.append('-b')
import ROOT
sys.argv.pop()
ROOT.PyConfig.IgnoreCommandLineOptions = True
ROOT.gROOT.SetBatch(ROOT.kTRUE)
ROOT.gROOT.ProcessLine("gErrorIgnoreLevel = 1001;")
tdrstyle.setTDRStyle()
def save(savename,saveDir,canvas):
'''Save the limits into root file and images.'''
#for type in ['png', 'pdf', 'eps']:
for type in ['png']:
name = "%s/%s/%s.%s" % (saveDir, type, savename, type)
python_mkdir(os.path.dirname(name))
canvas.Print(name)
#canvas.SetName(savename)
#savefile.WriteTObject(self.canvas)
#canvas.Clear()
def plot_limits(analysis, period, savename, **kwargs):
'''Plot limits and get exclusion limits'''
datacardBaseDir = kwargs.pop('datacardBaseDir','datacards')
limitDataBaseDir = kwargs.pop('limitDataBaseDir','limitData')
saveDir = kwargs.pop('saveDir','plots/limits')
blind = kwargs.pop('blind',True)
bp = kwargs.pop('branchingPoint','')
datacardDir = '%s/%s_%itev' % (datacardBaseDir, analysis, period)
if bp: datacardDir += '/%s' % bp
limitDataDir = '%s/%s_%itev' % (limitDataBaseDir, analysis, period)
if bp: limitDataDir += '/%s' % bp
masses = _3L_MASSES if analysis == 'Hpp3l' else _4L_MASSES
if period==13: masses = [500]
quartiles = np.empty((6, len(masses)), dtype=float)
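# combine's Asymptotic tree holds six entries per mass point, in order:
# 2.5%, 16%, 50% (median expected), 84%, 97.5%, then observed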
for j, mass in enumerate(masses):
fname = os.path.join(limitDataDir, "higgsCombineTest.Asymptotic.mH%i.root" % mass)
file = ROOT.TFile(fname,"READ")
tree = file.Get("limit")
if not tree: continue
for i, row in enumerate(tree):
quartiles[i,j] = row.limit
n = len(masses)
twoSigma = ROOT.TGraph(2*n)
oneSigma = ROOT.TGraph(2*n)
expected = ROOT.TGraph(n)
if not blind: observed = ROOT.TGraph(n)
for i, mass in enumerate(masses):
twoSigma.SetPoint(i,masses[i],quartiles[4][i])
twoSigma.SetPoint(n+i,masses[n-i-1],quartiles[0][n-i-1])
oneSigma.SetPoint(i,masses[i],quartiles[3][i])
oneSigma.SetPoint(n+i,masses[n-i-1],quartiles[1][n-i-1])
expected.SetPoint(i,masses[i],quartiles[2][i])
if not blind: observed.SetPoint(i,masses[i],quartiles[5][i])
twoSigma.SetFillColor(ROOT.EColor.kYellow)
twoSigma.SetLineColor(ROOT.EColor.kYellow)
twoSigma.SetMarkerStyle(0)
oneSigma.SetFillColor(ROOT.EColor.kSpring)
oneSigma.SetLineColor(ROOT.EColor.kSpring)
oneSigma.SetMarkerStyle(0)
expected.SetLineStyle(7)
expected.SetMarkerStyle(0)
expected.SetFillStyle(0)
if not blind:
observed.SetMarkerStyle(0)
observed.SetFillStyle(0)
canvas = ROOT.TCanvas('c%s'%bp,'c%s'%bp,50,50,800,600)
canvas.SetFillColor(0)
canvas.SetBorderMode(0)
canvas.SetFrameFillStyle(0)
canvas.SetFrameBorderMode(0)
canvas.SetLeftMargin(0.12)
canvas.SetRightMargin(0.04)
canvas.SetTopMargin(0.08)
canvas.SetBottomMargin(0.12)
canvas.SetLogy(1)
expected.GetXaxis().SetLimits(masses[0],masses[-1])
expected.GetXaxis().SetTitle('#Phi^{++} Mass (GeV)')
expected.GetYaxis().SetTitle('95% CLs Upper Limit on #sigma/#sigma_{SM}')
expected.GetYaxis().SetTitleOffset(1.)
expected.GetYaxis().SetTitleSize(0.05)
twoSigma.Draw('f')
oneSigma.Draw('f')
expected.Draw()
if not blind: observed.Draw()
ratiounity = ROOT.TLine(expected.GetXaxis().GetXmin(),1,expected.GetXaxis().GetXmax(),1)
ratiounity.Draw()
legend = ROOT.TLegend(0.65,0.2,0.90,0.4)
legend.SetFillColor(0)
if not blind: legend.AddEntry(observed, 'Observed')
legend.AddEntry(expected, 'Expected')
legend.AddEntry(twoSigma, 'Expected 2#sigma', 'F')
legend.AddEntry(oneSigma, 'Expected 1#sigma', 'F')
legend.Draw('same')
lumiperiod = 2 if period == 8 else 4
CMS_lumi.writeExtraText = True
CMS_lumi.extraText = "Preliminary" if not blind else "Simulation Preliminary"
CMS_lumi.lumi_7TeV = "%0.1f fb^{-1}" % (4.9)
CMS_lumi.lumi_8TeV = "%0.1f fb^{-1}" % (19.7)
CMS_lumi.lumi_13TeV = "%0.1f fb^{-1}" % (25.0)
CMS_lumi.CMS_lumi(canvas,lumiperiod,11)
save(savename,saveDir,canvas)
y = 0
for x in range(masses[0],masses[-1]):
y = expected.Eval(x)
if y > 1: break
print "Expected Limit: %i GeV" % x
|
[
"dntaylor@wisc.edu"
] |
dntaylor@wisc.edu
|
2375f031e40a31c4eadc37b25c1a2e45e111c9bd
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/sdssj9-10_104054.47+112023.7/sdB_sdssj9-10_104054.47+112023.7_coadd.py
|
4de1d11e6ffb1bc19dfcb19efea75a26b321efa2
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[160.226958,11.339917], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_sdssj9-10_104054.47+112023.7/sdB_sdssj9-10_104054.47+112023.7_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_sdssj9-10_104054.47+112023.7/sdB_sdssj9-10_104054.47+112023.7_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
c0c28a81cd864c47e81a5589f67dc159c3c4086b
|
151dc9e2f00b202a5085189ac5e5df06eed1b77a
|
/bot/migrations/0002_food_ingredients.py
|
6a59f2980a0f2a0dc788dbb431c459bb5c62c5f5
|
[] |
no_license
|
ochui/foodbot
|
074d818308ee72c895857641333a71aebe6f36c0
|
c4dcda9a94a270ca26a58383d3719ed312b06907
|
refs/heads/master
| 2023-08-25T10:06:22.793546
| 2021-11-02T23:22:03
| 2021-11-02T23:22:03
| 420,014,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
# Generated by Django 3.2.8 on 2021-10-22 09:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bot', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='food',
name='ingredients',
field=models.ManyToManyField(to='bot.Ingredient'),
),
]
|
[
"ochuiprincewill411@gmail.com"
] |
ochuiprincewill411@gmail.com
|
2690394c577796e63ca8242a797e503b406ccb1a
|
a665936ae4c630ae9ef4c8b106aef1d0dcb3309b
|
/pro57.py
|
d63d9a4c653a8a0a3d28728a19a171d8b6b580fc
|
[] |
no_license
|
Hemavarsheni/codekata
|
bbbd54e0282c0a5724ef58f6abd215cb57fe0cd8
|
577c8347991784d1b9b1b4364045cde3257ee9db
|
refs/heads/master
| 2020-06-13T23:50:44.237011
| 2019-08-16T12:11:29
| 2019-08-16T12:11:29
| 194,827,236
| 0
| 10
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
#Hemavarsheni
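# Prints "yes" if the two input strings share any common 2-character substring.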
a,b=map(str,input().split())
lista=[]
listb=[]
for i in range(len(a)-1):
s=""
for j in range(i,i+2):
s=s+a[j]
lista.append(s)
for i in range(len(b)-1):
s=""
for j in range(i,i+2):
s=s+b[j]
listb.append(s)
for i in lista:
if i in listb:
print("yes")
exit(0)
print("no")
|
[
"noreply@github.com"
] |
Hemavarsheni.noreply@github.com
|
5ef239b0126778ed8d7aeba3dc594138312d9df0
|
9f66941e73cad0c215601339512b8dd6729792da
|
/Show_Program_exe/predictModelUI/tensorflow/_api/v2/profiler/experimental/client/__init__.py
|
6c373c04418802ba602b7d8a351fd76131351e7e
|
[] |
no_license
|
NgaAdrain/TEAM_Enter_Text_Here
|
eb0e02c13959b90eecc0c69d2b24adb23a50150a
|
a7217438284360e06c93d37092dca1afcecb735a
|
refs/heads/master
| 2023-07-20T11:42:50.353497
| 2021-05-28T08:30:53
| 2021-05-28T08:30:53
| 267,983,495
| 2
| 0
| null | 2023-07-06T22:00:36
| 2020-05-30T00:50:59
|
Python
|
UTF-8
|
Python
| false
| false
| 419
|
py
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.profiler.experimental.client namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.profiler.profiler_client import monitor
from tensorflow.python.profiler.profiler_client import trace
del _print_function
|
[
"adrainnga@gmail.com"
] |
adrainnga@gmail.com
|
469233826fa76271f9d1c92d61ca030c6ebe3463
|
ac191a3e973b0d66baa559fdd27af5e0012fe67b
|
/shipyard/utils.py
|
eab1698afc173b87779a9d3d0ce8a92c8445ae1d
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
monokrome/shipyard
|
fb2ab707643e52c3acfd20dfaf4d269a8b5dc195
|
3a37105466e4ac30b260a6b40a467e0412b4fc13
|
refs/heads/master
| 2022-01-17T23:31:26.186270
| 2013-10-23T06:29:21
| 2013-10-23T06:29:21
| 13,798,136
| 1
| 0
| null | 2022-01-06T22:53:10
| 2013-10-23T09:02:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,347
|
py
|
# Copyright 2013 Evan Hazlett and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ansi2html import Ansi2HTMLConverter
from django.conf import settings
import redis
def get_short_id(container_id):
return container_id[:12]
def convert_ansi_to_html(text, full=False):
converted = ''
try:
conv = Ansi2HTMLConverter(markup_lines=True, linkify=False, escaped=False)
converted = conv.convert(text.replace('\n', ' <br/>'), full=full)
except Exception, e:
converted = text
return converted
def update_hipache(app_id=None):
from applications.models import Application
if getattr(settings, 'HIPACHE_ENABLED'):
app = Application.objects.get(id=app_id)
redis_host = getattr(settings, 'HIPACHE_REDIS_HOST')
redis_port = getattr(settings, 'HIPACHE_REDIS_PORT')
rds = redis.Redis(host=redis_host, port=redis_port)
with rds.pipeline() as pipe:
domain_key = 'frontend:{0}'.format(app.domain_name)
# remove existing
pipe.delete(domain_key)
pipe.rpush(domain_key, app.id)
# add upstreams
for c in app.containers.all():
port = c.get_ports()[app.backend_port]
upstream = '{0}://{1}:{2}'.format(app.protocol, c.host.hostname,
port)
pipe.rpush(domain_key, upstream)
pipe.execute()
return True
return False
def remove_hipache_config(domain_name=None):
if getattr(settings, 'HIPACHE_ENABLED'):
redis_host = getattr(settings, 'HIPACHE_REDIS_HOST')
redis_port = getattr(settings, 'HIPACHE_REDIS_PORT')
rds = redis.Redis(host=redis_host, port=redis_port)
domain_key = 'frontend:{0}'.format(domain_name)
# remove existing
rds.delete(domain_key)
|
[
"ejhazlett@gmail.com"
] |
ejhazlett@gmail.com
|
cab5ec73309abc6f8c1a012ccccd0e4dc50f50b4
|
91f4078045a57eaaafe0b172909d7041e829941c
|
/arjuna-samples/arjex/test/pkg/gns_adv/check_gnsadv_07_waiters_default_wait.py
|
21418e6c64e6736cfe53b4ca0f3d66b1a111ea50
|
[
"Apache-2.0"
] |
permissive
|
amiablea2/arjuna
|
0d06d1dfb34309f4b6f39b17298f7acb6c3c48c9
|
af74e0882216881ceca0a10f26442165ffc43287
|
refs/heads/master
| 2023-08-21T20:04:30.416303
| 2021-10-27T06:41:40
| 2021-10-27T06:41:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,261
|
py
|
# This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arjuna import *
from arjex.lib.gns_adv.app_page_section.app import WordPress
@for_module
def dashboard(request):
# Setup
wordpress = WordPress(section_dir="simple")
home = wordpress.launch()
dashboard = home.login_with_default_creds()
yield dashboard
# Teadown
dashboard.top_nav.logout()
wordpress.quit()
@test
def check_wait_until_absent_gns_1(request, dashboard):
dashboard.left_nav.gns.wait_until_absent("non_existing")
try:
# It is present
dashboard.left_nav.gns.wait_until_absent("settings")
except GuiWidgetForLabelPresentError as e:
print("Exception as Expected")
print(str(e))
except Exception as e:
raise Exception("Unexpected exception raise: ", str(e))
else:
raise Exception("Exception not raised.")
@test
def check_wait_until_absent_gns_2(request, dashboard):
dashboard.left_nav.wait_until_absent(id="non_existing")
try:
# It is present
dashboard.left_nav.wait_until_absent(link="Settings")
except GuiWidgetPresentError as e:
print("Exception as Expected")
print(str(e))
except Exception as e:
raise Exception("Unexpected exception raise: ", str(e))
else:
raise Exception("Exception not raised.")
@test
def check_contains_gns_1(request, dashboard):
print(dashboard.left_nav.gns.contains("settings"))
print(dashboard.left_nav.gns.contains("non_existing"))
@test
def check_contains_gns_2(request, dashboard):
print(dashboard.left_nav.contains(link="Settings"))
print(dashboard.left_nav.contains(id="non_existing"))
|
[
"rahulverma81@gmail.com"
] |
rahulverma81@gmail.com
|
822d8811b069aed67ce8cd1f23bbf59c6c4a9c45
|
0b25dc3f9b4ef736e739aadddec33b96dd65a0c8
|
/huawei/21.py
|
7302d8adf92d83dba6e7130102b2a602b4176164
|
[] |
no_license
|
ttp55/LearnPy
|
b123f44a74e4364771491c572705742c15eb33ff
|
1530e158bde152e5c585f496dd1e5ffcffdb73bc
|
refs/heads/master
| 2023-05-11T13:02:30.157285
| 2023-05-08T07:13:57
| 2023-05-08T07:13:57
| 196,953,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# @Time : 2022/2/8 13:58
# @Author : WZG
# -*- coding: utf-8 -*-
l = [1,2,3,4,5,5]
print(l[3:]+l[:3])
s = input()
while '{}' in s or '()' in s or '[]' in s:
s = s.replace('{}', '')
s = s.replace('[]', '')
s = s.replace('()', '')
print(not s)
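# A minimal sketch (added for illustration, not part of the original
# submission): the same repeated-replacement trick wrapped in a function.
def is_balanced(brackets):
    """Return True if brackets consists only of properly nested (), [] and {}."""
    while '{}' in brackets or '()' in brackets or '[]' in brackets:
        brackets = brackets.replace('{}', '').replace('[]', '').replace('()', '')
    return not brackets

assert is_balanced('([]{})')
assert not is_balanced('([)]')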
|
[
"1047697347@qq.com"
] |
1047697347@qq.com
|
1659e77a9f834c52289989c0d18398077d405e47
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/308/usersdata/290/72816/submittedfiles/ex1.py
|
9d833bfe06bccfb760241d32e0c22608bc1e9950
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
a = input('Enter a: ')
b = input('Enter b: ')
c = input('Enter c: ')
# START FROM HERE!
D=((b**2)-(4*a*c))
print(D)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
a1add44754eec4db72df64bfc3f294a01ec01a29
|
e78433e847c5a5ff1ed583303c0240b0a93dc06a
|
/0x01-python-if_else_loops_functions/6-print_comb3.py
|
2682ef8c4fae7eef14ca880d7b9148e40c1481e8
|
[] |
no_license
|
david-develop/holbertonschool-higher_level_programming
|
75eaef38173361cc4ef9202f3e3f6c5a781239ed
|
90f62136a7ddfb7d3921daccdc38d50a5376343b
|
refs/heads/master
| 2020-07-22T23:27:35.494155
| 2020-03-26T17:34:36
| 2020-03-26T17:34:36
| 207,366,767
| 0
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
#!/usr/bin/python3
for num1 in range(0, 8):
for num2 in range(num1 + 1, 10):
print("{}{}".format(num1, num2), end=', ')
print("{}{}".format(num1 + 1, num2))
|
[
"924@holbertonschool.com"
] |
924@holbertonschool.com
|
04ba26d95374cc9f8beb1e7b9eb48e8d9e9fd7cc
|
ce2a6330c807591f95210bdda005f875a76b98a8
|
/model_lab/ml_models/titanic/scripts/train.py
|
22de07daecff46eabb25b6cef569ac1b056d0c75
|
[] |
no_license
|
MartinThoma/model-lab
|
c1ca8d0950086cd4e332862d6a3b7a7ae7f10a2f
|
3525cdd5271a1b7940f801d4245343b1592ffa67
|
refs/heads/master
| 2020-04-11T18:43:54.537892
| 2018-12-31T20:14:40
| 2018-12-31T20:17:02
| 162,009,569
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,893
|
py
|
"""Train a model for Titanic."""
import re
import math
import pickle
# 3rd party modules
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesRegressor
def main():
"""Load, train, serialize, test."""
data = load_data()
analyze_features(data['full_features'])
model = train(data)
with open('model.pickle', 'wb') as f:
pickle.dump(model, f)
evaluate(model, data)
def load_data():
"""Load the titanic dataset."""
train = pd.read_csv("../input/train.csv", dtype={"Age": np.float64}, )
test = pd.read_csv("../input/test.csv", dtype={"Age": np.float64}, )
train = train.set_index('PassengerId')
test = test.set_index('PassengerId')
train = train.apply(preprocess, axis=1)
test = test.apply(preprocess, axis=1)
x_train = train.drop(['Survived'], axis=1)
y_train = train['Survived']
x_test = test
return {'train': {'x': x_train, 'y': y_train},
'test': {'x': x_test},
'full_features': pd.concat([x_train, x_test])}
def preprocess(item):
"""Preprocess the dictionary 'item'."""
item = feature_engineering(item)
item = encode_features(item)
return item
def feature_engineering(item):
"""
Develop new features.
Parameters
----------
item : Dict[str, Any]
Returns
-------
item : Dict[str, Any]
"""
if item["Cabin"] is None:
item["Cabin"] = " "
if item["Age"] is None or math.isnan(item["Age"]):
item["Age"] = 18 # ????
if item["Fare"] is None or math.isnan(item["Fare"]):
item["Fare"] = -1 # ????
def get_title(x):
        return re.search(r' ([A-Za-z]+)\.', x).group(1)
item["Title"] = get_title(item["Name"])
return item
def encode_features(item):
"""
Encode features for machine learning models.
This step has no value for humans, in contrast to the feature_engineering
step.
"""
item['is_male'] = int(item['Sex'] == 'male')
del item['Name']
del item['Sex']
# del item['Fare']
del item['Cabin']
del item['Ticket']
# One-hot encoding: Embarked
item['embarked_s'] = int(item['Embarked'] == 'S')
item['embarked_c'] = int(item['Embarked'] == 'C')
item['embarked_q'] = int(item['Embarked'] == 'Q')
del item['Embarked']
# One-hot encoding: Title
item['title_mr'] = int(item['Title'] == 'Mr')
item['title_miss'] = int(item['Title'] == 'Miss')
item['title_mrs'] = int(item['Title'] == 'Mrs')
item['title_master'] = int(item['Title'] == 'Master')
item['title_other'] = 1 - (item['title_mr'] +
item['title_miss'] +
item['title_mrs'] +
item['title_master'])
del item['Title']
return item
def analyze_features(df_features):
for column in df_features.columns:
print('## ' + column)
value_counts = df_features[column].value_counts()
if len(value_counts) > 10:
print('Many values')
else:
print(value_counts)
count_nan = len(df_features[column]) - df_features[column].count()
if count_nan > 0:
print('has nan')
print('')
def train(data):
etr = ExtraTreesRegressor(n_estimators=10)
etr.fit(data['train']['x'], np.ravel(data['train']['y']))
return etr
def evaluate(model, data):
score = model.score(data['train']['x'], data['train']['y'])
print("Accuracy: %0.3f".format(score * 100))
predictions = model.predict(data['test']['x'])
passenger_id = np.array(data['test']['x'].index).astype(int)
my_prediction = pd.DataFrame(predictions,
passenger_id,
columns=["Survived"])
my_prediction.to_csv("my_prediction.csv", index_label=["PassengerId"])
if __name__ == '__main__':
main()
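# Usage sketch (paths assumed from the constants above): place the Kaggle
# Titanic train.csv and test.csv under ../input/, then run `python train.py`;
# the script writes model.pickle and my_prediction.csv to the working directory.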
|
[
"info@martin-thoma.de"
] |
info@martin-thoma.de
|
220acf2053b36953db69197b5b7cd45c5040bc57
|
48b9d828acf80792bc4385febaa734a2e96ad465
|
/test-openmps/Examples/05_Fermions_LongRange.py
|
fd886085ce8c20f16a78e8dadbd894ef399ac75e
|
[
"MIT"
] |
permissive
|
OminiaVincit/qphase-trans
|
dd4ab2e0cacc449ead3bef318a65eb05aed45621
|
40e0c078dcd74282e8d8f44690433bf670bff8cb
|
refs/heads/master
| 2023-05-06T12:14:30.368375
| 2021-05-28T05:11:58
| 2021-05-28T05:11:58
| 235,478,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,316
|
py
|
import MPSPyLib as mps
import numpy as np
import sys
import os.path
def main(PostProcess=False):
"""
Introductory example for openMPS to simulate a fermionic system with
long-range interactions. Two modes are available when running the
example from command line:
* ``python LongRangeTunneling.py --PostProcess=F`` : runs the MPSFortLib to
determine the ground state statics (initial state).
(default if ``--PostProcess`` not present.)
* ``python LongRangeTunneling.py --PostProcess=T`` : printing the results
of the simulation run before.
"""
# Build operators
Operators = mps.BuildFermiOperators()
# Define Hamiltonian MPO
H = mps.MPO(Operators)
H.AddMPOTerm('FiniteFunction', ['fdagger','f'], f=[1.0, -0.2],
hparam='t', weight=-1.0, Phase=True)
# Observables
myObservables = mps.Observables(Operators)
# Site terms
myObservables.AddObservable('site', 'nftotal', 'n')
# Correlation functions
myObservables.AddObservable('corr', ['fdagger', 'f'], 'spdm', Phase=True)
# Convergence parameters
myConv = mps.MPSConvParam(max_bond_dimension=30, max_num_sweeps=2)
myConv.AddModifiedConvergenceParameters(0, ['max_bond_dimension',
'local_tol'], [50, 1E-14])
# Specify constants and parameter list
t = 1.0
L = 10
N = 5
parameters = [{
'simtype' : 'Finite',
# Directories
'job_ID' : 'LongRangeTunneling_',
'unique_ID' : 'L_' + str(L) + 'N' + str(N),
'Write_Directory' : 'TMP_05/',
'Output_Directory' : 'OUTPUTS_05/',
# System size and Hamiltonian parameters
'L' : L,
't' : t,
# Specification of symmetries and good quantum numbers
'Abelian_generators' : ['nftotal'],
'Abelian_quantum_numbers' : [N],
'MPSObservables' : myObservables,
'MPSConvergenceParameters' : myConv,
'logfile' : True
}]
# Write Fortran-readable main files
MainFiles = mps.WriteFiles(parameters, Operators, H,
PostProcess=PostProcess)
# Run the simulations and quit if not just post processing
if(not PostProcess):
if os.path.isfile('./Execute_MPSMain'):
RunDir = './'
else:
RunDir = None
mps.runMPS(MainFiles, RunDir=RunDir)
return
# Postprocessing
# --------------
Outputs = mps.ReadStaticObservables(parameters)
# Get observables of state computed with most stringent convergence criteria
fullyconvergedOutputs = mps.GetObservables(Outputs,
'convergence_parameter', 2)
spdm = fullyconvergedOutputs[0]['spdm']
spdmeigs, U = np.linalg.eigh(spdm)
    print(r'Eigenvalues of <f^{\dagger}_i f_j>', spdmeigs)
return
if(__name__ == '__main__'):
# Check for command line arguments
Post = False
for arg in sys.argv[1:]:
key, val = arg.split('=')
if(key == '--PostProcess'): Post = (val == 'T') or (val == 'True')
# Run main function
main(PostProcess=Post)
|
[
"k09tranhoan@gmail.com"
] |
k09tranhoan@gmail.com
|
001b2620fba71bf6672fffa7ba7578a6ef07180f
|
c838b0eaf08c63284bd29442f8a0a297d1558fd5
|
/lagom/runner/rolling_segment_runner.py
|
6f42c595b4aa4688010ec63e73d889e6d6601428
|
[
"MIT"
] |
permissive
|
vin136/lagom
|
ccd0f4a3e469c1ee8ef88b1f5248e712b51c5704
|
54e1890e6450f4b1bf499a838963c5d1a3b2da6a
|
refs/heads/master
| 2020-04-22T21:45:51.488458
| 2019-02-13T16:41:32
| 2019-02-13T16:41:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
import torch
import numpy as np
from lagom.envs import EnvSpec
from lagom.history import BatchSegment
from lagom.runner import BaseRunner
class RollingSegmentRunner(BaseRunner):
def __init__(self, config, agent, env):
super().__init__(config, agent, env)
self.env_spec = EnvSpec(self.env)
self.obs_buffer = None # for next call
self.done_buffer = None # masking
def __call__(self, T, reset=False):
D = BatchSegment(self.env_spec, T)
if self.obs_buffer is None or reset:
obs = self.env.reset()
# reset agent: e.g. RNN states because initial observation
self.agent.reset(self.config)
else:
obs = self.obs_buffer
D.add_observation(0, obs)
for t in range(T):
info = {}
out_agent = self.agent.choose_action(obs, info=info)
action = out_agent.pop('action')
if torch.is_tensor(action):
raw_action = list(action.detach().cpu().numpy())
else:
raw_action = action
D.add_action(t, raw_action)
obs, reward, done, info = self.env.step(raw_action)
D.add_observation(t+1, obs)
D.add_reward(t, reward)
D.add_done(t, done)
D.add_info(info)
# Record other information: e.g. log-probability of action, policy entropy
D.add_batch_info(out_agent)
self.obs_buffer = obs
self.done_buffer = done
return D
|
[
"zuoxingdong@hotmail.com"
] |
zuoxingdong@hotmail.com
|
f32c47e27a4ed6dfd41d6154529bbafd699be8a3
|
a38eb2457fc834109cbb1d5cd2f5649d932cbb00
|
/blog/migrations/0001_initial.py
|
7115d435920cd0178d44e08eebf29f71a8ba7649
|
[] |
no_license
|
anitalmada/mi-primer-blog
|
0a5daef6aa87273ab4ab09c1f6d1ff51706fdae2
|
872709baeef37c30608231a746d749c9d11afb0e
|
refs/heads/master
| 2020-06-10T01:47:37.291475
| 2017-10-09T00:16:38
| 2017-10-09T00:16:38
| 76,130,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,049
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-12-10 15:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"admin@admin.com"
] |
admin@admin.com
|
fa3c81e8655206c62c0e1c0548c42cc70ab771ae
|
c1261b9181d86c418df612dc809af933cfbb2c0d
|
/blog1/migrations/0003_auto_20190616_1338.py
|
c8a7d136c991ab23980daf09e24fe9de1d3d62e1
|
[] |
no_license
|
gitlGl/myblog
|
122a598407d12a7397420ce50f9c1ca68a3107d2
|
b3d7d1130e81ca625cb9d2b7204e19da6efe7d07
|
refs/heads/master
| 2023-09-01T14:06:04.720407
| 2022-10-22T08:47:02
| 2022-10-22T08:47:02
| 198,171,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
# Generated by Django 2.1 on 2019-06-16 05:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog1', '0002_auto_20190616_1334'),
]
operations = [
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='title', max_length=20)),
('content', models.TextField()),
],
),
migrations.RemoveField(
model_name='students',
name='sgrade',
),
migrations.DeleteModel(
name='Grades',
),
migrations.DeleteModel(
name='Students',
),
]
|
[
"you@example.com"
] |
you@example.com
|
55f1dc9325861a9e19d2e9a887425c689284659e
|
0c6c7365d6ff8b694bc906ec5f74c741e8bb0d37
|
/Algorithms/922-Sort-Array-By-Parity-II.py
|
b95848daeaa9b922111c67403417c1599a5222c5
|
[] |
no_license
|
XiongQiuQiu/leetcode-slove
|
d58ab90caa250c86b7a1ade8b60c669821d77995
|
60f0da57b8ea4bfb937e2fe0afe3caea719cd7e4
|
refs/heads/master
| 2021-01-23T11:21:15.069080
| 2019-07-08T15:42:48
| 2019-07-08T15:42:48
| 93,133,558
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
'''
Given an array A of non-negative integers, half of the integers in A are odd, and half of the integers are even.
Sort the array so that whenever A[i] is odd, i is odd; and whenever A[i] is even, i is even.
You may return any answer array that satisfies this condition.
Example 1:
Input: [4,2,5,7]
Output: [4,5,2,7]
Explanation: [4,7,2,5], [2,5,4,7], [2,7,4,5] would also have been accepted.
Note:
2 <= A.length <= 20000
A.length % 2 == 0
0 <= A[i] <= 1000
'''
class Solution(object):
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
odds,evens = [odd for odd in A if odd %2],[even for even in A if even%2 == 0]
return [odds.pop() if i % 2 else evens.pop() for i in range(len(A)) ]
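# A small usage sketch (added for illustration): verify the parity invariant
# on the sample input from the problem statement above.
if __name__ == '__main__':
    result = Solution().sortArrayByParityII([4, 2, 5, 7])
    assert all(value % 2 == index % 2 for index, value in enumerate(result))
    print(result)  # [2, 7, 4, 5] -- one of the accepted answers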
|
[
"zjw2goo@gmail.com"
] |
zjw2goo@gmail.com
|
1eb7553184f9f93b4f42a7d94f77117f5205d59e
|
78171e8cfbc44c547ee07d6e5a85e595fb7397a1
|
/analytics/migrations/0001_initial.py
|
c0e9b067863b881a1c5ab3492b2f8dbcc91c19cf
|
[] |
no_license
|
jimpalowski/URLshortener
|
37b41a3818679c1e0707f02f57147e87a651063c
|
f7b8450ce2e858dff1e6fec11f9fd5dfec3d3e26
|
refs/heads/master
| 2021-09-05T11:44:36.564719
| 2018-01-27T01:59:56
| 2018-01-27T01:59:56
| 118,685,038
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2018-01-26 19:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('shortener', '0003_auto_20180125_2359'),
]
operations = [
migrations.CreateModel(
name='ClickEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField(default=0)),
('updated', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('kirr_url', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='shortener.KirrURL')),
],
),
]
|
[
"palowskijim@gmail.com"
] |
palowskijim@gmail.com
|
037914c9c349ecf834267d3eb3e6e5b20c208d0b
|
50dd2a43daa8316fc11e0c176b5872738fcc5dde
|
/Learning/049_Decorators/deco.py
|
a73431eabd8fc058356752fa444fc38ffd559521
|
[] |
no_license
|
FrenchBear/Python
|
58204d368e3e72071eef298ff00d06ff51bd7914
|
b41ab4b6a59ee9e145ef2cd887a5fe306973962b
|
refs/heads/master
| 2023-08-31T18:43:37.792427
| 2023-08-26T15:53:20
| 2023-08-26T15:53:20
| 124,466,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,383
|
py
|
# deco.py
# Exercices on decorators
#
# 2018-09-03 PV
# 2018-09-07 PV Variant with a class
# 2018-10-01 PV Expanded with execution time and following code
import functools
def uppercase(func):
# Preserve __name__ and __doc__ of original function in the decorated version
@functools.wraps(func)
def wrapper(*args, **kwargs):
original_result = func(*args, **kwargs)
modified_result = original_result.upper()
return modified_result
return wrapper
def strong(func):
def wrapper():
return '<strong>' + func() + '</strong>'
return wrapper
def emphasis(func):
def wrapper():
return '<em>' + func() + '</em>'
return wrapper
@strong
@emphasis
@uppercase
def greet():
return 'Hello!'
print(greet())
@uppercase
def hello(name: str) -> str:
"""This is a polite function to say hello"""
return 'Hello '+name+'!'
print(hello('Pierre'))
print("name:", hello.__name__)
print("doc:", hello.__doc__)
def trace(func):
def wrapper(*args, **kwargs):
print(f'TRACE: calling {func.__name__}() with {args}, {kwargs}')
original_result = func(*args, **kwargs)
print(f'TRACE: {func.__name__}() returned {original_result!r}')
return original_result
return wrapper
@trace
def say(name, line):
return f'{name}: {line}'
print(say('Jane', 'Hello, World'))
# Variant, with a class
# While a function nested inside a function gets a closure over the outer
# function's parameters, there is no such thing here, so self.original is a "manual closure"
class SkipLines():
def __init__(self, n):
self.n = n
self.original = None
# Callable that handles registration
def __call__(self, f):
self.original = f
return self.relay
def relay(self, *args, **kwargs):
for _ in range(self.n):
print('-'*20)
self.original(*args, **kwargs)
for _ in range(self.n):
print('-'*20)
@SkipLines(2)
def PrintHello(n):
print("Hello,", n)
PrintHello("Pierre")
# ----------------------------------------------
# Decorator to output running time of a function
# Use @functools.wraps(func) to preserve __name__ and __doc__ of decorated function
import time
import functools
print("\nMeasuring execution time")
def clock(func):
@functools.wraps(func)
def clocked(*args, **kwargs):
t0 = time.perf_counter()
result = func(*args, **kwargs)
elapsed = time.perf_counter() - t0
name = func.__name__
arg_lst = []
if args:
arg_lst.append(', '.join(repr(arg) for arg in args))
if kwargs:
pairs = ['%s=%r' % (k, w) for k, w in sorted(kwargs.items())]
arg_lst.append(', '.join(pairs))
arg_str = ', '.join(arg_lst)
print('[%0.8fs] %s(%s) -> %r ' % (elapsed, name, arg_str, result))
return result
return clocked
# Second version, parameterizable: decorator is a function returning a decorator!
DEFAULT_FMT = '[{elapsed:0.8f}s] {name}({args}) -> {result}'
def clock2(fmt=DEFAULT_FMT):
def decorate(func):
@functools.wraps(func)
def clocked(*posargs, **kwargs):
t0 = time.perf_counter()
result = func(*posargs, **kwargs)
elapsed = time.perf_counter() - t0
name = func.__name__
arg_lst = []
if posargs:
arg_lst.append(', '.join(repr(arg) for arg in posargs))
if kwargs:
pairs = ['%s=%r' % (k, w) for k, w in sorted(kwargs.items())]
arg_lst.append(', '.join(pairs))
args = ', '.join(arg_lst)
print(fmt.format(**locals())) # locals() is a dictionary of local variables
return result
return clocked
return decorate
@clock
def snooze(seconds):
time.sleep(seconds)
@clock2()
def factorial(n):
return 1 if n < 2 else n*factorial(n-1)
print('Calling {}(0.25)'.format(snooze.__name__))
snooze(0.25)
print('Calling factorial(6)')
f6 = factorial(n=6)
# ----------------------------------------------
# Use of @functools.lru_cache() to implement a cache of recent calls to avoid executing again
# Artificial but impressive example
print("\n@functools.lru_cache()")
@clock
def fibo1(n):
if n < 2:
return n
return fibo1(n-2) + fibo1(n-1)
print('calling fibo1(6)')
print(fibo1(6))
@functools.lru_cache()
@clock
def fibo2(n):
if n < 2:
return n
return fibo2(n-2) + fibo2(n-1)
print('calling fibo2(6)')
print(fibo2(6))
# ----------------------------------------------
# Use of singledispatch to provide 'overrides' on 1st parameter type
import math
import scipy.special #type: ignore
print("\n@functools.singledispatch")
@functools.singledispatch
def generalized_factorial(obj):
raise ValueError()
@generalized_factorial.register(int)
def fact_i(n):
# print('fact_i')
return 1 if n < 2 else n*fact_i(n-1)
@generalized_factorial.register(float)
def fact_f(x):
# print('fact_f')
return math.gamma(x+1)
@generalized_factorial.register(complex)
def fact_c(x):
return scipy.special.gamma(x+1)
print('3! =', generalized_factorial(3))
print('3.5! =', generalized_factorial(3.5))
print('4! =', generalized_factorial(4))
print('(4+0.01j)! =', generalized_factorial(4+0.01j))
|
[
"FrenchBear38@outlook.com"
] |
FrenchBear38@outlook.com
|
1650ea5291fb078028b18bc7b1d33ce095acb1aa
|
3fce653f12af1b98b6b87e3d87e7d10483ef6fac
|
/hood/migrations/0003_auto_20181018_1012.py
|
f514eafff572aa923446e6983049b97236ec7ec9
|
[] |
no_license
|
IreriVIkki/hoodip
|
6dba100d75a69b0dd146205557cbaba4ec2555e2
|
610629d6a54920e66b7e30156b11887de7fe8db4
|
refs/heads/master
| 2020-04-01T14:31:31.729134
| 2018-10-23T11:27:31
| 2018-10-23T11:27:31
| 153,297,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-18 07:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hood', '0002_auto_20181018_0936'),
]
operations = [
migrations.RenameModel(
old_name='NeighbourHood',
new_name='NeighborHood',
),
]
|
[
"wambsviki@gmail.com"
] |
wambsviki@gmail.com
|
ee0d03e5ed294173e0df5f582729e2d0b61ef73f
|
17926b196d9db43816453d16f3da84de6664f2fd
|
/155_Mini_Stack.py
|
76a37a32f326092b1b0ac94945275b9c1a9c62f7
|
[] |
no_license
|
luchang59/leetcode
|
66690a3c9b28a5201a7be8cd0134142b48418adb
|
feab001b9291f6e57c44eeb0b625fdaa145d19b4
|
refs/heads/master
| 2020-05-28T06:57:20.667138
| 2019-09-20T18:18:11
| 2019-09-20T18:18:11
| 188,914,681
| 0
| 0
| null | 2019-05-27T22:17:44
| 2019-05-27T22:08:54
| null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
def push(self, x):
"""
:type x: int
:rtype: None
"""
curMin = self.getMin()
        if curMin is None or x < curMin:
curMin = x
self.stack.append((x, curMin))
def pop(self):
"""
:rtype: None
"""
self.stack.pop()
def top(self):
"""
:rtype: int
"""
return self.stack[-1][0] if self.stack else None
def getMin(self):
"""
:rtype: int
"""
return self.stack[-1][1] if self.stack else None
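# Quick usage sketch (added for illustration): every entry stores the value
# together with the running minimum, which keeps getMin() O(1).
if __name__ == '__main__':
    ms = MinStack()
    ms.push(5)
    ms.push(2)
    ms.push(7)
    assert ms.getMin() == 2 and ms.top() == 7
    ms.pop()
    ms.pop()
    assert ms.getMin() == 5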
|
[
"luchang1991@gmail.com"
] |
luchang1991@gmail.com
|
b07ebe4341d3f201e7ded401c8ae97e2d1385731
|
cbe264842df4eae3569b28ed4aae9489014ed23c
|
/python/coding_dojang/judge_regular_expression.py
|
5418ef520574bac18ef5149097b2d76c3df53c2e
|
[
"MIT"
] |
permissive
|
zeroam/TIL
|
31e176c2f4c3e1ef72b1155353690cc2f7160f96
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
refs/heads/master
| 2021-07-23T01:43:34.135033
| 2021-07-10T06:47:17
| 2021-07-10T06:47:17
| 167,952,375
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 617
|
py
|
import re
p = re.compile(r'^(http(s)?://)?[\w\-]+\.[\w\-]+[\w\-_.?=/&#:]+$')
urls = [
'https://www.example.com',
'http://www.example.com',
'www.example.com',
'example.com',
'http://blog.example.com',
'http://www.example.com/product',
'http://www.example.com/products?id=1&page=2',
'http://www.example.com#up',
'http://255.255.255.255',
'255.255.255.255',
'http://invalid.com/perl.cgi?key= | http://web-site.com/cgi-bin/perl.cgi?key1=value1&key2',
'http://www.site.com:8008'
]
for url in urls:
    print(p.match(url) is not None, end=' ')
print(p.match(url))
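# The same pattern, rewritten with re.VERBOSE for readability (an equivalent
# sketch, added for illustration):
p_verbose = re.compile(r"""
    ^(http(s)?://)?        # optional scheme
    [\w\-]+\.[\w\-]+       # host with at least one dot
    [\w\-_.?=/&#:]+$       # remaining path/query/fragment characters
""", re.VERBOSE)
assert p_verbose.match('https://www.example.com')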
|
[
"imdff0803@gmail.com"
] |
imdff0803@gmail.com
|
0d54a8ca3d2b786f788a93b14b7817b06777b682
|
b48764e6684ffbd73b0043dc889c013860642e8d
|
/1학기/area1.py
|
f237fe223ef10a6ce0fda088efe636035c0f451d
|
[] |
no_license
|
tanghee/Programming-Python-
|
c6d32a1e49d5c95c8359aeb8775cb52cc665167a
|
eb402357ad31638d867042e76af507bc6c67a0b4
|
refs/heads/master
| 2022-03-27T07:27:18.888660
| 2019-12-10T02:06:41
| 2019-12-10T02:06:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
width = 3
height = 5
print("가로 ", width, "세로", height, "인 삼각형의 넒이: ", 3*5*1/2)
print("가로 ", width, "세로", height, "인 삼각형의 넒이: ", 3*5*1/2)
|
[
"s2018w37@e-mirim.hs.kr"
] |
s2018w37@e-mirim.hs.kr
|
7454cf6efe86e121133d4f676780dde446ac0859
|
06fe8a3bb7971066a204be73731a9af3e67edbb9
|
/soccer/gameplay/tests/test_constants.py
|
1493f44ca313729a8e14841601d4ca525ae01788
|
[
"Apache-2.0"
] |
permissive
|
sgadgil6/robocup-software
|
130099715dafd80155bf07966f186f036280455a
|
2b647345f0cdcc50021558b5cccf109239d3e954
|
refs/heads/master
| 2020-12-26T04:38:28.196256
| 2015-09-29T23:33:58
| 2015-09-29T23:33:58
| 43,398,854
| 1
| 0
| null | 2015-09-29T22:49:27
| 2015-09-29T22:49:27
| null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
import unittest
import constants
import robocup
class TestConstants(unittest.TestCase):
def test_our_goal_zone(self):
# right in the center of the goal zone
in_zone = robocup.Point(0, constants.Field.PenaltyDist / 2.0)
out_zone = robocup.Point(0, constants.Field.Length / 2.0)
self.assertTrue(constants.Field.OurGoalZoneShape.contains_point(in_zone))
# self.assertFalse(constants.Field.OurGoalZoneShape.contains_point(out_zone))
|
[
"justbuchanan@gmail.com"
] |
justbuchanan@gmail.com
|
6599fda660a020fbc1972dae24d4e1cb898e6c27
|
466660115eafd99b72f81339d86c5bcbf4c7efb0
|
/codes/15/spectrum_50HzRepeat.py
|
b04979b971b4acfdd85218fcd1fe02f82e9fb818
|
[] |
no_license
|
CoryVegan/scipybook2
|
c2bb68c169c632ab389600034beb33ac921b0ba1
|
a8fd295c2f2d7ee18f351e5622ca7eeb4649ee50
|
refs/heads/master
| 2020-03-23T14:50:28.056482
| 2017-08-25T06:00:00
| 2018-06-02T14:18:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import pylab as pl
t = np.arange(0, 1.0, 1.0/8000)
x = np.sin(2*np.pi*50*t)[:512]
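# 512 samples at 8 kHz span 64 ms, i.e. 3.2 periods of the 50 Hz sine, so
# tiling the slice three times introduces phase jumps at the seams.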
pl.figure(figsize=(8,3))
pl.plot(np.hstack([x,x,x]))
pl.xlabel("取样点")
pl.subplots_adjust(bottom=0.15)
pl.show()
|
[
"qytang326@gmail.com"
] |
qytang326@gmail.com
|
9361603f524a0d1e30f0a17a54c5c6ff4b05b2a6
|
a5ad207961fddfb0bab8c7471b6f91b69865e0fc
|
/app/agis/models/enormal/menu.py
|
09ca2c47c0709f53c80e61db7cfffc14356895c6
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
yotech/agis
|
b2465fc340e366fbe0267c4000bb0ae728386399
|
4abbecd175337d4942ac133847ce8fc870670571
|
refs/heads/master
| 2020-12-15T09:10:58.076323
| 2016-05-10T14:36:54
| 2016-05-10T14:36:54
| 32,155,630
| 0
| 2
| null | 2016-05-10T14:36:54
| 2015-03-13T12:52:01
|
Python
|
UTF-8
|
Python
| false
| false
| 687
|
py
|
# -*- coding: utf-8 -*-
from gluon import *
from agiscore.gui.mic import Accion
if not request.ajax and request.controller != 'appadmin':
    # import only if needed
from agiscore.gui.escuela import escuela_menu
from agiscore.gui.unidad_organica import unidad_menu
from agiscore.gui.enormal import enormal_menu
    # build the menu in order
evento_id = request.args(0)
ev = db.evento(evento_id)
if ev is not None:
ano = db.ano_academico(ev.ano_academico_id)
unidad = db.unidad_organica(ano.unidad_organica_id)
response.menu += escuela_menu()
response.menu += unidad_menu(unidad.id)
response.menu += enormal_menu(ev.id)
|
[
"ybenitezf@gmail.com"
] |
ybenitezf@gmail.com
|
63f4970d203971e6b87ce1413f3580da08d7436c
|
5f9bdfc588331ef610ba01d7ef90c4f8a96fdc63
|
/plot.py
|
3d8ca3134805bf8f081d2405810a8936d42eb86c
|
[] |
no_license
|
imrehg/fuel
|
1c3c73c79f0aaf500a6024bd4fc32980987201c7
|
41c2afa3506172eab31ac1618e6ea3706277fef1
|
refs/heads/master
| 2016-09-10T09:58:26.424418
| 2011-05-05T06:10:21
| 2011-05-05T06:10:21
| 1,702,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,270
|
py
|
import numpy as np
import pylab as pl
from matplotlib.transforms import offset_copy
filename = "data.csv"
types = {'names' : ['code', 'name', 'gdp', 'ppp', 'oilout', 'oilin', 'price'],
'formats' : ['S30', 'S30', 'f4', 'f4', 'f4', 'f4', 'f4']
}
data = np.loadtxt(filename, delimiter=",", dtype=types)
fig = pl.figure(figsize=(10,10))
ax = pl.subplot(1,1,1)
transOffset = offset_copy(ax.transData, fig=fig,
x = 0.05, y=0.10, units='inches')
# for i in xrange(len(data['code'])):
# if data['price'][i] <= 0 or data['gdp'][i] <= 0:
# continue
# if data['oilout'][i] > data['oilin'][i]:
# fuel = False
# else:
# fuel = True
# symbol = "kx" if fuel else 'ko'
# pl.plot(np.log(data['gdp'][i]), data['price'][i], symbol)
# # pl.text(data[i,0], data[i,4], '%.1f' % (fuel), transform=transOffset)
# pl.text(np.log(data['gdp'][i]), data['price'][i], data['name'][i], transform=transOffset)
total = []
for i in xrange(len(data['code'])):
if data['price'][i] > 0:
total += [(data['code'][i], data['price'][i])]
total2 = sorted(total, key= lambda x: x[1])
for j, v in enumerate(total2):
pl.plot(j, v[1])
pl.text(j, v[1], v[0], transform=transOffset)
pl.show()
|
[
"imrehg@gmail.com"
] |
imrehg@gmail.com
|
ef3f4ca1f02f57a1d00a845f054e57b10411e8c5
|
25b914aecd6b0cb49294fdc4f2efcfdf5803cc36
|
/homeassistant/components/trafikverket_weatherstation/config_flow.py
|
103af1c7eb4157ad3f6eb2f593bd8fa981f47a36
|
[
"Apache-2.0"
] |
permissive
|
jason0x43/home-assistant
|
9114decaa8f7c2f1582f84e79dc06736b402b008
|
8bf6aba1cf44ee841de063755c935ea78040f399
|
refs/heads/dev
| 2023-03-04T01:14:10.257593
| 2022-01-01T12:11:56
| 2022-01-01T12:11:56
| 230,622,861
| 1
| 1
|
Apache-2.0
| 2023-02-22T06:15:07
| 2019-12-28T14:45:43
|
Python
|
UTF-8
|
Python
| false
| false
| 2,802
|
py
|
"""Adds config flow for Trafikverket Weather integration."""
from __future__ import annotations
from pytrafikverket.trafikverket_weather import TrafikverketWeather
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from .const import CONF_STATION, DOMAIN
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_STATION): cv.string,
}
)
class TVWeatherConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Trafikverket Weatherstation integration."""
VERSION = 1
entry: config_entries.ConfigEntry
async def validate_input(self, sensor_api: str, station: str) -> str:
"""Validate input from user input."""
web_session = async_get_clientsession(self.hass)
weather_api = TrafikverketWeather(web_session, sensor_api)
try:
await weather_api.async_get_weather(station)
except ValueError as err:
return str(err)
return "connected"
async def async_step_import(self, config: dict):
"""Import a configuration from config.yaml."""
self.context.update(
{"title_placeholders": {CONF_STATION: f"YAML import {DOMAIN}"}}
)
self._async_abort_entries_match({CONF_STATION: config[CONF_STATION]})
return await self.async_step_user(user_input=config)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
name = user_input[CONF_STATION]
api_key = user_input[CONF_API_KEY]
station = user_input[CONF_STATION]
validate = await self.validate_input(api_key, station)
if validate == "connected":
return self.async_create_entry(
title=name,
data={
CONF_API_KEY: api_key,
CONF_STATION: station,
},
)
if validate == "Source: Security, message: Invalid authentication":
errors["base"] = "invalid_auth"
elif validate == "Could not find a weather station with the specified name":
errors["base"] = "invalid_station"
elif validate == "Found multiple weather stations with the specified name":
errors["base"] = "more_stations"
else:
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors=errors,
)
|
[
"noreply@github.com"
] |
jason0x43.noreply@github.com
|
b464fca1bb2e46c78e2e37b913633e930aa13bb9
|
1b3fc35ada474601a76de3c2908524336d6ca420
|
/day10/my/Meizitu/Meizitu/start.py
|
9b9063bac0c96d2345c8dab199a5ff64631d28ad
|
[] |
no_license
|
dqsdatalabs/Internet-worm
|
db3677e65d11542887adcde7719b7652757a3e32
|
62f38f58b4fa7643c482077f5ae18fff6fd81915
|
refs/heads/master
| 2022-01-16T14:29:52.184528
| 2018-12-25T08:46:08
| 2018-12-25T08:46:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 177
|
py
|
from scrapy import cmdline
# cmdline.execute('scrapy crawl meinvtu'.split())
cmdline.execute('scrapy crawl meinvtu2'.split())
# cmdline.execute('scrapy crawl meinvtu3'.split())
|
[
"aaa1058169464@126.com"
] |
aaa1058169464@126.com
|
895f55c73c05abed86e355cd53adbf875d22d6f5
|
c46ef0ccf030cee783a75d549e3c9bc0810579ff
|
/tutorial-contents/405_DQN_Reinforcement_learning.py
|
20fa80cdeeb85f8daeac040cef09380718a56006
|
[
"MIT"
] |
permissive
|
cocodee/PyTorch-Tutorial
|
37827b3daa5de1dee6ca174161d948a1933d4453
|
a7b14b80913485735a3ee87da6998a511a1f1950
|
refs/heads/master
| 2021-07-19T16:01:13.106848
| 2017-10-26T22:06:10
| 2017-10-26T22:06:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,348
|
py
|
"""
View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
More about Reinforcement learning: https://morvanzhou.github.io/tutorials/machine-learning/reinforcement-learning/
Dependencies:
torch: 0.2
gym: 0.8.1
numpy
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import gym
# Hyper Parameters
BATCH_SIZE = 32
LR = 0.01 # learning rate
EPSILON = 0.9 # greedy policy
GAMMA = 0.9 # reward discount
TARGET_REPLACE_ITER = 100 # target update frequency
MEMORY_CAPACITY = 2000
env = gym.make('CartPole-v0')
env = env.unwrapped
N_ACTIONS = env.action_space.n
N_STATES = env.observation_space.shape[0]
class Net(nn.Module):
def __init__(self, ):
super(Net, self).__init__()
self.fc1 = nn.Linear(N_STATES, 10)
self.fc1.weight.data.normal_(0, 0.1) # initialization
self.out = nn.Linear(10, N_ACTIONS)
self.out.weight.data.normal_(0, 0.1) # initialization
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
actions_value = self.out(x)
return actions_value
class DQN(object):
def __init__(self):
self.eval_net, self.target_net = Net(), Net()
self.learn_step_counter = 0 # for target updating
self.memory_counter = 0 # for storing memory
self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2)) # initialize memory
self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
self.loss_func = nn.MSELoss()
def choose_action(self, x):
x = Variable(torch.unsqueeze(torch.FloatTensor(x), 0))
# input only one sample
if np.random.uniform() < EPSILON: # greedy
actions_value = self.eval_net.forward(x)
action = torch.max(actions_value, 1)[1].data.numpy()[0, 0] # return the argmax
else: # random
action = np.random.randint(0, N_ACTIONS)
return action
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, [a, r], s_))
# replace the old memory with new memory
index = self.memory_counter % MEMORY_CAPACITY
self.memory[index, :] = transition
self.memory_counter += 1
def learn(self):
# target parameter update
if self.learn_step_counter % TARGET_REPLACE_ITER == 0:
self.target_net.load_state_dict(self.eval_net.state_dict())
self.learn_step_counter += 1
# sample batch transitions
sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
b_memory = self.memory[sample_index, :]
b_s = Variable(torch.FloatTensor(b_memory[:, :N_STATES]))
b_a = Variable(torch.LongTensor(b_memory[:, N_STATES:N_STATES+1].astype(int)))
b_r = Variable(torch.FloatTensor(b_memory[:, N_STATES+1:N_STATES+2]))
b_s_ = Variable(torch.FloatTensor(b_memory[:, -N_STATES:]))
# q_eval w.r.t the action in experience
q_eval = self.eval_net(b_s).gather(1, b_a) # shape (batch, 1)
q_next = self.target_net(b_s_).detach() # detach from graph, don't backpropagate
q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1) # shape (batch, 1)
loss = self.loss_func(q_eval, q_target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
dqn = DQN()
print('\nCollecting experience...')
for i_episode in range(400):
s = env.reset()
ep_r = 0
while True:
env.render()
a = dqn.choose_action(s)
# take action
s_, r, done, info = env.step(a)
# modify the reward
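        # r1 grows as the cart stays near the track centre, r2 as the pole
        # stays upright; their sum replaces the environment's raw reward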
x, x_dot, theta, theta_dot = s_
r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
r = r1 + r2
dqn.store_transition(s, a, r, s_)
ep_r += r
if dqn.memory_counter > MEMORY_CAPACITY:
dqn.learn()
if done:
print('Ep: ', i_episode,
'| Ep_r: ', round(ep_r, 2))
if done:
break
s = s_
|
[
"mz@email.com"
] |
mz@email.com
|
3d2649b1dc0da59e87d9650fdc443d6ac3042872
|
521efcd158f4c69a686ed1c63dd8e4b0b68cc011
|
/tests/test_utils/timetables.py
|
838ecb5323451c89d32a0c27b6e248c5acbe51f3
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
coutureai/RaWorkflowOrchestrator
|
33fd8e253bfea2f9a82bb122ca79e8cf9dffb003
|
cd3ea2579dff7bbab0d6235fcdeba2bb9edfc01f
|
refs/heads/main
| 2022-10-01T06:24:18.560652
| 2021-12-29T04:52:56
| 2021-12-29T04:52:56
| 184,547,783
| 5
| 12
|
Apache-2.0
| 2022-11-04T00:02:55
| 2019-05-02T08:38:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,746
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow import settings
from airflow.timetables.base import Timetable
from airflow.timetables.interval import CronDataIntervalTimetable, DeltaDataIntervalTimetable
def cron_timetable(expr: str) -> CronDataIntervalTimetable:
return CronDataIntervalTimetable(expr, settings.TIMEZONE)
def delta_timetable(delta) -> DeltaDataIntervalTimetable:
return DeltaDataIntervalTimetable(delta)
class CustomSerializationTimetable(Timetable):
def __init__(self, value: str):
self.value = value
@classmethod
def deserialize(cls, data):
return cls(data["value"])
def __eq__(self, other) -> bool:
"""Only for testing purposes."""
if not isinstance(other, CustomSerializationTimetable):
return False
return self.value == other.value
def serialize(self):
return {"value": self.value}
@property
def summary(self):
return f"{type(self).__name__}({self.value!r})"
|
[
"noreply@github.com"
] |
coutureai.noreply@github.com
|
615d6cfeafcfe95a0b3e8aefee6f571baf69f697
|
dc9d2f036ef72f254db5d0ba9e4cc8dcd95aa4aa
|
/WebCrawler by Bucky.py
|
f19ac7e98901e43ff512090c967cf9b3627184c6
|
[] |
no_license
|
YhHoo/Python-Tutorials
|
d43171a21fb8d51271c66bea6f2566ff3effc832
|
9774dd5b3f0b9d0126d4a2bcbac347348e914b71
|
refs/heads/master
| 2021-09-03T21:23:22.099224
| 2018-01-12T03:33:23
| 2018-01-12T03:33:23
| 110,725,041
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
'''
THESE ARE BUCKY'S WEB CRAWLER EXAMPLES
'''
'''
PART 1: Fetch every href link in the <a> tags of the page
'''
import requests
from bs4 import BeautifulSoup
def trade_spider():
    url = 'http://www.ckmusic.com.my' # parent URL; remember: don't end it with '/'
source_code = requests.get(url) # connect to the web page in url, store all source into source_code
plain_text = source_code.text # convert the source code to just Text
soup = BeautifulSoup(plain_text, "html.parser") # convert it to beautifulSoup Object, in order to use web crawler
for link in soup.findAll('a', {'class': 'prodItemPic'}): # look for tag <a>, where class='prodItemPic'
href = url + link.get('href') # from Tag, only take the href item
print(href)
trade_spider()
input("Code by YH, Press Enter to Terminate =)")
|
[
"hooyuheng@gmail.com"
] |
hooyuheng@gmail.com
|
f246c19999b8be870b9e29f6507c314d5fea8821
|
5ffc3111779894e3ff161c21933f585acac36721
|
/2020F_hw6_submissions/shangstacy/StacyShangCh7P2.py
|
f668df6e9959d44525ae8e081e5c344848bf84ad
|
[] |
no_license
|
Eric-Wonbin-Sang/CS110Manager
|
ac9b8efa5179fdc240e60736d685b2e850447b39
|
31b594b91c8ccd740a7915fb982cc7e7bc280346
|
refs/heads/main
| 2023-02-09T13:44:27.628976
| 2021-01-04T08:25:29
| 2021-01-04T08:25:29
| 321,807,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,118
|
py
|
# StacyShangCh7P2.py
# CS 110 A HW 6
# Stacy Shang
# I pledge my honor that I have abided by the Stevens Honor System -Stacy
# This program accepts a date and checks whether or not the date is valid
def main():
print("This program checks the validity of an inputted date.")
date = input("Enter the date in the form of dd/mm/yy: ")
# dd, mm, yy = date.split("/")
# dd = int(dd)
# mm = int(mm)
# yy = int(yy)
    inputDate = date.split("/")
    dd = int(inputDate[0])
    mm = int(inputDate[1])
    yy = int(inputDate[2])
    if(mm==1 or mm==3 or mm==5 or mm==7 or mm==8 or mm==10 or mm==12):
maxim = 31
elif(mm==4 or mm==6 or mm==9 or mm==11):
maxim = 30
else:
maxim = 28
if(mm<1 or mm>12):
print("Date is invalid")
elif(dd<1 or dd>maxim):
print("Date is invalid")
elif(dd==maxim and mm!=12):
dd=1
        mm=mm+1
print("The date is:", dd,mm,yy)
elif(dd==31 and mm==12):
dd=1
mm=1
yy=yy+1
print("The date is:", dd,mm,yy)
else:
dd=dd+1
print("The date is:", dd,mm,yy)
main()
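# Example run (with the fixes above): entering 28/02/19 takes the dd == maxim
# branch, so the program prints "The date is: 1 3 19" (the day after the input).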
|
[
"eric.wonbin.sang@gmail.com"
] |
eric.wonbin.sang@gmail.com
|
32cfdd87226d303193c8392e399fa29e2acb19e7
|
00d7e9321d418a2d9a607fb9376b862119f2bd4e
|
/sandbox/demo_turtle_undo.py
|
931cafb0e9c9c9f5496dcd7d5da4abb55455705a
|
[
"MIT"
] |
permissive
|
baluneboy/pims
|
92b9b1f64ed658867186e44b92526867696e1923
|
5a07e02588b1b7c8ebf7458b10e81b8ecf84ad13
|
refs/heads/master
| 2021-11-16T01:55:39.223910
| 2021-08-13T15:19:48
| 2021-08-13T15:19:48
| 33,029,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
#!/usr/bin/env python
from turtle import *
for i in range(10):
forward(100)
left(90)
forward(10)
left(90)
forward(100)
right(90)
forward(10)
right(90)
for i in range(30):
undo()
|
[
"none"
] |
none
|
30a4f20cc66cd435aa5f1c61e2618945db9d0dca
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03775/s238849097.py
|
e1fbd653ac8d41a71208142afafc985c89f140a4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
import math
N = int(input())
ans = len(str(N))
for i in range(1, int(math.sqrt(N))+1):
if N % i == 0:
j = N // i
k = max(len(str(i)), len(str(j)))
ans = min(ans, k)
print(ans)
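# Worked example: for N = 10000 the loop reaches the divisor pair (100, 100),
# giving max(len('100'), len('100')) = 3, which beats len(str(10000)) = 5.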
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
2257eb1cd70d0cfccc996cfe65d9ed17c6593ca4
|
8a55bdec478d2fb48508deac13ca3aeeda46fa06
|
/contrib/devtools/symbol-check.py
|
4bcbde4f62eeb0412e726b862b311d0049b94f42
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
hideoussquid/aureus-core-gui
|
83b0525e1afa349e0834e1a3baed5534043cd689
|
ce075f2f0f9c99a344a1b0629cfd891526daac7b
|
refs/heads/master
| 2021-01-19T00:04:39.888184
| 2017-04-04T08:15:18
| 2017-04-04T08:15:18
| 87,142,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,191
|
py
|
#!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function, unicode_literals
import subprocess
import re
import sys
import os
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid&section=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid&section=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid&section=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used'
}
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
# Allowed NEEDED libraries
ALLOWED_LIBRARIES = {
# aureusd and aureus-qt
b'libgcc_s.so.1', # GCC base support
b'libc.so.6', # C library
b'libpthread.so.0', # threading
b'libanl.so.1', # DNS resolve
b'libm.so.6', # math library
b'librt.so.1', # real-time (clock)
b'ld-linux-x86-64.so.2', # 64-bit dynamic linker
b'ld-linux.so.2', # 32-bit dynamic linker
# aureus-qt only
b'libX11-xcb.so.1', # part of X11
b'libX11.so.6', # part of X11
b'libxcb.so.1', # part of X11
b'libfontconfig.so.1', # font support
b'libfreetype.so.6', # font parsing
b'libdl.so.2' # programming interface to dynamic linker
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + b'\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>7 and re.match(b'[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition(b'@')
is_import = line[6] == b'UND'
if version.startswith(b'@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
def check_version(max_versions, version):
if b'_' in version:
(lib, _, ver) = version.rpartition(b'_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split(b'.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
def read_libraries(filename):
p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
libraries = []
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>2 and tokens[1] == b'(NEEDED)':
match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:]))
if match:
libraries.append(match.group(1))
else:
raise ValueError('Unparseable (NEEDED) specification')
return libraries
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8')))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
if sym in IGNORE_EXPORTS:
continue
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8')))
retval = 1
# Check dependency libraries
for library_name in read_libraries(filename):
if library_name not in ALLOWED_LIBRARIES:
print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8')))
retval = 1
exit(retval)
|
[
"thesquid@mac.com"
] |
thesquid@mac.com
|
5012edbc0bc687552fcf3a1bbaf558b0a4f335ae
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.5_rd=0.5_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=47/params.py
|
cfa7029e7fd66ff63cb0e7d697a6e4c827251ce2
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
{'cpus': 4,
'duration': 30,
'final_util': '3.532381',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'GSN-EDF',
'trial': 47,
'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
0563ccfcace9c68878dcee8c2ae4a64f1e154969
|
493ee321c961421eaf1edc38a2de93556b14cc78
|
/py_scripts/sample_nets_discrete_weights.py
|
a8adc8e54817405bc3cf38f8eb056085061552af
|
[] |
no_license
|
guillefix/simpbias-tools
|
4127b7cb939b301074f30ed480b249e418d2eb2d
|
76ab7d1fc45b08ff33f61a39ac49d7df72b5f609
|
refs/heads/master
| 2020-05-18T12:53:51.388198
| 2019-05-01T14:46:14
| 2019-05-01T14:46:14
| 184,422,339
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,634
|
py
|
import tensorflow as tf
import numpy as np
# from sklearn.metrics import confusion_matrix
# import time
# from datetime import timedelta
import math
from collections import Counter
import pickle
import sys
np.set_printoptions(threshold=sys.maxsize)  # np.nan is rejected by modern NumPy
from KC_LZ import calc_KC
#(idx, N) = [int(ar) for ar in sys.argv[1:]]
idx = int(sys.argv[1])
input_dim=7
hidden_layer_dim=20
hidden_layer2_dim=20
hidden_layer_dim2 = hidden_layer2_dim
output_dim=1
## VARIABLE declarations
#for many paralell networks
from math import sqrt
W1=[]
b1=[]
W2=[]
b2=[]
W3=[]
b3=[]
variables = []
paral_nets = 5000
a=sqrt(3)*sqrt(2)
b=sqrt(3)
discrete_step=1
for i in range(paral_nets):
scope_name = "net"+str(i)
with tf.variable_scope(scope_name):
W1.append(tf.Variable(np.random.choice(np.arange(-10,10,discrete_step,dtype=np.float16),(input_dim,hidden_layer_dim))))
b1.append(tf.Variable(np.random.choice(np.arange(-10,10,discrete_step,dtype=np.float16),(hidden_layer_dim))))
W2.append(tf.Variable(np.random.choice(np.arange(-10,10,discrete_step,dtype=np.float16),(hidden_layer_dim,hidden_layer_dim2))))
b2.append(tf.Variable(np.random.choice(np.arange(-10,10,discrete_step,dtype=np.float16),(hidden_layer_dim2))))
# W2.append(tf.Variable(tf.random_uniform([hidden_layer_dim,output_dim],-a/sqrt(hidden_layer_dim),a/sqrt(hidden_layer_dim))))
# b2.append(tf.Variable(tf.random_uniform([output_dim],-a/sqrt(hidden_layer_dim),a/sqrt(hidden_layer_dim))))
W3.append(tf.Variable(np.random.choice(np.arange(-10,10,discrete_step,dtype=np.float16),(hidden_layer_dim2,output_dim))))
b3.append(tf.Variable(np.random.choice(np.arange(-10,10,discrete_step,dtype=np.float16),(output_dim))))
# W1.append(tf.Variable(tf.random_normal([input_dim,hidden_layer_dim],stddev=1/sqrt(input_dim))))
# b1.append(tf.Variable(tf.random_normal([hidden_layer_dim],stddev=1/sqrt(input_dim))))
# W2.append(tf.Variable(tf.random_normal([hidden_layer_dim,hidden_layer2_dim],stddev=1/sqrt(hidden_layer_dim))))
# b2.append(tf.Variable(tf.random_normal([hidden_layer2_dim],stddev=1/sqrt(hidden_layer_dim))))
# W3.append(tf.Variable(tf.random_normal([hidden_layer2_dim,output_dim],stddev=1/sqrt(hidden_layer2_dim))))
# b3.append(tf.Variable(tf.random_normal([output_dim],stddev=1/sqrt(hidden_layer2_dim))))
variables.append(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope_name))
x = tf.placeholder(tf.float16, shape=[None, input_dim], name='x')
## NETWORK construction
outputs = []
for i in range(paral_nets):
h = tf.matmul(x, W1[i]) + b1[i]
# h = tf.matmul(x, W1[i])
# h = tf.sign(h)
h = tf.nn.relu(h)
h2 = tf.matmul(h, W2[i]) + b2[i]
# h2 = tf.sign(h2)
h2 = tf.nn.relu(h2)
logits = tf.matmul(h2, W3[i]) + b3[i]
# logits = tf.matmul(h, W2[i]) + b2[i]
# logits = tf.matmul(h, W2[i])
o = tf.sign(logits)
# outputs.append((o+1)/2)
outputs.append(tf.reduce_join(tf.reduce_join(tf.as_string(tf.cast((o+1)//2,tf.int8)), 0),0))
session = tf.Session()
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
param_shape = []
val_placeholders = []
ops=[]
param_size=[]
for i,var in enumerate(train_vars):
param_shape.append(tuple(var.get_shape().as_list()))
param_size.append(np.prod(var.get_shape().as_list()))
val_placeholders.append(tf.placeholder(tf.float16, shape = param_shape[i], name="val_"+str(i)))
ops.append(var.assign_add(val_placeholders[i]))
def get_param_vec():
params = [p.flatten() for p in session.run(train_vars)]
return np.concatenate(params)
def update_params(params_change):
j = 0
change_feed_dict = {}
for i,var in enumerate(train_vars):
#print(i)
val_change = params_change[j:j+param_size[i]]
j += param_size[i]
val_change = val_change.reshape(param_shape[i])
change_feed_dict["val_"+str(i)+":0"]=val_change
session.run(ops,feed_dict=change_feed_dict)
inputs = [[float(xx) for xx in "{0:07b}".format(i)] for i in range(2**7)]
# N=10
#cnt = Counter()
#weights = {}
#for i in range(N):
# if i%(N/100) == 0:
session.run(tf.global_variables_initializer())
fs = session.run(outputs, feed_dict={x:inputs})
#phenos = [[] for i in range(paral_nets)]
phenos = [fs[i] for i in range(paral_nets)]
#varss = session.run(variables,feed_dict={x:inputs})
#for i,f in enumerate(fs):
# if f in weights:
# weights[f].append(varss[i])
# else:
# weights[f]=[varss[i]]
robs=[0.0 for x in phenos]
cnt = Counter(fs)
#phenos = [x+[fs[i]] for i,x in enumerate(phenos)]
param_num=input_dim*hidden_layer_dim + hidden_layer_dim + hidden_layer_dim*hidden_layer2_dim + hidden_layer2_dim + hidden_layer2_dim*output_dim + output_dim
change_vec_ind = np.zeros(param_num)
for i in range(param_num):
print(str(i+1)+"/"+str(param_num))
change_vec_ind[i] = discrete_step
change_vec = np.concatenate([change_vec_ind for j in range(paral_nets)])
update_params(change_vec)
#session.run(tf.global_variables_initializer())
fs = session.run(outputs, feed_dict={x:inputs})
#phenos = [x+[fs[j]] for j,x in enumerate(phenos)]
robs = [xx+(1.0 if fs[j]==phenos[j] else 0.0) for j,xx in enumerate(robs)]
change_vec_ind[i] = -discrete_step
change_vec = np.concatenate([change_vec_ind for j in range(paral_nets)])
update_params(change_vec)
change_vec_ind[i] = 0
#robs=[]
freqs=[]
for i,p in enumerate(phenos):
robs[i] = robs[i]/param_num
freqs.append(cnt[p])
pickle.dump(cnt, open( str(idx)+"_cnt_"+str(paral_nets)+"_"+str(input_dim)+"_"+str(hidden_layer_dim)+"_"+str(hidden_layer2_dim)+"_"+str(output_dim)+"_"+str(discrete_step)+"_relu.p", "wb" ), -1)
pickle.dump(phenos, open( str(idx)+"_phenos_"+str(paral_nets)+"_"+str(input_dim)+"_"+str(hidden_layer_dim)+"_"+str(hidden_layer2_dim)+"_"+str(output_dim)+"_"+str(discrete_step)+"_relu.p", "wb" ), -1)
pickle.dump(robs, open( str(idx)+"_robs_"+str(paral_nets)+"_"+str(input_dim)+"_"+str(hidden_layer_dim)+"_"+str(hidden_layer2_dim)+"_"+str(output_dim)+"_"+str(discrete_step)+"_relu.p", "wb" ), -1)
pickle.dump(freqs, open( str(idx)+"_freqs"+str(paral_nets)+"_"+str(input_dim)+"_"+str(hidden_layer_dim)+"_"+str(hidden_layer2_dim)+"_"+str(output_dim)+"_"+str(discrete_step)+"_relu.p", "wb" ), -1)
#pickle.dump(weights, open( str(idx)+"_weights_"+str(N*paral_nets)+"_"+str(input_dim)+"_"+str(hidden_layer_dim)+"_"+str(hidden_layer2_dim)+"_"+"sallinputs_relu.p", "wb" ), -1)
#with open(str(idx)+"_comp_freq_7_20_20_1_relu", "w") as f:
# for fun,val in cnt.most_common():
# f.write(str(calc_KC(str(fun)))+"\t"+str(val)+"\n")
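# What gets saved (added note): for each of the paral_nets sampled networks,
# `phenos` holds its output bit-string over all 2**input_dim inputs, `robs` the
# fraction of the param_num single-weight +discrete_step perturbations that left
# that string unchanged, and `freqs` how many sampled networks shared the string.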
|
[
"guillefix@gmail.com"
] |
guillefix@gmail.com
|
70715af74016fb193e1dc17cc87fb1632d93526a
|
cedf3c65061222b3099852f27dde39ff0dfe492b
|
/blog/migrations/0006_auto_20200101_2255.py
|
41d4c082f868854719fb951f9bc736479520f459
|
[] |
no_license
|
smrkhan123/myblog
|
1fd87b2c528cb2edccaf3f60c66a8c298774447a
|
8d0c33fcc74c582f21a32150d3460cce62ad6dd2
|
refs/heads/master
| 2020-12-08T21:50:32.352065
| 2020-01-10T18:23:56
| 2020-01-10T18:23:56
| 233,105,398
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
# Generated by Django 2.2.4 on 2020-01-01 17:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_comment'),
]
operations = [
migrations.RenameField(
model_name='comment',
old_name='comment',
new_name='comments',
),
]
|
[
"sk862147@gmail.com"
] |
sk862147@gmail.com
|
146c641d33adeb6defa68ddc3f71c766beb13d5a
|
fa571a842f04bcbc77ff203a5ed6f6ee776eed6d
|
/codes/tuple10.py
|
8744bc79540b217dd7df2fc0a2637049f35feee4
|
[] |
no_license
|
krishna-rawat-hp/PythonProgramming
|
b25c0916475724e6d2de4b7d59cf40b5b5e8330b
|
d24df17ca6aff9271c44ef8c73b80c00cd065ded
|
refs/heads/master
| 2023-03-11T19:24:34.529059
| 2021-02-27T14:09:22
| 2021-02-27T14:09:22
| 282,611,873
| 0
| 0
| null | 2020-07-26T10:38:54
| 2020-07-26T08:52:49
| null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
# Python tuple Built-in function
tup1 = (1,2,3,4,5)
# Example-1 length of tuple
print("Length of tuple: ",len(tup1))
# Example-2 min value in tuple
print("Minimum value in tuple: ",min(tup1))
# Example-3 Max value in tuple
print("Maximum value in tuple: ",max(tup1))
# Example-4 tuple() method in python tuples
str = "Krishna"
tupstr = tuple(str)
print("simple string: ",type(str), str)
print("tuple of string: ", type(tupstr), tupstr)
|
[
"rawatkrishnakant8319@gmail.com"
] |
rawatkrishnakant8319@gmail.com
|
f9ab128fb82107ad73c13d8bff645ad4cfd837d4
|
2f63688febd21dc3ae6b19abfa79ad313c820154
|
/AlgoExpert/coding_interview_questions/Dynamic_Programming/Max_Subset_Sum_No_Adjacent.py
|
35e8fc100f69daaeac1de7c8a7e1b7ba9ce4c161
|
[] |
no_license
|
novayo/LeetCode
|
cadd03587ee4ed6e35f60294070165afc1539ac8
|
54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7
|
refs/heads/master
| 2023-08-14T00:35:15.528520
| 2023-07-30T05:56:05
| 2023-07-30T05:56:05
| 200,248,146
| 8
| 1
| null | 2022-11-19T04:37:54
| 2019-08-02T14:24:19
|
Python
|
UTF-8
|
Python
| false
| false
| 387
|
py
|
'''
main idea: dp
time comp: O(n)
space comp: O(1)
- where n is the length of the input array
'''
def maxSubsetSumNoAdjacent(array):
# Write your code here.
if not array:
return 0
if len(array) <= 2:
return max(array)
a = array[0]
b = array[1]
c = a + array[2]
for i in range(3, len(array)):
d = array[i] + max(a, b)
a = b
b = c
c = d
return max(a, b, c)
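# Example run (added sketch): adjacent entries cannot both be chosen, so the
# best subset of [75, 105, 120, 75, 90, 135] is 75 + 120 + 135 = 330.
if __name__ == '__main__':
    print(maxSubsetSumNoAdjacent([75, 105, 120, 75, 90, 135]))  # 330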
|
[
"eric_shih@trendmicro.com"
] |
eric_shih@trendmicro.com
|
6098d1665fc44c5b9392cbb7fc9e9de0a2f639aa
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/python/keras/engine/base_layer_utils.py
|
9ff1ab45c3de06a9b41edbb8de2197633c8c4ac7
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a687562a5029ffbfb9a948ea45a12f73bca931230a551f19634b253752a45fe1
size 21452
|
[
"github@cuba12345"
] |
github@cuba12345
|
817c752e00db2148a4dd5635779329f98737565d
|
52a3beeb07ad326115084a47a9e698efbaec054b
|
/horizon/.venv/lib/python2.7/site-packages/openstackclient/identity/v3/policy.py
|
74a783b06a575e12b7a843a14ab6debc95d39f85
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/sample_scripts
|
3dade0710ecdc8f9251dc60164747830f8de6877
|
f9edce63c0a4d636f672702153662bd77bfd400d
|
refs/heads/master
| 2022-11-17T19:19:34.210886
| 2018-06-11T04:14:27
| 2018-06-11T04:14:27
| 282,088,840
| 0
| 0
| null | 2020-07-24T00:57:31
| 2020-07-24T00:57:31
| null |
UTF-8
|
Python
| false
| false
| 5,046
|
py
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v3 Policy action implementations"""
import six
import sys
from openstackclient.common import command
from openstackclient.common import utils
from openstackclient.i18n import _
class CreatePolicy(command.ShowOne):
"""Create new policy"""
def get_parser(self, prog_name):
parser = super(CreatePolicy, self).get_parser(prog_name)
parser.add_argument(
'--type',
metavar='<type>',
default="application/json",
help=_('New MIME type of the policy rules file '
'(defaults to application/json)'),
)
parser.add_argument(
'rules',
metavar='<filename>',
help=_('New serialized policy rules file'),
)
return parser
def take_action(self, parsed_args):
blob = utils.read_blob_file_contents(parsed_args.rules)
identity_client = self.app.client_manager.identity
policy = identity_client.policies.create(
blob=blob, type=parsed_args.type
)
policy._info.pop('links')
policy._info.update({'rules': policy._info.pop('blob')})
return zip(*sorted(six.iteritems(policy._info)))
class DeletePolicy(command.Command):
"""Delete policy"""
def get_parser(self, prog_name):
parser = super(DeletePolicy, self).get_parser(prog_name)
parser.add_argument(
'policy',
metavar='<policy>',
help=_('Policy to delete'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
identity_client.policies.delete(parsed_args.policy)
class ListPolicy(command.Lister):
"""List policies"""
def get_parser(self, prog_name):
parser = super(ListPolicy, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_('List additional fields in output'),
)
return parser
def take_action(self, parsed_args):
if parsed_args.long:
columns = ('ID', 'Type', 'Blob')
column_headers = ('ID', 'Type', 'Rules')
else:
columns = ('ID', 'Type')
column_headers = columns
data = self.app.client_manager.identity.policies.list()
return (column_headers,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class SetPolicy(command.Command):
"""Set policy properties"""
def get_parser(self, prog_name):
parser = super(SetPolicy, self).get_parser(prog_name)
parser.add_argument(
'policy',
metavar='<policy>',
help=_('Policy to modify'),
)
parser.add_argument(
'--type',
metavar='<type>',
help=_('New MIME type of the policy rules file'),
)
parser.add_argument(
'--rules',
metavar='<filename>',
help=_('New serialized policy rules file'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
blob = None
if parsed_args.rules:
blob = utils.read_blob_file_contents(parsed_args.rules)
kwargs = {}
if blob:
kwargs['blob'] = blob
if parsed_args.type:
kwargs['type'] = parsed_args.type
if not kwargs:
sys.stdout.write(_('Policy not updated, no arguments present\n'))
return
identity_client.policies.update(parsed_args.policy, **kwargs)
class ShowPolicy(command.ShowOne):
"""Display policy details"""
def get_parser(self, prog_name):
parser = super(ShowPolicy, self).get_parser(prog_name)
parser.add_argument(
'policy',
metavar='<policy>',
help=_('Policy to display'),
)
return parser
def take_action(self, parsed_args):
identity_client = self.app.client_manager.identity
policy = utils.find_resource(identity_client.policies,
parsed_args.policy)
policy._info.pop('links')
policy._info.update({'rules': policy._info.pop('blob')})
return zip(*sorted(six.iteritems(policy._info)))
|
[
"Suhaib.Chishti@exponential.com"
] |
Suhaib.Chishti@exponential.com
|
f4476ad1cff0d97701afa2544924788af2b900f8
|
fb909b0716f62ae118afa7d505cbcbd28f62bc63
|
/main/migrations/0066_auto_20200911_1141.py
|
1d62edbe74183d9bf7fcc2d790cd66555eadc459
|
[] |
no_license
|
dkalola/JustAsk-Final
|
a5b951462cd3c88eb84320bb8fcf10c32f959090
|
c2e7c2ffae4d3c2d870d5ba5348a6bae62db5319
|
refs/heads/main
| 2023-05-24T16:02:17.425251
| 2021-06-16T19:33:52
| 2021-06-16T19:33:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,049
|
py
|
# Generated by Django 3.1.1 on 2020-09-11 11:41
import datetime
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('main', '0065_auto_20200906_1119'),
]
operations = [
migrations.AlterField(
model_name='ebook',
name='Email',
field=models.EmailField(blank=True, default='', max_length=254, verbose_name='Email'),
),
migrations.AlterField(
model_name='ebook',
name='price',
field=models.IntegerField(blank=True, default=0, verbose_name='Rental Price (Rs 450 min)'),
),
migrations.AlterField(
model_name='ebook',
name='price2',
field=models.IntegerField(blank=True, default=0, verbose_name='Buy Price (Rs 650 min)'),
),
migrations.AlterField(
model_name='paper',
name='Date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 11, 11, 41, 44, 41629), null=True, verbose_name='Date Of Paper'),
),
migrations.AlterField(
model_name='question',
name='qid',
field=models.CharField(default='76GSJ6O8', max_length=8, unique=True, verbose_name='Question ID'),
),
migrations.AlterField(
model_name='question',
name='question',
field=tinymce.models.HTMLField(verbose_name='Question'),
),
migrations.AlterField(
model_name='student',
name='EndDate',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 11, 11, 41, 44, 34024), null=True, verbose_name='End Date of Subscription'),
),
migrations.AlterField(
model_name='student',
name='StartDate',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 11, 11, 41, 44, 33988), null=True, verbose_name='Start Date of Subscription'),
),
]
|
[
"divyanshukalola88@gmail.com"
] |
divyanshukalola88@gmail.com
|
1863ed33c75a4114a986b15983e8039d257910f5
|
a80943c82d8723e49f1f88cec90a41051c54b949
|
/chloe/plots.py
|
916fcbf5d1cd5bc6274cbbab0cf77c197c9302fa
|
[] |
no_license
|
munozchris/jacc
|
decaebec6ed5da0305c16a252138cc79ba0aafaa
|
f6ea8316c69a8b30f4d77f0f14ae4a9f6613c584
|
refs/heads/master
| 2021-01-11T17:11:40.468185
| 2017-03-14T20:40:12
| 2017-03-14T20:40:12
| 79,737,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,349
|
py
|
import sqlite3
import matplotlib.pyplot as plt
import numpy as np
# Helper functions to determine if a departmnent is in a table:
def is_dept_in_ex(c, dept):
depts_in_ex = c.execute("SELECT DISTINCT Dept FROM e_xTA;").fetchall()
depts_in_ex = [entry[0] for entry in depts_in_ex]
if dept in depts_in_ex:
return True
else:
return False
def is_dept_in_eo(c, dept):
depts_in_eo = c.execute("SELECT DISTINCT Dept FROM e_oTA;").fetchall()
depts_in_eo = [entry[0] for entry in depts_in_eo]
if dept in depts_in_eo:
return True
else:
return False
def is_dept_in_bio(c, dept):
depts_in_bio = c.execute("SELECT DISTINCT Dept FROM e_bio;").fetchall()
depts_in_bio = [entry[0] for entry in depts_in_bio]
if dept in depts_in_bio:
return True
else:
return False
def is_dept_in_lang(c, dept):
depts_in_lang = c.execute("SELECT DISTINCT Dept FROM e_lang;").fetchall()
depts_in_lang = [entry[0] for entry in depts_in_lang]
if dept in depts_in_lang:
return True
else:
return False
def assign_x_values_to_dates(date):
'''
given a date as a string, give it a numerical
value to make graphing easier
'''
first_digit = float(date[-1:]) - 2
if date[:6] == "Winter":
decimal = 0.0
elif date[:6] == "Spring":
decimal = 0.25
elif date[:6] == "Summer":
decimal = 0.5
elif date[:6] == "Autumn":
decimal = 0.75
x_value = first_digit+decimal
return x_value
# GIVEN A DEPARTMENT, MAKE A BAR CHART OF THE AVERAGE HOURS SPENT PER WEEK
# FOR EACH CLASS IN THAT DEPARTMENT
def get_all_hours(dept):
conn = sqlite3.connect("../jae/eval.db")
c = conn.cursor()
hours = []
course_nums = []
if is_dept_in_ex(c, dept):
query = "SELECT CourseNum, AVG(MedHrs) FROM e_xTA WHERE Dept = ? GROUP BY CourseNum"
data = (dept,)
results1 = c.execute(query, data)
for row in results1:
hours.append(row[1])
course_nums.append(row[0])
if is_dept_in_eo(c, dept):
query = "SELECT CourseNum, AVG(MedHrs) FROM e_oTA WHERE Dept = ? GROUP BY CourseNum"
data = (dept,)
results2 = c.execute(query, data)
for row in results2:
hours.append(row[1])
course_nums.append(row[0])
if is_dept_in_bio(c, dept):
query = "SELECT CourseNum, AVG(MedHrs) FROM e_bio WHERE Dept = ? GROUP BY CourseNum"
data = (dept,)
results3 = c.execute(query, data)
for row in results3:
hours.append(row[1])
course_nums.append(row[0])
if is_dept_in_lang(c, dept):
query = "SELECT CourseNum, AVG(MedHrs) FROM e_xlang WHERE Dept = ? GROUP BY CourseNum"
data = (dept,)
results4 = c.execute(query, data)
for row in results4:
hours.append(row[1])
course_nums.append(row[0])
return course_nums, hours
def make_dept_plot(dept):
course_nums, hours = get_all_hours(dept)
N = len(course_nums)
ind = np.arange(N)
width = 1.0
fig, ax = plt.subplots()
rects = ax.bar(ind, hours, width, color='b')
ax.set_ylabel('Average Hours Spent Per Class')
ax.set_title('Average Hours Spent in Each Class in the '+dept+' Department')
ax.set_xticks(ind+width/2)
ax.set_xticklabels(course_nums, rotation=45)
plt.show()
# PLOT AVERAGE HOURS SPENT FOR EACH DEPARTMENT
def plot_all_depts():
conn = sqlite3.connect("../jae/eval.db")
c = conn.cursor()
dept_hour_dict = {}
tables = ["e_xTA", "e_oTA", "e_lang", "e_bio"]
for table in tables:
query = "SELECT Dept, AVG(MedHrs), SUM(NumResponses) FROM "+table+" GROUP BY Dept"
results = c.execute(query)
for dept, hour, responses in results:
if dept not in dept_hour_dict:
dept_hour_dict[dept] = [hour, responses]
else:
total_responses = dept_hour_dict[dept][1] + responses
dept_hour_dict[dept][0] = (dept_hour_dict[dept][1] * dept_hour_dict[dept][0] +
hour * responses) / total_responses
dept_hour_dict[dept][1] = total_responses
initial_list = dept_hour_dict.items()
departments = [value[0] for value in initial_list]
hours = [value[1][0] for value in initial_list]
N = len(departments)
ind = np.arange(N)
width = 1.0
fig, ax = plt.subplots()
rects = ax.bar(ind, hours, width, color='b')
ax.set_ylabel('Average Hours Spent Per Department')
ax.set_title('Average Hours Spent in Each Department')
ax.set_xticks(ind+width/2)
ax.set_xticklabels(departments, rotation=45)
plt.show()
def plot_hours_over_time(dept, coursenum):
conn = sqlite3.connect("../jae/eval.db")
c = conn.cursor()
min_hours = []
max_hours = []
med_hours = []
dates = []
x_values = []
    # Fixed (the bare function names were always truthy, so the last
    # query silently won every time): call the helpers with their arguments.
    if is_dept_in_ex(c, dept):
        query = "SELECT CourseSection, MinHrs, MedHrs, MaxHrs FROM e_xTA WHERE Dept = ? AND CourseNum = ?;"
    if is_dept_in_eo(c, dept):
        query = "SELECT CourseSection, MinHrs, MedHrs, MaxHrs FROM e_oTA WHERE Dept = ? AND CourseNum = ?;"
    if is_dept_in_bio(c, dept):
        query = "SELECT CourseSection, MinHrs, MedHrs, MaxHrs FROM e_bio WHERE Dept = ? AND CourseNum = ?;"
    if is_dept_in_lang(c, dept):
        query = "SELECT CourseSection, MinHrs, MedHrs, MaxHrs FROM e_lang WHERE Dept = ? AND CourseNum = ?;"
data = (dept, coursenum)
results = c.execute(query, data).fetchall()
for info, min_hrs, med_hrs, max_hrs in results:
year = str(info[-4:])
quarter = info[-11:][:-5]
date = quarter+", "+year
min_hours.append(min_hrs)
max_hours.append(max_hrs)
med_hours.append(med_hrs)
dates.append(date)
x_values.append(assign_x_values_to_dates(date))
fig, ax = plt.subplots()
ax.scatter(x_values, med_hours, color='r')
ax.scatter(x_values, min_hours, color='b')
ax.scatter(x_values, max_hours, color='g')
ax.get_xaxis().set_ticks([])
ax.set_xlabel("2011 through 2016")
ax.set_ylabel("Max, Min, and Average Hours Spent Per Week")
ax.set_title("Hours Spent Per Week over Time")
plt.show()
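# Example calls (added sketch; the department and course codes are hypothetical
# and depend on what ../jae/eval.db actually contains):
# make_dept_plot("MATH")
# plot_all_depts()
# plot_hours_over_time("MATH", "15100")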
|
[
"amr@cs.uchicago.edu"
] |
amr@cs.uchicago.edu
|
70524268a8513ee28ba8c529abac584e1da23674
|
c5c3ee2ac4393e7bdbf61f32677221bef5523973
|
/src/jk_asyncio_logging/AsyncioMulticastLogger.py
|
268925e318a8be94b847fbb09df74c97c6f773e7
|
[
"Apache-2.0"
] |
permissive
|
jkpubsrc/python-module-jk-asyncio-logging
|
249960382c61353bc0809b77e30312779d3482f2
|
97b6ca79f8f11353c4b554875e353535d1bbf6fc
|
refs/heads/master
| 2022-10-02T03:10:18.771557
| 2020-01-20T18:38:41
| 2020-01-20T18:38:41
| 235,171,209
| 0
| 1
| null | 2022-09-03T13:55:36
| 2020-01-20T18:37:53
|
Python
|
UTF-8
|
Python
| false
| false
| 863
|
py
|
import jk_logging
from .AsyncioLogWrapper import AsyncioLogWrapper
class AsyncioMulticastLogger(AsyncioLogWrapper):
@staticmethod
def create(*argv):
loggers = []
for l in argv:
if isinstance(l, AsyncioLogWrapper):
loggers.append(l._l)
else:
assert isinstance(l, jk_logging.AbstractLogger)
loggers.append(l)
return AsyncioMulticastLogger(jk_logging.MulticastLogger.create(*loggers))
#
def addLogger(self, logger):
if isinstance(logger, AsyncioLogWrapper):
logger = logger._l
assert isinstance(logger, jk_logging.AbstractLogger)
self._l.addLogger(logger)
#
def removeLogger(self, logger):
if isinstance(logger, AsyncioLogWrapper):
logger = logger._l
assert isinstance(logger, jk_logging.AbstractLogger)
self._l.removeLogger(logger)
#
def removeAllLoggers(self):
self._l.removeAllLoggers()
#
#
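# Usage sketch (added; loggerA/loggerB/loggerC stand for any concrete jk_logging
# or AsyncioLogWrapper instances -- hypothetical names, not part of this module):
# log = AsyncioMulticastLogger.create(loggerA, loggerB)
# log.addLogger(loggerC)   # every record now fans out to all three loggers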
|
[
"pubsrc@binary-overflow.de"
] |
pubsrc@binary-overflow.de
|
9822128d048432a0a887770ad6757bcefc3d287d
|
7b6313d1c4e0e8a5bf34fc8ac163ad446bc69354
|
/python/[hackerrank]Minimum Height Triangle.py
|
7f3143c59379db448635a1872d49f9dacbc86239
|
[] |
no_license
|
menuka-maharjan/competitive_programming
|
c6032ae3ddcbc974e0e62744989a2aefa30864b2
|
22d0cea0f96d8bd6dc4d81b146ba20ea627022dd
|
refs/heads/master
| 2023-05-01T05:23:09.641733
| 2021-05-23T16:22:21
| 2021-05-23T16:22:21
| 332,250,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
#!/bin/python3
import sys
import math
def lowestTriangle(base, area):
x=math.ceil((2*area)/base)
return x
# Complete this function
base, area = input().strip().split(' ')
base, area = [int(base), int(area)]
height = lowestTriangle(base, area)
print(height)
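# Worked example (added): base=2, area=2 gives h = ceil(2*2/2) = 2, the smallest
# integer height such that base*h/2 >= area.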
|
[
"maharjanmenuka8@gmail.com"
] |
maharjanmenuka8@gmail.com
|
097070139417fc16e5f2a0c6fdef23fc212ec909
|
4f906856e07f82f5d14ddabfd3c00c491d4ce8c8
|
/diagonal.py
|
db6d178b8c7d3c44671efdabeb8c983b76e18130
|
[] |
no_license
|
SoliareofAstora/NumericMethods
|
d4ce4d6559b9f0fbad72082ca2f2197ea3c1d349
|
6696a07d0ae45a6f18a44c1257f5631ab78bd859
|
refs/heads/master
| 2021-09-05T11:10:18.370598
| 2018-01-26T20:05:56
| 2018-01-26T20:05:56
| 112,853,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 2 21:02:09 2017
@author: SoliareOfAstora
"""
import numpy as np
class Diagonal:
x = np.array([])
row = 0
def setNumbers(self, rownumber, vector):
self.x = vector
self.row = rownumber
def getNumber(self, rownumber):
temp = rownumber + self.row
if temp >= 0:
if temp < np.size(self.x):
return self.x[temp]
else:
return 0
else:
return 0
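# Minimal usage sketch (added; not part of the original file):
# d = Diagonal()
# d.setNumbers(1, np.array([1.0, 2.0, 3.0]))  # diagonal stored with offset +1
# d.getNumber(0)   # -> 2.0  (index 0 + offset 1)
# d.getNumber(-2)  # -> 0    (falls outside the stored vector)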
|
[
"piotr1kucharski@gmail.com"
] |
piotr1kucharski@gmail.com
|
b3ad9085d450b0be9e16f6a891885dfb9b831d08
|
d9be34d92ec5bfec5756d5310c2e34226d726cb4
|
/nn/mnist_loader.py
|
611bbf70f967542ec27724a4dfa39e3107c7a9a6
|
[] |
no_license
|
Oldpan/manim
|
43119e4cf0b2d7c17affd66d1f64ce7a6c3bce81
|
ac079f182a977bf0d830ab7647971b67cf9e5160
|
refs/heads/master
| 2021-07-20T11:23:45.752896
| 2017-10-27T22:12:29
| 2017-10-27T22:12:29
| 108,652,804
| 1
| 0
| null | 2017-10-28T13:47:39
| 2017-10-28T13:47:39
| null |
UTF-8
|
Python
| false
| false
| 3,532
|
py
|
"""
mnist_loader
~~~~~~~~~~~~
A library to load the MNIST image data. For details of the data
structures that are returned, see the doc strings for ``load_data``
and ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the
function usually called by our neural network code.
"""
#### Libraries
# Standard library
import cPickle
import gzip
# Third-party libraries
import numpy as np
def load_data():
"""Return the MNIST data as a tuple containing the training data,
the validation data, and the test data.
The ``training_data`` is returned as a tuple with two entries.
The first entry contains the actual training images. This is a
numpy ndarray with 50,000 entries. Each entry is, in turn, a
numpy ndarray with 784 values, representing the 28 * 28 = 784
pixels in a single MNIST image.
The second entry in the ``training_data`` tuple is a numpy ndarray
containing 50,000 entries. Those entries are just the digit
values (0...9) for the corresponding images contained in the first
entry of the tuple.
The ``validation_data`` and ``test_data`` are similar, except
each contains only 10,000 images.
This is a nice data format, but for use in neural networks it's
helpful to modify the format of the ``training_data`` a little.
That's done in the wrapper function ``load_data_wrapper()``, see
below.
"""
f = gzip.open('/Users/grant/cs/neural-networks-and-deep-learning/data/mnist.pkl.gz', 'rb')
training_data, validation_data, test_data = cPickle.load(f)
f.close()
return (training_data, validation_data, test_data)
def load_data_wrapper():
"""Return a tuple containing ``(training_data, validation_data,
test_data)``. Based on ``load_data``, but the format is more
convenient for use in our implementation of neural networks.
In particular, ``training_data`` is a list containing 50,000
2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray
containing the input image. ``y`` is a 10-dimensional
numpy.ndarray representing the unit vector corresponding to the
correct digit for ``x``.
``validation_data`` and ``test_data`` are lists containing 10,000
2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional
numpy.ndarry containing the input image, and ``y`` is the
corresponding classification, i.e., the digit values (integers)
corresponding to ``x``.
Obviously, this means we're using slightly different formats for
the training data and the validation / test data. These formats
turn out to be the most convenient for use in our neural network
code."""
tr_d, va_d, te_d = load_data()
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
training_data = zip(training_inputs, training_results)
validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]
validation_data = zip(validation_inputs, va_d[1])
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = zip(test_inputs, te_d[1])
return (training_data, validation_data, test_data)
def vectorized_result(j):
"""Return a 10-dimensional unit vector with a 1.0 in the jth
position and zeroes elsewhere. This is used to convert a digit
(0...9) into a corresponding desired output from the neural
network."""
e = np.zeros((10, 1))
e[j] = 1.0
return e
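# Example (added sketch; Python 2, matching the cPickle import above):
# training_data, validation_data, test_data = load_data_wrapper()
# x, y = training_data[0]        # x: (784, 1) ndarray, y: (10, 1) one-hot vector
# print vectorized_result(3).T   # [[ 0.  0.  0.  1.  0.  0.  0.  0.  0.  0.]]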
|
[
"grantsanderson7@gmail.com"
] |
grantsanderson7@gmail.com
|
229bde7a28f796c2b05d06510b721e757b2aa411
|
1fbf09a5127a87434c8cfe7131f4b5879966cf04
|
/web/dashboard/controller/project.py
|
d96b19e04ea25e26e2855f477d372df8b086c182
|
[
"MIT"
] |
permissive
|
pombredanne/Kunlun-M
|
cb0495c583e6ae39e168e96341f4fd2cb01d30ae
|
ab3b3cc843edee6a558a485d89daf944d8fd7f8e
|
refs/heads/master
| 2023-08-23T05:58:12.433991
| 2021-09-30T10:04:04
| 2021-09-30T10:04:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,965
|
py
|
#!/usr/bin/env python
# encoding: utf-8
'''
@author: LoRexxar
@contact: lorexxar@gmail.com
@file: project.py
@time: 2021/7/20 15:50
@desc:
'''
import re
import ast
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse, HttpResponseNotFound
from django.views.generic import TemplateView
from django.views import View
from django.shortcuts import render, redirect
from Kunlun_M.settings import SUPER_ADMIN
from Kunlun_M.const import VUL_LEVEL, VENDOR_VUL_LEVEL
from web.index.controller import login_or_token_required
from utils.utils import del_sensitive_for_config
from web.index.models import ScanTask, ScanResultTask, Rules, Tampers, NewEvilFunc, Project, ProjectVendors, VendorVulns
from web.index.models import get_and_check_scanresult, get_and_check_evil_func
class ProjectListView(TemplateView):
"""展示当前用户的项目"""
template_name = "dashboard/projects/projects_list.html"
def get_context_data(self, **kwargs):
context = super(ProjectListView, self).get_context_data(**kwargs)
rows = Project.objects.all().order_by('-id')
project_count = Project.objects.all().count()
context['projects'] = rows
for project in context['projects']:
tasks = ScanTask.objects.filter(project_id=project.id).order_by('-id')
tasks_count = len(tasks)
vendors_count = ProjectVendors.objects.filter(project_id=project.id).count()
results_count = ScanResultTask.objects.filter(scan_project_id=project.id, is_active=1).count()
last_scan_time = 0
if tasks:
last_scan_time = tasks.first().last_scan_time
project.tasks_count = tasks_count
project.results_count = results_count
project.last_scan_time = last_scan_time
project.vendors_count = vendors_count
context['projects'] = sorted(context['projects'], key=lambda x:x.last_scan_time)[::-1]
if 'p' in self.request.GET:
page = int(self.request.GET['p'])
else:
page = 1
# check page
if page*50 > project_count:
page = 1
context['projects'] = context['projects'][(page-1)*50: page*50]
context['page'] = page
max_page = project_count // 50 if project_count % 50 == 0 else (project_count // 50)+1
max_page = max_page+1 if max_page == 1 else max_page
context['max_page'] = max_page
context['page_range'] = range(int(max_page))[1:]
return context
class ProjectDetailView(View):
"""展示当前项目细节"""
@staticmethod
@login_or_token_required
def get(request, project_id):
project = Project.objects.filter(id=project_id).first()
tasks = ScanTask.objects.filter(project_id=project.id).order_by('-id')[:20]
taskresults = ScanResultTask.objects.filter(scan_project_id=project.id, is_active=1).all()
newevilfuncs = NewEvilFunc.objects.filter(project_id=project.id).all()
pvs = ProjectVendors.objects.filter(project_id=project.id)
for task in tasks:
task.is_finished = int(task.is_finished)
task.parameter_config = del_sensitive_for_config(task.parameter_config)
for taskresult in taskresults:
taskresult.is_unconfirm = int(taskresult.is_unconfirm)
taskresult.level = 0
taskresult.vid = 0
if taskresult.cvi_id == '9999':
vender_vul_id = taskresult.vulfile_path.split(":")[-1]
if vender_vul_id:
vv = VendorVulns.objects.filter(id=vender_vul_id).first()
if vv:
taskresult.vulfile_path = "[{}]{}".format(vv.vendor_name, vv.title)
taskresult.level = VENDOR_VUL_LEVEL[vv.severity]
taskresult.vid = vv.id
                # Handle the display of multiple reference links
references = []
if re.search(r'"http[^"]+"', taskresult.source_code, re.I):
rs = re.findall(r'"http[^"]+"', taskresult.source_code, re.I)
for r in rs:
references.append(r)
else:
references = [taskresult.source_code]
taskresult.source_code = references
else:
r = Rules.objects.filter(svid=taskresult.cvi_id).first()
taskresult.level = VUL_LEVEL[r.level]
if not project:
return HttpResponseNotFound('Project Not Found.')
else:
data = {
'tasks': tasks,
'taskresults': taskresults,
'newevilfuncs': newevilfuncs,
'project': project,
'project_vendors': pvs,
}
return render(request, 'dashboard/projects/project_detail.html', data)
|
[
"lorexxar@gmail.com"
] |
lorexxar@gmail.com
|
b7bb6f433a614f530de7f24c83596e5d1d083e36
|
84f1fea102aeb2d324e8ad3908e1765d04a0a730
|
/emails/migrations/0002_sentemail_transaction_event.py
|
367b922c18a634054c774c56a9edd85a29f73e26
|
[
"Apache-2.0"
] |
permissive
|
Natsoye/explorer
|
c205f8eb8d08705c2c4ee4ee45c28f7d0a534b10
|
638c70204d6001d9c5c56701917a6273a02c90cf
|
refs/heads/master
| 2021-08-30T10:42:56.371192
| 2021-08-17T15:43:04
| 2021-08-17T15:43:04
| 181,131,891
| 2
| 0
|
Apache-2.0
| 2021-08-17T15:43:05
| 2019-04-13T06:43:15
|
Python
|
UTF-8
|
Python
| false
| false
| 526
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('emails', '0001_initial'),
('transactions', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='sentemail',
name='transaction_event',
field=models.ForeignKey(blank=True, to='transactions.OnChainTransaction', null=True),
preserve_default=True,
),
]
|
[
"mflaxman@gmail.com"
] |
mflaxman@gmail.com
|
d6cfbd76a3fdbf59869fa3c44935823a5b1d04e7
|
37e87b3d5e1ee9009f0ea0671bc0c6edf0e233b7
|
/162_3.py
|
a5d822d8c80608bdbd14448d50c484cc6169f703
|
[] |
no_license
|
Jane11111/Leetcode2021
|
d9f4987792938597bf89ff72ba6bbcb4a3f9d081
|
a95b871578aae0103066962c33b8c0f4ec22d0f2
|
refs/heads/master
| 2023-07-14T21:29:41.196752
| 2021-08-23T03:28:02
| 2021-08-23T03:28:02
| 344,804,297
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2021-03-12 10:16
# @Author : zxl
# @FileName: 162_3.py
class Solution(object):
def findPeakElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
i = 0
j = len(nums)-1
while i<=j:
if i==j:
return i
m = (i+j)//2
if nums[m]>nums[m+1]:
j = m
else:
i=m+1
return -1
obj = Solution()
nums = [1,2,1,3,5,6,4]
ans= obj.findPeakElement(nums)
print(ans)
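# Why this converges (added note): when nums[m] > nums[m+1] a peak must exist in
# [i, m], otherwise one exists in [m+1, j]; the window always contains a peak
# and halves each step, so the search runs in O(log n).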
|
[
"791057615@qq.com"
] |
791057615@qq.com
|
8737cd2b0d8f72b063a39e23b4cb76f20b2b2e95
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/arista/eos/tests/unit/modules/network/eos/test_eos_eapi.py
|
a01bf1eb370a2fdae9d8734baa3d9a7804e49bea
|
[
"GPL-3.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 6,923
|
py
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.arista.eos.tests.unit.compat.mock import patch
from ansible_collections.arista.eos.plugins.modules import eos_eapi
from ansible_collections.arista.eos.tests.unit.modules.utils import (
set_module_args,
)
from .eos_module import TestEosModule, load_fixture
class TestEosEapiModule(TestEosModule):
module = eos_eapi
def setUp(self):
super(TestEosEapiModule, self).setUp()
self.mock_run_commands = patch(
"ansible_collections.arista.eos.plugins.modules.eos_eapi.run_commands"
)
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch(
"ansible_collections.arista.eos.plugins.modules.eos_eapi.load_config"
)
self.load_config = self.mock_load_config.start()
self.mock_verify_state = patch(
"ansible_collections.arista.eos.plugins.modules.eos_eapi.verify_state"
)
self.verify_state = self.mock_verify_state.start()
self.command_fixtures = {}
def tearDown(self):
super(TestEosEapiModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
# hack for older version of mock
# should be using patch.stopall() but CI is still failing
try:
self.mock_verify_state.stop()
except RuntimeError:
pass
def load_fixtures(self, commands=None, transport="eapi"):
def run_commands(module, commands, **kwargs):
output = list()
for cmd in commands:
output.append(load_fixture(self.command_fixtures[cmd]))
return output
self.run_commands.side_effect = run_commands
self.load_config.return_value = dict(diff=None, session="session")
def start_configured(self, *args, **kwargs):
self.command_fixtures = {
"show vrf": "eos_eapi_show_vrf.text",
"show management api http-commands | json": "eos_eapi_show_mgmt.json",
}
return self.execute_module(*args, **kwargs)
def start_unconfigured(self, *args, **kwargs):
self.command_fixtures = {
"show vrf": "eos_eapi_show_vrf.text",
"show management api http-commands | json": "eos_eapi_show_mgmt_unconfigured.json",
}
return self.execute_module(*args, **kwargs)
def test_eos_eapi_http_enable(self):
set_module_args(dict(http=True))
commands = [
"management api http-commands",
"protocol http port 80",
"no shutdown",
]
self.start_unconfigured(changed=True, commands=commands)
def test_eos_eapi_http_disable(self):
set_module_args(dict(http=False))
commands = ["management api http-commands", "no protocol http"]
self.start_configured(changed=True, commands=commands)
def test_eos_eapi_http_port(self):
set_module_args(dict(http_port=81))
commands = ["management api http-commands", "protocol http port 81"]
self.start_configured(changed=True, commands=commands)
def test_eos_eapi_http_invalid(self):
set_module_args(dict(http_port=80000))
self.start_unconfigured(failed=True)
def test_eos_eapi_https_enable(self):
set_module_args(dict(https=True))
commands = [
"management api http-commands",
"protocol https port 443",
"no shutdown",
]
self.start_unconfigured(changed=True, commands=commands)
def test_eos_eapi_https_disable(self):
set_module_args(dict(https=False))
commands = ["management api http-commands", "no protocol https"]
self.start_configured(changed=True, commands=commands)
def test_eos_eapi_https_port(self):
set_module_args(dict(https_port=8443))
commands = ["management api http-commands", "protocol https port 8443"]
self.start_configured(changed=True, commands=commands)
def test_eos_eapi_local_http_enable(self):
set_module_args(dict(local_http=True))
commands = [
"management api http-commands",
"protocol http localhost port 8080",
"no shutdown",
]
self.start_unconfigured(changed=True, commands=commands)
def test_eos_eapi_local_http_disable(self):
set_module_args(dict(local_http=False))
commands = [
"management api http-commands",
"no protocol http localhost",
]
self.start_configured(changed=True, commands=commands)
def test_eos_eapi_local_http_port(self):
set_module_args(dict(local_http_port=81))
commands = [
"management api http-commands",
"protocol http localhost port 81",
]
self.start_configured(changed=True, commands=commands)
def test_eos_eapi_vrf(self):
set_module_args(dict(vrf="test"))
commands = [
"management api http-commands",
"no shutdown",
"vrf test",
"no shutdown",
]
self.start_unconfigured(changed=True, commands=commands)
def test_eos_eapi_change_from_default_vrf(self):
set_module_args(dict(vrf="test"))
commands = ["management api http-commands", "vrf test", "no shutdown"]
self.start_configured(changed=True, commands=commands)
def test_eos_eapi_default(self):
set_module_args(dict())
self.start_configured(changed=False, commands=[])
def test_eos_eapi_vrf_missing(self):
set_module_args(dict(vrf="missing"))
self.start_unconfigured(failed=True)
def test_eos_eapi_state_absent(self):
set_module_args(dict(state="stopped"))
commands = ["management api http-commands", "shutdown"]
self.start_configured(changed=True, commands=commands)
def test_eos_eapi_state_failed(self):
self.mock_verify_state.stop()
set_module_args(dict(state="stopped", timeout=1))
result = self.start_configured(failed=True)
"timeout expired before eapi running state changed" in result["msg"]
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
3786cda539c2758029469435453faa4ddf75d5b5
|
9724c8cd81ad39f7f9a2419e2873d7d74cb10c72
|
/pyabc/external/__init__.py
|
2cb45567dc2210c4965c2f9d98839dbe36b6260c
|
[
"BSD-3-Clause"
] |
permissive
|
ICB-DCM/pyABC
|
36b7fc431fe4ba4b34d80d268603ec410aeaf918
|
d1542fb201edca86369082e1fc7934995e3d03a4
|
refs/heads/main
| 2023-09-01T13:42:52.880878
| 2023-08-18T16:55:04
| 2023-08-18T16:55:04
| 96,995,608
| 187
| 49
|
BSD-3-Clause
| 2023-08-18T16:55:05
| 2017-07-12T10:30:10
|
Python
|
UTF-8
|
Python
| false
| false
| 871
|
py
|
"""
.. _api_external:
External simulators
===================
This module can be used to easily interface pyABC with model simulations,
summary statistics calculators and distance functions written in programming
languages other than Python.
The class :class:`pyabc.external.ExternalHandler`, as well as derived
Model, SumStat, and Distance classes, allow the use of arbitrary languages,
with communication via file i/o.
It has been successfully used with models written in e.g. R, Java, or C++.
Further, pyABC provides efficient interfaces to R via the class
:class:`pyabc.external.r.R` via the rpy2 package, and to Julia via the class
:class:`pyabc.external.julia.Julia` via the pyjulia package.
"""
from .base import (
LOC,
RETURNCODE,
TIMEOUT,
ExternalDistance,
ExternalHandler,
ExternalModel,
ExternalSumStat,
create_sum_stat,
)
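# Hedged usage sketch (added; the keyword names follow the pyABC documentation
# but should be verified against the installed version):
# model = ExternalModel(executable="Rscript", file="model.r")
# pyABC then shells out to `Rscript model.r ...` whenever the model is sampled.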
|
[
"noreply@github.com"
] |
ICB-DCM.noreply@github.com
|
0a792293e4a77a07abaf9f4e415181af5f8c98e6
|
0a2f79a4338615113abf34dbd027d6bec762c981
|
/tries.py
|
066636c52b3e15a0e99ff4763be6533413bc7248
|
[] |
no_license
|
SmithChen-364/learngit
|
d56220cb5902b30b7832f3cdeb4d429c58583d1c
|
b5202a269d06d745477c146bf0379a5f16a6b585
|
refs/heads/master
| 2020-06-20T12:11:19.678310
| 2019-08-06T07:47:29
| 2019-08-06T07:47:29
| 197,118,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,807
|
py
|
#tries for search words
# tries (ternary search trie) class
class tries:
    # root node
root=None
    # add method: insert one word
def add(self,word):
        # If there is no root yet, make the first character of the first word
        # the root node; otherwise start inserting from the first character.
if(not self.root):
self.root=self.add_element(word,0,self.root)
self.root.parentNode=self.triesNode("root")
else:
self.add_element(word,0,self.root)
def addList(self,wordlist):
if(len(wordlist)==0):
print("invalid list you input")
else:
for item in wordlist:
self.add(item)
def add_element(self,word,index,current):
if(current==None):
current=self.triesNode(word[index])
if(word[index]<current.getChar()):
current.leftNode=self.add_element(word,index,current.leftNode)
current.leftNode.parentNode=current
elif(word[index]>current.getChar()):
current.rightNode=self.add_element(word,index,current.rightNode)
current.rightNode.parentNode=current
else:
if(index==len(word)-1):
current.setFreq(current.getFreq()+1)
return current
current.middleNode=self.add_element(word,index+1,current.middleNode)
current.middleNode.parentNode=current
return current
def delete(self,word):
isExist=self.search(word)
if(isExist):
isExist.setFreq(0)
def printAll(self):
self.printAllNode(self.root,[])
def printPrefix(self,word):
self.add(word)
prefix=self.search(word)
if(prefix.getFreq()>1):
print(word)
self.printAllNode(prefix.middleNode,list(word[:-1]))
prefix.setFreq(prefix.getFreq()-1)
def printAllNode(self,_currentNode,storage):
if(_currentNode==None):
return
if(_currentNode.parentNode.middleNode is _currentNode):
storage.append(_currentNode.parentNode.getChar())
if(_currentNode.getFreq()!=0):
storage.append(_currentNode.getChar())
print("".join(storage))
storage.pop()
self.printAllNode(_currentNode.leftNode,storage)
self.printAllNode(_currentNode.middleNode,storage)
self.printAllNode(_currentNode.rightNode,storage)
if(_currentNode.parentNode.middleNode is _currentNode):
storage.pop()
def search(self,word):
i=0
currentNode=self.root
parentNode=None
Freq=0
while(i<len(word)):
if(currentNode==None):
break
if(word[i]>currentNode.getChar()):
currentNode=currentNode.rightNode
elif(word[i]<currentNode.getChar()):
currentNode=currentNode.leftNode
else:
Freq=currentNode.getFreq()
parentNode=currentNode
currentNode=currentNode.middleNode
i=i+1
if(i==len(word) and (not Freq==0)):
print(word+" is in it ")
print("%d times recorded!"%Freq)
return parentNode
else:
print(word+" is not in it")
return None
class triesNode:
leftNode=rightNode=middleNode=None
char=None
parentNode=None
freq=0
def __init__(self,char):
self.char=char
def setFreq(self,freq):
self.freq=freq
def getFreq(self):
return self.freq
def getChar(self):
return self.char
person=tries()
person.addList(["shell","she","shelter","salad","sho","sh"])
print("printALL")
person.printAll()
print("printPrefix")
person.printPrefix("she")
|
[
"you@example.com"
] |
you@example.com
|
50efae8cf3c6ae5e6053a7c24c353564133793bc
|
8b7334be253552c4a2982e3022d211ad8970abf6
|
/data/migrations/0053_merge_20180615_1859.py
|
77b069dd59b066015e97cf4ed41046967c32178f
|
[
"MIT"
] |
permissive
|
Duke-GCB/bespin-api
|
e5442bf66471bd9469f8094575a1efc00eafc700
|
cea5c20fb2ff592adabe6ebb7ca934939aa11a34
|
refs/heads/master
| 2021-08-16T05:36:25.323416
| 2019-10-18T15:30:16
| 2019-10-18T15:30:16
| 69,596,994
| 0
| 3
|
MIT
| 2021-06-10T18:55:46
| 2016-09-29T18:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 333
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-06-15 18:59
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data', '0051_merge_20180615_1758'),
('data', '0052_auto_20180615_1537'),
]
operations = [
]
|
[
"johnbradley2008@gmail.com"
] |
johnbradley2008@gmail.com
|
8f2a8b1a47778e9fafd036fb9c90ce4733f8b4a4
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_2_1/branyine/2016_round1B_A.py
|
3a2e46214d97bb27396b00187e8aa2574f9e2376
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,790
|
py
|
# link: https://code.google.com/codejam/contest/dashboard?c= #s=p0
import string
import time
testIndex=2
problemRoot="d:/prog/versenyek/googlejam"
problemDir="2016/round1B"
problemName="A"
inputFiles= ["-example.in", "-small.in", "-large.in"]
outputFiles=["-example.out", "-small.out", "-large.out"]
time1=time.time()
fileName=string.strip(problemRoot)+"/"+problemDir+"/"+problemName+inputFiles[testIndex]
inputData=[map(str, line.split()) for line in open(fileName,'r') if line.strip()]
fileName=string.strip(problemRoot)+"/"+problemDir+"/"+problemName+outputFiles[testIndex]
fileToWrite=open(fileName,'wb')
time2=time.time()
iLineNum=1
for iCase in xrange(int(inputData[0][0])):
numStr=inputData[iLineNum][0]
# Z num of zeros
# W num of twos
# U num of fours
# X num of sixes
# G num of eights
# F num minus fours is num fives
# V num minus fives is num sevens
# R num minus zeros minus fours is num threes
# I num minus fives minus sixes minus eights is num nines
# N num minus sevens minus nines*2 is num ones
numChs={}
for ch in 'ZWUXGFVRIN':
numChs[ch]=0
for ch in numStr:
if ch in 'ZWUXGFVRIN':
numChs[ch]+=1
nums=[0]*10
nums[0]=numChs['Z']
nums[2]=numChs['W']
nums[4]=numChs['U']
nums[6]=numChs['X']
nums[8]=numChs['G']
nums[5]=numChs['F']-nums[4]
nums[7]=numChs['V']-nums[5]
nums[3]=numChs['R']-nums[0]-nums[4]
nums[9]=numChs['I']-nums[5]-nums[6]-nums[8]
nums[1]=numChs['N']-nums[7]-2*nums[9]
toCall=''
for i in xrange(10):
toCall+=str(i)*nums[i]
print toCall
fileToWrite.write("Case #"+str(iCase+1)+": "+toCall+"\n")
iLineNum+=1
fileToWrite.close()
print 'Total time: ', time.time() - time1
print 'Solving time: ', time.time() - time2
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|