blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
457511baa39c93fcb58cbf7a167deb248fbf97f0
|
8deef5778d0104682d9e1c25d5ef8fc9a2e63feb
|
/PyInstaller/hooks/hook-h5py.py
|
449c848a3ec3624f6dc9d192bc3d65a5ea3ac1ff
|
[
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Weeeendi/Picture2Text
|
a2715a9c0f2d4749eab4768dea16f9720567557e
|
1919d99327b4360291b111fc8c122fffdce7ccc5
|
refs/heads/master
| 2022-07-03T06:30:55.664995
| 2022-06-03T13:01:35
| 2022-06-03T13:01:35
| 195,062,567
| 0
| 3
|
MIT
| 2022-06-03T13:01:36
| 2019-07-03T13:47:27
|
Python
|
UTF-8
|
Python
| false
| false
| 535
|
py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2018, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
Hook for http://pypi.python.org/pypi/h5py/
"""
# h5py imports these compiled/Cython submodules dynamically at runtime, so
# PyInstaller's static import analysis cannot discover them on its own.
hiddenimports = ['h5py._proxy', 'h5py.utils', 'h5py.defs', 'h5py.h5ac']
|
[
"wendi1078771091@gmail.com"
] |
wendi1078771091@gmail.com
|
b16e2e9a66be9969b417d12be51b37b00ed3b38c
|
6cc50a15672155f7d66e88830ad1baec6a061077
|
/processing/legacy/icetop_llhratio/python/globals.py
|
515a24a58ef19c6d8f525f718693f26293ddc978
|
[
"MIT"
] |
permissive
|
jrbourbeau/cr-composition
|
16b29c672b2d1c8d75c1c45e35fe6bb60b53ffe2
|
e9efb4b713492aaf544b5dd8bb67280d4f108056
|
refs/heads/master
| 2020-06-24T21:48:21.784277
| 2018-11-01T21:30:56
| 2018-11-01T21:30:56
| 74,618,907
| 0
| 1
|
MIT
| 2018-08-23T21:01:03
| 2016-11-23T22:31:01
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,420
|
py
|
# -*- coding: utf-8 -*-
#
## copyright (C) 2018
# The Icecube Collaboration
#
# $Id$
#
# @version $Revision$
# @date $LastChangedDate$
# @author Hershal Pandya <hershal@udel.edu> Last changed by: $LastChangedBy$
#
import numpy as np

# Binning definitions shared by the icetop_llhratio processing modules.

# log10(E/GeV) bin edges: 26 edges from 3 to 8, then keep every other edge
# (doubling the bin width to 0.4 in log10(E)).
logEnergyBins = np.linspace(3, 8, 26)
logEnergyBins = np.array(
    [logEnergyBins[i] for i in range(len(logEnergyBins)) if i % 2 == 0],
    dtype=float)

# cos(zenith) bin edges from 0.86 to 1.0 (eps added so 1.0 is included).
# BUG FIX: np.linspace requires an integer sample count (TypeError since
# numpy 1.18); the original float expression (1-cosZenBin0)/0.01+1 now
# crashes, so round to the intended number of samples explicitly.
cosZenBin0 = 0.86
cosZenBins = np.linspace(cosZenBin0, 1.0 + np.finfo(float).eps,
                         int(round((1 - cosZenBin0) / 0.01 + 1)))
cosZenBins = np.array(
    [cosZenBins[i] for i in range(len(cosZenBins)) if i % 2 == 0],
    dtype=float)

# log10(charge) bin edges, with two special underflow edges prepended:
# one for unhit tanks and one below it for excluded tanks.
logChargeBins = np.linspace(-3, 4, 71)
deltaCharge = 0.1
unhitCharge = logChargeBins[0] - 0.5 * deltaCharge
logChargeBins = np.hstack([unhitCharge - 0.5 * deltaCharge, logChargeBins])
excludedCharge = logChargeBins[0] - 0.5 * deltaCharge
logChargeBins = np.hstack([excludedCharge - 0.5 * deltaCharge, logChargeBins])

# Time bin edges: symmetric about 0 in steps of deltaT, plus the same two
# special underflow edges (unhit / excluded).
deltaT = 0.1
# BUG FIX: 5.0/deltaT is 49.999... in binary floating point and linspace no
# longer accepts a float count.  Round (not truncate) so the spacing is the
# intended exact deltaT (51 edges for [0, 5]).
nBins = int(round(5.0 / deltaT))
tBinsUp = np.linspace(0, 5, nBins + 1)
tBinsDown = -1.0 * tBinsUp
tBinsDown.sort()
logTBins = np.hstack([tBinsDown[0:-1], tBinsUp])
unhitTime = logTBins[0] - 0.5 * deltaT
logTBins = np.hstack([unhitTime - 0.5 * deltaT, logTBins])
excludedTime = logTBins[0] - 0.5 * deltaT
logTBins = np.hstack([excludedTime - 0.5 * deltaT, logTBins])

# log10(distance/m) bin edges.
logDBins = np.linspace(0, 3.5, 36)

# Frame-object keys used by the processing scripts.
pulses1 = 'Shield_HLCSLCTimeCorrectedTankMerged_SplineMPEfast_SRT_Split_InIcePulses_singleHits'
pulses2 = 'Shield_HLCSLCTimeCorrectedTankMerged_SplineMPEfast_SRT_Split_InIcePulses_singleHits_UnHit'
pulses3 = 'IceTopExcludedTanks'
reco_track2 = 'SplineMPEfast_SRT_Split_InIcePulses'
reco_track1 = 'MuEx_mie_SplineMPEfast_SRT_Split_InIcePulses'
def rotate_to_shower_cs(x,y,z,phi,theta,core_x,core_y,core_z):
    """
    Rotate a detector-CS position into the shower coordinate system and
    return the lateral distance from the shower axis.

    Parameters
    ----------
    x, y, z : float
        Position in the detector coordinate system.
    phi, theta : float
        Azimuth and zenith of the shower direction, in radians.
    core_x, core_y, core_z : float
        Shower core position in the detector coordinate system.

    Returns
    -------
    float
        Radius (perpendicular distance from the shower axis) in the
        shower coordinate system.
    """
    # counter-clockwise (pi + phi) rotation
    d_phi = np.matrix([ [ -np.cos(phi), -np.sin(phi), 0],
                        [ np.sin(phi), -np.cos(phi), 0],
                        [ 0, 0, 1] ])
    # clock-wise (pi - theta) rotation
    d_theta = np.matrix([ [ -np.cos(theta), 0, -np.sin(theta)],
                          [ 0, 1, 0, ],
                          [ np.sin(theta), 0, -np.cos(theta)] ])
    rotation = d_theta * d_phi
    origin = np.array([[core_x], [core_y], [core_z]])
    det_cs_position = np.array([[x],
                                [y],
                                [z]])
    shower_cs_position = rotation * (det_cs_position - origin)
    shower_cs_radius = np.sqrt(shower_cs_position[0]**2 + shower_cs_position[1]**2)
    # BUG FIX: np.float was removed in numpy 1.24; the builtin float is the
    # documented replacement and behaves identically here.
    return float(shower_cs_radius)
def to_shower_cs(fit):
    """
    Build the rotation matrix from the detector coordinate system into the
    shower coordinate system of *fit* (assumes fit.dir is set).

    Requires numpy.
    """
    import numpy
    from math import cos, sin
    phi = fit.dir.phi
    theta = fit.dir.theta
    # counter-clockwise (pi + phi) rotation about the vertical axis
    rot_phi = numpy.matrix([
        [-cos(phi), -sin(phi), 0],
        [ sin(phi), -cos(phi), 0],
        [        0,         0, 1],
    ])
    # clock-wise (pi - theta) rotation
    rot_theta = numpy.matrix([
        [-cos(theta), 0, -sin(theta)],
        [          0, 1,           0],
        [ sin(theta), 0, -cos(theta)],
    ])
    return rot_theta * rot_phi
|
[
"jrbourbeau@gmail.com"
] |
jrbourbeau@gmail.com
|
058fc6c307680b8132797732d1c2935f208e2cff
|
71e43068e82c91acbb3849169d1723f1375ac27f
|
/test/test_login_params.py
|
c5ace7940a02f5cf97942e6a18680162679dbcb8
|
[
"MIT"
] |
permissive
|
talon-one/talon_one.py
|
aa08a1dbddd8ea324846ae022e43d441c57028f6
|
917dffb010e3d3e2f841be9cccba5bba1ea6c5c3
|
refs/heads/master
| 2023-05-11T18:50:00.041890
| 2023-05-03T20:17:39
| 2023-05-03T20:17:39
| 79,575,913
| 1
| 7
|
MIT
| 2023-05-03T15:10:14
| 2017-01-20T16:29:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,115
|
py
|
# coding: utf-8
"""
Talon.One API
Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you access the Campaign Manager at `https://yourbaseurl.talon.one/`, the URL for the [updateCustomerSessionV2](https://docs.talon.one/integration-api#operation/updateCustomerSessionV2) endpoint is `https://yourbaseurl.talon.one/v2/customer_sessions/{Id}` # noqa: E501
The version of the OpenAPI document:
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import talon_one
from talon_one.models.login_params import LoginParams # noqa: E501
from talon_one.rest import ApiException
class TestLoginParams(unittest.TestCase):
    """Unit test stubs for the LoginParams model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Build a LoginParams test instance.

        When ``include_optional`` is False only the required parameters are
        set; when True both required and optional parameters are set.  For
        this model the two parameter sets are currently identical.
        """
        # model = talon_one.models.login_params.LoginParams()  # noqa: E501
        params = {
            'email': 'john.doe@example.com',
            'password': 'admin123456',
        }
        return LoginParams(**params)

    def testLoginParams(self):
        """LoginParams constructs with and without optional params."""
        self.make_instance(include_optional=False)
        self.make_instance(include_optional=True)


if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
talon-one.noreply@github.com
|
1a310d12e5d6aca3b58eccea94976393c70dcc33
|
836d5f7190f6b4503e758c87c71598f18fdfce14
|
/2-Veri-Tipleri-ve-Değişkenler/Float-Veri-Tipi.py
|
e16df4bfbc907194105af1f6be9ca54d54325f37
|
[] |
no_license
|
S-Oktay-Bicici/PYTHON-PROGRAMMING
|
cf452723fd3e7e8ec2aadc7980208d747c502e9a
|
22e864f89544249d6309d6f4570a4104bf47346b
|
refs/heads/main
| 2021-11-30T00:19:21.158084
| 2021-11-16T15:44:29
| 2021-11-16T15:44:29
| 316,716,147
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
# Demonstration of Python's float type and the division operators.
# True division (/) always yields a float; floor division (//) between two
# ints yields an int, but if either operand is a float the floor-divided
# result is a float as well.
for value in (
    3.5,        # plain float literal
    2.1,        # plain float literal
    3,          # int literal, shown for contrast
    3.0,        # float literal with a zero fractional part
    10 / 5,     # true division -> 2.0 (float)
    10 // 5,    # floor division of ints -> 2 (int)
    10.2 // 5,  # float operand -> floor result is still a float (2.0)
    10 // 5.2,  # float operand -> floor result is still a float (1.0)
):
    print(value)
    print(type(value))
|
[
"noreply@github.com"
] |
S-Oktay-Bicici.noreply@github.com
|
7b0597275393a4e60df88ff6dabff13ca0bfa6f1
|
61bc53ec90d92aece91753ec5ec9d25e0879a1e2
|
/content/pythia/pythia/legacy/top_down_bottom_up/unittests.py
|
d1f24d7a35974cc3ea42778088b753406f23a637
|
[
"BSD-3-Clause"
] |
permissive
|
aluka1994/textvqa
|
08a16c9b21ea9c5eca05f5d4d1763c190d2d7275
|
694cb2be08def519ba73be78e34664afa2c607b5
|
refs/heads/master
| 2021-05-26T23:44:21.973827
| 2020-04-08T22:05:58
| 2020-04-08T22:05:58
| 254,190,630
| 0
| 0
|
MIT
| 2020-04-08T20:14:11
| 2020-04-08T20:14:10
| null |
UTF-8
|
Python
| false
| false
| 4,467
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import unittest
import numpy as np
import torch
from torch.autograd import Variable
from global_variables.global_variables import use_cuda
from top_down_bottom_up.classifier import logit_classifier
from top_down_bottom_up.image_embedding import image_embedding
from top_down_bottom_up.question_embeding import QuestionEmbeding
from top_down_bottom_up.top_down_bottom_up_model import \
top_down_bottom_up_model
class Test_top_down_bottom_up_model(unittest.TestCase):
    """Shape-level unit tests for the top-down/bottom-up VQA components.

    Each test wires up one component (classifier, question embedding, image
    embedding, or the full model) with small dimensions and asserts only the
    shape of the forward-pass output; output values are never checked.
    """
    def test_classifier(self):
        # Expect logits of shape (batch_size, num_ans_candidates).
        batch_size = 12
        joint_embedding_dim = 10
        num_ans_candidates = 20
        text_embeding_dim = 64
        image_embedding_dim = 32
        my_classifier = logit_classifier(
            joint_embedding_dim,
            num_ans_candidates,
            image_embedding_dim,
            text_embeding_dim,
        )
        joint_embedding = Variable(torch.randn(batch_size, joint_embedding_dim))
        res = my_classifier(joint_embedding)
        self.assertEqual((12, 20), res.shape)
    def test_classifier_batch_size_1(self):
        # Same as test_classifier, but guards against a batch of one being
        # squeezed to a 1-D tensor.
        batch_size = 1
        joint_embedding_dim = 10
        num_ans_candidates = 20
        text_embeding_dim = 64
        image_embedding_dim = 32
        my_classifier = logit_classifier(
            joint_embedding_dim,
            num_ans_candidates,
            image_embedding_dim,
            text_embeding_dim,
        )
        joint_embedding = Variable(torch.randn(batch_size, joint_embedding_dim))
        res = my_classifier(joint_embedding)
        self.assertEqual((1, 20), res.shape)
    def test_question_embedding(self):
        # The % num_vocab keeps the random token ids inside the vocabulary.
        # Expected output: one lstm_dim-sized vector per batch element.
        num_vocab = 20
        embedding_dim = 300
        lstm_dim = 512
        lstm_layer = 1
        dropout = 0.1
        batch_first = True
        batch_size = 32
        question_len = 10
        my_word_embedding_model = QuestionEmbeding(
            num_vocab, embedding_dim, lstm_dim, lstm_layer, dropout, batch_first
        )
        # Move to GPU only when the project's global flag says one is usable.
        my_word_embedding_model = (
            my_word_embedding_model.cuda() if use_cuda else my_word_embedding_model
        )
        input_txt = Variable(
            torch.rand(batch_size, question_len).type(torch.LongTensor) % num_vocab
        )
        input_txt = input_txt.cuda() if use_cuda else input_txt
        embedding = my_word_embedding_model(input_txt, batch_first=True)
        self.assertEqual((32, 512), embedding.shape)
    def test_image_embedding(self):
        # Attention-weighted image features: expect one image_feat_dim
        # vector per batch element (locations reduced away).
        image_feat_dim = 40
        txt_embedding_dim = 50
        hidden_size = 30
        num_of_loc = 5
        batch_size = 16
        my_image_embeding = image_embedding(
            image_feat_dim, txt_embedding_dim, hidden_size
        )
        image_feat = Variable(torch.randn(batch_size, num_of_loc, image_feat_dim))
        txt = Variable(torch.randn(batch_size, txt_embedding_dim))
        res = my_image_embeding(image_feat, txt)
        self.assertEqual((batch_size, image_feat_dim), res.shape)
    def test_model(self):
        # End-to-end wiring of the full model; the image embedding consumes
        # the lstm_dim question vector, so it is built with lstm_dim (not
        # txt_embedding_dim) as its text-side dimension.
        image_feat_dim = 40
        txt_embedding_dim = 300
        lstm_dim = 512
        hidden_size = 30
        num_of_loc = 5
        batch_size = 16
        num_vocab = 60
        num_ans_candidates = 35
        joint_embedding_dim = 500
        question_len = 13
        batch_first = True
        image_embedding_model = image_embedding(image_feat_dim, lstm_dim, hidden_size)
        question_embedding_model = QuestionEmbeding(
            num_vocab,
            txt_embedding_dim,
            lstm_dim,
            lstm_layer=2,
            dropout=0.1,
            batch_first=batch_first,
        )
        my_classifier = logit_classifier(
            joint_embedding_dim, num_ans_candidates, image_feat_dim, txt_embedding_dim
        )
        loss = torch.nn.CrossEntropyLoss()
        my_model = top_down_bottom_up_model(
            image_embedding_model, question_embedding_model, my_classifier, loss
        )
        # NOTE(review): image_feat is a raw numpy array here, not a torch
        # tensor -- presumably the model converts it internally; confirm.
        image_feat = np.random.rand(batch_size, num_of_loc, image_feat_dim)
        input_txt = Variable(
            torch.rand(batch_size, question_len).type(torch.LongTensor) % num_vocab
        )
        res = my_model(image_feat, input_txt, batch_first)
        self.assertEqual((batch_size, num_ans_candidates), res.shape)
if __name__ == "__main__":
    unittest.main()
|
[
"anandkumar@instance-1.us-central1-a.c.andromanit.internal"
] |
anandkumar@instance-1.us-central1-a.c.andromanit.internal
|
c50c0e9005ec170abfa46abc1f26c3c35a8a774c
|
a99a44aee5cfc5e080f6d83d2bcc1c3d273a3426
|
/htdocs/plotting/auto/scripts/p98.py
|
fce3feefb062bd6cee36902c4ba30cf7d0d5e9f0
|
[
"MIT"
] |
permissive
|
ragesah/iem
|
1513929c8bc7f254048271d61b4c4cf27a5731d7
|
8ed970d426bddeaa3e7ded593665d22f0f9f6e87
|
refs/heads/main
| 2023-08-20T20:01:15.480833
| 2021-10-12T15:44:52
| 2021-10-12T15:44:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,728
|
py
|
"""Day of month frequency."""
import calendar
import numpy as np
from pandas.io.sql import read_sql
from pyiem import network
from pyiem.plot import figure_axes
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
# Observed variable selectable in the plot form (form value -> label).
PDICT = {
    "precip": "Daily Precipitation",
    "snow": "Daily Snowfall",
    "snowd": "Daily Snow Depth",
    "high": "High Temperature",
    "low": "Low Temperature",
}
# Threshold comparison direction (form value -> label).
PDICT2 = {"above": "At or Above Threshold", "below": "Below Threshold"}
def get_description():
    """Return a dict describing how to call this plotter"""
    return {
        "data": True,
        "description": """This plot produces the daily frequency of
    a given criterion being meet for a station and month of your choice. The
    number labeled above each bar is the actual number of years.
    """,
        "arguments": [
            {
                "type": "station",
                "name": "station",
                "default": "IATDSM",
                "label": "Select Station",
                "network": "IACLIMATE",
            },
            {
                "type": "month",
                "name": "month",
                "default": 9,
                "label": "Which Month:",
            },
            {
                "type": "select",
                "name": "var",
                "default": "high",
                "label": "Which Variable:",
                "options": PDICT,
            },
            {
                "type": "text",
                "name": "thres",
                "default": "90",
                "label": "Threshold (F or inch):",
            },
            {
                "type": "select",
                "name": "dir",
                "default": "above",
                "label": "Threshold Direction:",
                "options": PDICT2,
            },
        ],
    }
def plotter(fdict):
    """Build the day-of-month frequency plot.

    Args:
        fdict: raw form parameters, parsed with get_autoplot_context
            against get_description().

    Returns:
        (figure, DataFrame) on success; None when the requested variable
        or direction key is not recognized.

    Raises:
        NoDataFound: when the database query returns no rows.
    """
    pgconn = get_dbconn("coop")
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx["station"]
    varname = ctx["var"]
    month = ctx["month"]
    threshold = float(ctx["thres"])
    # Unknown variable key: bail out silently (returns None).
    if PDICT.get(varname) is None:
        return
    drct = ctx["dir"]
    if PDICT2.get(drct) is None:
        return
    operator = ">=" if drct == "above" else "<"
    # Per-state long-term climate table, e.g. alldata_ia for an IA station.
    table = "alldata_%s" % (station[:2],)
    nt = network.Table("%sCLIMATE" % (station[:2],))
    # varname and operator were validated against PDICT/PDICT2 above, so the
    # f-string interpolation cannot inject arbitrary SQL; the user-supplied
    # threshold/station/month go through bound parameters.
    df = read_sql(
        f"""
    SELECT sday,
    sum(case when {varname} {operator} %s then 1 else 0 end)
    as hit,
    count(*) as total
    from {table} WHERE station = %s and month = %s
    GROUP by sday ORDER by sday ASC
    """,
        pgconn,
        params=(threshold, station, month),
        index_col="sday",
    )
    if df.empty:
        raise NoDataFound("No Data Found.")
    # Percentage of years on which each calendar day met the criterion.
    df["freq"] = df["hit"] / df["total"] * 100.0
    title = ("[%s] %s %s %s %s\nduring %s (Avg: %.2f days/year)") % (
        station,
        nt.sts[station]["name"],
        PDICT.get(varname),
        PDICT2.get(drct),
        threshold,
        calendar.month_name[month],
        df["hit"].sum() / float(df["total"].sum()) * len(df.index),
    )
    fig, ax = figure_axes(title=title)
    bars = ax.bar(np.arange(1, len(df.index) + 1), df["freq"])
    for i, mybar in enumerate(bars):
        # Label each bar with the raw hit count.
        # NOTE(review): df["hit"][i] is a positional lookup on a string
        # (sday) index -- deprecated in modern pandas; .iloc would be the
        # explicit form.  Left unchanged here.
        ax.text(
            i + 1,
            mybar.get_height() + 0.3,
            "%s" % (df["hit"][i],),
            ha="center",
        )
    ax.set_ylabel("Frequency (%)")
    ax.set_xlabel(
        ("Day of %s, number of years (out of %s) meeting criteria labelled")
        % (calendar.month_name[month], np.max(df["total"]))
    )
    ax.grid(True)
    ax.set_xlim(0.5, 31.5)
    ax.set_ylim(0, df["freq"].max() + 5)
    return fig, df
if __name__ == "__main__":
    # Ad-hoc smoke test: exercise the plotter with a fixed parameter set.
    plotter(
        {
            "month": 9,
            "dir": "below",
            "thres": 65,
            "station": "IA2724",
            "network": "IACLIMATE",
        }
    )
|
[
"akrherz@iastate.edu"
] |
akrherz@iastate.edu
|
ed65d8cabf8d7f04c1951349663419deb2979c50
|
0d9b5c2842721c2246d4b58890511d154fa6df1b
|
/myadmin/migrations/0018_auto_20180311_1219.py
|
b7424714cb179baa93c9eeaf22cf4097a6e17e01
|
[] |
no_license
|
bhavingandha9/senseshop
|
862c13056cd4f53b265d040fc05337e6e46841e9
|
b2982399bc8223c5eeeb25ce9e1edbd4449d6e93
|
refs/heads/master
| 2021-04-30T08:10:23.039521
| 2018-06-06T16:21:53
| 2018-06-06T16:21:53
| 121,368,692
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2018-03-11 06:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes Product.image optional
    # (blank=True) and clears its upload_to subdirectory.
    dependencies = [
        ('myadmin', '0017_auto_20180311_1212'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='image',
            field=models.ImageField(blank=True, upload_to=''),
        ),
    ]
|
[
"="
] |
=
|
7b2ae6979df18c1e5d9c6f4544cb5b8e95eb7e4a
|
6d50225574554cf651b7693f22115f6e0a2f3c58
|
/upyutils/SD_AM.py
|
59562cc33a81736240fbd8568190cf173f43efb0
|
[
"MIT"
] |
permissive
|
tovam/upydev
|
abc8f9af5667821bb4644bafcead5f847a4114a1
|
0f9b73cb55750c291d2d016a3fd29d2feb71b8fc
|
refs/heads/master
| 2022-10-03T12:43:38.699244
| 2020-06-07T12:58:19
| 2020-06-07T12:58:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,434
|
py
|
#!/usr/bin/env python
# @Author: carlosgilgonzalez
# @Date: 2019-07-05T20:19:56+01:00
# @Last modified by: carlosgilgonzalez
# @Last modified time: 2019-07-10T00:55:01+01:00
from machine import SPI, Pin
import sdcard
import os
import time
# sd detect pin (15)
sd_detect = Pin(15, Pin.IN, pull=None)
sd_detect.value()
# sd sig (A5)
sd_sig = Pin(4, Pin.OUT)
sd_sig.value()
sd_sig.on()
sd_sig.value()
sd_detect.value()
# Callback
# LED
led = Pin(13, Pin.OUT)
sd_out = True
spi = SPI(1, baudrate=10000000, sck=Pin(5), mosi=Pin(18), miso=Pin(19))
cs = Pin(21, Pin.OUT)
# sd = sdcard.SDCard(spi, cs)
sd = None
irq_busy_sd = False
def pd_txtfiles(path, tabs=0):
    """Print every '*txt' file in *path* with a human-readable size."""
    print("txt Files on filesystem:")
    print("====================")
    txt_files = [name for name in os.listdir(path) if name[-3:] == 'txt']
    for name in txt_files:
        stats = os.stat(path + "/" + name)
        nbytes = stats[6]
        is_directory = stats[0] & 0x4000
        _kB = 1024
        # Pick the largest unit that keeps the number below 1024.
        if nbytes < _kB:
            size_label = str(nbytes) + " by"
        elif nbytes < _kB**2:
            size_label = "%0.1f KB" % (nbytes / _kB)
        elif nbytes < _kB**3:
            size_label = "%0.1f MB" % (nbytes / _kB**2)
        else:
            size_label = "%0.1f GB" % (nbytes / _kB**3)
        shown = ""
        for _ in range(tabs):
            shown += " "
        shown += name
        if is_directory:
            shown += "/"
        print('{0:<40} Size: {1:>10}'.format(shown, size_label))
    # # recursively print directory contents
    # if isdir:
    #     print_directory(path + "/" + file, tabs + 1)
def toggle_led_sd(x, butpress=sd_detect, light=led, sd_spi=spi, sd_cs=cs, getinfo=pd_txtfiles):
    """IRQ handler: mount the SD card on insert, unmount it on removal.

    *x* is the Pin passed in by the IRQ machinery (unused; the pin is also
    bound as the *butpress* default).  Blinks the LED four times on either
    transition.  irq_busy_sd debounces re-entrant IRQ invocations, and
    sd_out tracks whether a card is currently unmounted so repeated edges
    in the same state are ignored.
    """
    global irq_busy_sd, sd_out, sd
    if irq_busy_sd:
        return
    else:
        irq_busy_sd = True
        if butpress.value() == 1:  # reverse op == 0
            # Rising edge while unmounted: a card was inserted.
            if sd_out is True:
                print('SD card detected')
                for i in range(4):
                    led.value(not led.value())
                    time.sleep_ms(250)
                # NOTE(review): the detect pin is flipped to OUT around the
                # mount -- presumably to mask spurious edges while the SPI
                # bus is busy; confirm against the board wiring.
                butpress.init(Pin.OUT)
                sd = sdcard.SDCard(sd_spi, sd_cs)
                time.sleep_ms(1000)
                os.mount(sd, '/sd')
                print(os.listdir('/'))
                # butpress.value(0) # reverse op == 1
                butpress.init(Pin.IN)
                getinfo("/sd")
                sd_out = False
            # butpress.init(Pin.IN, Pin.PULL_UP)
        elif butpress.value() == 0:
            # Falling edge while mounted: the card was removed.
            if sd_out is False:
                print('SD card removed')
                for i in range(4):
                    led.value(not led.value())
                    time.sleep_ms(250)
                time.sleep_ms(1000)
                butpress.init(Pin.OUT)
                os.umount('/sd')
                time.sleep_ms(1000)
                sd_out = True
        # Release the debounce guard for the next edge.
        irq_busy_sd = False
# Fire toggle_led_sd on both edges of the detect pin.
# NOTE(review): trigger=3 presumably equals IRQ_RISING | IRQ_FALLING on
# this port -- confirm against the machine.Pin constants.
sd_detect.irq(trigger=3, handler=toggle_led_sd)
# Handle a card that is already inserted at boot (no edge will fire).
if sd_detect.value() == 1:
    print('SD card detected')
    for i in range(4):
        led.value(not led.value())
        time.sleep_ms(250)
    # Same mount sequence as the IRQ handler's insert branch.
    sd_detect.init(Pin.OUT)
    sd = sdcard.SDCard(spi, cs)
    time.sleep_ms(1000)
    os.mount(sd, '/sd')
    print(os.listdir('/'))
    # butpress.value(0) # reverse op == 1
    sd_detect.init(Pin.IN)
    pd_txtfiles("/sd")
    sd_out = False
else:
    print('SD card not detected')
|
[
"carlosgilglez@gmail.com"
] |
carlosgilglez@gmail.com
|
a89274a540eccad2f64b0f01e06449ec329ce901
|
66052f5ba08ddac0a56ee140af17cf78b1ff1174
|
/PLURALSIGHT_BEGINNERS/lib/python3.9/site-packages/anyio/_core/_compat.py
|
8a0cfd088eadb36ec9786f05ab4ea9ab959ecd8e
|
[] |
no_license
|
enriquefariasrdz/Python
|
34704ceed001bbe8a23471eebefbe536b00031a5
|
b9191f7ad87b709a1b83c5cb3797a866b56aaa0d
|
refs/heads/master
| 2022-12-26T03:06:26.481456
| 2022-04-20T14:09:57
| 2022-04-20T14:09:57
| 27,020,899
| 1
| 1
| null | 2022-12-18T21:02:43
| 2014-11-23T03:33:52
|
Python
|
UTF-8
|
Python
| false
| false
| 5,668
|
py
|
from abc import ABCMeta, abstractmethod
from contextlib import AbstractContextManager
from types import TracebackType
from typing import (
TYPE_CHECKING, Any, AsyncContextManager, Callable, ContextManager, Generator, Generic,
Iterable, List, Optional, Tuple, Type, TypeVar, Union, overload)
from warnings import warn
# TaskInfo is only needed for type checking; at runtime a plain ``object``
# placeholder avoids importing the testing module.
if TYPE_CHECKING:
    from ._testing import TaskInfo
else:
    TaskInfo = object
T = TypeVar('T')
# Union of every wrapper type that maybe_async() knows how to unwrap.
AnyDeprecatedAwaitable = Union['DeprecatedAwaitable', 'DeprecatedAwaitableFloat',
                               'DeprecatedAwaitableList[T]', TaskInfo]
# Overloads: the return type mirrors the concrete wrapper being unwrapped.
@overload
async def maybe_async(__obj: TaskInfo) -> TaskInfo:
    ...
@overload
async def maybe_async(__obj: 'DeprecatedAwaitableFloat') -> float:
    ...
@overload
async def maybe_async(__obj: 'DeprecatedAwaitableList[T]') -> List[T]:
    ...
@overload
async def maybe_async(__obj: 'DeprecatedAwaitable') -> None:
    ...
async def maybe_async(__obj: 'AnyDeprecatedAwaitable[T]') -> Union[TaskInfo, float, List[T], None]:
    """
    Await on the given object if necessary.
    This function is intended to bridge the gap between AnyIO 2.x and 3.x where some functions and
    methods were converted from coroutine functions into regular functions.
    Do **not** try to use this for any other purpose!
    :return: the result of awaiting on the object if coroutine, or the object itself otherwise
    .. versionadded:: 2.2
    """
    # Each Deprecated* wrapper implements _unwrap() to yield its plain value
    # without emitting the deprecation warning that a real ``await`` would.
    return __obj._unwrap()
class _ContextManagerWrapper:
    """Adapt a synchronous context manager to the async CM protocol."""

    def __init__(self, cm: ContextManager[T]):
        self._cm = cm

    async def __aenter__(self) -> T:
        # Delegate straight to the wrapped sync manager; no awaiting happens.
        return self._cm.__enter__()

    async def __aexit__(self, exc_type: Optional[Type[BaseException]],
                        exc_val: Optional[BaseException],
                        exc_tb: Optional[TracebackType]) -> Optional[bool]:
        return self._cm.__exit__(exc_type, exc_val, exc_tb)
def maybe_async_cm(cm: Union[ContextManager[T], AsyncContextManager[T]]) -> AsyncContextManager[T]:
    """
    Wrap a regular context manager as an async one if necessary.
    This function is intended to bridge the gap between AnyIO 2.x and 3.x where some functions and
    methods were changed to return regular context managers instead of async ones.
    :param cm: a regular or async context manager
    :return: an async context manager
    .. versionadded:: 2.2
    """
    # NOTE(review): objects implementing only the *async* CM protocol fail
    # this isinstance check and are rejected -- presumably callers only ever
    # pass sync context managers here.
    if isinstance(cm, AbstractContextManager):
        return _ContextManagerWrapper(cm)

    raise TypeError('Given object is not an context manager')
def _warn_deprecation(awaitable: 'AnyDeprecatedAwaitable[Any]', stacklevel: int = 1) -> None:
warn(f'Awaiting on {awaitable._name}() is deprecated. Use "await '
f'anyio.maybe_async({awaitable._name}(...)) if you have to support both AnyIO 2.x '
f'and 3.x, or just remove the "await" if you are completely migrating to AnyIO 3+.',
DeprecationWarning, stacklevel=stacklevel + 1)
class DeprecatedAwaitable:
    """No-value awaitable returned by functions that used to be coroutines.

    Not awaiting it is the new, correct usage; awaiting it still works but
    emits a DeprecationWarning naming the originating function.
    """

    def __init__(self, func: Callable[..., 'DeprecatedAwaitable']):
        self._name = f'{func.__module__}.{func.__qualname__}'

    def __await__(self) -> Generator[None, None, None]:
        _warn_deprecation(self)
        # The unreachable yield turns this into a generator function, which
        # is what the await protocol requires; it never actually suspends.
        if False:
            yield

    def __reduce__(self) -> Tuple[Type[None], Tuple[()]]:
        # Pickles as plain None.
        return type(None), ()

    def _unwrap(self) -> None:
        return None
class DeprecatedAwaitableFloat(float):
    """A float that may additionally be awaited (with a DeprecationWarning)."""

    def __new__(cls, x: float,
                func: Callable[..., 'DeprecatedAwaitableFloat']) -> 'DeprecatedAwaitableFloat':
        return super().__new__(cls, x)

    def __init__(self, x: float, func: Callable[..., 'DeprecatedAwaitableFloat']):
        self._name = f'{func.__module__}.{func.__qualname__}'

    def __await__(self) -> Generator[None, None, float]:
        _warn_deprecation(self)
        # Dead yield makes this a generator function; it never suspends.
        if False:
            yield
        return float(self)

    def __reduce__(self) -> Tuple[Type[float], Tuple[float]]:
        # Pickles as a plain float.
        return float, (float(self),)

    def _unwrap(self) -> float:
        return float(self)
class DeprecatedAwaitableList(List[T]):
    """A list that may additionally be awaited (with a DeprecationWarning)."""

    def __init__(self, iterable: Iterable[T] = (), *,
                 func: Callable[..., 'DeprecatedAwaitableList[T]']):
        super().__init__(iterable)
        self._name = f'{func.__module__}.{func.__qualname__}'

    def __await__(self) -> Generator[None, None, List[T]]:
        _warn_deprecation(self)
        # Dead yield makes this a generator function; it never suspends.
        if False:
            yield
        return list(self)

    def __reduce__(self) -> Tuple[Type[List[T]], Tuple[List[T]]]:
        # Pickles as a plain list.
        return list, (list(self),)

    def _unwrap(self) -> List[T]:
        return list(self)
class DeprecatedAsyncContextManager(Generic[T], metaclass=ABCMeta):
    """Sync context manager base that also tolerates ``async with``.

    Using it asynchronously emits a DeprecationWarning and simply delegates
    to the synchronous __enter__/__exit__ pair.
    """

    @abstractmethod
    def __enter__(self) -> T:
        pass

    @abstractmethod
    def __exit__(self, exc_type: Optional[Type[BaseException]],
                 exc_val: Optional[BaseException],
                 exc_tb: Optional[TracebackType]) -> Optional[bool]:
        pass

    async def __aenter__(self) -> T:
        message = (f'Using {self.__class__.__name__} as an async context manager has been deprecated. '
                   f'Use "async with anyio.maybe_async_cm(yourcontextmanager) as foo:" if you have to '
                   f'support both AnyIO 2.x and 3.x, or just remove the "async" from "async with" if '
                   f'you are completely migrating to AnyIO 3+.')
        warn(message, DeprecationWarning)
        return self.__enter__()

    async def __aexit__(self, exc_type: Optional[Type[BaseException]],
                        exc_val: Optional[BaseException],
                        exc_tb: Optional[TracebackType]) -> Optional[bool]:
        return self.__exit__(exc_type, exc_val, exc_tb)
|
[
"enriquefariasrdz@gmail.com"
] |
enriquefariasrdz@gmail.com
|
d99f160d8ad572b13e39fa68ab9d1c9ebaeb17c3
|
493c7d9678a0724736fb9dd7c69580a94099d2b4
|
/apps/organization/models.py
|
81669bc929ff0415215ac5e9aca33cf0d6ca3b2d
|
[] |
no_license
|
cuixiaozhao/MxOnline
|
e253c8c5f5fa81747d8e1ca064ce032e9bd42566
|
c96ae16cea9ad966df36e9fcacc902c2303e765c
|
refs/heads/master
| 2020-03-29T18:47:11.158275
| 2018-10-22T14:06:50
| 2018-10-22T14:06:50
| 150,231,387
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,442
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from django.db import models
# Create your models here.
class CityDict(models.Model):
    """City lookup table used by CourseOrg."""
    name = models.CharField(max_length=20, verbose_name=u"城市")
    desc = models.CharField(max_length=200, verbose_name=u"描述")
    # BUG FIX: add_time was a CharField(max_length=30) with a callable
    # datetime default, which stores the str() of a datetime in a text
    # column.  Every other model in this file (CourseOrg, Teacher) uses
    # DateTimeField(default=datetime.now); match them.  NOTE: this requires
    # a schema migration.
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")

    class Meta:
        verbose_name = u"城市"
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return self.name
class CourseOrg(models.Model):
    # A course-providing organization (training agency, individual, or
    # university); verbose_name strings are user-facing Chinese labels.
    name = models.CharField(max_length=50, verbose_name=u"机构名称")
    desc = models.TextField(verbose_name=u"机构描述")
    # Organization category: pxjg=training agency, gr=individual, gx=university.
    category = models.CharField(default="pxjg", max_length=20, choices=(("pxjg", "培训机构"), ("gr", "个人"), ("gx", "高校"),))
    click_nums = models.IntegerField(default=0, verbose_name=u"点击数")
    fav_nums = models.IntegerField(default=0, verbose_name=u"收藏数")
    image = models.ImageField(upload_to="org/%Y/%m", verbose_name=u"LOGO")
    address = models.CharField(max_length=150, verbose_name=u"机构地址")
    # NOTE(review): no on_delete -- this file targets Django < 2.0 (it also
    # relies on __unicode__); on_delete becomes mandatory in 2.0+.
    city = models.ForeignKey(CityDict, verbose_name=u"所在城市")
    students = models.IntegerField(default=0, verbose_name=u"学生人数")
    course_nums = models.IntegerField(default=0, verbose_name=u"学习人数")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
    class Meta:
        verbose_name = u"课程机构"
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.name
class Teacher(models.Model):
    # A teacher employed by a CourseOrg; labels are user-facing Chinese.
    # NOTE(review): no on_delete -- consistent with the Django < 2.0 style
    # used throughout this file.
    org = models.ForeignKey(CourseOrg, verbose_name=u"所属机构")
    name = models.CharField(max_length=50, verbose_name=u"教师名")
    work_years = models.IntegerField(default=0, verbose_name=u"工作年限")
    work_company = models.CharField(max_length=50, verbose_name=u"就职公司")
    work_position = models.CharField(max_length=50, verbose_name=u"公司职位")
    points = models.CharField(max_length=50, verbose_name=u"教学特点")
    click_nums = models.IntegerField(default=0, verbose_name=u"点击数")
    fav_nums = models.IntegerField(default=0, verbose_name=u"收藏数")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"添加时间")
    class Meta:
        verbose_name = u"教师"
        verbose_name_plural = verbose_name
|
[
"19930911cXS"
] |
19930911cXS
|
ef3f10ffb9fb82da880e30592f7c192f58c36a89
|
2fc65c833223d282bd9867729ad3ed054c0832c2
|
/timetable/Section/router.py
|
308f412c58640aa5d05ae79329afb640054f1a26
|
[] |
no_license
|
libbyandhelen/DB_timetable
|
72b744ec332e5c1c3e242df1df6b4373493472ba
|
17936821b7064bed2ebb51289e5a9b0e131929d1
|
refs/heads/master
| 2020-09-21T20:43:55.008545
| 2019-12-12T01:18:48
| 2019-12-12T01:18:48
| 224,921,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,304
|
py
|
from Section.views import get_selected_sections_by_user, create_select_section, delete_select_section, \
create_select_section_by_section_id
from base.error import Error
from base.response import error_response
def router_selectsection(request):
    """
    /api/usersections
    GET: get_selected_sections_by_user
    POST: create_select_section_by_section_id
    DELETE: delete_select_section
    """
    handlers = {
        "GET": get_selected_sections_by_user,
        "POST": create_select_section_by_section_id,
        "DELETE": delete_select_section,
    }
    handler = handlers.get(request.method)
    if handler is None:
        return error_response(Error.ERROR_METHOD)
    return handler(request)
def router_selectsection_id(request, section_id):
    """
    /api/usersections/:section_id
    DELETE: delete_select_section
    POST: create_select_section_by_section_id
    """
    if request.method == "DELETE":
        return delete_select_section(request, section_id)
    if request.method == "POST":
        return create_select_section_by_section_id(request, section_id)
    # Any other verb is rejected.
    return error_response(Error.ERROR_METHOD)
|
[
"libbyandhelen@163.com"
] |
libbyandhelen@163.com
|
4fc695ac70d158a6cba3bae5ba199844e1cd2fc5
|
80dbb004883779f51733f5382040f940507e9180
|
/youtube/urls.py
|
5ade2eef4a72366cff05f9902e55dac1992d6caf
|
[] |
no_license
|
Shayan-9248/youtube_search
|
94824398f498022fb53aa5ca7f08ba6008f70396
|
e07d9a2aa0dac0d76675db028c3584583151b31d
|
refs/heads/master
| 2023-03-26T06:07:55.303627
| 2021-03-24T15:21:19
| 2021-03-24T15:21:19
| 350,349,734
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
from django.urls import path
from . import views
# URL namespace, e.g. reverse('youtube:index').
app_name = 'youtube'
urlpatterns = [
    path('', views.index, name='index'),
]
|
[
"shayan.aimoradii@gmail.com"
] |
shayan.aimoradii@gmail.com
|
7e7469802b3c5b924e652ee98673659d9cfede94
|
6a5477e9bfae8110b2203182ad1db0517d09b2f2
|
/Realestate4/Tagent4/models.py
|
132aff2e669b0c6bb3868eae0e56314f45160235
|
[] |
no_license
|
Jagadishbommareddy/multiadress
|
b90f46ef80b50ddae8d8499e3e8c2d56d10796a9
|
a8fa8f5fe2803f66bd7e5a8668e82b589df846b5
|
refs/heads/master
| 2021-01-23T10:04:07.921653
| 2017-09-06T12:16:31
| 2017-09-06T12:16:31
| 102,604,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
from django.core.urlresolvers import reverse
from django.db import models
from .validations import *
class Agent(models.Model):
    """Real-estate agent profile with validated descriptive fields."""
    # Explicit surrogate primary key (Django would otherwise add `id`).
    agent_id= models.AutoField(primary_key=True)
    first_name= models.CharField(max_length=20,validators=[validate_first_name])
    last_name= models.CharField(max_length=20,validators=[validate_last_name])
    age=models.IntegerField()
    education= models.CharField(max_length=50,validators=[validate_education])
    company_name=models.CharField(max_length=50)
    specialization= models.CharField(max_length=100,validators=[validate_specelization])
    experence=models.IntegerField()  # NOTE(review): likely a typo for "experience"; renaming requires a migration
    agent_notes=models.TextField()
    def get_absolute_url(self):
        # Used by generic views to redirect to this agent's update page.
        return reverse('agent-update', kwargs={'pk': self.pk})
class Address(models.Model):
    """Postal address belonging to an Agent (an agent may have several)."""
    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax —
    # confirm the targeted Django version before upgrading.
    agent= models.ForeignKey(Agent)
    address_id= models.AutoField(primary_key=True)
    address1 = models.CharField(max_length=100)
    address2 = models.CharField(max_length=100)
    city = models.CharField(max_length=20,validators=[validate_city])
    state= models.CharField(max_length=20,validators=[validate_state])
    landmark= models.CharField(max_length=20,validators=[validate_landmark])
    pincode= models.IntegerField()
|
[
"noreply@github.com"
] |
Jagadishbommareddy.noreply@github.com
|
2f217ccdcd79a8d5bb7e6c3d2f7d2ab5c1838d56
|
742f15ee3880306a946df7efee0020e42684b109
|
/out/string/python-flask/openapi_server/models/variable_collection.py
|
9cfe8be729a4ce32a9dd09c941f50f540d31840e
|
[] |
no_license
|
potiuk/airflow-api-clients
|
d0196f80caf6e6f4ecfa6b7c9657f241218168ad
|
325ba127f1e9aa808091916d348102844e0aa6c5
|
refs/heads/master
| 2022-09-14T00:40:28.592508
| 2020-05-31T10:05:42
| 2020-05-31T10:15:55
| 268,128,082
| 0
| 0
| null | 2020-05-30T17:28:04
| 2020-05-30T17:28:03
| null |
UTF-8
|
Python
| false
| false
| 1,941
|
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.variable_collection_item import VariableCollectionItem
from openapi_server import util
from openapi_server.models.variable_collection_item import VariableCollectionItem # noqa: E501
class VariableCollection(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, variables=None):  # noqa: E501
        """VariableCollection - a model defined in OpenAPI

        :param variables: The variables of this VariableCollection.  # noqa: E501
        :type variables: List[VariableCollectionItem]
        """
        # Type map consulted by util.deserialize_model when building from a dict.
        self.openapi_types = {
            'variables': List[VariableCollectionItem]
        }
        # JSON attribute name -> python attribute name mapping.
        self.attribute_map = {
            'variables': 'variables'
        }
        self._variables = variables

    @classmethod
    def from_dict(cls, dikt) -> 'VariableCollection':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The VariableCollection of this VariableCollection.  # noqa: E501
        :rtype: VariableCollection
        """
        return util.deserialize_model(dikt, cls)

    @property
    def variables(self):
        """Gets the variables of this VariableCollection.

        :return: The variables of this VariableCollection.
        :rtype: List[VariableCollectionItem]
        """
        return self._variables

    @variables.setter
    def variables(self, variables):
        """Sets the variables of this VariableCollection.

        :param variables: The variables of this VariableCollection.
        :type variables: List[VariableCollectionItem]
        """
        self._variables = variables
|
[
"kamil.bregula@polidea.com"
] |
kamil.bregula@polidea.com
|
2dc861a7f683325aeac69c4dacf18f63fa19f428
|
03f037d0f6371856ede958f0c9d02771d5402baf
|
/graphics/VTK-7.0.0/Examples/Infovis/Python/streaming_statistics_pyqt.py
|
6f778bc6de891077741ee6e337ac42522a554388
|
[
"BSD-3-Clause"
] |
permissive
|
hlzz/dotfiles
|
b22dc2dc5a9086353ed6dfeee884f7f0a9ddb1eb
|
0591f71230c919c827ba569099eb3b75897e163e
|
refs/heads/master
| 2021-01-10T10:06:31.018179
| 2016-09-27T08:13:18
| 2016-09-27T08:13:18
| 55,040,954
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,739
|
py
|
#!/usr/bin/env python
from __future__ import print_function
from vtk import *
import os.path
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Locate the Infovis SQLite sample data shipped with VTK.
data_dir = VTK_DATA_ROOT + "/Data/Infovis/SQLite/"
# NOTE(review): both fallback branches recompute the exact same path —
# they were presumably meant to try alternative locations; confirm intent.
if not os.path.exists(data_dir):
    data_dir = VTK_DATA_ROOT + "/Data/Infovis/SQLite/"
if not os.path.exists(data_dir):
    data_dir = VTK_DATA_ROOT + "/Data/Infovis/SQLite/"
sqlite_file = data_dir + "temperatures.db"
# I'm sure there's a better way then these global vars
# Cursor state shared with streamData() below.
currentRow = 0;
numberOfRows = 1;  # rows emitted per push
done = False;      # set once the whole table has been streamed
psuedoStreamingData = vtkProgrammableFilter()
def streamData():
    """Copy the next `numberOfRows` rows of the input table to the output.

    Installed as the execute method of `psuedoStreamingData`, simulating a
    streaming source by emitting a small batch on every pipeline push.
    Sets the module-global `done` when the table is exhausted.
    """
    global done
    global currentRow
    input = psuedoStreamingData.GetInput()
    output = psuedoStreamingData.GetOutput()
    # Copy just the columns names/types
    output.GetRowData().CopyStructure(input.GetRowData())
    # Loop through all the input data and grab the next bunch of rows
    startRow = currentRow
    endRow = startRow + numberOfRows
    if (endRow >= input.GetNumberOfRows()):
        endRow = input.GetNumberOfRows()
        done = True;
    print("streaming: ", startRow, "-", endRow)
    for i in range(startRow, endRow):
        output.InsertNextRow(input.GetRow(i))
    currentRow = endRow;
psuedoStreamingData.SetExecuteMethod(streamData)
class Timer(QObject):
    """Qt timer that pushes one streaming batch through the pipeline every 100 ms."""

    def __init__(self, parent=None):
        super(Timer, self).__init__(parent)
        # Setup the data streaming timer
        self.timer = QTimer()
        QObject.connect(self.timer, SIGNAL("timeout()"), self.update)
        self.timer.start(100)

    def update(self):
        # Stop once streamData() has exhausted the source table.
        if (done):
            quit();
        psuedoStreamingData.Modified()  # Is there a way to avoid this?
        psuedoStreamingData.GetExecutive().Push()
        printStats()
def printStats():
    """Dump the primary and derived statistics of the streaming filter.

    NOTE(review): relies on the module-global `ss` created in __main__.
    """
    sStats = ss.GetOutputDataObject( 1 )
    sPrimary = sStats.GetBlock( 0 )
    sDerived = sStats.GetBlock( 1 )
    sPrimary.Dump( 15 )
    sDerived.Dump( 15 )
if __name__ == "__main__":
    """ Main entry point of this python script """
    # Set up streaming executive
    streamingExec = vtkThreadedStreamingPipeline()
    vtkAlgorithm.SetDefaultExecutivePrototype(streamingExec)
    streamingExec.FastDelete()
    vtkThreadedStreamingPipeline.SetAutoPropagatePush(True)
    # Pull the table from the database
    databaseToTable = vtkSQLDatabaseTableSource()
    databaseToTable.SetURL("sqlite://" + sqlite_file)
    databaseToTable.SetQuery("select * from main_tbl")
    # Hook up the database to the streaming data filter
    psuedoStreamingData.SetInputConnection(databaseToTable.GetOutputPort())
    # Calculate offline(non-streaming) descriptive statistics
    print("# Calculate offline descriptive statistics:")
    ds = vtkDescriptiveStatistics()
    ds.SetInputConnection(databaseToTable.GetOutputPort())
    ds.AddColumn("Temp1")
    ds.AddColumn("Temp2")
    ds.Update()
    dStats = ds.GetOutputDataObject( 1 )
    dPrimary = dStats.GetBlock( 0 )
    dDerived = dStats.GetBlock( 1 )
    dPrimary.Dump( 15 )
    dDerived.Dump( 15 )
    # Stats filter to place 'into' the streaming filter
    inter = vtkDescriptiveStatistics()
    inter.AddColumn("Temp1")
    inter.AddColumn("Temp2")
    # Calculate online(streaming) descriptive statistics
    print("# Calculate online descriptive statistics:")
    ss = vtkStreamingStatistics()
    ss.SetStatisticsAlgorithm(inter)
    ss.SetInputConnection(psuedoStreamingData.GetOutputPort())
    # Spin up the timer (the Timer instance drives the streaming pushes).
    app = QApplication(sys.argv)
    stream = Timer()
    sys.exit(app.exec_())
|
[
"shentianweipku@gmail.com"
] |
shentianweipku@gmail.com
|
e54795bb281bdf8f85f066736ab758402ee247bb
|
8d35b8aa63f3cae4e885e3c081f41235d2a8f61f
|
/discord/ext/dl/extractor/formula1.py
|
fe89d221c6f687c2412b0273b350ca3685ae8f59
|
[
"MIT"
] |
permissive
|
alexyy802/Texus
|
1255f4e54c8d3cc067f0d30daff1cf24932ea0c9
|
c282a836f43dfd588d89d5c13f432896aebb540f
|
refs/heads/master
| 2023-09-05T06:14:36.217601
| 2021-11-21T03:39:55
| 2021-11-21T03:39:55
| 429,390,575
| 0
| 0
|
MIT
| 2021-11-19T09:22:22
| 2021-11-18T10:43:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class Formula1IE(InfoExtractor):
    """Extractor for formula1.com video pages; delegates playback to Brightcove."""

    _VALID_URL = (
        r"https?://(?:www\.)?formula1\.com/en/latest/video\.[^.]+\.(?P<id>\d+)\.html"
    )
    _TEST = {
        "url": "https://www.formula1.com/en/latest/video.race-highlights-spain-2016.6060988138001.html",
        "md5": "be7d3a8c2f804eb2ab2aa5d941c359f8",
        "info_dict": {
            "id": "6060988138001",
            "ext": "mp4",
            "title": "Race highlights - Spain 2016",
            "timestamp": 1463332814,
            "upload_date": "20160515",
            "uploader_id": "6057949432001",
        },
        "add_ie": ["BrightcoveNew"],
    }
    BRIGHTCOVE_URL_TEMPLATE = "http://players.brightcove.net/6057949432001/S1WMrhjlh_default/index.html?videoId=%s"

    def _real_extract(self, url):
        # The numeric id in the page URL doubles as the Brightcove video id.
        bc_id = self._match_id(url)
        return self.url_result(
            self.BRIGHTCOVE_URL_TEMPLATE % bc_id, "BrightcoveNew", bc_id
        )
|
[
"noreply@github.com"
] |
alexyy802.noreply@github.com
|
ad2d33429d0c99627e9c18caa875ca3926d8864f
|
f028c7ca2e4c42505011ac0543cde4a111ee5c74
|
/eggs/django_lfs-0.10.2-py2.7.egg/lfs/order/settings.py
|
deea3ffc34011276d45e862026eca7c9462fbb11
|
[] |
no_license
|
yunmengyanjin/website
|
d625544330c28f072707dcbbc5eb7308a3f4bd9f
|
77e9c70687b35fd8b65a7f2d879e0261ae69c00e
|
refs/heads/master
| 2021-04-22T13:10:09.584559
| 2017-05-15T07:39:32
| 2017-05-15T07:39:32
| 56,428,389
| 2
| 16
| null | 2020-10-02T07:41:08
| 2016-04-17T09:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 755
|
py
|
# django imports
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
# Numeric order-state codes persisted on order records.
SUBMITTED = 0
PAID = 1
SENT = 2
CLOSED = 3
CANCELED = 4
PAYMENT_FAILED = 5
PAYMENT_FLAGGED = 6
PREPARED = 7
# (code, translatable label) choices; list order is the display order.
ORDER_STATES = [
    (SUBMITTED, _(u"Submitted")),
    (PAID, _(u"Paid")),
    (PREPARED, _(u"Prepared")),
    (SENT, _(u"Sent")),
    (CLOSED, _(u"Closed")),
    (CANCELED, _(u"Canceled")),
    (PAYMENT_FAILED, _(u"Payment Failed")),
    (PAYMENT_FLAGGED, _(u"Payment Flagged")),
]
# use numbers above 20 for custom order states to avoid conflicts if new base states are added to LFS core!
LFS_EXTRA_ORDER_STATES = getattr(settings, 'LFS_EXTRA_ORDER_STATES', [])
if LFS_EXTRA_ORDER_STATES:
    ORDER_STATES.extend(LFS_EXTRA_ORDER_STATES)
|
[
"daniel48@126.com"
] |
daniel48@126.com
|
f3bcde6ae30cfb731230794841388499d4d42f42
|
4403600c57fd170aad6bb505e4f14c4b70e63356
|
/sensor.py
|
3d90ccfcb357c6b8161da75db9832f89fceda02f
|
[] |
no_license
|
moonclearner/sensor
|
de4ef554cbc3dadb5fe5e801c55627d9f9340d19
|
0c3ad14375267b135940e8254262b0d054bd472c
|
refs/heads/master
| 2021-01-12T05:11:52.667742
| 2017-01-03T06:38:59
| 2017-01-03T06:38:59
| 77,886,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
# sensor collection system
from __future__ import unicode_literals
# starting time: 30, December,2016
# author: moonclearner
# -*- coding: utf-8 -*-
# NOTE(review): Python 2 code (print statements). The coding declaration above
# is only honoured on the first two lines of a file.
from socket import *
from time import ctime
Host = '127.0.0.1'
Port = 21010
BufferSize = 1024  # bytes read per recv() call
ADDR = (Host,Port)
def server_init():
    """Run a blocking TCP echo server that prefixes replies with a timestamp."""
    tcpserversock = socket(AF_INET,SOCK_STREAM)
    tcpserversock.bind(ADDR)
    tcpserversock.listen(5)
    while True:
        print "waiting for connection ..."
        tcpCliSock, addr =tcpserversock.accept()
        print '...connected from:',addr
        # Echo loop for one client; ends when the client disconnects.
        while True:
            data = tcpCliSock.recv(BufferSize)
            if not data:
                break
            tcpCliSock.send('[%s] %s' % (ctime(),data))
        tcpCliSock.close()
    pass  # NOTE(review): unreachable — the accept loop above never exits
server_init()
|
[
"718857460@qq.com"
] |
718857460@qq.com
|
3ea4d53a79484f18a2f537cce2b80ff6fb76d9d5
|
2e9f3f35cd239ce59f528c7b3b5e9714f7e5d5a3
|
/furnace/kernels/lib_tree_filter/functions/bfs.py
|
0c42bda0aebff2c10db5ec40a4cd5d48df3bdd46
|
[
"MIT"
] |
permissive
|
CV-IP/TreeFilter-Torch
|
8e2bd831060d0fa4e589a56353c2d91a7d4ac87b
|
46f36024f4522056fb9a3edf90c94f0a86a1352b
|
refs/heads/master
| 2023-02-04T19:09:32.790909
| 2020-12-16T07:14:41
| 2020-12-16T07:14:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
import tree_filter_cuda as _C
class _BFS(Function):
    """Autograd wrapper around the compiled BFS kernel in tree_filter_cuda."""

    @staticmethod
    def forward(ctx, edge_index, max_adj_per_vertex):
        # Delegates entirely to the C/CUDA extension; no backward is defined,
        # so this op is non-differentiable.
        sorted_index, sorted_parent, sorted_child =\
            _C.bfs_forward(edge_index, max_adj_per_vertex)
        return sorted_index, sorted_parent, sorted_child
bfs = _BFS.apply
|
[
"stevengrove@stu.xjtu.edu.cn"
] |
stevengrove@stu.xjtu.edu.cn
|
cf3574e7f1b07fdaf295f9b85a87e7e6aa4fa6a1
|
34bb6071725fb31f50ef7ff147fce5a06a5bb534
|
/code/router/handler.py
|
131eb17758636f15b29d37e503a366f93725e8eb
|
[] |
no_license
|
joshmarshall/intro-to-wsgi-presentation-2013
|
6f28612da4fc7225e8ed2081f725ae940821c0d3
|
19bf30410f435a0bb9a101bb2800cac294096931
|
refs/heads/master
| 2023-08-14T09:39:08.858311
| 2019-01-09T11:45:25
| 2019-01-09T11:45:25
| 164,854,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
class Handler(object):
    """Minimal WSGI request handler base.

    Stores the WSGI ``environ`` dict and the ``start_response`` callable, and
    exposes a ``headers`` dict that subclasses may populate before calling
    :meth:`start_response`.
    """

    def __init__(self, environ, start_response):
        self._environ = environ
        self._start_response = start_response
        self._response_started = False
        self._code = 200
        self._message = "OK"
        self.headers = {}

    def start_response(self, code, status="OK"):
        """Begin the WSGI response with *code* and *status*.

        Defaults the Content-Type header to JSON unless the caller has
        already set one, then forwards "<code> <status>" plus the header
        list to the wrapped WSGI ``start_response`` callable.
        """
        # BUG FIX: the original called setdefault("Content-Length",
        # "application/json") — a media type is a Content-Type value, and
        # "application/json" is never a valid Content-Length.
        self.headers.setdefault("Content-Type", "application/json")
        self._start_response(
            "%s %s" % (code, status), list(self.headers.items()))
|
[
"catchjosh@gmail.com"
] |
catchjosh@gmail.com
|
08c134f6f876b56b29c1de913786e6806a67d98e
|
74b12c96a73d464e3ca3241ae83a0b6fe984b913
|
/python/tvm/runtime/__init__.py
|
e0da680a24fc3e555e5824caa81dc414f22c6abf
|
[
"Apache-2.0",
"BSD-3-Clause",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
masahi/tvm
|
cf765bb892655f02135e1ce3afde88698f026483
|
c400f7e871214451b75f20f4879992becfe5e3a4
|
refs/heads/master
| 2023-08-22T20:46:25.795382
| 2022-04-13T08:47:10
| 2022-04-13T08:47:10
| 138,661,036
| 4
| 2
|
Apache-2.0
| 2021-09-03T20:35:19
| 2018-06-25T23:39:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM runtime namespace."""
# class exposures
from .packed_func import PackedFunc
from .object import Object
from .object_generic import ObjectGeneric, ObjectTypes
from .ndarray import NDArray, DataType, DataTypeCode, Device
from .module import Module, num_threads
from .profiling import Report
# function exposures
from .object_generic import convert_to_object, convert, const
from .ndarray import device, cpu, cuda, gpu, opencl, cl, vulkan, metal, mtl
from .ndarray import vpi, rocm, ext_dev
from .module import load_module, enabled, system_lib
from .container import String, ShapeTuple
from .params import save_param_dict, load_param_dict
from . import executor
|
[
"noreply@github.com"
] |
masahi.noreply@github.com
|
9f36779e687ed1474e2e97a9940161e6764000b2
|
b0c8e0cafa4a8916faab3cce65756ae91426c43f
|
/study/Python/Week8/BOJ_11497_강의현.py
|
e96c5e3d830e8bd8766220fac444dcc77cd359af
|
[] |
no_license
|
Rurril/IT-DA-3rd
|
b3e3ec3c2a5efbc75b76b84e9002c27a0ba4a1c4
|
9985e237cb1b90e9609656d534e0ed164723e281
|
refs/heads/master
| 2022-07-22T15:26:39.085369
| 2021-11-23T13:30:06
| 2021-11-23T13:30:06
| 288,980,334
| 3
| 29
| null | 2020-11-05T10:25:30
| 2020-08-20T10:49:17
|
Java
|
UTF-8
|
Python
| false
| false
| 598
|
py
|
# Log crossing (BOJ 11497, tier S2).
# Arrange log heights in a circle so the largest adjacent difference is as
# small as possible, then print that difference for each test case.
import sys
from collections import deque
T=int(sys.stdin.readline())
for _ in range(T):
    new_log=deque()
    N=int(sys.stdin.readline())
    log=list(map(int,sys.stdin.readline().split()))
    log.sort()
    # Start from the tallest log and deal the rest alternately to each end,
    # so heights descend in both directions around the circle.
    new_log.append(log[-1])
    for i in range(N-2,-1,-1):
        if i%2==0:
            new_log.appendleft(log[i])
        else:
            new_log.append(log[i])
    # Circular adjacent differences: wrap-around pair first, then neighbours.
    difficulty=list()
    difficulty.append(abs(new_log[-1]-new_log[0]))
    for i in range(1,N):
        difficulty.append(abs(new_log[i]-new_log[i-1]))
    print(max(difficulty))
|
[
"riverkeh@naver.com"
] |
riverkeh@naver.com
|
bf3ab03aff33092943b6feb95eb8ecc781d53477
|
91d9bba0d7608818c077676c588701855dd1382c
|
/virtual/bin/pyrsa-encrypt
|
3bf93434aec95d8f816bcb119259cc94286688e0
|
[] |
no_license
|
Antony-me/movie-library
|
89e82c908288d55153aa81f2289087246e383cf4
|
cbd333a79362ab1a2aa26356a6e914e9b67f5a63
|
refs/heads/main
| 2023-01-11T10:29:56.626291
| 2020-11-19T12:17:29
| 2020-11-19T12:17:29
| 313,741,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
#!/home/moringa/Documents/Moringa-Projects/CORE-PYTHON/Django/Netflix/virtual/bin/python3
# -*- coding: utf-8 -*-
# Console-script shim generated for the `rsa` package's encrypt command.
import re
import sys
from rsa.cli import encrypt
if __name__ == '__main__':
    # Strip a trailing "-script.pyw"/".exe" suffix (Windows launcher artefact)
    # so argv[0] shows the clean command name, then run the CLI entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(encrypt())
|
[
"antonymunyasia993@gmail.com"
] |
antonymunyasia993@gmail.com
|
|
0485c06a92bd564030cc3dff86d3ed9c9bb8fbd3
|
ec931947aa3e06ce565637e7ee1cb707f56375a2
|
/aoc2015/modules/grid.py
|
145b44dc85960aecdd0fa419d8dbac10c91fa804
|
[] |
no_license
|
chrisglencross/advent-of-code
|
5f16ed7e2265d27ce15f502ce2a1c2f11fc99fc0
|
21623d4aa01a9e20285a0233c50f8f56c4099af5
|
refs/heads/master
| 2023-01-24T22:01:30.829679
| 2023-01-12T23:03:03
| 2023-01-12T23:03:03
| 224,833,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,596
|
py
|
from __future__ import annotations
from typing import Tuple, Dict, List, Optional
import networkx as nx
from aoc2019.modules import textgridprinter
from aoc2019.modules.directions import COMPASS_DIRECTIONS
Coords = Tuple[int, int]
def default_is_navigable(grid: Grid, from_coord: Coords, to_coord: Coords):
    """A step is allowed only when both endpoint cells are open floor ('.')."""
    walkable = {"."}
    return grid[from_coord] in walkable and grid[to_coord] in walkable
def default_node_factory(coords):
    """Identity factory: the coordinate tuple itself serves as the graph node."""
    return coords
class Grid:
    """Sparse 2-D character grid keyed by (x, y), with graph-building helpers."""

    def __init__(self, grid, directions=COMPASS_DIRECTIONS.values()):
        # grid: dict mapping (x, y) -> single-character cell symbol.
        # NOTE(review): the default `directions` is evaluated once at class
        # definition time; fine here because it is only read, never mutated.
        self.grid = grid
        self.directions = directions

    def items(self):
        """(coords, symbol) pairs for every populated cell."""
        return self.grid.items()

    def get_bounds(self) -> Tuple[Coords, Coords]:
        """Return ((min_x, min_y), (max_x, max_y)); ((0, 0), (0, 0)) when empty."""
        xs = set([c[0] for c in self.grid.keys()])
        ys = set([c[1] for c in self.grid.keys()])
        if not xs:
            xs = {0}
        if not ys:
            ys = {0}
        return (min(xs), min(ys)), (max(xs), max(ys))

    def find_cell(self, symbol) -> Optional[Coords]:
        """Coordinates of a cell holding *symbol*, or None if absent."""
        for coords, cell in self.grid.items():
            if cell == symbol:
                return coords
        return None

    def find_cells(self, symbol) -> List[Coords]:
        """Coordinates of every cell holding *symbol*."""
        result = []
        for coords, cell in self.grid.items():
            if cell == symbol:
                result.append(coords)
        return result

    def index_cells(self, symbols=None, not_symbols=None) -> Dict[str, Coords]:
        """Map each matching symbol to its unique location.

        With no arguments, indexes everything except floor/wall/space.
        Raises if a matching symbol occurs more than once.
        """
        if symbols is None and not_symbols is None:
            not_symbols = {".", "#", " "}
        result = {}
        for coords, cell in self.grid.items():
            if (symbols and cell in symbols) or (not_symbols and cell not in not_symbols):
                if result.get(cell) is not None:
                    raise Exception(f"Symbol {cell} is repeated in grid. Index it with index_repeating_cells()")
                result[cell] = coords
        return result

    def index_repeating_cells(self, symbols=None, not_symbols=None) -> Dict[str, List[Coords]]:
        """Like index_cells, but collects every location of each matching symbol."""
        if symbols is None and not_symbols is None:
            not_symbols = {".", "#", " "}
        result = {}
        for coords, cell in self.grid.items():
            if (symbols and cell in symbols) or (not_symbols and cell not in not_symbols):
                result_list = result.get(cell)
                if result_list is None:
                    result_list = []
                    result[cell] = result_list
                result_list.append(coords)
        return result

    def keys(self):
        return self.grid.keys()

    def values(self):
        return self.grid.values()

    def get(self, coords: Coords, default_value=None):
        return self.grid.get(coords, default_value)

    def __getitem__(self, coords: Coords):
        return self.get(coords)

    def __setitem__(self, coords: Coords, cell: str):
        self.grid[coords] = cell

    def build_graph(self,
                    directions=COMPASS_DIRECTIONS.values(),
                    node_factory=default_node_factory,
                    is_navigable=default_is_navigable) -> nx.Graph:
        """Undirected graph with an edge for each navigable step between cells."""
        graph = nx.Graph()
        self.add_graph_edges(graph, directions, node_factory, is_navigable)
        return graph

    def build_digraph(self,
                      directions=COMPASS_DIRECTIONS.values(),
                      node_factory=default_node_factory,
                      is_navigable=default_is_navigable) -> nx.DiGraph:
        """Directed variant of build_graph (edges honour step direction)."""
        graph = nx.DiGraph()
        self.add_graph_edges(graph, directions, node_factory, is_navigable)
        return graph

    def add_graph_edges(self, graph: nx.Graph,
                        directions=COMPASS_DIRECTIONS.values(),
                        node_factory=default_node_factory,
                        is_navigable=default_is_navigable):
        """Add a distance-1 edge for every permitted step between neighbours."""
        for from_coords, from_symbol in self.items():
            from_node = node_factory(from_coords)
            for direction in directions:
                to_coords = direction.move(from_coords)
                to_symbol = self.get(to_coords)
                if to_symbol and is_navigable(self, from_coords, to_coords):
                    to_node = node_factory(to_coords)
                    graph.add_edge(from_node, to_node, distance=1)

    def print(self):
        # Delegates rendering to the shared text-grid printer.
        textgridprinter.TextGridPrinter().print(self)
def parse_grid(content: str) -> Grid:
    """Parse newline-separated rows of characters into a Grid keyed by (x, y)."""
    cells = {
        (x, y): ch
        for y, row in enumerate(content.split("\n"))
        for x, ch in enumerate(row.rstrip())
    }
    return Grid(cells)
def load_grid(file: str) -> Grid:
    """Read *file* and parse its entire contents into a Grid."""
    with open(file) as f:
        content = f.read()
    return parse_grid(content)
|
[
"chris@glencross.org"
] |
chris@glencross.org
|
24f099a2224e7baa91a9ab2ebaa2b26ed626e085
|
b86a0656dfb861e0af4b784c94f52742738c29ae
|
/abf-repoclosure/repoclosure/renders/render_repoclosure.py
|
a50e71f3bd0e2a7cd0cf7642ac73fd62d6f25f28
|
[] |
no_license
|
OpenMandrivaSoftware/docker-abf
|
dba52547ac51fa86028a4ee56f9b165297e66fd5
|
635774f0f97e71aeaa8f9a3965bfa94c99ad1d93
|
refs/heads/master
| 2023-06-21T14:59:42.311892
| 2023-06-20T01:04:10
| 2023-06-20T01:04:10
| 45,573,628
| 0
| 6
| null | 2018-03-19T21:56:09
| 2015-11-04T23:12:59
|
Shell
|
UTF-8
|
Python
| false
| false
| 560
|
py
|
from ..templates import repoclosure
def render_repoclosure(result, title, compressed_report, path):
    """Render a repoclosure result dict through the `repoclosure` template
    and write the rendered report to *path*.
    """
    tpl = repoclosure()
    # Title and raw error text are shown regardless of outcome.
    tpl.title = title
    tpl.errors = result['errors_raw']
    if result['return_code'] == -1:
        # The repoclosure run itself failed; only errors are available.
        tpl.code = -1
    else:
        tpl.code = result['return_code']
        tpl.bad_packages = result['report']
        tpl.count = result['count']
        tpl.total_count = result['total_count']
        tpl.percent = result['percent']
        tpl.compressed = compressed_report
    with open(path, "w") as out:
        out.write(tpl.respond())
|
[
"you@example.com"
] |
you@example.com
|
2ba8d0457b0f510232d4d95043d81f82a5fa7b41
|
b8fed8222b41e447cd5ce83513eb4d014c01742b
|
/sbm_purchase_rent/contract/po_contract.py
|
bc25bb0bcb7486749663e8676b19e87b615275c2
|
[] |
no_license
|
lajayuhniyarsyah/ERP-Supra
|
e993d8face6e022b6f863d1dff7cb51cda36be8d
|
5a64dbb57ee40070354926700091fb9025c1350c
|
refs/heads/master
| 2021-01-25T22:09:46.306990
| 2017-11-08T05:32:04
| 2017-11-08T05:32:04
| 23,605,825
| 0
| 10
| null | 2017-11-08T05:32:05
| 2014-09-03T03:58:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,186
|
py
|
from datetime import datetime
import netsvc
from osv import osv, fields
class POContract(osv.osv):
    """Extends purchase.order with a link to contract data plus related fields."""
    _inherit = 'purchase.order'
    _columns = {
        'contract_id' : fields.many2one('purchase.order.contract.data','Contract',ondelete="Cascade"),
        # Non-stored mirrors of the linked contract's fields for display.
        'contract_no' : fields.related('contract_id','contract_no',type="char",string="Contract No",store=False),
        'start_contract' : fields.related('contract_id','start_contract',type="date",string="Contract Start",store=False),
        'expire_contract' : fields.related('contract_id','expire_contract',type="date",string="Contract Expire",store=False),
        'notes' : fields.related('contract_id','notes',type="text",string="Notes",store=False),
    }
    # def write(self,cr,uid,ids,vals,context=None):
    #     return False
POContract()
class POContractData(osv.osv):
    """Contract master data referenced by purchase orders."""
    _name = 'purchase.order.contract.data'
    _rec_name = 'contract_no'  # records display their contract number
    _columns = {
        'contract_no' : fields.char('Contract No',30,required=True),
        'start_contract' : fields.date('Date Start',required=True),
        'expire_contract' : fields.date('Expire Contract',required=True),
        'notes' : fields.text('Notes'),
        # Reverse side of POContract.contract_id.
        'pos' : fields.one2many('purchase.order','contract_id','POs')
    }
POContractData()
|
[
"lajayuhni@gmail.com"
] |
lajayuhni@gmail.com
|
6d4ccc01a0dc81b33d21a9f1ecd1714a78978b4a
|
9b1446b26e81a79c303f9799fb6a91785c7adb03
|
/.history/Code/markov_chain_20200121115909.py
|
bdd2412da83d245c8cc5b9d7f73b3d70654d3c82
|
[] |
no_license
|
SamirIngley/CS1.2-Tweet-Gen
|
017ea15b1113881a156ff24682828bc654eb6c81
|
bcd95fa63e05849cbf8e36230d8e31032b99daaa
|
refs/heads/master
| 2020-12-14T20:19:57.733290
| 2020-08-04T23:19:23
| 2020-08-04T23:19:23
| 234,856,234
| 0
| 0
| null | 2020-06-05T21:13:04
| 2020-01-19T07:05:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,340
|
py
|
import sample
import random
from clean_text import clean
from dictogram import Dictogram
class Markov():
    """First-order Markov chain over a cleaned word corpus.

    `states` maps each word to a Dictogram (histogram) of the words that
    immediately follow it in the corpus.
    """
    def __init__(self, corpus):
        # NOTE(review): clean() receives the constructor argument verbatim —
        # see __main__, which passes a file *name*; confirm what clean() expects.
        self.corpus = clean(corpus)
        self.states = {}
        self.chain()

    def chain(self):
        """Build the word -> follower-histogram table from the corpus."""
        last_word = None
        for word in self.corpus:
            if last_word is not None: # set last word line 14
                if last_word not in self.states: # if we haven't seen this word before
                    self.states[last_word] = Dictogram() # empty histogram as value
                self.states[last_word].add_count(word) # add word to last word histogram
            last_word = word # set word as last_word

    def __str__(self):
        return str(self.states)

    def random_walk(self, num_words=11):
        """Generate a list of num_words words by walking the chain.

        NOTE(review): several apparent defects — `rand` is drawn once and
        reused for every fallback pick (and randint's upper bound equals
        len(self.states), an IndexError risk); the weighted pick below
        accumulates percentages with a nested while that can loop without
        advancing between keys. Verify against the intended sampling logic.
        """
        # length = len(self.states)
        # rand = random.randint(0, length)
        sentence = []
        length = len(self.states)
        rand = random.randint(0, length)
        words_counter = 0
        last_word = None
        while num_words > words_counter:
            if last_word:
                pickings = self.states[last_word] # dictionary of words to pick from based on last_word's hist
                print(pickings)
                total_wc = 0 # number of words in dictionary for a word
                print(total_wc)
                dart = random.randint(0, 100) # as percentage
                print(dart)
                for value in pickings.values(): # calculates total word count
                    total_wc += value
                    print(value)
                counter = 0
                for key,value in pickings.items():
                    print(key, value)
                    while counter < dart:
                        counter += (value / total_wc) * 100 # as percentage
                        print(counter)
                        last_word = key
                        print(last_word)
            else:
                last_word = (list(self.states)[rand])
            words_counter += 1
            sentence.append(last_word)
        return sentence
if __name__ == '__main__':
    # NOTE(review): `source` is unused; Markov is given the literal string
    # 'source.txt', so clean() receives the file *name* rather than this
    # sample corpus — confirm whether clean() opens paths or expects text.
    source = 'one fish two fish red fish blue fish'
    markov = Markov('source.txt')
    print(markov.states)
    print('')
    print(markov.random_walk())
|
[
"samir.ingle7@gmail.com"
] |
samir.ingle7@gmail.com
|
3af099fce18a35cd4837291a2a99727140954c97
|
74f8d8c8030ce0c8cd3622cb99f0a668ba93a7e8
|
/dialogue-engine/test/programytest/parser/template/node_tests/test_authorise.py
|
d421ef78f11762c08248c75b2c9b95023a672603
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Tommytosik/cotoba-agent-oss
|
3124a376ac0ca1147a58405a8f269a0eb68bc014
|
78e5c2c244b92e71755783d972228904c4d0d373
|
refs/heads/master
| 2022-12-08T15:44:27.731731
| 2020-08-04T08:36:41
| 2020-08-04T08:36:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,002
|
py
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.authorise import TemplateAuthoriseNode
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class MockTemplateAuthoriseNode(TemplateAuthoriseNode):
    """Authorise node whose resolution always raises, for exception-path tests."""

    def __init__(self):
        TemplateAuthoriseNode.__init__(self)

    def resolve_to_string(self, context):
        # Deliberate failure so tests can assert error propagation.
        raise Exception("This is a failure")
class TemplateAuthoriseNodeTests(ParserTestsBaseClass):
    """Tests for TemplateAuthoriseNode construction, XML rendering and errors."""

    def test_node_init(self):
        # A node with only a role reports it via to_string().
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)
        node = TemplateAuthoriseNode()
        node.role = "root"
        self.assertIsNotNone(node)
        self.assertEqual("root", node.role)
        root.append(node)
        self.assertEqual(len(root.children), 1)
        self.assertEqual("[AUTHORISE (role=root)]", node.to_string())

    def test_node_init_optiona_srai(self):
        # The optional denied_srai is included in the string form when set.
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)
        node = TemplateAuthoriseNode()
        node.role = "root"
        self.assertIsNotNone(node)
        self.assertEqual("root", node.role)
        node.denied_srai = "ACCESS_DENIED"
        self.assertIsNotNone(node)
        self.assertEqual("ACCESS_DENIED", node.denied_srai)
        root.append(node)
        self.assertEqual(len(root.children), 1)
        self.assertEqual("[AUTHORISE (role=root, denied_srai=ACCESS_DENIED)]", node.to_string())

    def test_to_xml_service_no_content(self):
        # An empty authorise node serialises to a self-closing element.
        root = TemplateNode()
        node = TemplateAuthoriseNode()
        node.role = "root"
        self.assertIsNotNone(node)
        root.append(node)
        self.assertEqual(len(root.children), 1)
        xml = root.xml_tree(self._client_context)
        self.assertIsNotNone(xml)
        xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
        self.assertEqual('<template><authorise role="root" /></template>', xml_str)

    def test_to_xml_service_with_content(self):
        # Child nodes become the element's text content.
        root = TemplateNode()
        node = TemplateAuthoriseNode()
        node.role = "root"
        self.assertIsNotNone(node)
        node.append(TemplateWordNode("Hello"))
        root.append(node)
        self.assertEqual(len(root.children), 1)
        xml = root.xml_tree(self._client_context)
        self.assertIsNotNone(xml)
        xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
        self.assertEqual('<template><authorise role="root">Hello</authorise></template>', xml_str)

    def test_to_xml_service_no_content_and_optional_srai(self):
        # denied_srai is emitted as an XML attribute alongside role.
        root = TemplateNode()
        node = TemplateAuthoriseNode()
        node.role = "root"
        node.denied_srai = "ACCESS_DENIED"
        self.assertIsNotNone(node)
        root.append(node)
        self.assertEqual(len(root.children), 1)
        xml = root.xml_tree(self._client_context)
        self.assertIsNotNone(xml)
        xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
        self.assertEqual('<template><authorise denied_srai="ACCESS_DENIED" role="root" /></template>', xml_str)

    def test_node_exception_handling(self):
        # Failures in resolve_to_string propagate through node and root resolve.
        root = TemplateNode()
        self.assertIsNotNone(root)
        self.assertIsNotNone(root.children)
        self.assertEqual(len(root.children), 0)
        node = MockTemplateAuthoriseNode()
        node.role = "root"
        self.assertIsNotNone(node)
        self.assertEqual("root", node.role)
        root.append(node)
        self.assertEqual(len(root.children), 1)
        with self.assertRaises(Exception):
            node.resolve_to_string(self._client_context)
        with self.assertRaises(Exception):
            root.resolve(self._client_context)
|
[
"cliff@cotobadesign.com"
] |
cliff@cotobadesign.com
|
d6a017668b77161fc0092d339bbc5ee76cb9b2a8
|
29ed133feb870455ca619c9fa2ce9b7eb1dcc470
|
/URIs/URI1930.py
|
22e1944f07f5f32fae61f640c2b1136be3f4465e
|
[] |
no_license
|
jrantunes/URIs-Python-3
|
c5e676686a979b6bbfd10b8e7168a6d35fb8f6a2
|
4692f3fba4a1c9a0f51322a13e9e267d8b07ea3e
|
refs/heads/master
| 2022-04-17T10:56:52.468275
| 2020-03-28T17:07:46
| 2020-03-28T17:07:46
| 250,395,664
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
#Electrical Outlet
t1, t2, t3, t4 = input().split()
t1, t2, t3, t4 = int(t1) - 1, int(t2) - 1, int(t3) - 1, int(t4)
tomadas = [t1, t2, t3, t4]
print(sum(tomadas))
|
[
"noreply@github.com"
] |
jrantunes.noreply@github.com
|
24c9821ee09e36a22850395bcbc3a104f1b923c9
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/429/usersdata/321/101584/submittedfiles/jogoDaVelha.py
|
6a1f739064ae86cf4550efd44217ef6939a0aeb4
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
# -*- coding: utf-8 -*-
from jogoDaVelha_BIB import *
# COLOQUE SEU PROGRAMA A PARTIR DAQUI
print('Bem vindo ao JogoDaVelha do grupo 8 [Iara, Ingrid, Luiz Otávio, Tatiane]')
nome = str(input('Qual seu nome? '))
s = str(input('Qual símbolo você deseja utilizar no jogo? (X ou O) '))
while s != X or s != O:
print('Isira um símbolo válido')
s = str(input('Qual símbolo você deseja utilizar no jogo? '))
if s = X
print(sorteio(inicio))
print(tabuleiro)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1cb73c6568858279025a470e045c2fd95de4ee58
|
1d1f173d67a04b78f732aee99ef0e2d4e8284d63
|
/dev/migrate_testing_phylesystem.py
|
8df26db7e044c0beb132c03618620d7e68edd506
|
[
"Python-2.0",
"BSD-2-Clause"
] |
permissive
|
rvosa/peyotl
|
8767165ec85129c8f25c56a572f0bd879158aa2a
|
98154af9832d18cbcb079f7e2db3b0e45893e1da
|
refs/heads/master
| 2021-01-18T19:48:31.273061
| 2015-09-03T15:30:13
| 2015-09-03T15:30:13
| 41,867,598
| 0
| 0
| null | 2015-09-03T15:29:00
| 2015-09-03T15:29:00
| null |
UTF-8
|
Python
| false
| false
| 4,445
|
py
|
#!/usr/bin/env python
from peyotl.phylografter.nexson_workaround import workaround_phylografter_export_diffs, \
add_default_prop
from peyotl.phylesystem.git_actions import get_filepath_for_namespaced_id
from peyotl import get_logger
from subprocess import call
import codecs
import json
import sys
import os
import re
_LOG = get_logger(__name__)
def debug(m):
_LOG.debug(m)
old_phylesystem = sys.argv[1]
old_phylesystem_study = os.path.abspath(os.path.join(old_phylesystem, 'study'))
new_phylesystem = sys.argv[2]
new_phylesystem_study = os.path.abspath(os.path.join(new_phylesystem, 'study'))
scratch_par = sys.argv[3]
assert(os.path.isdir(old_phylesystem_study))
assert(os.path.isdir(new_phylesystem_study))
assert(os.path.isdir(scratch_par))
script_name = os.path.abspath(sys.argv[0])
peyotl_dev_dir = os.path.split(script_name)[0]
peyotl_dir =os.path.split(peyotl_dev_dir)[0]
conversion_script = os.path.join(peyotl_dir, 'scripts', 'nexson', 'nexson_nexml.py')
assert(os.path.isfile(conversion_script))
validation_script = os.path.join(peyotl_dir, 'scripts', 'nexson', 'validate_ot_nexson.py')
assert(os.path.isfile(conversion_script))
failed = []
pg_study_pat = re.compile(r'^\d+')
if len(sys.argv) > 4:
sl = sys.argv[4:]
else:
sl = os.listdir(old_phylesystem_study)
for f in sl:
if pg_study_pat.match(f):
source_study = f
dest_full = get_filepath_for_namespaced_id(new_phylesystem, f)
scratch_dir = os.path.join(scratch_par, f)
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
full_source = os.path.join(old_phylesystem_study, source_study, source_study + '.json')
dest_dir = os.path.split(dest_full)[0]
assert(os.path.exists(full_source))
if os.path.exists(dest_full):
debug('Skipping {} because output exists'.format(f))
continue
# read input and do the phylografter_workaround to valid 0.0.0 syntax
# store in scratch.
valid_bf = os.path.join(scratch_dir, 'v0.0.0-' + source_study + '.json')
debug('Raw phylografter from "{}" to valid 0.0.0 NexSON at "{}" ...'.format(full_source, valid_bf))
inp = codecs.open(full_source, mode='rU', encoding='utf-8')
obj = json.load(inp)
try:
workaround_phylografter_export_diffs(obj, valid_bf)
except:
_LOG.exception('Exception in workaround_phylografter_export_diffs for study ' + f)
failed.append(f)
continue
# Convert to 1.2.1
unchecked_hbf = os.path.join(scratch_dir, 'v1.2.1-' + source_study + '.json')
debug('Converting cleaned 0.0.0 NexSON from "{}" to unchecked 1.2.1 NexSON at "{}" ...'.format(valid_bf, unchecked_hbf))
invoc = [sys.executable,
conversion_script,
'-s',
'-e',
'1.2.1',
'-o',
unchecked_hbf,
valid_bf]
debug('invoc: "{}"'.format('" "'.join(invoc)))
rc = call(invoc)
if rc != 0:
failed.append(f)
else:
inp = codecs.open(unchecked_hbf, mode='rU', encoding='utf-8')
obj = json.load(inp)
aug_hbf = os.path.join(scratch_dir, 'augmentedv1.2.1-' + source_study + '.json')
add_default_prop(obj, aug_hbf)
# validate
annotation = os.path.join(scratch_dir, 'validation.json')
tmp = os.path.join(scratch_dir, 'final.json')
debug('Writing annotated version of "{}" to "{}" with annotations to "{}" ...'.format(
aug_hbf,
tmp,
annotation))
invoc = [sys.executable,
validation_script,
'--embed',
'--agent-only',
'-e',
annotation,
'-o',
tmp,
aug_hbf]
debug('invoc: "{}"'.format('" "'.join(invoc)))
rc = call(invoc)
if rc != 0:
failed.append(f)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
os.rename(tmp, dest_full)
if failed:
m = '\n '.join(failed)
sys.exit('Conversion of the following studies failed:\n {}'.format(m))
|
[
"mtholder@gmail.com"
] |
mtholder@gmail.com
|
b6fc79b993cd002a05a15ed4d474c68787c15613
|
1b9075ffea7d4b846d42981b41be44238c371202
|
/2009/devel/programming/library/caps/actions.py
|
518c5e326c2b2857a117363e060e5b3fc85eebcc
|
[] |
no_license
|
pars-linux/contrib
|
bf630d4be77f4e484b8c6c8b0698a5b34b3371f4
|
908210110796ef9461a1f9b080b6171fa022e56a
|
refs/heads/master
| 2020-05-26T20:35:58.697670
| 2011-07-11T11:16:38
| 2011-07-11T11:16:38
| 82,484,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import pisitools
WorkDir = "ipfdevlib_linux"
def install():
pisitools.dobin("examples/i686/ipfinfo")
pisitools.insinto("/usr/include/caps", "include/caps/capsimage.h")
pisitools.dolib_so("lib/i686/libcapsimage.so.2.0")
pisitools.dosym("/usr/lib/libcapsimage.so.2.0", "/usr/lib/libcapsimage.so.2")
pisitools.dosym("/usr/lib/libcapsimage.so.2.0", "/usr/lib/libcapsimage.so")
pisitools.dodoc("HISTORY", "LICENSE", "README")
|
[
"zaburt@users.noreply.github.com"
] |
zaburt@users.noreply.github.com
|
c0a560dc7b728ab8f5e2bb57b87cb1e63a75ab05
|
30a2f77f5427a3fe89e8d7980a4b67fe7526de2c
|
/python/HERWIGPP_POWHEG_GluonFusion_H1200_bbbar_8TeV_cff.py
|
69a444d3216d8ed1263402b24a90ec1ffe8bbca7
|
[] |
no_license
|
DryRun/QCDAnalysis
|
7fb145ce05e1a7862ee2185220112a00cb8feb72
|
adf97713956d7a017189901e858e5c2b4b8339b6
|
refs/heads/master
| 2020-04-06T04:23:44.112686
| 2018-01-08T19:47:01
| 2018-01-08T19:47:01
| 55,909,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,112
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.HerwigppDefaults_cfi import *
generator = cms.EDFilter(
"ThePEGGeneratorFilter",
herwigDefaultsBlock,
configFiles = cms.vstring(),
parameterSets = cms.vstring(
'cm8TeV',
'powhegNewDefaults',
'GluonFusionHbbParameters',
'basicSetup',
'setParticlesStableForDetector',
),
powhegNewDefaults = cms.vstring(
'# Need to use an NLO PDF',
'# and strong coupling',
'cp /Herwig/Partons/MRST-NLO /Herwig/Partons/cmsPDFSet',
'create Herwig::O2AlphaS O2AlphaS',
'set /Herwig/Generators/LHCGenerator:StandardModelParameters:QCD/RunningAlphaS O2AlphaS',
'# Setup the POWHEG shower',
'cd /Herwig/Shower',
'set Evolver:HardEmissionMode POWHEG',
'# higgs + W (N.B. if considering all W decay modes useful to set )',
'# (jet pT cut to zero so no cut on W decay products )',
'# insert SimpleQCD:MatrixElements[0] PowhegMEPP2WH',
'# set /Herwig/Cuts/JetKtCut:MinKT 0.0*GeV',
'# higgs + Z (N.B. if considering all Z decay modes useful to set )',
'# (jet pT cut to zero so no cut on Z decay products )',
'# insert SimpleQCD:MatrixElements[0] PowhegMEPP2ZH',
'# set /Herwig/Cuts/JetKtCut:MinKT 0.0*GeV',
'# gg/qqbar -> Higgs',
'# insert SimpleQCD:MatrixElements[0] PowhegMEHiggs',
'# Weak boson pair production: WW / ZZ / WZ / W+Z [WpZ] / W-Z [WmZ]',
'# insert SimpleQCD:MatrixElements[0] PowhegMEPP2VV',
'# set PowhegMEPP2VV:Process WpZ'
),
pdfCTEQ6M = cms.vstring(
'mkdir /LHAPDF',
'cd /LHAPDF',
'create ThePEG::LHAPDF CTEQ6M',
'set CTEQ6M:PDFName cteq6mE.LHgrid',
'set CTEQ6M:RemnantHandler /Herwig/Partons/HadronRemnants',
'cp CTEQ6M /cmsPDFSet',
'cd /'
),
GluonFusionHbbParameters = cms.vstring(
'cd /Herwig/MatrixElements/',
'insert SimpleQCD:MatrixElements[0] PowhegMEHiggs',
'set /Herwig/Particles/h0:NominalMass 1200.*GeV',
'set /Herwig/Particles/h0/h0->b,bbar;:OnOff On',
#'set /Herwig/Particles/h0/h0->b,bbar;:BranchingRatio 0.7195',
#'set /Herwig/Particles/h0/h0->b,bbar;:BranchingRatio 1.0000',
'set /Herwig/Particles/h0/h0->W+,W-;:OnOff Off',
'set /Herwig/Particles/h0/h0->tau-,tau+;:OnOff Off',
'set /Herwig/Particles/h0/h0->g,g;:OnOff Off',
'set /Herwig/Particles/h0/h0->c,cbar;:OnOff Off',
'set /Herwig/Particles/h0/h0->Z0,Z0;:OnOff Off',
'set /Herwig/Particles/h0/h0->gamma,gamma;:OnOff Off',
'set /Herwig/Particles/h0/h0->mu-,mu+;:OnOff Off',
'set /Herwig/Particles/h0/h0->t,tbar;:OnOff Off'
),
#crossSection = cms.untracked.double(0.1665),
#filterEfficiency = cms.untracked.double(1.0)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('\$Revision: 1.3 $'),
name = cms.untracked.string('\$Source: /local/reps/CMSSW/CMSSW/Configuration/GenProduction/python/EightTeV/HERWIGPP_POWHEG_H125_bbbar_W_lnu_8TeV_cff.py,v $'),
annotation = cms.untracked.string('HERWIGPP/POWHEG: (H->bb)(W->lnu), m(H)=125 GeV, l=e or mu or tau')
)
|
[
"david.renhwa.yu@gmail.com"
] |
david.renhwa.yu@gmail.com
|
b614ca7ed169de8fd6bc9ceab1f35f66a6ecbd4e
|
3bb70650b4b83e4653dcc18c8233c106c7a5611a
|
/receipt/models.py
|
a0249ce6f92b8a79eb8446196a76c3d5c9ab0a32
|
[] |
no_license
|
khanhlu2013/pos_connect_code
|
48e736a6b1c5ca6a5c4ff39d842d8a93f66e67ef
|
fdf70de858c10b175832af31ecc0cf770d028396
|
refs/heads/master
| 2023-04-08T02:35:46.181265
| 2016-10-18T21:12:51
| 2016-10-18T21:12:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,959
|
py
|
from django.db import models
from store.models import Store
from store_product.models import Store_product
import decimal
from payment_type.models import Payment_type
class Receipt(models.Model):
date = models.DateTimeField()
tax_rate = models.DecimalField(max_digits=6, decimal_places=4)
store = models.ForeignKey(Store)
_receipt_doc_id = models.CharField(max_length=40,unique=True)#this field is the receipt doc id from couch.as an optimization to save sale data to master. we bulk create models.Receipt and need this link to document.Receipt to bulk insert models.Receipt_ln
def __unicode__(self):
return str(self.id)
class Tender_ln(models.Model):
receipt = models.ForeignKey(Receipt,related_name="tender_ln_lst")
payment_type = models.ForeignKey(Payment_type,blank=True,null=True)
amount = models.DecimalField(max_digits=6, decimal_places=2)
name = models.CharField(blank=True,null=True,max_length=100)
class Receipt_ln(models.Model):
receipt = models.ForeignKey(Receipt,related_name="receipt_ln_lst")
qty = models.IntegerField()
discount = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
override_price = models.DecimalField(max_digits=6, decimal_places=3,blank=True,null=True)
date = models.DateTimeField()
store_product = models.ForeignKey(Store_product,blank=True,null=True)
sp_stamp_name = models.CharField(max_length=100,blank=True,null=True)
sp_stamp_price = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
sp_stamp_value_customer_price = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
sp_stamp_crv = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
sp_stamp_is_taxable = models.NullBooleanField(blank=True,null=True)
sp_stamp_is_sale_report = models.NullBooleanField(blank=True,null=True)
sp_stamp_p_type = models.CharField(max_length=100,blank=True,null=True)
sp_stamp_p_tag = models.CharField(max_length=100,blank=True,null=True)
sp_stamp_cost = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
sp_stamp_vendor = models.CharField(max_length=100,blank=True,null=True)
sp_stamp_buydown = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
mm_deal_discount = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
mm_deal_name = models.CharField(max_length=100,blank=True,null=True)
non_inventory_name = models.CharField(max_length=100,blank=True,null=True)
non_inventory_price = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
non_inventory_crv = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
non_inventory_is_taxable = models.NullBooleanField(blank=True,null=True)
non_inventory_cost = models.DecimalField(max_digits=6, decimal_places=2,blank=True,null=True)
|
[
"khanhlu2013@gmail.com"
] |
khanhlu2013@gmail.com
|
0ce8d4ae15eba8e000cbe459928f96dd85b9f175
|
9e5eca27222871dd04e42c9106bb2fba07e598ff
|
/src/osxification/foundation/ns_string.py
|
4305958fd9999299dcc1df4b9c9d9d5641838191
|
[] |
no_license
|
jepebe/osxification
|
b2a68dec07cd0be3b7ebd519bd99d0bbd51e61c7
|
c9a539f4dbeda9200e32a2eea2c955dd94e6f45e
|
refs/heads/master
| 2016-09-03T06:35:41.659315
| 2015-05-19T18:00:23
| 2015-05-19T18:00:23
| 35,567,134
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,614
|
py
|
from osxification.foundation import NSStringEncoding, NSObject
class NSString(NSObject):
def __init__(self, content, encoding=None):
if isinstance(content, str):
encoding = NSStringEncoding.NSUTF8StringEncoding
# elif isinstance(content, unicode):
# encoding = NSStringEncoding.NSUnicodeStringEncoding
else:
raise UserWarning("[%s] Error: 'content' should be a string, received: %s" % (self.__class__.__name__, type(content)))
identifier = NSString._init(self.alloc(), content, encoding)
super(NSString, self).__init__(identifier)
def __str__(self):
return self._asCString(NSStringEncoding.NSUTF8StringEncoding)
# def __unicode__(self):
# return self._asCString(NSStringEncoding.NSUnicodeStringEncoding)
def __int__(self):
return self._intValue()
def __float__(self):
return self._floatValue()
def __eq__(self, other):
return self._isEqualToString(other)
@classmethod
def from_param(cls, instance):
if isinstance(instance, str):
instance = NSString(instance)
return NSObject.from_param(instance)
NSString._init = NSString.bindMethodToClass("initWithCString:encoding:")
NSString._asCString = NSString.bindMethodToClass("cStringUsingEncoding:", returns=str)
NSString._intValue = NSString.bindMethodToClass("integerValue", returns=int)
NSString._floatValue = NSString.bindMethodToClass("doubleValue", returns=float)
NSString._isEqualToString = NSString.bindMethodToClass("isEqualToString:", parameters=[NSString], returns=bool)
|
[
"jepebe@users.noreply.github.com"
] |
jepebe@users.noreply.github.com
|
bb4e08299b87e0d44389027cb157b9ba193b8b62
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/63/usersdata/250/32382/submittedfiles/swamee.py
|
65a4d92fd968b2221e8050cbd8814d6ae8e3c0f0
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
# -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI
f=float(input('digite f:'))
l=float(input('digite l:'))
q=float(input('digite q:'))
deltah=float(input('digite o delta:'))
v=float(input('digite v:'))
g=9.81
e=0.000002
D=((((8*f*l*q*q)/((math.pi**2)*(g*deltah))))**1/5)
print('o valor de D é:%.4f'%D)
rey=((4*q)/(math.pi*D*v))
print('o valor de rey é:%.4f'%rey)
k=0.25/(math.log10((e/(3.7*D))+(5.74/(rey**0.9))))
print('o valor de k é:%.4f'%k)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
a4a1a15fcd715bdc69965843f94c3b2f571c20b3
|
30227ff573bcec32644fca1cca42ef4cdd612c3e
|
/leetcode/linkedList/singly_linked_list/remove_node_a1.py
|
8ee8af6cdfb55f3965cc1e1c627c57e7e5e85560
|
[] |
no_license
|
saurabh-pandey/AlgoAndDS
|
bc55864422c93e6c93b8432e483394f286ce8ef2
|
dad11dedea9ceb4904d6c2dea801ce0172abfc81
|
refs/heads/master
| 2023-07-01T09:12:57.951949
| 2023-06-15T12:16:36
| 2023-06-15T12:16:36
| 88,239,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
#URL: https://leetcode.com/explore/learn/card/linked-list/219/classic-problems/1207/
# Description
"""
Given the head of a linked list and an integer val, remove all the nodes of the linked list that
has Node.val == val, and return the new head.
Example 1:
Input: head = [1,2,6,3,4,5,6], val = 6
Output: [1,2,3,4,5]
Example 2:
Input: head = [], val = 1
Output: []
Example 3:
Input: head = [7,7,7,7], val = 7
Output: []
Constraints:
The number of nodes in the list is in the range [0, 104].
1 <= Node.val <= 50
0 <= k <= 50
"""
def removeElements(head, val):
if head is None:
return
newHead = head
prevNode = head
currNode = head
while currNode is not None:
if currNode._val == val:
if currNode is newHead:
newHead = currNode._next
else:
prevNode._next = currNode._next
else:
# Only move previous node if current node is not the one to be deleted. Previous node should
# always point to something that is going to be part of the list
prevNode = currNode
currNode = currNode._next
return newHead
|
[
"saurabhpandey85@gmail.com"
] |
saurabhpandey85@gmail.com
|
cd8aecca91fd152be1487734efe54d582598aa3d
|
ab47546a5fbf086193130b66a0ac8d849aa75d23
|
/venv/bin/pip3.7
|
aa3156ab60ab683ac043913e5b5ec19d31981c22
|
[] |
no_license
|
icerahi/ecommerce-webapp
|
1f7b7a29d78d73ab03baa6aeeb69a4c95e042090
|
eb44e9c1f5b8444a72b3aaf5fcd96f30aa4757c9
|
refs/heads/master
| 2020-05-01T06:30:05.477773
| 2019-03-24T08:11:38
| 2019-03-24T08:11:38
| 177,331,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
7
|
#!/home/rahi/PycharmProjects/E-commerce/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
|
[
"zanjarwhite@gmail.com"
] |
zanjarwhite@gmail.com
|
ddf2a1f0cc0195a7ea1195d4200196e3b871e4be
|
7a20dac7b15879b9453150b1a1026e8760bcd817
|
/Curso/ModuloTkinter/Aula001HelloWorld.py
|
32a092bff66a7ed68b5bc07212de60aeb6f607e8
|
[
"MIT"
] |
permissive
|
DavidBitner/Aprendizado-Python
|
7afbe94c48c210ddf1ab6ae21109a8475e11bdbc
|
e1dcf18f9473c697fc2302f34a2d3e025ca6c969
|
refs/heads/master
| 2023-01-02T13:24:38.987257
| 2020-10-26T19:31:22
| 2020-10-26T19:31:22
| 283,448,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 547
|
py
|
from tkinter import *
# Sempre começar um programa com o root e a classe Tk
root = Tk()
# Criação do primeiro widget, nesse caso uma label presa a root com o texto "Hello World!"
myLabel = Label(root, text="Hello World!")
# Posicionando a label criada
myLabel.pack()
# Todo programa roda através de um loop, e com o atributo mainloop nós definimos que o programa deixa de rodar a partir desta linha de código. Ou seja, as linhas de código que vierem depois do mainloop serão executadas apenas após o fechamento do gui
root.mainloop()
|
[
"david-bitner@hotmail.com"
] |
david-bitner@hotmail.com
|
a90cd4d5bf5d588410d769c97cfa33f4a39619c4
|
d0eb9e95c796042067aceaf0fc3d43f56d4eb87b
|
/Tests/PootyTests.py
|
a75560eb9533c2d64a49521fcaed266ae119f381
|
[] |
no_license
|
finneyjm/RynLib
|
ea0fd0f8ccd21fdac4663d5fb2b6836efce49a10
|
42e7d07ff879f72ae163f682cb07ba7489ce0a06
|
refs/heads/master
| 2021-02-16T15:30:02.181769
| 2020-03-05T19:20:22
| 2020-03-05T19:20:22
| 245,020,012
| 0
| 0
| null | 2020-03-04T22:47:09
| 2020-03-04T22:47:08
| null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
from Peeves.TestUtils import *
from unittest import TestCase
from PootyAndTheBlowfish.Templator import *
from PootyAndTheBlowfish.PotentialTemplator import PotentialTemplate
import sys
class PootyTests(TestCase):
@inactiveTest
def test_ApplyBaseTemplate(self):
import os
curdir = os.getcwd()
template = os.path.join(curdir, "RynLib", "PootyAndTheBlowfish", "Templates", "PotentialTemplate")
writer = TemplateWriter(template, LibName = "ploot")
out = os.path.expanduser("~/Desktop")
writer.iterate_write(out)
worked = os.path.exists(os.path.join(out, "plootPot", "src", "CMakeLists.txt"))
self.assertTrue(worked)
@inactiveTest
def test_SimplePotential(self):
import os
writer = PotentialTemplate(
lib_name = "DumbPot",
function_name = "DumbPot",
linked_libs = [ "DumbPot" ],
potential_source = TestManager.test_data("DumbPot"),
requires_make = True
)
out = os.path.expanduser("~/Desktop")
writer.apply(out)
|
[
"b3m2a1@gmail.com"
] |
b3m2a1@gmail.com
|
20dafa7d284b657578456c3c93e1fdde8a345ed6
|
5817da2441abfe5974cd31f323caaa00958e26f5
|
/dekanat/migrations/0002_news_newsimage.py
|
aa0501d98902d0f66c90062049b5e6076b935ab5
|
[] |
no_license
|
katalyzator/Dekanat
|
3923f05eefb120fb366db2658e418ea9d87272f3
|
af32167bc78ca6ed52ad5910f6bfc8dfa3f91469
|
refs/heads/master
| 2021-01-12T15:05:09.618166
| 2016-10-23T08:51:11
| 2016-10-23T08:51:11
| 71,691,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,473
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-10-18 02:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dekanat', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='\u0417\u0430\u0433\u043e\u043b\u043e\u0432\u043e\u043a \u043f\u043e\u0441\u0442\u0430')),
('description', models.CharField(max_length=1000, verbose_name='\u041e\u043f\u0438\u0441\u0430\u043d\u0438\u0435 \u043f\u043e\u0441\u0442\u0430')),
('text', models.TextField(verbose_name='\u0422\u0435\u043a\u0441\u0442 \u043f\u043e\u0441\u0442\u0430')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '\u0414\u043e\u0431\u0430\u0432\u043b\u0435\u043d\u0438\u0435 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
'verbose_name_plural': '\u0414\u043e\u0431\u0430\u0432\u043b\u0435\u043d\u0438\u0435 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
},
),
migrations.CreateModel(
name='NewsImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='', verbose_name='\u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0430')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('news', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dekanat.News', verbose_name='\u0432\u044b\u0431\u0435\u0440\u0438\u0442\u0435 \u043d\u043e\u0432\u043e\u0441\u0442\u044c')),
],
options={
'verbose_name': '\u041a\u0430\u0440\u0442\u0438\u043d\u043a\u0438 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
'verbose_name_plural': '\u041a\u0430\u0440\u0442\u0438\u043d\u043a\u0438 \u043d\u043e\u0432\u043e\u0441\u0442\u0435\u0439',
},
),
]
|
[
"web.coder96@gmail.com"
] |
web.coder96@gmail.com
|
218f7f161ce570b21a5293386e4ddc9cc7759bd2
|
9b722ca41671eb2cea19bac5126d0920639261bd
|
/.history/app_20201124112830.py
|
dfe4f24e89674672c3d491d9d14c2ce2f017531e
|
[] |
no_license
|
thawalk/db_flask_server
|
7928fd481f99d30bdccc60d97f02db78324cfdbe
|
cd55f1c9bf84c734457ee02d9f64a6833e295fad
|
refs/heads/master
| 2023-01-25T02:40:19.097457
| 2020-12-06T07:45:50
| 2020-12-06T07:45:50
| 314,229,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,465
|
py
|
import json
import pymongo
from flask import Flask, jsonify, url_for, request, redirect,Response,Request
import pymongo
from bson.json_util import dumps
import mysql.connector
from werkzeug.serving import run_simple
import os
from dotenv import load_dotenv
import datetime
import time
app = Flask(__name__)
test_collection='test_collection'
mongo = pymongo.MongoClient('mongodb://54.83.130.150:27017/?readPreference=primary&appname=MongoDB%20Compass&ssl=false')
db = pymongo.database.Database(mongo, 'test')
metadata_col = pymongo.collection.Collection(db, 'test_collection')
db = mysql.connector.connect(
host ='3.84.158.241',
user = 'root',
password = '',
database = 'reviews',
)
cur = db.cursor()
@app.route('/',methods=["GET"])
def api_root():
data = {
'message': 'Welcome to our website. Where reviews are our number one priority'
}
js = json.dumps(data)
response = Response(js, status=200, mimetype='application/json')
return response
@app.route('/categories', methods = ['GET']) #TODO: #returns list of categories
def get_categories():
categories = []
js = json.dumps(data)
response = Response(js, status=200, mimetype='application/json')
return response
@app.route('/search', methods=['GET']) #now it only searches for TITLE. the mongo metadata does not have author
def search_book():
try:
data = request.json
title = data["title"]
result = metadata_col.find({"title":title})
result_array = dumps(list(result))
print(result_array)
js = json.dumps(result_array)
response = Response(js, status=200, mimetype='application/json')
return response
except:
errMsg = "Please include title."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
return response
# @app.route('/review', methods=['POST'])
# def add_review():
# if not request.json or not request.json['asin'] or type(request.json['asin']) != str or not request.json['overall'] or not request.json['reviewText'] or type(request.json['reviewText']) != str or not request.json['reviewTime'] or type(request.json['reviewTime']) != str or not request.json['reviewerID'] or type(request.json['reviewerID']) != str or not request.json['reviewerName'] or type(request.json['reviewerName']) != str or not request.json['summary'] or type(request.json['summary']) != str or not request.json['unixReviewTime'] or type(request.json['unixReviewTime']) != int :
# return 'invalid request msg', 404
# txt = "INSERT INTO 'kindle_reviews' ('id', 'asin', 'overall', 'reviewText', 'reviewTime', 'reviewerID', 'reviewerName', 'summary', 'unixReviewTime') VALUES (%s)"
# values = (None, request.json['asin'], request.json['overall'], request.json['reviewText'], request.json['reviewTime'], request.json['reviewerID'], request.json['reviewerName'], request.json['summary'], request.json['unixReviewTime'])
# cur.execute(txt, values)
# return 'successfully uploaded new review', 200
@app.route('/addBook',methods= ['POST'])
def add_book():
try:
data = request.json
title = data['title']
asin = data['asin']
description = data['description']
price = data['price']
categories = data['categories']
message = "Book added successfully"
metadata_col.insert({"title":title,"asin":asin,"description":description,"price":price,"categories":categories})
js = json.dumps(message)
response = Response(js, status=201, mimetype='application/json')
return response
except:
errMsg = "Please include title, asin, description, price and categories."
js = json.dumps(errMsg)
response = Response(js, status=400, mimetype='application/json')
return response
@app.route('/addReview',methods = ['POST']) #TODO: add review INTO sql part
def add_review():
try:
data = request.json
asin = data["asin"]
helpful = [0,0]
overall = data["overall"]
reviewTime = data["reviewTime"]
reviewerID = data["reviewerID"]
reviewerName = data["reviewerName"]
sum
@app.route('/sortByGenres', methods= ['GET']) #TODO: sort by genres from mongo metadata categories
def sort_by_genres():
pass
if __name__ == '__main__':
# app.run(host="0.0.0.0", port=80) #remember to change this part
app.run(debug=True)
|
[
"akmal_hakim_teo@hotmail.com"
] |
akmal_hakim_teo@hotmail.com
|
7bc09e89695184c589a6db756b746e3e9450f047
|
ab8ea44704ea1a444e4f68ee740b584288d3e558
|
/tests/test_execer.py
|
f0b0661a8b670a8b6d6093ea78392b65de20eb17
|
[
"BSD-2-Clause"
] |
permissive
|
jamiees2/xonsh
|
96514c3381ac2fcca872e473ea9d414d74c2fdc9
|
f7b5985a88b32fafdaf162012c1ebbd19e48e6b9
|
refs/heads/master
| 2021-01-18T07:49:58.323584
| 2015-03-11T01:56:42
| 2015-03-11T01:56:42
| 31,949,439
| 0
| 0
| null | 2015-03-10T09:42:21
| 2015-03-10T09:42:21
| null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
"""Tests the xonsh lexer."""
from __future__ import unicode_literals, print_function
import os
import sys
import ast
from xonsh.execer import Execer
from tools import mock_xonsh_env
DEBUG_LEVEL = 0
EXECER = None
#
# Helpers
#
def setup():
# only setup one parser
global EXECER
EXECER = Execer(debug_level=DEBUG_LEVEL)
def check_exec(input):
with mock_xonsh_env(None):
if not input.endswith('\n'):
input += '\n'
EXECER.debug_level = DEBUG_LEVEL
EXECER.exec(input)
def check_eval(input):
with mock_xonsh_env(None):
EXECER.debug_level = DEBUG_LEVEL
EXECER.eval(input)
#
# Tests
#
def test_bin_ls():
yield check_eval, '/bin/ls -l'
def test_ls_dashl():
yield check_eval, 'ls -l'
def test_which_ls():
yield check_eval, 'which ls'
if __name__ == '__main__':
nose.runmodule()
|
[
"scopatz@gmail.com"
] |
scopatz@gmail.com
|
3d787e6984f3eee88abe60dd5170ec3af6010e22
|
c6cd9829966c730e52ba932ff04b05c186c3af99
|
/udpserver.py
|
c14eb6b87daa9bfa2fcbed85a24e70c5792b7053
|
[] |
no_license
|
fotopretty/ESP8266Server
|
ba3b9c980c35edd57a5c759225bfedfdb82c26e6
|
aca0baa6762e5230593a1fe3bf1379db89530a78
|
refs/heads/master
| 2021-05-29T12:19:18.611152
| 2015-09-16T17:03:40
| 2015-09-16T17:03:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 730
|
py
|
# This is a Python UDP server to display UDP messages sent by the ESP8266 Arduino Shield by http://www.doit.am/
# Listen to UDP port 9000 and print any message received.
# Based on https://pymotw.com/2/socket/udp.html
__author__ = 'Luppy'
import socket
import sys
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind the socket to any IP address, port 9000
server_address = ('', 9000)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
print >>sys.stderr, '\nwaiting to receive message'
while True:
data, address = sock.recvfrom(4096)
print >>sys.stderr, '----received %s bytes from %s' % (len(data), address)
print >>sys.stderr, data
|
[
"lupyuen@gmail.com"
] |
lupyuen@gmail.com
|
d9f1f1ef4a21917821be03f6b3eae82be1d88ae0
|
2728543f61eb17dcccca9853ba6e6d2d932c8f8e
|
/roundsolutions/src/g4f_ws.py
|
c7273191eeff049503dbb05f0d4afbf69c165e74
|
[
"MIT"
] |
permissive
|
bewest/unapy
|
7a77afb841e354de5943f4bdfe9a08f1d3f49c88
|
cc55cfb90f38c7ac01ef244cc4b3509e4426b0e4
|
refs/heads/master
| 2016-09-11T06:09:39.908520
| 2012-06-17T23:10:20
| 2012-06-17T23:10:20
| 2,311,697
| 3
| 6
| null | null | null | null |
WINDOWS-1252
|
Python
| false
| false
| 2,461
|
py
|
############################################
# gauge4free WS2300 Python application #
# Copyright 2008, © Round Solutions #
# #
############################################
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions
#are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright
#notice, this list of conditions and the following disclaimer in
#the documentation and/or other materials provided with the distribution.
#
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS``AS
#IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
#TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
#PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#version 20080128.1
import MAIN
import LOCALS
# Change values below
# GPRS APN settings
# incorrect values lead to imposibility to use GPRS
apn = 'internet'
gprs_userid = ''
gprs_passw = ''
# gauge4free password
g4f_passw = 'demo'
# Interval between data upload to server
# in 1/10 of second
interval = 18000
# WS2300 driver
# how many times a command will be retried before declare fail
LOCALS.maxtrials = 30
# receive timeout when reading from WS2300
LOCALS.receive_timeout = 3
'''
Debug level is in LOCALS.debug_level
if bit 2 is set print driver level messages
if bit 1 is set print low level applications messages
if bit 0 is set print high level applications messages
'''
LOCALS.debug_level = 3
# !!! Do not change anything from here !!!
LOCALS.cgdcont = apn
LOCALS.gprsuserid = gprs_userid
LOCALS.gprspassw = gprs_passw
LOCALS.g4fpassw = g4f_passw
LOCALS.interval = interval
MAIN.main()
|
[
"bewest@gmail.com"
] |
bewest@gmail.com
|
bba3cbf765243f23c4a7e1d0c54c19cce2b7e9b6
|
08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc
|
/src/mnistk/networks/conv1dthenlinear_81.py
|
da2fc2b67de2ce6afe44794e2c90add3e214fc37
|
[] |
no_license
|
ahgamut/mnistk
|
58dadffad204602d425b18549e9b3d245dbf5486
|
19a661185e6d82996624fc6fcc03de7ad9213eb0
|
refs/heads/master
| 2021-11-04T07:36:07.394100
| 2021-10-27T18:37:12
| 2021-10-27T18:37:12
| 227,103,881
| 2
| 1
| null | 2020-02-19T22:07:24
| 2019-12-10T11:33:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
# -*- coding: utf-8 -*-
"""
conv1dthenlinear_81.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
class Conv1dThenLinear_81(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.f0 = nn.Conv1d(in_channels=16, out_channels=41, kernel_size=(30,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=True, padding_mode='zeros')
self.f1 = nn.Conv1d(in_channels=41, out_channels=10, kernel_size=(20,), stride=(1,), padding=(0,), dilation=(1,), groups=1, bias=False, padding_mode='zeros')
self.f2 = nn.Linear(in_features=10, out_features=113, bias=False)
self.f3 = nn.Sigmoid()
self.f4 = nn.Linear(in_features=113, out_features=10, bias=False)
self.f5 = nn.LogSoftmax(dim=1)
def forward(self, *inputs):
x = inputs[0]
x = x.view(x.shape[0],16,49)
x = self.f0(x)
x = self.f1(x)
x = x.view(x.shape[0],10)
x = self.f2(x)
x = self.f3(x)
x = self.f4(x)
x = self.f5(x)
return x
|
[
"41098605+ahgamut@users.noreply.github.com"
] |
41098605+ahgamut@users.noreply.github.com
|
8aee48b71c0ebb2d53999918e1c552b0a87ce133
|
72409ee3ffad4d865bfd900ba989a0756ff12e24
|
/time_series_detector/algorithm/xgboosting.py
|
e9ec290e6904b48ed32c1af6aaa202fbf2131f15
|
[] |
no_license
|
ncucjm/ts_detector
|
559cb5b25932e1a46aac2966fc0b031382080b11
|
742f4026a6da89331b9d6e46ae6ae4e2ea697215
|
refs/heads/master
| 2020-07-06T01:04:41.299625
| 2019-08-30T15:07:14
| 2019-08-30T15:07:14
| 202,840,403
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,709
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import xgboost as xgb
from time_series_detector.feature import feature_service
from time_series_detector.common.tsd_errorcode import *
from time_series_detector.common.tsd_common import *
MODEL_PATH = os.path.join(os.path.dirname(__file__), '../model/')
DEFAULT_MODEL = MODEL_PATH + "xgb_default_model"
class XGBoosting(object):
"""
XGBoost is an optimized distributed gradient boosting library designed to be highly efficient,
flexible and portable. It implements machine learning algorithms under the Gradient Boosting framework.
XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solve many data science problems
in a fast and accurate way. The same code runs on major distributed environment (Hadoop, SGE, MPI)
and can solve problems beyond billions of examples.
https://github.com/dmlc/xgboost
"""
def __init__(self,
threshold=0.15,
max_depth=10,
eta=0.05,
gamma=0.1,
silent=1,
min_child_weight=1,
subsample=0.8,
colsample_bytree=1,
booster='gbtree',
objective='binary:logistic',
eval_metric='auc'):
"""
:param threshold: The critical point of normal.
:param max_depth: Maximum tree depth for base learners.
:param eta: Value means model more robust to overfitting but slower to compute.
:param gamma: Minimum loss reduction required to make a further partition on a leaf node of the tree.
:param silent: If 1, it will print information about performance. If 2, some additional information will be printed out.
:param min_child_weight: Minimum sum of instance weight(hessian) needed in a child.
:param subsample: Subsample ratio of the training instance.
:param colsample_bytree: Subsample ratio of columns when constructing each tree.
:param booster: Specify which booster to use: gbtree, gblinear or dart.
:param objective: Specify the learning task and the corresponding learning objective or a custom objective function to be used (see note below).
:param eval_metric: If a str, should be a built-in evaluation metric to use. See doc/parameter.md. If callable, a custom evaluation metric.
"""
self.threshold = threshold
self.max_depth = max_depth
self.eta = eta
self.gamma = gamma
self.silent = silent
self.min_child_weight = min_child_weight
self.subsample = subsample
self.colsample_bytree = colsample_bytree
self.booster = booster
self.objective = objective
self.eval_metric = eval_metric
def __save_libsvm_format(self, data, feature_file_name):
"""
Save the time features to libsvm format.
:param data: feature values
:param file_name: file saves the time features and label
"""
try:
f = open(feature_file_name, "w")
except Exception as ex:
return TSD_CAL_FEATURE_ERR, str(ex)
times = 0
for temp in data:
if times > 0:
f.write("\n")
result = ['{0}:{1}'.format(int(index) + 1, value) for index, value in enumerate(temp[0])]
f.write(str(temp[1]))
for x in result:
f.write(' ' + x)
times = times + 1
return TSD_OP_SUCCESS, ""
def __calculate_features(self, data, feature_file_name, window=DEFAULT_WINDOW):
"""
Caculate time features and save as libsvm format.
:param data: the time series to detect of
:param feature_file_name: the file to use
:param window: the length of window
"""
features = []
for index in data:
if is_standard_time_series(index["data"], window):
temp = []
temp.append(feature_service.extract_features(index["data"], window))
temp.append(index["flag"])
features.append(temp)
try:
ret_code, ret_data = self.__save_libsvm_format(features, feature_file_name)
except Exception as ex:
ret_code = TSD_CAL_FEATURE_ERR
ret_data = str(ex)
return ret_code, ret_data
def xgb_train(self, data, task_id, num_round=300):
"""
Train an xgboost model.
:param data: Training dataset.
:param task_id: The id of the training task.
:param num_round: Max number of boosting iterations.
"""
model_name = MODEL_PATH + task_id + "_model"
feature_file_name = MODEL_PATH + task_id + "_features"
ret_code, ret_data = self.__calculate_features(data, feature_file_name)
if ret_code != TSD_OP_SUCCESS:
return ret_code, ret_data
try:
dtrain = xgb.DMatrix(feature_file_name)
except Exception as ex:
return TSD_READ_FEATURE_FAILED, str(ex)
params = {
'max_depth': self.max_depth,
'eta': self.eta,
'gamma': self.gamma,
'silent': self.silent,
'min_child_weight': self.min_child_weight,
'subsample': self.subsample,
'colsample_bytree': self.colsample_bytree,
'booster': self.booster,
'objective': self.objective,
'eval_metric': self.eval_metric,
}
try:
bst = xgb.train(params, dtrain, num_round)
bst.save_model(model_name)
except Exception as ex:
return TSD_TRAIN_ERR, str(ex)
return TSD_OP_SUCCESS, ""
def predict(self, X, window=DEFAULT_WINDOW, model_name=DEFAULT_MODEL):
"""
:param X: the time series to detect of
:type X: pandas.Series
:param window: the length of window
:param model_name: Use a xgboost model to predict a particular sample is an outlier or not.
:return 1 denotes normal, 0 denotes abnormal.
"""
if is_standard_time_series(X, window):
ts_features = []
features = [10]
features.extend(feature_service.extract_features(X, window))
ts_features.append(features)
res_pred = xgb.DMatrix(np.array(ts_features))
bst = xgb.Booster({'nthread': 4})
bst.load_model(model_name)
xgb_ret = bst.predict(res_pred)
if xgb_ret[0] < self.threshold:
value = 0
else:
value = 1
return [value, xgb_ret[0]]
else:
return [0, 0]
|
[
"1300887184@qq.com"
] |
1300887184@qq.com
|
db8d15fe436a1605c48b2d2a6915384b202132f1
|
c44a3227d1c2b3a892a9a52438a324e675485ff7
|
/odp/ui/admin/views/providers.py
|
0b97bea6fff0e6ea171b7e750a6c02b9312ef3de
|
[
"MIT"
] |
permissive
|
SAEONData/Open-Data-Platform
|
4b87aece6a83befd82a67f97d4ae330380c1f947
|
50c52bf476fd5c82afdf44379805f8790bb20319
|
refs/heads/main
| 2022-11-07T00:30:38.697706
| 2022-11-04T15:09:37
| 2022-11-04T15:09:37
| 251,641,495
| 2
| 1
|
MIT
| 2022-09-20T12:35:56
| 2020-03-31T15:12:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
from flask import Blueprint, flash, redirect, render_template, request, url_for
from odp.ui.admin.forms import ProviderForm
from odplib.const import ODPScope
from odplib.ui import api
bp = Blueprint('providers', __name__)
@bp.route('/')
@api.client(ODPScope.PROVIDER_READ)
def index():
page = request.args.get('page', 1)
providers = api.get(f'/provider/?page={page}')
return render_template('provider_list.html', providers=providers)
@bp.route('/<id>')
@api.client(ODPScope.PROVIDER_READ)
def view(id):
provider = api.get(f'/provider/{id}')
return render_template('provider_view.html', provider=provider)
@bp.route('/new', methods=('GET', 'POST'))
@api.client(ODPScope.PROVIDER_ADMIN)
def create():
form = ProviderForm(request.form)
if request.method == 'POST' and form.validate():
try:
api.post('/provider/', dict(
id=(id := form.id.data),
name=form.name.data,
))
flash(f'Provider {id} has been created.', category='success')
return redirect(url_for('.view', id=id))
except api.ODPAPIError as e:
if response := api.handle_error(e):
return response
return render_template('provider_edit.html', form=form)
@bp.route('/<id>/edit', methods=('GET', 'POST'))
@api.client(ODPScope.PROVIDER_ADMIN)
def edit(id):
provider = api.get(f'/provider/{id}')
form = ProviderForm(request.form, data=provider)
if request.method == 'POST' and form.validate():
try:
api.put('/provider/', dict(
id=id,
name=form.name.data,
))
flash(f'Provider {id} has been updated.', category='success')
return redirect(url_for('.view', id=id))
except api.ODPAPIError as e:
if response := api.handle_error(e):
return response
return render_template('provider_edit.html', provider=provider, form=form)
@bp.route('/<id>/delete', methods=('POST',))
@api.client(ODPScope.PROVIDER_ADMIN)
def delete(id):
api.delete(f'/provider/{id}')
flash(f'Provider {id} has been deleted.', category='success')
return redirect(url_for('.index'))
|
[
"52427991+marksparkza@users.noreply.github.com"
] |
52427991+marksparkza@users.noreply.github.com
|
038fbd532f9fd4dbb174c02e9e979f5807987c8e
|
9cbe84017abd74dd4863c60c3438420aeaa4cb5b
|
/OcCo_Torch/models/pointnet_util.py
|
223edb2e5970e591f491deb0d0fde065371aadb5
|
[
"MIT"
] |
permissive
|
zebrajack/OcCo
|
3c7be8e4c46b61e0899c533c5c101dad56127a3f
|
c218a2bb446f91702cf8fa6f56bb3a1da406009f
|
refs/heads/master
| 2023-04-30T08:15:48.189980
| 2020-12-29T10:49:21
| 2020-12-29T10:49:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,576
|
py
|
# Copyright (c) 2020. Hanchen Wang, hw501@cam.ac.uk
# Ref: https://github.com/fxia22/pointnet.pytorch/pointnet/model.py
import torch, torch.nn as nn, numpy as np, torch.nn.functional as F
from torch.autograd import Variable
def feature_transform_regularizer(trans):
d = trans.size()[1]
I = torch.eye(d)[None, :, :]
if trans.is_cuda:
I = I.cuda()
loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2, 1) - I), dim=(1, 2)))
return loss
# STN -> Spatial Transformer Network
class STN3d(nn.Module):
def __init__(self, channel):
super(STN3d, self).__init__()
self.conv1 = nn.Conv1d(channel, 64, 1) # in-channel, out-channel, kernel size
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
B = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=False)[0] # global descriptors
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(3).flatten().astype(np.float32))).view(1, 9).repeat(B, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class STNkd(nn.Module):
def __init__(self, k=64):
super(STNkd, self).__init__()
self.conv1 = nn.Conv1d(k, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k * k)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
self.k = k
def forward(self, x):
B = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=False)[0]
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(
1, self.k ** 2).repeat(B, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class PointNetEncoder(nn.Module):
def __init__(self, global_feat=True, feature_transform=False,
channel=3, detailed=False):
# when input include normals, it
super(PointNetEncoder, self).__init__()
self.stn = STN3d(channel) # Batch * 3 * 3
self.conv1 = nn.Conv1d(channel, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.global_feat = global_feat
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=64)
self.detailed = detailed
def forward(self, x):
_, D, N = x.size() # Batch Size, Dimension of Point Features, Num of Points
trans = self.stn(x)
x = x.transpose(2, 1)
if D > 3:
# pdb.set_trace()
x, feature = x.split([3, D-3], dim=2)
x = torch.bmm(x, trans)
# feature = torch.bmm(feature, trans) # feature -> normals
if D > 3:
x = torch.cat([x, feature], dim=2)
x = x.transpose(2, 1)
out1 = self.bn1(self.conv1(x))
x = F.relu(out1)
if self.feature_transform:
trans_feat = self.fstn(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans_feat)
x = x.transpose(2, 1)
else:
trans_feat = None
pointfeat = x
out2 = self.bn2(self.conv2(x))
x = F.relu(out2)
out3 = self.bn3(self.conv3(x))
# x = self.bn3(self.conv3(x))
x = torch.max(out3, 2, keepdim=False)[0]
if self.global_feat:
return x, trans, trans_feat
elif self.detailed:
return out1, out2, out3, x
else: # concatenate global and local feature together
x = x.view(-1, 1024, 1).repeat(1, 1, N)
return torch.cat([x, pointfeat], 1), trans, trans_feat
class PointNetPartSegEncoder(nn.Module):
def __init__(self, feature_transform=True, channel=3):
super(PointNetPartSegEncoder, self).__init__()
self.stn = STN3d(channel)
self.conv1 = nn.Conv1d(channel, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 128, 1)
self.conv4 = nn.Conv1d(128, 512, 1)
self.conv5 = nn.Conv1d(512, 2048, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(128)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(2048)
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=128)
def forward(self, point_cloud, label):
B, D, N = point_cloud.size()
trans = self.stn(point_cloud)
point_cloud = point_cloud.transpose(2, 1)
if D > 3:
point_cloud, feature = point_cloud.split(3, dim=2)
point_cloud = torch.bmm(point_cloud, trans)
if D > 3:
point_cloud = torch.cat([point_cloud, feature], dim=2)
point_cloud = point_cloud.transpose(2, 1)
out1 = F.relu(self.bn1(self.conv1(point_cloud)))
out2 = F.relu(self.bn2(self.conv2(out1)))
out3 = F.relu(self.bn3(self.conv3(out2)))
if self.feature_transform:
trans_feat = self.fstn(out3)
net_transformed = torch.bmm(out3.transpose(2, 1), trans_feat)
out3 = net_transformed.transpose(2, 1)
out4 = F.relu(self.bn4(self.conv4(out3)))
out5 = self.bn5(self.conv5(out4))
out_max = torch.max(out5, 2, keepdim=False)[0]
out_max = torch.cat([out_max, label.squeeze(1)], 1)
expand = out_max.view(-1, 2048 + 16, 1).repeat(1, 1, N)
concat = torch.cat([expand, out1, out2, out3, out4, out5], 1)
if self.feature_transform:
return concat, trans_feat
return concat
class encoder(nn.Module):
def __init__(self, num_channel=3, **kwargs):
super(encoder, self).__init__()
self.feat = PointNetEncoder(global_feat=True, channel=num_channel)
def forward(self, x):
feat, _, _ = self.feat(x)
return feat
class detailed_encoder(nn.Module):
def __init__(self, num_channel=3, **kwargs):
super(detailed_encoder, self).__init__()
self.feat = PointNetEncoder(global_feat=False,
channel=num_channel,
detailed=True)
def forward(self, x):
out1, out2, out3, x = self.feat(x)
return out1, out2, out3, x
|
[
"hc.wang96@gmail.com"
] |
hc.wang96@gmail.com
|
9e19072ff7971bc211783e2524be3902ccd8e5c3
|
39b8dddb1bda5e8055c661da060a9c71040c0ae3
|
/reinforcement/tensorflow/minigo/tests/test_shipname.py
|
93a9e7848d426fc5cb67bf8191c89a4eecd8e1c1
|
[
"Apache-2.0"
] |
permissive
|
dagarcia-nvidia/mlperf_training
|
22e7c120bce338ec84b008b5cd64a3e53c2362e3
|
bad6f14e6f5a119bfffb3181a8a742874c441753
|
refs/heads/master
| 2022-12-11T03:28:22.641969
| 2019-02-27T19:05:59
| 2019-02-27T19:05:59
| 172,770,644
| 1
| 1
|
Apache-2.0
| 2022-12-08T02:29:51
| 2019-02-26T18:54:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,119
|
py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import shipname
class TestShipname(unittest.TestCase):
def test_bootstrap_gen(self):
name = shipname.generate(0)
self.assertIn('bootstrap', name)
def test_detect_name(self):
string = '000017-model.index'
detected_name = shipname.detect_model_name(string)
self.assertEqual(detected_name, '000017-model')
def test_detect_num(self):
string = '000017-model.index'
detected_name = shipname.detect_model_num(string)
self.assertEqual(detected_name, 17)
|
[
"deepakn94@gmail.com"
] |
deepakn94@gmail.com
|
ddabea7784ef8342f76c1ca6530fde0cfab7b4f2
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/3378.py
|
d9230c40d82e40ec840c98885d3db4d3250b8334
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,242
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 09 06:47:06 2017
@author: rajbhagat
For Code Jam - Faster Tidy numbers
"""
readfileopen=open("C:/Users/rajbh/Desktop/B-large.in",'r')
writefileout=open("C:/Users/rajbh/Desktop/B-large.out",'w')
caseno=0
for e in readfileopen:
if caseno>0:
checkno=int(e.strip().rstrip())
ch=str(e.strip().rstrip())
ls=list(ch)
startno=0
digiter=9
noofdigits=len(ls)
while startno<noofdigits:
j=startno
while j<noofdigits:
ls[j]=digiter
j+=1
createdno=int("".join(str(x) for x in ls))
ls=list(str(createdno))
if createdno<=checkno:
startno+=1
digiter=9
elif digiter!=1:
digiter-=1
else:
noofdigits-=1
startno=0
digiter=9
ls=ls[1:]
outstring="Case #"+str(caseno)+": "+str(createdno)+"\n"
writefileout.write(outstring)
caseno+=1
readfileopen.close()
writefileout.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
daf27f71cbc15575ee65fd0f02661c46889e6984
|
2efda4e99b5b9da5041d4984b71a2121561a29d3
|
/EwhaEverytimeEverywhere/board/views.py
|
4acb5691431bf2776659eac49a6cf82d7009ef6f
|
[] |
no_license
|
yunaisme/Cyber_Graduation_Project
|
2ff31284ced20688cad9e4546fad2d3af2217cdf
|
5388fe8a3dce0c6053ff00522c50390e8a6160b1
|
refs/heads/main
| 2023-07-30T12:50:15.754026
| 2021-09-26T14:20:19
| 2021-09-26T14:20:19
| 397,037,621
| 0
| 0
| null | 2021-08-17T01:04:28
| 2021-08-17T01:04:27
| null |
UTF-8
|
Python
| false
| false
| 2,572
|
py
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from .forms import PostForm, CommentForm
from .models import Post, Comment
@login_required(login_url='login')
def post_list(request):
posts = Post.objects.all().order_by('-created_at')
return render(request, 'board/post_list.html',
{'posts': posts})
@login_required(login_url='login')
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
comments = Comment.objects.filter(post_id=pk).order_by('-comment_created')
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment_form = form.save(commit=False)
comment_form.post = post
comment_form.comment_writer = request.user
comment_form.save()
return redirect('board:post_detail', pk=post.pk)
else:
form = CommentForm()
context = {
'post': post,
'comments': comments,
'comment_form': form
}
return render(request, 'board/post_detail.html', context)
@login_required(login_url='login')
def post_upload(request):
if request.method == 'POST':
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.save()
return redirect('board:post_list')
else:
form = PostForm()
return render(request, 'board/post_upload.html', {
'form': form,
})
@login_required(login_url='login')
def post_edit(request, pk):
item = get_object_or_404(Post, pk=pk)
# 그 사람 id인거 인증하는거 있어야함
if request.method == 'POST':
form = PostForm(instance=item)
if form.is_valid():
item = form.save()
messages.success(request, '포스트를 수정했습니다.')
return redirect(item)
else:
form = PostForm(instance=item)
return render(request, 'board/post_edit.html', {
'form': form,
})
@login_required(login_url='login')
def post_delete(request, pk):
post = Post.objects.get(pk=pk)
if request.method == 'POST':
# 그 사람 id인거 인증하는거 있어야함
post.delete()
messages.success(request, '포스팅을 삭제했습니다.')
return redirect('board:post_list')
|
[
"gegiraffe@gmail.com"
] |
gegiraffe@gmail.com
|
8bce87db52839bfb325e37a18ea9b5a477384736
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5634697451274240_0/Python/sachinr20/b.py
|
1805d5abd977f454eca786173f8e9a14c75ab1cd
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
import sys
def flip(st, count):
#if count==len(st):
# return st[::-1]
l = count
st2 = ""
for i in range(count):
if st[i]=='+':
st2 = st2 + "-"
else:
st2 = st2 + "+"
#print("st2new:"+st2)
st2 = st2[::-1]
return st2+st[count:]
def handleit(line, count):
#print("Handling "+line + " len:"+ str(len(line)))
chars = [x for x in line]
if len(line)<=0:
return count;
if len(line) == 1:
if chars[0]=='+':
return count;
else:
count = count + 1
return count
total = len(line)
if line[total-1] == '+':
return handleit(line[:-1], count)
else:
pluses = 0
for ch in chars:
if ch != '+':
break
pluses += 1
if pluses == 0:
line = flip(line, len(line))
count +=1
else:
line = flip(line, pluses)
line = flip(line, len(line))
count += 2
return handleit(line[:len(line)-pluses], count)
name = sys.argv[1]
with open(name) as f:
lines = f.readlines()
lines = lines[1:]
case = 0
with open("out", "w") as o:
for line in lines:
case += 1
line = line.strip()
count = 0
c = handleit(line, count)
op = "Case #"+str(case)+": "+str(c)+"\n"
print(op, end="")
o.write(op)
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
97a9944271b7d91b192683ba190c0287a2a545fd
|
f281d0d6431c1b45c6e5ebfff5856c374af4b130
|
/DAY001~099/DAY05-BOJ1260-DFS와BFS/joohyuk.py
|
db9bbd3aa34e66679027eda6a4ef5d38dca52708
|
[] |
no_license
|
tachyon83/code-rhino
|
ec802dc91dce20980fac401b26165a487494adb4
|
b1af000f5798cd12ecdab36aeb9c7a36f91c1101
|
refs/heads/master
| 2022-08-13T09:10:16.369287
| 2022-07-30T11:27:34
| 2022-07-30T11:27:34
| 292,142,812
| 5
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
import sys
from collections import deque
si = sys.stdin.readline
graph_unsorted = [set() for _ in range(1001)]
graph = [[]for _ in range(1001)]
visited = [False for _ in range(1001)]
def dfs(s):
print(s, end=' ')
for e in graph[s]:
if not visited[e]:
visited[e] = True
dfs(e)
def main():
n, m, v = [int(e) for e in si().split()]
while m:
m -= 1
a, b = [int(e) for e in si().split()]
graph_unsorted[a].add(b)
graph_unsorted[b].add(a)
for i in range(1, n+1):
e = list(graph_unsorted[i])
e.sort()
graph[i] = e
visited[v] = True
dfs(v)
print()
q = deque()
for i in range(1, n+1):
visited[i] = False
q.append(v)
visited[v] = True
while q:
curr = q.popleft()
print(curr, end=' ')
for e in graph[curr]:
if not visited[e]:
visited[e] = True
q.append(e)
print()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
tachyon83.noreply@github.com
|
52333cc2e65db038cbb4d42924cde56aee596bdb
|
a290925e8c3103bb84327f6f38f0b4ffd7945c1d
|
/dataugmentation/reverse_para_order.py
|
46b542bb4e9b7a02170208992335f7e00154d9dd
|
[] |
no_license
|
enterpriseih/lightningHotpotQA
|
6db502747b2b7a876e7f32743b839c65f851ee49
|
b3a992f27a1c2b7881e6ab0c16132c20fb880f8d
|
refs/heads/master
| 2023-08-24T05:38:32.419496
| 2021-05-27T01:09:29
| 2021-05-27T01:09:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,252
|
py
|
import json
import sys
from tqdm import tqdm
assert len(sys.argv) == 4
raw_data = json.load(open(sys.argv[1], 'r'))
para_file = sys.argv[2]
with open(para_file, 'r', encoding='utf-8') as reader:
para_data = json.load(reader)
#################################
reverse_output_file = sys.argv[3]
################################
selected_para_dict_reverse = {}
################################
for case in tqdm(raw_data):
guid = case['_id']
##############################################
ir_selected_paras = para_data[guid]
selected_para_dict_reverse[guid] = []
assert len(ir_selected_paras) == 3
if len(ir_selected_paras[0]) == 2:
reverse_ir_paras_1st = [ir_selected_paras[0][1], ir_selected_paras[0][0]]
else:
reverse_ir_paras_1st = ir_selected_paras[0]
selected_para_dict_reverse[guid].append(reverse_ir_paras_1st)
selected_para_dict_reverse[guid].append(ir_selected_paras[1])
if len(ir_selected_paras[2]) == 2:
reverse_ir_paras_3rd = [ir_selected_paras[2][1], ir_selected_paras[2][0]]
else:
reverse_ir_paras_3rd = ir_selected_paras[2]
selected_para_dict_reverse[guid].append(reverse_ir_paras_3rd)
json.dump(selected_para_dict_reverse, open(reverse_output_file, 'w'))
|
[
"guangtao.wang@jd.com"
] |
guangtao.wang@jd.com
|
93f40d4918907f15aad52856cb8a80bb9202195c
|
e6252e7ad0e024cd20e0e0779347945b735dd64a
|
/myenv/restdemo.py
|
2c0d9453c08607d15e642c47b4412ccd350d5fee
|
[] |
no_license
|
Icode4passion/FlaskApp_RestDemo_Calculator_WeightConvertor
|
97391a9c7ed1f2b6eab402169f52ac17e4e49c64
|
8865d0d98c070331e3ebcd70ecd5b7ad2dd9c2e2
|
refs/heads/master
| 2020-04-11T07:33:25.152968
| 2018-12-13T14:33:29
| 2018-12-13T14:33:29
| 161,614,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
from flask import Flask , jsonify , make_response , abort, render_template , request
app = Flask(__name__)
movies = [
{
'id' : 1,
'title': 'Batman',
'Author': 'Bob Kane',
'Director' : 'Christopher'
},
{
'id' : 2,
'title': 'Superman',
'Author': 'Jerry Siegel',
'Director' : 'Richard Donner'
}]
@app.route('/movie/api/v1.0/movies',methods=['GET'])
def get_tasks():
return jsonify({'movies':movies})
@app.route('/movie/api/v1.0/movies/<int:movie_id>',methods=['GET'])
def get_tasks_id(movie_id):
movie = [movie for movie in movies if movie['id'] == movie_id]
if len(movie) == 0 :
abort(400)
return jsonify({'movie': movie[0]})
@app.errorhandler(400)
def errorhandler(error):
return make_response(jsonify({'error':'Not Found'}),404)
#return render_template('home.html')
@app.route('/movie/api/v1.0/movies',methods=['POST'])
def create_tasks():
if not request.json or not 'title' in request.json:
abort(400)
movie ={
'id' : movies[-1]['id'] +1,
'title' : request.json['title'],
'Author' : request.json.get('Author', ''),
'Director' : request.json.get('Director', ''),
}
movies.append(movie)
return jsonify({'task':task}),201
if __name__ == '__main__':
app.run(debug = True)
|
[
"yogeerama@gmail.com"
] |
yogeerama@gmail.com
|
e504ebb5f478fb423b42fd1cbe28748625513ef9
|
c93a0a6dedc8ebf100dd15eefc897457410e2d06
|
/opsweb/resources/migrations/0008_cmdbmodel_dev_team.py
|
334ca0cdc4e2e901f6fa80d9ece75cf34c848bee
|
[] |
no_license
|
sungy2014/WS-OPS
|
efaab4ca8d3c56352c685508fe5b273daaedc2bb
|
7563e40c130d0791ccacb259f7a71a9f276ca6c6
|
refs/heads/master
| 2020-03-11T12:25:42.030148
| 2018-04-11T12:44:02
| 2018-04-11T12:44:02
| 129,997,121
| 1
| 0
| null | 2018-04-18T03:14:03
| 2018-04-18T03:14:03
| null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-22 14:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auth', '0008_alter_user_username_max_length'),
('resources', '0007_cmdbmodel_ansible_playbook'),
]
operations = [
migrations.AddField(
model_name='cmdbmodel',
name='dev_team',
field=models.ManyToManyField(to='auth.Group', verbose_name='负责的开发组'),
),
]
|
[
"root@172-17-134-23.(none)"
] |
root@172-17-134-23.(none)
|
f7931575c366e22a71c78e7146d1397848ab5a87
|
92bf9ddd7b92e7ed73fa6989164700b2be3657b8
|
/Project1/download/google-cloud-sdk/.install/.backup/lib/surface/config/configurations/describe.py
|
0b368769e222fe9b76407d71ace98df3c1c32661
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
bopopescu/8220-lab
|
bc991424557ff46f325d4611a84d02560ba5a6cb
|
3f0ca82028962e5b1c0f4a2c4a2390ce6603e11c
|
refs/heads/master
| 2022-11-19T22:31:54.741707
| 2018-01-07T16:56:47
| 2018-01-07T16:56:47
| 282,337,970
| 0
| 0
| null | 2020-07-25T00:01:24
| 2020-07-25T00:01:23
| null |
UTF-8
|
Python
| false
| false
| 2,295
|
py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to describe named configuration."""
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core import named_configs
from googlecloudsdk.core import properties
class Describe(base.Command):
"""Describes a named configuration by listing its properties."""
detailed_help = {
'DESCRIPTION': """\
{description}
See `gcloud topic configurations` for an overview of named
configurations.
""",
'EXAMPLES': """\
To describe esisting named configuration, run:
$ {command} my_config
This is similar in content to:
$ gcloud config configurations activate my_config
$ gcloud config list
""",
}
@staticmethod
def Args(parser):
"""Adds args for this command."""
parser.add_argument(
'configuration_name',
help='Configuration name to descrive')
parser.add_argument(
'--all', action='store_true',
help='Include unset properties in output.')
def Run(self, args):
fname = named_configs.GetPathForConfigName(args.configuration_name)
if not named_configs.IsPathReadable(fname):
raise named_configs.NamedConfigLoadError(
'Reading named configuration [{0}] failed because [{1}] cannot '
'be read.'.format(args.configuration_name, fname))
return properties.VALUES.AllValues(
list_unset=args.all,
properties_file=properties.PropertiesFile([fname]),
only_file_contents=True)
def Display(self, _, result):
if not result:
log.err.Print('(empty configuration)')
properties.DisplayProperties(log.out, result)
|
[
"yitianl@g.clemson.edu"
] |
yitianl@g.clemson.edu
|
64bb9f225783b606da3d8267a0ac7d33b510a04b
|
2430b2a50efec6eebf27c0162b11d10d88f62729
|
/pyprob/__init__.py
|
57d07c2a883967767c0ab26beb5ea4593b133414
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
feynmanliang/pyprob
|
60d27e67c02e96f7a8116d9f1c5bdf1c374d908a
|
16e345fde1d4305138bc909b087c81ad0f668cc5
|
refs/heads/master
| 2022-11-28T16:28:19.921825
| 2020-05-25T22:28:45
| 2020-05-25T22:28:45
| 268,595,724
| 0
| 0
| null | 2020-06-01T18:05:21
| 2020-06-01T18:05:21
| null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
__version__ = '1.1.3'
from .util import TraceMode, PriorInflation, InferenceEngine, InferenceNetwork, ImportanceWeighting, Optimizer, LearningRateScheduler, ObserveEmbedding, set_verbosity, set_device, seed
from .state import sample, observe, tag
from .address_dictionary import AddressDictionary
from .model import Model, RemoteModel
|
[
"atilimgunes.baydin@gmail.com"
] |
atilimgunes.baydin@gmail.com
|
7eba8f570b7af1fd4e912d31ca096771effd2c08
|
99e0fef58ec7d3985f7471d0ab021333f8ea8c95
|
/output_head_tables.py
|
7eedb6ba85940104fdb796ed0260cc5c87a52a95
|
[] |
no_license
|
deBroglieeeen/get_pair_noun_in_corpus_2
|
176d6d1ea69a0947dbf7fe991525aafaab5d1e50
|
5667598604158c18f096c731f59780c83f79f8f7
|
refs/heads/main
| 2023-02-24T17:05:16.111928
| 2021-01-30T07:53:51
| 2021-01-30T07:53:51
| 326,991,591
| 0
| 0
| null | 2021-01-18T02:15:26
| 2021-01-05T12:26:49
|
Python
|
UTF-8
|
Python
| false
| false
| 229
|
py
|
import pandas as pd
import scipy as sp
import scipy.stats
# コンマ区切りのテキストデータを読み込む
data = pd.read_csv("output/df_sample2.tsv", sep='/t')
data.head(15).to_csv('output/head_alldata_sample.csv')
|
[
"u825246d@ecs.osaka-u.ac.jp"
] |
u825246d@ecs.osaka-u.ac.jp
|
cb0026bf57ccc9abc71541d4c3d1f690f344d7ae
|
47aaa3f1fa5764779e5246fa3b765adaaac15bd1
|
/distributed_jobman/parsers/config.py
|
f18760451974217f79da36fcfa3e1de8d8f31456
|
[] |
no_license
|
bouthilx/distributed-jobman
|
a3ec4958001b052a8327416b4be268f55dea2bf7
|
d20aeda23bb9137445f754c8542d2f7e328a7fae
|
refs/heads/master
| 2021-01-24T21:12:47.077725
| 2016-02-18T16:02:32
| 2016-02-18T16:05:16
| 49,673,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
from ConfigParser import ConfigParser, Error
import os
p2_config = ConfigParser()
p2_config.read(os.path.join(os.environ["HOME"], ".distributed_jobman.rc"))
default_values = dict(cache_timeout=str(60 * 5))
keys = ["username", "password", "address", "name", "cache_timeout"]
database = dict()
for key in keys:
value = p2_config.get("database", key, vars=default_values)
if value is None:
raise ValueError("Option %s must be set in configuration file "
"~/.distributed_jobman.rc")
database[key] = value
database["cache_timeout"] = float(database["cache_timeout"])
scheduler_types = ["multi-gpu", "cluster"]
scheduler = dict(type=p2_config.get("scheduler", "type"))
if scheduler["type"] not in scheduler_types:
raise Error("Invalid scheduler type: %s" % scheduler["type"])
config = dict(database=database, scheduler=scheduler)
|
[
"xavier.bouthillier@umontreal.ca"
] |
xavier.bouthillier@umontreal.ca
|
24c5484f67c0ebe9391bd91e453f7b27f8619284
|
01b7cc0017c81c99d1da1c37c6a5dcb0bf4af9a5
|
/python/PythonBinding.py
|
d2d8ed69c64c69724515739b1392ad016908ff42
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lsst-camera-dh/jh-ccs-utils
|
8c366c2cf3883944373a2aed02ee328c823a5cc7
|
4948df295311dff95d2b7d8e11f9ba392cd6b933
|
refs/heads/master
| 2022-03-09T09:37:31.640529
| 2022-01-17T03:27:47
| 2022-01-17T03:27:47
| 87,144,848
| 0
| 0
|
NOASSERTION
| 2022-01-17T03:27:48
| 2017-04-04T03:38:53
|
Python
|
UTF-8
|
Python
| false
| false
| 4,898
|
py
|
"""
Socket connection interface to CCS Jython interpreter.
"""
import sys
import time
import re
import socket
import threading
import uuid
__all__ = ['CcsJythonInterpreter', 'CcsException', 'CcsExecutionResult']
class CcsExecutionResult:
"""Results class."""
def __init__(self, thread):
self.thread = thread
def getOutput(self):
"""Return the result of a jython command as a string."""
while self.thread.running:
time.sleep(0.1)
return self.thread.execution_output
class CcsException(Exception):
"""Exception class for CCS Jython interface."""
def __init__(self, value):
super(CcsException, self).__init__()
self.value = value
def __str__(self):
return repr(self.value)
class CcsJythonInterpreter:
"""Interface class to CCS Jython interpreter."""
def __init__(self, name=None, host=None, port=4444):
self.port = port
if host is None:
# Get local machine name
self.host = socket.gethostname()
else:
self.host = host
host_and_port = '{}:{}'.format(self.host, self.port)
try:
self.socket_connection = self._socket_connection()
print('Connected to CCS Python interpreter on host:port',
host_and_port)
except Exception as eobj:
print(eobj)
raise CcsException("Could not connect to CCS Python Interpreter " +
"on host:port " + host_and_port)
if name is not None:
name = name.replace("\n", "")
self.syncExecution("initializeInterpreter " + name)
def _socket_connection(self):
sc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sc.connect((self.host, self.port))
connectionResult = sc.recv(1024).decode('utf-8')
if "ConnectionRefused" in connectionResult:
raise CcsException("Connection Refused")
return sc
def aSyncExecution(self, statement):
return self.sendInterpreterServer(statement)
def syncExecution(self, statement):
result = self.sendInterpreterServer(statement)
# Calling .getOutput() here causes the object to wait for the
# underlying thread to stop running.
result.getOutput()
return result
def aSyncScriptExecution(self, filename):
with open(filename, "r") as fd:
fileContent = fd.read()
return self.sendInterpreterServer(fileContent)
def syncScriptExecution(self, filename, setup_commands=(), verbose=False):
if verbose and setup_commands:
print("Executing setup commands for", filename)
for command in setup_commands:
if verbose:
print(command)
self.syncExecution(command)
if verbose:
print("Executing %s..." % filename)
with open(filename, "r") as fd:
fileContent = fd.read()
result = self.sendInterpreterServer(fileContent)
# Calling .getOutput() here causes the object to wait for the
# underlying thread to stop running.
result.getOutput()
return result
def sendInterpreterServer(self, content):
thread_id = str(uuid.uuid4())
executor_thread = CcsPythonExecutorThread(thread_id,
self.socket_connection)
return executor_thread.executePythonContent(content)
class CcsPythonExecutorThread:
def __init__(self, thread_id, socket_connection):
self.socket_connection = socket_connection
self.thread_id = thread_id
self.output_thread = threading.Thread(target=self.listenToSocketOutput)
self.java_exceptions = []
def executePythonContent(self, content):
self.running = True
self.output_thread.start()
content = ("startContent:" + self.thread_id + "\n" +
content + "\nendContent:" + self.thread_id + "\n")
self.socket_connection.send(content.encode('utf-8'))
return CcsExecutionResult(self)
def listenToSocketOutput(self):
re_obj = re.compile(r'.*java.*[Ee]xception.*')
self.execution_output = ""
while self.running:
try:
output = self.socket_connection.recv(1024).decode('utf-8')
except Exception as eobj:
print(eobj)
raise CcsException("Communication Problem with Socket")
for item in output.split('\n'):
if re_obj.match(item):
self.java_exceptions.append(item)
if "doneExecution:" + self.thread_id not in output:
sys.stdout.write(output)
sys.stdout.flush()
self.execution_output += output
else:
self.running = False
del self.output_thread
|
[
"jchiang@slac.stanford.edu"
] |
jchiang@slac.stanford.edu
|
580b18797f6bcd128bf024691e448bb0b188ad18
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/fish1_20200805130243.py
|
e999797ad429e042ebb73a7054817607af8ed019
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
def fish(A,B):
# we place fish moving downwards to a downstream stack
# then when its not empty we'll check with the empty at A[i]
# if it eats that fish we deduct it from the alive fish and del from A
# otherwise we shall pop from the down stream stack
downStream = []
j = 0
aliveFish = len(A)
fishRemoved = 0
for j in range(len(B)):
if B[j] == 0:
while downStream !=[] and :
print(fish([4,3],[0,1]))
# print(fish([4,3,2,1,5],[0,1,0,0,0]))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
30e1d63614fa8d56d2ca697cb2da652ee3a00995
|
e836275adf8adca9b77acdd3d25bac157592a995
|
/dyconnmap/cluster/__init__.py
|
958f031b8568d142c88bb25e94a002ca0a8d42f5
|
[
"BSD-3-Clause"
] |
permissive
|
makism/dyconnmap
|
3de6f482d1370bf25ec3813ddf576b675ed99d9e
|
cbef247e635d55cb1489ba1e429d9d472b501b56
|
refs/heads/master
| 2023-08-03T19:30:40.779333
| 2022-03-14T18:24:16
| 2022-03-14T18:24:16
| 98,643,787
| 67
| 25
|
BSD-3-Clause
| 2023-07-24T04:49:03
| 2017-07-28T11:37:17
|
Python
|
UTF-8
|
Python
| false
| false
| 485
|
py
|
# -*- coding: utf-8 -*-
"""
"""
# Author: Avraam Marimpis <avraam.marimpis@gmail.com>
from .ng import NeuralGas
from .mng import MergeNeuralGas
from .rng import RelationalNeuralGas
from .gng import GrowingNeuralGas
from .som import SOM
from .umatrix import umatrix
from .validity import ray_turi, davies_bouldin
__all__ = [
"NeuralGas",
"MergeNeuralGas",
"RelationalNeuralGas",
"GrowingNeuralGas",
"SOM",
"umatrix",
"ray_turi",
"davies_bouldin",
]
|
[
"makhsm@gmail.com"
] |
makhsm@gmail.com
|
9407a3410c3adf54c911ab96278515594e083f7c
|
8cd15fba24b6dfa431f3764932101969f5fb524f
|
/JAMediaVideo/gtk2/Globales.py
|
814c31044263af9b38ece76b1e5a3998450b5472
|
[] |
no_license
|
srevinsaju/JAMediaSuite
|
c872b4781657bf1bcf63908f71abeca799b8c666
|
1813d1205cf31f89be3c4512eb495baed427494f
|
refs/heads/master
| 2020-12-04T12:14:53.794749
| 2019-01-05T12:52:13
| 2019-01-05T12:52:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,966
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Globals.py por:
# Flavio Danesse <fdanesse@gmail.com>
# Uruguay
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
def get_ip():
"""
Devuelve ip rango de difusión en la red.
"""
import socket
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("google.com", 80))
ret = s.getsockname()[0]
s.close()
return ret
except:
return ""
def get_color(color):
"""
Devuelve Colores predefinidos.
"""
from gtk import gdk
colors = {
"GRIS": gdk.Color(60156, 60156, 60156),
"AMARILLO": gdk.Color(65000, 65000, 40275),
"NARANJA": gdk.Color(65000, 26000, 0),
"BLANCO": gdk.Color(65535, 65535, 65535),
"NEGRO": gdk.Color(0, 0, 0),
"ROJO": gdk.Color(65000, 0, 0),
"VERDE": gdk.Color(0, 65000, 0),
"AZUL": gdk.Color(0, 0, 65000),
}
return colors.get(color, None)
def get_colors(key):
from gtk import gdk
_dict = {
"window": "#ffffff",
"barradeprogreso": "#778899",
"toolbars": "#f0e6aa",
"drawingplayer": "#000000",
}
return gdk.color_parse(_dict.get(key, "#ffffff"))
def describe_archivo(archivo):
"""
Devuelve el tipo de un archivo (imagen, video, texto).
-z, --uncompress para ver dentro de los zip.
"""
import commands
datos = commands.getoutput('file -ik %s%s%s' % ("\"", archivo, "\""))
retorno = ""
for dat in datos.split(":")[1:]:
retorno += " %s" % (dat)
return retorno
def make_base_directory():
"""
Crea toda la estructura de Directorios de JAMedia.
"""
import os
import commands
if not os.path.exists(os.path.join(
os.environ["HOME"], "JAMediaDatos")):
os.mkdir(os.path.join(os.environ["HOME"], "JAMediaDatos"))
os.chmod(os.path.join(os.environ["HOME"], "JAMediaDatos"), 0755)
# unificar directorios de JAMedia, JAMediaVideo y JAMediaImagenes
directorio_viejo = os.path.join(os.environ["HOME"], ".JAMediaDatos")
directorio_nuevo = os.path.join(os.environ["HOME"], "JAMediaDatos")
if os.path.exists(directorio_viejo):
for elemento in os.listdir(directorio_viejo):
commands.getoutput('mv %s %s' % (os.path.join(directorio_viejo,
elemento), directorio_nuevo))
commands.getoutput('rm -r %s' % (directorio_viejo))
# Directorios JAMedia
DIRECTORIO_MIS_ARCHIVOS = os.path.join(
os.environ["HOME"], "JAMediaDatos", "MisArchivos")
DIRECTORIO_DATOS = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Datos")
if not os.path.exists(DIRECTORIO_MIS_ARCHIVOS):
os.mkdir(DIRECTORIO_MIS_ARCHIVOS)
os.chmod(DIRECTORIO_MIS_ARCHIVOS, 0755)
if not os.path.exists(DIRECTORIO_DATOS):
os.mkdir(DIRECTORIO_DATOS)
os.chmod(DIRECTORIO_DATOS, 0755)
# Directorio JAMediaTube
DIRECTORIO_YOUTUBE = os.path.join(os.environ["HOME"],
"JAMediaDatos", "YoutubeVideos")
if not os.path.exists(DIRECTORIO_YOUTUBE):
os.mkdir(DIRECTORIO_YOUTUBE)
os.chmod(DIRECTORIO_YOUTUBE, 0755)
# Directorios JAMediaVideo
AUDIO_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Audio")
if not os.path.exists(AUDIO_JAMEDIA_VIDEO):
os.mkdir(AUDIO_JAMEDIA_VIDEO)
os.chmod(AUDIO_JAMEDIA_VIDEO, 0755)
VIDEO_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Videos")
if not os.path.exists(VIDEO_JAMEDIA_VIDEO):
os.mkdir(VIDEO_JAMEDIA_VIDEO)
os.chmod(VIDEO_JAMEDIA_VIDEO, 0755)
IMAGENES_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Fotos")
if not os.path.exists(IMAGENES_JAMEDIA_VIDEO):
os.mkdir(IMAGENES_JAMEDIA_VIDEO)
os.chmod(IMAGENES_JAMEDIA_VIDEO, 0755)
def get_data_directory():
"""
Devuelve el Directorio de Datos de JAMedia y JAMediaTube.
"""
import os
DIRECTORIO_DATOS = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Datos")
if not os.path.exists(DIRECTORIO_DATOS):
make_base_directory()
return DIRECTORIO_DATOS
def get_tube_directory():
"""
Devuelve el Directorio de Videos de JAMediaTube.
"""
import os
DIRECTORIO_YOUTUBE = os.path.join(os.environ["HOME"],
"JAMediaDatos", "YoutubeVideos")
if not os.path.exists(DIRECTORIO_YOUTUBE):
make_base_directory()
return DIRECTORIO_YOUTUBE
def get_audio_directory():
"""
Devuelve el Directorio de Audio de JAMedia y JAMediaTube.
"""
import os
AUDIO_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Audio")
if not os.path.exists(AUDIO_JAMEDIA_VIDEO):
make_base_directory()
return AUDIO_JAMEDIA_VIDEO
def get_imagenes_directory():
"""
Devuelve el Directorio de Imagenes de JAMediaVideo y JAMediaImagenes.
"""
import os
IMAGENES_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Fotos")
if not os.path.exists(IMAGENES_JAMEDIA_VIDEO):
make_base_directory()
return IMAGENES_JAMEDIA_VIDEO
def get_video_directory():
"""
Devuelve el Directorio de Video de JAMediaVideo.
"""
import os
VIDEO_JAMEDIA_VIDEO = os.path.join(os.environ["HOME"],
"JAMediaDatos", "Videos")
if not os.path.exists(VIDEO_JAMEDIA_VIDEO):
make_base_directory()
return VIDEO_JAMEDIA_VIDEO
'''
def get_my_files_directory():
"""
Devuelve el Directorio de Archivos del usuario en JAMedia.
"""
import os
DIRECTORIO_MIS_ARCHIVOS = os.path.join(os.environ["HOME"],
"JAMediaDatos", "MisArchivos")
if not os.path.exists(DIRECTORIO_MIS_ARCHIVOS):
make_base_directory()
return DIRECTORIO_MIS_ARCHIVOS
'''
def get_separador(draw=False, ancho=0, expand=False):
"""
Devuelve un separador generico.
"""
import gtk
separador = gtk.SeparatorToolItem()
separador.props.draw = draw
separador.set_size_request(ancho, -1)
separador.set_expand(expand)
return separador
'''
def get_togle_boton(archivo, flip=False,
color=get_color("GRIS"), pixels=24):
# Gdk.Color(65000, 65000, 65000)
"""
Devuelve un toggletoolbutton generico.
"""
import gtk
boton = gtk.ToggleToolButton()
imagen = gtk.Image()
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(
archivo, pixels, pixels)
if flip:
pixbuf = pixbuf.flip(True)
imagen.set_from_pixbuf(pixbuf)
boton.set_icon_widget(imagen)
imagen.show()
boton.show()
return boton
'''
def get_boton(archivo, flip=False, rotacion=None,
pixels=24, tooltip_text=None):
"""
Devuelve un toolbutton generico.
"""
import gtk
boton = gtk.ToolButton()
imagen = gtk.Image()
pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(archivo, pixels, pixels)
if flip:
pixbuf = pixbuf.flip(True)
if rotacion:
pixbuf = pixbuf.rotate_simple(rotacion)
imagen.set_from_pixbuf(pixbuf)
boton.set_icon_widget(imagen)
imagen.show()
boton.show()
if tooltip_text:
boton.set_tooltip_text(tooltip_text)
boton.TOOLTIP = tooltip_text
return boton
|
[
"fdanesse@gmail.com"
] |
fdanesse@gmail.com
|
0d178fa066c1f4c5d384bfd333819d9ac8351337
|
fd5edffed3c69a4d749880e18189c391a0a92562
|
/blog/migrations/0002_auto_20181026_1956.py
|
b69729dbed11c9f0b6dd0221836d82990b3583f9
|
[] |
no_license
|
bgarcial/hostayni_platform
|
4e9768bc1a13f006167d16b6d33bce88a029c524
|
2cf136b24b27db1a907ccc1274d32c1523abe1a2
|
refs/heads/master
| 2021-10-14T07:42:30.095351
| 2018-11-14T16:11:54
| 2018-11-14T16:11:54
| 103,794,415
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-10-26 19:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='article',
name='category',
),
migrations.AlterField(
model_name='article',
name='draft',
field=models.BooleanField(default=False, help_text='Si seleccionas esta opción tu artículo no será publicado por el momento', verbose_name='Guardar publicación'),
),
]
|
[
"botibagl@gmail.com"
] |
botibagl@gmail.com
|
4ec65bf797fd519390932f21927af6966f94336b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/168/usersdata/276/70781/submittedfiles/exercicio24.py
|
65d4c1fabd7be393ddb9b1c8b13fb77cc8cf2feb
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
# -*- coding: utf-8 -*-
import math
x = int (input('Digite o valor de x: '))
y = int (input('Digite o valor de y: '))
i = 1
mdc = 0
while (i<=y):
if (x%i==0) and (y%i==0):
mdc = i
print (i)
i = i + 1
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
39defe150001c2805ae5c7822c51642555a4b3dc
|
2bd8fbe6e2ee2511d00479440aa589249234c2d8
|
/01-Supervised/11-16/day17/day17-01-integrate-2-RandomForest-2-parallelized.py
|
0698a4110a7311f17677f8802c4b7f25c36c8f54
|
[] |
no_license
|
LeenonGo/sklearn-learn
|
71d21f9b26cfb5cc6d65a22883127db873a31091
|
460d6e75e82943c802f7c025a03c821d02b5d232
|
refs/heads/master
| 2023-07-13T18:42:17.510938
| 2021-08-18T11:34:06
| 2021-08-18T11:34:06
| 371,628,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
# -*- coding: utf-8 -*-
# @Author : Lee
# @Time : 2021/7/20 15:09
# @Function: 并行化: https://www.scikitlearn.com.cn/0.21.3/12/#11124
# n_jobs = k ,则计算被划分为 k 个作业,并运行在机器的 k 个核上
# 如果设置 n_jobs = -1 ,则使用机器的所有核。
#
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
"""
这个例子展示了在图像分类任务(faces)中使用树的森林来评估基于杂质的像素重要性。像素越热,越重要。
"""
n_jobs = 1 # 调整
data = fetch_olivetti_faces()
X, y = data.data, data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
|
[
"yaa.lee@hotmail.com"
] |
yaa.lee@hotmail.com
|
ccd07e782ba302eaba43b3b517b58b8b67f736ae
|
62758b6067133b1a4c75da979197d21a5691c34e
|
/ichnaea/cache.py
|
8cc5409f92eecfc5ca30f27177bcc16e2e11344f
|
[
"Apache-2.0"
] |
permissive
|
mate1983/ichnaea
|
903450705f9a83fd74aeb16e5b6fd9644de04065
|
ac3ed0640ee8cc7f142ba21cb6976dbf2bd488cb
|
refs/heads/master
| 2020-12-03T10:44:19.124756
| 2016-03-11T11:17:57
| 2016-03-11T11:59:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,454
|
py
|
"""
Functionality related to using Redis as a cache and a queue.
"""
from contextlib import contextmanager
import redis
from redis.exceptions import RedisError
from six.moves.urllib.parse import urlparse
def configure_redis(cache_url, _client=None):
"""
Configure and return a :class:`~ichnaea.cache.RedisClient` instance.
:param _client: Test-only hook to provide a pre-configured client.
"""
if cache_url is None or _client is not None:
return _client
url = urlparse(cache_url)
netloc = url.netloc.split(':')
host = netloc[0]
if len(netloc) > 1:
port = int(netloc[1])
else: # pragma: no cover
port = 6379
if len(url.path) > 1:
db = int(url.path[1:])
else: # pragma: no cover
db = 0
pool = redis.ConnectionPool(
max_connections=20,
host=host,
port=port,
db=db,
socket_timeout=30.0,
socket_connect_timeout=60.0,
socket_keepalive=True,
)
return RedisClient(connection_pool=pool)
@contextmanager
def redis_pipeline(redis_client, execute=True):
"""
Return a Redis pipeline usable as a context manager.
:param execute: Should the pipeline be executed or aborted at the end?
:type execute: bool
"""
with redis_client.pipeline() as pipe:
yield pipe
if execute:
pipe.execute()
class RedisClient(redis.StrictRedis):
"""A strict pingable RedisClient."""
# The last part of these keys is a counter than can be incremented
# whenever the contents/structure of the cache changes. This allows
# for easy `cache-busting'.
cache_keys = {
'downloads': b'cache:downloads:3',
'fallback_blue': b'cache:fallback:blue:',
'fallback_cell': b'cache:fallback:cell:',
'fallback_wifi': b'cache:fallback:wifi:',
'leaders': b'cache:leaders:2',
'leaders_weekly': b'cache:leaders_weekly:2',
'stats': b'cache:stats:3',
'stats_regions': b'cache:stats_regions:4',
'stats_blue_json': b'cache:stats_blue_json:2',
'stats_cell_json': b'cache:stats_cell_json:2',
'stats_wifi_json': b'cache:stats_wifi_json:2',
}
def ping(self):
"""
Ping the Redis server. On success return `True`, otherwise `False`.
"""
try:
self.execute_command('PING')
except RedisError:
return False
return True
|
[
"hanno@hannosch.eu"
] |
hanno@hannosch.eu
|
8e76debaea8ecc60552b1c5384895640a9b54d55
|
bbc8fbbdd40665af61fedf69962b38c1d5939683
|
/deploy/pinax.wsgi
|
702a6744a1f0adf054e2dce91b62c0c1158c1580
|
[] |
no_license
|
braskin/pd
|
64b299ad8058e8d3939bc9778fd1576522f786b0
|
df32f96b432c2f07e1a20bcbd84df3eccad5e29a
|
refs/heads/master
| 2021-01-10T22:10:34.318229
| 2013-01-23T11:50:37
| 2013-01-23T11:50:37
| 7,773,119
| 0
| 1
| null | 2020-07-25T19:53:06
| 2013-01-23T11:09:43
|
Python
|
UTF-8
|
Python
| false
| false
| 454
|
wsgi
|
# pinax.wsgi is configured to live in projects/playdation/deploy.
import os
import sys
from os.path import abspath, dirname, join
from site import addsitedir
sys.path.insert(0, abspath(join(dirname(__file__), "../../")))
from django.conf import settings
os.environ["DJANGO_SETTINGS_MODULE"] = "playdation.settings"
sys.path.insert(0, join(settings.PROJECT_ROOT, "apps"))
from django.core.handlers.wsgi import WSGIHandler
application = WSGIHandler()
|
[
"boris.raskin@gmail.com"
] |
boris.raskin@gmail.com
|
4adb56b19f422e4b95744f384d76d14ff2d0e9c6
|
e6ede210d500b8f0772ff09f6a91578297ad6395
|
/tests/database/database_perf_load01.py
|
5d0fed1c12eb4b69890a20306a01f56a6878d493
|
[
"BSD-3-Clause"
] |
permissive
|
pnarvor/nephelae_base
|
392d70e001c49d03e7027989d75adaf065f968ee
|
d5f1abeae0b0473b895b4735f182ddae0516a1bd
|
refs/heads/master
| 2020-06-23T14:23:41.294273
| 2020-02-28T17:01:26
| 2020-02-28T17:01:26
| 198,648,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,619
|
py
|
#! /usr/bin/python3
import sys
sys.path.append('../../')
import os
import signal
import time
from ivy.std_api import *
import logging
from nephelae_mapping.database import NephelaeDataServer
from helpers.helpers import *
print("loading database... ", end='', flush=True)
t0 = time.time()
# dtbase = NephelaeDataServer.load('output/database_perf01.neph')
# dtbase = NephelaeDataServer.load('output/database_perf02.neph')
dtbase = NephelaeDataServer.load('output/database_perf03.neph')
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)
print("Reading database... ", flush=True)
t0 = time.time()
for i in range(10):
output = [entry.data for entry in dtbase.find_entries(['GPS','101'],
Fancy()[0:10.0,0:10.0,0:10.0,0:10.0])]
# for item in output:
# print(item)
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)
print("Reading database... ", flush=True)
t0 = time.time()
for i in range(10):
output = [entry.data for entry in dtbase.find_entries(['101','var_0'],
Fancy()[0:10.0,0:10.0,0:10.0,0:10.0])]
# for item in output:
# print(item)
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)
print("Reading database... ", flush=True)
t0 = time.time()
for i in range(10):
output = [entry.data for entry in dtbase.find_entries(['var_0','101'],
Fancy()[0:10.0,0:10.0,0:10.0,0:10.0])]
# for item in output:
# print(item)
t1 = time.time()
print("Done. (ellapsed : ", t1 - t0,")", flush=True)
|
[
"pnarvor@laas.fr"
] |
pnarvor@laas.fr
|
b38cc83718ba67b213d350be50f5983e023c5b64
|
838a0c32eb0ab8fa513cfdc698a09ab1eaaef00a
|
/codes/275. H-Index II.py
|
e0705b2c4b55f09ebc65aec748c8f9f6ec607acd
|
[] |
no_license
|
zcgu/leetcode
|
ff270db9deb000e63dc9f338131c746ce7d24dfb
|
a041962eeab9192799ad7f74b4bbd3e4f74933d0
|
refs/heads/master
| 2021-01-11T20:02:49.126449
| 2016-12-31T23:51:38
| 2016-12-31T23:51:38
| 68,346,234
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
class Solution(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
if not citations:
return 0
h = 0
i = len(citations) - 1
while citations[i] > h and i >= 0: # this citations[i] > h not citations[i] >= h
h += 1
i -= 1
return h
|
[
"patron@loan-rds-490-x.local"
] |
patron@loan-rds-490-x.local
|
0d0107c5fc211ba55a7f4194bd58bfb09b71cc71
|
0764489a1cb0793a39252bb0e6afa76854119644
|
/scenarios/credit_failed_state/executable.py
|
965e78d1121ac9ff409962b2dae867a57ae767dc
|
[] |
no_license
|
rserna2010/balanced-python
|
8ac7bef3cb309be8affaa2aa62e882d631b62bda
|
093a401d187bc09293a88214156e9e316185bfa3
|
refs/heads/master
| 2021-01-21T07:20:47.157987
| 2013-11-08T17:39:54
| 2013-11-08T17:39:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
import balanced
balanced.configure('ak-test-1p1Tsac7gHeMQowL2seB7ieliuAJAufyq')
bank_account_info = {
"routing_number": "121000358",
"type": "checking",
"account_number": "9900000005",
"name": "Johann Bernoulli"
}
credit = balanced.Credit(
amount=10000,
bank_account=bank_account_info
).save()
|
[
"ben@unfiniti.com"
] |
ben@unfiniti.com
|
65aabc1185420c1de3350fef656d55b4d0889e67
|
f3050b7f84e584dcde54ca1690944bfccc6f5d9c
|
/doReport.py
|
1f3e2459f51cf6acf2c67e58f657274c4e11e715
|
[] |
no_license
|
azhenglianxi/api_Project
|
0c8444c2bad7464fd57911be4fdcd131a63c46b2
|
2ae87b87e41f522d4ef20f63bad6adcaec1f9874
|
refs/heads/master
| 2020-09-14T12:08:07.080748
| 2019-12-12T09:08:22
| 2019-12-12T09:08:22
| 223,124,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,537
|
py
|
import unittest
import ddt
from testCase.course.courseTest1 import CourseTest1
from testCase.course.courseTest2 import CourseTest2
import HtmlTestRunner
from HTMLTestRunner import HTMLTestRunner
# 测试套件 test_suite
# 1.-1: 用例 一个个的添加到suite
# suite=unittest.TestSuite()
# suite.addTest(CourseTest1("test_101"))
# suite.addTest(CourseTest1("test_103"))
# suite.addTest(CourseTest1("test_102"))
# suite.addTest(CourseTest1("test_102"))
# suite.addTest(CourseTest2("test_202"))
# 1-2: 用例放入列表中 在添加suite
# suite=unittest.TestSuite()
# list=[CourseTest1("test_101"),CourseTest1("test_103"),CourseTest1("test_102"),CourseTest2("test_202")]
# suite.addTests(list)
# 1-3 :用Testloader类的discover方法来
suite=unittest.defaultTestLoader.discover('testCase',pattern="*Test*.py")
# 2 运行用例,查看结果
# 2-1 第1种情况:不使用HtmlTestRunner插件
# runner=unittest.TextTestRunner()
# runner.run(suite)
# 2-2 第2种情况:使用【经典版】HtmlTestRunner插件
# 新建一个可写二进制文件
# reportFile=open('./report/经典Html报告4.html','wb')
# runner=HTMLTestRunner(stream=reportFile,verbosity=2,description="用例执行详细信息",
# title="测试报告")
# runner.run(suite)
# 2-3 第3种情况:使用【最新版】HtmlTestRunner插件
runner=HtmlTestRunner.HTMLTestRunner(output='./report/',report_name='【最新版】html2测试报告',
report_title='my_report')
runner.run(suite)
|
[
"azhenglianxi@163.com"
] |
azhenglianxi@163.com
|
1630d428b45f4ba249a3ce615b8614472bebbcec
|
efd55bc63da8ab6ee964ec82bd0b761fd36107cc
|
/leetcode/easy/add-strings.py
|
9a65e671a52bd084d149cc8082da1b152c7e4665
|
[] |
no_license
|
gsantam/competitive-programming
|
f9a2c9999470eeae9ef4aada6af43b91a65fcb50
|
0b208516a6ae3e72bc7b79ef0ac83dcbfa100496
|
refs/heads/master
| 2021-06-20T23:27:30.274275
| 2021-06-20T19:44:51
| 2021-06-20T19:44:51
| 162,201,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
class Solution:
def addStrings(self, num1: str, num2: str) -> str:
i = 0
l1 = len(num1)
l2 = len(num2)
rest = 0
total_sum = 0
while l1-1-i>=0 or l2-1-i>=0:
sum_ = rest
if l1-1-i>=0:
sum_+=int(num1[l1-1-i])
if l2-1-i>=0:
sum_+=int(num2[l2-1-i])
rest = sum_//10
sum_ = sum_%10
total_sum+=sum_*(10**i)
i+=1
if rest!=0:
total_sum+=rest*(10**i)
return str(total_sum)
|
[
"santamaria.guille@gmail.com"
] |
santamaria.guille@gmail.com
|
7adcf3af90dc069ab9bec98b2839947c8aeeb910
|
0c2130f0aabf2e27fae19ba93a52b444d4abdffd
|
/webscraping_beautifulSoup/09 Hand on with AMAZON projects/043 amazon-project2-part2-get-book-detail-information-for-one-book.py
|
e17c7d86efde3a513d5c15b75c8bf65a8b03a310
|
[] |
no_license
|
abuzarrizvi/WebScrapingBeautifulSoup4
|
3e583b736f575596b69e0102dbde797d46f47a61
|
9e847e83cef9a914bc1774295fc48f974a1ab796
|
refs/heads/master
| 2020-06-17T15:01:16.657407
| 2019-08-14T05:08:32
| 2019-08-14T05:08:32
| 195,956,866
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
# strategy
# soup --> ISBN table id="productDetailsTable"
# find_all li tag --> get 4th li
# --> Detail --> iframe --> div.text
from bs4 import BeautifulSoup
from selenium import webdriver
#driver = webdriver.PhantomJS(executable_path = r'C:\phantomjs-2.1.1-windows\bin\phantomjs.exe')
driver = webdriver.Chrome('C:\chromedriver_win32\chromedriver.exe')
url = 'https://www.amazon.com/Python-Programming-Introduction-Computer-Science/dp/1590282418/ref=sr_1_1?ie=UTF8&qid=1473731166&sr=8-1&keywords=python+programming'
driver.get(url)
soup = BeautifulSoup(driver.page_source,'lxml')
table = soup.find('table', {'id':'productDetailsTable'})
all_li = table.find_all('li')
isbn = all_li[3].text.strip('ISBN-10: ')
print isbn
driver.switch_to_frame( driver.find_element_by_tag_name('iframe'))
soup = BeautifulSoup(driver.page_source,'lxml')
description = soup.find('div').text
print description
driver.quit()
|
[
"noreply@github.com"
] |
abuzarrizvi.noreply@github.com
|
003d4fcb554b08dd645a2f33bd3035bdd7d5d3f1
|
7234e6c72eb3f09c4a66dbe91f00fdf7742f010f
|
/algo/arrays/fruitsIntoBasket.py
|
52054c5a06bb6d3e66bb587ba1ab54b57a3e8c24
|
[] |
no_license
|
srinathalla/python
|
718ac603473e7bed060ba66aa3d39a90cf7ef69d
|
b6c546070b1738350303df3939888d1b0e90e89b
|
refs/heads/master
| 2021-06-13T06:11:42.653311
| 2021-02-19T06:01:41
| 2021-02-19T06:01:41
| 150,374,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
from typing import List
#
# This problem is similar to longest sub array with two distinct characters..
#
# T.C : O(2n) => O(n) Two pass solution
# S.C : O(3) => O(1) as map holds only 3 entries at max
# #
class Solution:
def totalFruit(self, tree: List[int]) -> int:
if len(tree) < 3:
return len(tree)
left = 0
right = 0
maxVal = [-1, -1]
count = 0
fruitsMap = {}
while right < len(tree):
if tree[right] not in fruitsMap:
fruitsMap[tree[right]] = 0
if fruitsMap[tree[right]] == 0:
count += 1
fruitsMap[tree[right]] += 1
while count > 2:
fruitsMap[tree[left]] -= 1
if fruitsMap[tree[left]] == 0:
count -= 1
left += 1
if maxVal[1] - maxVal[0] < right + 1 - left:
maxVal[1] = right + 1
maxVal[0] = left
right += 1
return maxVal[1] - maxVal[0]
s = Solution()
print(s.totalFruit([3, 3, 3, 1, 2, 1, 1, 2, 3, 3, 4]))
|
[
"srinathb10j.ik@gmail.com"
] |
srinathb10j.ik@gmail.com
|
76c42d25f8cacebb06202933fa87bbde25eaea41
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_coarsens.py
|
7e3f51e1ea0cdb68080e778c6311c0f110396d1c
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
from xai.brain.wordbase.verbs._coarsen import _COARSEN
#calss header
class _COARSENS(_COARSEN, ):
def __init__(self,):
_COARSEN.__init__(self)
self.name = "COARSENS"
self.specie = 'verbs'
self.basic = "coarsen"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
ca1ea0bfa5e35ba455e7b13dca16a027b2a67ae0
|
76fb0a3cfc9d9362ab29174bd1d55e888ea4d7f6
|
/tfx/orchestration/kubeflow/v2/components/experimental/ai_platform_training_executor.py
|
22029308648a87a84dc866cf7e1b633872bbf10c
|
[
"Apache-2.0"
] |
permissive
|
tensorflow/tfx
|
0cfc9c55171352ecc98c9dfa8ffe976c689d7073
|
1b328504fa08a70388691e4072df76f143631325
|
refs/heads/master
| 2023-08-30T11:56:50.894497
| 2023-08-29T22:47:19
| 2023-08-29T22:48:26
| 169,116,405
| 2,116
| 899
|
Apache-2.0
| 2023-09-14T21:51:42
| 2019-02-04T17:14:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,422
|
py
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executor for AI Platform Training component."""
import datetime
from typing import Any, Dict, List
from tfx import types
from tfx.dsl.components.base import base_executor
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.orchestration.launcher import container_common
from tfx.utils import json_utils
_POLLING_INTERVAL_IN_SECONDS = 30
_CONNECTION_ERROR_RETRY_LIMIT = 5
# Keys for AIP training config.
PROJECT_CONFIG_KEY = 'project_id'
TRAINING_JOB_CONFIG_KEY = 'training_job'
JOB_ID_CONFIG_KEY = 'job_id'
LABELS_CONFIG_KEY = 'labels'
CONFIG_KEY = 'aip_training_config'
class AiPlatformTrainingExecutor(base_executor.BaseExecutor):
"""AI Platform Training executor."""
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
self._log_startup(input_dict, output_dict, exec_properties)
aip_config = json_utils.loads(exec_properties.pop(CONFIG_KEY))
assert aip_config, 'AIP training config is not found.'
training_job = aip_config.pop(TRAINING_JOB_CONFIG_KEY)
job_id = aip_config.pop(JOB_ID_CONFIG_KEY)
project = aip_config.pop(PROJECT_CONFIG_KEY)
# Resolve parameters.
training_job['training_input'][
'args'] = container_common._resolve_container_command_line( # pylint: disable=protected-access
cmd_args=training_job['training_input']['args'],
input_dict=input_dict,
output_dict=output_dict,
exec_properties=exec_properties)
training_job['job_id'] = job_id or 'tfx_{}'.format(
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
# Invoke CMLE job
runner._launch_cloud_training( # pylint: disable=protected-access
project=project,
training_job=training_job)
|
[
"tensorflow-extended-nonhuman@googlegroups.com"
] |
tensorflow-extended-nonhuman@googlegroups.com
|
1674abf712c6b066af59fe0fea6ab7e259a5eb39
|
2d74104aaa132896a65ea0032951eee5d4c97840
|
/chemman/msds_collector/migrations/0003_uploadedmsds_token.py
|
4bd9419402891cf29b641c4c80e6ef4bb2b7ea19
|
[] |
no_license
|
Whitie/ChemManager
|
6e228e8713f9dfeca21adbd3e9a65c8871a822bc
|
d40792361527219514b1b4cc03718ea7c2a92777
|
refs/heads/master
| 2023-06-09T09:29:41.626087
| 2022-12-14T13:29:44
| 2022-12-14T13:29:44
| 189,994,861
| 0
| 0
| null | 2023-04-21T21:40:13
| 2019-06-03T11:47:23
|
Python
|
UTF-8
|
Python
| false
| false
| 553
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-26 06:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('msds_collector', '0002_parseddata'),
]
operations = [
migrations.AddField(
model_name='uploadedmsds',
name='token',
field=models.CharField(default='12345', editable=False, max_length=64, verbose_name='Security token'),
preserve_default=False,
),
]
|
[
"weimann.th@yahoo.com"
] |
weimann.th@yahoo.com
|
e7c04ec2cf024157d985c805cf4d4068468f9938
|
19ee165c252970294333e203728020cdcae550b3
|
/agc018/agc018_a/20200210103816.py
|
ab764993bcef7aa989b4543b9e7a8b7a477f7530
|
[] |
no_license
|
autumncolors11/atc_submits
|
4528c700e488d530f9cdde3a4198f36b30c3d35e
|
6f9689b6d7de45fd4e44ad118e4e3531bb8dac4d
|
refs/heads/master
| 2022-11-28T17:59:55.750896
| 2020-08-09T14:10:05
| 2020-08-09T14:10:05
| 258,122,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,437
|
py
|
import sys
sys.setrecursionlimit(10**6)
from math import floor,ceil,sqrt,factorial,log
from heapq import heappop, heappush, heappushpop
from collections import Counter,defaultdict,deque
from itertools import accumulate,permutations,combinations,product,combinations_with_replacement
from bisect import bisect_left,bisect_right
from copy import deepcopy
from operator import itemgetter
from fractions import gcd
from functools import reduce
mod = 10 ** 9 + 7
#整数input
def ii(): return int(sys.stdin.readline().rstrip()) #int(input())
def mii(): return map(int,sys.stdin.readline().rstrip().split())
def limii(): return list(mii()) #list(map(int,input().split()))
def lin(n:int): return [ii() for _ in range(n)]
def llint(n: int): return [limii() for _ in range(n)]
#文字列input
def ss(): return sys.stdin.readline().rstrip() #input()
def mss(): return sys.stdin.readline().rstrip().split()
def limss(): return list(mss()) #list(input().split())
def lst(n:int): return [ss() for _ in range(n)]
def llstr(n: int): return [limss() for _ in range(n)]
#本当に貪欲法か? DP法では??
#本当に貪欲法か? DP法では??
#本当に貪欲法か? DP法では??
#agc018 getting difference
n,k=mii()
arr=limii()
#print(arr)
def gcd1(numbers):
return reduce(gcd,numbers)
p=gcd1(arr)
if k%p==0 and k<=max(arr):
print("POSSIBLE")
else:
print("IMPOSSIBLE")
|
[
"biomimetics500tour@gmail.com"
] |
biomimetics500tour@gmail.com
|
6aaa96fca2f0988e8a953d3ea9d73960f446d645
|
af4d559792c4255d5f26bc078cd176b70c0e643f
|
/hpsklearn/components/linear_model/_omp.py
|
5ea2e28c9530e946a54473c50100917878145894
|
[
"BSD-3-Clause"
] |
permissive
|
hyperopt/hyperopt-sklearn
|
ec7d5f97ba8fd5a2c283dfec2fa9e0170b61c6ce
|
4b3f6fde3a1ded2e71e8373d52c1b51a0239ef91
|
refs/heads/master
| 2023-08-02T07:19:20.259964
| 2022-12-15T17:53:07
| 2022-12-15T17:53:07
| 8,293,893
| 1,480
| 292
|
NOASSERTION
| 2022-12-15T17:53:08
| 2013-02-19T16:09:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,050
|
py
|
from hyperopt.pyll import scope, Apply
from hyperopt import hp
from sklearn import linear_model
import numpy as np
import typing
@scope.define
def sklearn_OrthogonalMatchingPursuit(*args, **kwargs):
return linear_model.OrthogonalMatchingPursuit(*args, **kwargs)
@scope.define
def sklearn_OrthogonalMatchingPursuitCV(*args, **kwargs):
return linear_model.OrthogonalMatchingPursuitCV(*args, **kwargs)
def orthogonal_matching_pursuit(name: str,
n_nonzero_coefs: int = None,
tol: typing.Union[float, Apply] = None,
fit_intercept: bool = True,
precompute: typing.Union[str, bool] = "auto"
):
"""
Return a pyll graph with hyperparameters that will construct
a sklearn.linear_model.OrthogonalMatchingPursuit model.
Args:
name: name | str
n_nonzero_coefs: target number non-zero coefficients | int
tol: maximum norm of residual | float
fit_intercept: whether to calculate intercept for model | bool
precompute: whether to use precomputed Gram and Xy matrix | str, bool
"""
def _name(msg):
return f"{name}.orthogonal_matching_pursuit_{msg}"
hp_space = dict(
n_nonzero_coefs=n_nonzero_coefs,
tol=hp.loguniform(_name("tol"), np.log(1e-5), np.log(1e-2)) if tol is None else tol,
fit_intercept=fit_intercept,
precompute=precompute
)
return scope.sklearn_OrthogonalMatchingPursuit(**hp_space)
def orthogonal_matching_pursuit_cv(name: str,
copy: bool = True,
fit_intercept: bool = True,
max_iter: typing.Union[int, Apply] = None,
cv: typing.Union[int, callable, typing.Generator, Apply] = None,
n_jobs: int = 1,
verbose: typing.Union[bool, int] = False
):
"""
Return a pyll graph with hyperparameters that will construct
a sklearn.linear_model.OrthogonalMatchingPursuitCV model.
Args:
name: name | str
copy: whether design matrix must be copied | bool
fit_intercept: whether to calculate intercept for model | bool
max_iter: maximum number of iterations | int
cv: cross-validation splitting strategy| int, callable or generator
n_jobs: number of CPUs during cv | int
verbose: verbosity amount | bool, int
"""
def _name(msg):
return f"{name}.orthogonal_matching_pursuit_cv_{msg}"
hp_space = dict(
copy=copy,
fit_intercept=fit_intercept,
max_iter=max_iter,
cv=hp.pchoice(_name("cv"), [(0.0625, 3), (0.175, 4), (0.525, 5), (0.175, 6), (0.0625, 7)])
if cv is None else cv,
n_jobs=n_jobs,
verbose=verbose
)
return scope.sklearn_OrthogonalMatchingPursuitCV(**hp_space)
|
[
"38689620+mandjevant@users.noreply.github.com"
] |
38689620+mandjevant@users.noreply.github.com
|
0a6a59073b7043bda4ed6a38ceee5501721c11b1
|
db6533cae5a58becf3163d750cd890c73035d0c5
|
/set_mark/link.py
|
cc0f91e2b4be47254492f099864a57c07bc33132
|
[
"BSD-3-Clause"
] |
permissive
|
goranmabi/openNAMU
|
7e076f18279614a69a7969e22cf3b9fa31605cb5
|
1c0781cb6034040032122df2514e6d8baecc6120
|
refs/heads/master
| 2021-05-16T16:15:14.076942
| 2018-02-01T10:04:09
| 2018-02-01T10:04:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,698
|
py
|
import sqlite3
import re
from urllib import parse
import hashlib
def url_pas(data):
return parse.quote(data).replace('/','%2F')
def sha224(data):
return hashlib.sha224(bytes(data, 'utf-8')).hexdigest()
def link(conn, title, data, num, category, backlink):
curs = conn.cursor()
data = data.replace('\', '\\')
m = re.findall("\[\[(분류:(?:(?:(?!\]\]|#).)+))((?:#(?:(?:(?!#|\]\]).)+))+)?\]\]", data)
for g in m:
if title != g[0]:
if num == 1:
backlink += [[title, g[0], 'cat']]
curs.execute("select title from data where title = ?", [g[0]])
if curs.fetchall():
red = ""
else:
red = 'class="not_thing"'
if(category != ''):
category += ' / '
style = ''
if g[1]:
if re.search('#blur', g[1]):
style = ' style="filter: blur(3px);" onmouseover="this.style.filter=\'none\';" onmouseout="this.style.filter=\'blur(3px)\';"'
category += '<a ' + red + ' ' + style + ' href="/w/' + url_pas(g[0]) + '">' + re.sub("분류:", "", g[0]) + '</a>'
data = re.sub("\[\[(분류:(?:(?:(?!\]\]|#).)+))((?:#(?:(?:(?!#|\]\]).)+))+)?\]\]", '', data, 1)
test = re.findall('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', data)
for wiki in test:
if wiki[1]:
out = wiki[1]
else:
out = wiki[0]
data = re.sub('\[\[wiki:([^|\]]+)(?:\|([^\]]+))?\]\]', '<a id="inside" href="/' + wiki[0] + '">' + out + '</a>', data, 1)
test = re.findall('\[\[inter:([^:]+):((?:(?!\||]]).)+)(?:\|([^\]]+))?]]', data)
for wiki in test:
curs.execute('select link from inter where title = ?', [wiki[0]])
inter = curs.fetchall()
if not inter:
data = re.sub('\[\[inter:([^:]+):((?:(?!\||]]).)+)(?:\|([^\]]+))?]]', '인터위키 정보 없음', data, 1)
else:
if wiki[2]:
out = wiki[0] + ':' + wiki[2]
else:
out = wiki[0] + ':' + wiki[1]
data = re.sub('\[\[inter:([^:]+):((?:(?!\||]]).)+)(?:\|([^\]]+))?]]', '<a id="inside" href="' + inter[0][0] + wiki[1] + '">' + out + '</a>', data, 1)
data = re.sub("\[\[(?::(?P<in>(?:분류|파일):(?:(?:(?!\]\]).)*)))\]\]", "[[\g<in>]]", data)
a = re.findall('\[\[\.\.\/(\|(?:(?!]]).)+)?]]', data)
for i in a:
b = re.search('(.*)\/', title)
if b:
m = b.groups()
if i:
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + m[0] + i + ']]', data, 1)
else:
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + m[0] + ']]', data, 1)
else:
if i:
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + title + i + ']]', data, 1)
else:
data = re.sub('\[\[\.\.\/(\|((?!]]).)+)?]]', '[[' + title + ']]', data, 1)
data = re.sub('\[\[(?P<in>\/(?:(?!]]|\|).)+)(?P<out>\|(?:(?:(?!]]).)+))?]]', '[[' + title + '\g<in>\g<out>]]', data)
link = re.compile('\[\[((?:(?!\[\[|\]\]|\|).)*)(?:\|((?:(?!\[\[|\]\]).)*))?\]\]')
while 1:
l_d = link.search(data)
if l_d:
d = l_d.groups()
if re.search('^(?:파일|외부):', d[0]):
width = ''
height = ''
align = ''
span = ['', '']
try:
w_d = re.search('width=([0-9]+(?:[a-z%]+)?)', d[1])
if w_d:
width = 'width="' + w_d.groups()[0] + '" '
h_d = re.search('height=([0-9]+(?:[a-z%]+)?)', d[1])
if h_d:
height = 'height="' + h_d.groups()[0] + '" '
a_d = re.search('align=(center|right)', d[1])
if a_d:
span[0] = '<span style="display: block; text-align: ' + a_d.groups()[0] + ';">'
span[1] = '</span>'
except:
pass
f_d = re.search('^파일:([^.]+)\.(.+)$', d[0])
if f_d:
if not re.search("^파일:([^\n]*)", title):
if num == 1:
backlink += [[title, d[0], 'file']]
file_name = f_d.groups()
curs.execute("select title from data where title = ?", ['파일:' + file_name[0] + '.' + file_name[1]])
if not curs.fetchall():
img = '<a class="not_thing" href="/w/' + url_pas('파일:' + file_name[0] + '.' + file_name[1]) + '">파일:' + file_name[0] + '.' + file_name[1] + '</a>'
else:
img = span[0] + '<img src="/image/' + sha224(file_name[0]) + '.' + file_name[1] + '" ' + width + height + '>' + span[1]
data = link.sub(img, data, 1)
else:
img = span[0] + '<img src="' + re.sub('^외부:', '', d[0]) + '" ' + width + height + '>' + span[1]
data = link.sub(img, data, 1)
elif re.search('^https?:\/\/', re.sub('<([^>]*)>', '', d[0])):
view = d[0]
try:
if re.search('(.+)', d[1]):
view = d[1]
except:
pass
data = link.sub('<a class="out_link" rel="nofollow" href="' + re.sub('<([^>]*)>', '', d[0]) + '">' + view + '</a>', data, 1)
else:
view = d[0].replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\')
try:
if re.search('(.+)', d[1]):
view = d[1].replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\')
except:
pass
sh = ''
s_d = re.search('#((?:(?!x27;|#).)+)$', d[0])
if s_d:
href = re.sub('#((?:(?!x27;|#).)+)$', '', d[0])
sh = '#' + s_d.groups()[0]
else:
href = d[0]
if d[0] == title:
data = link.sub('<b>' + view + '</b>', data, 1)
elif re.search('^#', d[0]):
data = link.sub('<a title="' + sh + '" href="' + sh + '">' + view + '</a>', data, 1)
else:
a = re.sub('<([^>]*)>', '', href.replace(''', "'").replace('"', '"').replace('\\\\', '<slash>').replace('\\', '').replace('<slash>', '\\'))
if num == 1:
backlink += [[title, a, '']]
curs.execute("select title from data where title = ?", [a])
if not curs.fetchall():
no = 'class="not_thing"'
if num == 1:
backlink += [[title, a, 'no']]
else:
no = ''
data = link.sub('<a ' + no + ' title="' + re.sub('<([^>]*)>', '', href) + sh + '" href="/w/' + url_pas(a) + sh + '">' + view.replace('\\', '\\\\') + '</a>', data, 1)
else:
break
data = data.replace('\\', '\')
return [data, category, backlink]
|
[
"min08101@naver.com"
] |
min08101@naver.com
|
3efa40b1568ac779495027a89b5b37e1c9ac8094
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/3303.py
|
9127ca7f11dd01a24730d2a21ab3e5fad553dcc5
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
def parse_input(str):
str_first_val = str.split()
real_digits = [ int(c) for c in str_first_val[0] ]
return real_digits
def solve(test):
big_number = parse_input(test)
num_of_digits = len(big_number)
index_of_max_incrising_digit = 0
for digit_ind in range(0,num_of_digits):
if( big_number[digit_ind] > big_number[index_of_max_incrising_digit] ):
index_of_max_incrising_digit = digit_ind;
elif ( big_number[digit_ind] < big_number[index_of_max_incrising_digit] ):
big_number[index_of_max_incrising_digit] -= 1
for digit_ind_in_change in range(index_of_max_incrising_digit+1,num_of_digits):
big_number[digit_ind_in_change] = 9
break
num_in_str = ''.join(map(str,big_number))
if( num_in_str[0] == '0'):
num_in_str = num_in_str[1:]
return num_in_str
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
b3ab80fc9ff47764f6c0bf07ebfada6f13074ce2
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_355/ch131_2020_04_01_17_55_42_755784.py
|
4c58703c96d166274d0324a80fa7cc166fe51e65
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
import random
a = random.randint(1, 10)
b = random.randint(1, 10)
s = a + b
contador = 0
print ("Vc tem 10 dinheiros")
chutes = input("quantos chutes quer comprar?")
while contador < chutes:
pri_numero = input("aposte 1 numero")
seg_numeros = input("aposte outro numero, este deve ser maior ou igual ao anterior")
if s < pri_numero:
print ("Soma menor")
if s > seg_numero:
print ("Soma maior")
if s == pri_numero or s == seg_numero:
h = 10 - chutes
g= h + (h*3)
print("acertou")
return g
else:
print("Soma no meio")
contador+=1
|
[
"you@example.com"
] |
you@example.com
|
2cdf52486711ebe99c6646a833bcf3b370fd8337
|
d6c9c730ca514af81307018c669bd2f7e5de51c6
|
/Stack_20190722/stack_class.py
|
fea59faa2fbd2e6bd8c2179dd296a356f5911880
|
[] |
no_license
|
itbullet/python_projects
|
a1a56d070a6a70b0814cdc2a83cbd1ce9bc0dab8
|
06d171f1cab7f45c704944e40ffb0b7a175c1d2d
|
refs/heads/master
| 2020-06-22T15:07:40.768297
| 2019-09-09T13:49:02
| 2019-09-09T13:49:02
| 197,734,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
class Stack:
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
last = len(self.items) - 1
return self.items[last]
def size(self):
return len(self.items)
|
[
"eduard.shapirov@gmail.com"
] |
eduard.shapirov@gmail.com
|
1c58e39e99b670be9e8d2f6c4131c0c5d37638b3
|
6a1595e33051ebbd098f78cb0ff7d09cfc0a57dc
|
/day3/work_day3.py
|
64838662dc94e940f40402a3a1643dba55a2d0ff
|
[] |
no_license
|
MannixZ/Python_1-100
|
f3514ef1255ca27b656209716bdb27d3821df46e
|
740c3d2800f8d55fd2bcd8f789486253e01c9d53
|
refs/heads/master
| 2020-07-04T03:18:10.125091
| 2019-11-18T16:45:11
| 2019-11-18T16:45:11
| 202,135,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/7/29 15:05
# @Author : Mannix
# @File : work_day3.py
# @Software: PyCharm
def work_1():
'''英制单位英寸和公制单位厘米互换'''
value = float(input('请输入长度: '))
unit = input('请输入单位: ')
if unit == 'in' or unit == '英寸':
print('%f英寸 = %f厘米' % (value, value * 2.54))
elif unit == 'cm' or unit == '厘米':
print('%f厘米 = %f英寸' % (value, value / 2.54))
else:
print('请输入有效的单位')
def work_2():
'''掷骰子决定做什么事情'''
from random import randint
face = randint(1, 6)
if face == 1:
result = '唱首歌'
elif face == 2:
result = '跳个舞'
elif face == 3:
result = '学狗叫'
elif face == 4:
result = '做俯卧撑'
elif face == 5:
result = '念绕口令'
else:
result = '讲冷笑话'
print(result)
if __name__ == '__main__':
work_1()
work_2()
|
[
"noreply@github.com"
] |
MannixZ.noreply@github.com
|
37b16598f173de07ea41f4d67978f031034c90e7
|
ee838d827f128b6d651675fbc11c6127be58280a
|
/scipy_341_ex3.py
|
2c29670607e0bd65654f760b0260cb4d971ce5e0
|
[] |
no_license
|
CodedQuen/Scipy-and-Numpy
|
80a4b2d6792ba4702634849d583e0ce86e4a2820
|
1b333d5f7cf2c6310c64523f9de80718c6a84cb4
|
refs/heads/master
| 2022-11-09T23:12:17.624938
| 2020-06-27T04:38:01
| 2020-06-27T04:38:01
| 275,300,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
import numpy as np
from scipy.stats import geom
# Here set up the parameters for the normal distribution.
# where loc is the mean and scale is the standard deviation.
p = 0.5
dist = geom(p)
# Setup the sample range
x = np.linspace(0, 5, 1000)
# Calling norm's PMF and CDF
pmf = dist.pmf(x)
cdf = dist.cdf(x)
# Here we draw out 500 random values from
sample = dist.rvs(500)
|
[
"noreply@github.com"
] |
CodedQuen.noreply@github.com
|
07633b13bd1cf0f0286c52bae03096144bf0adb2
|
868cd4895a8da17a7e3e2c8da0ec9e139f8d0c30
|
/keras/keras35_lstm_sequences.py
|
13dc52d0ca3c1cfc6a2f3bdc6e3f021efc2c58f9
|
[] |
no_license
|
inJAJA/Study
|
35d4e410df7b476a4c298664bb99ce9b09bf6296
|
c2fd9a1e1f3a31cb3737cbb4891d848cc802f1d4
|
refs/heads/master
| 2022-12-21T11:41:15.396610
| 2020-09-20T23:51:45
| 2020-09-20T23:51:45
| 263,212,524
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,440
|
py
|
## LSTM_Sequence : LSTM을 2개 연결하기
from numpy import array
from keras.models import Model
from keras.layers import Dense, LSTM, Input
# 1. 데이터
x = array([[1,2,3],[2,3,4],[3,4,5],[4,5,6],
[5,6,7],[6,7,8],[7,8,9],[8,9,10],
[9,10,11],[11,12,13],
[20,30,40],[30,40,50],[40,50,60],
])
y = array([4,5,6,7,8,9,10,11,12,13,50,60,70]) # (13, ) 벡터
x_predict = array([50, 60, 70]) # (3, )
print('x.shape : ',x.shape) # (13, 3)
print('y.shape : ',y.shape) # (13, ) != (13, 1)
# 벡터 행렬
# x = x.reshape(13, 3, 1)
x = x.reshape(x.shape[0], x.shape[1], 1) # x.shape[0] = 13 / x.shape[1] = 3 / data 1개씩 작업 하겠다.
print(x.shape) # (13, 3, 1)
#2. 모델구성
input1 = Input(shape = (3, 1))
LSTM1 = LSTM(100, return_sequences= True)(input1)
# LSTM2 = LSTM(10)(LSTM1, return_sequences= True)(LSTM1) # return_sequences를 썼으면 무조건 LSTM사용
LSTM2 = LSTM(100)(LSTM1)
dense1 = Dense(50)(LSTM2)
dense2 = Dense(50)(dense1)
dense3 = Dense(50)(dense2)
output1 = Dense(1)(dense3)
model = Model(inputs = input1, outputs = output1)
model.summary()
'''
LSTM = ( , , ) : 3 차원
Dense = ( , ) : 2 차원
# return_sequences : 들어온 원래 차원으로 output
ex) x.shape = (13, 3, 1)
LSTM1 = LSTM( 10 )(dense1)
' 2 '차원으로 output
LSTM1 = LSTM( 10, return_sequence = True )(LSTM2)
(받아 들인) ' 3 '차원으로 output
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 3, 1) 0
_________________________________________________________________
lstm_1 (LSTM) (None, 3, 10) 480
_________________________________________________________________
lstm_2 (LSTM) (None, 10) 840
_________________________________________________________________
dense_1 (Dense) (None, 5) 55
_________________________________________________________________
dense_2 (Dense) (None, 1) 6
=================================================================
# 앞에 output_node가 input_dim(feature)가 된다.
# LSTM_sequences_parameter
:num_param = 4 * ( num_units + input_dim + bias) * num_units
= 4 * (LSTM2_output + LSTM1_output + 1 ) * LSTM2_output
= 4 * ( 10 + 10 + 1 ) * 10
= 840
'''
# EarlyStopping
from keras.callbacks import EarlyStopping
es = EarlyStopping(monitor = 'loss', patience=100, mode = 'min')
#3. 실행
model.compile(optimizer='adam', loss = 'mse')
model.fit(x, y, epochs =10000, batch_size = 13,callbacks = [es]
)
#4. 예측
x_predict = x_predict.reshape(1, 3, 1) # x값 (13, 3, 1)와 동일한 shape로 만들어 주기 위함
# (1, 3, 1) : 확인 1 * 3 * 1 = 3
# x_predict = x_predict.reshape(1, x_predict.shape[0], 1)
print(x_predict)
y_predict = model.predict(x_predict)
print(y_predict)
|
[
"zaiin4050@gmail.com"
] |
zaiin4050@gmail.com
|
547e6a3a571c9e2c706f867b40ebd19184612a68
|
4b64dd47fa9321b50875e96298a5f0766ffe97c9
|
/adventofcode/2020/day7/run.py
|
9046f4736d837a0d56f7717eedabdfc086788e75
|
[] |
no_license
|
choupi/puzzle
|
2ce01aa85201660da41378c6df093036fa2d3a19
|
736964767717770fe786197aecdf7b170d421c8e
|
refs/heads/master
| 2021-07-23T13:17:45.086526
| 2021-07-20T11:06:28
| 2021-07-20T11:06:28
| 13,580,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,032
|
py
|
def dfs(bag):
if bag == 'shiny gold':
bag_result[bag] = 1
return 1
if bag in bag_result:
return bag_result[bag]
if bag not in bag_dict or not bag_dict[bag]:
#print(bag)
bag_result[bag] = 0
return 0
for b in bag_dict[bag]:
if b in bag_result and bag_result[b] == 1:
bag_result[bag] = 1
return 1
if dfs(b):
bag_result[bag] = 1
return 1
return 0
bag_dict = {}
with open('input.txt') as f:
#with open('inp') as f:
for l in f:
if 'no other bags.' in l:
continue
bag, contains = l.strip().split(' contain ', 1)
bag = bag[:bag.rindex(' ')]
#print(bag)
contains = [' '.join(c.split(' ')[1:-1]) for c in contains.split(', ')]
#print(bag, contains)
bag_dict[bag] = contains
#print(len(bag_dict))
bag_result = {}
for bag in bag_dict:
if bag in bag_result:
continue
dfs(bag)
print(sum([v for b,v in bag_result.items()])-1)
|
[
"chromosome460@gmail.com"
] |
chromosome460@gmail.com
|
ce12c3bac2fa1e50590db1267dd69ad54d66dae2
|
1bfca35cb83842000a3e37f81a69627535a12bf6
|
/examples/testWhile3.py
|
78d24d522215b5cd084dafeb3a2c0b6ab0f53bc6
|
[] |
no_license
|
scar86/python_scripts
|
4a8a51f15d21f3b71fa8f0cd2131f75612c40613
|
686b1229c6736147b7cfcd2d0bf31e5f12e85e00
|
refs/heads/master
| 2021-01-11T00:28:31.989712
| 2016-11-04T18:52:21
| 2016-11-04T18:52:21
| 70,526,392
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
'''Test yourself again: what happens?'''
nums = list()
i = 4
while (i < 9):
nums.append(i)
i = i+2
print(nums)
|
[
"gogs@fake.local"
] |
gogs@fake.local
|
29131f57a53f289fa1acbf453e12bd04d8254414
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03971/s324739743.py
|
cf8d957b84e9ee6c1e59d6f5affdedeba9c06742
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
N, A, B = map(int, input().split())
S = input()
ac_count = 0
ac_b_count = 0
for s in S:
if ac_count < A + B:
if s == 'a':
print('Yes')
ac_count += 1
elif s == 'b' and ac_b_count < B:
print('Yes')
ac_count += 1
ac_b_count += 1
else:
print('No')
else:
print('No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b592bfd26e518c213f887d4d3836f718c8a09754
|
4234dc363d0599e93abc1d9a401540ad67702b3b
|
/clients/client/python/test/test_ui_container.py
|
c3e92fd3828cd09328d2c9a7225f247880fd3b55
|
[
"Apache-2.0"
] |
permissive
|
ninjayoto/sdk
|
8065d3f9e68d287fc57cc2ae6571434eaf013157
|
73823009a416905a4ca1f9543f1a94dd21e4e8da
|
refs/heads/master
| 2023-08-28T03:58:26.962617
| 2021-11-01T17:57:24
| 2021-11-01T17:57:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,014
|
py
|
"""
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.21
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_client
from ory_client.model.ui_nodes import UiNodes
from ory_client.model.ui_texts import UiTexts
globals()['UiNodes'] = UiNodes
globals()['UiTexts'] = UiTexts
from ory_client.model.ui_container import UiContainer
class TestUiContainer(unittest.TestCase):
"""UiContainer unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUiContainer(self):
"""Test UiContainer"""
# FIXME: construct object with mandatory attributes with example values
# model = UiContainer() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"3372410+aeneasr@users.noreply.github.com"
] |
3372410+aeneasr@users.noreply.github.com
|
8771d96e92a6351aa1051fd247148c3df97ae325
|
f27996a45d59afbd9619f2cb92639e088e6bea3c
|
/python/geodjango/fishtracking_receivers/manage.py
|
5718fae7f83fc3162c599c88b338320c96e1adb6
|
[] |
no_license
|
bopopescu/snippets
|
d7e689b5c74207f716b0f9c57a342b86662f39a5
|
1924cd8c7938dc32b6c1a50137cc7f053d4aafb2
|
refs/heads/master
| 2021-05-31T12:04:26.588555
| 2016-05-04T14:05:26
| 2016-05-04T14:05:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fishtracking_receivers.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"bart.aelterman@gmail.com"
] |
bart.aelterman@gmail.com
|
e9ebc7aaca1f90e2f3771a9aa5a6dcfda029d314
|
762de1c66746267e05d53184d7854934616416ee
|
/tools/MolSurfGenService/MolSurfaceGen32/chimera/share/AddAttr/gui.py
|
6e64524fe36c39941dcf1ec1fd3c40af7584d9e7
|
[] |
no_license
|
project-renard-survey/semanticscience
|
6e74f5d475cf0ebcd9bb7be6bb9522cf15ed8677
|
024890dba56c3e82ea2cf8c773965117f8cda339
|
refs/heads/master
| 2021-07-07T21:47:17.767414
| 2017-10-04T12:13:50
| 2017-10-04T12:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,341
|
py
|
# --- UCSF Chimera Copyright ---
# Copyright (c) 2000 Regents of the University of California.
# All rights reserved. This software provided pursuant to a
# license agreement containing restrictions on its disclosure,
# duplication and use. This notice must be embedded in or
# attached to all copies, including partial copies, of the
# software or any revisions or derivations thereof.
# --- UCSF Chimera Copyright ---
#
# $Id: gui.py 26655 2009-01-07 22:02:30Z gregc $
import chimera
from chimera import replyobj
from chimera.baseDialog import ModelessDialog
import Tkinter, Pmw
from OpenSave import OpenModeless
from AddAttr import addAttributes
class AddAttrDialog(OpenModeless):
	"""Chimera dialog for assigning attributes from an attribute file.

	Presents a file browser plus a model list; on Apply, feeds each chosen
	file to AddAttr.addAttributes restricted to the selected models.
	"""
	title = "Define Attribute"
	provideStatus = True
	name = "add/change attrs"
	help = "ContributedSoftware/defineattrib/defineattrib.html"
	def __init__(self):
		# clientPos='s' places the custom widgets below the file browser;
		# historyID gives this dialog its own file-location history.
		OpenModeless.__init__(self, clientPos='s', clientSticky='nsew',
			historyID="AddAttr")
	def Apply(self):
		"""Apply attributes from every chosen file to the selected models."""
		mols = self.molListBox.getvalue()
		if not mols:
			# Re-raise the dialog so the user can correct the selection.
			self.enter()
			replyobj.error("No models chosen in dialog\n")
			return
		for path in self.getPaths():
			# addAttributes returns the list of attribute names it set
			# (empty list -> the file produced no assignments).
			setAttrs = addAttributes(path, models=mols,
				log=self.doLog.get(),
				raiseAttrDialog=self.openDialog.get())
			if setAttrs == []:
				replyobj.error("No attributes were set from"
					" file %s\n" % path)
	def fillInUI(self, parent):
		"""Add the model list and option checkbuttons below the browser."""
		OpenModeless.fillInUI(self, parent)
		from chimera.widgets import MoleculeScrolledListBox
		self.molListBox = MoleculeScrolledListBox(self.clientArea,
			listbox_selectmode="extended",
			labelpos="w", label_text="Restrict to models:")
		self.molListBox.grid(row=0, column=0, sticky="nsew")
		# Let the model list absorb any extra space in the client area.
		self.clientArea.rowconfigure(0, weight=1)
		self.clientArea.columnconfigure(0, weight=1)
		checkButtonFrame = Tkinter.Frame(self.clientArea)
		checkButtonFrame.grid(row=1, column=0)
		# Whether to raise Render/Select by Attribute after applying.
		self.openDialog = Tkinter.IntVar(parent)
		self.openDialog.set(True)
		Tkinter.Checkbutton(checkButtonFrame, variable=self.openDialog,
			text="Open Render/Select by Attribute").grid(
			row=0, column=0, sticky='w')
		# Whether to echo match information to the Reply Log.
		self.doLog = Tkinter.IntVar(parent)
		self.doLog.set(False)
		Tkinter.Checkbutton(checkButtonFrame,
			text="Send match info to Reply Log",
			variable=self.doLog).grid(row=1, column=0, sticky='w')
from chimera import dialogs
# Expose the dialog through Chimera's registry under its declared name.
dialogs.register(AddAttrDialog.name, AddAttrDialog)
|
[
"alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5"
] |
alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5
|
c2dc614ebb35d37b1f02d60a7a2b4379aa756714
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/eventgrid/v20200601/list_domain_shared_access_keys.py
|
1f55623514778ec7b92005163dffba4572484403
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 2,637
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListDomainSharedAccessKeysResult',
'AwaitableListDomainSharedAccessKeysResult',
'list_domain_shared_access_keys',
]
@pulumi.output_type
class ListDomainSharedAccessKeysResult:
    """
    Shared access keys of the Domain.
    """
    def __init__(__self__, key1=None, key2=None):
        # Validate and record both keys; a truthy non-string is rejected.
        for attr, value in (("key1", key1), ("key2", key2)):
            if value and not isinstance(value, str):
                raise TypeError("Expected argument '%s' to be a str" % attr)
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def key1(self) -> Optional[str]:
        """
        Shared access key1 for the domain.
        """
        return pulumi.get(self, "key1")

    @property
    @pulumi.getter
    def key2(self) -> Optional[str]:
        """
        Shared access key2 for the domain.
        """
        return pulumi.get(self, "key2")
class AwaitableListDomainSharedAccessKeysResult(ListDomainSharedAccessKeysResult):
    """Result wrapper that lets callers ``await`` the invoke output."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this into a generator function,
        # which is what the `await` protocol requires; it never suspends.
        if False:
            yield self
        # A generator's `return` value becomes the result of `await`.
        return ListDomainSharedAccessKeysResult(
            key1=self.key1,
            key2=self.key2)
def list_domain_shared_access_keys(domain_name: Optional[str] = None,
                                   resource_group_name: Optional[str] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDomainSharedAccessKeysResult:
    """
    Use this data source to access information about an existing resource.

    :param str domain_name: Name of the domain.
    :param str resource_group_name: The name of the resource group within the user's subscription.
    """
    invoke_args = {
        'domainName': domain_name,
        'resourceGroupName': resource_group_name,
    }
    # Ensure invoke options exist and carry the SDK version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('azure-nextgen:eventgrid/v20200601:listDomainSharedAccessKeys', invoke_args, opts=opts, typ=ListDomainSharedAccessKeysResult).value
    return AwaitableListDomainSharedAccessKeysResult(
        key1=result.key1,
        key2=result.key2)
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
782df01ee7388692ea2870c9a5f8b636234f32e9
|
6413fe58b04ac2a7efe1e56050ad42d0e688adc6
|
/tempenv/lib/python3.7/site-packages/plotly/validators/isosurface/colorbar/title/font/_color.py
|
3d45a80274dd4a6a4d2f5e5279d0ca5accc9ccbe
|
[
"MIT"
] |
permissive
|
tytechortz/Denver_temperature
|
7f91e0ac649f9584147d59193568f6ec7efe3a77
|
9d9ea31cd7ec003e8431dcbb10a3320be272996d
|
refs/heads/master
| 2022-12-09T06:22:14.963463
| 2019-10-09T16:30:52
| 2019-10-09T16:30:52
| 170,581,559
| 1
| 0
|
MIT
| 2022-06-21T23:04:21
| 2019-02-13T21:22:53
|
Python
|
UTF-8
|
Python
| false
| false
| 497
|
py
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``isosurface.colorbar.title.font.color`` property."""

    def __init__(self, plotly_name='color',
                 parent_name='isosurface.colorbar.title.font', **kwargs):
        # Pull validator options out of kwargs, falling back to defaults.
        edit_type = kwargs.pop('edit_type', 'calc')
        role = kwargs.pop('role', 'style')
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
[
"jmswank7@gmail.com"
] |
jmswank7@gmail.com
|
1e877888ec765a400293dfc038262acb74aba999
|
3baad9ca9756a8dbe6463df6e7f535aa2e0bffa3
|
/{{ cookiecutter.site_name }}/{{ cookiecutter.main_module }}.py
|
31d3b2b7913d472088c2dc695f0841b9d91b3e82
|
[
"MIT"
] |
permissive
|
brettcannon/python-azure-web-app-cookiecutter
|
7fcaece747e7cef6d584c236aad4b842b63fa2f0
|
e7a3fbc3a724b7bbde43eb5904881d2e0cc07c42
|
refs/heads/master
| 2023-07-12T07:10:22.594048
| 2017-02-27T20:00:29
| 2017-02-27T20:00:29
| 63,901,465
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,264
|
py
|
{% if cookiecutter.site_type == "socket" %}
"""An example HTTP server using sockets on Azure Web Apps."""
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import os
import sys
class PythonVersionHandler(BaseHTTPRequestHandler):
    """Serve the Python interpreter version string for any GET request."""

    def do_GET(self):
        """Respond 200 with ``sys.version`` as UTF-8 plain text."""
        charset = "utf-8"
        # Encode once so Content-Length is the byte length of the body,
        # not the character count (the original sent len(sys.version) as
        # an int, which differs from the byte count for non-ASCII text).
        body = sys.version.encode(charset)
        self.send_response(200)
        self.send_header("Content-type", "text/plain; charset={}".format(charset))
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)
if __name__ == "__main__":
    # Azure Web Apps supplies the listening port via the PORT environment
    # variable; 5555 is the local-development fallback.
    port = int(os.environ.get("PORT", 5555))
    server = HTTPServer(("127.0.0.1", port), PythonVersionHandler)
    server.serve_forever()
{% else %}
"""An example WSGI server on Azure Web Apps."""
import sys
def wsgi_app(environ, start_response):
    """WSGI application yielding the interpreter version as text/plain."""
    start_response('200 OK', [('Content-type', 'text/plain')])
    # Generator body: nothing runs until the server iterates the response.
    yield sys.version.encode()
if __name__ == '__main__':
    # Local smoke test using the stdlib reference WSGI server.
    from wsgiref.simple_server import make_server
    server = make_server('localhost', 5555, wsgi_app)
    server.serve_forever()
{% endif %}
|
[
"brett@python.org"
] |
brett@python.org
|
8cd013a5cfbea88a36682c33babb0f3b7dae5129
|
b0c39c21ea63904d3e3c610a06c1e11b0a0c80d9
|
/setup.py
|
3998246ca1e02d4c827786524e5a89b7b902ab42
|
[
"Apache-2.0"
] |
permissive
|
kevenli/FeedIn
|
d9893d6f7c29d818460da875d5abcb5b9f25b958
|
9b45ba9090d279834ac59887a24154e6ac7f4593
|
refs/heads/master
| 2021-01-23T00:48:30.404336
| 2015-05-26T06:33:05
| 2015-05-26T06:33:05
| 27,056,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
# Use setuptools' setup(): distutils.core.setup silently ignores
# setuptools-only arguments such as install_requires.
from setuptools import setup, find_packages

setup(name='FeedIn',
      version='0.1',
      author='Keven Li',
      author_email='kevenli@users.noreply.github.com',
      url='https://github.com/kevenli/FeedIn',
      download_url='https://github.com/kevenli/FeedIn',
      description='Web data fetching engine.',
      long_description='A web data fetching engine which can be used in \
easy configuration and has multiple build-in modules.',
      packages=find_packages(exclude=('tests', 'tests.*')),
      provides=['feedin'],
      keywords='web data python fetching',
      license='Apache License, Version 2.0',
      classifiers=['Development Status :: 3 - Alpha',
                   'Intended Audience :: Developers',
                   'Natural Language :: English',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python :: 2',
                   # Matches the `license` field above; the previous
                   # LGPL/AGPL classifiers contradicted it.
                   'License :: OSI Approved :: Apache Software License',
                   'Topic :: Internet :: WWW/HTTP',
                   'Topic :: Software Development :: Libraries :: Application Frameworks',
                   'Topic :: Software Development :: Libraries :: Python Modules',
                   ],
      install_requires=[
          'lxml',
          'BeautifulSoup',
      ],
      )
|
[
"pbleester@gmail.com"
] |
pbleester@gmail.com
|
a89d0a7db49b9c97787f5713a000415bb2870f84
|
a97db7d2f2e6de010db9bb70e4f85b76637ccfe6
|
/leetcode/743-Network-Delay-Time.py
|
89a0689140f2f23f05b225a027399d92382c2f3c
|
[] |
no_license
|
dongxiaohe/Algorithm-DataStructure
|
34547ea0d474464676ffffadda26a92c50bff29f
|
a9881ac5b35642760ae78233973b1608686730d0
|
refs/heads/master
| 2020-05-24T20:53:45.689748
| 2019-07-19T03:46:35
| 2019-07-19T03:46:35
| 187,463,938
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
class Solution(object):
    def networkDelayTime(self, times, N, K):
        """Return the time for a signal from node K to reach all N nodes.

        Dijkstra's algorithm over the directed weighted edges in `times`
        (each entry is [source, target, travel_time]). Returns -1 if any
        node is unreachable from K.
        """
        # Local imports fix the NameError this code has outside LeetCode's
        # runner, which pre-imports `collections` and `heapq` for you.
        import heapq
        from collections import defaultdict

        routes = defaultdict(list)
        for u, v, w in times:
            routes[u].append((v, w))
        seen = {}            # node -> shortest arrival time, finalized on pop
        min_heap = [(0, K)]  # (elapsed time, node)
        while min_heap:
            time, node = heapq.heappop(min_heap)
            if node in seen:
                continue  # already finalized with a smaller or equal time
            seen[node] = time
            for v, w in routes[node]:
                if v not in seen:
                    heapq.heappush(min_heap, (time + w, v))
        # All nodes reached iff we finalized N of them; the answer is the
        # latest arrival time.
        return max(seen.values()) if len(seen) == N else -1
|
[
"ddong@zendesk.com"
] |
ddong@zendesk.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.