blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
17739c606e36fb190c627f7507e332546d8a1ae7
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/pyobjc/pyobjc/pyobjc-core-2.5.1/PyObjCTest/test_bridges.py
|
e803ab31cb684dd435a09d66a77d2d80598b1a77
|
[
"MIT"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
Python
| false
| false
| 2,106
|
py
|
from PyObjCTools.TestSupport import *
from PyObjCTest.testbndl import OC_TestClass2
import objc
import collections
import sys
# Python 2 keeps the User* collection classes in their own modules;
# on Python 3 they both live in ``collections``.
if sys.version_info[0] == 2:
    from UserList import UserList
    from UserDict import IterableUserDict
else:
    from collections import UserDict as IterableUserDict, UserList

# Objective-C container classes that the bridged proxies are checked against.
NSMutableArray = objc.lookUpClass("NSMutableArray")
NSMutableDictionary = objc.lookUpClass("NSMutableDictionary")
def classOfProxy(value):
    """Return the Objective-C class of the proxy object wrapping *value*."""
    return OC_TestClass2.classOfObject_(value)
class TestBridges (TestCase):
    """Verify that Python collection types are proxied as the expected
    Cocoa container classes.

    NOTE: the two "register" functions from objc._bridges aren't
    tested explicitly, but the tests in this class do verify that
    the default registrations (which are made through those two
    functions) work properly.
    """

    def test_xrange(self):
        # (x)range objects should be proxied as an NSMutableArray subclass.
        range_type = range if sys.version_info[0] == 3 else xrange
        v = range_type(0, 10)
        self.assertTrue(issubclass(classOfProxy(v), NSMutableArray))

    def test_user_collectons(self):
        # Note: Not "UserDict" because UserDict doesn't implement
        # __iter__ and hence isn't a collections.Mapping, and doesn't
        # implement enough API to implement the NSDictionary interface.
        v = IterableUserDict()
        self.assertTrue(issubclass(classOfProxy(v), NSMutableDictionary))

        v = UserList()
        self.assertTrue(issubclass(classOfProxy(v), NSMutableArray))

    def test_abc(self):
        # The ABC aliases on the ``collections`` module itself were removed
        # in Python 3.10; use collections.abc when available and fall back
        # to the old location on Python 2.
        try:
            from collections import abc as collections_abc
        except ImportError:
            collections_abc = collections

        class MySequence (collections_abc.Sequence):
            def __getitem__(self, idx):
                raise IndexError(idx)

            def __len__(self):
                return 0

        class MyDictionary (collections_abc.Mapping):
            def __getitem__(self, key):
                raise KeyError(key)

            def __len__(self):
                return 0

            def __iter__(self):
                # Empty generator: the bare return before the yield makes
                # this an iterator that yields nothing.
                return
                yield

        v = MyDictionary()
        self.assertTrue(issubclass(classOfProxy(v), NSMutableDictionary))

        v = MySequence()
        self.assertTrue(issubclass(classOfProxy(v), NSMutableArray))
if __name__ == "__main__":
    # main() is presumably provided by the PyObjCTools.TestSupport star
    # import above — TODO confirm.
    main()
|
[
"opensource@apple.com"
] |
opensource@apple.com
|
90e7ecdd1b7ff7bb7d60e0fd4a70f7234fb44cf0
|
f000fa4e6ef1de9591eeabff43ba57b7bf32561d
|
/tests/common/test_retryutils.py
|
878419ecfa97affa641c0bd768d0921c7056692b
|
[] |
no_license
|
VictorDenisov/ceph-lcm
|
1aca07f2d17bfda8760d192ffd6d17645705b6e4
|
3cfd9ced6879fca1c39039e195d22d897ddcde80
|
refs/heads/master
| 2021-01-15T09:19:23.723613
| 2016-09-17T01:18:45
| 2016-09-17T01:18:45
| 68,424,913
| 0
| 0
| null | 2016-09-17T01:17:36
| 2016-09-17T01:17:36
| null |
UTF-8
|
Python
| false
| false
| 3,106
|
py
|
# -*- coding: utf-8 -*-
"""Tests for cephlcm.common.retryutils"""
import unittest.mock
import pymongo.errors
import pytest
from cephlcm.common import retryutils
@pytest.fixture
def func_always_fails():
    """Fixture: a named mock callable whose every call raises Exception."""
    failing = unittest.mock.MagicMock(side_effect=Exception)
    failing.__name__ = ""
    return failing
@pytest.fixture
def func_always_passed():
    """Fixture: a mock callable that always succeeds."""
    return unittest.mock.MagicMock()
@pytest.fixture
def func_pass_fail():
    """Fixture: a named mock callable that raises once, then returns True."""
    flaky = unittest.mock.MagicMock(side_effect=[Exception(), True])
    flaky.__name__ = ""
    return flaky
@pytest.mark.parametrize("attempts, attempt", (
    (0, 0),
    (1, 0),
    (1, 3),
))
def test_exp_sleep_time_fails(attempts, attempt):
    """Out-of-range (attempts, attempt) pairs must raise ValueError."""
    with pytest.raises(ValueError):
        retryutils.exp_sleep_time(1, 10, attempts, attempt)
def test_exp_sleep_time():
    """Sleep times span [min, max] and never decrease with the attempt number."""
    assert retryutils.exp_sleep_time(1, 10, 100, 1) == 1
    assert retryutils.exp_sleep_time(1, 10, 100, 100) == 10

    samples = [
        retryutils.exp_sleep_time(1, 10, 10, attempt) for attempt in range(1, 11)]
    # Every earlier sample must be <= every later sample (monotone growth).
    for pos, smaller in enumerate(samples):
        for larger in samples[pos + 1:]:
            assert smaller <= larger
def test_simple_retry_ok(func_always_passed, func_pass_fail):
    """simple_retry succeeds for functions that eventually pass."""
    for func in func_always_passed, func_pass_fail:
        retryutils.simple_retry()(func)()
def test_simple_retry_fail(func_always_fails):
    """simple_retry re-raises when the function keeps failing."""
    with pytest.raises(Exception):
        retryutils.simple_retry()(func_always_fails)()
def test_sleep_retry_ok_always(func_always_passed, no_sleep):
    """No sleep happens when the wrapped function succeeds on the first try."""
    retryutils.sleep_retry()(func_always_passed)()
    no_sleep.assert_not_called()
def test_sleep_retry_ok_failed_once(func_pass_fail, no_sleep):
    """Exactly one sleep happens when the first call fails and the retry passes."""
    retryutils.sleep_retry()(func_pass_fail)()
    assert len(no_sleep.mock_calls) == 1
def test_sleep_retry_fail(func_always_fails, no_sleep):
    """A permanently failing function re-raises after the retries are spent.

    The expected count is written as ``5 - 1``: presumably 5 attempts with a
    sleep between consecutive attempts (none after the last) — TODO confirm
    the default attempt count in retryutils.
    """
    with pytest.raises(Exception):
        retryutils.sleep_retry()(func_always_fails)()
    assert len(no_sleep.mock_calls) == 5 - 1
@pytest.mark.parametrize("exc", (
    pymongo.errors.AutoReconnect,
    pymongo.errors.ConnectionFailure,
    pymongo.errors.ExecutionTimeout,
    pymongo.errors.CursorNotFound,
    pymongo.errors.ExceededMaxWaiters,
    pymongo.errors.NetworkTimeout,
    pymongo.errors.NotMasterError,
    pymongo.errors.ServerSelectionTimeoutError
))
def test_mongo_retry_ok(exc, func_pass_fail, no_sleep):
    """Connection-flavored PyMongo errors are retried: one failure, then success."""
    func_pass_fail.side_effect = [exc(""), True]
    retryutils.mongo_retry()(func_pass_fail)()
@pytest.mark.parametrize("exc", (
    pymongo.errors.PyMongoError,
    pymongo.errors.ConfigurationError,
    pymongo.errors.OperationFailure,
    pymongo.errors.WriteConcernError,
    pymongo.errors.WriteError,
    pymongo.errors.WTimeoutError,
    pymongo.errors.DuplicateKeyError,
    pymongo.errors.BulkWriteError,
    pymongo.errors.InvalidOperation,
    pymongo.errors.BSONError,
    pymongo.errors.InvalidName,
    pymongo.errors.InvalidURI,
    pymongo.errors.DocumentTooLarge
))
def test_mongo_retry_fail(exc, func_pass_fail, no_sleep):
    """PyMongo errors outside the retriable set propagate without a retry.

    The mock would succeed on a second call, so the raise proves no retry ran.
    """
    func_pass_fail.side_effect = [exc(""), True]
    with pytest.raises(exc):
        retryutils.mongo_retry()(func_pass_fail)()
|
[
"sarkhipov@mirantis.com"
] |
sarkhipov@mirantis.com
|
2a4ab14fe77c86874630bbd75a3bd0aa9c75fbb6
|
291fe7fb4cc5b682e560b0c5958e2220054451c6
|
/Big48/클래스연습1.py
|
c1975edcbe42a3c2942626d1924915e230b33b94
|
[] |
no_license
|
MinksChung/BigdataCourse
|
44dc5e7e578515e1dafbb7870911e09347a788f4
|
293803415da5d9f354059ea556818cc7610f36a5
|
refs/heads/master
| 2022-12-22T06:14:59.880933
| 2020-01-26T14:58:09
| 2020-01-26T14:58:09
| 202,575,724
| 0
| 0
| null | 2022-12-15T23:28:43
| 2019-08-15T16:29:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,357
|
py
|
# 파이썬은 '함수' 중심의 언어(모듈 중심). 모듈과 클래스는 별개
## 클래스 생성
class Dog:  # Class names are capitalized; the header needs only ":" (no parens).
    """Practice class: a dog with a color, a breed, and a few actions."""

    # Member variables (class-level defaults, empty strings).
    color = ""  # coat color
    field = ""  # breed

    def __init__(self):
        # Constructor: invoked automatically each time a Dog object is created.
        print("내가 마 생성자다 마. 내가 마 느그 객체 생성도 하고 마 그 때 호출도 하고 마 다했어 마.")

    # Member functions
    def jump(self):
        print("강아지가 뛰고 있다.")

    def sleep(self):
        print("강아지가 자고 있다.")

    def __str__(self):
        # Printable representation (like Java's toString()); reads the
        # instance's own attributes via self.
        return ", ".join((self.color, self.field))
## Create an object and exercise its attributes and methods.
dog1 = Dog()
dog1.color = "빨간색"
dog1.field = "토이푸들"
print(dog1)
dog1.jump()
print("------------------------------------------------------")

## Create a second, independent object.
dog2 = Dog()
dog2.color = "까만색"
dog2.field = "닥스훈트"
print(dog2)
dog2.sleep()
|
[
"minkschung@gmail.com"
] |
minkschung@gmail.com
|
5ef9ea6662f4ffcc844776800d65dfb1c07daa47
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r9/Gen/DecFiles/options/13102432.py
|
ef19e6f2fb686eeee810105d86b362e257cc42be
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/13102432.py generated: Fri, 27 Mar 2015 16:09:59
#
# Event Type: 13102432
#
# ASCII decay Descriptor: {[[B_s0]nos -> pi+ pi- (pi0 -> gamma gamma)]cc, [[B_s0]os -> pi- pi+ (pi0 -> gamma gamma)]cc}
#
# Signal generation: produce event type 13102432 (see descriptor above)
# using repeated hadronization with Pythia as the production engine.
from Configurables import Generation
Generation().EventType = 13102432
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"

# Decay the signal with EvtGen, driven by the dedicated user decay file.
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_pi+pi-pi0=DecProdCut,sqDalitz.dec"
# Keep only events whose daughters fall inside the LHCb acceptance;
# 531/-531 are the signal PID codes (B_s0 per the descriptor above).
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]

# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"

# One particle per event, drawn from a flat multiplicity tool.
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )

# Sample the gun momentum from a measured (pt, eta) spectrum histogram.
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"

# Smear the production vertex according to the beam-spot profile.
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13102432
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
9aaa01fae066eb8b73fc2e88e7d5553d38668bed
|
7341a4f317639eed3c6868310c7421e8eb7016ce
|
/usados/users/admin.py
|
b8baf198dd2a857373c88643faac6d26fb6227d9
|
[] |
no_license
|
fabianfalon/drf-boilerplate
|
f637fb66f7dd260e4a3b9c9daf6217ad99a90765
|
d24fd3a8de653f9731d41781e1e4e207881fbbb2
|
refs/heads/master
| 2020-05-19T06:05:12.629587
| 2019-11-12T18:44:15
| 2019-11-12T18:44:15
| 184,864,871
| 0
| 0
| null | 2019-10-22T06:26:39
| 2019-05-04T07:25:09
|
Python
|
UTF-8
|
Python
| false
| false
| 614
|
py
|
"""User models admin."""
# Django
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
# Models
from .models import Profile, User
class CustomUserAdmin(UserAdmin):
    """User model admin."""

    # Columns shown in the admin changelist for users.
    list_display = ('email', 'username', 'first_name', 'last_name', 'is_staff',)
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    """Profile model admin."""

    # Columns shown in the admin changelist for profiles.
    list_display = ('user', 'dni', 'address', 'publications_numbers', 'birthdate')
    # Search crosses the relation into the owning user's username/email.
    search_fields = ('user__username', 'user__email', )
    list_filter = ('dni',)
# Register User explicitly; Profile is registered via its decorator above.
admin.site.register(User, CustomUserAdmin)
|
[
"fabian.falon@gmail.com"
] |
fabian.falon@gmail.com
|
ca4ae6b51f677ae0b957d2c9e6b32f94cde2244b
|
3d2e5d1092acccfb73c07d68b6beeffc44b3f776
|
/planet/src/utils/plotting.py
|
cda9af3d36ef802034023f45d54fcd336fb5a5c4
|
[] |
no_license
|
MatthijsBiondina/WorldModels
|
f6cbcfe5349da7119329ef10831810d1b85c9d02
|
ab468f1aa978e3aa4e05174db24922085d1e33b1
|
refs/heads/master
| 2022-12-22T11:54:46.040828
| 2020-09-23T11:41:48
| 2020-09-23T11:41:48
| 248,212,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,336
|
py
|
import json
import os
from bokeh.plotting import output_file, figure, save
from bokeh.layouts import gridplot
import src.utils.config as cfg
def save_metrics(metrics: dict, save_loc: str):
    """Dump *metrics* to ``metrics.json`` and render an HTML summary plot.

    :param metrics: metrics dict; keys read here are 'episodes', 'rewards',
        't_scores', 'o_loss', 'r_loss' and 'kl_loss' — lists aligned with
        'episodes' whose entries may be None (filtered via not_none()).
    :param save_loc: directory receiving both metrics.json and plt.html.
    """
    # Persist the raw metrics next to the plot for later inspection.
    with open(os.path.join(save_loc, 'metrics.json'), 'w+') as f:
        json.dump(metrics, f, indent=2)

    output_file(os.path.join(save_loc, 'plt.html'), title=save_loc.split('/')[-1])

    # Top panel: episode rewards, with and without action noise.
    s_top = figure(width=720, height=360, title="Performance", x_axis_label='episodes', y_axis_label='reward')
    s_top.line(not_none(metrics['episodes'], metrics['rewards']), not_none(metrics['rewards']),
               legend_label="With Action Noise", line_color="orchid", line_width=3, line_alpha=0.66)
    s_top.line(not_none(metrics['episodes'], metrics['t_scores']), not_none(metrics['t_scores']),
               legend_label="Without Action Noise", line_color="royalblue", line_width=3, line_alpha=0.66)
    s_top.legend.location = "bottom_right"

    # Bottom panel: loss curves, sharing the x-range with the top panel.
    s_bot = figure(width=720, height=360, x_range=s_top.x_range, title="Loss Scores",
                   x_axis_label="episode", y_axis_label='loss')
    s_bot.line(not_none(metrics['episodes'], metrics['o_loss']), not_none(metrics['o_loss']),
               legend_label="Observation Loss (MSE)", line_color="orchid", line_width=3, line_alpha=0.66)
    # Reward loss is normalized by the action-repeat factor from the config.
    s_bot.line(not_none(metrics['episodes'], metrics['r_loss']),
               list(map(lambda x: x / cfg.action_repeat, not_none(metrics['r_loss']))),
               legend_label="Reward Loss (MSE)", line_color="royalblue", line_width=3, line_alpha=0.66)
    # KL loss is rescaled by the overshooting beta and shifted by free nats.
    s_bot.line(not_none(metrics['episodes'], metrics['kl_loss']),
               list(map(lambda x: x / (1 + cfg.overshooting_kl_beta) - cfg.free_nats, not_none(metrics['kl_loss']))),
               legend_label="Complexity Loss (KL-divergence)", line_color="sienna", line_width=3, line_alpha=0.66)
    # s_bot.line(not_none(metrics['episodes'], metrics['p_loss']),
    #            list(map(lambda x: x / cfg.action_repeat, not_none(metrics['p_loss']))),
    #            legend_label="Policy Loss (MSE)", line_color="seagreen", line_width=3)

    p = gridplot([[s_top], [s_bot]])
    save(p)
    # (Removed a stray trailing ``pass`` — it was dead code after save().)
def not_none(vlist, klist=None):
    """Filter *vlist* on non-None entries.

    With no *klist*, drop the None values of *vlist* itself; otherwise keep
    each value whose paired entry in *klist* is not None.
    """
    if klist is None:
        return [value for value in vlist if value is not None]
    return [value for value, key in zip(vlist, klist) if key is not None]
|
[
"biondina.matthijs@gmail.com"
] |
biondina.matthijs@gmail.com
|
c807672ac9a93e578d16c491d87364a6512dba76
|
da3e36172daaf863ef73372f8c36cc2629ec1769
|
/tuplas/eje03.py
|
bb54a038752b4310aef8c28e3c48b273a8959656
|
[] |
no_license
|
mentecatoDev/python
|
08eef1cb5a6ca2f16b01ee98192ccf1a65b9380a
|
80ddf541d3d1316ba8375db8f6ec170580e7831b
|
refs/heads/master
| 2021-06-30T07:03:51.957376
| 2021-02-22T09:40:46
| 2021-02-22T09:40:46
| 222,322,503
| 3
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
"""
Ejercicio 3
===========
Escribir un programa que lea un archivo e imprima las letras en orden decreciente de
frecuencia de aparición. El programa debería convertir toda la entrada a minúsculas
y solo contar las letras a-z (excluir la "ñ"). No se debeen contar espacios, dígitos,
signos de puntuación o cualquier otro carácter. Buscar textos en diferentes lenguas y
ver cómo la frecuencia de las letras varían entre lenguajes. Comparar los resultados
con las tablas que se puede encontrar en wikipedia.org/wiki/Letter_frequencies.
"""
import string
try:
fhandle = open(input("Introduzca el nombre del fichero: "))
except IOError:
print("El fichero no existe")
exit()
full_string = fhandle.read()
alphabet = dict()
for letter in full_string:
letter = letter.lower()
if letter in string.ascii_lowercase:
alphabet[letter] = alphabet.get(letter, 0) + 1
total = 0
ordered_list = []
for key, value in alphabet.items():
total += alphabet[key]
ordered_list += [(value, key)]
ordered_list.sort(reverse=True)
for tupla in ordered_list:
print("%s %.2f%%" % (tupla[1], tupla[0]*100/total))
|
[
"favila@iesromerovargas.com"
] |
favila@iesromerovargas.com
|
3da0ac5df135ea5ca9acb2681b46952392d40bff
|
ef0f84bcba3ded3624697c5c2a36e5c99cc5e498
|
/bumpversion/functions.py
|
b00f726a6650eb78bbecce9bb7197b7618e92fcd
|
[
"MIT"
] |
permissive
|
lbryio/bumpversion
|
e502d7a78608f72f6d6817bad451232fc02d75c0
|
0c8f0e327ac97d896ca3fd2e254628e2afd0a4fe
|
refs/heads/master
| 2021-01-11T11:57:20.662827
| 2018-10-31T15:49:54
| 2018-10-31T15:49:54
| 76,699,829
| 3
| 1
|
MIT
| 2018-10-31T15:49:56
| 2016-12-17T02:44:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,873
|
py
|
import re
import datetime
class NumericFunction(object):
    """
    This is a class that provides a numeric function for version parts.
    It simply starts with the provided first_value (0 by default) and
    increases it following the sequence of integer numbers.

    The optional value of this function is equal to the first value.

    This function also supports alphanumeric parts, altering just the numeric
    part (e.g. 'r3' --> 'r4'). Only the first numeric group found in the part is
    considered (e.g. 'r3-001' --> 'r4-001').
    """

    # Raw string: the un-prefixed '([^\d]*)(\d+)(.*)' literal is an invalid
    # escape sequence (DeprecationWarning since 3.6, SyntaxWarning in 3.12+).
    FIRST_NUMERIC = re.compile(r'([^\d]*)(\d+)(.*)')

    def __init__(self, first_value=None):
        """Store *first_value* (default "0") after validating it has a digit.

        Raises ValueError when the given first value contains no digit.
        """
        if first_value is not None:
            # Parse purely to validate; the groups themselves are unused here.
            try:
                part_prefix, part_numeric, part_suffix = self.FIRST_NUMERIC.search(
                    first_value).groups()
            except AttributeError:
                # search() returned None: no digit anywhere in the value.
                raise ValueError(
                    "The given first value {} does not contain any digit".format(first_value))
        else:
            first_value = 0
        self.first_value = str(first_value)
        self.optional_value = self.first_value

    def bump(self, value):
        """Return *value* with its first numeric group incremented by one."""
        part_prefix, part_numeric, part_suffix = self.FIRST_NUMERIC.search(
            value).groups()
        bumped_numeric = int(part_numeric) + 1
        return "".join([part_prefix, str(bumped_numeric), part_suffix])
class ValuesFunction(object):
    """
    This is a class that provides a values list based function for version parts.
    It is initialized with a list of values and iterates through them when
    bumping the part.

    The default optional value of this function is equal to the first value,
    but may be otherwise specified.

    When trying to bump a part which has already the maximum value in the list
    you get a ValueError exception.
    """

    def __init__(self, values, optional_value=None, first_value=None):
        if len(values) == 0:
            raise ValueError("Version part values cannot be empty")
        self._values = values

        # Both the optional and the first value default to the first entry
        # and must be members of the configured list.
        if optional_value is None:
            optional_value = values[0]
        if optional_value not in values:
            raise ValueError("Optional value {0} must be included in values {1}".format(
                optional_value, values))
        self.optional_value = optional_value

        if first_value is None:
            first_value = values[0]
        if first_value not in values:
            raise ValueError("First value {0} must be included in values {1}".format(
                first_value, values))
        self.first_value = first_value

    def bump(self, value):
        """Return the entry that follows *value* in the configured list."""
        position = self._values.index(value)
        try:
            return self._values[position + 1]
        except IndexError:
            raise ValueError(
                "The part has already the maximum value among {} and cannot be bumped.".format(self._values))
|
[
"giordani.leonardo@gmail.com"
] |
giordani.leonardo@gmail.com
|
f20cea7c2a92454e78c5329560ee8cf9c53555e6
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/sklearn/impute/_knn.py
|
e995c37a08e2e2b058641a63cec0e3dbf58a4303
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:890c089e7000acacdf6a7598f456bcc2d31f0331adb33d1b18c7cb84120d3758
size 11662
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
781e20ee60bcb21fe4e6068d568ba9a9e5393d25
|
8e311f8f94c9d218bd37f81c0badc906d78d6b33
|
/env/Lib/site-packages/openpyxl/utils/escape.py
|
52ef33e0a97ffe61274bfc2b8c8686fd8d78d159
|
[
"MIT"
] |
permissive
|
htwenhe/DJOA
|
d76307ff8752c1e2a89101de1f74094b94bf9b18
|
3c2d384a983e42dedfd72561353ecf9370a02115
|
refs/heads/master
| 2021-09-03T21:49:28.267986
| 2018-01-12T08:12:55
| 2018-01-12T08:12:55
| 108,937,324
| 0
| 1
|
MIT
| 2018-01-12T08:06:50
| 2017-10-31T02:59:26
|
Python
|
UTF-8
|
Python
| false
| false
| 828
|
py
|
from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
"""
OOXML has non-standard escaping for characters < \031
"""
import re
def escape(value):
    r"""
    Convert ASCII < 31 to OOXML: \n == _x + hex(ord(\n)) +_

    Control characters in the (octal) range \001-\031 are replaced by the
    _xHHHH_ form, where HHHH is the zero-padded lowercase hex code point.
    """
    pattern = re.compile(r"[\001-\031]")

    def replace(match):
        # Escape one matched control character.
        return "_x%04x_" % ord(match.group(0))  # py 2.6

    return pattern.sub(replace, value)
def unescape(value):
    r"""
    Convert escaped strings to ASCIII: _x000a_ == \n

    Substitution only runs when the "_x" marker is present, so the common
    unescaped case returns the input untouched.
    """
    if "_x" not in value:
        return value

    pattern = re.compile("_x([0-9A-Fa-f]{4})_")
    return pattern.sub(lambda match: chr(int(match.group(1), 16)), value)
|
[
"htwenhe@hotmail.com"
] |
htwenhe@hotmail.com
|
e4832b743c96639a2e1820a18dea177fd0b25045
|
54da94dce244ab659c8036cafcdc1b326fbfe490
|
/datoteke-s-predavanj/2015-16/10-nakljucna-stevila/matematiki/ocenipi.py
|
a001630b6569c0a30461ff211502ce246eb54d9d
|
[] |
no_license
|
jakamrak/uvod-v-programiranje
|
640b2738164e2026308d7e60f1478659df79cc40
|
3c05290f4f23b384ad9063880fffe208c08fc599
|
refs/heads/master
| 2022-07-17T16:50:18.563453
| 2020-05-18T13:54:13
| 2020-05-18T13:54:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,149
|
py
|
import random
def oceni_pi(stevilo_poskusov):
    """Monte-Carlo estimate of pi from uniform points in the unit square.

    Each trial draws x then y in [0, 1); the point falls inside the quarter
    circle when x**2 + y**2 <= 1, and pi ~ 4 * hits / trials.
    """
    v_krogu = sum(
        1 for _ in range(stevilo_poskusov)
        if random.random() ** 2 + random.random() ** 2 <= 1
    )
    return 4 * v_krogu / stevilo_poskusov
def oceni_eno_kocko(stevilo_poskusov):
    """Estimate the probability of each face of one fair die by simulation."""
    meti = [random.randint(1, 6) for _ in range(stevilo_poskusov)]
    stevci = {}
    for met in meti:
        stevci[met] = stevci.get(met, 0) + 1
    # Turn absolute counts into relative frequencies.
    return {met: stevec / stevilo_poskusov for met, stevec in stevci.items()}
def oceni_dve_kocki(stevilo_poskusov):
    """Estimate the distribution of the sum of two fair dice by simulation."""
    meti = [random.randint(1, 6) + random.randint(1, 6)
            for _ in range(stevilo_poskusov)]
    stevci = {}
    for met in meti:
        stevci[met] = stevci.get(met, 0) + 1
    # Turn absolute counts into relative frequencies.
    return {met: stevec / stevilo_poskusov for met, stevec in stevci.items()}
def oceni_max_tri_kocke(stevilo_poskusov):
    """Estimate the distribution of the maximum of three fair dice rolls."""
    meti = [max(random.randint(1, 6), random.randint(1, 6), random.randint(1, 6))
            for _ in range(stevilo_poskusov)]
    stevci = {}
    for met in meti:
        stevci[met] = stevci.get(met, 0) + 1
    # Turn absolute counts into relative frequencies.
    return {met: stevec / stevilo_poskusov for met, stevec in stevci.items()}
|
[
"matija@pretnar.info"
] |
matija@pretnar.info
|
9ff1408db7be8a1136af5560f389a255626ebc8d
|
f1614f3531701a29a33d90c31ab9dd6211c60c6b
|
/menu_sun_integration/infrastructure/pernod/translators/pernod_product_translator.py
|
310121443153e34684dfcbcfcb6ea8f758c223f6
|
[] |
no_license
|
pfpacheco/menu-sun-api
|
8a1e11543b65db91d606b2f3098847e3cc5f2092
|
9bf2885f219b8f75d39e26fd61bebcaddcd2528b
|
refs/heads/master
| 2022-12-29T13:59:11.644409
| 2020-10-16T03:41:54
| 2020-10-16T03:41:54
| 304,511,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,581
|
py
|
from menu_sun_api.domain.model.product.product import Product, ProductStatus
from menu_sun_api.domain.model.seller.seller import Seller
from menu_sun_integration.application.translators.interfaces.abstract_product_translator import \
AbstractProductTranslator
from menu_sun_integration.infrastructure.pernod.presentations.product.pernod_product_get_request import \
PernodProductGetRequest
from menu_sun_integration.infrastructure.pernod.presentations.product.pernod_product_response import \
PernodProductResponse
from menu_sun_integration.presentations.interfaces.abstract_platform import AbstractPlatform
from menu_sun_integration.presentations.interfaces.abstract_request import AbstractRequest
class PernodProductTranslator(AbstractProductTranslator):
    """Translates Pernod product payloads to/from menu_sun_api domain objects."""

    def bind_product(self, product: PernodProductResponse) -> Product:
        """Map a PernodProductResponse onto the domain Product model."""
        # The response's 'active' flag drives the domain ENABLED/DISABLED status.
        status = ProductStatus.ENABLED if product.active else ProductStatus.DISABLED

        return Product(status=status,
                       sku=product.sku, name=product.name, description=product.description, weight=product.weight,
                       ean=product.ean, brand=product.brand, width=product.width, height=product.height,
                       length=product.length)

    def to_seller_send_format(self, entity: AbstractPlatform) -> AbstractRequest:
        # No-op: returns None (no send format is implemented for Pernod here).
        pass

    def to_seller_get_format(self, seller: Seller, **kwargs) -> PernodProductGetRequest:
        """Build the (parameterless) request used to fetch Pernod products."""
        return PernodProductGetRequest()

    def to_domain_format(self, response: PernodProductResponse) -> Product:
        """Convert a single Pernod response into a domain Product."""
        return self.bind_product(response)
|
[
"pfpacheco@gmail.com"
] |
pfpacheco@gmail.com
|
f94f884ae27d3ed75f4d10e846d2b7d48fa3ebc8
|
169f134442ce7f040e8a1878258c90ef9f0cbcc1
|
/skivvy.py
|
0ff0ad391b9f1781aa56ece1702caab15635ef0e
|
[] |
no_license
|
Kazade/skivvy
|
9d41b0acc934e6059d60b386cc54a0638b14f7cc
|
41278fa66b559ee507862c729ad89028ccd5ae52
|
refs/heads/master
| 2016-09-11T04:41:31.998555
| 2014-12-05T16:36:10
| 2014-12-05T16:36:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,832
|
py
|
import argparse
import glob
import json
import os
import re
import subprocess
import sys
import tempfile
ENV_REGEX = r"(.+)\[(.+)\]"  # matches "task[namespace]" style task names

# Command-line interface.  NOTE(review): ``type=unicode`` (plus the print
# statement further down) makes this a Python 2 script.  Also, ``type=bool``
# for --watch treats ANY non-empty string as True — use with care.
parser = argparse.ArgumentParser(description="underling task worker")
parser.add_argument("--config", type=unicode, default="underling.json")
parser.add_argument("--env", type=unicode, default="")
parser.add_argument("--watch", type=bool, default=False)
parser.add_argument("command", type=unicode)
def locate_task(task_dirs, task_name):
    """Return the first existing path <dir>/<task_name>, or "" when none exists."""
    candidates = (os.path.join(task_dir, task_name) for task_dir in task_dirs)
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return ""
def expand_globs(inputs):
    """Expand shell-style glob patterns in *inputs* into concrete paths.

    The original stub returned None (bare ``pass``), which made
    ``for input_file in inputs`` in run_command raise TypeError.  Patterns
    with no filesystem match are kept literally so non-glob inputs pass
    through unchanged.
    """
    expanded = []
    for pattern in inputs:
        matches = glob.glob(pattern)
        expanded.extend(matches if matches else [pattern])
    return expanded
def replace_constants(string, constants):
    """Substitute every "{NAME}" placeholder in *string* from *constants*.

    List values are joined with single spaces; everything else is assumed
    to be a plain string.
    """
    for name, value in constants.items():
        placeholder = "{" + name + "}"
        if isinstance(value, list):
            string = string.replace(placeholder, " ".join(value))
        else:
            string = string.replace(placeholder, value)
    return string
def run_command(config, command, namespace=""):
    """Resolve *command* from the config and run its dependency pipelines.

    config: parsed underling.json as a dict; command: key into __commands__;
    namespace: optional env prefix matched against "task[env]" task names.
    Exits the process (status 1) on any unresolved command/dependency/task.
    """
    task_roots = config.get("__task_dirs__", [])

    # Seed the constants table, then expand __constants__ against it.
    constants = {}
    constants["PROJECT_ROOT"] = os.getcwd()
    new_constants = config.get("__constants__", {})
    for const in new_constants:
        if isinstance(new_constants[const], list):
            new_constants[const] = [ replace_constants(x, constants) for x in new_constants[const] ]
        else:
            new_constants[const] = replace_constants(new_constants[const], constants)
    constants.update(new_constants)

    # Task search roots may themselves use constants.
    task_roots = [ replace_constants(x, constants) for x in task_roots ]

    commands = config.get("__commands__", {})
    if command not in commands:
        print("Unrecognized command: %s" % command)
        print("Available commands: \n%s" % "\n\n".join(commands.keys()))
        sys.exit(1)

    for dependency in commands[command]:
        # file:// dependencies are recognized but not implemented yet.
        if dependency.startswith("file://"):
            print("Unhandled dependency type: %s" % dependency)
            sys.exit(1)

        dependency_name = dependency
        dependency = config.get(dependency_name)
        if not dependency:
            print("Unrecognized dependency: %s" % dependency_name)
            sys.exit(1)

        # Each dependency is a pipeline: each task's output feeds the next.
        inputs = ""
        for task in dependency:
            if not isinstance(task, dict):
                print("Pipeline %s should be a list of tasks (dictionaries)" % dependency_name)

            task_name = task.get("task")
            # "name[env]" tasks only run when the env matches our namespace.
            has_env = re.match(ENV_REGEX, task_name)
            if has_env and not has_env.group(2).startswith(namespace):
                continue # Ignore tasks that don't have this namespace

            if has_env:
                task_name = has_env.group(1)

            inputs = replace_constants(task.get("input", ""), constants).split(" ") or inputs
            inputs = expand_globs(inputs)
            if inputs:
                constants["INPUT_FILES"] = inputs

            output_file = task.get("output")
            if not output_file: # No explicit output? Then generate a temporary file
                _, output_file = tempfile.mkstemp()

            task = locate_task(task_roots, task_name)
            if not task:
                print("Unable to find task: %s" % task_name)
                sys.exit(1)

            # Invoke the task executable.  NOTE(review): Python 2 print
            # statement — this line is a syntax error under Python 3.
            final_command = [ task, "--output=%s" % output_file ]
            for input_file in inputs:
                final_command.append("--input=%s" % input_file)
            print subprocess.check_output(final_command)

            # The task's output becomes the next stage's input.
            inputs = output_file
def load_config(config):
    """Read the JSON file at path *config* and return the parsed content."""
    with open(config, "r") as handle:
        return json.load(handle)
def main():
    """CLI entry point: parse args, load the config, run the requested command."""
    args = parser.parse_args()
    config = load_config(args.config)
    run_command(config, args.command)
    return 0

if __name__ == '__main__':
    sys.exit(main())
|
[
"kazade@gmail.com"
] |
kazade@gmail.com
|
6a761ba24b8b47cb5af1cfae4382ae695c5c1676
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_sultanates.py
|
18564bf3d0e82aa07511fead77ff8b845972a870
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
#calss header
class _SULTANATES():
def __init__(self,):
self.name = "SULTANATES"
self.definitions = sultanate
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['sultanate']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
1ccac1f41ac29343126ed052cb5f0f3e98e6ec18
|
4909bdc40f9d606e336a077908ca16933989f66f
|
/tensorflow/python/distribute/multi_worker_continuous_run_test.py
|
19790a0d69fb2eb2bdeee043f96d30c1f8e66767
|
[
"Apache-2.0"
] |
permissive
|
xhook/tensorflow
|
9532fcf1b466f65f938aa95aba290d99d2004ad0
|
978d3b37393a4dd2411e2e6657dff1bbcac81a66
|
refs/heads/master
| 2020-09-06T21:00:35.848217
| 2019-11-08T18:03:54
| 2019-11-08T18:59:48
| 220,535,874
| 0
| 0
|
Apache-2.0
| 2019-11-08T19:39:19
| 2019-11-08T19:39:18
| null |
UTF-8
|
Python
| false
| false
| 3,472
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for continuous runs using cross-worker collective ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_process_runner_util
from tensorflow.python.distribute import multi_worker_test_base as test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
# TODO(b/143286947): expand the test to cover fault tolerance and elasticity
class MultiWorkerContinuousRunTest(test.TestCase, parameterized.TestCase):
  """Checks cross-worker collective all-reduce over many consecutive runs."""

  @combinations.generate(combinations.combine(mode=['eager']))
  def testAllReduceContinuousRun(self, mode):
    num_workers = 5
    tensor_shape = [2, 2]
    # Prefer a GPU when one is visible; otherwise reduce on CPU.
    local_device = '/device:CPU:0'
    if config.list_physical_devices('GPU'):
      local_device = '/device:GPU:0'

    def worker_step_fn():
      # One all-reduce step executed inside each worker process.
      strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
      # Each worker reads its own index from the TF_CONFIG environment
      # variable set up by the multi-process test harness.
      tf_config = json.loads(os.environ['TF_CONFIG'])
      worker_id = tf_config['task']['index']

      @def_function.function
      def run_reduce():
        with ops.device(local_device):
          t_in = array_ops.ones(tensor_shape) * worker_id
          return strategy.reduce(reduce_util.ReduceOp.MEAN, t_in, axis=None)

      t_out = run_reduce()
      # Element values from the workers are
      # 0, 1, ..., (num_workers - 1)
      expected_mean = (num_workers - 1) / 2
      expected_out = np.ones(tensor_shape) * expected_mean
      self.assertAllClose(t_out, expected_out)

    def worker_fn():
      gpus = config.list_physical_devices('GPU')
      if gpus:
        # Set virtual GPU with memory limit of 64MB so that multiple worker
        # processes can share the physical GPU
        config.set_logical_device_configuration(
            gpus[0], [context.LogicalDeviceConfiguration(64)])
      # Repeat the step to exercise continuous collective runs.
      for _ in range(100):
        worker_step_fn()

    # TODO(b/141948186): Remove this `with` block once b/141948186 is resolved.
    with multi_process_runner_util.try_run_and_except_connection_error(self):
      multi_process_runner.MultiProcessRunner().run(
          worker_fn,
          cluster_spec=test_base.create_cluster_spec(num_workers=num_workers))
if __name__ == '__main__':
multi_process_runner.test_main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
59a16006343239f0fce22a8359cb005c9a563593
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03363/s899204324.py
|
4e166a6e2b65758d9abb5e6794647c95abe254e6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
import bisect
import math
n = int(input())
a = list(map(int,input().split()))
s = [0]*(n+1)
for i,e in enumerate(a):
s[i+1] = s[i] + e
def comb(n,m):
return math.factorial(n)//math.factorial(m)//math.factorial(n-m)
ans = 0
s = sorted(s)
l = 0
r = 0
#print(s)
while l < n+1:
while r < n+1 and s[l] == s[r]:
#print(r,s[r])
r += 1
if r-l >= 2:
ans += comb(r-l,2)
l = r
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
dcb8a80e5b04d0964118e6cb6917ac60f0a9857e
|
ce76b3ef70b885d7c354b6ddb8447d111548e0f1
|
/place_and_point/week_and_next_day/work_or_high_way/take_good_year_with_next_hand/woman/get_small_way.py
|
5a4a7c6086c5ee148aac418594f1d5b2fa66fbc4
|
[] |
no_license
|
JingkaiTang/github-play
|
9bdca4115eee94a7b5e4ae9d3d6052514729ff21
|
51b550425a91a97480714fe9bc63cb5112f6f729
|
refs/heads/master
| 2021-01-20T20:18:21.249162
| 2016-08-19T07:20:12
| 2016-08-19T07:20:12
| 60,834,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
#! /usr/bin/env python
def place_and_few_case(str_arg):
go_next_thing_at_great_way(str_arg)
print('old_work')
def go_next_thing_at_great_way(str_arg):
print(str_arg)
if __name__ == '__main__':
place_and_few_case('seem_point')
|
[
"jingkaitang@gmail.com"
] |
jingkaitang@gmail.com
|
3dccb0ced631f2774613aa58ddedc938d556cee2
|
083b758356821647b6a2db3f4ae32b355ebd28c4
|
/Question_91_100/answers/answer_98.py
|
e98d41c8e6551a43cc8e51166b79b4fd2f798cbf
|
[
"MIT"
] |
permissive
|
litiangu/Gasyori100knock
|
0b2e2844748acecba383500f56a25afad5a22973
|
38305760a4db4c6f8addf176630197960e7fc6a9
|
refs/heads/master
| 2020-07-16T18:00:05.756714
| 2019-09-01T22:58:39
| 2019-09-01T22:58:39
| 205,837,921
| 1
| 0
|
MIT
| 2019-09-02T11:02:30
| 2019-09-02T11:02:30
| null |
UTF-8
|
Python
| false
| false
| 5,894
|
py
|
import cv2
import numpy as np
np.random.seed(0)
# read image
img = cv2.imread("imori_1.jpg")
H, W, C = img.shape
# Grayscale
gray = 0.2126 * img[..., 2] + 0.7152 * img[..., 1] + 0.0722 * img[..., 0]
gt = np.array((47, 41, 129, 103), dtype=np.float32)
cv2.rectangle(img, (gt[0], gt[1]), (gt[2], gt[3]), (0,255,255), 1)
def iou(a, b):
area_a = (a[2] - a[0]) * (a[3] - a[1])
area_b = (b[2] - b[0]) * (b[3] - b[1])
iou_x1 = np.maximum(a[0], b[0])
iou_y1 = np.maximum(a[1], b[1])
iou_x2 = np.minimum(a[2], b[2])
iou_y2 = np.minimum(a[3], b[3])
iou_w = max(iou_x2 - iou_x1, 0)
iou_h = max(iou_y2 - iou_y1, 0)
area_iou = iou_w * iou_h
iou = area_iou / (area_a + area_b - area_iou)
return iou
def hog(gray):
h, w = gray.shape
# Magnitude and gradient
gray = np.pad(gray, (1, 1), 'edge')
gx = gray[1:h+1, 2:] - gray[1:h+1, :w]
gy = gray[2:, 1:w+1] - gray[:h, 1:w+1]
gx[gx == 0] = 0.000001
mag = np.sqrt(gx ** 2 + gy ** 2)
gra = np.arctan(gy / gx)
gra[gra<0] = np.pi / 2 + gra[gra < 0] + np.pi / 2
# Gradient histogram
gra_n = np.zeros_like(gra, dtype=np.int)
d = np.pi / 9
for i in range(9):
gra_n[np.where((gra >= d * i) & (gra <= d * (i+1)))] = i
N = 8
HH = h // N
HW = w // N
Hist = np.zeros((HH, HW, 9), dtype=np.float32)
for y in range(HH):
for x in range(HW):
for j in range(N):
for i in range(N):
Hist[y, x, gra_n[y*4+j, x*4+i]] += mag[y*4+j, x*4+i]
## Normalization
C = 3
eps = 1
for y in range(HH):
for x in range(HW):
#for i in range(9):
Hist[y, x] /= np.sqrt(np.sum(Hist[max(y-1,0):min(y+2, HH), max(x-1,0):min(x+2, HW)] ** 2) + eps)
return Hist
def resize(img, h, w):
_h, _w = img.shape
ah = 1. * h / _h
aw = 1. * w / _w
y = np.arange(h).repeat(w).reshape(w, -1)
x = np.tile(np.arange(w), (h, 1))
y = (y / ah)
x = (x / aw)
ix = np.floor(x).astype(np.int32)
iy = np.floor(y).astype(np.int32)
ix = np.minimum(ix, _w-2)
iy = np.minimum(iy, _h-2)
dx = x - ix
dy = y - iy
out = (1-dx) * (1-dy) * img[iy, ix] + dx * (1 - dy) * img[iy, ix+1] + (1 - dx) * dy * img[iy+1, ix] + dx * dy * img[iy+1, ix+1]
out[out>255] = 255
return out
class NN:
def __init__(self, ind=2, w=64, w2=64, outd=1, lr=0.1):
self.w1 = np.random.normal(0, 1, [ind, w])
self.b1 = np.random.normal(0, 1, [w])
self.w2 = np.random.normal(0, 1, [w, w2])
self.b2 = np.random.normal(0, 1, [w2])
self.wout = np.random.normal(0, 1, [w2, outd])
self.bout = np.random.normal(0, 1, [outd])
self.lr = lr
def forward(self, x):
self.z1 = x
self.z2 = sigmoid(np.dot(self.z1, self.w1) + self.b1)
self.z3 = sigmoid(np.dot(self.z2, self.w2) + self.b2)
self.out = sigmoid(np.dot(self.z3, self.wout) + self.bout)
return self.out
def train(self, x, t):
# backpropagation output layer
#En = t * np.log(self.out) + (1-t) * np.log(1-self.out)
En = (self.out - t) * self.out * (1 - self.out)
grad_wout = np.dot(self.z3.T, En)
grad_bout = np.dot(np.ones([En.shape[0]]), En)
self.wout -= self.lr * grad_wout
self.bout -= self.lr * grad_bout
# backpropagation inter layer
grad_u2 = np.dot(En, self.wout.T) * self.z3 * (1 - self.z3)
grad_w2 = np.dot(self.z2.T, grad_u2)
grad_b2 = np.dot(np.ones([grad_u2.shape[0]]), grad_u2)
self.w2 -= self.lr * grad_w2
self.b2 -= self.lr * grad_b2
grad_u1 = np.dot(grad_u2, self.w2.T) * self.z2 * (1 - self.z2)
grad_w1 = np.dot(self.z1.T, grad_u1)
grad_b1 = np.dot(np.ones([grad_u1.shape[0]]), grad_u1)
self.w1 -= self.lr * grad_w1
self.b1 -= self.lr * grad_b1
def sigmoid(x):
return 1. / (1. + np.exp(-x))
# crop and create database
Crop_num = 200
L = 60
H_size = 32
F_n = ((H_size // 8) ** 2) * 9
db = np.zeros((Crop_num, F_n+1))
for i in range(Crop_num):
x1 = np.random.randint(W-L)
y1 = np.random.randint(H-L)
x2 = x1 + L
y2 = y1 + L
crop = np.array((x1, y1, x2, y2))
_iou = iou(gt, crop)
if _iou >= 0.5:
cv2.rectangle(img, (x1, y1), (x2, y2), (0,0,255), 1)
label = 1
else:
cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,0), 1)
label = 0
crop_area = gray[y1:y2, x1:x2]
crop_area = resize(crop_area, H_size, H_size)
_hog = hog(crop_area)
db[i, :F_n] = _hog.ravel()
db[i, -1] = label
## train neural network
nn = NN(ind=F_n, lr=0.01)
for i in range(10000):
nn.forward(db[:, :F_n])
nn.train(db[:, :F_n], db[:, -1][..., None])
# read detect target image
img2 = cv2.imread("imori_many.jpg")
H2, W2, C2 = img2.shape
# Grayscale
gray2 = 0.2126 * img2[..., 2] + 0.7152 * img2[..., 1] + 0.0722 * img2[..., 0]
# [h, w]
recs = np.array(((42, 42), (56, 56), (70, 70)), dtype=np.float32)
detects = np.ndarray((0, 5), dtype=np.float32)
# sliding window
for y in range(0, H2, 4):
for x in range(0, W2, 4):
for rec in recs:
dh = int(rec[0] // 2)
dw = int(rec[1] // 2)
x1 = max(x-dw, 0)
x2 = min(x+dw, W2)
y1 = max(y-dh, 0)
y2 = min(y+dh, H2)
region = gray2[max(y-dh,0):min(y+dh,H2), max(x-dw,0):min(x+dw,W2)]
region = resize(region, H_size, H_size)
region_hog = hog(region).ravel()
score = nn.forward(region_hog)
if score >= 0.7:
cv2.rectangle(img2, (x1, y1), (x2, y2), (0,0,255), 1)
detects = np.vstack((detects, np.array((x1, y1, x2, y2, score))))
print(detects)
cv2.imwrite("out.jpg", img2)
cv2.imshow("result", img2)
cv2.waitKey(0)
|
[
"naga.yoshi.yoshi@gmail.com"
] |
naga.yoshi.yoshi@gmail.com
|
0146a04104dc8ddd02af43bba75341ee40c82792
|
e73e318c099c5c71b750269ee84f1bbe09fffac2
|
/promis/admin.py
|
9e1f17c2da686b0bb73e82d7a11c462b1f97fda6
|
[] |
no_license
|
ArietNyshanbaev/deputat
|
f304f2deb0807241d691a8838d2d11f3e7328c46
|
6a744969a2ee7a811f31b03994eced9d571c4be2
|
refs/heads/master
| 2021-03-23T23:47:39.893956
| 2016-09-12T16:46:15
| 2016-09-12T16:46:15
| 66,534,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
# Импорт стандартных пакетов Django
from django.contrib import admin
# Импорт моделей из баззы данных
from .models import Promis, PromisRank, Result, Comments
class PromisAdmin(admin.ModelAdmin):
""" Класс Регистрации Админки для Клеинтов """
list_display = ('title', 'person', 'date', 'is_approved')
list_filter = ('date','is_approved')
admin.site.register(Promis, PromisAdmin)
class PromisRankAdmin(admin.ModelAdmin):
""" Класс Регистрации Админки для Движимости """
list_display = ('promis', 'user')
list_filter = ('promis',)
admin.site.register(PromisRank, PromisRankAdmin)
admin.site.register(Result)
admin.site.register(Comments)
|
[
"ariet.nyshanbaev@iaau.edu.kg"
] |
ariet.nyshanbaev@iaau.edu.kg
|
6f74581e0d8602c2f47bea02cff088e3ce4eadb6
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_11059.py
|
9815daed4b4f8901097b703a4e2b8c9c3ac7fe0a
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
# How to Handle JSON with escaped Unicode characters using python json module?
data = jsDat.get('data')
data = data.encode('ascii', 'ignore')
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
17968e3caa7ee7355836b9479e64c6bc5051f7d8
|
2bd8fbe6e2ee2511d00479440aa589249234c2d8
|
/01-Supervised/17-neuralNetwork/day19/day19-1-neuralNetwork-2-MLPClassifier.py
|
23e1e91ceac3ac54f709ffc568dc722516ce7512
|
[] |
no_license
|
LeenonGo/sklearn-learn
|
71d21f9b26cfb5cc6d65a22883127db873a31091
|
460d6e75e82943c802f7c025a03c821d02b5d232
|
refs/heads/master
| 2023-07-13T18:42:17.510938
| 2021-08-18T11:34:06
| 2021-08-18T11:34:06
| 371,628,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
# -*- coding: utf-8 -*-
# @Author : Lee
# @Time : 2021/7/23 9:20
# @Function: https://www.scikitlearn.com.cn/0.21.3/18/#1172
#
from sklearn.neural_network import MLPClassifier
X = [[0., 0.], [1., 1.]]
y = [0, 1]
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X, y)
print(clf.predict([[2., 2.], [-1., -2.]]))
print([coef.shape for coef in clf.coefs_])
# clf.coefs_ 包含了构建模型的权值矩阵
# 目前, MLPClassifier 只支持交叉熵损失函数,通过运行 predict_proba 方法进行概率估计。
# 使用了通过反向传播计算得到的梯度和某种形式的梯度下降来进行训练
# 最小化交叉熵损失函数,为每个样本 x 给出一个向量形式的概率估计 P(y|x)
print(clf.predict_proba([[2., 2.], [1., 2.]]))
# [[1.96718015e-04 9.99803282e-01]
# [1.96718015e-04 9.99803282e-01]]
# 表示预测[2., 2.]为标签0的概率为1.96718015e-04, 为标签1的概率为9.99803282e-01
# 此外,该模型支持 多标签分类 ,一个样本可能属于多个类别。
# 对于每个类,原始输出经过 logistic 函数变换后,大于或等于 0.5 的值将进为 1,否则为 0。
# 对于样本的预测输出,值为 1 的索引位置表示该样本的分类类别
X = [[0., 0.], [1., 1.]]
y = [[0, 1], [1, 1]]
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(15,), random_state=1)
clf.fit(X, y)
print(clf.predict([[1., 2.]]))
print(clf.predict([[0., 0.]]))
|
[
"yaa.lee@hotmail.com"
] |
yaa.lee@hotmail.com
|
e60c0abdc0b7ac4f9ffc02f61ca16a77fee99345
|
61ba1d073fdbb34ad2ae4e9d0ae1b88083faeb02
|
/Sem I/Programarea Algoritmilor/laboratoare/lab2/problema6.py
|
82197831572d9292a03a2e658d6479b36dae91a4
|
[] |
no_license
|
CosminHorjea/fmi
|
d899b639a085203963413918d2c508307cb9ba60
|
6b4911cdec64929710d984223385c1e8e36d5c7c
|
refs/heads/master
| 2023-07-09T01:04:24.418286
| 2023-06-22T21:50:00
| 2023-06-22T21:50:00
| 213,064,779
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
'''
6. Jurnalul electronic al Anei conține, în fiecare zi, câte o frază cu informații despre cheltuielile pe
care ea le-a efectuat în ziua respectivă. Scrieți un program care să citească o frază de acest tip din
jurnalul Anei și apoi să afișeze suma totală cheltuită de ea în ziua respectivă. De exemplu, pentru fraza
“Astăzi am cumpărat pâine de 5 RON, pe lapte am dat 10 RON, iar de 15 RON am cumpărat niște
cașcaval. De asemenea, mi-am cumpărat și niște papuci cu 50 RON!”, programul trebuie să afișeze
suma totală de 80 RON. Fraza se consideră corectă, adică toate numerele care apar în ea sunt
numere naturale reprezentând sume cheltuite de Ana în ziua respectivă!
'''
s = "Astăzi am cumpărat pâine de 5 RON, pe lapte am dat 10 RON, iar de 15 RON am cumpărat niște cașcaval. De asemenea, mi-am cumpărat și niște papuci cu 50 RON!"
poz = s.find("RON")
suma=0
for i in range(poz):
while(not s[i:poz-1].isnumeric()):
i += 1
# print(s[i:poz])
suma+=int(s[i:poz])
s = s[poz+3:]
poz = s.find("RON")
if(poz is -1):
break
print(suma)
|
[
"horjeacosmin@yahoo.com"
] |
horjeacosmin@yahoo.com
|
5ae0ec49d2a08f1c7a2d6df28e40d28902ff9950
|
1d450f4655ae63240e88b474b8a17c1e711e1a81
|
/AttributesAndMethods/DocumentManagement/project/topic.py
|
1244ef4688a709dd1f29d1fbdede9e2fc76badc3
|
[] |
no_license
|
RuzhaK/PythonOOP
|
f2cb396390349e2aac605c90fd7a18039653cf5e
|
68cbf321b5947b376459d7397aed36554347d256
|
refs/heads/master
| 2023-05-30T06:17:42.618984
| 2021-06-17T08:04:46
| 2021-06-17T08:04:46
| 371,284,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
class Topic:
def __init__(self,id, topic,storage_folder):
self.id=id
self.topic=topic
self.storage_folder=storage_folder
def edit(self,new_topic: str, new_storage_folder: str):
self.topic=new_topic
self.storage_folder=new_storage_folder
def __repr__(self):
return f"Topic {self.id}: {self.topic} in {self.storage_folder}"
|
[
"rkaraatanasova@gmail.com"
] |
rkaraatanasova@gmail.com
|
9aaf56b91989e82c66fa113d95de30112ec46571
|
bf2d60eaa66d20f4a463999aca15bc43a026db22
|
/app/email.py
|
6f81dd2a55cde5d2582978572a2654ce5caa7d47
|
[] |
no_license
|
kfinn6561/covid_map
|
2c60fdea3787b73b5f754eed51c56d3e276473fb
|
52d3f36375f0d610cc9b852b485f5397cb2ab642
|
refs/heads/main
| 2023-03-24T19:54:35.251710
| 2021-03-23T10:18:23
| 2021-03-23T10:18:23
| 350,666,986
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
'''
Created on 7 Jun 2020
@author: kieran
'''
from flask_mail import Message
from app import app,mail
from threading import Thread
def send_async_email(app,msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email,args=(app,msg)).start()
|
[
"kieran.finn@hotmail.com"
] |
kieran.finn@hotmail.com
|
2dbbce0ffe811caa9084b2700bb291b04f1da200
|
b372bc13b4715e87f39f80c1c2465fc6d93f3609
|
/register/tipmac.py
|
56092803f1532810fb9f495adae1b494bb9538a3
|
[
"WTFPL"
] |
permissive
|
Retenodus/Maiznet
|
4dff822f0ab3d8c08196d09f08ef169357e37c2f
|
f7fa2c4ee964cab1cc2e33feadeed826f18489b7
|
refs/heads/master
| 2021-01-24T00:02:49.168595
| 2012-06-25T23:13:02
| 2012-06-25T23:13:02
| 2,130,383
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,864
|
py
|
########################################################################
# vim: fileencoding=utf-8 ts=8 noexpandtab :
#
# ~~~~ Maiznet.fr ~~~~
#
# -> register/tipmac.py
#
#
# Copyright 2011 Grégoire Leroy <gregoire.leroy@retenodus.net>
# Copyright 2011 Rémy Sanchez <remy.sanchez@hyperthese.net>
#
# This file is distributed under the terms of the WTFPL. For more
# informations, see http://sam.zoy.org/wtfpl/COPYING
########################################################################
import socket
import re
from django.conf import settings
def isMac(mac):
"""
Retourne True si la valeur est une adresse MAC, False sinon.
"""
X = '([a-fA-F0-9]{2}[:\-]){5}[a-fA-F0-9]{2}' # this is the regex
if re.compile(X).search(mac):
return True
return False
def ip_to_mac(ip):
"""
Effectue une requête auprès du serveur tipmac, et retourne la
MAC correspondant à l'IP. Si la MAC est incorrecte, une
exception est levée.
Cette fonction dépend de paramètres à définir dans le
settings.py :
- **MAIZ_IP_GUEST**, la plage d'IP des invités. Si l'adresse
n'est pas dans cette plage, la fonction retourne tout de
suite une exception. À l'heure où ces lignes sont écrites,
cette plage est 172.17.192.0/18.
- **TIPMAC_SERVER**, l'adresse IP du serveur tipmac.
- **TIPMAC_PORT**, le port du serveur.
"""
# Teste si l'adresse IP est un invité de Maiz
import IPy
if ip == None or IPy.IPint(settings.MAIZ_IP_GUEST).overlaps(ip) != 1:
raise Exception("IP not in guest subnet")
ip_server = settings.TIPMAC_SERVER
port = settings.TIPMAC_PORT
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
s.connect((ip_server,port))
s.send(ip)
try :
mac = s.recv(17)
except :
raise Exception("No data received")
if not isMac(mac):
raise Exception("Error : invalid MAC")
s.close()
return mac
|
[
"remy.sanchez@hyperthese.net"
] |
remy.sanchez@hyperthese.net
|
1d6a50cd2453611e0a4132a1cb10f5c1d4fbffc1
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_23/models/network_interface_eth.py
|
3190b8ae65d39d8a5813962e70c371dac0b1d779
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 6,348
|
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.23
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_23 import models
class NetworkInterfaceEth(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'address': 'str',
'gateway': 'str',
'mac_address': 'str',
'mtu': 'int',
'netmask': 'str',
'subinterfaces': 'list[FixedReferenceNoId]',
'subnet': 'ReferenceNoId',
'subtype': 'str',
'vlan': 'int'
}
attribute_map = {
'address': 'address',
'gateway': 'gateway',
'mac_address': 'mac_address',
'mtu': 'mtu',
'netmask': 'netmask',
'subinterfaces': 'subinterfaces',
'subnet': 'subnet',
'subtype': 'subtype',
'vlan': 'vlan'
}
required_args = {
}
def __init__(
self,
address=None, # type: str
gateway=None, # type: str
mac_address=None, # type: str
mtu=None, # type: int
netmask=None, # type: str
subinterfaces=None, # type: List[models.FixedReferenceNoId]
subnet=None, # type: models.ReferenceNoId
subtype=None, # type: str
vlan=None, # type: int
):
"""
Keyword args:
address (str): The IPv4 or IPv6 address to be associated with the specified network interface.
gateway (str): The IPv4 or IPv6 address of the gateway through which the specified network interface is to communicate with the network.
mac_address (str): The media access control address associated with the specified network interface.
mtu (int): Maximum message transfer unit (packet) size for the network interface, in bytes. MTU setting cannot exceed the MTU of the corresponding physical interface.
netmask (str): Netmask of the specified network interface that, when combined with the address of the interface, determines the network address of the interface.
subinterfaces (list[FixedReferenceNoId]): List of network interfaces configured to be a subinterface of the specified network interface.
subnet (ReferenceNoId): Subnet that is associated with the specified network interface.
subtype (str): The subtype of the specified network interface. Only interfaces of subtype `virtual` can be created. Configurable on POST only. Valid values are `failover_bond`, `lacp_bond`, `physical`, and `virtual`.
vlan (int): VLAN ID
"""
if address is not None:
self.address = address
if gateway is not None:
self.gateway = gateway
if mac_address is not None:
self.mac_address = mac_address
if mtu is not None:
self.mtu = mtu
if netmask is not None:
self.netmask = netmask
if subinterfaces is not None:
self.subinterfaces = subinterfaces
if subnet is not None:
self.subnet = subnet
if subtype is not None:
self.subtype = subtype
if vlan is not None:
self.vlan = vlan
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `NetworkInterfaceEth`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `NetworkInterfaceEth`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `NetworkInterfaceEth`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `NetworkInterfaceEth`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(NetworkInterfaceEth, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NetworkInterfaceEth):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"noreply@github.com"
] |
PureStorage-OpenConnect.noreply@github.com
|
ab2bda27234228c0eddad94ca062b9b2d0cf30ea
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_242/ch16_2020_03_23_19_51_17_162358.py
|
4c14a0bbe8dc96d55dae4bea543ccd289b62c78f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 185
|
py
|
v = float(input('Qual o valor da conta?:'))
def valor_da_conta(v):
resultado = v * 1.1
return resultado
print('Valor da conta com 10%: R$ {0:.2f}'.format(valor_da_conta(v)))
|
[
"you@example.com"
] |
you@example.com
|
1a5fe7c6d9e954247f9388fd4b723a23fe944586
|
5286255a93db21ea9defc1f8f6fc71990c3c2fa9
|
/testing/scripts/.svn/text-base/xclean_unit_tests.py.svn-base
|
5c8066c963806e62aba75180468dffb41a007d7e
|
[] |
no_license
|
brynmathias/AnalysisV2
|
1367767dbf22eef6924700c4b0a00581ea8ed965
|
ee17c019bb04243876a51c7ef7719cc58a52adea
|
refs/heads/master
| 2021-01-01T19:20:27.277628
| 2012-04-17T13:34:26
| 2012-04-17T13:34:26
| 2,600,415
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
#!/usr/bin/env python
import setupSUSY
import unittest
#import xclean.clonevector_tests
#import xclean.basic_tests
if __name__ == '__main__':
testcases=["xclean.clonevector_tests.TestCloneVector",
"xclean.basic_tests.TestBasic"]
suite = unittest.TestLoader().loadTestsFromNames(testcases)
print "="*50
print "Cross Cleaner Unit Tests"
print "="*50
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"brynmathias@gmail.com"
] |
brynmathias@gmail.com
|
|
65062af86c81873c11336b2bac695d78088412cc
|
f08f09b23dbf3ce3b967c46c5f109b792c5c8993
|
/visual_dynamics/envs/env_spec.py
|
4145f5c2af567b74f0d3173f709f5f31f27471e0
|
[
"MIT"
] |
permissive
|
saadmahboob/visual_dynamics
|
5df6ea0f53144f0de8fdc991c9f00d78ac98c680
|
90227bb0d0aebb1989117b5c25ca311655ca7cc7
|
refs/heads/master
| 2021-06-17T05:50:22.661375
| 2017-05-12T02:53:47
| 2017-05-12T02:53:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
from visual_dynamics.spaces.base import Space
from visual_dynamics.utils.config import ConfigObject
class EnvSpec(ConfigObject):
def __init__(self, action_space, observation_space):
self._action_space = action_space
self._observation_space = observation_space
@property
def observation_space(self):
return self._observation_space
@property
def action_space(self):
return self._action_space
def _get_config(self):
config = super(EnvSpec, self)._get_config()
action_space = self.action_space
if not isinstance(action_space, ConfigObject):
action_space = Space.create(action_space)
observation_space = self.observation_space
if not isinstance(observation_space, ConfigObject):
observation_space = Space.create(observation_space)
config.update({'action_space': action_space,
'observation_space': observation_space})
return config
|
[
"alexleegk@gmail.com"
] |
alexleegk@gmail.com
|
42fa0316fcea9a59c8394e31d472a79f370cde1f
|
d4fe2607c25e514df42831ddae3f9509057c2d46
|
/USBApplication/tasks/serial_task.py
|
17f72ecaadd8834d1c0fcb55d28f2c8e6acdfd52
|
[] |
no_license
|
bxm156/EECS398
|
8cdbb1057f8d7d2fd8764df4309dd4712799d766
|
aa638d81fea008d467118691882cee73cefde147
|
refs/heads/master
| 2021-01-01T05:36:00.159758
| 2013-12-05T17:11:09
| 2013-12-05T17:11:09
| 12,497,895
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 142
|
py
|
from base_task import BaseTask
class SerialTask(BaseTask):
def set_parameters(self, param_dict):
self.parameters = param_dict
|
[
"bxm156@case.edu"
] |
bxm156@case.edu
|
2b7259f9c38153470debef2a76db963f8b051486
|
660c72411c148507b0b04c517f154df7d0396281
|
/wiki/middleware.py
|
7ad75cf3cf8b65314f7ee3a31ae1213d0886b9e0
|
[] |
no_license
|
xuguangzong/WIKI
|
e38dc3d434470c3238ebdf552768e42a7becb292
|
ac6b573ff6a658977fc97508ff90f004df3169a1
|
refs/heads/main
| 2023-06-26T15:29:55.675667
| 2021-07-23T08:39:50
| 2021-07-23T08:39:50
| 386,569,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,417
|
py
|
import time
import logging
from wiki.documents import ELASTICSEARCH_ENABLED, ElaspedTimeDocumentManager
logger = logging.getLogger(__name__)
class OnlineMiddleware(object):
def __init__(self, get_response=None):
self.get_response = get_response
super().__init__()
def __call__(self, request):
"""
page render time
:param request:
:return:
"""
start_time = time.time()
response = self.get_response(request)
http_user_agent = request.META.get('HTTP_USER_AGENT', '')
if 'spider'.upper() not in http_user_agent.upper():
try:
cast_time = time.time() - start_time
if ELASTICSEARCH_ENABLED:
time_taken = round((cast_time) * 1000, 2)
url = request.path
from django.utils import timezone
ElaspedTimeDocumentManager.create(
url=url,
time_taken=time_taken,
log_datetime=timezone.now(),
type='wiki',
useragent=http_user_agent)
response.content = response.content.replace(
b'<!!LOAD_TIMES!!>', str.encode(str(cast_time)[:5]))
except Exception as e:
logger.error("Error OnlineMiddleware: %s" % e)
return response
|
[
"2359301733@qq.com"
] |
2359301733@qq.com
|
d7a1d6e57acc930c4bd99451bd5df4c5e3bc6cfe
|
fb8ee3a962f6d690badd02409206be0724e6a659
|
/examples/bScript.py
|
a85a71e0fb6d3ca27b6c1f220cfa141ea50ff6fa
|
[] |
no_license
|
chonlei/SanPy
|
3efbfcfd46b5223c52724a57d1640a241cbde82b
|
fb9f399527adcdf01dad49011d7ecfd47ac77139
|
refs/heads/master
| 2023-07-11T03:46:37.249897
| 2021-08-11T01:54:44
| 2021-08-11T01:54:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
import sys
sys.path.append("..") # Adds higher directory to python modules path.
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from sanpy import bAnalysis
# open an abf file into a bAnalysis object
myFile = '../data/19114001.abf'
ba = bAnalysis.bAnalysis(myFile)
# detect spikes
myThreshold = 100
myMedianFilter = 3
halfHeights = [20, 50, 80]
ba.spikeDetect(dVthresholdPos=myThreshold, medianFilter=myMedianFilter, halfHeights=halfHeights)
# ba now has a number of spikes, they are all in a list called ba.spikeDict
print('number of spikes detected:', len(ba.spikeDict))
# each spike in the list is a python dictionary
# lets look at one spike
mySpikeNumber= 5
print(ba.spikeDict[mySpikeNumber])
# each spike has a number of keys (e.g. the name of the stat) and for each of those a 'value'
for key,value in ba.spikeDict[mySpikeNumber].items():
print(key, value)
for spike in ba.spikeDict:
print(spike['thresholdVal'])
# plot spike threshold (mV) versus spike time (seconds)
|
[
"robert.cudmore@gmail.com"
] |
robert.cudmore@gmail.com
|
b407fac8430611e58e38951ad808096c8da0eb7f
|
3f394cd47a1aaf0ae2f8de5ab9854f52341e017a
|
/clay/helpers.py
|
df83597113a8b027b38f28f310309ccb1a9a6517
|
[
"MIT"
] |
permissive
|
devildeveloper/Clay
|
e3771d97d23ae3ba7d866d8921102d50e95a6562
|
ca419ee4cfe191724ed68e3507515a5b258bb4bb
|
refs/heads/master
| 2021-01-18T02:27:22.094481
| 2013-11-18T20:24:02
| 2013-11-18T20:24:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
# -*- coding: utf-8 -*-
from datetime import datetime
import errno
from fnmatch import fnmatch
import io
import os
import shutil
import unicodedata
def to_unicode(txt, encoding='utf8'):
if not isinstance(txt, basestring):
txt = str(txt)
if isinstance(txt, unicode):
return txt
return unicode(txt, encoding)
def unormalize(text, form='NFD'):
return unicodedata.normalize(form, text)
def fullmatch(path, pattern):
path = unormalize(path)
name = os.path.basename(path)
return fnmatch(name, pattern) or fnmatch(path, pattern)
def read_content(path, **kwargs):
kwargs.setdefault('mode', 'rt')
with io.open(path, **kwargs) as f:
return f.read()
def make_dirs(*lpath):
path = os.path.join(*lpath)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
def create_file(path, content, encoding='utf8'):
if not isinstance(content, unicode):
content = unicode(content, encoding)
with io.open(path, 'w+t', encoding=encoding) as f:
f.write(content)
def copy_if_updated(path_in, path_out):
if os.path.exists(path_out):
newt = os.path.getmtime(path_in)
currt = os.path.getmtime(path_out)
if currt >= newt:
return
shutil.copy2(path_in, path_out)
def get_updated_datetime(path):
ut = os.path.getmtime(path)
return datetime.fromtimestamp(ut)
def sort_paths_dirs_last(paths):
def dirs_last(a, b):
return cmp(a[0].count('/'), b[0].count('/')) or cmp(a[0], b[0])
return sorted(paths, cmp=dirs_last)
|
[
"juanpablo@lucumalabs.com"
] |
juanpablo@lucumalabs.com
|
849c3e365db5121e9b999f5684403461c40b7bfd
|
2467b5d4a6d8d6ffeff547478a8dd7fa3d4d9234
|
/chapter04/demo_4.4.py
|
efcf5f6dae4cb9cfb3a2035c834d797028778720
|
[
"MIT"
] |
permissive
|
NetworkRanger/tensorflow-ml-exercise
|
6c92ec3cf87a6def0c1d7818e59c83585cc1aebe
|
d0c46c10bfc3ee06c211ebe2f25489f8407c369f
|
refs/heads/master
| 2020-04-02T22:54:57.268384
| 2018-12-22T13:52:01
| 2018-12-22T13:52:01
| 154,848,809
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,629
|
py
|
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2018/11/5 下午10:44
# 4.4 TensorFlow上核函数的使用
# 1. 导入必要编程库,创建一个计算图会话
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
sess = tf.Session()
# 2. 生成模拟数。生成的数据是一两个同心圆数据,每个不同的环代表不同的类,确保只有类-1或者1。为了让绘图方便,这里将每类数据分成x值和y值
(x_vals, y_vals) = datasets.make_circles(n_samples=500, factor=.5, noise=.1)
y_vals = np.array([1 if y==1 else -1 for y in y_vals])
class1_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i] == 1]
class1_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i] == 1]
class2_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i] == -1]
class2_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i] == -1]
# 3. 声明批量大小、占位符,创建模型变量b。对于SVM算法,为了让每次迭代训练不波动,得到一个稳定的训练模型,这时批量大小得取更大。注意,本例为预测数据点声明有额外的占位符。最后创建彩色的网格来可视化不同的区域代表不同的类别
batch_size = 250
x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
prediction_grid = tf.placeholder(shape=[None, 2], dtype=tf.float32)
b = tf.Variable(tf.random_normal(shape=[1, batch_size]))
# 4. 创建高斯核函数。该核函数用矩阵操作来表示
gamma = tf.constant(-50.0)
dist = tf.reduce_mean(tf.square(x_data), 1)
dist = tf.reshape(dist, [-1, 1])
sq_dists = tf.add(tf.subtract(dist, tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))), tf.transpose(dist))
my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))
"""
注意,在sq_dists中应用广播加法和减法操作。线性核函数可以表示为: my_kernel = tf.matmul(x_data, tf.transpose(x_data))。
"""
# 5. 声明对偶问题。为了最大化,这里采用最小化损失函数的负数: tf.negative()
model_output = tf.matmul(b, my_kernel)
first_term = tf.reduce_mean(b)
b_vec_cross = tf.matmul(tf.transpose(b), b)
y_target_cross = tf.matmul(y_target, tf.transpose(y_target))
second_term = tf.reduce_mean(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)))
loss = tf.negative(tf.subtract(first_term, second_term))
# 6. 创建预测函数和准确度函数。先创建一个预测核函数,但用预测数据点的核函数用模拟数据点的核函数。预测值是模型输出的符号函数值
rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1])
pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))
prediction_output = tf.matmul(tf.multiply(tf.transpose(y_target), b), pred_kernel)
prediction = tf.sign(prediction_output-tf.reduce_mean(prediction_output))
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.squeeze(prediction), tf.squeeze(y_target)), tf.float32))
"""
为了实现线性预测核函数,将预测核函数改为: pred_kernel = tf.matmul(x_data, tf.transpose(prediction_grid))。
"""
# 7. 创建优化器函数,初始化所有的变量
my_opt = tf.train.GradientDescentOptimizer(0.001)
train_step = my_opt.minimize(loss)
init = tf.initialize_all_variables()
sess.run(init)
# 8. 开始迭代训练
loss_vec = []
batch_accuracy = []
for i in range(500):
rand_index = np.random.choice(len(x_vals), size=batch_size)
rand_x = x_vals[rand_index]
rand_y = np.transpose([y_vals[rand_index]])
sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
loss_vec.append(temp_loss)
acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x, y_target: rand_y, prediction_grid: rand_x})
batch_accuracy.append(acc_temp)
if (i+1) % 100 == 0:
print('Step #' + str(i+1))
print('Loss = ' + str(temp_loss))
# 9. 输出结果如下
"""
Step #100
Loss = -0.040872738
Step #200
Loss = -0.04066868
Step #300
Loss = -0.04294016
Step #400
Loss = -0.042239938
Step #500
Loss = -0.043024104
"""
# 10. 为了能够在整个数据空间可视化分类返回结果,我们将创建预测数据点的网格,在其上进行预测
x_min, x_max = x_vals[:, 0].min() - 1, x_vals[:, 0].max() + 1
y_min, y_max = x_vals[:, 1].min() - 1, x_vals[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02))
grid_points = np.c_[xx.ravel(), yy.ravel()]
[grid_prediction] = sess.run(prediction, feed_dict={x_data: rand_x, y_target: rand_y, prediction_grid: grid_points})
grid_prediction = grid_prediction.reshape(xx.shape)
# 11. 下面绘制预测结果、批量准确度和损失函数
plt.contourf(xx, yy, grid_prediction, cmap=plt.cm.Paired, alpha=0.8)
plt.plot(class1_x, class1_y, 'ro', label='Class 1')
plt.plot(class2_x, class2_y, 'kx', label='Class -1')
plt.legend(loc='lower right')
plt.ylim([-1.5, 1.5])
plt.xlim([-1.5, 1.5])
plt.show()
plt.plot(batch_accuracy, 'k-', label='Accuracy')
plt.title('Batch Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
plt.plot(loss_vec, 'k-')
plt.title('Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
# 12. 简单扼要,这里只显示训练结果图,不过也可以分开运行绘图代码展示其他效果
|
[
"17346503142@163.com"
] |
17346503142@163.com
|
6f9704f52133fd74a784d3d12df74871c7595eff
|
70538979b952b8afc380bd19ac565b3967178b87
|
/docker_odoo_env/commands/command.py
|
e95e7d0446d250b93fb8a576afb260b1bd0a060c
|
[
"MIT"
] |
permissive
|
sebatista/docker_odoo_env
|
b693b9111e68f4162784ee77910c034b5dcb0b21
|
57963fb677257a71c2d9bfbf0400a78eaa62fd10
|
refs/heads/master
| 2020-04-25T06:59:31.635170
| 2019-02-25T01:42:22
| 2019-02-25T01:42:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# -*- coding: utf-8 -*-
# For copyright and license notices, see __manifest__.py file in module root
from docker_odoo_env.messages import Msg
msg = Msg()
class Command(object):
def __init__(self, config):
self._config = config
def execute(self):
raise NotImplementedError
def show_doc(self):
msg.text(self._config.args.get('command'))
exit()
|
[
"jorge.obiols@gmail.com"
] |
jorge.obiols@gmail.com
|
3b55ba6c605dd0fc783bf9b32b031d064c3a3e25
|
32bbbd6dbd100bbb9a2282f69ac3b7b34516347f
|
/Study/keras/keras40_mnist3_dnn.py
|
fb46b91e2a7ed5c39bd84d7fe794c8c87742301f
|
[] |
no_license
|
kimjh1753/AIA_Academy_Study
|
2162d4d4f1a6b8ca1870f86d540df45a8742f359
|
6022718ae7f9e5170a19c4786d096c8042894ead
|
refs/heads/master
| 2023-05-07T12:29:12.920693
| 2021-06-05T01:09:33
| 2021-06-05T01:09:33
| 324,136,796
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,065
|
py
|
# 주말과제
# dense 모델로 구성 input_shape=(28*28, )
# 인공지능계의 hello world라 불리는 mnist!!!
import numpy as np
import matplotlib.pyplot as plt
# 1. 데이터
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape) # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape) # (10000, 28, 28) (10000,)
print(x_train[0])
print("y_train[0] : ", y_train[0])
print(x_train[0].shape) # (28, 28)
# plt.imshow(x_train[0], 'gray')
# plt.imshow(x_train[0])
# plt.show()
x_train = x_train.reshape(60000, 28*28)
x_test = x_test.reshape(10000, 28*28)
# (x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1))
print(x_train.shape, x_test.shape) # (60000, 784) (10000, 784)
# OnHotEncoding
# 여러분이 하시오!!!!!
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape) # (60000, 10)
print(y_test.shape) # (10000, 10)
# 2. 모델 구성
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model = Sequential()
model.add(Dense(200, activation='relu', input_shape=(784,)))
model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(200, activation='relu'))
model.add(Dense(10, activation='softmax'))
# 실습!! 완성하시오!!!
# 지표는 acc /// 0.985 이상
# 응용
# y_test 10개와 y_pred 10개를 출력하시오
# y_test[:10] = (?,?,?,?,?,?,?,?,?,?,?)
# y_pred[:10] = (?,?,?,?,?,?,?,?,?,?,?)
# 3. 컴파일 훈련
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='loss', patience=30, mode='auto')
model.fit(x_train, y_train, epochs=2000, validation_split=0.2, batch_size=2000, callbacks=[es])
# 4. 평가, 예측
loss, acc = model.evaluate(x_test, y_test, batch_size=1)
print("loss : ", loss)
print("acc : ", acc)
y_test = np.array(model.predict(x_train[:1]))
print(y_test[:10])
print("============")
y_pred = np.array(model.predict(x_test[:1]))
print(y_pred[:10])
# keras40_mnist2_cnn
# loss : 0.00260396976955235
# acc : 0.9854999780654907
# [[8.6690171e-08 2.8707976e-08 9.1137373e-09 9.6521189e-06 4.6547077e-09
# 9.9998856e-01 7.6187533e-08 5.5741470e-08 1.3864026e-06 2.0224462e-07]]
# ============
# [[7.0327958e-30 2.2413428e-23 6.9391834e-21 9.2217209e-22 5.1841172e-22
# 8.7506048e-26 2.4799229e-27 1.0000000e+00 8.0364114e-26 3.3208760e-17]]
# keras40_mnist3_dnn
# loss : 0.005172424484044313
# acc : 0.9724000096321106
# [[9.4863184e-15 2.2668929e-19 1.8625454e-22 5.9676188e-07 2.5733180e-25
# 9.9999940e-01 1.5588427e-20 7.8994310e-23 5.6835017e-22 2.6443269e-20]]
# ============
# [[3.0520350e-26 2.7246760e-23 4.5444517e-25 3.6449811e-28 1.3460386e-28
# 2.1042897e-27 6.9805158e-30 1.0000000e+00 1.8761058e-26 2.6409651e-25]]
|
[
"kimjh1753@naver.com"
] |
kimjh1753@naver.com
|
ea5831164ca916edd5d87547c9867c3506951b19
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/scattermapbox/marker/colorbar/_ticksuffix.py
|
f10a7f2ec190f24f5ab3753b5e8bbd093bd710b0
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
import _plotly_utils.basevalidators
class TicksuffixValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the plotly property
    ``scattermapbox.marker.colorbar.ticksuffix``."""
    def __init__(
        self,
        plotly_name="ticksuffix",
        parent_name="scattermapbox.marker.colorbar",
        **kwargs
    ):
        super(TicksuffixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Changing this property triggers a "calc" edit unless the
            # caller overrides the edit type.
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs
        )
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
c5e6b35ebc6e4bdbfceaaeb25098ab18f8b9b869
|
667f153e47aec4ea345ea87591bc4f5d305b10bf
|
/Solutions/Ch5Ex113.py
|
c53cc96c6971b3a0a5a3f6418b386e6ee3f1b5fb
|
[] |
no_license
|
Parshwa-P3/ThePythonWorkbook-Solutions
|
feb498783d05d0b4e5cbc6cd5961dd1e611f5f52
|
5694cb52e9e9eac2ab14b1a3dcb462cff8501393
|
refs/heads/master
| 2022-11-15T20:18:53.427665
| 2020-06-28T21:50:48
| 2020-06-28T21:50:48
| 275,670,813
| 1
| 0
| null | 2020-06-28T21:50:49
| 2020-06-28T21:26:01
|
Python
|
UTF-8
|
Python
| false
| false
| 583
|
py
|
# Ch5Ex113.py
# Author: Parshwa Patil
# ThePythonWorkbook Solutions
# Exercise No. 113
# Title: Formatting a List
from Ch5Ex107 import inputWordList
def formatWordList(words):
	"""Render a list as natural English: "a", "a and b", "a, b and c".

	An empty list yields the empty string.  Items are converted with str().
	"""
	if not words:
		return ""
	if len(words) == 1:
		return str(words[0])
	if len(words) == 2:
		return "%s and %s" % (words[0], words[1])
	# Three or more: comma-join everything but the last two, then recurse
	# to format the final pair as "x and y".
	leading = "".join(str(w) + ", " for w in words[:-2])
	return leading + formatWordList(words[-2:])
def main():
    # Read a word list from the user (inputWordList comes from Ch5Ex107),
    # format it as natural English, and print the result.
    words = inputWordList()
    res = formatWordList(words)
    print(res)
# Run only when executed as a script, not when imported for formatWordList.
if __name__ == "__main__": main()
|
[
"noreply@github.com"
] |
Parshwa-P3.noreply@github.com
|
40ac2a92a23c0cf29aaa67a5f202967f7527b8dc
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/4040/494004040.py
|
8b2f5f83eb14d0b75d277c1ae37a8190e141fa8f
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
from bots.botsconfig import *
from records004040 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'TP',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'REN', MIN: 1, MAX: 1},
{ID: 'DK', MIN: 1, MAX: 1},
{ID: 'PI', MIN: 1, MAX: 8},
{ID: 'PR', MIN: 0, MAX: 200},
{ID: 'SS', MIN: 0, MAX: 1},
{ID: 'SA', MIN: 1, MAX: 1},
{ID: 'CD', MIN: 0, MAX: 150},
{ID: 'GY', MIN: 0, MAX: 150},
{ID: 'RAB', MIN: 0, MAX: 12},
{ID: 'PT', MIN: 0, MAX: 50},
{ID: 'LX', MIN: 0, MAX: 1, LEVEL: [
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PI', MIN: 0, MAX: 15},
]},
{ID: 'R9', MIN: 0, MAX: 10, LEVEL: [
{ID: 'R2B', MIN: 0, MAX: 10, LEVEL: [
{ID: 'R2C', MIN: 0, MAX: 10},
]},
]},
{ID: 'SCL', MIN: 0, MAX: 999, LEVEL: [
{ID: 'RD', MIN: 0, MAX: 6},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
19dad8cafa255111037ec4d564a616e8fe94fe5d
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2819/60580/293103.py
|
a5bcdd091887b7679ef45973eb5fbcfc0d5f124a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
import math
# Reads a count (unused) and a list of group sizes 1..4 from stdin, then
# prints the minimum number of capacity-4 bins needed to hold all groups
# (greedy pairing: 2+2, 3+1, a leftover 2 with up to two 1s, 1s four at a time).
size = int(input())  # declared element count; not used further
tempList = input().split()
intList = []
for var in tempList:
    intList.append(int(var))
# Tally how many groups exist of each size.
d = {}
for var in intList:
    if var in d.keys():
        d[var] += 1
    else:
        d[var] = 1
l = sorted(d.keys())
realD = {}
for i in l:
    realD[i] = d[i]
result = 0
resultD = {}
resultD[1] = 0
resultD[2] = 0
resultD[3] = 0
for key, value in realD.items():
    if key == 4:
        result += value          # each size-4 group fills a bin by itself
    if key == 2:
        result += value // 2     # two size-2 groups share one bin
        resultD[2] = value % 2   # at most one unpaired size-2 group remains
    if key == 3 or key == 1:
        resultD[key] = value
# Pair every size-3 group with one size-1 group (3s still need a bin each
# even if no 1s remain).
result += resultD[3]
resultD[1] = resultD[1] - resultD[3]
if resultD[1] <= 0:
    if resultD[2] == 1:
        result += 1              # leftover size-2 group gets its own bin
else:
    if resultD[2] == 1:
        resultD[1] -= 2          # leftover 2 rides with up to two 1s
        result += 1
    if resultD[1] > 0:
        result += math.ceil(resultD[1] / 4)   # remaining 1s, four per bin
print(result)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
1d8c1e36a64b2846dae7a9d01729e5c129613ed3
|
c84d807bd359ae58ed5e115a51fb85be5ac93262
|
/11_binary_search/9_search_rotated_array_duplicates.py
|
e650cdc1c68f500010263519962cbcf78ade74c2
|
[] |
no_license
|
terrifyzhao/educative
|
bc35d1d10ea280ddc50b1a2708c0e22a7a5cd6d0
|
7a5c82abeb7853a9a1262e28b2fe58a20f547802
|
refs/heads/master
| 2020-11-28T04:25:30.425519
| 2020-03-05T10:23:09
| 2020-03-05T10:23:09
| 229,702,388
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
def search_rotated_with_duplicates(arr, key):
    """Binary-search `key` in a rotated sorted array that may contain
    duplicates; return an index of `key` or -1 if absent.

    When the values at both ends equal the middle value we cannot tell
    which half is sorted, so one element is shed from each end — with
    many duplicates the worst case therefore degrades to O(n).
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        if arr[mid] == key:
            return mid
        if arr[lo] == arr[mid] == arr[hi]:
            # Ambiguous: shrink the window by one from both sides.
            lo, hi = lo + 1, hi - 1
        elif arr[lo] <= arr[mid]:
            # Left half [lo, mid] is sorted ascending.
            if arr[lo] <= key < arr[mid]:
                hi = mid - 1
            else:
                lo = mid + 1
        else:
            # Right half [mid, hi] is sorted ascending.
            if arr[mid] < key <= arr[hi]:
                lo = mid + 1
            else:
                hi = mid - 1
    return -1
def search_rotated_with_duplicates2(arr, key):
    """Binary-search `key` in a rotated sorted array with duplicates.

    Returns an index of `key`, or -1 when absent.  Worst case O(n) when
    the array is dominated by one repeated value.

    Fixes two defects in the original:
    * `start = mid` / `end = mid` never excluded `mid` even though
      `arr[mid] != key`, so e.g. ([1, 2], 3) looped forever.
    * the duplicate-shrinking inner `while` never re-checked
      `start <= end`, so an all-equal array like ([2, 2, 2], 1) walked
      past the ends and raised IndexError.
    """
    start, end = 0, len(arr) - 1
    while start <= end:
        mid = start + (end - start) // 2
        if arr[mid] == key:
            return mid
        if arr[start] == arr[mid] == arr[end]:
            # Can't tell which half is sorted; shed one element from each
            # end and re-enter the loop so the bounds check still applies.
            start += 1
            end -= 1
            continue
        if arr[start] <= arr[mid]:  # left half sorted ascending
            if arr[start] <= key < arr[mid]:
                end = mid - 1
            else:
                start = mid + 1  # mid can be excluded: arr[mid] != key
        else:  # right half sorted ascending
            if arr[mid] < key <= arr[end]:
                start = mid + 1
            else:
                end = mid - 1
    return -1
def main():
    # Smoke test: 7 sits at index 1 of the rotated, duplicate-heavy array.
    print(search_rotated_with_duplicates([3, 7, 3, 3, 3], 7))
# Executed unconditionally (no __main__ guard in the original script).
main()
|
[
"zjiuzhou@gmail.com"
] |
zjiuzhou@gmail.com
|
8a3938da7e8df9e31d0c8249ff2d5bd9dcdbeb84
|
80c8d4e84f2ea188a375ff920a4adbd9edaed3a1
|
/third/opencv/gaussian_mix.py
|
f7aebe86c7fe07415fabff67ac7d4fa8ccebf641
|
[
"MIT"
] |
permissive
|
Birkid/penter
|
3a4b67801d366db15ca887c31f545c8cda2b0766
|
0200f40c9d01a84c758ddcb6a9c84871d6f628c0
|
refs/heads/master
| 2023-08-22T14:05:43.106499
| 2021-10-20T07:10:10
| 2021-10-20T07:10:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,141
|
py
|
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
from numpy import random
def make_gaussians(cluster_n, img_size):
    """Sample `cluster_n` random 2-D Gaussian clusters inside an
    img_size x img_size box.

    Returns (points, ref_distrs): a float32 (N, 2) array of all samples
    stacked together, and a list of the (mean, cov) pairs they were
    drawn from.  Each cluster contributes 100-999 points.
    """
    all_pts, ref_distrs = [], []
    for _ in range(cluster_n):
        mean = (0.1 + 0.8 * random.rand(2)) * img_size
        a = (random.rand(2, 2) - 0.5) * img_size * 0.1
        # a.T @ a is positive semi-definite; the added diagonal keeps the
        # covariance well-conditioned.
        cov = np.dot(a.T, a) + img_size * 0.05 * np.eye(2)
        count = 100 + random.randint(900)
        all_pts.append(random.multivariate_normal(mean, cov, count))
        ref_distrs.append((mean, cov))
    return np.float32(np.vstack(all_pts)), ref_distrs
def draw_gaussain(img, mean, cov, color):
    """Draw a 3-sigma ellipse of the 2-D Gaussian (mean, cov) onto img.

    SVD of the covariance gives the squared axis lengths (singular
    values) and the rotation of the ellipse.
    """
    x, y = np.int32(mean)
    w, u, _vt = cv.SVDecomp(cov)
    # Orientation of the principal axis, in degrees.
    ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
    # Semi-axes: three standard deviations along each principal direction.
    s1, s2 = np.sqrt(w)*3.0
    cv.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv.LINE_AA)
def main():
    """Interactive demo: fit a Gaussian mixture with OpenCV's EM.

    Loops forever: sample random clusters, fit an EM model with the true
    cluster count, then render the samples (white dots), the reference
    Gaussians (green ellipses) and the recovered ones (red).  Any key
    resamples; ESC exits.
    """
    cluster_n = 5
    img_size = 512
    print('press any key to update distributions, ESC - exit\n')
    while True:
        print('sampling distributions...')
        points, ref_distrs = make_gaussians(cluster_n, img_size)
        print('EM (opencv) ...')
        em = cv.ml.EM_create()
        em.setClustersNumber(cluster_n)
        em.setCovarianceMatrixType(cv.ml.EM_COV_MAT_GENERIC)
        em.trainEM(points)
        means = em.getMeans()
        covs = em.getCovs()  # Known bug: https://github.com/opencv/opencv/pull/4232
        found_distrs = zip(means, covs)
        print('ready!\n')
        img = np.zeros((img_size, img_size, 3), np.uint8)
        for x, y in np.int32(points):
            cv.circle(img, (x, y), 1, (255, 255, 255), -1)
        # Reference distributions in green, EM estimates in red (BGR order).
        for m, cov in ref_distrs:
            draw_gaussain(img, m, cov, (0, 255, 0))
        for m, cov in found_distrs:
            draw_gaussain(img, m, cov, (0, 0, 255))
        cv.imshow('gaussian mixture', img)
        ch = cv.waitKey(0)
        if ch == 27:  # ESC
            break
    print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
|
[
"350840291@qq.com"
] |
350840291@qq.com
|
f7cc3625a1915ecc9b75645903de41058b2871d8
|
41c0d29efcb3ac0e22237bd3fadc5cdf550698cd
|
/homeassistant/helpers/check_config.py
|
4052a94b9de9b74054a64a28de168d2b2c8172b9
|
[
"Apache-2.0"
] |
permissive
|
EthanW1215/home-assistant
|
7c19ce668821f3063b3d46f9e9a0ef5a6e102689
|
a48ac4d18fab253572780671f896b3a417322699
|
refs/heads/master
| 2020-09-05T09:02:59.513681
| 2019-11-05T18:57:08
| 2019-11-05T18:57:08
| 220,050,094
| 2
| 0
|
Apache-2.0
| 2019-11-06T17:13:33
| 2019-11-06T17:13:32
| null |
UTF-8
|
Python
| false
| false
| 6,303
|
py
|
"""Helper to check the configuration file."""
from collections import OrderedDict, namedtuple
from typing import List
import attr
import voluptuous as vol
from homeassistant import loader
from homeassistant.core import HomeAssistant
from homeassistant.config import (
CONF_CORE,
CORE_CONFIG_SCHEMA,
CONF_PACKAGES,
merge_packages_config,
_format_config_error,
find_config_file,
load_yaml_config_file,
extract_domain_configs,
config_per_platform,
)
from homeassistant.requirements import (
async_get_integration_with_requirements,
RequirementsNotFound,
)
import homeassistant.util.yaml.loader as yaml_loader
from homeassistant.exceptions import HomeAssistantError
# mypy: allow-untyped-calls, allow-untyped-defs, no-warn-return-any
CheckConfigError = namedtuple("CheckConfigError", "message domain config")
@attr.s
class HomeAssistantConfig(OrderedDict):
    """Validated configuration (domain -> config) plus accumulated errors.

    Behaves like an OrderedDict of per-domain configuration, with an
    ``errors`` list of CheckConfigError tuples attached via attrs.
    """
    errors: List[CheckConfigError] = attr.ib(default=attr.Factory(list))
    def add_error(self, message, domain=None, config=None):
        """Record a single CheckConfigError; returns self for chaining."""
        self.errors.append(CheckConfigError(str(message), domain, config))
        return self
    @property
    def error_str(self) -> str:
        """All error messages joined with newlines."""
        return "\n".join([err.message for err in self.errors])
async def async_check_ha_config_file(hass: HomeAssistant) -> HomeAssistantConfig:
    """Load and check if Home Assistant configuration file is valid.

    Never raises for configuration problems: every failure is collected
    on the returned HomeAssistantConfig's ``errors`` list instead.
    This method is a coroutine.
    """
    config_dir = hass.config.config_dir
    result = HomeAssistantConfig()
    # Error callbacks handed to the package merger / used for component
    # validation; both close over `result` (and `core_config`).
    def _pack_error(package, component, config, message):
        """Handle errors from packages: _log_pkg_error."""
        message = "Package {} setup failed. Component {} {}".format(
            package, component, message
        )
        domain = f"homeassistant.packages.{package}.{component}"
        pack_config = core_config[CONF_PACKAGES].get(package, config)
        result.add_error(message, domain, pack_config)
    def _comp_error(ex, domain, config):
        """Handle errors from components: async_log_exception."""
        result.add_error(_format_config_error(ex, domain, config), domain, config)
    # Load configuration.yaml (file I/O runs in the executor pool).
    try:
        config_path = await hass.async_add_executor_job(find_config_file, config_dir)
        if not config_path:
            return result.add_error("File configuration.yaml not found.")
        config = await hass.async_add_executor_job(load_yaml_config_file, config_path)
    except FileNotFoundError:
        return result.add_error(f"File not found: {config_path}")
    except HomeAssistantError as err:
        return result.add_error(f"Error loading {config_path}: {err}")
    finally:
        # Secrets were cached during YAML loading; always drop them.
        yaml_loader.clear_secret_cache()
    # Extract and validate core [homeassistant] config
    try:
        core_config = config.pop(CONF_CORE, {})
        core_config = CORE_CONFIG_SCHEMA(core_config)
        result[CONF_CORE] = core_config
    except vol.Invalid as err:
        result.add_error(err, CONF_CORE, core_config)
        core_config = {}
    # Merge packages
    await merge_packages_config(
        hass, config, core_config.get(CONF_PACKAGES, {}), _pack_error
    )
    core_config.pop(CONF_PACKAGES, None)
    # Filter out repeating config sections ("domain extra" keys collapse
    # onto their base domain).
    components = set(key.split(" ")[0] for key in config.keys())
    # Process and validate config
    for domain in components:
        try:
            integration = await async_get_integration_with_requirements(hass, domain)
        except (RequirementsNotFound, loader.IntegrationNotFound) as ex:
            result.add_error(f"Component error: {domain} - {ex}")
            continue
        try:
            component = integration.get_component()
        except ImportError as ex:
            result.add_error(f"Component error: {domain} - {ex}")
            continue
        config_schema = getattr(component, "CONFIG_SCHEMA", None)
        if config_schema is not None:
            try:
                config = config_schema(config)
                result[domain] = config[domain]
            except vol.Invalid as ex:
                _comp_error(ex, domain, config)
                continue
        component_platform_schema = getattr(
            component,
            "PLATFORM_SCHEMA_BASE",
            getattr(component, "PLATFORM_SCHEMA", None),
        )
        if component_platform_schema is None:
            continue
        platforms = []
        for p_name, p_config in config_per_platform(config, domain):
            # Validate component specific platform schema
            try:
                p_validated = component_platform_schema(p_config)
            except vol.Invalid as ex:
                _comp_error(ex, domain, config)
                continue
            # Not all platform components follow same pattern for platforms
            # So if p_name is None we are not going to validate platform
            # (the automation component is one of them)
            if p_name is None:
                platforms.append(p_validated)
                continue
            try:
                p_integration = await async_get_integration_with_requirements(
                    hass, p_name
                )
                platform = p_integration.get_platform(domain)
            except (
                loader.IntegrationNotFound,
                RequirementsNotFound,
                ImportError,
            ) as ex:
                result.add_error(f"Platform error {domain}.{p_name} - {ex}")
                continue
            # Validate platform specific schema
            platform_schema = getattr(platform, "PLATFORM_SCHEMA", None)
            if platform_schema is not None:
                try:
                    p_validated = platform_schema(p_validated)
                except vol.Invalid as ex:
                    _comp_error(ex, f"{domain}.{p_name}", p_validated)
                    continue
            platforms.append(p_validated)
        # Remove config for current component and add validated config back in.
        for filter_comp in extract_domain_configs(config, domain):
            del config[filter_comp]
        result[domain] = platforms
    return result
|
[
"balloob@gmail.com"
] |
balloob@gmail.com
|
d7272c54660fa14d98ac4d2516403bfb2e29ff54
|
1284718203be50b23dcd1f6159746cfa42a04163
|
/python_visual_mpc/visual_mpc_core/agent/create_configs_agent.py
|
a2b76acca59fe3533af98db31afeec8a2b65ee17
|
[] |
no_license
|
febert/robustness_via_retrying
|
8fe4106d7705228ff339f9643518a80c0a243d36
|
1def282dc22f24b72c51ff1ef9ea1a7a83291369
|
refs/heads/master
| 2020-03-31T19:33:39.664525
| 2018-11-07T21:52:56
| 2018-11-07T21:52:56
| 152,502,702
| 17
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
""" This agent is responsible for creating experiment configurations for benchmarks """
from .general_agent import GeneralAgent
class CreateConfigAgent(GeneralAgent):
    """Agent that creates experiment configurations for benchmarks by
    perturbing the environment instead of executing a policy."""
    def __init__(self, hyperparams):
        super().__init__(hyperparams)
    def rollout(self, policy, i_trial, i_traj):
        """Generate one trajectory of observations.

        `policy`, `i_trial` and `i_traj` are accepted for interface
        compatibility with GeneralAgent but are unused here; no policy
        is ever queried, so `policy_outputs` stays empty.

        Returns (agent_data, obs, policy_outputs); on failure returns
        ({'traj_ok': False}, None, None).
        """
        # Take the sample.
        self._init()
        agent_data, policy_outputs = {}, []
        agent_data['traj_ok'] = True
        initial_env_obs, reset_state = self.env.reset()
        agent_data['reset_state'] = reset_state
        obs = self._post_process_obs(initial_env_obs, agent_data, initial_obs=True)
        for t in range(self._hyperparams['T']):
            # Perturb the scene each step (arm and objects), then record
            # the resulting observation.
            self.env.move_arm() # should look into creating one "generate task" function for long term....
            self.env.move_objects()
            try:
                obs = self._post_process_obs(self.env.current_obs(), agent_data)
            except ValueError:
                # Observation post-processing failed; flag the trajectory bad.
                return {'traj_ok': False}, None, None
        return agent_data, obs, policy_outputs
|
[
"sdasari@berkeley.edu"
] |
sdasari@berkeley.edu
|
4c751b204ddfbb53faf33d7dc3ac55f1264ffed0
|
66276325d623c894c9e6344bb161f3c25974a838
|
/LeetCode/1000.Minimum-Cost-To-Merge-Stones/Minimum-Cost-To-Merge-Stones.py
|
28e75801dcc20fa763cdee38273e25d1f9a94f16
|
[] |
no_license
|
htingwang/HandsOnAlgoDS
|
034b5199b394ca82fd4fb16614ddabb45f3325e2
|
5b14b6f42baf59b04cbcc8e115df4272029b64c8
|
refs/heads/master
| 2021-07-11T15:50:30.944794
| 2020-09-27T05:08:02
| 2020-09-27T05:08:02
| 192,391,446
| 12
| 2
| null | 2019-07-03T04:09:35
| 2019-06-17T17:36:01
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 874
|
py
|
import heapq
class Solution(object):
    """LeetCode 1000: minimum cost to merge piles of stones."""
    def mergeStones(self, stones, K):
        """Return the minimum total cost to merge all piles into one,
        merging exactly K consecutive piles at a time (cost of a merge is
        the sum of the merged piles), or -1 when no merge sequence works.

        Interval DP: cost[lo][hi] is the cheapest way to reduce
        stones[lo..hi] as far as the K-merge rule allows; a range merges
        down to a single pile only when its length fits (hi-lo) % (K-1) == 0.

        :type stones: List[int]
        :type K: int
        :rtype: int
        """
        n = len(stones)
        # Every merge removes K-1 piles, so n must satisfy this to end at 1.
        if (n - 1) % (K - 1) != 0:
            return -1
        prefix = [0]
        for s in stones:
            prefix.append(prefix[-1] + s)
        cost = [[0] * n for _ in range(n)]
        for length in range(K, n + 1):
            for lo in range(n - length + 1):
                hi = lo + length - 1
                best = float('inf')
                # Left part must reduce to a single pile, hence step K-1.
                for cut in range(lo, hi, K - 1):
                    cand = cost[lo][cut] + cost[cut + 1][hi]
                    if cand < best:
                        best = cand
                cost[lo][hi] = best
                if (hi - lo) % (K - 1) == 0:
                    # The whole range collapses into one pile: pay its sum.
                    cost[lo][hi] += prefix[hi + 1] - prefix[lo]
        return cost[0][n - 1]
|
[
"49448790+htingwang@users.noreply.github.com"
] |
49448790+htingwang@users.noreply.github.com
|
0665059974ce35e023c58e61975a130e51d29396
|
6d24fb1c67771e7285dea61840f9766013589dd1
|
/manage.py
|
d7031ecdad34e4d427ed940b884e8d8c8477c930
|
[] |
no_license
|
PandaBalu/gglobal
|
b045fb66f7daea8eeb8d6c62f5dc872ff0b1b246
|
c063c44c30d023bf562c0b4b39d10161540e7a92
|
refs/heads/master
| 2020-12-10T03:14:00.710720
| 2017-06-26T12:22:05
| 2017-06-26T12:22:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# gglobal directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, 'gglobal'))
execute_from_command_line(sys.argv)
|
[
"narnikgamarnikus@gmail.com"
] |
narnikgamarnikus@gmail.com
|
3738ae3586b803368bac15fdb7da8a6778e9353d
|
473645b727129e33ab12b42ecece255db73dfcfc
|
/PatObjectOwnRefProducer/patobjectownrefproducer_cfg.py
|
bcd95a65ab955d338995758f308c9b88bc41d278
|
[] |
no_license
|
jpata/AnalysisModules
|
b3c17ff60ec31b76798ff8a473397b5728e96ca7
|
02d9d3e28f937c683616c7be4efeddf8874f571c
|
refs/heads/master
| 2021-01-21T07:39:34.632912
| 2014-07-02T07:47:48
| 2014-07-02T07:47:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,644
|
py
|
import FWCore.ParameterSet.Config as cms
import os
inFile = os.environ["TESTING_FILE"]
process = cms.Process("OWNPARTICLES")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger=cms.Service("MessageLogger",
destinations=cms.untracked.vstring('cout'),
debugModules=cms.untracked.vstring('patJetsPuCleaned'),
cout=cms.untracked.PSet(threshold=cms.untracked.string('DEBUG'))
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
inFile
)
)
process.patJetsWithOwnRef = cms.EDProducer('PatObjectOwnRefProducer<pat::Jet>',
src=cms.InputTag("selectedPatJets")
)
process.patJetsPuCleaned = cms.EDProducer('CleanNoPUJetProducer',
# jetSrc = cms.InputTag("patJetsWithOwnRef"),
jetSrc = cms.InputTag("selectedPatJets"),
PUidMVA = cms.InputTag("puJetMva", "fullDiscriminant", "PAT"),
PUidFlag = cms.InputTag("puJetMva", "fullId", "PAT"),
PUidVars = cms.InputTag("puJetId", "", "PAT"),
isOriginal = cms.bool(True)
)
process.simpleAnalyzer = cms.EDAnalyzer(
'SimpleEventAnalyzer',
interestingCollections = cms.untracked.VInputTag([
"selectedPatJets",
"patJetsWithOwnRef",
]),
maxObjects=cms.untracked.uint32(1)
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('test.root')
)
process.p = cms.Path(
process.patJetsWithOwnRef
* process.simpleAnalyzer
* process.patJetsPuCleaned
)
process.e = cms.EndPath(process.out)
|
[
"joosep.pata@gmail.com"
] |
joosep.pata@gmail.com
|
dfe1d9dd9dfdd2d164927e535fef2fab31bc1835
|
5e8342e4f6e48688f4a0079310e8f0b5e5386044
|
/POO/Factura/factura.py
|
9f45e3faba5e32f086d57287d8428a3e6b3f8bb6
|
[] |
no_license
|
fernado1981/python_
|
27a154406b5fba7e18da418bc5f75c58f3ccc24f
|
7d846cd332405464fa14707ea3f2286a918fc9de
|
refs/heads/master
| 2023-02-15T19:30:02.257345
| 2021-01-21T10:35:46
| 2021-01-21T10:35:46
| 277,186,729
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
from InteractuarFactura import InteractuarFactura
# Escribir un programa que gestione las facturas pendientes de cobro de una empresa.
# Las facturas se almacenarán en un diccionario donde la clave de cada factura será el número de factura
# y el valor el coste de la factura. El programa debe preguntar al usuario si quiere añadir una nueva factura,
# pagar una existente o terminar. Si desea añadir una nueva factura se preguntará por el número de factura
# y su coste y se añadirá al diccionario. Si se desea pagar una factura se preguntará por el número de factura
# y se eliminará del diccionario. Después de cada operación el programa debe mostrar por pantalla la cantidad
# cobrada hasta el momento y la cantidad pendiente de cobro.
class factura:
    # NOTE(review): this "class" is used as a script container — every
    # statement below runs exactly once, at class-definition (import) time,
    # not per instance.  Presumably the exercise intended a function or an
    # __init__; confirm before relying on this behavior.
    clave = int(input("Numero de Factura: "))       # invoice number
    valor = float(input("Coste de la factura: "))   # invoice cost
    fact = InteractuarFactura(clave, valor)
    fact.anadirOrden()
    fact.pagarOrden()
    fact.terminar()
|
[
"fernando.manrique.villanueva@gmail.com"
] |
fernando.manrique.villanueva@gmail.com
|
0f5bee18ae85a0d5b0a6a43a1130f89838d0cf3c
|
49d419d657d4fc29b486fb97c4409b904fe43012
|
/pytorch/synaptic/gan.py
|
0efae754f48b139ef3a96e9884a969c73b9f460a
|
[] |
no_license
|
anantguptadbl/python
|
660101e7284fb24bd269659bb8f461f7f13d47b6
|
4954efbe52ff3190201a8c0836d80015d13d4d15
|
refs/heads/master
| 2022-03-23T03:52:47.956875
| 2022-02-24T12:26:29
| 2022-02-24T12:26:29
| 114,909,673
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,581
|
py
|
import os
import numpy as np
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
# FIRST SHAPE GAN
learning_rate=0.001
n_classes=4
embedding_dim=10
latent_dim=10
# CONDITIONAL GAN
class Generator(nn.Module):
    """Conditional DCGAN generator: maps (noise, class label) to a 3x128x128 image.

    The label is embedded and projected to a 1x4x4 plane, the latent vector is
    projected to a 512x4x4 block, and five stride-2 transposed convolutions
    upsample the concatenated 513x4x4 tensor (4->8->16->32->64->128), ending in
    tanh so outputs lie in [-1, 1] (matching the Normalize(0.5, 0.5) inputs).
    """

    def __init__(self):
        super(Generator, self).__init__()
        # Label branch: embed the class id, then project to 16 = 1*4*4 values.
        self.label_conditioned_generator = nn.Sequential(
            nn.Embedding(n_classes, embedding_dim),
            nn.Linear(embedding_dim, 16)
        )
        # Latent branch: project the noise vector to a 512x4x4 feature block.
        self.latent = nn.Sequential(
            nn.Linear(latent_dim, 4*4*512),
            nn.LeakyReLU(0.2, inplace=True)
        )
        # 513 input channels = 512 latent channels + 1 label plane.
        self.model = nn.Sequential(nn.ConvTranspose2d(513, 64*8, 4, 2, 1, bias=False),
                                   nn.BatchNorm2d(64*8, momentum=0.1, eps=0.8),
                                   nn.ReLU(True),
                                   nn.ConvTranspose2d(64*8, 64*4, 4, 2, 1, bias=False),
                                   nn.BatchNorm2d(64*4, momentum=0.1, eps=0.8),
                                   nn.ReLU(True),
                                   nn.ConvTranspose2d(64*4, 64*2, 4, 2, 1, bias=False),
                                   nn.BatchNorm2d(64*2, momentum=0.1, eps=0.8),
                                   nn.ReLU(True),
                                   nn.ConvTranspose2d(64*2, 64*1, 4, 2, 1, bias=False),
                                   nn.BatchNorm2d(64*1, momentum=0.1, eps=0.8),
                                   nn.ReLU(True),
                                   nn.ConvTranspose2d(64*1, 3, 4, 2, 1, bias=False),
                                   nn.Tanh()
                                   )

    def forward(self, inputs):
        """inputs: (noise_vector, label) tuple; returns a batch of generated images."""
        noise_vector, label = inputs
        label_output = self.label_conditioned_generator(label)
        label_output = label_output.view(-1, 1, 4, 4)
        latent_output = self.latent(noise_vector)
        latent_output = latent_output.view(-1, 512, 4, 4)
        # Concatenate the label plane with the latent features on the channel axis.
        concat = torch.cat((latent_output, label_output), dim=1)
        image = self.model(concat)
        #print(image.size())
        return image
class Discriminator(nn.Module):
    """Conditional DCGAN discriminator: scores (image, class label) pairs as real/fake.

    The label is embedded and expanded to a full 3x128x128 plane stack that is
    concatenated with the image (6 input channels), then strided convolutions
    reduce it to a single sigmoid probability per sample.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        # Label branch: embed the class id and expand it to image-shaped planes.
        self.label_condition_disc = nn.Sequential(nn.Embedding(n_classes, embedding_dim),
                                                  nn.Linear(embedding_dim, 3*128*128))
        # 6 input channels = 3 image channels + 3 label planes.
        self.model = nn.Sequential(nn.Conv2d(6, 64, 4, 2, 1, bias=False),
                                   nn.LeakyReLU(0.2, inplace=True),
                                   nn.Conv2d(64, 64*2, 4, 3, 2, bias=False),
                                   nn.BatchNorm2d(64*2, momentum=0.1, eps=0.8),
                                   nn.LeakyReLU(0.2, inplace=True),
                                   nn.Conv2d(64*2, 64*4, 4, 3, 2, bias=False),
                                   nn.BatchNorm2d(64*4, momentum=0.1, eps=0.8),
                                   nn.LeakyReLU(0.2, inplace=True),
                                   nn.Conv2d(64*4, 64*8, 4, 3, 2, bias=False),
                                   nn.BatchNorm2d(64*8, momentum=0.1, eps=0.8),
                                   nn.LeakyReLU(0.2, inplace=True),
                                   nn.Flatten(),
                                   nn.Dropout(0.4),
                                   # 4608 = flattened feature count for 128x128 inputs
                                   nn.Linear(4608, 1),
                                   nn.Sigmoid()
                                   )

    def forward(self, inputs):
        """inputs: (image_batch, label) tuple; returns per-sample "real" probabilities."""
        img, label = inputs
        label_output = self.label_condition_disc(label)
        label_output = label_output.view(-1, 3, 128, 128)
        concat = torch.cat((img, label_output), dim=1)
        output = self.model(concat)
        return output
batch_size=16

# Resize to 128x128 and normalize each RGB channel to [-1, 1].
train_transform = transforms.Compose([
    transforms.Resize(128),
    transforms.ToTensor(),
    transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])])
train_dataset = datasets.ImageFolder(root='rps', transform=train_transform)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)

# Models
discriminator = Discriminator()
generator = Generator()

# Loss
# NOTE(review): the discriminator uses BCE, but the generator is trained with
# MSE against the "real" targets (least-squares-GAN style) — confirm intended.
discriminator_loss = nn.BCELoss()
generator_loss = nn.MSELoss()

# Optimizers
D_optimizer = torch.optim.Adam(discriminator.parameters(), lr=learning_rate, weight_decay=1e-5)
G_optimizer = torch.optim.Adam(generator.parameters(), lr=learning_rate, weight_decay=1e-5)

num_epochs = 200
device='cpu'
for epoch in range(1, num_epochs+1):
    D_loss_list, G_loss_list = [], []
    for index, (real_images, labels) in enumerate(train_loader):
        # ---- Discriminator step ----
        D_optimizer.zero_grad()
        real_images = real_images.to(device)
        labels = labels.to(device)
        labels = labels.unsqueeze(1).long()  # shape (batch, 1) for the embedding layers
        # Targets: 1 for real images, 0 for generated ones.
        real_target = Variable(torch.ones(real_images.size(0), 1).to(device))
        fake_target = Variable(torch.zeros(real_images.size(0), 1).to(device))
        D_real_loss = discriminator_loss(discriminator((real_images, labels)), real_target)
        # print(discriminator(real_images))
        #D_real_loss.backward()
        noise_vector = torch.randn(real_images.size(0), latent_dim, device=device)
        noise_vector = noise_vector.to(device)
        generated_image = generator((noise_vector, labels))
        # detach() so this step propagates no gradients into the generator.
        output = discriminator((generated_image.detach(), labels))
        D_fake_loss = discriminator_loss(output, fake_target)
        # train with fake
        #D_fake_loss.backward()
        D_total_loss = (D_real_loss + D_fake_loss) / 2
        D_loss_list.append(D_total_loss)
        D_total_loss.backward()
        D_optimizer.step()
        # ---- Generator step: push the discriminator's score toward "real" ----
        G_optimizer.zero_grad()
        G_loss = generator_loss(discriminator((generated_image, labels)), real_target)
        G_loss_list.append(G_loss)
        G_loss.backward()
        G_optimizer.step()
    # Reports the losses of the last batch of the epoch.
    print("Epoch {0} Gen loss {1} Discrim loss {2}".format(epoch, G_loss, D_total_loss))
|
[
"noreply@github.com"
] |
anantguptadbl.noreply@github.com
|
1cc02218743180f0e7cfee9382aa8e4dfc3a14f4
|
b1018e272ed284ab70ffe6055b90726e879004b3
|
/MIDI Remote Scripts/Push2/observable_property_alias.py
|
9425fff2c4cd6cac938da28aa0e083b11d2b675d
|
[] |
no_license
|
aumhaa/livepy_diff
|
8e593ffb30b1e7909225352f3a0084d4de2e51e6
|
266a7380c4d5a162c051c23f534f74cb7eace538
|
refs/heads/master
| 2020-04-12T03:17:56.545373
| 2017-04-24T02:02:07
| 2017-04-24T02:02:07
| 13,946,086
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
from __future__ import absolute_import, print_function
from ableton.v2.base import EventObject, Slot
class ObservablePropertyAlias(EventObject):
    """
    Expose a property of ``property_host`` under ``alias_name`` on
    ``alias_host``, forwarding change notifications via a registered slot.

    NOTE(review): ``_setup_alias`` installs the property on
    ``alias_host.__class__``, so every instance of that class gains the alias —
    confirm this class-wide effect is intended by the framework.
    """

    def __init__(self, alias_host, property_host = None, property_name = '', alias_name = None, getter = None, *a, **k):
        # alias_name defaults to the aliased property's own name.
        super(ObservablePropertyAlias, self).__init__(*a, **k)
        self._alias_host = alias_host
        self._alias_name = alias_name or property_name
        self._property_host = property_host
        self._property_name = property_name
        self._property_slot = None
        self._setup_alias(getter)

    def _get_property_host(self):
        return self._property_host

    def _set_property_host(self, host):
        # Re-point the notification slot at the new host.
        self._property_host = host
        self._property_slot.subject = host

    property_host = property(_get_property_host, _set_property_host)

    def _setup_alias(self, getter):
        # Install the alias as a read-only property on the host's class, then
        # wire the host's 'notify_<alias>' callback to the source property's slot.
        aliased_prop = property(getter or self._get_property)
        setattr(self._alias_host.__class__, self._alias_name, aliased_prop)
        notifier = getattr(self._alias_host, 'notify_' + self._alias_name)
        self._property_slot = self.register_slot(Slot(self.property_host, notifier, self._property_name))

    def _get_property(self, _):
        # Default getter: read property_name off property_host (None if absent).
        return getattr(self.property_host, self._property_name, None)
|
[
"aumhaa@gmail.com"
] |
aumhaa@gmail.com
|
3dec57ae1f1dbea2ae89aaa75b6bee067092dd1f
|
865bd0c84d06b53a39943dd6d71857e9cfc6d385
|
/179-largest-number/largest-number.py
|
4bc8f0d54937b5992d4b0409c3c534c6cb19937f
|
[] |
no_license
|
ANDYsGUITAR/leetcode
|
1fd107946f4df50cadb9bd7189b9f7b7128dc9f1
|
cbca35396738f1fb750f58424b00b9f10232e574
|
refs/heads/master
| 2020-04-01T18:24:01.072127
| 2019-04-04T08:38:44
| 2019-04-04T08:38:44
| 153,473,780
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
# Given a list of non negative integers, arrange them such that they form the largest number.
#
# Example 1:
#
#
# Input: [10,2]
# Output: "210"
#
# Example 2:
#
#
# Input: [3,30,34,5,9]
# Output: "9534330"
#
#
# Note: The result may be very large, so you need to return a string instead of an integer.
#
class Solution:
    def largestNumber(self, nums: "List[int]") -> str:
        """Arrange *nums* so their concatenation forms the largest number.

        Returns the result as a string (it can exceed native integer range).

        Fixes vs. original: the ``List`` annotation is now a string literal
        (``typing.List`` is never imported here, so evaluating the annotation
        raised NameError at class-definition time); the comparator is condensed
        to a single expression; and an empty input returns "0" instead of
        raising IndexError on ``nums[0]``.
        """
        import functools

        def by_concat(a: str, b: str) -> int:
            # a sorts before b iff a+b forms the larger number.
            return (int(a + b) > int(b + a)) - (int(a + b) < int(b + a))

        digits = sorted(map(str, nums), key=functools.cmp_to_key(by_concat), reverse=True)
        # A leading "0" after sorting means every entry is 0 -> canonical "0".
        return ''.join(digits) if digits and digits[0] != '0' else '0'
|
[
"andyandwei@163.com"
] |
andyandwei@163.com
|
48b99421ac4eec624238b6abb45995860b5a7022
|
fb64776f71eb2a469395a39c3ff33635eb388357
|
/apps/accounts/tests/factories/user.py
|
96bbc988f36a6f82a99567ea1bca2505a79beae1
|
[
"MIT"
] |
permissive
|
jimialex/django-wise
|
ec79d21c428fd1eea953362890051d2120e19f9e
|
3fdc01eabdff459b31e016f9f6d1cafc19c5a292
|
refs/heads/master
| 2023-04-30T20:59:51.625190
| 2021-05-10T06:55:40
| 2021-05-10T06:55:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 981
|
py
|
# -*- coding: utf-8 -*-
import factory
from faker import Factory
from faker.providers import misc, person, profile
from apps.accounts.models.user import User
fake = Factory.create()
fake.add_provider(person)
fake.add_provider(profile)
fake.add_provider(misc)
def fake_username():
    # Draw a random username from faker's simple_profile fixture.
    return fake.simple_profile()['username']
def generate_user_profile():
    """Build a dict of fake user fields with camelCase (API-payload style) keys."""
    user_profile = fake.simple_profile()
    user_password = fake.uuid4()
    # NOTE(review): assumes fake.name() yields exactly two space-separated parts;
    # names with titles/middle names would put the middle part in lastName —
    # confirm acceptable for test data.
    full_name = fake.name().split(' ')
    return {
        'username': user_profile['username'],
        'email': user_profile['mail'],
        'firstName': full_name[0],
        'lastName': full_name[1],
        'password': user_password,
    }
class UserFactory(factory.django.DjangoModelFactory):
    # Factory for accounts.User; LazyFunction draws a fresh fake value per instance.
    username = factory.LazyFunction(fake_username)
    email = factory.LazyFunction(fake.email)
    first_name = factory.LazyFunction(fake.first_name)
    last_name = factory.LazyFunction(fake.last_name)

    class Meta:
        model = User
|
[
"vicobits@gmail.com"
] |
vicobits@gmail.com
|
96d9a1b535980b5c4821400c772bd1885d87ca2c
|
a12c090eb57da4c8e1f543a1a9d497abad763ccd
|
/django-stubs/forms/utils.pyi
|
0131077285c5d3dbc9de498e79682756eb713275
|
[
"BSD-3-Clause"
] |
permissive
|
debuggerpk/django-stubs
|
be12eb6b43354a18675de3f70c491e534d065b78
|
bbdaebb244bd82544553f4547157e4f694f7ae99
|
refs/heads/master
| 2020-04-04T08:33:52.358704
| 2018-09-26T19:32:19
| 2018-09-26T19:32:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,779
|
pyi
|
from collections import UserList
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from django.core.exceptions import ValidationError
from django.utils.safestring import SafeText
# Stub signatures for the module-level django.forms.utils helpers.
def pretty_name(name: str) -> str: ...  # field name -> human-readable label
def flatatt(attrs: Dict[str, Optional[str]]) -> SafeText: ...  # attr dict -> HTML attribute string
class ErrorDict(dict):
    """Stub for django.forms.utils.ErrorDict: field name -> errors mapping,
    renderable as data, JSON, <ul> markup, or plain text."""

    def as_data(self) -> Dict[str, List[ValidationError]]: ...
    def get_json_data(
        self, escape_html: bool = ...
    ) -> Dict[str, List[Dict[str, str]]]: ...
    def as_json(self, escape_html: bool = ...) -> str: ...
    def as_ul(self) -> str: ...
    def as_text(self) -> str: ...
class ErrorList(UserList, list):
    """Stub for django.forms.utils.ErrorList: a list of form errors renderable
    as data, JSON, <ul> markup, or plain text."""

    # Fix: the original annotation referenced ``django.core.exceptions.ValidationError``
    # through the never-imported ``django`` name (unresolved reference); use the
    # directly imported ``ValidationError`` instead.
    data: List[Union[ValidationError, str]]
    error_class: str = ...
    def __init__(
        self,
        initlist: Optional[
            Union[List[ValidationError], List[str], ErrorList]
        ] = ...,
        error_class: Optional[str] = ...,
    ) -> None: ...
    def as_data(self) -> List[ValidationError]: ...
    def get_json_data(
        self, escape_html: bool = ...
    ) -> List[Dict[str, str]]: ...
    def as_json(self, escape_html: bool = ...) -> str: ...
    def as_ul(self) -> str: ...
    def as_text(self) -> str: ...
    def __contains__(self, item: str) -> bool: ...
    def __eq__(self, other: Union[List[str], ErrorList]) -> bool: ...
    def __getitem__(self, i: Union[int, str]) -> str: ...
    def __reduce_ex__(
        self, *args: Any, **kwargs: Any
    ) -> Tuple[
        Callable,
        Tuple[Type[ErrorList]],
        Dict[str, Union[List[ValidationError], str]],
        None,
        None,
    ]: ...
# Stub signatures for Django's current-timezone conversion helpers.
def from_current_timezone(value: datetime) -> datetime: ...
def to_current_timezone(value: datetime) -> datetime: ...
|
[
"maxim.kurnikov@gmail.com"
] |
maxim.kurnikov@gmail.com
|
82e4a2ec6b46eff9f8e2a406b016bbc33060b85f
|
6ff318a9f67a3191b2a9f1d365b275c2d0e5794f
|
/python/小练习/socket.py
|
f649e706ec3dcebf2cd1769b94c7c70fb4f9a757
|
[] |
no_license
|
lvhanzhi/Python
|
c1846cb83660d60a55b0f1d2ed299bc0632af4ba
|
c89f882f601898b5caab25855ffa7d7a1794f9ab
|
refs/heads/master
| 2020-03-25T23:34:00.919197
| 2018-09-13T12:19:51
| 2018-09-13T12:19:51
| 144,281,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,429
|
py
|
# TCP "telephone" demo server: echoes received data back upper-cased.
# (Comments translated from Chinese; user-facing strings left unchanged.)
# 1. "Buy a phone": create the socket
import socket

phone = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # SOCK_STREAM = TCP (stream protocol); SOCK_DGRAM would be UDP
# print(phone)
# 2. "Insert/bind the SIM card": bind the listen address
# phone.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
phone.bind(('127.0.0.1', 8080))
# 3. "Power on": start listening
phone.listen(5)  # backlog: limits the number of pending connection requests
# 4. Wait for a call (a client connection)
print('start....')
while True:  # accept loop: one iteration per client connection
    conn, client_addr = phone.accept()  # (two-way connection from the 3-way handshake, (client ip, port))
    # print(conn)
    print('已经有一个连接建立成功', client_addr)  # "a connection has been established"
    # 5. Communicate: receive / send messages
    while True:  # per-client communication loop
        try:
            print('服务端正在收数据...')  # "server is receiving data..."
            data = conn.recv(1024)  # max bytes per read; blocks until the peer sends > 0 bytes
            # print('===>')
            if len(data) == 0: break  # empty read only happens after the client closed its side
            print('来自客户端的数据', data)  # "data from the client"
            conn.send(data.upper())
        except ConnectionResetError:
            break
    # 6. Hang up this connection
    conn.close()
# 7. Power off (unreachable: the accept loop above never exits)
phone.close()
print('aaa')
def foo():
pass
from b import foo
foo()
圣诞快乐国际旅客的世界观的雷锋精神的理解老师是DLGKLFDNHJGLADF
IF
报头
|
[
"1541860665@qq.com"
] |
1541860665@qq.com
|
fcfcac8d51858207750e6f6453a9f9c6478ac802
|
b3ab2979dd8638b244abdb2dcf8da26d45d7b730
|
/test/test_related_permission_model.py
|
990172835c1b8a26c3210da6641997d46a7c88b8
|
[] |
no_license
|
CU-CommunityApps/ct-cloudcheckr-cmx-client
|
4b3d9b82c5dfdaf24f8f443526868e971d8d1b15
|
18ac9fd4d6c4ae799c0d21745eaecd783da68c0c
|
refs/heads/main
| 2023-03-03T19:53:57.685925
| 2021-02-09T13:05:07
| 2021-02-09T13:05:07
| 329,308,757
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 977
|
py
|
# coding: utf-8
"""
CloudCheckr API
CloudCheckr API # noqa: E501
OpenAPI spec version: v1
Contact: support@cloudcheckr.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudcheckr_cmx_client
from cloudcheckr_cmx_client.models.related_permission_model import RelatedPermissionModel # noqa: E501
from cloudcheckr_cmx_client.rest import ApiException
class TestRelatedPermissionModel(unittest.TestCase):
    """RelatedPermissionModel unit test stubs (swagger-codegen generated skeleton)."""

    def setUp(self):
        # No fixtures required yet.
        pass

    def tearDown(self):
        pass

    def testRelatedPermissionModel(self):
        """Test RelatedPermissionModel"""
        # FIXME: construct object with mandatory attributes with example values
        # model = cloudcheckr_cmx_client.models.related_permission_model.RelatedPermissionModel()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
|
[
"pea1@cornell.edu"
] |
pea1@cornell.edu
|
8567a2b83ce96aee7862a2f85ae3ce4df8398a62
|
53192abcbb297198128952df6ceed17a32cb5f1f
|
/pyidml/models/tags.py
|
1e1fcd76f396d48e9313ac2465021f33ae9f0098
|
[] |
no_license
|
guardian/pyidml
|
2e4ba754c6487eb94193db3a74b32a7b58d79384
|
39afddfee9c432aa5ff12d526aad0eebd2ac66a3
|
refs/heads/master
| 2022-07-01T21:03:46.011695
| 2015-08-10T12:44:42
| 2015-08-10T12:44:42
| 987,182
| 5
| 8
| null | 2022-06-17T21:07:33
| 2010-10-14T14:51:16
|
Python
|
UTF-8
|
Python
| false
| false
| 401
|
py
|
from pyidml.fields import *
from pyidml.models import Element, Properties
class Tags(Element):
    # Root element of an IDML Tags.xml document.
    DOMVersion = StringField()
class XMLTagProperties(Properties):
    # <Properties> child of an XMLTag element.
    TagColor = StringField()  # TODO InDesignUIColorType_TypeDef
class XMLTag(Element):
    # A single XML tag definition; Self is its document-unique identifier.
    Self = StringField(required=True)
    Name = StringField(required=True)
    Properties = EmbeddedDocumentField(XMLTagProperties)
|
[
"ben@firshman.co.uk"
] |
ben@firshman.co.uk
|
3f7f296b636c36d74be827a2e22cbcf0d2ca042d
|
028d788c0fa48a8cb0cc6990a471e8cd46f6ec50
|
/Python-OOP/Iterators-Generators/Exercise/09_permutations.py
|
0398ec46a79574384144b0fc86190ae53603e0df
|
[] |
no_license
|
Sheko1/SoftUni
|
d6b8e79ae545116f4c0e5705ad842f12d77a9c9d
|
a9fbeec13a30231b6a97c2b22bb35257ac1481c0
|
refs/heads/main
| 2023-07-13T15:39:48.826925
| 2021-08-21T12:51:02
| 2021-08-21T12:51:02
| 317,266,200
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
from itertools import permutations
def possible_permutations(data):
    """Lazily yield every permutation of *data*, each as a list."""
    for ordering in permutations(data):
        yield list(ordering)
[print(n) for n in possible_permutations([1, 2, 3])]
|
[
"martinkypar@gmail.com"
] |
martinkypar@gmail.com
|
51d49121dddb8c05767b1d8f43bd424a39fa97d0
|
552bc626603a1757cf7836401cff5f0332a91504
|
/django/django-instagram-clone_kindfamily/instaclone-backend/accounts/admin.py
|
05daffd421368208ed05399d35596a5e4d5b43c7
|
[] |
no_license
|
anifilm/webapp
|
85f3d0aae34f46917b3c9fdf8087ec8da5303df1
|
7ef1a9a8c0dccc125a8c21b22db7db4b9d5c0cda
|
refs/heads/master
| 2023-08-29T18:33:00.323248
| 2023-08-26T07:42:39
| 2023-08-26T07:42:39
| 186,593,754
| 1
| 0
| null | 2023-04-21T12:19:59
| 2019-05-14T09:49:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 559
|
py
|
from django.contrib import admin
from .models import Profile, Follow
class FollowInline(admin.TabularInline):
    # Tabular inline for Follow rows, anchored on the follower ('from_user') FK.
    model = Follow
    fk_name = 'from_user'
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    """Admin for Profile: listed by id/nickname/user, searchable by nickname,
    with related Follow rows editable inline."""
    list_display = ['id', 'nickname', 'user']
    list_display_links = ['nickname', 'user']
    search_fields = ['nickname']
    inlines = [FollowInline]
@admin.register(Follow)
class FollowAdmin(admin.ModelAdmin):
    # Admin for follow edges; every listed column links to the change form.
    list_display = ['from_user', 'to_user', 'created_at']
    list_display_links = ['from_user', 'to_user', 'created_at']
|
[
"anifilm02@gmail.com"
] |
anifilm02@gmail.com
|
4c2b2d67d7dc4ec40f448d6cdbeaca6b5577c01c
|
ef5f369a8fb3978dbb57cdab2c0f83880fa43c36
|
/amatino/ledger_order.py
|
048e9ff9e8eb4cb348903040d2c78b4fc6279022
|
[
"MIT"
] |
permissive
|
pypi-buildability-project/amatino-python
|
c8a93c849d9e97ea907d411511a0c732ee51b29e
|
9178e0883b735f882729c19a7a68df68b49e057b
|
refs/heads/master
| 2022-07-19T12:24:06.587840
| 2020-05-21T05:28:08
| 2020-05-21T05:28:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
"""
Amatino API Python Bindings
Ledger Order Module
Author: hugh@amatino.io
"""
from enum import Enum
class LedgerOrder(Enum):
    """Ledger row ordering; the member value encodes the direction as a boolean
    (True = oldest entries first)."""
    OLDEST_FIRST = True
    YOUNGEST_FIRST = False
|
[
"hugh.jeremy@gmail.com"
] |
hugh.jeremy@gmail.com
|
31192339ea0302d32f15714aa4d5d108ec2ff4b1
|
443416bab5d7c258936dae678feb27de6c537758
|
/kratos/python_scripts/application_generator/classes/variableCreator.py
|
27d35d56986e388d58e7fe4caacfbe7e247208c9
|
[
"BSD-3-Clause"
] |
permissive
|
pyfsi/Kratos
|
b941e12594ec487eafcd5377b869c6b6a44681f4
|
726aa15a04d92c958ba10c8941ce074716115ee8
|
refs/heads/master
| 2020-04-27T17:10:10.357084
| 2019-11-22T09:05:35
| 2019-11-22T09:05:35
| 174,507,074
| 2
| 0
|
NOASSERTION
| 2020-03-27T16:38:28
| 2019-03-08T09:22:47
|
C++
|
UTF-8
|
Python
| false
| false
| 2,293
|
py
|
from __future__ import print_function, absolute_import, division
from utils.constants import ctab
class VariableCreator(object):
    def __init__(self, name, vtype, is3D=False):
        ''' Creates a variable for an application.

        Builds the four Kratos macro strings (define, create, register,
        register-in-python) used by the application generator templates.

        Input
        -----
        - name: string
            name of the variable
        - vtype: string
            type of the variable (ignored when is3D is True: 3D component
            variables are "double" by definition)
        - is3D: boolean
            vectorial (True) or scalar (False, default) variable

        NOTE: the 3D branch duplicates VariableCreator3D and could be replaced
        by it at some point.
        '''
        # Fix: the original always built the scalar strings first and then
        # unconditionally overwrote them in the 3D case; branch once instead.
        if is3D:
            self.defineString = 'KRATOS_DEFINE_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
            self.createString = 'KRATOS_CREATE_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
            self.registerString = ctab + 'KRATOS_REGISTER_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
            self.registerPythonString = ctab + 'KRATOS_REGISTER_IN_PYTHON_3D_VARIABLE_WITH_COMPONENTS(m, {name} )\n'.format(name=name)
        else:
            self.defineString = 'KRATOS_DEFINE_VARIABLE( {type}, {name} )\n'.format(type=vtype, name=name)
            self.createString = 'KRATOS_CREATE_VARIABLE( {type}, {name} )\n'.format(type=vtype, name=name)
            self.registerString = ctab + 'KRATOS_REGISTER_VARIABLE( {name} )\n'.format(name=name)
            self.registerPythonString = ctab + 'KRATOS_REGISTER_IN_PYTHON_VARIABLE(m, {name} )\n'.format(name=name)
class VariableCreator3D(object):
    def __init__(self, name):
        ''' Creates a 3D variable for an application.

        All 3D variables are "double" by definition

        Input
        -----
        - name: string
            name of the variable
        '''
        # Macro text for the four places a Kratos variable must be declared/registered.
        self.defineString = 'KRATOS_DEFINE_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
        self.createString = 'KRATOS_CREATE_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
        self.registerString = ctab + 'KRATOS_REGISTER_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
        # NOTE(review): unlike VariableCreator's 3D branch, this macro call has no
        # 'm, ' module argument — confirm which form the templates expect.
        self.registerPythonString = ctab + 'KRATOS_REGISTER_IN_PYTHON_3D_VARIABLE_WITH_COMPONENTS( {name} )\n'.format(name=name)
|
[
"roigcarlo@gmail.com"
] |
roigcarlo@gmail.com
|
a5c0765f4077d63debef0b2e1bd84bb5f445cc20
|
851e327e5e75392aa755f3d699b474846b886623
|
/qa/rpc-tests/smartfees.py
|
f453a2a7c1f68b8c3ebceb91cfdbdb5c58314971
|
[
"MIT"
] |
permissive
|
advantage-development/v4
|
140535cfb56e789459078de030c7455ef3228e8f
|
59245b326a2b1e488be77816dad9c32166465c73
|
refs/heads/master
| 2021-10-09T15:32:34.396521
| 2018-12-30T16:25:43
| 2018-12-30T16:25:43
| 163,597,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,309
|
py
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcredit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test fee estimation code
#
from test_framework import BitcreditTestFramework
from bitcreditrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class EstimateFeeTest(BitcreditTestFramework):
    """Exercise the fee-estimation code against miners with different block policies.

    Fix vs. original: both out-of-range branches raised with the undefined
    names ``min_fee_kb``/``max_fee_kb`` (producing a NameError instead of the
    intended AssertionError); they now report min/max of the observed
    ``fees_per_kb``.
    """

    def setup_network(self):
        '''Start three nodes with increasingly restrictive block-size policies.'''
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir,
                                     ["-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]))
        # Node1 mines small-but-not-tiny blocks, and allows free transactions.
        # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes,
        # so blockmaxsize of 2,000 is really just 1,000 bytes (room enough for
        # 6 or 7 transactions)
        self.nodes.append(start_node(1, self.options.tmpdir,
                                     ["-blockprioritysize=1500", "-blockmaxsize=2000",
                                      "-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]))
        connect_nodes(self.nodes[1], 0)
        # Node2 is a stingy miner that produces very small blocks
        # (room for only 3 or so transactions).
        node2args = ["-blockprioritysize=0", "-blockmaxsize=1500",
                     "-debug=mempool", "-debug=estimatefee", "-relaypriority=0"]
        self.nodes.append(start_node(2, self.options.tmpdir, node2args))
        connect_nodes(self.nodes[2], 0)
        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        '''Generate fee-paying transactions, mine under each policy, and
        sanity-check that estimatefee stays within the observed fee range.'''
        # Prime the memory pool with pairs of transactions
        # (high-priority, random fee and zero-priority, random fee)
        min_fee = Decimal("0.001")
        fees_per_kb = []
        for i in range(12):
            (txid, txhex, fee) = random_zeropri_transaction(self.nodes, Decimal("1.1"),
                                                            min_fee, min_fee, 20)
            tx_kbytes = (len(txhex) / 2) / 1000.0  # hex chars -> bytes -> kB
            fees_per_kb.append(float(fee) / tx_kbytes)

        # Mine blocks with node2 until the memory pool clears:
        count_start = self.nodes[2].getblockcount()
        while len(self.nodes[2].getrawmempool()) > 0:
            self.nodes[2].setgenerate(True, 1)
            self.sync_all()

        all_estimates = [self.nodes[0].estimatefee(i) for i in range(1, 20)]
        print("Fee estimates, super-stingy miner: " + str([str(e) for e in all_estimates]))
        # Estimates should be within the bounds of what transaction fees actually were:
        delta = 1.0e-6  # account for rounding error
        for e in filter(lambda x: x >= 0, all_estimates):
            if float(e) + delta < min(fees_per_kb) or float(e) - delta > max(fees_per_kb):
                # Fix: use the actual observed bounds (original referenced the
                # undefined min_fee_kb/max_fee_kb and raised NameError here).
                raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
                                     % (float(e), min(fees_per_kb), max(fees_per_kb)))

        # Generate transactions while mining 30 more blocks, this time with node1:
        for i in range(30):
            for j in range(random.randrange(6 - 4, 6 + 4)):
                (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"),
                                                        Decimal("0.0"), min_fee, 20)
                tx_kbytes = (len(txhex) / 2) / 1000.0
                fees_per_kb.append(float(fee) / tx_kbytes)
            self.nodes[1].setgenerate(True, 1)
            self.sync_all()

        all_estimates = [self.nodes[0].estimatefee(i) for i in range(1, 20)]
        print("Fee estimates, more generous miner: " + str([str(e) for e in all_estimates]))
        for e in filter(lambda x: x >= 0, all_estimates):
            if float(e) + delta < min(fees_per_kb) or float(e) - delta > max(fees_per_kb):
                raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
                                     % (float(e), min(fees_per_kb), max(fees_per_kb)))

        # Finish by mining a normal-sized block:
        while len(self.nodes[0].getrawmempool()) > 0:
            self.nodes[0].setgenerate(True, 1)
            self.sync_all()
        final_estimates = [self.nodes[0].estimatefee(i) for i in range(1, 20)]
        print("Final fee estimates: " + str([str(e) for e in final_estimates]))
if __name__ == '__main__':
EstimateFeeTest().main()
|
[
"you@example.com"
] |
you@example.com
|
618e2b66a661669d15aebbce7f698055d592a0ef
|
c049d678830eb37879589a866b39f8e72186a742
|
/upcfcardsearch/c301.py
|
0a57ec0a5d0c8b1861615d90c6339ee5cee31ad4
|
[
"MIT"
] |
permissive
|
ProfessorSean/Kasutamaiza
|
682bec415397ba90e30ab1c31caa6b2e76f1df68
|
7a69a69258f67bbb88bebbac6da4e6e1434947e6
|
refs/heads/main
| 2023-07-28T06:54:44.797222
| 2021-09-08T22:22:44
| 2021-09-08T22:22:44
| 357,771,466
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,017
|
py
|
import discord
from discord.ext import commands
from discord.utils import get
class c301(commands.Cog, name="c301"):
    """Cog exposing one command that posts the embed for custom card c301
    ("Mage's Magic")."""

    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.command(name='Mage\'s_Magic', aliases=['c301'])
    async def example_embed(self, ctx):
        # Build the card-description embed and send it to the invoking channel.
        embed = discord.Embed(title='Mage\'s Magic',
                              color=0x1D9E74)
        embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2361242.jpg')
        embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)
        embed.add_field(name='Type', value='Spell/Quick-Play', inline=False)
        embed.add_field(name='Card Effect', value='Target 1 Set Spell/Trap you control; banish that target, then banish 2 Spell/Traps on the field. You cannot activate the targeted card this Chain.', inline=False)
        embed.set_footer(text='Set Code: ANCF')
        await ctx.send(embed=embed)
def setup(bot: commands.Bot):
    # discord.py extension entry point: register the cog with the bot.
    bot.add_cog(c301(bot))
|
[
"professorsean3@gmail.com"
] |
professorsean3@gmail.com
|
387008f8bd1d7b7e2da36f90cdf6c06072c8b63d
|
76cef2e2909ffaa6f6b594a8fd1aaaa9b754a69b
|
/netmiko/scp_handler.py
|
5e0fb7d0cc0c75aea57b5907f566f8ba35c55ec4
|
[
"MIT"
] |
permissive
|
jinesh-patel/netmiko
|
978b7747ea2dea3a8b05208313ffc74846d9c2fc
|
f19b51f9de783a06102d74ef9780ca8547eb2f89
|
refs/heads/master
| 2021-01-18T05:16:16.285330
| 2015-09-04T01:16:34
| 2015-09-04T01:16:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,416
|
py
|
'''
Create a SCP side-channel to transfer a file to remote network device.
SCP requires a separate SSH connection.
Currently only supports Cisco IOS.
'''
from __future__ import print_function
from __future__ import unicode_literals
import re
import os
import hashlib
import paramiko
import scp
class SCPConn(object):
    '''
    Establish an SCP channel to the remote network device.

    SCP needs its own SSH session, so a second connection is opened with
    credentials copied from the existing netmiko control channel.
    '''
    def __init__(self, ssh_conn):
        self.ssh_ctl_chan = ssh_conn  # existing SSH control channel (source of credentials)
        self.establish_scp_conn()

    def establish_scp_conn(self):
        '''
        Establish the SCP connection
        '''
        self.scp_conn = paramiko.SSHClient()
        # Auto-accept unknown host keys; password auth only (no key files, no agent).
        self.scp_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.scp_conn.connect(hostname=self.ssh_ctl_chan.ip,
                              port=self.ssh_ctl_chan.port,
                              username=self.ssh_ctl_chan.username,
                              password=self.ssh_ctl_chan.password,
                              look_for_keys=False,
                              allow_agent=False,
                              timeout=8)
        self.scp_client = scp.SCPClient(self.scp_conn.get_transport())

    def scp_transfer_file(self, source_file, dest_file):
        '''
        Transfer file using SCP

        Must close the SCP connection to get the file to write to the remote filesystem
        '''
        self.scp_client.put(source_file, dest_file)

    def close(self):
        '''
        Close the SCP connection
        '''
        self.scp_conn.close()
class FileTransfer(object):
    '''
    Manage an SCP file transfer and its associated SSH control channel.

    Intended for Cisco IOS devices; usable as a context manager::

        with FileTransfer(ssh_conn, src, dest) as transfer:
            transfer.transfer_file()
            transfer.verify_file()
    '''
    def __init__(self, ssh_conn, source_file, dest_file, file_system="flash:"):
        '''
        Record transfer parameters and precompute the source file's size and MD5.

        ssh_conn    -- established SSH control channel to the device
        source_file -- local path of the file to transfer
        dest_file   -- destination file name on the remote device
        file_system -- remote file system (default "flash:")
        '''
        self.ssh_ctl_chan = ssh_conn
        self.source_file = source_file
        self.source_md5 = self.file_md5(source_file)
        self.dest_file = dest_file
        self.file_system = file_system

        src_file_stats = os.stat(source_file)
        self.file_size = src_file_stats.st_size

    def __enter__(self):
        '''Context manager setup: open the SCP side channel.'''
        self.establish_scp_conn()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        '''Context manager cleanup.

        Fix: the original re-raised via ``raise exc_type(exc_value)``, which
        discarded the traceback and re-wrapped the message; returning False
        lets Python propagate the original exception unchanged.
        '''
        self.close_scp_chan()
        return False

    def establish_scp_conn(self):
        '''Establish SCP connection.'''
        self.scp_conn = SCPConn(self.ssh_ctl_chan)

    def close_scp_chan(self):
        '''Close the SCP connection to the remote network device.'''
        self.scp_conn.close()
        self.scp_conn = None

    def verify_space_available(self, search_pattern=r"(.*) bytes available "):
        '''
        Verify sufficient space is available on the remote device; boolean.

        Fix: an unmatched pattern now returns False instead of raising
        AttributeError on ``match.group``.
        '''
        remote_cmd = "show {0}".format(self.file_system)
        remote_output = self.ssh_ctl_chan.send_command(remote_cmd)
        match = re.search(search_pattern, remote_output)
        if match is None:
            return False
        space_avail = int(match.group(1))
        return space_avail > self.file_size

    def check_file_exists(self, remote_cmd=""):
        '''
        Check if dest_file exists on the remote file system; boolean.

        Fix: the default command now honors ``self.file_system`` instead of
        hard-coding "flash:".
        '''
        if not remote_cmd:
            remote_cmd = "dir {0}/{1}".format(self.file_system, self.dest_file)
        remote_out = self.ssh_ctl_chan.send_command(remote_cmd)
        search_string = r"Directory of .*{0}".format(self.dest_file)
        if 'Error opening' in remote_out:
            return False
        elif re.search(search_string, remote_out):
            return True
        else:
            raise ValueError("Unexpected output from check_file_exists")

    @staticmethod
    def file_md5(file_name):
        '''Compute the MD5 hash of a local file.

        Fix: the file handle is now closed deterministically via ``with``.
        '''
        with open(file_name, "rb") as f:
            file_contents = f.read()
        return hashlib.md5(file_contents).hexdigest()

    @staticmethod
    def process_md5(md5_output, pattern=r"= (.*)"):
        '''
        Extract the MD5 hash from device output.

        Expected output from Cisco IOS:
        .MD5 of flash:file_name Done!
        verify /md5 (flash:file_name) = 410db2a7015eaa42b1fe71f1bf3d59a2
        '''
        match = re.search(pattern, md5_output)
        if match:
            return match.group(1)
        raise ValueError("Invalid output from MD5 command: {0}".format(md5_output))

    def compare_md5(self, base_cmd='verify /md5', delay_factor=8):
        '''
        Calculate the remote MD5 and compare it to the source MD5; boolean.

        The default command is Cisco specific and can be CPU intensive on the
        remote device (hence the large delay_factor).
        '''
        remote_md5_cmd = "{0} {1}{2}".format(base_cmd, self.file_system, self.dest_file)
        dest_md5 = self.ssh_ctl_chan.send_command(remote_md5_cmd, delay_factor=delay_factor)
        dest_md5 = self.process_md5(dest_md5)
        return self.source_md5 == dest_md5

    def transfer_file(self):
        '''
        SCP transfer source_file to the device; call verify_file() afterwards
        to confirm integrity.
        '''
        self.scp_conn.scp_transfer_file(self.source_file, self.dest_file)
        # Must close the SCP connection to get the file written to the remote
        # filesystem (flush).
        self.scp_conn.close()

    def verify_file(self):
        '''Verify the transferred file via remote MD5 comparison.'''
        return self.compare_md5()

    def enable_scp(self, cmd=None):
        '''
        Enable SCP on the remote device.
        Defaults to the Cisco IOS command.
        '''
        if cmd is None:
            cmd = ['ip scp server enable']
        elif not hasattr(cmd, '__iter__'):
            cmd = [cmd]
        self.ssh_ctl_chan.send_config_set(cmd)

    def disable_scp(self, cmd=None):
        '''
        Disable SCP on the remote device.
        Defaults to the Cisco IOS command.
        '''
        if cmd is None:
            cmd = ['no ip scp server enable']
        elif not hasattr(cmd, '__iter__'):
            cmd = [cmd]
        self.ssh_ctl_chan.send_config_set(cmd)
|
[
"ktbyers@twb-tech.com"
] |
ktbyers@twb-tech.com
|
f10d440a207cdc966fcc27e8e8a60b4fe07de07f
|
ee6ec35a80351480d566b5c65ae331a0f6f577ee
|
/models/tempdb_tdc.py
|
ec464c80db58a3e4cf9f2425672a584466aae249
|
[] |
no_license
|
aroodooteam/connecteur_aro_tempdb
|
d262103a816156e73a4c30fd86ad6175f449a8b3
|
f29629a6b380487e6a38cb98344b446ecb49adf5
|
refs/heads/master
| 2020-03-12T14:05:21.855008
| 2018-04-23T08:28:01
| 2018-04-23T08:28:01
| 130,658,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# -*- coding: utf-8 -*-
from openerp import models, fields
class TempdbTdc(models.Model):
    # Odoo/OpenERP model backing the tempdb_tdc load used by "recup nom apporteur".
    _name = 'tempdb.tdc'
    _description = 'Load tempdb_tdc in recup nom apporteur'

    name = fields.Char(string='Name')
    statut = fields.Char(string='Statut')
    agence = fields.Char(string='Agence', size=4)  # agency code, max 4 chars
    old = fields.Char(string='Old', size=8)   # previous code, max 8 chars
    new = fields.Char(string='New', size=16)  # replacement code, max 16 chars
    titre = fields.Char(string='Titre', size=16)
|
[
"aroodoo@asus.aro"
] |
aroodoo@asus.aro
|
7a7d02691a84fce6559b63583b7262b88c04daa9
|
ad59fb12042bfd3f5c43eca057d0f747f9e148cf
|
/Se2iP/usr/lib/enigma2/python/Plugins/Extensions/IPTVPlayer/tsiplayer/addons/resources/hosters/dustreaming.py
|
07a22d09c704cc118d14249ac2bf9d521d16dc1e
|
[] |
no_license
|
lexlong2007/eePlugins
|
d62b787100a7069ad5713a47c5688008063b45ec
|
167b262fe36901a2d3a2fae6d0f85e2307b3eff7
|
refs/heads/master
| 2022-03-09T05:37:37.567937
| 2022-02-27T01:44:25
| 2022-02-27T01:44:25
| 253,012,126
| 0
| 0
| null | 2020-04-04T14:03:29
| 2020-04-04T14:03:29
| null |
UTF-8
|
Python
| false
| false
| 2,140
|
py
|
#-*- coding: utf-8 -*-
#Vstream https://github.com/Kodi-vStream/venom-xbmc-addons
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.requestHandler import cRequestHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.hosters.hoster import iHoster
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import dialog
import json
class cHoster(iHoster):
def __init__(self):
self.__sDisplayName = 'Dustreaming'
self.__sFileName = self.__sDisplayName
self.__sHD = ''
def getDisplayName(self):
return self.__sDisplayName
def setDisplayName(self, sDisplayName):
self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR]'
def setFileName(self, sFileName):
self.__sFileName = sFileName
def getFileName(self):
return self.__sFileName
def getPluginIdentifier(self):
return 'dustreaming'
def setHD(self, sHD):
self.__sHD = ''
def getHD(self):
return self.__sHD
def isDownloadable(self):
return True
def setUrl(self, sUrl):
self.__sUrl = str(sUrl)
def getMediaLink(self):
return self.__getMediaLinkForGuest()
def __getMediaLinkForGuest(self):
api_call = ''
sUrl = self.__sUrl.replace('/v/', '/api/source/')
oRequest = cRequestHandler(sUrl)
oRequest.setRequestType(cRequestHandler.REQUEST_TYPE_POST)
oRequest.addHeaderEntry('Referer', self.__sUrl)
oRequest.addParameters('r', '')
oRequest.addParameters('d', 'dustreaming.fr')
sHtmlContent = oRequest.request()
page = json.loads(sHtmlContent)
if page:
url = []
qua = []
for x in page['data']:
url.append(x['file'])
qua.append(x['label'])
if (url):
api_call = dialog().VSselectqual(qua, url)
if (api_call):
return True, api_call
return False, False
|
[
"zdzislaw22@windowslive.com"
] |
zdzislaw22@windowslive.com
|
25daeb07963fe92517947f4aa8197cc45303103d
|
68e65df90da9169733025dfede0a8b30a5e3d7e3
|
/Inheritance_and_More_on_OOPS/11_practice_test2.py
|
a5a4b0e8cd0c57a3f7980babe409762b8328ee98
|
[] |
no_license
|
shubam-garg/Python-Beginner
|
290346cbb309a28d28d6ac04034cb084b71ccbc6
|
30742006c380a0a18aff574567a95c8b8c694754
|
refs/heads/main
| 2023-05-06T07:11:29.943475
| 2021-05-29T20:35:59
| 2021-05-29T20:35:59
| 354,527,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
''' create a class pets from class animals and further create class dog from pets,
add a method bark to class dog '''
class animals:
animaltype="Mammal"
class pets(animals):
petcolor="Black"
class dog(pets):
@staticmethod
def bark():
print("dog")
d=dog()
d.bark()
|
[
"81907680+shubam-garg@users.noreply.github.com"
] |
81907680+shubam-garg@users.noreply.github.com
|
ca6db4381b477d224e0c52a8e60201f6444ddf5d
|
1d0a4750e216f301ec49a247bf7bf07cd61fa29f
|
/app/views/reports/integration/advantage_payroll/advantage_payroll_client_setup_csv.py
|
fcfcf75d4a02f73167a6dcc346bf96726af2ace1
|
[] |
no_license
|
smoothbenefits/BenefitMY_Python
|
52745a11db2cc9ab394c8de7954974e6d5a05e13
|
b7e8474a728bc22778fd24fe88d1918945a8cfc8
|
refs/heads/master
| 2021-03-27T15:57:34.798289
| 2018-04-29T19:04:04
| 2018-04-29T19:04:04
| 24,351,568
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
from rest_framework.response import Response
from django.http import HttpResponse
from django.http import Http404
from app.views.reports.report_export_view_base import ReportExportViewBase
from app.service.Report.integration.advantage_payroll.advantage_payroll_company_setup_csv_service \
import AdvantagePayrollCompanySetupCsvService
class AdvantagePayrollClientSetupCsvView(ReportExportViewBase):
def get(self, request, company_id, format=None):
csv_service = AdvantagePayrollCompanySetupCsvService()
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=client_setup.csv'
csv_service.get_report(company_id, response)
return response
|
[
"jeffzhang_misc@hotmail.com"
] |
jeffzhang_misc@hotmail.com
|
ab58404bbcdbd307415fd151a7e798392d35f0f6
|
25872e1ba4f86cbbf77d0130f341b21e5dd9e692
|
/GameOfLife.py
|
00600969a68b00f6a35c13b6dc1b798ddd428888
|
[] |
no_license
|
zongxinwu92/leetcode
|
dc3d209e14532b9b01cfce6d4cf6a4c2d7ced7de
|
e1aa45a1ee4edaf72447b771ada835ad73e7f508
|
refs/heads/master
| 2021-06-10T21:46:23.937268
| 2017-01-09T09:58:49
| 2017-01-09T09:58:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,468
|
py
|
'''
Created on 1.12.2017
@author: Jesse
''''''
According to the Wikipedia s article: "The Game of Life, also known simply as Life, is a cellular automaton devised by the British mathematician John Horton Conway in 1970."
Given a board with m by n cells, each cell has an initial state live (1) or dead (0). Each cell interacts with its eight neighbors (horizontal, vertical, diagonal) using the following four rules (taken from the above Wikipedia article):
Any live cell with fewer than two live neighbors dies, as if caused by under-population.
Any live cell with two or three live neighbors lives on to the next generation.
Any live cell with more than three live neighbors dies, as if by over-population..
Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
Write a function to compute the next state (after one update) of the board given its current state.
Follow up:
Could you solve it in-place? Remember that the board needs to be updated at the same time: You cannot update some cells first and then use their updated values to update other cells.
In this question, we represent the board using a 2D array. In principle, the board is infinite, which would cause problems when the active area encroaches the border of the array. How would you address these problems?
Credits:Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases."
'''
|
[
"darrencheng0817@gmail.com"
] |
darrencheng0817@gmail.com
|
f97ae739b9877f4e5efd4375b1584c6b49da597b
|
857d8f44ee11e7bf6972486e6be875aec9fff819
|
/mythx_cli/formatter/util.py
|
2d3396ae2ec6cc29ee9e594243ada061ecc91e95
|
[
"MIT"
] |
permissive
|
s0b0lev/mythx-cli
|
dfe45adfb41163098d08bf1849c48083241b22e5
|
27dc1c4ce1d87fbd02be4d32c5fbb4281da7c53c
|
refs/heads/master
| 2022-04-10T21:18:20.506815
| 2020-03-27T10:32:23
| 2020-03-27T10:32:23
| 250,260,634
| 0
| 0
|
MIT
| 2020-03-26T13:05:12
| 2020-03-26T13:05:11
| null |
UTF-8
|
Python
| false
| false
| 4,404
|
py
|
"""Utility functions for handling API requests and responses."""
from typing import List, Union
import click
from mythx_models.response import DetectedIssuesResponse, Severity
SEVERITY_ORDER = (
Severity.UNKNOWN,
Severity.NONE,
Severity.LOW,
Severity.MEDIUM,
Severity.HIGH,
)
def get_source_location_by_offset(source: str, offset: int) -> int:
"""Retrieve the Solidity source code location based on the source map
offset.
:param source: The Solidity source to analyze
:param offset: The source map's offset
:return: The offset's source line number equivalent
"""
return source.encode("utf-8")[0:offset].count("\n".encode("utf-8")) + 1
def generate_dashboard_link(uuid: str) -> str:
"""Generate a MythX dashboard link for an analysis job.
This method will generate a link to an analysis job on the official
MythX dashboard production setup. Custom deployment locations are currently
not supported by this function (but available at mythx.io).
:param uuid: The analysis job's UUID
:return: The analysis job's dashboard link
"""
return "https://dashboard.mythx.io/#/console/analyses/{}".format(uuid)
def normalize_swc_list(swc_list: Union[str, List[str], None]) -> List[str]:
"""Normalize a list of SWC IDs.
This method normalizes a list of SWC ID definitions, making SWC-101, swc-101,
and 101 equivalent.
:param swc_list: The list of SWC IDs as strings
:return: The normalized SWC ID list as SWC-XXX
"""
if not swc_list:
return []
if type(swc_list) == str:
swc_list = swc_list.split(",")
swc_list = [str(x).strip().upper() for x in swc_list]
swc_list = ["SWC-{}".format(x) if not x.startswith("SWC") else x for x in swc_list]
return swc_list
def set_ci_failure() -> None:
"""Based on the current context, set the return code to 1.
This method sets the return code to 1. It is called by the
respective subcommands (analyze and report) in case a severe issue
has been found (as specified by the user) if the CI flag is passed.
This will make the MythX CLI fail when running on a CI server. If no
context is available, this function assumes that it is running
outside a CLI scenario (e.g. a test setup) and will not do anything.
"""
try:
ctx = click.get_current_context()
if ctx.obj["ci"]:
ctx.obj["retval"] = 1
except RuntimeError:
# skip failure when there is no active click context
# i.e. the method has been called outside the click
# application.
pass
def filter_report(
resp: DetectedIssuesResponse,
min_severity: Union[str, Severity] = None,
swc_blacklist: Union[str, List[str]] = None,
swc_whitelist: Union[str, List[str]] = None,
) -> DetectedIssuesResponse:
"""Filter issues based on an SWC blacklist and minimum severity.
This will remove issues of a specific SWC ID or with a too low
severity from the issue reports of the passed
:code:`DetectedIssuesResponse` object. The SWC blacklist can be a
list of strings in the format "SWC-000" or a comma-separated string.
"SWC" is case-insensitive and normalized. The SWC whitelist works in
a similar way, just including selected SWCs into the resulting
response object.
:param resp: The issue report of an analysis job
:param min_severity: Ignore SWC IDs below the designated level
:param swc_blacklist: A comma-separated list of SWC IDs to ignore
:param swc_whitelist: A comma-separated list of SWC IDs to include
:return: The filtered issue report
"""
min_severity = Severity(min_severity.title()) if min_severity else Severity.UNKNOWN
swc_blacklist = normalize_swc_list(swc_blacklist)
swc_whitelist = normalize_swc_list(swc_whitelist)
new_issues = []
for report in resp.issue_reports:
for issue in report.issues:
is_severe = SEVERITY_ORDER.index(issue.severity) >= SEVERITY_ORDER.index(
min_severity
)
not_blacklisted = issue.swc_id not in swc_blacklist
is_whitelisted = issue.swc_id in swc_whitelist if swc_whitelist else True
if all((is_severe, is_whitelisted, not_blacklisted)):
new_issues.append(issue)
set_ci_failure()
report.issues = new_issues
return resp
|
[
"dmuhs@protonmail.ch"
] |
dmuhs@protonmail.ch
|
ca42bf7575d962f0e1942732e0aa257c00973a2a
|
8a66b19e38ea5042dfd454634ff434dfe9f297b0
|
/data/output/4_original
|
3ede26cf818b59cd261aab203d184b456b3769e3
|
[] |
no_license
|
KqSMea8/pythonanalyzer
|
6a8adbf945d8d2febcf3e1084bb9d673f89210d7
|
d4ac77ed1644081e028c791d9b64e9dfebad06e8
|
refs/heads/master
| 2020-05-19T21:08:10.645730
| 2019-05-06T14:45:57
| 2019-05-06T14:45:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "twobuntu.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"rares.begu@gmail.com"
] |
rares.begu@gmail.com
|
|
a26ece7a4286b44a672d498df8c9eae03da97dad
|
707d702cd2628938829bbab2275da9a52c88666b
|
/pamlJob.py
|
5f9acbc996bfbede0b001a389d2a609346ef1466
|
[] |
no_license
|
MaciekZZZ/project-lazarus
|
682a7f87ed2f2c857bd839abea0360505156ead6
|
e5b34a4b98c7b064044b98a58a329bf954e65560
|
refs/heads/master
| 2021-01-01T18:42:04.760323
| 2015-03-15T13:51:56
| 2015-03-15T13:51:56
| 32,376,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,841
|
py
|
##############################################################################
#
# PamlJob class:
# a class containing the logic for writing codeml and baseml *.ctl files,
# and logic to parse and store the results from codeml and baseml.
#
#
# Author: Victor Hanson-Smith
# Contact: victorhs@cs.uoregon.edu
#
###############################################################################
import os, re, time
class PamlJob:
def __init__(self):
self.treeNumber = None
self.treePath = None
self.alignmentPath = None
self.executionDirectory = None
self.controlPath = None
self.estimate_branch_lengths = False
self.among_site_rate_variation = False
self.fix_asrv = False
self.alpha = 1.0
self.ncat_gamma = 4
self.cleandata = 0
#
# Call this method after the method named self.startJob is finished.
# This method verifies that PAML built the correct files.
#
# If PAML job finished correctly, then this method will return True
# If we discover any errors in the PAML output, then this method will return an error message (i.e. a string)
#
def verifyResults(self):
# does the file named 'rst' exist?
if False == os.path.exists(self.executionDirectory + "/rst"):
return "The PAML job on tree " + self.treeNumber.__str__() + " did not produce a file named 'rst'. I was expecting to find this file at " + self.executionDirectory + "/rst"
# does the file named 'out.paml' exist?
if False == os.path.exists(self.executionDirectory + "/out.paml"):
return "The PAML job on tree " + self.treeNumber.__str__() + " did not produce a file named 'out.paml'. I was expecting to find this file at " + self.executionDirectory + "/out.paml"
return True
class CodemlJob(PamlJob):
def __init__(self):
PamlJob.__init__(self)
self.modelPath = None
#
# This method assumes the following variables are instantiated:
# self.treePath, self.treeNumber, self.alignmentPath, self.codemlControlPath, and self.executionDirectory
#
def startJob(self):
self.writeControlFile()
previous_directory = os.getcwd()
os.chdir(self.executionDirectory)
os.system("codeml")
os.chdir(previous_directory)
#
# This is a helper method for self.startJob
# This method writes codeml.ctl into self.executionDirectory
#
def writeControlFile(self):
self.controlPath = self.executionDirectory + "/codeml.ctl"
f = open(self.controlPath, "w")
f.write("*\n")
f.write("* This file was auto-generated by Lazarus. " + time.localtime().__str__() + "\n")
f.write("*\n")
f.write("* For more information about the parameters in this file,\n* please consult the official PAML documenation.\n")
f.write("*\n")
# remove the path to the alignment (We want just the filename)
a = self.alignmentPath
tokens = a.split("/")
alignmentPath = tokens[ tokens.__len__() - 1]
f.write("seqfile = " + alignmentPath + "\n")
# remove the path to the tree (We want just the filename)
a = self.treePath
tokens = a.split("/")
treePath = tokens[ tokens.__len__() -1 ]
f.write("treefile = " + treePath + "\n")
f.write("outfile = out.paml\n")
f.write("noisy = 3\n")
f.write("verbose = 9\n")
f.write("runmode = 0\n")
f.write("seqtype = 2\n")
f.write("aaRatefile = model.dat\n")
f.write("model = 3\n")
if self.fix_asrv == False:
f.write("fix_alpha = 0\n")
else:
f.write("fix_alpha = 1\n")
if self.among_site_rate_variation:
f.write("alpha = " + self.alpha.__str__() + "\n")
f.write("ncatG = 4\n")
else:
f.write("alpha = 0\n")
f.write("RateAncestor = 2\n")
f.write("Small_Diff = 1.0e-6\n")
f.write("cleandata = " + self.cleandata.__str__() + "\n") # remove sites with "X" or "?" or other ambiguity
f.write("method = 1\n")
if self.estimate_branch_lengths == False:
f.write("fix_blength = 2\n") # fix the branch lengths at their values in the tree file.
else:
f.write("fix_blength = 1\n") # use the branch lengths as a starting value for the Ml search
f.close()
class BasemlJob(PamlJob):
def __init__(self):
PamlJob.__init__(self)
self.modelName = None
#
# This method assumes the following variables are instantiated:
# self.treePath, self.treeNumber, self.alignmentPath, self.codemlControlPath, and self.executionDirectory
#
def startJob(self):
self.writeControlFile()
previous_directory = os.getcwd()
os.chdir(self.executionDirectory)
os.system("baseml")
os.chdir(previous_directory)
def get_number_for_model(self, modelName):
if modelName == "JC69":
return 0
elif modelName == "K80":
return 1
elif modelName == "F81":
return 2
elif modelName == "F84":
return 3
elif modelName == "HKY85":
return 4
elif modelName == "T92":
return 5
elif modelName == "TN93":
return 6
elif modelName == "REV":
return 7
elif modelName == "UNREST":
return 8
elif modelName == "REVu":
return 9
elif modelName == "UNRESTu":
return 10
#
# This is a helper method for self.startJob
# This method writes baseml.ctl into self.executionDirectory
#
def writeControlFile(self):
self.controlPath = self.executionDirectory + "/baseml.ctl"
f = open(self.controlPath, "w")
f.write("*\n")
f.write("* This file was auto-generated by Lazarus. " + time.localtime().__str__() + "\n")
f.write("*\n")
f.write("* For more information about the parameters in this file,\n* please consult the official PAML documenation.\n")
f.write("*\n")
# remove the path to the alignment (We want just the filename)
a = self.alignmentPath
tokens = a.split("/")
alignmentPath = tokens[ tokens.__len__() - 1]
f.write("seqfile = " + alignmentPath + "\n")
# remove the path to the tree (We want just the filename)
a = self.treePath
tokens = a.split("/")
treePath = tokens[ tokens.__len__() -1 ]
f.write("treefile = " + treePath + "\n")
f.write("outfile = out.paml\n")
f.write("noisy = 3\n")
f.write("verbose = 9\n")
f.write("runmode = 0\n")
modelnumber = self.get_number_for_model(self.modelName)
f.write("model = " + modelnumber.__str__() + "\n")
if self.fix_asrv == False:
f.write("fix_alpha = 0\n")
else:
f.write("fix_alpha = 1\n")
if self.among_site_rate_variation:
f.write("alpha = " + self.alpha.__str__() + "\n")
f.write("ncatG = 4\n")
else:
f.write("alpha = 0\n")
f.write("RateAncestor = 2\n")
f.write("Small_Diff = 1.0e-6\n")
f.write("cleandata = " + self.cleandata.__str__() + "\n") # remove sites with "X" or "?" or other ambiguity
f.write("method = 1\n")
if self.estimate_branch_lengths == False:
f.write("fix_blength = 2\n") # fix the branch lengths at their values in the tree file.
else:
f.write("fix_blength = 1\n") # use the branch lengths as a starting value for the Ml search
f.close()
|
[
"victorhansonsmith@gmail.com"
] |
victorhansonsmith@gmail.com
|
8c4fd996268da9a039352a06d3bf2b53964c5823
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_MovingAverage_Seasonal_MonthOfYear_LSTM.py
|
de9aad1d33d18bc660e961d2021163f758b52956
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 175
|
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['MovingAverage'] , ['Seasonal_MonthOfYear'] , ['LSTM'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
ba2ffe1645eb7f733945e851d169d43d91d13d2c
|
9d5c9d9373002ab4ed1b493136517e8b4ab160e5
|
/saas/settings.py
|
4814fc8298fcc4faf4f4d40742c94562e265bd4f
|
[
"MIT"
] |
permissive
|
robert871126/bk-iam-saas
|
f8299bb632fc853ef0131d445f84c6084fc84aba
|
33c8f4ffe8697081abcfc5771b98a88c0578059f
|
refs/heads/master
| 2023-08-23T19:23:01.987394
| 2021-10-22T09:45:28
| 2021-10-22T09:45:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,674
|
py
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-权限中心(BlueKing-IAM) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
请不要修改该文件
如果你需要对settings里的内容做修改,config/default.py 文件中 添加即可
如有任何疑问,请联系 【蓝鲸助手】
"""
import os
# V3判断环境的环境变量为BKPAAS_ENVIRONMENT
if "BKPAAS_ENVIRONMENT" in os.environ:
ENVIRONMENT = os.getenv("BKPAAS_ENVIRONMENT", "dev")
# V2判断环境的环境变量为BK_ENV
else:
PAAS_V2_ENVIRONMENT = os.environ.get("BK_ENV", "development")
ENVIRONMENT = {"development": "dev", "testing": "stag", "production": "prod"}.get(PAAS_V2_ENVIRONMENT)
DJANGO_CONF_MODULE = "config.{env}".format(env=ENVIRONMENT)
try:
_module = __import__(DJANGO_CONF_MODULE, globals(), locals(), ["*"])
except ImportError as e:
raise ImportError("Could not import config '%s' (Is it on sys.path?): %s" % (DJANGO_CONF_MODULE, e))
for _setting in dir(_module):
if _setting == _setting.upper():
locals()[_setting] = getattr(_module, _setting)
|
[
"zhu327@gmail.com"
] |
zhu327@gmail.com
|
97af4069785fc222565c74b0af560d0127c1eca7
|
e04dbc32247accf073e3089ed4013427ad182c7c
|
/ATC001/A.py
|
4916c3a117c874c47ef9b69f46996605e1b5dd07
|
[] |
no_license
|
twobooks/atcoder_training
|
9deb237aed7d9de573c1134a858e96243fb73ca0
|
aa81799ec87cc9c9d76de85c55e99ad5fa7676b5
|
refs/heads/master
| 2021-10-28T06:33:19.459975
| 2021-10-20T14:16:57
| 2021-10-20T14:16:57
| 233,233,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
# 深さ優先探索
import sys
sys.setrecursionlimit(1000000)
H,W = list(map(int,input().split()))
S = []
for i in range(H):
s = input()
S.append(s)
for i in range(H):
for j in range(W):
if S[i][j]=="s":
si,sj = i,j
if S[i][j]=="g":
gi,gj = i,j
visited = []
for i in range(H):
visited.append([False]*W)
def dfs(i,j):
visited[i][j] = True
for i2,j2 in [[i+1,j],[i-1,j],[i,j+1],[i,j-1]]:
if not (0<=i2<H and 0<=j2<W):
continue
if S[i2][j2]=="#":
continue
if not visited[i2][j2]:
dfs(i2,j2)
dfs(si,sj)
if visited[gi][gj]:
print("Yes")
else:
print("No")
|
[
"twobookscom@gmail.com"
] |
twobookscom@gmail.com
|
73cbc8e528416492b2e495cde3816dc5a35812c6
|
e6793e7eb54d2105c373a8af7ebc653b7ad94575
|
/multibody/shapes/BUILD.bazel
|
7e9deed720cf5296b37fd0ca5a6f0877a3f346dd
|
[
"BSD-3-Clause"
] |
permissive
|
nikaven/drake
|
5c59e88f79b530ddf62496452959abeaf8fff1e3
|
34bab4ecaa34ac09ade6dcb11cf7bc0d13c5bd4e
|
refs/heads/master
| 2020-03-31T01:37:25.441270
| 2018-10-05T19:42:22
| 2018-10-05T19:42:22
| 151,788,663
| 7
| 0
| null | 2018-10-05T23:37:07
| 2018-10-05T23:37:07
| null |
UTF-8
|
Python
| false
| false
| 341
|
bazel
|
# -*- python -*-
load(
"//attic:build_macros.bzl",
"add_attic_aliases",
)
load("//tools/lint:lint.bzl", "add_lint_tests")
load("//multibody/rigid_body_plant:warning.bzl", "WARNING") # For side effect.
package(default_visibility = ["//visibility:public"])
add_attic_aliases([
"shapes",
"test_models",
])
add_lint_tests()
|
[
"jeremy.nimmer@tri.global"
] |
jeremy.nimmer@tri.global
|
04a1ac59ec41153c92aff505a32d2c7f77a81895
|
96c970ebacd9ade1493f4d01537005788b43a49b
|
/pychron/pyscripts/tasks/pyscript_plugin.py
|
374747cc5a5e87b032cd0e4c3bc54be69e15aac7
|
[
"Apache-2.0"
] |
permissive
|
OSUPychron/pychron
|
d2da9051b68024200d0009de634da810ccef2a0d
|
fe0ba9daff9548fa8bebab26db66a1cefff7c1d6
|
refs/heads/master
| 2021-01-14T12:47:26.389887
| 2015-12-18T22:27:02
| 2015-12-18T22:27:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,851
|
py
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
from envisage.ui.tasks.task_factory import TaskFactory
from envisage.ui.tasks.task_extension import TaskExtension
from pyface.tasks.action.schema_addition import SchemaAddition
from pyface.tasks.action.task_action import TaskAction
from pyface.tasks.action.schema import SMenu
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.envisage.tasks.base_task_plugin import BaseTaskPlugin
from pychron.paths import paths
from pychron.pyscripts.tasks.pyscript_actions import OpenPyScriptAction, \
NewPyScriptAction, OpenHopsEditorAction, NewHopsEditorAction
from pychron.pyscripts.tasks.pyscript_preferences import PyScriptPreferencesPane
from pychron.pyscripts.tasks.visual_el_programmer.actions import OpenVisualELScriptAction, NewVisualELScriptAction
class PyScriptPlugin(BaseTaskPlugin):
id = 'pychron.pyscript.plugin'
def _actions_default(self):
return [('pychron.open_pyscript', 'Ctrl+Shift+O', 'Open PyScript'),
('pychron.new_pyscript', 'Ctrl+Shift+N', 'New PyScript'),]
def _task_extensions_default(self):
def _replace_action():
return TaskAction(name='Replace',
method='replace')
exts = [
TaskExtension(
task_id='pychron.pyscript.task',
actions=[SchemaAddition(
id='Edit',
factory=lambda: SMenu(id='Edit', name='Edit'),
path='MenuBar'),
SchemaAddition(id='replace',
path='MenuBar/Edit',
factory=_replace_action)]),
TaskExtension(
actions=[
SchemaAddition(id='open_hops_editor',
path='MenuBar/file.menu/Open',
factory=OpenHopsEditorAction),
SchemaAddition(id='new_hops_editor',
path='MenuBar/file.menu/New',
factory=NewHopsEditorAction),
SchemaAddition(id='open_script',
path='MenuBar/file.menu/Open',
factory=OpenPyScriptAction),
SchemaAddition(id='new_script',
path='MenuBar/file.menu/New',
factory=NewPyScriptAction),
SchemaAddition(id='new_visual',
path='MenuBar/file.menu/New',
factory=NewVisualELScriptAction),
SchemaAddition(id='open_visual',
path='MenuBar/file.menu/Open',
factory=OpenVisualELScriptAction)])]
return exts
def _tasks_default(self):
return [TaskFactory(id='pychron.pyscript.task',
name='PyScript',
factory=self._task_factory,
task_group='experiment',
image='script'),
TaskFactory(id='pychron.pyscript.visual_el_programmer',
name='Visual Programmer',
factory=self._visual_task_factory,
task_group='experiment')]
def _visual_task_factory(self):
from pychron.pyscripts.tasks.visual_el_programmer.visual_el_programmer_task import VisualElProgrammerTask
return VisualElProgrammerTask()
def _task_factory(self):
from pychron.pyscripts.tasks.pyscript_task import PyScriptTask
return PyScriptTask()
def _preferences_panes_default(self):
return [PyScriptPreferencesPane]
def _preferences_default(self):
return ['file://{}'.format(os.path.join(paths.preferences_dir, 'script.ini'))]
# ============= EOF =============================================
|
[
"jirhiker@gmail.com"
] |
jirhiker@gmail.com
|
93bb1b68101317784173698ba340d4098e7a5b85
|
1518483f16e735453a0e154a6619a4f5228874f6
|
/tests/test_decisiontree.py
|
fa371888854879d3dbb15a254f417cc897f666b1
|
[
"MIT"
] |
permissive
|
jdvelasq/decisions-tree
|
8e156c4828e5a1e00f583bbdb88d5a3b9c7f6e3d
|
54f886e82784c4061200d843841ef600b0ac366b
|
refs/heads/master
| 2023-06-04T03:47:45.741438
| 2021-06-28T13:46:40
| 2021-06-28T13:46:40
| 134,651,498
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
"""Tests
"""
from decisions_tree.decision_tree import DecisionTree
def test_terminal_node_creation():
"""test"""
tree = DecisionTree()
tree.terminal_node()
assert tree.data[0].type == "TERMINAL"
assert tree.data[0].expr is None
assert tree.data[0].id == 0
|
[
"jdvelasq@unal.edu.co"
] |
jdvelasq@unal.edu.co
|
066317e9308f2b0748697df1abf9aad98c762cc9
|
e7fab7a59d8d1a3ee5528e5e7505e428764636cd
|
/rich/bar.py
|
65a28dfc8d9b0b5300b60feea35c82b079b2dc12
|
[
"MIT"
] |
permissive
|
diek/rich
|
081c2480d3191316d9d027d31b12a9668fa19666
|
e5c300d5b0f4ada0a0f7588c660a5fd29fbaee64
|
refs/heads/master
| 2021-05-19T11:24:57.445723
| 2020-03-31T15:49:31
| 2020-03-31T15:49:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,719
|
py
|
from typing import Union
from .console import Console, ConsoleOptions, RenderResult
from .measure import Measurement
from .segment import Segment
from .style import StyleType
class Bar:
"""Renders a (progress) bar.
Args:
total (float, optional): Number of steps in the bar. Defaults to 100.
completed (float, optional): Number of steps completed. Defaults to 0.
width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done".
"""
def __init__(
self,
total: float = 100,
completed: float = 0,
width: int = None,
style: StyleType = "bar.back",
complete_style: StyleType = "bar.complete",
finished_style: StyleType = "bar.finished",
):
self.total = total
self.completed = completed
self.width = width
self.style = style
self.complete_style = complete_style
self.finished_style = finished_style
def __repr__(self) -> str:
return f"<Bar {self.completed!r} of {self.total!r}>"
@property
def percentage_completed(self) -> float:
"""Calculate percentage complete."""
completed = (self.completed / self.total) * 100.0
completed = min(100, max(0.0, completed))
return completed
def update_progress(self, completed: float, total: float = None) -> None:
"""Update progress with new values.
Args:
completed (float): Number of steps completed.
total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None.
"""
self.completed = completed
self.total = total if total is not None else self.total
def __console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
completed = min(self.total, max(0, self.completed))
width = min(self.width or options.max_width, options.max_width)
bar = "━"
half_bar_right = "╸"
half_bar_left = "╺"
complete_halves = int(width * 2 * completed / self.total)
bar_count = complete_halves // 2
half_bar_count = complete_halves % 2
style = console.get_style(self.style)
complete_style = console.get_style(
self.complete_style if self.completed < self.total else self.finished_style
)
if bar_count:
yield Segment(bar * bar_count, complete_style)
if half_bar_count:
yield Segment(half_bar_right * half_bar_count, complete_style)
remaining_bars = width - bar_count - half_bar_count
if remaining_bars:
if not half_bar_count and bar_count:
yield Segment(half_bar_left, style)
remaining_bars -= 1
if remaining_bars:
yield Segment(bar * remaining_bars, style)
def __measure__(self, console: Console, max_width: int) -> Measurement:
if self.width is not None:
return Measurement(self.width, self.width)
return Measurement(4, max_width)
if __name__ == "__main__": # pragma: no cover
console = Console()
bar = Bar(width=50, total=100)
import time
console.show_cursor(False)
for n in range(0, 101, 1):
bar.update_progress(n)
console.print(bar)
console.file.write("\r")
time.sleep(0.05)
console.show_cursor(True)
console.print()
|
[
"willmcgugan@gmail.com"
] |
willmcgugan@gmail.com
|
15906a69c4081328b1d41f3787ea24a51ec6661e
|
2ff7e53d5e512cd762217ca54317982e07a2bb0c
|
/eve-8.51.857815/eve/client/script/ui/eveUIProcs.py
|
9b8e14ca0f30664bf20829b79156c71b233b36df
|
[] |
no_license
|
nanxijw/Clara-Pretty-One-Dick
|
66d3d69426642b79e8fd4cc8e0bec23adeeca6d6
|
50de3488a2140343c364efc2615cf6e67f152be0
|
refs/heads/master
| 2021-01-19T09:25:07.555284
| 2015-02-17T21:49:33
| 2015-02-17T21:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,444
|
py
|
#Embedded file name: eve/client/script/ui\eveUIProcs.py
import uthread
import eve.common.script.sys.eveCfg as util
import locks
import random
import svc
import carbonui.const as uiconst
import localization
class EveUIProcSvc(svc.uiProcSvc):
    """
    Manages ActionProcs for UI systems (camera, audio, and UI).

    Replaces the stock ``uiProcSvc`` service and maps callback keys coming
    from ActionObjects to concrete UI commands or local handlers.
    """
    __guid__ = 'svc.eveUIProcSvc'
    # Registered as a replacement for the base uiProcSvc service.
    __replaceservice__ = 'uiProcSvc'
    # The 'cmd' service must be up before Run() builds the callback table.
    __startupdependencies__ = ['cmd']

    def Run(self, *args):
        """
        Run the service. First calls into the base, then does local stuff.

        Builds ``self.uiCallbackDict`` mapping callback keys (strings sent by
        ActionObjects) to bound callables.  The ``None`` key is mapped to an
        error handler so a missing callbackKey property is reported loudly.
        """
        svc.uiProcSvc.Run(self, *args)
        self.uiCallbackDict = {None: self._NoneKeyIsInvalid_Callback,
         'OpenCharacterCustomization': self.__OpenCharacterCustomization_Callback,
         'CorpRecruitment': self._CorpRecruitment_Callback,
         'OpenCorporationPanel_Planets': self._OpenCorporationPanel_Planets_Callback,
         'OpenAuraInteraction': self.cmd.OpenAuraInteraction,
         'ExitStation': self.cmd.CmdExitStation,
         'OpenFitting': self.cmd.OpenFitting,
         'OpenShipHangar': self.cmd.OpenShipHangar,
         'OpenCargoBay': self.cmd.OpenCargoHoldOfActiveShip,
         'OpenDroneBay': self.cmd.OpenDroneBayOfActiveShip,
         'OpenMarket': self.cmd.OpenMarket,
         'OpenAgentFinder': self.cmd.OpenAgentFinder,
         'OpenStationDoor': self.__OpenStationDoor_Callback,
         'EnterHangar': self.cmd.CmdEnterHangar,
         'GiveNavigationFocus': self._GiveNavigationFocus_Callback}

    def _PerformUICallback(self, callbackKey):
        """
        Look up *callbackKey* and run its handler on a worker tasklet.

        Returns True when a handler was found (and scheduled), False after
        logging an error for an unknown key.
        """
        callback = self.uiCallbackDict.get(callbackKey, None)
        if callback is not None:
            # Run asynchronously so the UI thread is never blocked.
            uthread.worker('_PerformUICallback_%s' % callbackKey, self._PerformUICallbackTasklet, callbackKey, callback)
            return True
        self.LogError('ActionObject.PerformUICallback: Unknown callbackKey', callbackKey)
        return False

    def _PerformUICallbackTasklet(self, callbackKey, callback):
        """Invoke *callback*; log (rather than crash) if it is not callable."""
        try:
            callback()
        except TypeError as e:
            # Only TypeError is trapped here; other exceptions propagate to
            # the tasklet framework.
            self.LogError('ActionObject.PerformUICallback: callbackKey "%s" is associated with a non-callable object: %s' % (callbackKey, callback), e)

    def _NoneKeyIsInvalid_Callback(self):
        """Handler for a missing callbackKey property on an ActionObject."""
        self.LogError('PerformUICallback called from ActionObject without the callbackKey property (it was None)!')

    def _CorpRecruitment_Callback(self):
        """Open recruitment for NPC-corp members, the corp panel otherwise."""
        if util.IsNPC(session.corpid):
            self.cmd.OpenCorporationPanel_RecruitmentPane()
        else:
            self.cmd.OpenCorporationPanel()

    def _GiveNavigationFocus_Callback(self):
        """
        Gives the charControl layer application focus
        """
        sm.GetService('navigation').Focus()

    def _OpenCorporationPanel_Planets_Callback(self):
        """Open the player's planets, or introduce PI on a random planet."""
        # TempLock prevents double-activation while the UI is opening.
        with locks.TempLock('OpenCorporationPanel_Planets', locks.RLock):
            if sm.GetService('planetSvc').GetMyPlanets():
                self.cmd.OpenPlanets()
            else:
                # No colonies yet: pick a random planet in the current system
                # and open the planet view as an introduction.
                systemData = sm.GetService('map').GetSolarsystemItems(session.solarsystemid2)
                systemPlanets = []
                for orbitalBody in systemData:
                    if orbitalBody.groupID == const.groupPlanet:
                        systemPlanets.append(orbitalBody)
                planetID = systemPlanets[random.randrange(0, len(systemPlanets))].itemID
                sm.GetService('planetUI').Open(planetID)
                # One-time (suppressible) PI introduction dialog.
                if not settings.user.suppress.Get('suppress.PI_Info', None):
                    ret, supp = sm.GetService('gameui').MessageBox(localization.GetByLabel('UI/PI/IntroText'), title=localization.GetByLabel('UI/Generic/Information'), buttons=uiconst.OK, modal=False, icon=uiconst.INFO, suppText=localization.GetByLabel('UI/Shared/Suppress1'))
                    if supp:
                        settings.user.suppress.Set('suppress.PI_Info', supp)

    def __OpenStationDoor_Callback(self):
        """
        Put up a "coming soon" message for establishments
        """
        uicore.Message('CaptainsQuartersStationDoorClosed')

    def __OpenCharacterCustomization_Callback(self):
        """
        Prompts the user before entering character customization.
        """
        # Ignore the request while the map service is busy transitioning.
        if getattr(sm.GetService('map'), 'busy', False):
            return
        if uicore.Message('EnterCharacterCustomizationCQ', {}, uiconst.YESNO, uiconst.ID_YES) == uiconst.ID_YES:
            self.cmd.OpenCharacterCustomization()
|
[
"billchang.e@gmail.com"
] |
billchang.e@gmail.com
|
9b744106637d8aa581bb460af23d834cce11ef2d
|
1a7487e3d8129c3e469b4faabf7daf0284a2cb15
|
/vedastr/models/bodies/sequences/transformer/decoder.py
|
5d7daa881078194722165ae99efbb4c5acd6c5e2
|
[
"Apache-2.0"
] |
permissive
|
kkalla/vedastr
|
67715bfc05c4ab633fe20f82193e13c21a951a82
|
d9e237fb8fbd79d36ed314bf9f902796ec210fb4
|
refs/heads/master
| 2023-01-03T09:01:18.960161
| 2020-11-02T09:01:35
| 2020-11-02T09:01:35
| 284,680,371
| 0
| 0
|
Apache-2.0
| 2020-11-02T09:01:37
| 2020-08-03T11:20:00
| null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
import logging
import torch.nn as nn
from .position_encoder import build_position_encoder
from .unit import build_decoder_layer
from ..registry import SEQUENCE_DECODERS
from vedastr.models.weight_init import init_weights
logger = logging.getLogger()
@SEQUENCE_DECODERS.register_module
class TransformerDecoder(nn.Module):
    """A stack of transformer decoder layers with optional positional encoding."""

    def __init__(self, decoder_layer, num_layers, position_encoder=None):
        super(TransformerDecoder, self).__init__()
        if position_encoder is not None:
            self.pos_encoder = build_position_encoder(position_encoder)
        decoder_layers = [build_decoder_layer(decoder_layer) for _ in range(num_layers)]
        self.layers = nn.ModuleList(decoder_layers)
        logger.info('TransformerDecoder init weights')
        init_weights(self.modules())

    @property
    def with_position_encoder(self):
        """True when a positional encoder was configured at construction."""
        return getattr(self, 'pos_encoder', None) is not None

    def forward(self, tgt, src, tgt_mask=None, src_mask=None):
        """Apply positional encoding (if any), then each decoder layer in turn."""
        if self.with_position_encoder:
            tgt = self.pos_encoder(tgt)
        for decoder_layer in self.layers:
            tgt = decoder_layer(tgt, src, tgt_mask, src_mask)
        return tgt
|
[
"jun.sun@media-smart.cn"
] |
jun.sun@media-smart.cn
|
60c4aecd5a79cd0c5d197007fbce2340680664f7
|
28c79c447c75e2cd4522c93072d99797c71aae63
|
/BMO/pyamazon.py
|
e63d235103fe3f405e3f2ffe52a0af6dcf70c3d7
|
[] |
no_license
|
tsuyukimakoto/bmo
|
bee8802455a376026879a02f0c12e0c988dbdc20
|
7593eb6ff9d7061d218e9082f122d11d04533c45
|
refs/heads/master
| 2020-03-23T01:38:25.669405
| 2010-09-23T05:46:12
| 2010-09-23T05:46:12
| 140,928,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,379
|
py
|
# -*- coding: utf8 -*-
#
# pyamazon.py
# pyshelf
#
# Created by makoto tsuyuki on 07/11/04.
# Copyright (c) 2007 everes.net All rights reserved.
#
from datetime import datetime
import httplib, urllib
from xml.etree import ElementTree
class Item(object):
    """One Amazon catalogue item, plus its authors and recommended products."""

    def __init__(self, asin='', sales_rank=0, image='',
                 isbn='', price=0, formattedPrice='',
                 publisher='', title=''):
        self.asin = asin
        self.sales_rank = sales_rank
        self.image = image
        self.isbn = isbn
        self.price = price
        self.formattedPrice = formattedPrice
        self.publisher = publisher
        self.title = title
        # Populated after parsing the response.
        self.recommends = []
        self.authors = []

    def toXml(self):
        """Serialize this item (and its recommendations) to an <ebmo> XML string."""
        template = u'''<?xml version="1.0" encoding="utf-8"?>
<ebmo>
<title>%(title)s</title>
<asin>%(asin)s</asin>
<price>%(price)s</price>
<author>%(authors)s</author>
<rank>%(rank)s</rank>
<recommends>%(recs)s</recommends>
<created>%(create_date)s</created>
</ebmo>'''
        now = datetime.now()
        create_date = '%4d-%02d-%02d %02d:%02d:%02d' % (now.year, now.month, now.day, now.hour, now.minute, now.second)
        recommend_xml = [u'<recommend><rasin>%s</rasin><rtitle>%s</rtitle></recommend>' % (r.asin, r.title) for r in self.recommends]
        return template % {'title': self.title,
                           'asin': self.asin,
                           'price': self.formattedPrice,
                           'authors': u','.join([u'%s' % a for a in self.authors]),
                           'rank': self.sales_rank,
                           'recs': u'\n'.join(recommend_xml),
                           'create_date': create_date}

    def __unicode__(self):
        """Human-readable multi-line summary of the item."""
        template = '''
        Title: %(title)s
        Asin: %(asin)s
        Price: %(price)s
        Author: %(authors)s
        Rank: %(rank)s
        Recommends:
        %(recs)s'''
        recommend_lines = [u'%s(%s)' % (r.title, r.asin,) for r in self.recommends]
        return template % {'title': self.title,
                           'asin': self.asin,
                           'price': self.formattedPrice,
                           'authors': u','.join([a for a in self.authors]),
                           'rank': self.sales_rank,
                           'recs': u'\n'.join(recommend_lines)}
class PylibAPIError(Exception):
    """Raised when the Amazon API reports an invalid request or errors.

    The message is forwarded to ``Exception`` so ``str(e)`` and tracebacks
    show it, while ``e.message`` keeps the original attribute access working.
    """

    def __init__(self, message):
        # Explicitly initialize the base class with the message.
        super(PylibAPIError, self).__init__(message)
        self.message = message

    def __unicode__(self):
        return self.message
class PylibNotFoundError(Exception):
    """Raised when the requested item does not exist at Amazon.co.jp.

    The message is forwarded to ``Exception`` so ``str(e)`` and tracebacks
    show it, while ``e.message`` keeps the original attribute access working.
    """

    def __init__(self, message):
        # Explicitly initialize the base class with the message.
        super(PylibNotFoundError, self).__init__(message)
        self.message = message

    def __unicode__(self):
        return self.message
class Recommend(object):
    """A lightweight (asin, title) pair describing a recommended product."""

    def __init__(self, asin, title):
        self.asin = asin
        self.title = title

    def __unicode__(self):
        return '%s(%s)' % (self.title, self.asin, )
# NOTE(review): module-level placeholder; shadowed by the `xml` parameter of
# parseXML and apparently unused otherwise — candidate for removal.
xml = ''
def parseXML(xml):
    """Parse an Amazon ECS ItemLookup response into an ``Item``.

    Raises:
        PylibAPIError: when the response has no <Items> node or reports errors.
        PylibNotFoundError: when the lookup matched no item.
    """

    def getText(parent, tag, default=''):
        # BUG FIX: ElementTree Elements with no children are *falsy*, so the
        # old `node and node.text` idiom silently dropped leaf nodes such as
        # <Amount> (price always came back ''). Always compare against None.
        node = parent.find(tag)
        if node is None or node.text is None:
            return default
        return node.text

    def getImageURL(item):
        node = item.find('LargeImage')
        if node is not None:
            return node.find('URL').text
        return ''

    def isValid(items):
        return items.find('Request').find('IsValid').text == 'True'

    root = ElementTree.fromstring(xml)
    items = root.find('Items')
    if items is None:
        raise PylibAPIError('No Items')
    if not isValid(items):
        # Collect every reported error into a single message.
        errors = items.find('Request').find('Errors')
        error_message = ''
        for e in errors.findall('Error'):
            error_message += '%s: %s\n' % (e.find('Code').text, e.find('Message').text)
        raise PylibAPIError(error_message)
    item = items.find('Item')
    if item is None:
        raise PylibNotFoundError('Your request item is not found at Amazon.co.jp')
    asin = getText(item, 'ASIN')
    sales_rank = getText(item, 'SalesRank')
    image = getImageURL(item)
    attributes = item.find('ItemAttributes')
    isbn = getText(attributes, 'ISBN')
    lp = attributes.find('ListPrice')
    price = getText(lp, 'Amount') if lp is not None else ''
    formattedPrice = getText(lp, 'FormattedPrice') if lp is not None else ''
    publisher = getText(attributes, 'Publisher')
    title = attributes.find('Title').text
    obj = Item(asin=asin, sales_rank=sales_rank,
               image=image, isbn=isbn, price=price,
               formattedPrice=formattedPrice, publisher=publisher,
               title=title)
    obj.authors = [a.text for a in attributes.findall('Author')]
    recommends = []
    similar = item.find('SimilarProducts')
    if similar is not None:
        for p in similar:
            recommends.append(Recommend(p.find('ASIN').text, p.find('Title').text))
    obj.recommends = recommends
    return obj
def getBook(api_key, barcode):
    """Look up a book by its ISBN."""
    return getMedia(api_key, barcode, type='ISBN', searchIndex='Books')


def getMusic(api_key, barcode):
    """Look up a music title by its UPC barcode."""
    return getMedia(api_key, barcode, type='UPC', searchIndex='Music')


def getVideo(api_key, barcode):
    """Look up a video by its UPC barcode."""
    return getMedia(api_key, barcode, type='UPC', searchIndex='Video')


def getGame(api_key, barcode):
    """Look up a video game by its UPC barcode."""
    return getMedia(api_key, barcode, type='UPC', searchIndex='VideoGames')
VERSION = '0.1'
API_SERVER = 'ecs.amazonaws.jp:80'


def getMedia(api_key, barcode, searchIndex='Book', type='ISBN'):
    """POST an ItemLookup request to Amazon ECS and parse the XML response."""
    query = {'Service': 'AWSECommerceService',
             'AWSAccessKeyId': api_key,
             'Operation': 'ItemLookup',
             'SearchIndex': searchIndex,
             'ItemId': barcode,
             'IdType': type,
             'ResponseGroup': 'Medium,Reviews,SalesRank,Similarities',
             'Version': '2007-10-29'}
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain",
               "User-agent": "pylibrary/%s" % (VERSION)}
    body = urllib.urlencode(query)
    connection = httplib.HTTPConnection(API_SERVER)
    connection.request("POST", "/onca/xml", body, headers)
    response = connection.getresponse()
    payload = response.read()
    # Strip the default namespace so ElementTree tag lookups stay simple.
    payload = payload.replace('xmlns="http://webservices.amazon.com/AWSECommerceService/2007-10-29"', '')
    connection.close()
    return parseXML(payload)
#try:
# b = getGame(API_KEY, '4976219754576')
#except Exception, e:
# print unicode(e)
|
[
"mtsuyuki@gmail.com"
] |
mtsuyuki@gmail.com
|
b03809df39fd0f8958db0be4187d666d5eb404a3
|
f340b9f47aaf11d95911074efd16e2878b4608c5
|
/200119/Word_Ladder_II.py
|
2e7dde5aba0ce910314145023958a13983352978
|
[] |
no_license
|
Jsonghh/leetcode
|
150020d1250a7e13e7387a545b4eb7df0de8f90b
|
3a83c0b0bcc43f458f7fc54764f60e1104fcc12e
|
refs/heads/master
| 2020-11-25T03:12:48.842151
| 2020-02-11T02:56:58
| 2020-02-11T02:56:58
| 228,475,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
class Solution:
    """Word Ladder II: find all shortest transformation sequences."""

    def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
        """Return every shortest ladder from beginWord to endWord.

        BFS from endWord records each word's distance to the goal, then DFS
        from beginWord follows strictly-decreasing distances so only shortest
        paths are enumerated.
        """
        if endWord not in wordList:
            return []
        d = {}
        results = []
        wordList = set(wordList)
        wordList.add(beginWord)
        self.bfs(beginWord, endWord, wordList, d)
        self.dfs(beginWord, endWord, wordList, d, [beginWord], results)
        return results

    # use bfs to record the distance to endWord
    def bfs(self, s, e, l, d):
        d[e] = 1
        q = [e]
        while q:
            cur = q.pop(0)
            for nw in self.fnws(cur, l):
                if nw in d:
                    continue
                d[nw] = d[cur] + 1
                q.append(nw)

    # use dfs to update cur_path and append valid paths to the results
    def dfs(self, cur, e, l, d, path, results):
        if cur == e:
            results.append(list(path))
            return
        for nw in self.fnws(cur, l):
            # Only step to words exactly one unit closer to the end word.
            if nw not in d or d[nw] != d[cur] - 1:
                continue
            path.append(nw)
            self.dfs(nw, e, l, d, path, results)
            path.pop()

    # find possible next words according to the wordlist
    def fnws(self, w, l):
        aph = 'abcdefghijklmnopqrstuvwxyz'
        nws = []
        for i in range(len(w)):
            for c in aph:
                if c == w[i]:
                    continue
                nw = w[:i] + c + w[i + 1:]
                if nw not in l:
                    continue
                nws.append(nw)
        # BUG FIX: previously returned the undefined name `nwscd`, which
        # raised NameError on every call.
        return nws
|
[
"jiesonghe@outlook.com"
] |
jiesonghe@outlook.com
|
3828b38f141bfab5bd065764b8b7c70f84420fc1
|
e818cf45e9c2f081bb64954f515540bdab25b9d1
|
/polls/views.py
|
1e01d3456a36915082fb8d3b3294ec716d39e2bd
|
[] |
no_license
|
Timothy-py/Polls-API
|
bfccefbfe0da4ccbc4d8fd39ba4fe14e66b3227d
|
d354795e4bba43c99bab71020693568e625d677c
|
refs/heads/master
| 2020-09-23T22:30:48.575799
| 2019-12-12T15:07:54
| 2019-12-12T15:07:54
| 225,603,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
from django.shortcuts import render, get_object_or_404
from .models import Poll
from django.http import JsonResponse
# Create your views here.
def polls_list(request):
    """Return up to 20 polls as a JSON payload."""
    MAX_OBJECT = 20
    recent_polls = Poll.objects.all()[:MAX_OBJECT]
    payload = {
        "results": list(
            recent_polls.values("question", "created_by__username", "pub_date")
        ),
    }
    return JsonResponse(payload)
def polls_detail(request, pk):
    """Return one poll (404 when absent) as a JSON payload."""
    poll = get_object_or_404(Poll, pk=pk)
    details = {
        "question": poll.question,
        "created_by": poll.created_by.username,
        "pub_date": poll.pub_date,
    }
    return JsonResponse({"results": details})
|
[
"adeyeyetimothy33@gmail.com"
] |
adeyeyetimothy33@gmail.com
|
3af06f91d8ff98ed52d65a90a64b68decc2c5ee6
|
8e84a9e8ecd1c34655abb1c13953847d09a0cfa9
|
/flaskify/users/utils.py
|
28dfead42bc9769dda9dcdb2b41b8d19382b5b1b
|
[] |
no_license
|
pzrsa/flaskify
|
7e691003c52120024a118f1d4da423dcaf71bf65
|
2048f985ac146d705ccf0ebecc4aff7557941b90
|
refs/heads/main
| 2023-05-29T02:09:05.945742
| 2021-06-05T12:41:08
| 2021-06-05T12:41:08
| 342,721,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
import os
import secrets
from flask import current_app, url_for
from flask_mail import Message
from flaskify import mail
from PIL import Image
def save_picture(form_picture):
    """Save an uploaded profile picture as a 125x125 thumbnail.

    The file is stored under static/profile_pics with a random hex name
    (keeping the original extension); the generated filename is returned.
    """
    # A random name avoids collisions and hides the uploader's filename.
    _, extension = os.path.splitext(form_picture.filename)
    picture_fn = secrets.token_hex(8) + extension
    destination = os.path.join(
        current_app.root_path, 'static/profile_pics', picture_fn)

    thumbnail = Image.open(form_picture)
    thumbnail.thumbnail((125, 125))
    thumbnail.save(destination)

    return picture_fn
def send_reset_email(user):
    """Email *user* a password-reset link containing a signed token."""
    token = user.get_reset_token()
    msg = Message('Password Reset Request',
                  sender='noreply@demo.com', recipients=[user.email])
    # _external=True makes the link absolute so it works outside the app.
    msg.body = f"""To reset your password, please visit this link:
{url_for('users.reset_token', token=token, _external=True)}
If you did not make this request, don't worry. Just ignore this email and no changes will be made.
"""
    mail.send(msg)
|
[
"parsamesg@gmail.com"
] |
parsamesg@gmail.com
|
7f964d0809de5e49d94710f14427e6408190922d
|
4795320bf7013c9aabd10b48656e541335647f7f
|
/cov_search/migrations/0002_auto_20150629_0427.py
|
f2a7040a8646d29ebbaf6df46ea4d5f9099cb3af
|
[] |
no_license
|
dgrant/city_of_vancouver_programs_search
|
7295de3aad1ab3885c9b30e086230a6fff740791
|
33dfca03ccd434b39f01981890b195b384c1a2e0
|
refs/heads/master
| 2023-07-09T09:59:03.417301
| 2016-08-20T06:25:52
| 2016-08-20T06:25:52
| 38,360,943
| 0
| 0
| null | 2023-09-05T20:01:58
| 2015-07-01T09:03:22
|
Python
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Make Season.name unique (generated by Django's makemigrations)."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ('cov_search', '0001_initial'),
    ]

    operations = [
        # Adds a UNIQUE constraint to season names (max_length unchanged).
        migrations.AlterField(
            model_name='season',
            name='name',
            field=models.CharField(unique=True, max_length=30),
        ),
    ]
|
[
"davidgrant@gmail.com"
] |
davidgrant@gmail.com
|
e06dfef674f9aeccead93fa370a718e85f13e148
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-mgmt-monitor/azure/mgmt/monitor/models/sender_authorization_py3.py
|
fc908dab0e932162faf915093cfa43a7444cfaf5
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707
| 2018-03-29T17:16:15
| 2018-03-29T17:16:15
| 21,287,134
| 1
| 3
|
MIT
| 2019-10-25T15:56:00
| 2014-06-27T19:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 1,456
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SenderAuthorization(Model):
    """the authorization used by the user who has performed the operation that led
    to this event. This captures the RBAC properties of the event. These
    usually include the 'action', 'role' and the 'scope'.

    :param action: the permissible actions. For instance:
     microsoft.support/supporttickets/write
    :type action: str
    :param role: the role of the user. For instance: Subscription Admin
    :type role: str
    :param scope: the scope.
    :type scope: str
    """

    # Maps Python attribute names to wire-format keys/types used by msrest
    # (de)serialization.  Do not edit by hand: this file is AutoRest-generated.
    _attribute_map = {
        'action': {'key': 'action', 'type': 'str'},
        'role': {'key': 'role', 'type': 'str'},
        'scope': {'key': 'scope', 'type': 'str'},
    }

    def __init__(self, *, action: str=None, role: str=None, scope: str=None, **kwargs) -> None:
        super(SenderAuthorization, self).__init__(**kwargs)
        self.action = action
        self.role = role
        self.scope = scope
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
e09029b8bdce7a1f1555291c2e52c40ab50564d3
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/18/usersdata/63/6289/submittedfiles/maiormenor.py
|
c4ed26b8ed5ca3e8a1e42c244d6514c6f567a004
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 787
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
a = input('Digite o número 1: ')
b = input('Digite o número 2: ')
c = input('Digite o número 3: ')
d = input('Digite o número 4: ')
e = input('Digite o número 5: ')
# BUG FIX: the original printed the literal text '%d : %a' (etc.) because the
# format string was never interpolated, so the actual numbers never appeared.
# min()/max() also handle ties, which the strict < / > chains silently missed.
valores = [a, b, c, d, e]
print('menor: %d' % min(valores))
print('maior: %d' % max(valores))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
e6c0583c3b6f7c457ae78296d323d908036191f0
|
d697fe447506ce4c179d1c77144f84b525304cec
|
/ch4_populations_cdf.py
|
0352412cda206361b24ff49ccdf3b9eb2d8a9a51
|
[] |
no_license
|
leosartaj/thinkstats
|
a97004d1e906660877d1244844f22ab01b13d7d6
|
6345d88e113fa4b347bd45d519c0fb6813402cab
|
refs/heads/master
| 2021-01-10T08:44:09.168668
| 2015-12-07T04:28:05
| 2015-12-07T04:28:05
| 47,359,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
import sys
import populations
import Cdf
import myplot
def main(filename):
    """Plot the CDF of city populations on a Pareto-transformed scale."""
    data = populations.ReadData(filename)
    population_cdf = Cdf.MakeCdfFromList(data)
    myplot.Cdf(population_cdf, transform='pareto')
    myplot.Show()


if __name__ == '__main__':
    main(sys.argv[1])
|
[
"singhsartaj94@gmail.com"
] |
singhsartaj94@gmail.com
|
6b824b3f9703c6ad5c7f14d9619e1ae1dba1d2c3
|
17bd8f50704f5d8026d298b0c940aab68af591a0
|
/tests/test_common.py
|
2e006a77d00e10bda67230d546cefc19bece66e9
|
[] |
no_license
|
Pebaz/mini-rh-project
|
bdaeef15e1872c7411f274f2949c49d4cb0fdb50
|
d4b219ddb725cf53d08ed46abf3f973d06cc03ce
|
refs/heads/master
| 2020-07-01T18:45:49.437877
| 2019-08-15T02:12:33
| 2019-08-15T02:12:33
| 201,260,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
"""
This needs to be imported on every test file so that the test files can import
modules from the `rh` package.
"""
import sys

# Make the repository root importable so the `rh` package resolves when the
# tests are run from the project directory.
sys.path.append('.')

from rh.data import *

# Shared fixtures: computed once at import time and reused by the test files.
top_10_genres_by_profit = get_top_10_genres_by_profit()
top_10_actors_by_profit = get_top_10_actors_by_profit()
top_10_directors_by_profit = get_top_10_directors_by_profit()
|
[
"pebazium@gmail.com"
] |
pebazium@gmail.com
|
96519c0d5f429ea1fea9725f22f98c7d3c367fc6
|
6b96bf434ec11fd2d8b7870861da25d6336b365a
|
/tests/test_project.py
|
3a2911498487fbdf500c5d84c44b6947f97f25b9
|
[
"MIT"
] |
permissive
|
gzcf/polyaxon-schemas
|
68b908badef6bb06a1ad31af337a435ac87ed230
|
a381280cd7535f64158d52f0a9eff2afec997d90
|
refs/heads/master
| 2020-03-10T04:55:48.023279
| 2018-04-12T12:47:53
| 2018-04-12T12:47:53
| 129,204,842
| 0
| 0
|
MIT
| 2018-04-12T06:37:36
| 2018-04-12T06:37:36
| null |
UTF-8
|
Python
| false
| false
| 5,335
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import uuid
from unittest import TestCase
from marshmallow import ValidationError
from tests.utils import assert_equal_dict
from polyaxon_schemas.experiment import ExperimentConfig
from polyaxon_schemas.project import ExperimentGroupConfig, ProjectConfig
from polyaxon_schemas.utils import local_now
class TestProjectConfigs(TestCase):
    """Schema round-trip tests for ProjectConfig and ExperimentGroupConfig."""

    def test_validate_project_name_config(self):
        # Project names may not contain spaces; deserialization must fail.
        config_dict = {'name': 'test sdf', 'description': '', 'is_public': True}
        with self.assertRaises(ValidationError):
            ProjectConfig.from_dict(config_dict)

    def test_project_config(self):
        """from_dict/to_dict round trip plus light and humanized variants."""
        config_dict = {
            'name': 'test',
            'description': '',
            'is_public': True,
            'has_code': True,
            'has_tensorboard': True,
            'num_experiments': 0,
            'num_independent_experiments': 0,
            'num_experiment_groups': 0,
            'created_at': local_now().isoformat(),
            'updated_at': local_now().isoformat()
        }
        config = ProjectConfig.from_dict(config_dict)
        config_to_dict = config.to_dict()
        # Drop fields the schema adds with default values so the comparison
        # only covers the keys supplied above.
        config_to_dict.pop('experiment_groups', None)
        config_to_dict.pop('experiments', None)
        config_to_dict.pop('has_notebook', None)
        config_to_dict.pop('unique_name', None)
        config_to_dict.pop('user', None)
        config_to_dict.pop('uuid', None)
        assert config_to_dict == config_dict

        # The light dict omits these fields entirely.
        config_dict.pop('description')
        config_dict.pop('updated_at')
        config_dict.pop('has_code')
        config_to_dict = config.to_light_dict()
        config_to_dict.pop('has_notebook', None)
        config_to_dict.pop('unique_name', None)
        assert config_to_dict == config_dict

        # humanize_values renders timestamps as relative phrases.
        config_to_dict = config.to_dict(humanize_values=True)
        assert config_to_dict.pop('created_at') == 'a few seconds ago'
        assert config_to_dict.pop('updated_at') == 'a few seconds ago'

        config_to_dict = config.to_light_dict(humanize_values=True)
        assert config_to_dict.pop('created_at') == 'a few seconds ago'

    def test_project_experiments_and_groups_config(self):
        """Nested experiment and group configs survive a round trip."""
        uuid_value = uuid.uuid4().hex
        config_dict = {'name': 'test',
                       'description': '',
                       'is_public': True,
                       'experiment_groups': [
                           ExperimentGroupConfig(content='content',
                                                 uuid=uuid_value,
                                                 project=uuid_value).to_dict()],
                       'experiments': [
                           ExperimentConfig(config={},
                                            uuid=uuid_value,
                                            project=uuid_value).to_dict()]}
        config = ProjectConfig.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())

        # The light dict drops the nested collections and the description.
        config_dict.pop('description')
        config_dict.pop('experiment_groups')
        config_dict.pop('experiments')
        assert_equal_dict(config_dict, config.to_light_dict())

    def test_experiment_group_config(self):
        """ExperimentGroupConfig round trip, light dict, and humanized values."""
        uuid_value = uuid.uuid4().hex
        config_dict = {'sequence': 1,
                       'content': 'some content',
                       'uuid': uuid_value,
                       'project': uuid_value,
                       'project_name': 'user.name',
                       'num_experiments': 0,
                       'created_at': local_now().isoformat(),
                       'updated_at': local_now().isoformat(),
                       'experiments': [
                           ExperimentConfig(config={},
                                            uuid=uuid_value,
                                            experiment_group=uuid_value,
                                            project=uuid_value).to_dict()]}
        config = ExperimentGroupConfig.from_dict(config_dict)
        config_to_dict = config.to_dict()
        # Remove schema-added defaults before comparing with the input.
        config_to_dict.pop('concurrency', None)
        config_to_dict.pop('description', None)
        config_to_dict.pop('num_failed_experiments', None)
        config_to_dict.pop('num_pending_experiments', None)
        config_to_dict.pop('num_running_experiments', None)
        config_to_dict.pop('num_scheduled_experiments', None)
        config_to_dict.pop('num_stopped_experiments', None)
        config_to_dict.pop('num_succeeded_experiments', None)
        config_to_dict.pop('unique_name', None)
        config_to_dict.pop('user', None)
        assert config_to_dict == config_dict

        # The light dict keeps only summary fields.
        config_dict.pop('content')
        config_dict.pop('uuid')
        config_dict.pop('project')
        config_dict.pop('updated_at')
        config_dict.pop('sequence')
        config_dict.pop('experiments')
        config_dict.pop('project_name')
        assert_equal_dict(config_dict, config.to_light_dict())

        config_to_dict = config.to_dict(humanize_values=True)
        assert config_to_dict.pop('created_at') == 'a few seconds ago'
        assert config_to_dict.pop('updated_at') == 'a few seconds ago'

        config_to_dict = config.to_light_dict(humanize_values=True)
        assert config_to_dict.pop('created_at') == 'a few seconds ago'
|
[
"mouradmourafiq@gmail.com"
] |
mouradmourafiq@gmail.com
|
8c9dc482eca7116299722e5d57d347fbc4a3126c
|
cfd515074626477b689b5763d7e8cff42af04817
|
/kagenomise/products/vocabulary/clothing_sizes.py
|
cdce81b68c21b87136cab1515c946cbf0052a496
|
[] |
no_license
|
kagesenshi/kagenomise.products
|
cbe29c424b94e8917203254824a3e878aa69f439
|
3eb9e3a74d472e0bd7daa03e1dae4e5aa34a7818
|
refs/heads/master
| 2021-01-15T10:47:49.199010
| 2013-05-19T10:58:42
| 2013-05-19T10:58:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
from five import grok
from zope.schema.interfaces import IContextSourceBinder
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
from zope.schema.interfaces import IVocabularyFactory
from zope.component import getUtility
from z3c.formwidget.query.interfaces import IQuerySource
class ClothingSizes(grok.GlobalUtility):
    """Vocabulary factory providing the standard clothing size options."""

    grok.name('kagenomise.products.clothing_sizes')
    grok.implements(IVocabularyFactory)

    def __call__(self, context):
        sizes = ['S', 'M', 'L', 'XL', 'XXL', 'XXXL']
        return SimpleVocabulary.fromValues(sizes)
|
[
"izhar@inigo-tech.com"
] |
izhar@inigo-tech.com
|
c8334fd40f30e0a61067cbccc1a7767772698b3a
|
9f6fda1683fd3993c62b7c04cbbe54d3f8b932f2
|
/Demo/MysqlDemo/demo.py
|
e4fb2e7d0fc14756ace4fc3e8faed6dc79e57f53
|
[] |
no_license
|
dajun928/Demo
|
89f457bd324f1903ac217223a6f712af18e10bef
|
8318cee62b0508c76ab1106961c0247b5f511e94
|
refs/heads/master
| 2020-03-22T14:28:14.714335
| 2018-07-08T16:59:51
| 2018-07-08T16:59:51
| 140,182,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
# -*- coding:utf-8 -*-
import MySQLdb

# Open a connection and cursor against the local test database.
# NOTE(review): credentials are hard-coded for a local dev setup; move them to
# configuration before reusing this snippet.
conn = MySQLdb.connect(host='127.0.0.1', port=3306, user='root', passwd='1234', db='test')
cur = conn.cursor()
# Alternative using the `records` library, kept for reference:
# import records
#
# db = records.Database('mysql://root:1234@127.0.0.1:3306/test')
# rows = db.query('select * from stu')  # or db.query_file('sqls/active-users.sql')
# print rows.dataset
|
[
"1663177102@qq.com"
] |
1663177102@qq.com
|
99fab05bc0eea137e4094a3a35c913987d061372
|
74d6a9d33ff4374c3a7b22229819dbed34484f85
|
/app/models.py
|
2323749a4b96fbf227eeb2a1b05694d54f9d1670
|
[
"MIT"
] |
permissive
|
MichelAtieno/Personal-Blog
|
a7bcabb92542b6df4a671a529e7ea373a4197dd5
|
16657391b968e644b99fa0dde5d5a443881698da
|
refs/heads/master
| 2020-03-28T15:38:29.031020
| 2018-09-17T11:45:02
| 2018-09-17T11:45:02
| 148,611,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,731
|
py
|
from . import db, login_manager
from flask_login import UserMixin
from werkzeug.security import generate_password_hash,check_password_hash
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a user by primary key for the session."""
    return User.query.get(int(user_id))
class User(UserMixin,db.Model):
    """A registered user; passwords are stored only as salted hashes."""
    __tablename__ = 'users'
    # Primary key.
    id = db.Column(db.Integer,primary_key = True)
    username = db.Column(db.String(255))
    email = db.Column(db.String(255))
    # Hashed password (never the plaintext).
    pass_secure = db.Column(db.String(255))

    @property
    def password(self):
        """Write-only: reading the plaintext password is deliberately blocked."""
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        # Store only the salted hash of the plaintext.
        self.pass_secure = generate_password_hash(password)

    def verify_password(self,password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.pass_secure,password)

    def __repr__(self):
        return f'User {self.username}'
class Subscribers(db.Model):
    """An email address subscribed to blog updates."""
    __tablename__ = 'subscribers'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255))

    def save_subscriber(self):
        """Persist this subscriber."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_subscribers(cls):
        """Return every subscriber."""
        return Subscribers.query.all()

    def __repr__(self):
        return f'Subscribers {self.email}'
class BlogPost(db.Model):
    """A blog post authored by a user, with a dynamic comments relationship."""
    __tablename__ = 'blogs'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String())
    blog_post = db.Column(db.String)
    blog_pic = db.Column(db.String)
    photo_url = db.Column(db.String)
    comment = db.relationship('Comment', backref='blog', lazy='dynamic')
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))

    def save_blog(self):
        """Persist this post."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_blog(cls, id):
        """Return the post with the given id, or None."""
        blog = BlogPost.query.filter_by(id=id).first()
        return blog

    @classmethod
    def get_all_blogs(cls):
        """Return all posts, newest first.

        BUG FIX: order_by('-id') is Django syntax; SQLAlchemy interprets the
        string as raw SQL text (and rejects bare-string coercion in 1.4+).
        Use the column's .desc() for a proper descending order.
        """
        blogs = BlogPost.query.order_by(BlogPost.id.desc()).all()
        return blogs
class Comment(db.Model):
    """A comment left on a blog post."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String())
    comment_content = db.Column(db.String())
    comment_content = db.Column(db.String()) if False else comment_content  # noqa: keep single definition
    blog_id = db.Column(db.Integer, db.ForeignKey('blogs.id'))
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))

    def save_comment(self):
        """Persist this comment."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_single_comment(cls, id_blog, id):
        """Return one comment belonging to the given blog, or None."""
        comment = Comment.query.filter_by(blog_id=id_blog, id=id).first()
        return comment

    @classmethod
    def get_all_comments(cls, id):
        """Return all comments for a blog, newest first.

        BUG FIX: order_by('-id') is Django syntax; SQLAlchemy interprets the
        string as raw SQL text (and rejects bare-string coercion in 1.4+).
        Use the column's .desc() for a proper descending order.
        """
        comments = Comment.query.filter_by(blog_id=id).order_by(Comment.id.desc()).all()
        return comments
|
[
"mishqamish@gmail.com"
] |
mishqamish@gmail.com
|
086eb1de9a914373c017cdbde0d741dfa01d752e
|
a44833dae38c9bc81f1dd731b0056e0dfa2df8a7
|
/config/settings.py
|
6687fef7736814cf0257554b8905d0a230591d28
|
[
"MIT"
] |
permissive
|
leroyg/build-a-saas-app-with-flask
|
ca662b4c77a82336d52efda7a379a7f1c035077e
|
6e95c73ccbea621f68464bceb6ce913d26abfe60
|
refs/heads/master
| 2020-12-25T21:55:36.551160
| 2015-10-29T15:10:05
| 2015-10-29T15:10:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,299
|
py
|
# -*- coding: utf-8 -*-
from os import path
from datetime import timedelta
from celery.schedules import crontab
# This value is used for the following properties,
# it really should be your module's name.
# Database name
# Cache redis prefix
APP_NAME = 'catwatch'
# Absolute path to the project root (one directory above this config file).
APP_ROOT = path.join(path.dirname(path.abspath(__file__)), '..')
# App settings, most settings you see here will change in production.
# NOTE(review): SECRET_KEY/DEBUG here are development values -- override
# them in an instance config (instance/settings.py) for production.
SECRET_KEY = 'pickabettersecret'
DEBUG = True
TESTING = False
LOG_LEVEL = 'DEBUG'
# You will need to disable this to get Stripe's webhooks to work because you'll
# likely end up using tunneling tooling such as ngrok so the endpoints are
# reachable outside of your private network.
#
# The problem with this is, Flask won't allow any connections to the ngrok
# url with the SERVER_NAME set to localhost:8000. However if you comment out
# the SERVER_NAME below then webhooks will work but now url_for will not work
# inside of email templates.
#
# A better solution will turn up in the future.
SERVER_NAME = 'localhost:8000'
# Public build path. Files in this path will be accessible to the internet.
PUBLIC_BUILD_PATH = path.join(APP_ROOT, 'build', 'public')
# Flask-Webpack (assets) settings.
WEBPACK_MANIFEST_PATH = path.join(APP_ROOT, 'build', 'manifest.json')
# Babel i18n translations.
ACCEPT_LANGUAGES = ['en', 'es']
LANGUAGES = {
    'en': 'English',
    'es': u'Español'
}
BABEL_DEFAULT_LOCALE = 'en'
# Seed settings.
SEED_ADMIN_EMAIL = 'dev@localhost.com'
# Database settings,
# The username and password must match what's in docker-compose.yml for dev.
db_uri = 'postgresql://catwatch:bestpassword@localhost:5432/{0}'
SQLALCHEMY_DATABASE_URI = db_uri.format(APP_NAME)
SQLALCHEMY_POOL_SIZE = 5
# Cache settings.
CACHE_TYPE = 'redis'
CACHE_REDIS_URL = 'redis://localhost:6379/0'
CACHE_KEY_PREFIX = APP_NAME
# Celery settings.
# NOTE(review): these are the pre-Celery-4 uppercase setting names; confirm
# they match the Celery version pinned by the project.
CELERY_BROKER_URL = 'redis://localhost:6379/0'
CELERY_RESULT_BACKEND = 'redis://localhost:6379/0'
CELERY_REDIS_MAX_CONNECTIONS = 5
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
# Celery recurring scheduled tasks.
CELERYBEAT_SCHEDULE = {
    'mark-soon-to-expire-credit-cards': {
        'task': 'catwatch.blueprints.billing.tasks.mark_old_credit_cards',
        'schedule': crontab(hour=12, minute=1)
    },
    'mark-invalid-coupons': {
        'task': 'catwatch.blueprints.billing.tasks.expire_old_coupons',
        'schedule': crontab(hour=12, minute=2)
    },
}
# Login settings.
REMEMBER_COOKIE_DURATION = timedelta(days=90)
# Mail settings.
MAIL_DEFAULT_SENDER = 'support@catwatch.com'
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USE_SSL = False
MAIL_USERNAME = 'you@gmail.com'
MAIL_PASSWORD = 'awesomepassword'
# External end points.
ENDPOINT_CADVISOR = 'http://localhost:8080/containers/'
ENDPOINT_FLOWER = 'http://localhost:8081'
# Stripe settings.
#
# API keys, NOTE: you should NOT supply them in this config, put them
# in an instance config file, such as: instance/settings.py
#
# They are only listed below to act as documentation.
STRIPE_SECRET_KEY = None
STRIPE_PUBLISHABLE_KEY = None
# The Stripe API version to use. More information can be found on their docs:
# https://stripe.com/docs/api/python#versioning
STRIPE_API_VERSION = '2015-01-26'
# Documentation for each plan field below can be found on Stripe's API docs:
# https://stripe.com/docs/api#create_plan
#
# After supplying both API keys and plans, you must sync the plans by running:
# run stripe sync_plans
#
# If you are using TEST keys then the plans will be set to livemode: False
# If you are using REAL keys then the plans be set to livemode: True
#
# What the above means is, when you ship your app in production you must sync
# your plans at least once to activate them on your real keys.
STRIPE_PLANS = {
    '0': {
        'id': 'bronze',
        'name': 'Bronze',
        'amount': 100,
        'currency': 'usd',
        'interval': 'month',
        'interval_count': 1,
        'trial_period_days': 14,
        'statement_descriptor': 'BRONZE MONTHLY'
    },
    '1': {
        'id': 'gold',
        'name': 'Gold',
        'amount': 500,
        'currency': 'usd',
        'interval': 'month',
        'interval_count': 1,
        'trial_period_days': 14,
        'statement_descriptor': 'GOLD MONTHLY',
        'metadata': {
            'recommended': True
        }
    },
    '2': {
        'id': 'platinum',
        'name': 'Platinum',
        'amount': 1000,
        'currency': 'usd',
        'interval': 'month',
        'interval_count': 1,
        'trial_period_days': 14,
        'statement_descriptor': 'PLATINUM MONTHLY'
    }
}
# Twitter settings.
#
# API keys, NOTE: you should NOT supply them in this config, put them
# in an instance config file, such as: instance/settings.py
#
# They are only listed below to act as documentation.
TWITTER_CONSUMER_KEY = None
TWITTER_CONSUMER_SECRET = None
TWITTER_ACCESS_TOKEN = None
TWITTER_ACCESS_SECRET = None
# Broadcast (websocket server) settings.
#
# NOTE: you should NOT supply the PUSH_TOKEN/BROADCAST_INTERNAL_URL here,
# put them in an instance config file, such as: instance/settings.py
BROADCAST_PUBLIC_URL = 'http://localhost:4242/stream'
BROADCAST_INTERNAL_URL = None
BROADCAST_PUSH_TOKEN = None
|
[
"nick.janetakis@gmail.com"
] |
nick.janetakis@gmail.com
|
3fb6fa665a6e2c6aba0084149a226f9dde2c26d5
|
8466ebe93688e1dbf60fce7dec98caa8399877ae
|
/python/19-counting-sundays.py
|
36ac9e6c3bb5d746f46c6e9658438b2b106cd394
|
[] |
no_license
|
alextanhongpin/project-euler
|
1b524ea98234bd334fbd3b6a582c35713ab5e810
|
87311c95fdc7f3ef59344b5d8a0b0a0e70b439a1
|
refs/heads/master
| 2020-05-24T00:08:25.376061
| 2017-03-27T07:29:54
| 2017-03-27T07:29:54
| 84,804,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
"""
Problem 19 - Counting Sundays
"""
from datetime import date
# Monday is 0,
# Sunday is 6
def main():
    """Count the Sundays that fell on the first of a month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000) -- Project Euler #19.

    Each matching date is printed (preserving the original output), then
    the total is printed and returned.

    Returns:
        int: number of first-of-month Sundays in the range (171).
    """
    start = date(1901, 1, 1)
    end = date(2000, 12, 31)
    sundays = 0
    # Iterate every (year, month) pair directly instead of the original
    # single counter with `(month + 1) % 12` and `is 0` identity checks
    # (comparing ints with `is` is an anti-pattern).
    for year in range(start.year, end.year + 1):
        for month in range(1, 13):
            first = date(year, month, 1)
            # date.weekday(): Monday is 0 ... Sunday is 6.
            if first.weekday() == 6:
                print(first)
                sundays += 1
    print("The number of sundays that fell for the first of the month is:", sundays)
    return sundays
if __name__ == '__main__':
    import timeit

    ITERATIONS = 100
    MESSAGE = "Function takes {} s to complete."
    # Report the average wall-clock time of one main() call over
    # ITERATIONS runs (Python 3 print function instead of the old
    # Python 2 print statement).
    print(MESSAGE.format(timeit.timeit("main()",
                                       number=ITERATIONS,
                                       setup="from __main__ import main") / ITERATIONS))
|
[
"alextan220990@gmail.com"
] |
alextan220990@gmail.com
|
a8845fabc963784e3b3ec489a682a47ebbde83de
|
b33beb8b5e9bc7fb81b324028d532863109098e2
|
/DB/accounts/migrations/0022_auto_20210401_2142.py
|
ecb7dad14f351af96076e999e0a11c96651db445
|
[] |
no_license
|
forza111/DB
|
78da533a000111a16e651182856b3c0e6c52e2b0
|
102abd68e4a0d9589d5d9f0db62c05e345b51626
|
refs/heads/master
| 2023-06-02T21:27:33.088981
| 2021-06-25T09:05:44
| 2021-06-25T09:05:44
| 351,230,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
# Generated by Django 3.1.7 on 2021-04-01 21:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: alters UserLocation.user to a OneToOneField
    # with a ``location`` reverse accessor on the user model.  Generated
    # operations should not be edited by hand.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('accounts', '0021_auto_20210401_2127'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userlocation',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='location', to=settings.AUTH_USER_MODEL, verbose_name='Пользователь'),
        ),
    ]
|
[
"nikitos.ionkin@yandex.ru"
] |
nikitos.ionkin@yandex.ru
|
3aa7120556b2df4ab2793f3995f23cc5247b7b05
|
4e30d990963870478ed248567e432795f519e1cc
|
/tests/models/validators/v3_1_1/jsd_a6c71a1e4d2597ea1b5533e9f1b438f.py
|
45753551e1582a4d3a228701b695cdb15342e713
|
[
"MIT"
] |
permissive
|
CiscoISE/ciscoisesdk
|
84074a57bf1042a735e3fc6eb7876555150d2b51
|
f468c54998ec1ad85435ea28988922f0573bfee8
|
refs/heads/main
| 2023-09-04T23:56:32.232035
| 2023-08-25T17:31:49
| 2023-08-25T17:31:49
| 365,359,531
| 48
| 9
|
MIT
| 2023-08-25T17:31:51
| 2021-05-07T21:43:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,318
|
py
|
# -*- coding: utf-8 -*-
"""Identity Services Engine getActiveList data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from builtins import *
import fastjsonschema
from ciscoisesdk.exceptions import MalformedRequest
class JSONSchemaValidatorA6C71A1E4D2597EA1B5533E9F1B438F(object):
    """getActiveList request schema definition.

    Auto-generated validator: compiles the JSON schema once at construction
    and checks requests against it, requiring an integer
    ``noOfActiveSession`` property.
    """
    def __init__(self):
        super(JSONSchemaValidatorA6C71A1E4D2597EA1B5533E9F1B438F, self).__init__()
        # Pre-compile the schema; the .replace() strips the 16-space source
        # indentation from the triple-quoted literal before json.loads.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "$schema": "http://json-schema.org/draft-04/schema#",
                "properties": {
                "noOfActiveSession": {
                "type": "integer"
                }
                },
                "required": [
                "noOfActiveSession"
                ],
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        # Wrap schema violations in the SDK's MalformedRequest so callers
        # never see fastjsonschema's own exception type.
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
|
[
"wastorga@altus.co.cr"
] |
wastorga@altus.co.cr
|
8ff423280f7dea2bb5dfcc49959df70bda9e3433
|
325dab6d1300cdc8c77e0c68a7b588e16acd8ef1
|
/nagerep/urls.py
|
a0cf610f017dd110b86d3cd93002d88e1b81db0d
|
[] |
no_license
|
fishilico/nagerep
|
e003c683929b10d4a8c7af7c5bfa2f39a2a19922
|
75dcecc4f117420ee195a56c86ff6856175ca48f
|
refs/heads/master
| 2020-05-17T05:37:31.967689
| 2013-05-20T19:31:22
| 2013-05-20T19:31:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 157
|
py
|
from django.conf.urls import patterns, include, url
# Enable the admin
from django.contrib import admin
admin.autodiscover()

# ``patterns('', ...)`` was deprecated in Django 1.8 and removed in 1.10;
# a plain list is the forward-compatible spelling of this (empty) urlconf
# and resolves identically.
urlpatterns = []
|
[
"nicolas.iooss_git@polytechnique.org"
] |
nicolas.iooss_git@polytechnique.org
|
48d29be5493a6b11086a4272192a6d137a07b492
|
7b7bfbfebd627a3ccfdd52bb7164fa4f94cda7fc
|
/optic_store/patches/v0_8/correct_return_dn_ref_si.py
|
f5a992f71759f0a3e0ec9445c35129ae71e60df4
|
[
"MIT"
] |
permissive
|
f-9t9it/optic_store
|
d117b7ef7c4107ec15d8194fc57d66a18aff5945
|
4682ae99cdb2cbfb1ff99196398d7379b4b6c8f1
|
refs/heads/master
| 2022-07-01T10:29:54.783550
| 2022-06-21T14:34:40
| 2022-06-21T14:34:40
| 171,165,708
| 23
| 43
|
NOASSERTION
| 2022-06-21T14:21:16
| 2019-02-17T19:58:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,221
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, 9T9IT and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """Data patch: for return Delivery Notes tied to return Sales Invoices,
    copy ``against_sales_invoice`` and ``si_detail`` onto each return item
    from the item with the same idx on the return-against Delivery Note."""
    rows = frappe.db.sql(
        """
        SELECT
            dni.name AS name,
            fdni.against_sales_invoice AS against_sales_invoice,
            fdni.si_detail AS si_detail
        FROM `tabDelivery Note Item` AS dni
        LEFT JOIN `tabDelivery Note` AS dn ON dn.name = dni.parent
        LEFT JOIN `tabSales Invoice` AS si ON si.name = dni.against_sales_invoice
        LEFT JOIN `tabDelivery Note Item` AS fdni
            ON fdni.parent = dn.return_against AND fdni.idx = dni.idx
        WHERE
            dn.docstatus < 2 AND
            dn.is_return = 1 AND
            si.is_return = 1
        """,
        as_dict=1,
    )
    for row in rows:
        # Write both linkage fields for this item, in the same order as before.
        for field in ("against_sales_invoice", "si_detail"):
            frappe.db.set_value(
                "Delivery Note Item", row.get("name"), field, row.get(field)
            )
|
[
"sun@libermatic.com"
] |
sun@libermatic.com
|
69b48fa7fb8ae92349f5d9fc86bc42deadecbc82
|
8ed19eff422d90d62bd735d420ed85d63d0d689b
|
/hw1_code/k.py
|
a71784de23250f52226441083a4eebcbbba02df7
|
[] |
no_license
|
fredshentu/cs189
|
4efd40c4126434b225e59da45a4f40a1f64d8496
|
03f3dfdf4cb0f7b98fc12fb32cd0d648bc144004
|
refs/heads/master
| 2021-01-10T22:54:06.812233
| 2016-10-09T02:45:27
| 2016-10-09T02:45:27
| 70,364,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
from mnist import MNIST
import sklearn.metrics as metrics
import numpy as np
NUM_CLASSES = 10
def load_dataset():
    """Load MNIST from ./data/, scale pixels to [0, 1], and return
    feature-major arrays.

    Returns:
        ((X_train, labels_train), (X_test, labels_test)) where each X has
        samples as columns (one 784-pixel image per column).
    """
    mndata = MNIST('./data/')
    X_train, labels_train = map(np.array, mndata.load_training())
    X_test, labels_test = map(np.array, mndata.load_testing())
    # Scale raw 0-255 pixel intensities into [0, 1].
    X_train = X_train / 255.0
    X_test = X_test / 255.0
    # Transpose so samples become columns.  The original called
    # np.matrix.transpose() on plain ndarrays, which leans on the
    # deprecated np.matrix class; .T is the supported equivalent.
    X_train = X_train.T
    X_test = X_test.T
    return (X_train, labels_train), (X_test, labels_test)
def train(X_train, y_train, reg=0.1):
    """Closed-form ridge regression: W = (X X^T + reg*I)^-1 X Y.

    Args:
        X_train: (d, n) array with samples as columns.
        y_train: length-n sequence of integer labels in 0..9.
        reg: L2 regularization strength.  The original signature accepted
            ``reg`` but silently ignored it and hard-coded 0.1; the default
            is now 0.1 so existing callers get identical behavior while the
            parameter actually works.

    Returns:
        (d, 10) weight matrix.
    """
    d = X_train.shape[0]
    gram = X_train.dot(X_train.T)
    # Size the identity from the data instead of the hard-coded 784 so the
    # solver generalizes beyond MNIST-shaped input.
    inverse = np.linalg.inv(gram + reg * np.identity(d))
    return inverse.dot(X_train.dot(one_hot(y_train)))


def one_hot(labels_train):
    """Convert labels 0..9 to one-hot rows; returns an (n, 10) array
    (each row is a standard basis vector of R^10)."""
    return np.array([[1 if i == labels_train[k] else 0 for i in range(10)]
                     for k in range(len(labels_train))])
def predict(model, X):
    """Return the predicted class index for every sample (column) of X."""
    scores = model.T.dot(X)  # (10, n) class scores, one column per sample
    return [np.argmax(column) for column in scores.T]
if __name__ == "__main__":
    # Train the closed-form model on MNIST and report accuracy on both splits.
    (X_train, labels_train), (X_test, labels_test) = load_dataset()
    model = train(X_train, labels_train)
    # NOTE(review): y_train / y_test are computed but never used below --
    # likely left over from an earlier metrics variant; confirm before removing.
    y_train = one_hot(labels_train)
    y_test = one_hot(labels_test)
    pred_labels_train = predict(model, X_train)
    pred_labels_test = predict(model, X_test)
    print("Train accuracy: {0}".format(metrics.accuracy_score(labels_train, pred_labels_train)))
    print("Test accuracy: {0}".format(metrics.accuracy_score(labels_test, pred_labels_test)))
|
[
"fred960315@gmail.com"
] |
fred960315@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.