blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dd96799bc0489040b759bd12cf6000dca548499c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_104/292.py | 374308be6bc85def5c0027de6a9827409c83fe69 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | import sys
def parse(filename):
    """Yield one ``(N, S)`` tuple per test case read from *filename*.

    The file starts with the number of test cases T; each of the next T
    lines holds N followed by the N integers of the set S.
    """
    # `with` guarantees the handle is closed even if a line fails to parse
    # (the original leaked the open file on any exception).
    with open(filename) as f:
        T = int(f.readline())
        for _ in range(T):
            terms = f.readline().split()
            N = int(terms[0])
            S = [int(a) for a in terms[1:]]
            yield N, S
def solve(N, S):
    """Split the distinct integers in S into two subsets with equal sums.

    Encodes each element's assignment as a base-10 digit string: digit i is
    1 if S[i] goes to the left set, 2 if it goes to the right set, 0 if it
    is unused.  `diff` maps (right-sum minus left-sum style difference) ->
    encoded assignment; the search stops when adding some S[i] closes the
    difference to zero.
    NOTE(review): the outer `while` assumes a closing element is found on
    the first pass over range(n, N); otherwise it would loop forever.
    """
    S = sorted(S)
    n = 1
    diff = {}
    diff[S[0]] = 2  # S[0] starts in the right set (digit 2 at position 0)
    solved = False
    while not solved:
        for i in range(n, N):
            s = S[i]
            if s in diff:
                # Placing s on the left cancels an existing difference of +s.
                diff[s] += 1*(10**i) # add left
                res = diff[s]
                solved = True
                break
            if -s in diff:
                #print diff[-s]
                # Placing s on the right cancels a difference of -s.
                diff[-s] += 2*(10**i) # add right
                res = diff[-s]
                solved = True
                break
            # No closing element yet: extend every partial assignment both ways.
            toadd = []
            for key in diff:
                toadd.append((key + s, diff[key] + 2*(10**i))) # add right
                toadd.append((key - s, diff[key] + 1*(10**i))) # add left
            for k, v in toadd:
                diff[k] = v
    # Decode the digit string: reverse it so position i lines up with S[i].
    res = str(res)
    set_left = []
    set_right = []
    order = []
    for i in range(len(res)-1, -1, -1):
        order.append(res[i])
    for i in range(len(order)):
        if order[i] == '1':
            set_left.append(S[i])
        elif order[i] == '2':
            set_right.append(S[i])
    # Sanity checks: equal sums and disjoint subsets.
    assert sum(set_left) == sum(set_right)
    #print sum(set_left) == sum(set_right), sum(set_left), sum(set_right)
    assert not set(set_left).intersection(set_right)
    return '\n' + ' '.join([str(a) for a in set_left]) + '\n' + ' '.join([str(b) for b in set_right])
def main(filename):
    """Print the Code Jam answer ("Case #k: ...") for every case in *filename*."""
    case = 1
    for data in parse(filename):
        res = solve(*data)
        # Python 2 print statement (this file predates Python 3).
        print "Case #%d:" % case, res
        case += 1
if __name__ == '__main__':
#main('a.in')
main(sys.argv[1])
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
7355eee9f76d9698397be0f4f57ef42ff8fb4e62 | 61aa319732d3fa7912e28f5ff7768498f8dda005 | /src/arch/x86/isa/insts/romutil.py | fd06197193ecb448ef218fc59cb086a4a6c8712e | [
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] | permissive | TeCSAR-UNCC/gem5-SALAM | 37f2f7198c93b4c18452550df48c1a2ab14b14fb | c14c39235f4e376e64dc68b81bd2447e8a47ff65 | refs/heads/main | 2023-06-08T22:16:25.260792 | 2023-05-31T16:43:46 | 2023-05-31T16:43:46 | 154,335,724 | 62 | 22 | BSD-3-Clause | 2023-05-31T16:43:48 | 2018-10-23T13:45:44 | C++ | UTF-8 | Python | false | false | 7,367 | py | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
intCodeTemplate = '''
def rom
{
# This vectors the CPU into an interrupt handler in long mode.
# On entry, t1 is set to the vector of the interrupt and t7 is the current
# ip. We need that because rdip returns the next ip.
extern %(startLabel)s:
#
# Get the 64 bit interrupt or trap gate descriptor from the IDT
#
# Load the gate descriptor from the IDT
slli t4, t1, 4, dataSize=8
ld t2, idtr, [1, t0, t4], 8, dataSize=8, addressSize=8, atCPL0=True
ld t4, idtr, [1, t0, t4], dataSize=8, addressSize=8, atCPL0=True
# Make sure the descriptor is a legal gate.
chks t1, t4, %(gateCheckType)s
#
# Get the target CS descriptor using the selector in the gate
# descriptor.
#
srli t10, t4, 16, dataSize=8
andi t5, t10, 0xF8, dataSize=8
andi t0, t10, 0x4, flags=(EZF,), dataSize=2
br rom_local_label("%(startLabel)s_globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t5], dataSize=8, addressSize=8, atCPL0=True
br rom_local_label("%(startLabel)s_processDescriptor")
%(startLabel)s_globalDescriptor:
ld t3, tsg, [1, t0, t5], dataSize=8, addressSize=8, atCPL0=True
%(startLabel)s_processDescriptor:
chks t10, t3, IntCSCheck, dataSize=8
wrdl hs, t3, t10, dataSize=8
# Stick the target offset in t9.
wrdh t9, t4, t2, dataSize=8
#
# Figure out where the stack should be
#
# Record what we might set the stack selector to.
rdsel t11, ss
# Check if we're changing privelege level. At this point we can assume
# we're going to a DPL that's less than or equal to the CPL.
rdattr t10, hs, dataSize=8
andi t10, t10, 3, dataSize=8
rdattr t5, cs, dataSize=8
andi t5, t5, 0x3, dataSize=8
sub t0, t5, t10, flags=(EZF,), dataSize=8
# We're going to change priviledge, so zero out the stack selector. We
# need to let the IST have priority so we don't branch yet.
mov t11, t0, t0, flags=(nCEZF,)
# Check the IST field of the gate descriptor
srli t12, t4, 32, dataSize=8
andi t12, t12, 0x7, dataSize=8
subi t0, t12, 1, flags=(ECF,), dataSize=8
br rom_local_label("%(startLabel)s_istStackSwitch"), flags=(nCECF,)
br rom_local_label("%(startLabel)s_cplStackSwitch"), flags=(nCEZF,)
# If we're here, it's because the stack isn't being switched.
# Set t6 to the new aligned rsp.
mov t6, t6, rsp, dataSize=8
br rom_local_label("%(startLabel)s_stackSwitched")
%(startLabel)s_istStackSwitch:
ld t6, tr, [8, t12, t0], 0x1c, dataSize=8, addressSize=8, atCPL0=True
br rom_local_label("%(startLabel)s_stackSwitched")
%(startLabel)s_cplStackSwitch:
# Get the new rsp from the TSS
ld t6, tr, [8, t10, t0], 4, dataSize=8, addressSize=8, atCPL0=True
%(startLabel)s_stackSwitched:
andi t6, t6, 0xF0, dataSize=1
subi t6, t6, 40 + %(errorCodeSize)d, dataSize=8
##
## Point of no return.
## We're now going to irrevocably modify visible state.
## Anything bad that's going to happen should have happened by now or will
## happen right now.
##
wrip t0, t9, dataSize=8
#
# Set up the target code segment. Do this now so we have the right
# permissions when setting up the stack frame.
#
srli t5, t4, 16, dataSize=8
andi t5, t5, 0xFF, dataSize=8
wrdl cs, t3, t5, dataSize=8
# Tuck away the old CS for use below
limm t10, 0, dataSize=8
rdsel t10, cs, dataSize=2
wrsel cs, t5, dataSize=2
# Check that we can access everything we need to on the stack
ldst t0, hs, [1, t0, t6], dataSize=8, addressSize=8
ldst t0, hs, [1, t0, t6], \
32 + %(errorCodeSize)d, dataSize=8, addressSize=8
#
# Build up the interrupt stack frame
#
# Write out the contents of memory
%(errorCodeCode)s
st t7, hs, [1, t0, t6], %(errorCodeSize)d, dataSize=8, addressSize=8
st t10, hs, [1, t0, t6], 8 + %(errorCodeSize)d, dataSize=8, addressSize=8
rflags t10, dataSize=8
st t10, hs, [1, t0, t6], 16 + %(errorCodeSize)d, dataSize=8, addressSize=8
st rsp, hs, [1, t0, t6], 24 + %(errorCodeSize)d, dataSize=8, addressSize=8
rdsel t5, ss, dataSize=2
st t5, hs, [1, t0, t6], 32 + %(errorCodeSize)d, dataSize=8, addressSize=8
# Set the stack segment
mov rsp, rsp, t6, dataSize=8
wrsel ss, t11, dataSize=2
#
# Adjust rflags which is still in t10 from above
#
# Set IF to the lowest bit of the original gate type.
# The type field of the original gate starts at bit 40.
# Set the TF, NT, and RF bits. We'll flip them at the end.
limm t6, (1 << 8) | (1 << 14) | (1 << 16), dataSize=8
or t10, t10, t6, dataSize=8
srli t5, t4, 40, dataSize=8
srli t7, t10, 9, dataSize=8
xor t5, t7, t5, dataSize=8
andi t5, t5, 1, dataSize=8
slli t5, t5, 9, dataSize=8
or t6, t5, t6, dataSize=8
# Put the results into rflags
wrflags t6, t10
eret
};
'''
microcode = \
intCodeTemplate % {\
"startLabel" : "longModeInterrupt",
"gateCheckType" : "IntGateCheck",
"errorCodeSize" : 0,
"errorCodeCode" : ""
} + \
intCodeTemplate % {\
"startLabel" : "longModeSoftInterrupt",
"gateCheckType" : "SoftIntGateCheck",
"errorCodeSize" : 0,
"errorCodeCode" : ""
} + \
intCodeTemplate % {\
"startLabel" : "longModeInterruptWithError",
"gateCheckType" : "IntGateCheck",
"errorCodeSize" : 8,
"errorCodeCode" : '''
st t15, hs, [1, t0, t6], dataSize=8, addressSize=8
'''
} + \
'''
def rom
{
# This vectors the CPU into an interrupt handler in legacy mode.
extern legacyModeInterrupt:
panic "Legacy mode interrupts not implemented (in microcode)"
eret
};
def rom
{
extern initIntHalt:
rflags t1
limm t2, "~IFBit"
and t1, t1, t2
wrflags t1, t0
halt
eret
};
'''
| [
"sroger48@uncc.edu"
] | sroger48@uncc.edu |
9d52e5d7627216592f1ca7759607b7855e252afa | 853d4cec42071b76a80be38c58ffe0fbf9b9dc34 | /venv/Lib/site-packages/pandas/io/feather_format.py | 635a5726a6f7acec018789cbc6a3fc9d8382277c | [] | no_license | msainTesting/TwitterAnalysis | 5e1646dbf40badf887a86e125ef30a9edaa622a4 | b1204346508ba3e3922a52380ead5a8f7079726b | refs/heads/main | 2023-08-28T08:29:28.924620 | 2021-11-04T12:36:30 | 2021-11-04T12:36:30 | 424,242,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,704 | py | """ feather-format compat """
from distutils.version import LooseVersion
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import deprecate_kwarg
from pandas import DataFrame, Int64Index, RangeIndex
from pandas.io.common import _stringify_path
def to_feather(df, path):
    """
    Write a DataFrame to the feather-format.

    Parameters
    ----------
    df : DataFrame
    path : string file path, or file-like object

    Raises
    ------
    ValueError
        If ``df`` is not a DataFrame, has a non-default or named index, or
        has non-string column names (feather serializes none of these).
    """
    import_optional_dependency("pyarrow")
    from pyarrow import feather
    path = _stringify_path(path)
    if not isinstance(df, DataFrame):
        raise ValueError("feather only support IO with DataFrames")
    valid_types = {"string", "unicode"}
    # validate index
    # --------------
    # validate that we have only a default index
    # raise on anything else as we don't serialize the index
    if not isinstance(df.index, Int64Index):
        raise ValueError(
            "feather does not support serializing {} "
            "for the index; you can .reset_index()"
            "to make the index into column(s)".format(type(df.index))
        )
    # The index must be exactly 0..len(df)-1 (the default RangeIndex).
    if not df.index.equals(RangeIndex.from_range(range(len(df)))):
        raise ValueError(
            "feather does not support serializing a "
            "non-default index for the index; you "
            "can .reset_index() to make the index "
            "into column(s)"
        )
    if df.index.name is not None:
        raise ValueError(
            "feather does not serialize index meta-data on a " "default index"
        )
    # validate columns
    # ----------------
    # must have value column names (strings only)
    if df.columns.inferred_type not in valid_types:
        raise ValueError("feather must have string column names")
    feather.write_feather(df, path)
@deprecate_kwarg(old_arg_name="nthreads", new_arg_name="use_threads")
def read_feather(path, columns=None, use_threads=True):
    """
    Load a feather-format object from the file path.

    .. versionadded 0.20.0

    Parameters
    ----------
    path : str, path object or file-like object
        Any valid string path is acceptable. The string could be a URL. Valid
        URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be:
        ``file://localhost/path/to/table.feather``.
        If you want to pass in a path object, pandas accepts any
        ``os.PathLike``.
        By file-like object, we refer to objects with a ``read()`` method,
        such as a file handler (e.g. via builtin ``open`` function)
        or ``StringIO``.
    columns : sequence, default None
        If not provided, all columns are read.

        .. versionadded 0.24.0
    nthreads : int, default 1
        Number of CPU threads to use when reading to pandas.DataFrame.

        .. versionadded 0.21.0
        .. deprecated 0.24.0
    use_threads : bool, default True
        Whether to parallelize reading using multiple threads.

        .. versionadded 0.24.0

    Returns
    -------
    type of object stored in file
    """
    pyarrow = import_optional_dependency("pyarrow")
    from pyarrow import feather
    path = _stringify_path(path)
    if LooseVersion(pyarrow.__version__) < LooseVersion("0.11.0"):
        # pyarrow < 0.11 only understands the old `nthreads` argument, which
        # must be a positive int, so clamp the (possibly bool) value to >= 1.
        int_use_threads = int(use_threads)
        if int_use_threads < 1:
            int_use_threads = 1
        return feather.read_feather(path, columns=columns, nthreads=int_use_threads)
    return feather.read_feather(path, columns=columns, use_threads=bool(use_threads))
| [
"msaineti@icloud.com"
] | msaineti@icloud.com |
cfd5c37c5318b16b361cf515c336bc41345219ab | db575f3401a5e25494e30d98ec915158dd7e529b | /BIO_Stocks/BXRX.py | 46447527ad61d5da209cf7ceddf1a4ccefe6b472 | [] | no_license | andisc/StockWebScraping | b10453295b4b16f065064db6a1e3bbcba0d62bad | 41db75e941cfccaa7043a53b0e23ba6e5daa958a | refs/heads/main | 2023-08-08T01:33:33.495541 | 2023-07-22T21:41:08 | 2023-07-22T21:41:08 | 355,332,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,123 | py | import requests
from lxml import html
from bs4 import BeautifulSoup
import os
from datetime import date, datetime
from ValidationTools import validateday
from Database_Connections import InsertData, Insert_Logging
def main(id_control):
    """Scrape the newest Baudax Bio press release and store it if dated today.

    Parameters
    ----------
    id_control : run identifier, forwarded to Insert_Logging on failure.
    """
    try:
        url = 'https://www.baudaxbio.com/news-and-investors/press-releases'
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
        result = requests.get(url, headers=headers)
        #print(result.content.decode())
        html_content = result.content.decode()
        soup = BeautifulSoup(html_content, 'html.parser')
        #print(soup)
        articles = soup.findAll('article', attrs={'class':'media'})
        # get first article
        FIRST_ARTICLE = articles[0]
        article_date = FIRST_ARTICLE.find('div', attrs={'class':'date'})
        article_desc = FIRST_ARTICLE.find('h2', attrs={'class':'media-heading'})
        v_article_date = article_date.text.lstrip().rstrip()
        #if the process find any article with the today date
        istoday, v_art_date = validateday(v_article_date)
        if (istoday == True):
            # The ticker symbol is derived from this module's file name (BXRX).
            v_ticker = os.path.basename(__file__).replace(".py", "")
            v_url = article_desc.a.get('href')
            v_description = article_desc.text.lstrip().rstrip()
            now = datetime.now()
            print("URL: " + v_url)
            print("DESCRIPTION: " + v_description)
            print("ARTICLE_DATE: " + str(now))
            # Insert articles
            # Relative article links are stored with the listing-page URL instead.
            if "https://" in v_url:
                InsertData(v_ticker, v_description, v_url, v_art_date)
            else:
                InsertData(v_ticker, v_description, url, v_art_date)
    except Exception:
        # Best-effort: any scraping/DB error is logged and swallowed so the
        # exception does not propagate to the caller.
        error_message = "Entrou na excepção ao tratar " + os.path.basename(__file__) + "..."
        print(error_message)
        Insert_Logging(id_control, 'Detail', error_message)
        pass
if __name__ == "__main__":
main()
| [
"andisc_3@hotmail.com"
] | andisc_3@hotmail.com |
811ffae550dc896665c6c8266f9216f0d4b6f7b8 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/457/usersdata/334/109728/submittedfiles/estatistica.py | 1f56e0531e646010deb57a6bf3fef7d12e988bca | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | # -*- coding: utf-8 -*-
def media(lista):
    """Return the arithmetic mean of the numbers in `lista`."""
    return sum(lista) / len(lista)
#Baseado na função acima, escreva a função para calcular o desvio padrão de uma lista
def desvio_padrao(lista):
    """Return the sample standard deviation of the numbers in `lista`.

    Uses the (n - 1) denominator (Bessel-corrected, sample form).
    NOTE(review): the original body read undefined globals (m, n, ma, x),
    divided each squared deviation by 2.0 and returned nothing; this is a
    from-scratch reimplementation -- confirm the (n - 1) convention matches
    the course's expected output.
    """
    med = sum(lista) / len(lista)
    soma = 0
    for valor in lista:
        soma = soma + (valor - med) ** 2
    return (soma / (len(lista) - 1)) ** 0.5
#Por último escreva o programa principal, que pede a entrada e chama as funções criadas.
# Read an m x n matrix (one value per line) and print, for each row, its
# mean followed by its standard deviation.
m = int(input())
n = int(input())
ma = []
for _ in range(m):
    linha = []
    for _ in range(n):
        linha.append(int(input()))
    ma.append(linha)
for i in range(m):
    # Fixes two bugs: the original passed `lista[i]` (indexing into the last
    # row read, i.e. a single number) instead of the row `ma[i]`, and it
    # called the misspelled, undefined name `devio_padrao`.
    x = media(ma[i])
    print(x)
    y = desvio_padrao(ma[i])
print(y) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
6a6a2fc0e0119bdb9209af893f2317ab75470af5 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/client/script/ui/services/alliances/all_cso_members.py | 5e188ef50580092dad76900808e5a21c13ee6d36 | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,207 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\services\alliances\all_cso_members.py
import allianceObject
import blue
class AllianceMembersO(allianceObject.base):
    """Client-side view of the alliance member list.

    Keeps a lazily-fetched cache (`self.members`) of the member rowset and
    updates it in place from OnAllianceMemberChanged notifications.
    Note: Python 2 code (`has_key`, `iterkeys`).
    """
    __guid__ = 'allianceObject.members'
    def __init__(self, boundObject):
        allianceObject.base.__init__(self, boundObject)
        self.members = None  # lazily-populated member rowset cache
    def DoSessionChanging(self, isRemote, session, change):
        # Switching alliance invalidates the cached member list.
        if 'allianceid' in change:
            self.members = None
    def GetMembers(self):
        # Fetch from the remote moniker on first use, then serve the cache.
        if self.members is None:
            self.members = self.GetMoniker().GetMembers()
        return self.members
    def DeclareExecutorSupport(self, corpID):
        # In a dictatorial alliance only the executor corp may be supported.
        current = self.base__alliance.GetAlliance()
        if current.dictatorial and corpID != current.executorCorpID:
            raise UserError('CanNotChooseExecutorAsDictatorial')
        self.GetMoniker().DeclareExecutorSupport(corpID)
    def DeleteMember(self, corpID):
        self.GetMoniker().DeleteMember(corpID)
    def OnAllianceMemberChanged(self, allianceID, corpID, change):
        """Apply an add/remove/update notification to the cached rowset.

        `change` maps column name -> pair; index [1] below is the value that
        gets stored (presumably the new value -- confirm against the server
        side of this notification).
        """
        if allianceID != eve.session.allianceid:
            return
        bAdd, bRemove = self.GetAddRemoveFromChange(change)
        if self.members is not None:
            if bAdd:
                # Adding requires a value for every column to build the DBRow.
                if len(change) != len(self.members.columns):
                    self.LogWarn('IncorrectNumberOfColumns ignoring change as Add change:', change)
                    return
                line = []
                for columnName in self.members.columns:
                    line.append(change[columnName][1])
                self.members[corpID] = blue.DBRow(self.members.header, line)
            else:
                if not self.members.has_key(corpID):
                    return
                if bRemove:
                    del self.members[corpID]
                else:
                    # In-place update of only the changed columns.
                    member = self.members[corpID]
                    for columnName in change.iterkeys():
                        setattr(member, columnName, change[columnName][1])
        # Forward the notification to the corp UI service regardless.
        sm.GetService('corpui').OnAllianceMemberChanged(allianceID, corpID, change)
    def ResetMembers(self):
        self.members = None
| [
"le02005@163.com"
] | le02005@163.com |
d6bc05c67159a03aa159d05249257cb528fe233c | 7a5966941b81b10ba2cc463900675558e7270b76 | /annotypes/py3_examples/reusecls.py | 1abc435dd414ca56af62406891dd2420627f6ed0 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | dls-controls/annotypes | 5b32b579226567fc2f0e1a79cdb9be0d1b483a15 | 3dac2e2970a67f667b707b0d248f58ac9a06551b | refs/heads/master | 2022-03-14T17:13:21.576140 | 2019-11-25T12:31:11 | 2019-11-25T12:31:11 | 110,128,413 | 2 | 1 | Apache-2.0 | 2019-11-25T12:07:57 | 2017-11-09T14:54:23 | Python | UTF-8 | Python | false | false | 450 | py | import time
from annotypes import WithCallTypes, add_call_types, Anno
from .simple import Simple
# Reuse the Simple parameter bundle as an annotated type alias.
with Anno("Parameters to take"):
    ASimple = Simple
class ReuseCls(WithCallTypes):
    """Demonstrates reusing the `Simple` parameter bundle across methods."""
    @add_call_types
    def validate(self, params: ASimple) -> ASimple:
        """Clamp exposure up to a minimum of 0.4 and return the params."""
        if params.exposure < 0.4:
            params.exposure = 0.4
        return params
    @add_call_types
    def configure(self, params: ASimple):
        """Sleep for the configured exposure time."""
        time.sleep(params.exposure)
| [
"tom.cobb@diamond.ac.uk"
] | tom.cobb@diamond.ac.uk |
00467c651a60f2fa2f80057b2492d873ae69e492 | c0239d75a8199ec84ad683f945c21785c1b59386 | /dingtalk/api/rest/OapiUserGetUseridByUnionidRequest.py | ea73cda91b282e538f377e91ead58a7364a714c2 | [] | no_license | luss613/oauth_dingtalk | 9f253a75ce914c577dbabfb84e97fd883e80e04b | 1e2554642d2b16c642a031670d08efa4a74e8252 | refs/heads/master | 2023-04-23T01:16:33.450821 | 2020-06-18T08:22:57 | 2020-06-18T08:22:57 | 264,966,287 | 1 | 1 | null | 2020-06-18T08:31:24 | 2020-05-18T14:33:25 | Python | UTF-8 | Python | false | false | 340 | py | '''
Created by auto_sdk on 2018.07.25
'''
from dingtalk.api.base import RestApi
class OapiUserGetUseridByUnionidRequest(RestApi):
    """Auto-generated request wrapper for `dingtalk.oapi.user.getUseridByUnionid`.

    Set `self.unionid` before executing the request.
    """
    def __init__(self,url=None):
        RestApi.__init__(self,url)
        self.unionid = None  # request parameter: the unionid to resolve
    def getHttpMethod(self):
        return 'GET'
    def getapiname(self):
        return 'dingtalk.oapi.user.getUseridByUnionid'
| [
"paul.lu@belstar.com.cn"
] | paul.lu@belstar.com.cn |
f2214e2eed2dcb5479c7422af4bb496da6bb4122 | 970e122697f7996fab03dda03e42c490436120ab | /snacks/urls.py | d30580fdad67fea2116bb54086c49ec3f676ab7a | [] | no_license | leeroywking/django_snacks | d2df6808c408384ce0e56002702e0c3c43d0616f | e1a283573a1a951abc6500b4689c562719bbae31 | refs/heads/master | 2022-12-08T06:40:06.447090 | 2020-08-23T00:37:20 | 2020-08-23T00:37:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from django.urls import path, include
from .views import HomeView, AboutView
urlpatterns = [
path("", HomeView.as_view(), name="home"),
path("about/", AboutView.as_view(), name="about"),
]
| [
"leeroywking@gmail.com"
] | leeroywking@gmail.com |
dd8390b0ea89616805802fae4a2e1e0cb05def74 | 474525154a4e1d48ef5242d1f44164d05399b145 | /tensorflow_probability/python/experimental/bijectors/sharded_test.py | a0af04d746b9e383d2fb1f518322156773f284a4 | [
"Apache-2.0"
] | permissive | svshivapuja/probability | 9855737790f74a39169688fbfec9671deef804d9 | af7ccb22d972329633530c3b754ed1f49472f6a7 | refs/heads/main | 2023-07-17T04:14:53.703622 | 2021-08-30T17:47:06 | 2021-08-30T17:47:06 | 400,983,015 | 1 | 0 | Apache-2.0 | 2021-08-29T07:51:29 | 2021-08-29T07:51:29 | null | UTF-8 | Python | false | false | 3,976 | py | # Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for tensorflow_probability.python.experimental.bijectors.sharded."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.experimental.bijectors import sharded
from tensorflow_probability.python.internal import distribute_lib
from tensorflow_probability.python.internal import distribute_test_lib as test_lib
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
tfb = tfp.bijectors
tfp_dist = tfp.experimental.distribute
JAX_MODE = False
@test_util.test_all_tf_execution_regimes
class ShardedTest(test_lib.DistributedTest):
  """Checks that Sharded bijectors match their unsharded counterparts."""

  def test_sharded_log_det_jacobian(self):
    # Unsharded reference: summed Beta(2, 2) log-prob.

    def log_prob(y):
      return tf.reduce_sum(tfd.Beta(2., 2.).log_prob(y))

    def transform_log_prob(log_prob, bijector):
      # Pull `log_prob` back through `bijector` (change of variables).

      def new_log_prob(x):
        y = bijector.forward(x)
        return log_prob(y) + bijector.forward_log_det_jacobian(x, len(x.shape))

      return new_log_prob

    @tf.function
    def lp_grad(x):
      # Sharded version: per-device log-prob parts + Sharded(Sigmoid).
      untransformed_log_prob = distribute_lib.make_sharded_log_prob_parts(
          log_prob, self.axis_name)
      transformed_log_prob = transform_log_prob(
          untransformed_log_prob,
          sharded.Sharded(tfb.Sigmoid(), shard_axis_name=self.axis_name))
      lp, g = tfp.math.value_and_gradient(transformed_log_prob, (x,))
      return lp, g

    def true_lp_grad(x):
      transformed_log_prob = transform_log_prob(log_prob, tfb.Sigmoid())
      lp, g = tfp.math.value_and_gradient(transformed_log_prob, (x,))
      return lp, g

    y = tf.convert_to_tensor(
        np.linspace(0.2, 0.7, test_lib.NUM_DEVICES, dtype=np.float32))
    x = self.evaluate(tfb.Sigmoid().inverse(y))
    sharded_x = self.shard_values(x)
    lp, g = self.evaluate(
        self.per_replica_to_tensor(self.strategy_run(lp_grad, (sharded_x,))))
    true_lp, true_g = self.evaluate(true_lp_grad(x))
    # Every replica should report the same global log-prob and gradient.
    self.assertAllClose(true_lp, lp[0])
    self.assertAllClose(true_lp, lp[1])
    self.assertAllClose(true_g, g)

  def test_sharded_distribution_sharded_bijector(self):
    # Unsharded reference distribution vs. Sharded(Normal) + Sharded(Sigmoid).
    td = tfd.TransformedDistribution(tfd.Normal(loc=0, scale=1), tfb.Sigmoid())
    sharded_td = tfd.TransformedDistribution(
        tfp_dist.Sharded(
            tfd.Normal(loc=0, scale=1), shard_axis_name=self.axis_name),
        sharded.Sharded(tfb.Sigmoid(), shard_axis_name=self.axis_name))
    x = self.evaluate(td.sample(test_lib.NUM_DEVICES, seed=self.key))
    sharded_x = self.shard_values(x)

    def true_lp_grad(x):

      def log_prob(x):
        return tf.reduce_sum(td.log_prob(x))

      lp, g = tfp.math.value_and_gradient(log_prob, (x,))
      return lp, g

    @tf.function
    def sharded_lp_grad(x):
      lp, g = tfp.math.value_and_gradient(sharded_td.log_prob, (x,))
      return lp, g

    true_lp, true_grad = self.evaluate(true_lp_grad(x))
    sharded_lp, sharded_grad = self.evaluate(
        self.per_replica_to_tensor(
            self.strategy_run(sharded_lp_grad, (sharded_x,))))
    # Replica-local results must agree with the unsharded computation.
    self.assertAllClose(true_lp, sharded_lp[0])
    self.assertAllClose(true_lp, sharded_lp[1])
    self.assertAllClose(true_grad, sharded_grad)
if __name__ == '__main__':
test_util.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
3751469fca3fe0742554b1ceee3a3d7399bc9e8d | 0b1e404a165c960677d07015bc26aac0569cf84a | /src/combustion/vision/contour.py | 5869f815424f6d62197e0b9f8e2cad450a1cd0cb | [
"Apache-2.0"
] | permissive | johndpope/combustion | d3ec349cd7be086f55b4e3deebd571c97842e1ed | c3f91e62a10a873cfeeae8c675b0683bc5158818 | refs/heads/master | 2023-03-01T14:34:42.149415 | 2021-02-07T17:55:58 | 2021-02-13T17:17:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Tuple
import cv2
import torch
from torch import Tensor
from combustion.util import check_is_tensor
def mask_to_polygon(mask: Tensor, num_classes: int, pad_value: float = -1) -> Tuple[Tuple[Tensor, ...], ...]:
    """Extract external contours for each class of each mask in a batch.

    :param mask: 4-dim tensor of integer class ids; the second dimension is
        collapsed by the ``view`` below, so it is assumed to be 1
        (``(batch, 1, height, width)``) -- TODO confirm with callers.
    :param num_classes: number of class ids to extract contours for.
    :param pad_value: NOTE(review): accepted but never used in this body.
    :return: tuple with one entry per (batch element, class) pair, each a
        tuple of ``long`` tensors holding the contour points returned by
        ``cv2.findContours``.
    """
    check_is_tensor(mask, "mask")
    assert mask.ndim == 4
    assert num_classes > 0
    batch_size, _, height, width = mask.shape
    # Drop the channel dimension so each element is a (H, W) id map.
    mask = mask.view(batch_size, height, width)
    result = []
    for elem in mask:
        for cls in range(num_classes):
            # Binary mask for this class, as uint8 on CPU for OpenCV.
            cls_mask = (elem == cls).byte().numpy()
            contours, hierarchy = cv2.findContours(cls_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            result.append(tuple([torch.from_numpy(x).long() for x in contours]))
    return tuple(result)
| [
"tidalpaladin@protonmail.com"
] | tidalpaladin@protonmail.com |
b7b163b82ebc15e557179135b6dde4fbdc96ee0d | fd74a044c0037796455ba4bd4fd44f11c3323599 | /Practice/ABC/Bcontest126_b.py | 36d937cbb053050a502070da2af82b5d69f27e88 | [] | no_license | tegetege/tegetege_AtCoder | 5ac87e0a7a9acdd50d06227283aa7d95eebe2e2f | ba6c6472082e8255202f4f22a60953d0afe21591 | refs/heads/master | 2022-03-25T00:29:22.952078 | 2022-02-10T14:39:58 | 2022-02-10T14:39:58 | 193,516,879 | 0 | 0 | null | 2019-06-25T13:53:13 | 2019-06-24T14:02:05 | Python | UTF-8 | Python | false | false | 477 | py | import sys
S = list(input())
S = [S[0]+S[1],S[2]+S[3]]
month = ['01','02','03','04','05','06','07','08','09','10','11','12']
# 月表示、年表示についてのステータス
# [前表示、後ろ表示]
mon = [0,0]
yer = [0,0]
if S[0] in month:
mon[0] = 1
if S[1] in month:
mon[1] = 1
if mon[0] == 1 and mon[1] == 1:
print('AMBIGUOUS')
sys.exit()
if mon[0] == 1:
print('MMYY')
sys.exit()
if mon[1] == 1:
print('YYMM')
sys.exit()
else:
print('NA')
sys.exit()
| [
"m_take7_ex_d@yahoo.co.jp"
] | m_take7_ex_d@yahoo.co.jp |
c3d07a29c54f5a213678adacdcaae6cbfd9c9f9b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/392/usersdata/329/71352/submittedfiles/formula.py | 4bf76caf0210c1e473372fb8519d8918c25e9277 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | # -*- coding: utf-8 -*-
p = float(input('digite seus pontos'))
i = float(input('digite seu intervalo'))
n = float(input('digite seu numero de intervalos'))
# Future value of a payment series: p * (((1 + i)**n) - 1) / i.
# The original parenthesization computed p*(1+i)**n - (1/i), and the print
# call below was a SyntaxError ('%2.f' valor, missing the % operator).
# NOTE(review): output format assumed to be two decimal places -- confirm
# against the exercise's expected output.
valor = p * (((1 + i) ** n) - 1) / i
print('%.2f' % valor)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
7e9ff31d0e26e0cb03730f34ab42ed2a15f3f8e3 | a7406d29a53ac2439ed182cccd2546d3bf59d0d2 | /do_tasks.py | a9f2ead27cf383fb2156c077da1ab7fe1a1a3fb6 | [] | no_license | gawel/nuka_pycon_fr | 1cf4bc7a0727a1628a4a0bc515c49796810c0216 | 541d223a0fa2884e19d2c8661babb9cbdb0d3950 | refs/heads/master | 2023-03-31T06:26:37.239647 | 2017-09-24T11:48:12 | 2017-09-24T11:48:12 | 104,308,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | #!bin/python
import nuka
from nuka.hosts import DockerContainer
from tasks import mytask
async def do_tasks(host):
    """Run `mytask` and print its captured stdout.

    NOTE(review): `host` is never referenced here; nuka presumably injects it
    per target host -- confirm against nuka's API.
    """
    res = await mytask()
    print(res.stdout)
# Execute the coroutine against a Docker container named 'pyconfr'.
nuka.run(do_tasks(DockerContainer('pyconfr')))
| [
"gael@gawel.org"
] | gael@gawel.org |
0592a2a09febf0fe6f59a27d302f0b711de4a92b | 2959849cbcb8c76033e02643ae2fb039634074a8 | /loadFromJsonTester.py | fcc564e2ccc79124dc06364b17f7f3de38c6e1ee | [] | no_license | sebastiengilbert73/icf18 | fc5cc94db472d6483bbb4a3493291cc56fdfeed2 | ac2aff6242b58d33a46fc73fa09da5f03d849210 | refs/heads/master | 2020-03-08T23:54:27.718643 | 2018-04-21T18:26:59 | 2018-04-21T18:26:59 | 128,475,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # loadFromJsonTester.py
import json
import loadFromJson
# Smoke test: load the iMaterialist-fashion-2018 training annotations and
# print the attribute frequency table.
print ("loadFromJsonTester.py")
importer = loadFromJson.Importer('/home/sebastien/MachineLearning/BenchmarkDatasets/imaterialist-challenge-fashion-2018/train.json',
                                 maximumNumberOfTrainingImages=0)  # 0 presumably means "no limit" -- confirm in loadFromJson
attributesFrequencies = importer.AttributesFrequencies()
print ("attributesFrequencies =", attributesFrequencies)
print (len(attributesFrequencies)) | [
"sebastiengilbert73@yahoo.ca"
] | sebastiengilbert73@yahoo.ca |
eab1836aac2667e34bedb8b0d94404f89f96f45a | b500996a0b29829fde6afe8b23178ca9df4a239d | /rydinfap/src/apps/empbendem.py | aa5e3495b140fd5a36740bd35f33a07d4ae9eb28 | [] | no_license | eocampo2000/test-code | 48c4d444e323eef5e6fe7e61b018952ef3cd4134 | 49328664243e1a9daf9c567d1aaaa19fd4654c02 | refs/heads/master | 2016-08-11T07:35:31.346464 | 2016-02-13T12:33:55 | 2016-02-13T12:33:55 | 51,642,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,816 | py | '''
Created on Jul 17, 2015
@author: eocampo
'''
__version__ = '20150717'
import sys
import os
#import time
from datetime import datetime
import utils.fileutils as fu
import utils.strutils as su
import utils.filetransf as ft
import procdata.procinfa as pi
from apps.infbaseapp import _InfaBaseApp
# Mandatory to define self.cmdStep
# method _getNextRunDate is sensitive to schedule changes !
# Module-level scheduling constants for this Informatica batch application.
RUN_PER_DAY = 1 # Daily runs.
DP_LEN = len('YYYYMM')
# Schedules
SCH_FREQ = 'Mthly'
sch = ()
# Current period key in YYYYMM form (e.g. '201507').
cur_dayr = su.getTodayDtStr('%Y%m')
class EmpBenefitonDem(_InfaBaseApp):
    """On-demand 'Employee Benefits' Informatica batch application.

    Fetches two source files over FTP (a mainframe extract and
    hrcoord5.csv from a second host), stages them in the Informatica
    working directory, runs wkf_employee_benefits_on_demand, then deletes
    the staged files.  The lettered entries in `cmdStep` are the
    individual pipeline steps the base app can execute.
    """
    exitOnError = True
    def __init__(self):
        super(EmpBenefitonDem,self).__init__()
        self.landDir = 'SrcFiles/employee'
        self.incFileSet = [] # Incoming Files. Contains full path name.
        self.incFiles = []
        self.workFiles = [] # Files that were moved to the working dir (ideally same than incSetFile).
        self.RowCnt = -1
        self.srcFile = ('hp400jnm_sap_dwext.dat','hrcoord5.csv') # File that Informatica expects. Alphabetical.
        self.ib.fileName = r"'P.HP400JNM.SAP.DW.EXTRACT',hrcoord5.csv"
        self.checkNextRunFlg = False
        self.runWkfFlowFlg = False
        self.fileDate = ''
        self.FILE_SET_LEN = 1
        # Timestamp used to suffix archived file names (see archFilesTS).
        self.ts = su.getTimeSTamp()
        # Allowable commands for this application. Make sure to Set
        self.cmdStep = { 'A' : self.getLock ,
                         'B' : self.getFtpFiles , # MF edwetl
                         'C' : self.getFtpFiles2 , # MF INFAHR
                         'D' : self.getIncSetFiles ,
                         'E' : self.copyFilesWorkDir ,
                         'F' : self.archFilesTS ,
                         'G' : self.procEmpBenefit ,
                         }
        # Infa Environmental variables/
        self.infaEnvVar = {
                           'PMCMD' : 'mg.pmcmd' ,
                           'INFA_USER' : 'self.ib.rep_user' ,
                           'INFA_XPWD' : 'self.ib.rep_xpwd' ,
                           'DOMAIN' : 'self.ib.dom_name' ,
                           'INT_SERV' : 'self.ib.IS' ,
                           'INFA_SHARE' : 'self.ib.shareDir' ,
                           'INFA_APP_CFG' : 'self.ib.cfgDir' ,
                           'INFA_APP_LCK' : 'self.ib.lckDir' ,
                           'INFA_APP_CTL' : 'self.ib.ctlDir' ,
                           }
        # FTP is expecting the following env variables, which should not be in a config file.
        # First Set of files from Mainframe.
        #
        os.environ['RXFILE' ] = ('None') #
    # Sets a flag, to check for next run.
    def getFtpFiles(self):
        """Fetch the mainframe SAP extract via FTP (step 'B')."""
        os.environ['FILE' ] = (r"\'P.HP400JNM.SAP.DW.EXTRACT\'")
        return ft.getn('Employee',self.log)
    def getFtpFiles2(self):
        """Fetch hrcoord5.csv from the secondary FTP host (step 'C').

        Remaps the generic FTP env-var names to their '*1' variants so the
        shared transfer code talks to the second host.
        """
        os.environ['FILE' ] = ('hrcoord5.csv')
        envVars = {'REMOTE_HOST':'REMOTE_HOST1',
                   'USER' :'USER1',
                   'PWD' :'PWD1',
                   'REMOTE_DIR' :'REMOTE_DIR1',
                   'FTP_MODE' :'FTP_MODE1',
                   }
        rc = self.setEnvVars(envVars)
        if rc != 0 : return rc
        return ft.get('Employee2',self.log)
    # Wrapper Method
    def copyFilesWorkDir(self):
        """Record each incoming file path, then copy the set to the work dir (step 'E')."""
        for i in range (len(self.srcFile)):
            self.incFiles.append('%s' % self.incFileSet[i][0])
        return self.cpSrcToTgtFiles()
    # Archive the staged files with the run-timestamp suffix (step 'F').
    def archFilesTS(self): return self.archGenFiles(self.incFiles, self.ts,True)
    def _wkfEmpBenefitonDem(self):
        """Run the employee-benefits workflow and log the return code."""
        self.ib.fld = 'Employee'
        self.ib.wkf = 'wkf_employee_benefits_on_demand'
        rc = pi.runWkflWait(self.ib,self.log)
        if rc != 0 :
            self.log.error('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
        else :
            self.log.info('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
        return rc
    def procEmpBenefit(self):
        """Run the workflow, then delete the staged source files (step 'G')."""
        # Run workflows
        if self._wkfEmpBenefitonDem() != 0 : return 1
        for i in range (len(self.srcFile)):
            t = '%s/%s' % (self.ib.workDir,self.srcFile[i])
            r = fu.delFile(t)
            self.log.info('Removing %s rc = %s ' % (t,r))
        return 0
def main(Args):
    """Module-level entry point: build the app and delegate to its main().

    Returns the application's integer return code.
    """
    return EmpBenefitonDem().main(Args)
# Script entry point.  NOTE(review): the return code is captured but never
# passed to sys.exit, so the process exit status is always 0 regardless of rc.
if __name__ == '__main__':
    rc= main(sys.argv)
| [
"eocampo1000@hotmail.com"
] | eocampo1000@hotmail.com |
061c2dfb37cde98ccb1b20355fa195952702cbd5 | a857d1911a118b8aa62ffeaa8f154c8325cdc939 | /toontown/building/DistributedCJElevatorAI.py | 686cddd40ddf3723841c10683a09971fb59a8373 | [
"MIT"
] | permissive | DioExtreme/TT-CL-Edition | 761d3463c829ec51f6bd2818a28b667c670c44b6 | 6b85ca8352a57e11f89337e1c381754d45af02ea | refs/heads/main | 2023-06-01T16:37:49.924935 | 2021-06-24T02:25:22 | 2021-06-24T02:25:22 | 379,310,849 | 0 | 0 | MIT | 2021-06-22T15:07:31 | 2021-06-22T15:07:30 | null | UTF-8 | Python | false | false | 448 | py | from ElevatorConstants import *
import DistributedBossElevatorAI
class DistributedCJElevatorAI(DistributedBossElevatorAI.DistributedBossElevatorAI):
    """AI-side elevator variant typed as ELEVATOR_CJ."""

    def __init__(self, air, bldg, zone, antiShuffle = 0, minLaff = 0):
        # NOTE(review): `minLaff` is accepted here but a hard-coded 0 is
        # forwarded to the base class, so callers' minLaff values are ignored.
        DistributedBossElevatorAI.DistributedBossElevatorAI.__init__(self, air, bldg, zone, antiShuffle=antiShuffle, minLaff=0)
        self.type = ELEVATOR_CJ
        # Countdown duration looked up from the shared elevator config table.
        self.countdownTime = ElevatorData[self.type]['countdown']
| [
"devinhall4@gmail.com"
] | devinhall4@gmail.com |
e462e5fc073c7fa7875310e8044c5dce34e1153c | 113480ad4471b4fd5500847c08ab1bd12e73e186 | /lonehen/core/urls.py | d1c852d4943df35920ad9a972c6ebe8685b16b77 | [] | no_license | ReedRichards/Django-Rest-API | 1f4b74e95525df33a342ffa7eae54a4da19a260f | 2d45729c7737bf02201264ff2203bd547a599ce6 | refs/heads/master | 2020-03-17T07:20:44.618043 | 2018-05-14T20:50:21 | 2018-05-14T20:50:21 | 133,302,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from core import views
# URL routes for the core app.  Each content type exposes a list endpoint
# plus a pk-addressed detail endpoint.
urlpatterns = [
    # Informational / transactional pages.
    path('about-page/', views.AboutPageView.as_view()),
    path('email/', views.Email.as_view()),
    path('checkout/', views.Checkout.as_view()),
    path('about-page/<int:pk>/', views.AboutPageDetail.as_view()),
    # Content collections.
    path('carousel/',views.CarouselList.as_view()),
    path('carousel/<int:pk>/',views.CarouselDetail.as_view()),
    path('press/',views.PressList.as_view()),
    path('press/<int:pk>/',views.PressDetail.as_view()),
    path('event/',views.EventList.as_view()),
    path('event/<int:pk>/',views.EventDetail.as_view()),
    path('blog/',views.BlogList.as_view()),
    path('blog/<int:pk>/',views.BlogDetail.as_view()),
    path('makersnotes/',views.MakersNotesList.as_view()),
    path('makersnotes/<int:pk>/',views.MakersNotesDetail.as_view()),
    path('shop/',views.ShopItemList.as_view()),
    path('shop/<int:pk>/',views.ShopItemDetail.as_view()),
    # User account endpoints.
    path('users/', views.UserList.as_view()),
    path('users/login/', views.LoginToLonehen.as_view()),
    path('users/register/', views.CreateUserView.as_view()),
    path('users/<int:pk>/', views.UserDetail.as_view())
]
| [
"you@example.com"
] | you@example.com |
f47c2eaba301bf364ef9b6e3e921a85eabaddd3e | 353b1065e5dfce1878d367bf9758387cf34ce52f | /cart/migrations/0006_auto_20200716_1310.py | 0f726cb37cd191a179770f792eda190265da8c0e | [] | no_license | Vaibhav-21-git/Gym-Website-Project | 62d19c2708f602b165fb4fecd0659a56b38d3008 | 9ff78ae40a8e14ffdbce4f87e2b9915a8fe5bc0d | refs/heads/master | 2022-11-28T04:16:44.877412 | 2020-08-06T09:48:04 | 2020-08-06T09:48:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Generated by Django 3.0.5 on 2020-07-16 08:40
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: only changes the Cart model's admin-facing
    # plural label (verbose_name_plural) to a Persian phrase.
    dependencies = [
        ('cart', '0005_cart_online_selected_class'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='cart',
            options={'verbose_name_plural': 'کلاس های درخواست شده برای ثبت نام'},
        ),
    ]
| [
"6438087-Noferesti@users.noreply.gitlab.com"
] | 6438087-Noferesti@users.noreply.gitlab.com |
649b957c5e0d01e02f844bbb7bb17e611c313212 | 68a32523accac405bad9a08a234ca179a227d3ef | /microcephalus7/easy/1768.py | a90ef01fd5004f6ae62a9e3fcfbea36ca917a436 | [] | no_license | plan-bug/LeetCode-Challenge | 592880327a64c23f3b1cd4efc8266e44d936872c | a2508bcb739ed1a095a2e40009fd5e8162fb97c1 | refs/heads/master | 2023-06-12T12:08:28.545925 | 2021-06-30T03:24:52 | 2021-06-30T03:24:52 | 302,037,168 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | class Solution:
def mergeAlternately(self, word1: str, word2: str) -> str:
res = ""
for i in range(min(len(word1), len(word2))):
res += word1[i] + word2[i]
return res + word1[i+1:] + word2[i+1:] | [
"microcephalus7@gmail.com"
] | microcephalus7@gmail.com |
295175fb53a5e530c86461b0629e06f8ce62592b | d5ad13232e3f1ced55f6956bc4cbda87925c8085 | /cc_mcc_seq/16sSNVExome/A1_csv2tsv/1.csv2tsv.py | 975e2803d34296fec397b55a797f6ab273530798 | [] | no_license | arvin580/SIBS | c0ba9a8a41f59cb333517c286f7d80300b9501a2 | 0cc2378bf62359ec068336ea4de16d081d0f58a4 | refs/heads/master | 2021-01-23T21:57:35.658443 | 2015-04-09T23:11:34 | 2015-04-09T23:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | import csv
def csv2tsv(iFile):
    """Split an annotated CSV into two tab-separated files.

    Rows whose 34th column (index 33) starts with 'INDEL' are written to
    '<base>.indel'; all other rows go to '<base>', where <base> is the
    input path without its '.csv' suffix.  The header row is skipped.

    Fixes in this version:
    - `iFile.rstrip('.csv')` removed a trailing run of the *characters*
      '.', 'c', 's', 'v', not the literal suffix; replaced with an exact
      suffix strip.
    - `csvFile.next()` is Python 2 only; `next(csvFile)` works on 2 and 3.
    - Files are now closed via context managers even if an error occurs.
    """
    base = iFile[:-len('.csv')] if iFile.endswith('.csv') else iFile
    with open(iFile) as inFile, \
         open(base, 'w') as ouFile1, \
         open(base + '.indel', 'w') as ouFile2:
        csvFile = csv.reader(inFile)
        next(csvFile)  # skip the header row
        for fields in csvFile:
            # Route the row on its variant-type column.
            target = ouFile2 if fields[33].startswith('INDEL') else ouFile1
            target.write('\t'.join(fields) + '\n')
# Convert both summary CSVs (expected in the working directory) into
# paired SNV / indel TSV outputs.
csv2tsv('sum_snv16sExome.exome_summary.csv')
csv2tsv('sum_snv16sExome.genome_summary.csv')
| [
"sunhanice@gmail.com"
] | sunhanice@gmail.com |
93ad54d0dd4c4455f694c05499534e8353c5672f | fd899e63d4c33261911f0e35fb8cb286332b7a95 | /algorithm_202102/baek11727.py | 9938d5ef3b42203174139a1470c8533d7783d3eb | [] | no_license | choyj0920/algorithm | 948452d652455973e928ef537729174109972252 | e1287799e7073232cbf8904cea7c348a2f2a9c30 | refs/heads/master | 2023-05-25T22:23:11.044049 | 2021-09-08T03:40:42 | 2021-08-13T08:04:46 | 225,736,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | # baek11727 2xn타일 2
import sys
# Baekjoon 11727 "2xn tiling 2": count tilings of a 2xn board with 1x2,
# 2x1 and 2x2 tiles, reported modulo 10007.
# Recurrence: T(i) = T(i-1) + 2*T(i-2), with T(0) = T(1) = 1.
n = int(input())
MOD = 10007
prev2, prev1 = 1, 1  # T(i-2) and T(i-1), seeded with T(0), T(1)
for _ in range(2, n + 1):
    prev2, prev1 = prev1, (prev1 + 2 * prev2) % MOD
print(prev1)
| [
"44551544+choyj0920@users.noreply.github.com"
] | 44551544+choyj0920@users.noreply.github.com |
7bfd7a902aaec01c545269ed7d4145b1ac7bd9e5 | 61673ab9a42f7151de7337608c442fa6247f13bb | /getch/main.py | 097b87607831d1d8ff9f4f04d06ce4205a51f90d | [
"MIT"
] | permissive | furas/python-examples | 22d101670ecd667a29376d7c7d7d86f8ec71f6cf | 95cb53b664f312e0830f010c0c96be94d4a4db90 | refs/heads/master | 2022-08-23T23:55:08.313936 | 2022-08-01T14:48:33 | 2022-08-01T14:48:33 | 45,575,296 | 176 | 91 | MIT | 2021-02-17T23:33:37 | 2015-11-04T23:54:32 | Python | UTF-8 | Python | false | false | 281 | py | import getch
print('Press Ctrl+C to exit\n')
# Echo each keypress until the user interrupts with Ctrl+C.
while True:
    key = getch.getch()
    if key == '\x1B':
        print('ESC')
    elif key in ('\n', '\r'):
        # Enter may arrive as LF or CR depending on the terminal mode.
        # The original compared against both '\n' and '\x0A', which are the
        # same character, leaving its second branch unreachable; '\r' is the
        # likely intended second case.
        print('ENTER')
    else:
        print("key:", key, ord(key), hex(ord(key)))
| [
"furas@tlen.pl"
] | furas@tlen.pl |
c625089e78a9ff34a815a1a232057a7b5631f50b | 527a8c6fc8ec797ca6136ebf43c9817aae0abc0b | /Day17/SIPA/SIPA/asgi.py | 5da4409085379d3d46ad70eb5ba25986807fc388 | [] | no_license | tanguturuanusha/APSSDC-Django-BATCH6 | 9d1706202eafb70b6d89883a06b45d1f246c9321 | b5b1ef0f139235d20aab3a54c96f15331d52c21a | refs/heads/master | 2022-12-24T08:52:36.391635 | 2020-10-03T13:00:00 | 2020-10-03T13:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
ASGI config for SIPA project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SIPA.settings')
# Module-level ASGI callable that the ASGI server imports.
application = get_asgi_application()
| [
"rravikumar34@gmail.com"
] | rravikumar34@gmail.com |
bdb9384e0b7016381ede949fbdeb078aff6d94c9 | 342bd78786473a47a80ba983f22a01f0e4d69da8 | /confirmation/config/settings/prod.py | 0d69044e5c58ff2ae60528651c0488de20be006e | [] | no_license | JMorris1575/confirmation2018 | a28d4b250170c0b2222ddfa43ee79ddaaa541a9a | 1185524026de658108feb403072cac47f879acff | refs/heads/master | 2018-12-09T18:34:16.535833 | 2018-11-24T18:11:53 | 2018-11-24T18:11:53 | 116,589,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | """
This file will need some work before the website is hosted by WebFaction.
See the christmas17 project.
"""
from .base import *
import os
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# All credentials are resolved via get_secret() (pulled in by the
# `from .base import *` above) so secrets stay out of version control.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': get_secret('PROD_DATABASE_NAME'),
        'USER': get_secret('PROD_DATABASE_USER'),
        'PASSWORD': get_secret('PROD_DATABASE_PASSWORD'),
        'HOST': get_secret('PROD_DATABASE_HOST'),
        'PORT': get_secret('PROD_DATABASE_PORT')
    }
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.01/howto/static-files/
# Collected static files live outside the project tree and are served
# from the production domain below.
STATIC_ROOT = os.path.join(os.path.dirname(os.path.dirname(BASE_DIR)), 'conf_static_secure/')
STATIC_URL = 'https://confirmation.jmorris.webfactional.com/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static', 'site'), )
ALLOWED_HOSTS.append('confirmation.jmorris.webfactional.com')
# Site administrators.
ADMINS = (
    ('FrJim', 'jmorris@ecybermind.net'), ('FrJim', 'frjamesmorris@gmail.com')
)
# Outgoing mail configuration, also sourced from secrets.
EMAIL_HOST = get_secret('EMAIL_HOST')
EMAIL_HOST_USER = get_secret('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_secret('EMAIL_HOST_PASSWORD')
DEFAULT_FROM_EMAIL = get_secret('DEFAULT_FROM_EMAIL')
SERVER_EMAIL = get_secret('SERVER_EMAIL')
| [
"FrJamesMorris@gmail.com"
] | FrJamesMorris@gmail.com |
1912d36a74982952f273aea38df325788e427f2e | 022cec04ce91ab45cc9fd6ce77b2fc23bba1e1fa | /product_in_cellar_app/views/product_in_cellar.py | 643f5ff84ae02a2eb9e1cf3c904f4660ff04b279 | [] | no_license | AndresOsorio0710/BackenMarca | fa1181234380c38be4815dabc0554766d55f2100 | 78e4f33f585e556c6dbb3d2739339857464d7043 | refs/heads/master | 2023-05-02T05:29:29.977869 | 2021-05-01T02:10:47 | 2021-05-01T02:10:47 | 362,198,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | from django.db.models import F, Sum
from rest_framework import viewsets, mixins
from product_in_cellar_app.models import ProductInCellar
from product_in_cellar_app.serializers import ProductInCellarSerializer, SaveProductInCellarSerializer, \
ProductListSerializer
class ProductInCellarViewSet(
    mixins.ListModelMixin,
    mixins.CreateModelMixin,
    mixins.UpdateModelMixin,
    mixins.DestroyModelMixin,
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet
):
    """CRUD API for ProductInCellar records.

    The `list` action returns flat dicts (via .values()) annotated with
    related cellar/provider names; other actions use full model instances.
    """
    queryset = ProductInCellar.objects.all()
    serializer_class = ProductInCellarSerializer
    def get_queryset(self):
        """Return the base queryset; for `list`, a flattened annotated one."""
        queryset = super(ProductInCellarViewSet, self).get_queryset()
        if self.action == self.list.__name__:
            # Flatten rows to dicts and attach related display names
            # alongside the foreign-key ids, sorted by product name.
            queryset = queryset.values(
                'uuid',
                'name',
                'reference',
                'description',
                'cost',
                'unit_cost',
                'quantity_entered',
                'free_quantity',
                'stop',
                'show'
            ).annotate(
                cellarName=F('cellar__name'),
                cellar=F('cellar'),
                providerName=F('provider__name'),
                provider=F('provider')
            ).order_by('name')
        return queryset
    def get_serializer_class(self):
        """Pick a serializer per action, defaulting to the full model serializer."""
        serializer = {
            'create': SaveProductInCellarSerializer,
            'list': ProductListSerializer
        }
        if self.action in serializer:
            return serializer[self.action]
        return ProductInCellarSerializer
| [
"andres.osorio0710@gmail.com"
] | andres.osorio0710@gmail.com |
4ccea471c32659e05690c9079312162aebc0986a | fd981b47482467291576ae4650d2925d6fa00564 | /robot_ws/build/rtcrobot/rtcrobot_msgs/catkin_generated/pkg.develspace.context.pc.py | 8513a87cb67fd38166b38fb3714bf3ef08aa395c | [] | no_license | Forrest-Z/rtcrobot | 7337271e726db794ce08953f333ad9a0f8e70027 | 229ce1d7e77af9348eac870e00a2c4049e4562f1 | refs/heads/master | 2022-12-16T00:14:17.525845 | 2020-05-18T09:28:09 | 2020-05-18T09:28:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated pkg-config context for the rtcrobot_msgs catkin package;
# values are baked in by the build system -- do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/gaara/robot_ws/devel/include".split(';') if "/home/gaara/robot_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rtcrobot_msgs"
PROJECT_SPACE_DIR = "/home/gaara/robot_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"mtk@mtk"
] | mtk@mtk |
cf217c8e49b23fc2af06cfa3d4d496b7e3a4f875 | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/python/prompt-toolkit.py | 656c9f0a0eaabd6ad15f4927992347ec4592f716 | [
"MIT",
"BSD-3-Clause"
] | permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | from .base import PipBaseRecipe
class PromptToolkitRecipe(PipBaseRecipe):
    """Build recipe for the `prompt-toolkit` pip package, version 1.0.9."""

    def __init__(self, *args, **kwargs):
        """Initialise the base pip recipe, then record package metadata."""
        super(PromptToolkitRecipe, self).__init__(*args, **kwargs)
        self.name = 'prompt-toolkit'
        self.version = '1.0.9'
        self.pydepends = ['wcwidth']
        # Expected SHA-256 checksum of the source archive.
        self.sha256 = ('cd6523b36adc174cc10d54b1193eb626'
                       'b4268609ff6ea92c15bcf1996609599c')
| [
"clayton.stangeland@gmail.com"
] | clayton.stangeland@gmail.com |
5dd0523819cf4df709c1eaedde8280c1fa65531f | a8124c3361ec462e076fbe246c3571672a28a54b | /python3/dash_apps/dash_t_15_create_checkbox.py | 4ee6eb695d9aca4dd7a6ee6dab087b878a966636 | [
"MIT"
] | permissive | ashifujjmanRafi/code-snippets | 80ea1300fb2bb5bf4bc1c2fb01222a42127c0438 | 24bd4b81564887822a0801a696001fcbeb6a7a75 | refs/heads/master | 2023-02-17T04:35:32.779975 | 2021-01-12T02:14:47 | 2021-01-12T02:14:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import numpy as np
import plotly.graph_objs as go
app = dash.Dash()
# Static page layout: centered heading, a country dropdown, a label, and a
# checklist with two options pre-selected.
app.layout = html.Div([
    html.H1(children='Choose a country',
            style={
                'textAlign': 'center'
            }),
    dcc.Dropdown(
        id='first-dropdown',
        options=[
            {'label': 'Bangladesh', 'value': 'BD'},
            {'label': 'Pakistan', 'value': 'PAK'},
            {'label': 'Turosko', 'value': 'Turky'},
            # 'disabled': True renders the option but makes it unselectable.
            {'label': 'South Africa', 'value': 'SA', 'disabled': True},
        ],
    ),
    html.Label('This is checkbox'),
    html.Br(),
    dcc.Checklist(
        options=[
            {'label': 'Bangladesh', 'value': 'BD'},
            {'label': 'Pakistan', 'value': 'PAK'},
            {'label': 'Turosko', 'value': 'Turky'},
            {'label': 'South Africa', 'value': 'SA', 'disabled': True},
        ],
        # NOTE(review): `values=` (plural) is the old dash-core-components
        # API; newer versions use `value=` -- confirm the pinned version.
        values=['BD', "PAK"]
    )
])
# Development server with hot reload on port 8000.
app.run_server(debug=True, port=8000)
| [
"nahid.cseru@gmail.com"
] | nahid.cseru@gmail.com |
809f692cd72ba8af4ac3f4177f90a90e1b7e973f | f9f8cefc9d150eb26f4e2edd327b858542edb48d | /web/testimonial_plugin/migrations/0001_initial.py | 78eb92211bc4c35c6f4215260bbde8e2a2f388b8 | [] | no_license | phanminhtam/portfolio-tampm | 2ce0893a62f00d395dafc38d4ab5e897e5ce01bb | 05bf8e91fd638eee86f33066f3c7c6f82202dd0f | refs/heads/master | 2020-03-07T09:05:01.907957 | 2018-07-16T02:12:55 | 2018-07-16T02:12:55 | 127,397,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,518 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-02 10:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the testimonial plugin app:
    # creates Testimonial, the django-cms TestimonialPlugin, and the FK
    # linking each Testimonial to its owning plugin instance.
    initial = True
    dependencies = [
        ('cms', '0018_pagenode'),
    ]
    operations = [
        migrations.CreateModel(
            name='Testimonial',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('customer_name', models.CharField(max_length=100)),
                ('content', models.TextField(blank=True, null=True)),
                ('customer_position', models.TextField(blank=True, max_length=100, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='TestimonialPlugin',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='testimonial_plugin_testimonialplugin', serialize=False, to='cms.CMSPlugin')),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.AddField(
            model_name='testimonial',
            name='testimonial_plugin_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testimonial_plugin.TestimonialPlugin'),
        ),
    ]
| [
"nguyentrisinh0810@gmail.com"
] | nguyentrisinh0810@gmail.com |
e167103093a737eab1630649490e49d7a8707b7e | bd15b526cea746d7230bb72e20b553c503e9923e | /DS/book/05_exploreTheDataset/05_reduce_cost_increase_performance.py | ed589b9a35a54ca5a226c35eab20a15f4c5586a4 | [] | no_license | damiansp/aws_learn | 3fb718a9877cd0e812b2baf55fe7bca3a18ca78e | 72465435d7761d8c3d1fff874245a80512142f05 | refs/heads/master | 2023-06-24T01:50:20.246389 | 2022-07-27T03:27:57 | 2022-07-27T03:27:57 | 187,707,255 | 0 | 0 | null | 2023-06-12T21:30:30 | 2019-05-20T20:15:38 | Jupyter Notebook | UTF-8 | Python | false | false | 623 | py | import pandas as pd
from pyathena import connect
# S3 location where Athena writes query results (required by pyathena).
# Renamed from `se_staging_dir`: that typo made the connect() call below
# raise a NameError on `s3_staging_dir`.
s3_staging_dir = f's3://{BUCKET}/athena/staging'
conn = connect(region_name=region, s3_staging_dir=s3_staging_dir)
# Enumerate the distinct product categories in the table.
sql = (
    f'SELECT DISTINCT product_category '
    f'FROM {SCHEMA}.{TABLE} '
    f'ORDER BY product_category')
pd.read_sql(sql, conn)
# Approximate distinct count per category; per the author's note, much
# faster than the exact variant below.
# (Original had a stray '.' before `engine`, turning the argument into an
# attribute access on the SQL string -- AttributeError at runtime -- and
# leaving read_sql_query without a connection; replaced with a comma.)
df = pd.read_sql_query(
    f'SELECT APPROXIMATE COUNT(DISTINCT customer_id) '
    f'FROM {SCHEMA}.{TABLE} '
    f'GROUP BY product_category',
    engine)
# much faster than
df = pd.read_sql_query(
    f'SELECT COUNT(DISTINCT customer_id) '
    f'FROM {SCHEMA}.{TABLE} '
    f'GROUP BY product_category',
    engine)
| [
"damiansp@gmail.com"
] | damiansp@gmail.com |
46b7643cc0666f0f6c1810e8f11e48fee5622a05 | 657d894015f679c2ef9f1e2dd96f6bbaf319d9e0 | /best_api/movies/serializers.py | f9ebab71e2482fca829f4d94af7a50142355ec08 | [] | no_license | bezidejni/best_code_challenge | 33d6bb6429045d4c347531ab0e126162eef0bbc6 | bf2a1cdd3f373df97f17b87255dbf022ba604689 | refs/heads/master | 2021-01-10T19:45:16.862596 | 2014-02-07T17:56:08 | 2014-02-07T17:56:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 769 | py | from rest_framework import serializers
from .models import Movie
class AbsoluteURLFileField(serializers.FileField):
    """
    FileField serializer field that renders the file's absolute URL
    instead of its relative storage path.

    Falls back to the relative URL when no request is present in the
    serializer context (e.g. when serializing outside a view), instead of
    raising AttributeError on `None.build_absolute_uri` as the original
    implementation did.
    """
    def to_native(self, value):
        """Return the URL for *value*, or None when the field is empty."""
        if not value:
            return None
        request = self.context.get('request', None)
        if request is None:
            # No request available: degrade gracefully to the relative URL.
            return value.url
        return request.build_absolute_uri(value.url)
class MovieSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Movie records; `poster` is exposed as an absolute URL."""
    poster = AbsoluteURLFileField()
    class Meta:
        model = Movie
        # Explicit field whitelist for the API representation.
        fields = ('id', 'title', 'year', 'genre', 'imdb_rating', 'imdb_id',
                  'runtime', 'plot', 'poster', 'youtube_video_id', 'slug',
                  'actors', 'writers', 'director')
| [
"filip@jukic.me"
] | filip@jukic.me |
c296d7d9cdba29af08cbf7ebebbc7b7beec7a129 | 1df7ba55c4b61772c1a31c503e6b8881f1456dc5 | /untitled9/apps/organization/migrations/0001_initial.py | 1d7240836a57104deb025754b53664033b81a01a | [] | no_license | fzk466569/python-django-pro | 35918756060fcae375d3c99ea1a6934949b6d605 | 9add086b7a910f255df5b192268f1e117057e053 | refs/heads/master | 2021-01-19T13:18:14.141880 | 2017-02-19T12:16:29 | 2017-02-19T12:16:29 | 82,374,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,042 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-02-07 12:50
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the organization app: creates
    # CityDict (cities), CourseOrg (organizations, FK -> CityDict) and
    # Teacher (FK -> CourseOrg).  verbose_name values are escaped Chinese
    # admin labels.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='CityDict',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='\u57ce\u5e02\u540d\u79f0')),
                ('desc', models.TextField(verbose_name='\u57ce\u5e02\u63cf\u8ff0')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u6dfb\u52a0\u65f6\u95f4')),
            ],
            options={
                'verbose_name': '\u57ce\u5e02',
                'verbose_name_plural': '\u57ce\u5e02',
            },
        ),
        migrations.CreateModel(
            name='CourseOrg',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='\u673a\u6784\u540d\u79f0')),
                ('desc', models.TextField(verbose_name='\u673a\u6784\u63cf\u8ff0')),
                ('students', models.IntegerField(default=0, verbose_name='\u5b66\u751f\u4eba\u6570')),
                ('course_num', models.IntegerField(default=0, verbose_name='\u8bfe\u7a0b\u6570')),
                ('category', models.CharField(choices=[('pxjg', '\u57f9\u8bad\u673a\u6784'), ('gr', '\u4e2a\u4eba'), ('gx', '\u9ad8\u6821')], default='pxjg', max_length=20, verbose_name='\u673a\u6784\u7c7b\u522b')),
                ('click_nums', models.IntegerField(default=0, verbose_name='\u70b9\u51fb\u6b21\u6570')),
                ('fav_nums', models.IntegerField(default=0, verbose_name='\u6536\u85cf\u6b21\u6570')),
                ('image', models.ImageField(upload_to='org/%Y/%m', verbose_name='logo')),
                ('address', models.CharField(max_length=100, verbose_name='\u673a\u6784\u5730\u5740')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u6dfb\u52a0\u65f6\u95f4')),
                ('city', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organization.CityDict', verbose_name='\u6240\u5728\u57ce\u5e02')),
            ],
            options={
                'verbose_name': '\u8bfe\u7a0b\u673a\u6784',
                'verbose_name_plural': '\u8bfe\u7a0b\u673a\u6784',
            },
        ),
        migrations.CreateModel(
            name='Teacher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='\u6559\u5e08\u59d3\u540d')),
                ('work_year', models.IntegerField(default=0, verbose_name='\u5de5\u4f5c\u5e74\u9650')),
                ('work_company', models.CharField(max_length=50, verbose_name='\u5c31\u804c\u516c\u53f8')),
                ('work_position', models.CharField(max_length=50, verbose_name='\u516c\u53f8\u804c\u4f4d')),
                ('points', models.CharField(max_length=50, verbose_name='\u6559\u5b66\u7279\u70b9')),
                ('click_nums', models.IntegerField(default=0, verbose_name='\u70b9\u51fb\u6b21\u6570')),
                # NOTE(review): 'fav_nims' looks like a typo for 'fav_nums',
                # but renaming an applied migration's column needs a new
                # migration, not an edit here.
                ('fav_nims', models.IntegerField(default=0, verbose_name='\u6536\u85cf\u6b21\u6570')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u6dfb\u52a0\u65f6\u95f4')),
                ('org', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='organization.CourseOrg', verbose_name='\u6240\u5c5e\u673a\u6784')),
            ],
            options={
                'verbose_name': '\u6559\u5e08',
                'verbose_name_plural': '\u6559\u5e08',
            },
        ),
    ]
"fzk466569"
] | fzk466569 |
6527570f5a79bb90a7fab38978ba3dc1ae78923b | 715c92b7a70037d1b790f4e4b75631126692faeb | /demos/__main__.py | f34d814f606b0f68b3725fbff70873ce78fbc402 | [
"MIT"
] | permissive | hanzhaozxd/PyWebIO | 894706391d67883c4b1c29c14ff429d89f495d1c | 39b898417f36a86f6fa3d35e2d1790546182c2d3 | refs/heads/master | 2023-02-19T09:00:46.013320 | 2021-01-17T11:09:23 | 2021-01-17T11:10:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,556 | py | import tornado.ioloop
import tornado.web
from demos.bmi import main as bmi
from demos.chat_room import main as chat_room
from demos.input_usage import main as input_usage
from demos.output_usage import main as output_usage
from demos.config import charts_demo_host
from demos.doc_demo import get_app as get_doc_demo_app
from demos.set_env_demo import main as set_env_demo
from pywebio import STATIC_PATH
from pywebio.output import put_markdown
from pywebio.session import set_env
from pywebio.platform.tornado import webio_handler
from tornado.options import define, options
index_md = r"""# PyWebIO demos
### 基本demo
- [BMI计算](./?pywebio_api=bmi): 根据身高体重计算BMI指数 [源码](https://github.com/wang0618/PyWebIO/blob/dev/demos/bmi.py)
- [聊天室](./?pywebio_api=chat_room): 和当前所有在线的人聊天 [源码](https://github.com/wang0618/PyWebIO/blob/dev/demos/chat_room.py)
- [输入演示](./?pywebio_api=input_usage): 演示PyWebIO输入模块的用法 [源码](https://github.com/wang0618/PyWebIO/blob/dev/demos/input_usage.py)
- [输出演示](./?pywebio_api=output_usage): 演示PyWebIO输出模块的用法 [源码](https://github.com/wang0618/PyWebIO/blob/dev/demos/output_usage.py)
- 更多Demo请见[文档](https://pywebio.readthedocs.io)中示例代码的在线Demo
### 数据可视化demo
PyWebIO还支持使用第三方库进行数据可视化
- 使用`bokeh`进行数据可视化 [**demos**]({charts_demo_host}/?pywebio_api=bokeh)
- 使用`plotly`进行数据可视化 [**demos**]({charts_demo_host}/?pywebio_api=plotly)
- 使用`pyecharts`创建基于Echarts的图表 [**demos**]({charts_demo_host}/?pywebio_api=pyecharts)
- 使用`cutecharts.py`创建卡通风格图表 [**demos**]({charts_demo_host}/?pywebio_api=cutecharts)
**数据可视化demo截图**
<a href="{charts_demo_host}/?pywebio_api=bokeh">
<img src="https://cdn.jsdelivr.net/gh/wang0618/pywebio-chart-gallery@master/assets/bokeh.png" alt="bokeh demo">
</a>
<a href="{charts_demo_host}/?pywebio_api=plotly">
<img src="https://cdn.jsdelivr.net/gh/wang0618/pywebio-chart-gallery@master/assets/plotly.png" alt="plotly demo">
</a>
<a href="{charts_demo_host}/?pywebio_api=pyecharts">
<img src="https://cdn.jsdelivr.net/gh/wang0618/pywebio-chart-gallery@master/assets/pyecharts.gif" alt="pyecharts demo">
</a>
<a href="{charts_demo_host}/?pywebio_api=cutecharts">
<img src="https://cdn.jsdelivr.net/gh/wang0618/pywebio-chart-gallery@master/assets/cutecharts.png" alt="cutecharts demo">
</a>
### Links
* PyWebIO Github [github.com/wang0618/PyWebIO](https://github.com/wang0618/PyWebIO)
* 使用手册和实现文档见 [pywebio.readthedocs.io](https://pywebio.readthedocs.io)
""".format(charts_demo_host=charts_demo_host)
def index():
    """Render the demo index page from the module-level markdown."""
    put_markdown(index_md)
if __name__ == "__main__":
    # --port command-line option (tornado.options), default 8080.
    define("port", default=8080, help="run on the given port", type=int)
    tornado.options.parse_command_line()
    # One handler per demo; the catch-all StaticFileHandler is listed last
    # so it does not shadow the demo routes (Tornado matches in order).
    application = tornado.web.Application([
        (r"/io", webio_handler(index)),
        (r"/bmi", webio_handler(bmi)),
        (r"/chat_room", webio_handler(chat_room)),
        (r"/input_usage", webio_handler(input_usage)),
        (r"/output_usage", webio_handler(output_usage)),
        (r"/doc_demo", webio_handler(get_doc_demo_app())),
        (r"/set_env_demo", webio_handler(set_env_demo)),
        (r"/(.*)", tornado.web.StaticFileHandler, {"path": STATIC_PATH, 'default_filename': 'index.html'})
    ])
    application.listen(port=options.port)
    tornado.ioloop.IOLoop.current().start()
| [
"wang0.618@qq.com"
] | wang0.618@qq.com |
0be1e453e444b05254b801ba80639e7c0d4c78c1 | c59f419d95e92985c133b797fb2089c25b69283d | /ax/modelbridge/tests/test_utils.py | 7996dda2445a0fc19595b9e3b097234b02b6380d | [
"MIT"
] | permissive | michaeldeyzel/Ax | 8f9ede559e8087a7dec320b14d94955845de808e | 16cb868911eecba323759e2e129df8833361e614 | refs/heads/master | 2022-12-24T17:27:36.576326 | 2020-09-30T23:48:25 | 2020-09-30T23:49:55 | 300,192,368 | 1 | 0 | MIT | 2020-10-01T07:45:32 | 2020-10-01T07:45:31 | null | UTF-8 | Python | false | false | 4,989 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ax.core.arm import Arm
from ax.core.data import Data
from ax.core.generator_run import GeneratorRun
from ax.core.observation import ObservationFeatures
from ax.modelbridge.modelbridge_utils import (
get_pending_observation_features,
pending_observations_as_array,
)
from ax.utils.common.testutils import TestCase
from ax.utils.testing.core_stubs import get_experiment
class TestModelbridgeUtils(TestCase):
    """Tests for pending-observation helpers in ax.modelbridge.modelbridge_utils."""
    def setUp(self) -> None:
        """Build one single-arm trial and one batch trial (with status quo)."""
        self.experiment = get_experiment()
        self.arm = Arm({"x": 1, "y": "foo", "z": True, "w": 4})
        self.trial = self.experiment.new_trial(GeneratorRun([self.arm]))
        self.experiment_2 = get_experiment()
        self.batch_trial = self.experiment_2.new_batch_trial(GeneratorRun([self.arm]))
        self.batch_trial.set_status_quo_with_weight(self.experiment_2.status_quo, 1)
        # Observation features for the single trial's arm, reused across tests.
        self.obs_feat = ObservationFeatures.from_arm(
            arm=self.trial.arm, trial_index=np.int64(self.trial.index)
        )
    def test_get_pending_observation_features(self):
        """Pending features track trial status and per-metric attached data."""
        # Pending observations should be none if there aren't any.
        self.assertIsNone(get_pending_observation_features(self.experiment))
        self.trial.mark_running(no_runner_required=True)
        # Now that the trial is deployed, it should become a pending trial on the
        # experiment and appear as pending for all metrics.
        self.assertEqual(
            get_pending_observation_features(self.experiment),
            {"tracking": [self.obs_feat], "m2": [self.obs_feat], "m1": [self.obs_feat]},
        )
        self.experiment.attach_data(
            Data.from_evaluations(
                {self.trial.arm.name: {"m2": (1, 0)}}, trial_index=self.trial.index
            )
        )
        # m2 should have empty pending features, since the trial was updated for m2.
        self.assertEqual(
            get_pending_observation_features(self.experiment),
            {"tracking": [self.obs_feat], "m2": [], "m1": [self.obs_feat]},
        )
        # When a trial is marked failed, it should no longer appear in pending...
        self.trial.mark_failed()
        self.assertIsNone(get_pending_observation_features(self.experiment))
        # ... unless specified to include failed trials in pending observations.
        self.assertEqual(
            get_pending_observation_features(
                self.experiment, include_failed_as_pending=True
            ),
            {"tracking": [self.obs_feat], "m2": [], "m1": [self.obs_feat]},
        )
    def test_get_pending_observation_features_batch_trial(self):
        """Batch trials report every arm, including the status quo arm."""
        # Check the same functionality for batched trials.
        self.assertIsNone(get_pending_observation_features(self.experiment_2))
        self.batch_trial.mark_running(no_runner_required=True)
        sq_obs_feat = ObservationFeatures.from_arm(
            self.batch_trial.arms_by_name.get("status_quo"),
            trial_index=self.batch_trial.index,
        )
        self.assertEqual(
            get_pending_observation_features(self.experiment_2),
            {
                "tracking": [self.obs_feat, sq_obs_feat],
                "m2": [self.obs_feat, sq_obs_feat],
                "m1": [self.obs_feat, sq_obs_feat],
            },
        )
    def test_pending_observations_as_array(self):
        """Array conversion honors outcome_names and drops fulfilled metrics."""
        # Mark a trial dispatched so that there are pending observations.
        self.trial.mark_running(no_runner_required=True)
        # If outcome names are respected, unlisted metrics should be filtered out.
        self.assertEqual(
            [
                x.tolist()
                for x in pending_observations_as_array(
                    pending_observations=get_pending_observation_features(
                        self.experiment
                    ),
                    outcome_names=["m2", "m1"],
                    param_names=["x", "y", "z", "w"],
                )
            ],
            [[["1", "foo", "True", "4"]], [["1", "foo", "True", "4"]]],
        )
        self.experiment.attach_data(
            Data.from_evaluations(
                {self.trial.arm.name: {"m2": (1, 0)}}, trial_index=self.trial.index
            )
        )
        # There should be no pending observations for metric m2 now, since the
        # only trial there is, has been updated with data for it.
        self.assertEqual(
            [
                x.tolist()
                for x in pending_observations_as_array(
                    pending_observations=get_pending_observation_features(
                        self.experiment
                    ),
                    outcome_names=["m2", "m1"],
                    param_names=["x", "y", "z", "w"],
                )
            ],
            [[], [["1", "foo", "True", "4"]]],
        )
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
72e0e8ff5664148d52c0fb82dfb7844b8e86cd6c | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/CodeJamData/12/42/0.py | 714d6dda48f9c5d58e6e930d729fdfa62cf0f92d | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,256 | py | import sys
import random
def calc(a, w, l, b):
    """Greedily place items of extent a[i], visited in order b, into a w-by-l box.

    Items are laid out left-to-right in rows; a new row starts when the
    current one would exceed width w. Returns a flat space-separated string
    of the placed coordinates, or None if any item would exceed length l.
    (Assumes b is a permutation of range(len(a)) — TODO confirm with caller.)
    """
    coords = [[None, None] for _ in a]
    first = b[0]
    coords[first] = [0.0, 0.0]
    row_x = a[first]       # x cursor on the current row
    row_base = None        # y of the current row; None while on the bottom row
    next_base = a[first]   # y where the next row would start
    for idx in b[1:]:
        size = a[idx]
        if row_x + size <= w:
            # Item still fits on the current row.
            px = row_x + size
            if row_base is None:
                py = 0.0
                next_base = max(next_base, size)
            else:
                py = row_base + size
            if py > l:
                return None
            coords[idx] = [px, py]
            row_x = px + size
            next_base = max(next_base, py + size)
        else:
            # Start a new row at the left edge.
            px = 0.0
            py = next_base + size
            if py > l:
                return None
            coords[idx] = [px, py]
            row_x = size
            row_base = next_base
            next_base = py + size
    return ' '.join(str(value) for pair in coords for value in pair)
def foo(ifile):
    """Read one test case (n, w, l then n sizes) and retry random orderings
    until calc() finds a placement that fits; return the coordinate string.

    NOTE(review): Las-Vegas loop — never terminates if no ordering fits;
    presumably the contest input guarantees a solution exists.
    """
    n, w, l = [int(x) for x in ifile.readline().split()]
    a = [float(x) for x in ifile.readline().split()]
    while True:
        # Python 2: range() returns a list, so it can be shuffled in place.
        b = range(n)
        random.shuffle(b)
        res = calc(a, w, l, b)
        if res is not None:
            return res
def main():
    """Read the case count from stdin and print one 'Case #i: ...' per case."""
    ifile = sys.stdin
    n = int(ifile.readline())
    for i in range(n):
        # Python 2 print statement; cases are numbered from 1.
        print 'Case #%d: %s' % (i+1, foo(ifile))
# Script entry: runs immediately on import/execution (no __main__ guard).
main()
| [
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
c919034f003da4b5fa2d35635ca362243606b96a | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py | bc9bb9006ed7e23ceb781ccb8ac5834c965dbefd | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 1,494 | py | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..model import EstimateContrast
def test_EstimateContrast_inputs():
    """Verify EstimateContrast's input-spec traits carry the expected metadata
    (field names, mandatory/copyfile flags, defaults).

    Auto-generated by tools/checkspecs.py — keep in sync with the interface.
    """
    input_map = dict(
        beta_images=dict(
            copyfile=False,
            mandatory=True,
        ),
        contrasts=dict(mandatory=True, ),
        group_contrast=dict(xor=['use_derivs'], ),
        matlab_cmd=dict(),
        mfile=dict(usedefault=True, ),
        paths=dict(),
        residual_image=dict(
            copyfile=False,
            mandatory=True,
        ),
        spm_mat_file=dict(
            copyfile=True,
            field='spmmat',
            mandatory=True,
        ),
        use_derivs=dict(xor=['group_contrast'], ),
        use_mcr=dict(),
        use_v8struct=dict(
            min_ver='8',
            usedefault=True,
        ),
    )
    inputs = EstimateContrast.input_spec()
    # Every expected metadata key/value must match the live trait definition.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_EstimateContrast_outputs():
    """Verify EstimateContrast's output-spec traits exist with no extra metadata.

    Auto-generated by tools/checkspecs.py — keep in sync with the interface.
    """
    output_map = dict(
        con_images=dict(),
        ess_images=dict(),
        spmF_images=dict(),
        spmT_images=dict(),
        spm_mat_file=dict(),
    )
    outputs = EstimateContrast.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
71abecf624afa83a467add8275c42a448fabb35f | 6371acdb640e62e4e6addac2ba1aa70002a8c1b1 | /Algorithms/pySINDy/env/lib/python3.6/site-packages/astroid/context.py | 931bbf5ea0745248cc3ad33ae68eca8962d0303d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | M-Vause/SEED | 263307152ebac1e4f49cd81dcd5207ecbdf51139 | cda94a02a5ef47a1e9a885d330eef2821301ebed | refs/heads/master | 2022-12-13T20:11:58.893994 | 2020-04-27T16:10:09 | 2020-04-27T16:10:09 | 252,790,026 | 3 | 3 | MIT | 2022-12-08T01:52:05 | 2020-04-03T16:55:10 | Jupyter Notebook | UTF-8 | Python | false | false | 5,023 | py | # Copyright (c) 2015-2016, 2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015-2016 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2018 Nick Drozd <nicholasdrozd@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Various context related utilities, including inference and call contexts."""
import pprint
from typing import Optional
class InferenceContext:
    """Provide context for inference.

    Remembers already-inferred nodes (a cache) and the set of nodes already
    visited on the current path, which is how infinite recursion is stopped.
    """

    __slots__ = (
        "path",
        "lookupname",
        "callcontext",
        "boundnode",
        "inferred",
        "extra_context",
    )

    def __init__(self, path=None, inferred=None):
        # Visited (node, lookupname) pairs on the current inference path.
        self.path = path or set()
        # Original name of the node, e.g. 'foo' when inferring ``foo = 1``.
        self.lookupname = None
        # Call arguments and keywords for the current call site, if any.
        self.callcontext = None
        # Bound node, e.g. the object node for ``object.__new__(cls)``.
        self.boundnode = None
        # Cache: (node, lookupname, callcontext, boundnode) -> inferred tuple.
        self.inferred = inferred or {}
        # Contexts that need to be passed down through call stacks
        # for call arguments.
        self.extra_context = {}

    def push(self, node):
        """Push node into the inference path.

        :return: True if the node has already been visited in this context
            (nothing is added in that case), False otherwise.
        :rtype: bool
        """
        key = (node, self.lookupname)
        if key in self.path:
            return True
        self.path.add(key)
        return False

    def clone(self):
        """Clone the inference path.

        Each side of a binary operation, for example, starts from the same
        context but diverges as each side is inferred — hence the copy.
        """
        # XXX copy lookupname/callcontext ?
        duplicate = InferenceContext(set(self.path), inferred=self.inferred)
        duplicate.callcontext = self.callcontext
        duplicate.boundnode = self.boundnode
        duplicate.extra_context = self.extra_context
        return duplicate

    def cache_generator(self, key, generator):
        """Yield items from *generator*, caching them as a tuple under *key*.

        Used to cache inference results.
        """
        seen = []
        for item in generator:
            seen.append(item)
            yield item
        self.inferred[key] = tuple(seen)

    def __str__(self):
        parts = (
            "%s=%s" % (name, pprint.pformat(getattr(self, name), width=80 - len(name)))
            for name in self.__slots__
        )
        return "%s(%s)" % (type(self).__name__, ",\n ".join(parts))
class CallContext:
    """Holds information for a call site."""

    __slots__ = ("args", "keywords")

    def __init__(self, args, keywords=None):
        """
        :param List[NodeNG] args: Call positional arguments
        :param Union[List[nodes.Keyword], None] keywords: Call keywords
        """
        self.args = args
        # Normalize keyword nodes into (name, value-node) pairs up front.
        self.keywords = [(kw.arg, kw.value) for kw in keywords] if keywords else []
def copy_context(context: Optional[InferenceContext]) -> InferenceContext:
    """Clone a context if given, or return a fresh context."""
    if context is None:
        return InferenceContext()
    return context.clone()
def bind_context_to_node(context, node):
    """Give a context a boundnode so further inference retrieves the correct
    function name or attribute value.

    A fresh copy is made so the bound node cannot be incorrectly propagated
    higher up in the call stack.

    :param context: Context to use
    :type context: Optional(context)
    :param node: Node to do name lookups from
    :type node NodeNG:
    :returns: A new context
    :rtype: InferenceContext
    """
    bound = copy_context(context)
    bound.boundnode = node
    return bound
| [
"58262117+M-Vause@users.noreply.github.com"
] | 58262117+M-Vause@users.noreply.github.com |
f4b1ffeb2e82b02dd7d7314912a076a42efbfa8f | 56be7f6b6a1243c532af9ea98310ccea165a1e66 | /day8/断点续传/client.py | 724d6c495f1e80afb29394076033f7ee1e98bc8d | [] | no_license | 214031230/Python21 | 55b0405ec4ad186b052cde7ebfb3f4bb636a3f30 | d7fc68d3d23345df5bfb09d4a84686c8b49a5ad7 | refs/heads/master | 2021-05-26T06:00:53.393577 | 2019-01-09T02:29:04 | 2019-01-09T02:29:04 | 127,778,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | #!/usr/bin/env python3
import socket
import struct
import os
import hashlib
import json
# Server endpoint (loopback address for local testing).
IP = "127.0.0.1"
PORT = 9999
class FtpClient:
    """Minimal upload client: connects on construction, then streams files to
    the server, each preceded by a length-prefixed JSON header
    (name, size, md5)."""
    def __init__(self, ip, port):
        self.ip = ip
        self.port = port
        self.client = socket.socket()
        # Connects immediately; construction fails if the server is down.
        self.client.connect((ip, port))
        self.code = "utf-8"
        self.buffer_size = 8192
        # NOTE(review): self.upload is never referenced below — confirm intent.
        self.upload = "./client"
    def file(self):
        """Interactively prompt for file paths and upload each one.

        Wire format per file: 4-byte packed header length, then the JSON
        header, then the raw file bytes.
        NOTE(review): ``flag`` is never set to False, so this loops until
        input()/open() raises; send() may also short-write on large chunks —
        sendall() would be safer. Left as-is in this documentation pass.
        """
        flag = True
        while flag:
            file = input(">>>:")
            header = {"name": os.path.basename(file),
                      "size": os.path.getsize(file),
                      "md5": FtpClient.file_md5(file)}
            header_json = json.dumps(header)
            self.client.send(struct.pack("i", len(header_json)))
            self.client.send(header_json.encode(self.code))
            with open(file, "rb") as f:
                for i in f:
                    self.client.send(i)
    @staticmethod
    def file_md5(file_path):
        """Return the hex MD5 digest of the file, read line by line."""
        with open(file_path, "rb") as f:
            md5obj = hashlib.md5()
            for i in f:
                md5obj.update(i)
            return md5obj.hexdigest()
# Script entry: connect to the server and start the interactive upload loop.
obj = FtpClient(IP, PORT)
obj.file()
"214031230@qq.com"
] | 214031230@qq.com |
3061a60c4925e4d5652097f97500c5bdd39927db | f0355e4d85d428d2400898a8b8132ddad0931f45 | /python_book_exercise/function_two.py | f43f5d64712c361b3257672bffea0900f1d34ea0 | [
"MIT"
] | permissive | itsjw/python_exercise | 289dd74515a1b1456da9dfb84179dfd6eaed8d3b | 7773d95b4c25b82a9d014f7a814ac83df9ebac17 | refs/heads/master | 2021-08-30T11:30:16.661760 | 2017-12-17T18:48:40 | 2017-12-17T18:48:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | def print_max(a,b):
if a > b:
print(a, ' is maximum ')
elif a == b:
print(a,'is equal to ',b)
else:
print(b,'is maximum')
# Directly pass literal values.
print_max(3,4)
x = 5
y = 7
# Pass variables as arguments.
print_max(x,y)
| [
"vimmrana0@gmail.com"
] | vimmrana0@gmail.com |
9bfe3fff36fe7e0132b61164166bc6cfbe69ef8c | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/googlecloudsdk/generated_clients/apis/remotebuildexecution/v1alpha/resources.py | 31bb429115a3a2da3f1250276e3d8f8b8718fbd1 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 1,969 | py | # -*- coding: utf-8 -*- #
# Copyright 2023 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for Cloud Platform Apis generated from apitools."""
import enum
# Service endpoint and documentation URLs for the generated API client.
BASE_URL = 'https://admin-remotebuildexecution.googleapis.com/v1alpha/'
DOCS_URL = 'https://cloud.google.com/remote-build-execution/docs/'
class Collections(enum.Enum):
  """Collections for all supported apis."""

  # Each member's value is the tuple:
  #   (collection_name, path, flat_paths, params, enable_uri_parsing)
  PROJECTS_INSTANCES = (
      'projects.instances',
      '{+name}',
      {
          '':
              'projects/{projectsId}/instances/{instancesId}',
      },
      ['name'],
      True
  )
  PROJECTS_INSTANCES_WORKERPOOLS = (
      'projects.instances.workerpools',
      '{+name}',
      {
          '':
              'projects/{projectsId}/instances/{instancesId}/workerpools/'
              '{workerpoolsId}',
      },
      ['name'],
      True
  )
  PROJECTS_OPERATIONS = (
      'projects.operations',
      '{+name}',
      {
          '':
              'projects/{projectsId}/operations/{operationsId}',
      },
      ['name'],
      True
  )
  PROJECTS = (
      'projects',
      'projects/{projectsId}',
      {},
      ['projectsId'],
      True
  )

  def __init__(self, collection_name, path, flat_paths, params,
               enable_uri_parsing):
    # enum.Enum invokes __init__ once per member with its tuple value,
    # exposing the fields as attributes on the member.
    self.collection_name = collection_name
    self.path = path
    self.flat_paths = flat_paths
    self.params = params
    self.enable_uri_parsing = enable_uri_parsing
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
3d9a7e43fe7977316817105d436ae6913620a164 | d8a541a2953c9729311059585bb0fca9003bd6ef | /functions/even_odd.py | 77aaf7a5a9a9cd96dd52f9d2e64e757ddbb547b1 | [] | no_license | grigor-stoyanov/PythonAdvanced | ef7d628d2b81ff683ed8dd47ee307c41b2276dd4 | 0a6bccc7faf1acaa01979d1e23cfee8ec29745b2 | refs/heads/main | 2023-06-10T09:58:04.790197 | 2021-07-03T02:52:20 | 2021-07-03T02:52:20 | 332,509,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | def even_odd(*args):
even_odd_list = list(args)
flag = even_odd_list.pop()
if flag == 'even':
even_odd_list = list(filter(lambda x: True if x % 2 == 0 else False, even_odd_list))
elif flag == 'odd':
even_odd_list = list(filter(lambda x: True if x % 2 == 1 else False, even_odd_list))
return even_odd_list
# Demo: keep only the evens / odds of the leading arguments.
print(even_odd(1, 2, 3, 4, 5, 6, "even"))
print(even_odd(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "odd"))
| [
"76039296+codelocks7@users.noreply.github.com"
] | 76039296+codelocks7@users.noreply.github.com |
7e785b738e4e9ade50ee1132565889e9c00a626b | f4ef947106cc41814042309c9de5465e69e39292 | /python/introduction/if-else.py | bb26e001fb363a51246f7268136b6595daaee6c7 | [] | no_license | pkdism/hackerrank | 96583a09577ed1c5130b0eb05a57b5db9e5b2c18 | beb30b5b9a17c80fc65af7c765b7ae5684d5c25a | refs/heads/master | 2020-08-01T23:19:32.182123 | 2019-10-12T17:53:48 | 2019-10-12T17:53:48 | 211,154,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | def print_weird(is_weird):
if is_weird == True:
print("Weird")
else:
print("Not Weird")
# Read n and classify it: odd -> Weird; even in [2,5] -> Not Weird;
# even in [6,20] -> Weird; even above 20 -> Not Weird.
n = int(input())
if n%2 == 1:
    print_weird(True)
elif n%2 == 0 and n >= 2 and n <= 5:
    print_weird(False)
elif n%2 == 0 and n >= 6 and n <= 20:
    print_weird(True)
elif n%2 == 0 and n > 20:
    print_weird(False)
| [
"pawan.dwivedi94@gmail.com"
] | pawan.dwivedi94@gmail.com |
af58304634321d776ff93fdb3b33cd63d88bd5a7 | b5c17b494204ed215ecfdc65932b2c960fa9e121 | /build_msvc/msvc-autogen.py | af447f1a236eddc919a013b0158853c40645842b | [
"MIT"
] | permissive | syglee7/zenacoin-ver2 | 9c8943c84b8eefad4ce3fee6ac15a9878b87f1df | 90079b95bdf0ea2b7fce644c56d2a9626526e5e4 | refs/heads/master | 2023-03-10T07:29:47.772820 | 2021-02-21T13:57:41 | 2021-02-21T13:57:41 | 340,617,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,654 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Zenacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
import re
import argparse
from shutil import copyfile
# Absolute path to the repository's src/ directory (relative to this script).
SOURCE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src'))
# Default msbuild platform toolset (v141 = Visual Studio 2017).
DEFAULT_PLATFORM_TOOLSET = R'v141'
# Library/binary targets whose sources are harvested from the Makefiles.
libs = [
    'libzenacoin_cli',
    'libzenacoin_common',
    'libzenacoin_crypto',
    'libzenacoin_server',
    'libzenacoin_util',
    'libzenacoin_wallet_tool',
    'libzenacoin_wallet',
    'libzenacoin_zmq',
    'bench_zenacoin',
    'libtest_util',
]
# Source files to exclude from the generated project files.
ignore_list = [
]
# Filled by parse_makefile(): lib name -> [(source_filename, object_filename)].
lib_sources = {}
def parse_makefile(makefile):
    """Scan one automake Makefile and collect .cpp sources per known library.

    For every ``<lib>..._SOURCES = \\`` block whose lib is in ``libs``,
    appends (windows_source_path, object_filename) pairs to the module-level
    ``lib_sources`` dict. Continuation lines are tracked via trailing '\\'.
    """
    with open(makefile, 'r', encoding='utf-8') as file:
        current_lib = ''
        for line in file.read().splitlines():
            if current_lib:
                # Inside a _SOURCES continuation block: first token is a path.
                source = line.split()[0]
                if source.endswith('.cpp') and not source.startswith('$') and source not in ignore_list:
                    source_filename = source.replace('/', '\\')
                    object_filename = source.replace('/', '_')[:-4] + ".obj"
                    lib_sources[current_lib].append((source_filename, object_filename))
                if not line.endswith('\\'):
                    # No trailing backslash: the block ends on this line.
                    current_lib = ''
                continue
            for lib in libs:
                _lib = lib.replace('-', '_')
                if re.search(_lib + '.*_SOURCES \\= \\\\', line):
                    current_lib = lib
                    lib_sources[current_lib] = []
                    break
def set_common_properties(toolset):
    """Rewrite common.init.vcxproj in place, replacing every
    <PlatformToolset> element with the given toolset (e.g. 'v142')."""
    with open(os.path.join(SOURCE_DIR, '../build_msvc/common.init.vcxproj'), 'r', encoding='utf-8') as rfile:
        s = rfile.read()
        s = re.sub('<PlatformToolset>.*?</PlatformToolset>', '<PlatformToolset>'+toolset+'</PlatformToolset>', s)
    with open(os.path.join(SOURCE_DIR, '../build_msvc/common.init.vcxproj'), 'w', encoding='utf-8',newline='\n') as wfile:
        wfile.write(s)
def main():
    """Generate .vcxproj files from their .in templates.

    Optionally overrides the platform toolset, harvests per-library sources
    from the automake Makefiles, substitutes @SOURCE_FILES@ in each
    template, and copies the static config headers into src/.
    """
    parser = argparse.ArgumentParser(description='Zenacoin-core msbuild configuration initialiser.')
    parser.add_argument('-toolset', nargs='?',help='Optionally sets the msbuild platform toolset, e.g. v142 for Visual Studio 2019.'
                        ' default is %s.'%DEFAULT_PLATFORM_TOOLSET)
    args = parser.parse_args()
    if args.toolset:
        set_common_properties(args.toolset)
    for makefile_name in os.listdir(SOURCE_DIR):
        if 'Makefile' in makefile_name:
            parse_makefile(os.path.join(SOURCE_DIR, makefile_name))
    for key, value in lib_sources.items():
        vcxproj_filename = os.path.abspath(os.path.join(os.path.dirname(__file__), key, key + '.vcxproj'))
        content = ''
        # Build the MSBuild <ClCompile> items for this library's sources.
        for source_filename, object_filename in value:
            content += ' <ClCompile Include="..\\..\\src\\' + source_filename + '">\n'
            content += ' <ObjectFileName>$(IntDir)' + object_filename + '</ObjectFileName>\n'
            content += ' </ClCompile>\n'
        with open(vcxproj_filename + '.in', 'r', encoding='utf-8') as vcxproj_in_file:
            with open(vcxproj_filename, 'w', encoding='utf-8') as vcxproj_file:
                vcxproj_file.write(vcxproj_in_file.read().replace(
                    '@SOURCE_FILES@\n', content))
    copyfile(os.path.join(SOURCE_DIR,'../build_msvc/zenacoin_config.h'), os.path.join(SOURCE_DIR, 'config/zenacoin-config.h'))
    copyfile(os.path.join(SOURCE_DIR,'../build_msvc/libsecp256k1_config.h'), os.path.join(SOURCE_DIR, 'secp256k1/src/libsecp256k1-config.h'))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"syglee7@gmail.com"
] | syglee7@gmail.com |
430c76009eb76b20c0e40264e92fd2a0bc6178fa | e7a8ab3898ef331ca11c63808c4d9449794308c2 | /Leetcode/71.simplify-path.py | 09babd70be4afa43ae3a43352abb6bc5b4a61481 | [
"MIT"
] | permissive | EdwaRen/Competitve-Programming | 615695e00b13bda8024055f9634a7de30534977c | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | refs/heads/master | 2021-06-06T11:23:18.758911 | 2021-05-29T14:27:04 | 2021-05-29T14:27:04 | 97,161,907 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | class Solution(object):
def simplifyPath(self, path):
"""
:type path: str
:rtype: str
"""
if not path:
return ''
# Keep track of directories with stack
stack = []
# split by slash
path_elements = path.split('/')
for i in path_elements:
if i == '' or i == '.':
continue
if i == '..':
if len(stack) > 0:
stack.pop()
else:
stack.append(i)
res = '/' + '/'.join(stack)
return res
# Demo run: simplify a sample path and print the result.
z = Solution()
path = "/home/"
res = z.simplifyPath(path)
print(res)
# print([].pop())
| [
"eddie.ren.2013@gmail.com"
] | eddie.ren.2013@gmail.com |
e90511aa1c45420f4147cb49d24e3d0a1f586f32 | 07ecc53b5be6b1a34914a0e02265e847f3ac1a65 | /Python/Sort/归并排序.py | 0fdd5bbb8d48cc5d5ab8d35111854d5c0f4a9597 | [] | no_license | JasmineRain/Algorithm | 764473109ad12c051f5337ed6f22b517ed9bff30 | 84d7e11c1a01b1994e04a3ab446f0a35eb3d362a | refs/heads/master | 2023-03-14T00:39:51.767074 | 2021-03-09T12:41:44 | 2021-03-09T12:41:44 | 289,603,630 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | class Solution:
def mergeSort(self, nums, start, end):
self.sort(nums, start, end)
def sort(self, nums, start, end):
if start == end:
return
mid = (start + end) // 2
self.sort(nums, start, mid)
self.sort(nums, mid + 1, end)
self.merge(nums, start, mid, end)
def merge(self, nums, start, mid, end):
temp = [0] * (end - start + 1)
left, right = start, mid + 1
index = 0
while left <= mid and right <= end:
if nums[left] < nums[right]:
temp[index] = nums[left]
left += 1
else:
temp[index] = nums[right]
right += 1
index += 1
while left <= mid:
temp[index] = nums[left]
index += 1
left += 1
while right <= end:
temp[index] = nums[right]
index += 1
right += 1
nums[start: end + 1] = temp
# Demo: sort a sample list in place and print it.
if __name__ == "__main__":
    S = Solution()
    nums = [4, 2, 3, 1, 5, 1]
    S.mergeSort(nums, 0, len(nums) - 1)
    print(nums)
| [
"530781348@qq.com"
] | 530781348@qq.com |
b25698177869b376581a6e6803c839cd5c03c423 | e08d47be66026209f81c6190901e02064878f47f | /Main.py | 8e7330e63b18002554366aa4698a0c3c094ac33d | [] | no_license | Anthonymcqueen21/Whats-in-name | 06f730ac7a543f43ebc330c2e85a973bb626b7ed | ac4ee5f55e3589b4bf3c7c0f6c12617315dcd31c | refs/heads/master | 2021-04-28T04:04:11.596939 | 2018-02-21T04:15:18 | 2018-02-21T04:15:18 | 122,153,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | https://multiplexer-prod.datacamp.com/proxy/absolute/4d67c28b5588dba2dbfc2f42b73422d7/notebooks/production/users/1258290/j1g5pk1iou/notebooks/notebook.ipynb
| [
"noreply@github.com"
] | Anthonymcqueen21.noreply@github.com |
1b56c3af5bb427f9ef6164f6a46e94cdf58d2bcf | 186662a84fc36e7b21464291dcec5aa6fdc17459 | /ph_plotter/file_io.py | 898e2d18247bd9cd7b956196b1f5d5ede451a3d7 | [
"MIT"
] | permissive | yimaverickxia/ph_plotter | 5d17787060db845e2c2f4da6140767dfbf42b27b | d8642614f8a33745a5e9ff66cee88733c136ed65 | refs/heads/master | 2020-03-28T20:09:32.234439 | 2017-04-28T10:43:35 | 2017-04-28T10:52:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,629 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
__author__ = "Yuji Ikeda"
import numpy as np
def read_band_yaml(yaml_file="band.yaml"):
    """Read phonon band data from a Phonopy-style band.yaml file.

    :param yaml_file: path to the YAML file.
    :return: (distance, frequency) numpy arrays with shapes (npath, nsep)
        and (npath, nsep, nband), where nsep = nqpoint // npath and
        nband = 3 * natom.
    """
    import yaml
    # NOTE(review): yaml.load is unsafe on untrusted input (and deprecated
    # without an explicit Loader in modern PyYAML); consider yaml.safe_load.
    with open(yaml_file, "r") as f:  # context manager: the original leaked the handle
        data = yaml.load(f)
    nqpoint = data['nqpoint']
    npath = data['npath']
    natom = data['natom']
    nband = natom * 3
    nsep = nqpoint // npath
    distance = np.zeros((npath, nsep))
    frequency = np.zeros((npath, nsep, nband))
    for ipath in range(npath):
        for isep in range(nsep):
            # q-points are stored flat, path-major.
            iq = ipath * nsep + isep
            phonon = data['phonon'][iq]
            distance[ipath, isep] = phonon['distance']
            for iband in range(nband):
                frequency[ipath, isep, iband] = phonon['band'][iband]['frequency']
    return distance, frequency
def read_band_hdf5(hdf5_file="band.hdf5"):
    """Read band-structure arrays from a phonopy-style HDF5 file.

    :param hdf5_file: path to the HDF5 file.
    :return: (distances, frequencies, pr_weights, nqstars) numpy arrays.
    """
    import h5py
    with h5py.File(hdf5_file, "r") as f:
        # Materialize while the file is open: h5py dataset handles become
        # invalid once the file is closed, so the original np.array() calls
        # placed after the ``with`` block failed at runtime. The unused read
        # of f["paths"] was dropped; it was never returned.
        distances = np.array(f["distances"])
        nqstars = np.array(f["nqstars"])
        frequencies = np.array(f["frequencies"])
        pr_weights = np.array(f["pr_weights"])
    return distances, frequencies, pr_weights, nqstars
def read_band_hdf5_dict(hdf5_file="band.hdf5"):
    """Read every dataset in the HDF5 file into a name -> ndarray dict."""
    import h5py
    with h5py.File(hdf5_file, "r") as f:
        return {key: np.array(dataset) for key, dataset in f.items()}
| [
"ikeda@IKEDA-MBA.local"
] | ikeda@IKEDA-MBA.local |
45efc66baad740c224df9cd3770d59047bb0c375 | ff5942a60f82610fd75aea3186d0054c6a676554 | /lib/db/migrations/2014-01-15-02-modify_users_notification_rename_frequencies.py | 6cdfbf0af1d312a97517aca53952391bfc6d7367 | [
"MIT"
] | permissive | plastr/extrasolar-game | 58a5841a8fba0018936ad964cbf2a45dbe2f1a0e | 1aad5971556d498e3617afe75f27e2f4132d4668 | refs/heads/main | 2023-04-11T08:43:30.161085 | 2021-04-21T15:41:40 | 2021-04-21T15:41:40 | 359,884,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | from front.lib.db import setup_migration_cursor
from front import activity_alert_types
def forward(conn):
    """Forward migration: rename activity-alert frequency values
    INSTANT -> MEDIUM and DIGEST -> LONG in users_notification."""
    cursor = setup_migration_cursor(conn)
    # Rename the previous INSTANT and DIGEST keys to be MEDIUM and LONG
    cursor.execute('UPDATE users_notification SET activity_alert_frequency="%s" WHERE activity_alert_frequency="INSTANT"' % activity_alert_types.MEDIUM)
    cursor.execute('UPDATE users_notification SET activity_alert_frequency="%s" WHERE activity_alert_frequency="DIGEST"' % activity_alert_types.LONG)
def reverse(conn):
    """Reverse migration: restore MEDIUM -> INSTANT and LONG -> DIGEST."""
    cursor = setup_migration_cursor(conn)
    cursor.execute('UPDATE users_notification SET activity_alert_frequency="INSTANT" WHERE activity_alert_frequency="%s"' % activity_alert_types.MEDIUM)
    cursor.execute('UPDATE users_notification SET activity_alert_frequency="DIGEST" WHERE activity_alert_frequency="%s"' % activity_alert_types.LONG)
# Register the migration pair with the framework's step() helper
# (step is injected by the migration runner, not defined in this file).
step(forward, reverse)
| [
"37421+plastr@users.noreply.github.com"
] | 37421+plastr@users.noreply.github.com |
cc23d537b613d0a860b7541ab952d1e52f6fd557 | 8bb17133e90f330a112fca1ca84c6374c87a5aa4 | /mcts.py | 9bf7067e3ce3e1fd3ce6e0d0d52cdb824fafb5a6 | [] | no_license | kristogj/general-purpose-mcts | 79731ff51745709ce299b6d961e6e53acc963fbc | 43636642c9806f30d7ec2038c5999e1de50546ce | refs/heads/master | 2022-04-20T22:45:29.629658 | 2020-04-21T14:51:12 | 2020-04-21T14:51:12 | 255,140,349 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,763 | py | from state_manager import StateManager
import operator
from utils import get_next_player
import random
from numpy import log, sqrt
class MonteCarloSearchTree:
    """Monte Carlo Tree Search over a StateManager-provided game tree.

    Player 1 maximises node values; player 2 minimises them.
    """
    def __init__(self, game_type, game_config):
        self.state_manager = StateManager(game_type, game_config)
        self.root = None
        self.c = game_config["c"]  # Exploration constant
        self.state_manager.init_new_game()
    def set_root(self, node):
        # Re-root the tree (e.g. after an actual move is played).
        self.root = node
    def get_augmented_value(self, node, player):
        """
        Calculation needed in order to perform the Tree Policy
        :param node: Node
        :param player: int
        :return: float
        """
        # UCT-style exploration bonus; the sign flips for the minimising
        # player so exploration pulls the value in that player's direction.
        c = self.c if player == 1 else -self.c
        return node.value + c * sqrt(log(node.parent.total) / (1 + node.total))
    def select(self, root):
        """
        Calculate the the augmented value for each child, and select the best path for the current player to take.
        :param root: Node
        :return:
        """
        # Calculate the augmented values needed for the tree policy
        children = [(node, self.get_augmented_value(node, root.player)) for node in root.children]
        # Tree Policy = Maximise for P1 and minimize for P2
        if root.player == 1:
            root, value = max(children, key=operator.itemgetter(1))
        else:
            root, value = min(children, key=operator.itemgetter(1))
        return root
    def selection(self):
        """
        Tree search - Traversing the tree from the root to a leaf node by using the tree policy.
        :return: Node
        """
        root = self.root
        children = root.get_children()
        # While root is not a leaf node
        while len(children) != 0:
            root = self.select(root)
            children = root.get_children()
        return root
    def expansion(self, leaf):
        """
        Node Expansion - Generating some or all child states of a parent state, and then connecting the tree node
        housing the parent state (a.k.a. parent node) to the nodes housing the child states (a.k.a. child nodes).
        :return:
        """
        # Get all legal child states from leaf state
        leaf.children = self.state_manager.get_child_nodes(leaf.state)
        # Set leaf as their parent node
        child_player = get_next_player(leaf.player)
        for child in leaf.children:
            child.player = child_player
            child.parent = leaf
        # Tree is now expanded, return the leaf, and simulate to game over
        return leaf
    def simulation(self, node):
        """
        Leaf Evaluation - Estimating the value of a leaf node in the tree by doing a roll-out simulation using the
        default policy from the leaf node’s state to a final state.
        :return: int - The player who won the simulated game
        """
        # Rollout: pick uniformly random children until no moves remain.
        current_node = node
        children = self.state_manager.get_child_nodes(current_node.state)
        player = node.player
        while len(children) != 0:
            # Use the default policy (random) to select a child
            current_node = random.choice(children)
            player = get_next_player(player)
            children = self.state_manager.get_child_nodes(current_node.state)
        winner = get_next_player(player) # Winner was actually the prev player who made a move
        # z = 1 iff player 1 won the rollout.
        return int(winner == 1)
    @staticmethod
    def backward(sim_node, z):
        """
        Backward propagation - Passing the evaluation of a final state back up the tree, updating relevant data
        (see course lecture notes) at all nodes and edges on the path from the final state to the tree root.
        :param sim_node: Node - leaf node to go backward from
        :param z: int - 1 if player 1 won, else 0
        :return: None
        """
        node = sim_node
        node.total += 1
        while node.parent:
            # Each node's value is updated with its own (already incremented)
            # visit count; the parent's count is bumped before moving up.
            # NOTE(review): the root's value is never updated by this loop
            # (the loop body updates `node`, then stops once parent is None)
            # — confirm this is intentional.
            node.parent.total += 1
            node.value += (z - node.value) / node.total
            node = node.parent
    def select_actual_action(self, player):
        """
        To select the actual action to take in the game, select the edge with the highest visit count
        :return: Node
        """
        # NOTE(review): despite the docstring, this compares child.value,
        # not visit counts — confirm which criterion is intended.
        children = [(child, child.value) for child in self.root.children]
        # Tree Policy = Maximise for P1 and minimize for P2
        if player == 1:
            root, value = max(children, key=operator.itemgetter(1))
        else:
            root, value = min(children, key=operator.itemgetter(1))
        return root
    def tree_print(self):
        # Breadth-first dump of (total, player) per node, for debugging.
        # (Popping from the front of a list is O(n); fine for small trees.)
        nodes = [self.root]
        while nodes:
            curr = nodes[0]
            nodes = nodes[1:]
            print((curr.total, curr.player))
            nodes += curr.children
| [
"kristoffergjerde@gmail.com"
] | kristoffergjerde@gmail.com |
b7dcd914cd4285b4aff0e648cd9f27222fb51660 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /fsNMnyjMkErQtvpMW_7.py | dd07074eba6993e697cb7f1b8d977ebfda8a11b9 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py |
def holey_sort(lst):
dic = {"4":1, "6":1, "9":1, "0":1, "8":2}
result = [sum(dic.get(n,0) for n in num) for num in map(str,lst)]
return sorted(lst, key=lambda x: result[lst.index(x)])
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
2e0cde0f5ccb9e6407e6722496926aa281df2a6c | aa480d8b09dd7ad92c37c816ebcace24a35eb34c | /first-round/748.最短补全词.py | d9431c10c9cd3cf19bbf4a9aa7ab550c35ad287b | [] | no_license | SR2k/leetcode | 7e701a0e99f9f05b21216f36d2f5ac07a079b97f | de131226159865dcb7b67e49a58d2ddc3f0a82c7 | refs/heads/master | 2023-03-18T03:37:02.916453 | 2022-09-16T01:28:13 | 2022-09-16T01:28:13 | 182,083,445 | 0 | 0 | null | 2023-03-08T05:44:26 | 2019-04-18T12:27:12 | Python | UTF-8 | Python | false | false | 3,178 | py | #
# @lc app=leetcode.cn id=748 lang=python3
#
# [748] 最短补全词
#
# https://leetcode-cn.com/problems/shortest-completing-word/description/
#
# algorithms
# Easy (60.13%)
# Likes: 80
# Dislikes: 0
# Total Accepted: 26.9K
# Total Submissions: 40.3K
# Testcase Example: '"1s3 PSt"\n["step","steps","stripe","stepple"]'
#
# 给你一个字符串 licensePlate 和一个字符串数组 words ,请你找出并返回 words 中的 最短补全词 。
#
# 补全词 是一个包含 licensePlate 中所有的字母的单词。在所有补全词中,最短的那个就是 最短补全词 。
#
# 在匹配 licensePlate 中的字母时:
#
#
# 忽略 licensePlate 中的 数字和空格 。
# 不区分大小写。
# 如果某个字母在 licensePlate 中出现不止一次,那么该字母在补全词中的出现次数应当一致或者更多。
#
#
# 例如:licensePlate = "aBc 12c",那么它的补全词应当包含字母 'a'、'b' (忽略大写)和两个 'c' 。可能的 补全词 有
# "abccdef"、"caaacab" 以及 "cbca" 。
#
# 请你找出并返回 words 中的 最短补全词 。题目数据保证一定存在一个最短补全词。当有多个单词都符合最短补全词的匹配条件时取 words 中 最靠前的
# 那个。
#
#
#
# 示例 1:
#
#
# 输入:licensePlate = "1s3 PSt", words = ["step", "steps", "stripe", "stepple"]
# 输出:"steps"
# 解释:最短补全词应该包括 "s"、"p"、"s"(忽略大小写) 以及 "t"。
# "step" 包含 "t"、"p",但只包含一个 "s",所以它不符合条件。
# "steps" 包含 "t"、"p" 和两个 "s"。
# "stripe" 缺一个 "s"。
# "stepple" 缺一个 "s"。
# 因此,"steps" 是唯一一个包含所有字母的单词,也是本例的答案。
#
# 示例 2:
#
#
# 输入:licensePlate = "1s3 456", words = ["looks", "pest", "stew", "show"]
# 输出:"pest"
# 解释:licensePlate 只包含字母 "s" 。所有的单词都包含字母 "s" ,其中 "pest"、"stew"、和 "show" 三者最短。答案是
# "pest" ,因为它是三个单词中在 words 里最靠前的那个。
#
#
# 示例 3:
#
#
# 输入:licensePlate = "Ah71752", words =
# ["suggest","letter","of","husband","easy","education","drug","prevent","writer","old"]
# 输出:"husband"
#
#
# 示例 4:
#
#
# 输入:licensePlate = "OgEu755", words =
# ["enough","these","play","wide","wonder","box","arrive","money","tax","thus"]
# 输出:"enough"
#
#
# 示例 5:
#
#
# 输入:licensePlate = "iMSlpe4", words =
# ["claim","consumer","student","camera","public","never","wonder","simple","thought","use"]
# 输出:"simple"
#
#
#
#
# 提示:
#
#
# 1 <= licensePlate.length <= 7
# licensePlate 由数字、大小写字母或空格 ' ' 组成
# 1 <= words.length <= 1000
# 1 <= words[i].length <= 15
# words[i] 由小写英文字母组成
#
#
#
# @lc code=start
from collections import Counter
class Solution:
def shortestCompletingWord(self, licensePlate: str, words: list[str]) -> str:
counter = Counter(c.lower() for c in licensePlate if c.isalpha())
return min((w for w in words if not counter - Counter(w)), key=len)
# @lc code=end
| [
"luozhou.csy@alibaba-inc.com"
] | luozhou.csy@alibaba-inc.com |
ab44470a38375e1368fd727ad0c482e1dab9d8d4 | 7385c450eca8be719ba45686db698b747e01cd91 | /examples/ad_manager/v201905/proposal_service/update_proposals.py | 65a32176cbe43a73fafb8bbcfcaf22ce4b65ba15 | [
"Apache-2.0"
] | permissive | tanmaykhattar/googleads-python-lib | 44f15b9f6a0c2a3da7f19c17133b5fba842daf07 | 81742dc3571c9413196cfceb57f761c79db6857a | refs/heads/master | 2020-06-01T16:06:05.797538 | 2019-05-22T14:57:29 | 2019-05-22T14:57:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,343 | py | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the notes of a single proposal specified by ID.
To determine which proposals exist, run get_all_proposals.py.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
PROPOSAL_ID = 'INSERT_PROPOSAL_ID_HERE'
def main(client, proposal_id):
# Initialize appropriate service.
proposal_service = client.GetService('ProposalService', version='v201905')
# Create statement object to select a single proposal by an ID.
statement = (ad_manager.StatementBuilder(version='v201905')
.Where('id = :proposalId')
.WithBindVariable('proposalId', long(proposal_id))
.Limit(1))
# Get proposals by statement.
response = proposal_service.getProposalsByStatement(statement.ToStatement())
if 'results' in response and len(response['results']):
# Update each local proposal object by changing its notes.
updated_proposals = []
for proposal in response['results']:
proposal['internalNotes'] = 'Proposal needs review before approval.'
updated_proposals.append(proposal)
# Update proposals remotely.
proposals = proposal_service.updateProposals(updated_proposals)
# Display results.
if proposals:
for proposal in proposals:
print ('Proposal with id "%s", name "%s", and '
'notes "%s" was updated.'
% (proposal['id'], proposal['name'], proposal['internalNotes']))
else:
print 'No proposals were updated.'
else:
print 'No proposals found to update.'
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, PROPOSAL_ID)
| [
"davidwihl@users.noreply.github.com"
] | davidwihl@users.noreply.github.com |
efc71832a5989b425e4e6e7953f1213963c3ddf8 | 79f1e7932c27eb01483f8764720c672242052e1f | /training_horovod.py | 971939c1966028a90d5aef9125b7ca0854ca0d25 | [] | no_license | pk-organics/uniparc_modeling | 3b16ae5b85dc178fdcab4be3b4ddbdab02c80897 | ab9faaad00c20416ea2ac86f6f91b83f86ffb7a4 | refs/heads/master | 2023-02-13T19:58:29.841889 | 2019-12-05T18:53:05 | 2019-12-05T18:53:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,087 | py | import os
import argparse
parser = argparse.ArgumentParser(description='BERT model training')
parser.add_argument('--modelName', default='bert', help='model name for directory saving')
parser.add_argument('--batchSize', type=int, default=20, help='batch size per gpu')
parser.add_argument('--stepsPerEpoch', type=int, default=10000, help='steps per epoch')
parser.add_argument('--warmup', type=int, default=16000, help='warmup steps')
arguments = parser.parse_args()
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from bert.dataset import create_masked_input_dataset
from bert.layers import (PositionEmbedding, Attention, Transformer, TokenEmbedding, Bias,
gelu, masked_sparse_cross_entropy_loss, InverseSquareRootSchedule,
initializer, Projection)
import horovod.tensorflow.keras as hvd
# Horovod: initialize Horovod.
hvd.init()
# Print runtime config on head node
if hvd.rank() == 0:
print(arguments)
# Horovod: pin GPU to be used to process local rank (one GPU per process)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
# import tensorflow_addons as tfa
from tensorflow.keras import layers
vocab_size = 8000
max_seq_len = 512
training_data = create_masked_input_dataset(
language_model_path='sentencepiece_models/uniparc_10M_8000.model',
sequence_path='/projects/bpms/pstjohn/uniparc/sequences_train.txt',
max_sequence_length=max_seq_len,
batch_size=arguments.batchSize,
buffer_size=1024,
vocab_size=vocab_size,
mask_index=4,
vocab_start=5,
fix_sequence_length=True,
shard_num_workers=hvd.size(),
shard_worker_index=hvd.rank())
training_data.repeat().prefetch(tf.data.experimental.AUTOTUNE)
valid_data = create_masked_input_dataset(
language_model_path='sentencepiece_models/uniparc_10M_8000.model',
sequence_path='/projects/bpms/pstjohn/uniparc/sequences_valid.txt',
max_sequence_length=max_seq_len,
batch_size=arguments.batchSize,
buffer_size=1024,
vocab_size=vocab_size,
mask_index=4,
vocab_start=5,
fix_sequence_length=True,
shard_num_workers=hvd.size(),
shard_worker_index=hvd.rank())
valid_data.prefetch(tf.data.experimental.AUTOTUNE)
embedding_dimension = 128
model_dimension = 768
transformer_dimension = 4 * model_dimension
num_attention_heads = model_dimension // 64
num_transformer_layers = 12
# embedding_dimension = 32
# model_dimension = 64
# num_attention_heads = model_dimension // 16
# num_transformer_layers = 4
dropout_rate = 0.
# Horovod: adjust learning rate based on number of GPUs.
learning_rate = 1E-4
inputs = layers.Input(shape=(max_seq_len,), dtype=tf.int32, batch_size=None)
input_mask = layers.Input(shape=(max_seq_len,), dtype=tf.bool, batch_size=None)
token_embedding_layer = TokenEmbedding(
vocab_size, embedding_dimension, embeddings_initializer=initializer(), mask_zero=True)
token_embeddings = token_embedding_layer(inputs)
position_embeddings = PositionEmbedding(
max_seq_len + 1, embedding_dimension, embeddings_initializer=initializer(),
mask_zero=True)(inputs)
embeddings = layers.Add()([token_embeddings, position_embeddings])
embeddings = Projection(model_dimension, dropout_rate, use_residual=False)(embeddings)
transformer = Transformer(num_attention_heads, transformer_dimension, dropout=dropout_rate)
for i in range(num_transformer_layers):
embeddings = transformer(embeddings)
out = layers.Dense(embedding_dimension, activation=gelu, kernel_initializer=initializer())(embeddings)
out = token_embedding_layer(out, transpose=True)
out = Bias()([out, input_mask])
out = layers.Softmax()(out)
model = tf.keras.Model([inputs, input_mask], [out], name='model')
if hvd.rank() == 0:
model.summary()
# Horovod: add Horovod DistributedOptimizer.
# opt = tfa.optimizers.AdamW(weight_decay=0.01, learning_rate=learning_rate)
opt = tf.optimizers.Adam(learning_rate=learning_rate)
opt = hvd.DistributedOptimizer(opt)
# Horovod: Specify `experimental_run_tf_function=False` to ensure TensorFlow
# uses hvd.DistributedOptimizer() to compute gradients.
model.compile(
loss=tf.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy'],
optimizer=opt,
experimental_run_tf_function=False)
model_name = arguments.modelName
checkpoint_dir = f'{model_name}_checkpoints'
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt_{epoch}.h5")
callbacks = [
# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
hvd.callbacks.BroadcastGlobalVariablesCallback(0),
# Horovod: average metrics among workers at the end of every epoch.
# Note: This callback must be in the list before the ReduceLROnPlateau,
# TensorBoard or other metrics-based callbacks.
hvd.callbacks.MetricAverageCallback(),
# Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
# the first three epochs. See https://arxiv.org/abs/1706.02677 for details.
InverseSquareRootSchedule(learning_rate=learning_rate, warmup_updates=arguments.warmup),
]
# Horovod: save checkpoints only on worker 0 to prevent other workers from corrupting them.
if hvd.rank() == 0:
callbacks.append(tf.keras.callbacks.CSVLogger(f'{checkpoint_dir}/log.csv'))
callbacks.append(tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix))
# Horovod: write logs on worker 0.
verbose = 1 if hvd.rank() == 0 else 0
model.fit(training_data, steps_per_epoch=arguments.stepsPerEpoch, epochs=500, verbose=verbose,
validation_data=valid_data, validation_steps=100,
callbacks=callbacks)
| [
"peterc.stjohn@gmail.com"
] | peterc.stjohn@gmail.com |
d47c9b7d28f679cf05843563cc24ffb625dbd6c8 | efadb5a7beec3302f08e1ce296a40462382eca9a | /fusionlcd.py | dc646a1344a1f0d1d5b66b8649aa8d18dbd06f5f | [
"MIT"
] | permissive | kidzik/micropython-fusion | c0edf559c16eb1f96b4178d8fdbe8ccecf254862 | cb41b1c8bfb6a3c7a712165c5e5b4323a6e03462 | refs/heads/master | 2020-03-25T01:35:49.535068 | 2018-08-02T06:45:57 | 2018-08-02T06:45:57 | 143,245,312 | 0 | 0 | MIT | 2018-08-02T05:11:20 | 2018-08-02T05:11:20 | null | UTF-8 | Python | false | false | 2,530 | py | # fusionlcd.py Test for asynchronous sensor fusion on Pyboard. Uses LCD display and uasyncio.
# Author: Peter Hinch
# Released under the MIT License (MIT)
# Copyright (c) 2017 Peter Hinch
# V0.8 16th May 2017 Adapted for uasyncio
# V0.7 25th June 2015 Adapted for new MPU9x50 interface
# Requires:
# uasyncio (official or modified version)
# MPU9150 on X position
# Normally open pushbutton connected between pin Y7 and ground
# LCD driver alcd.py from https://github.com/peterhinch/micropython-async.git
# Hitachi HD44780 2 row LCD display wired using 4 bit data bus as follows:
# Name LCD connector Board
# Rs 4 1 red Y1
# E 6 2 Y2
# D7 14 3 Y3
# D6 13 4 Y4
# D5 12 5 Y5
# D4 11 6 Y6
from machine import Pin
import uasyncio as asyncio
import gc
from mpu9150 import MPU9150
from fusion_async import Fusion # Using async version
from alcd import LCD, PINLIST # Library supporting Hitachi LCD module
switch = Pin('Y7', Pin.IN, pull=Pin.PULL_UP) # Switch to ground on Y7
imu = MPU9150('X') # Attached to 'X' bus, 1 device, disable interruots
lcd = LCD(PINLIST, cols = 24) # Should work with 16 column LCD
# User coro returns data and determines update rate.
# For 9DOF sensors returns three 3-tuples (x, y, z) for accel, gyro and mag
# For 6DOF sensors two 3-tuples (x, y, z) for accel and gyro
async def read_coro():
imu.mag_trigger()
await asyncio.sleep_ms(20) # Plenty of time for mag to be ready
return imu.accel.xyz, imu.gyro.xyz, imu.mag_nonblocking.xyz
fuse = Fusion(read_coro)
async def mem_manage(): # Necessary for long term stability
while True:
await asyncio.sleep_ms(100)
gc.collect()
gc.threshold(gc.mem_free() // 4 + gc.mem_alloc())
async def display():
lcd[0] = "{:5s}{:5s} {:5s}".format("Yaw","Pitch","Roll")
while True:
lcd[1] = "{:4.0f} {:4.0f} {:4.0f}".format(fuse.heading, fuse.pitch, fuse.roll)
await asyncio.sleep_ms(500)
async def lcd_task():
print('Running test...')
if switch.value() == 1:
lcd[0] = "Calibrate. Push switch"
lcd[1] = "when done"
await asyncio.sleep_ms(100) # Let LCD coro run
await fuse.calibrate(lambda : not switch.value())
print(fuse.magbias)
await fuse.start() # Start the update task
loop = asyncio.get_event_loop()
loop.create_task(display())
loop = asyncio.get_event_loop()
loop.create_task(mem_manage())
loop.create_task(lcd_task())
loop.run_forever()
| [
"peter@hinch.me.uk"
] | peter@hinch.me.uk |
4eb80491f9fcc0c7fed307c097114d3a9fa5e0cf | 8ead2814623465191200c3e37f07f9c582a3b0b1 | /src/hw_4/task_4_4.py | 20f5a54eb644063186f2f6e58ff5fe4744dfb288 | [] | no_license | alexshchegretsov/Teach_Me_Skills_Python_homeworks | 8293dc21fa13a6b3090a80ebe4bdc9079276bac2 | eab577ad566de02af6b767e866518bafdf9561d0 | refs/heads/master | 2020-05-01T05:31:41.780358 | 2019-05-05T17:49:57 | 2019-05-05T17:49:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | """
Дан список. Создать новый список, сдвинутый на 1 элемент влево
Пример: 1 2 3 4 5 -> 2 3 4 5 1
"""
sequence_1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
sequence_2 = []
length = len(sequence_1)
i = 0
while i < length:
add_num = sequence_1[i + 1]
sequence_2.append(add_num)
if i == length - 2:
add_first_num = sequence_1[0]
sequence_2.append(add_first_num)
break
i += 1
print(sequence_2)
| [
"nydollz77@gmail.com"
] | nydollz77@gmail.com |
1d4cce807c1861d03f5fe67e7e06205ff4f3409c | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/cutting-ribbons.py | 9e9019596016b059148f7a592efb02dd949c238e | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 571 | py | # Time: O(nlogr), r is sum(ribbons)/k
# Space: O(1)
class Solution(object):
def maxLength(self, ribbons, k):
"""
:type ribbons: List[int]
:type k: int
:rtype: int
"""
def check(ribbons, k, s):
return reduce(lambda total,x: total+x//s, ribbons, 0) >= k
left, right = 1, sum(ribbons)//k
while left <= right:
mid = left + (right-left)//2
if not check(ribbons, k, mid):
right = mid-1
else:
left = mid+1
return right
| [
"noreply@github.com"
] | kamyu104.noreply@github.com |
9ce11b55914400c57bd74701e9f7be215b788b96 | b76e65ce109a57f2d9513715afebd67be806be62 | /urlson/build/lib.linux-x86_64-2.6/urlson/helpers.py | fea1d665ac11459ee406bede988d2713a7a2c0b5 | [] | no_license | ericmoritz/experiments | 2dab718194da8414ec54ca1d856ad0e42ed0e2d6 | efc1627a1795144e908451d1c239ee87cdfcc8ea | refs/heads/master | 2020-05-18T15:25:52.300902 | 2011-01-02T04:37:58 | 2011-01-02T04:37:58 | 1,091,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,308 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This code is based on mongokit.helpers, therefore, the following
# License information is requried to be included. The only modification
# I made was strip the mongokit dependancies.
# Copyright (c) 2009-2010, Nicolas Clairon
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Berkeley nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import logging
log = logging.getLogger(__name__)
def totimestamp(value):
"""
convert a datetime into a float since epoch
"""
import calendar
return int(calendar.timegm(value.timetuple()) * 1000 + value.microsecond / 1000)
def fromtimestamp(epoch_date):
"""
convert a float since epoch to a datetime object
"""
seconds = float(epoch_date) / 1000.0
return datetime.datetime.utcfromtimestamp(seconds)
from copy import deepcopy
class DotedDict(dict):
"""
Dot notation dictionnary access
"""
def __init__(self, doc=None, warning=False):
self._dot_notation_warning = warning
if doc is None: doc = {}
super(DotedDict, self).__init__(doc)
self.__dotify_dict(self)
def __dotify_dict(self, doc):
for k,v in doc.iteritems():
if isinstance(v, dict):
doc[k] = DotedDict(v)
self.__dotify_dict(v)
def __setattr__(self, key, value):
if key in self:
self[key] = value
else:
if self._dot_notation_warning and not key.startswith('_') and\
key not in ['db', 'collection', 'versioning_collection', 'connection', 'fs']:
log.warning("dot notation: %s was not found in structure. Add it as attribute instead" % key)
dict.__setattr__(self, key, value)
def __getattr__(self, key):
if key in self:
return self[key]
def __deepcopy__(self, memo={}):
obj = dict(self)
return deepcopy(obj, memo)
class EvalException(Exception):pass
class DotExpandedDict(dict):
"""
A special dictionary constructor that takes a dictionary in which the keys
may contain dots to specify inner dictionaries. It's confusing, but this
example should make sense.
>>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
'person.1.lastname': ['Willison'], \
'person.2.firstname': ['Adrian'], \
'person.2.lastname': ['Holovaty']})
>>> d
{'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}}
>>> d['person']
{'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}
>>> d['person']['1']
{'lastname': ['Willison'], 'firstname': ['Simon']}
# Gotcha: Results are unpredictable if the dots are "uneven":
>>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1})
{'c': 1}
"""
# code taken from Django source code http://code.djangoproject.com/
def __init__(self, key_to_list_mapping):
for k, v in key_to_list_mapping.items():
parent = None
current = self
bits = k.split('.')
for bit in bits[:-1]:
current = current.setdefault(bit, {})
# Now assign value to current position
try:
current[bits[-1]] = v
except TypeError: # Special-case if current isn't a dict.
current = {bits[-1]: v}
def flatten_lists(dct):
for name, value in dct.iteritems():
# If the value is an iterable,
# yield each item with the
# param's name
if hasattr(value, "__iter__"):
for item in value:
yield (name, item)
# If the value is not iterable,
# just yield the name and value
else:
yield (name, value)
| [
"eric@themoritzfamily.com"
] | eric@themoritzfamily.com |
2dc86c73519e45606d8814fb9660060d9b4efdeb | 51108a50ffb48ad154f587c230045bb783f22240 | /bfgame/queries/skills.py | cc9f0b832fed1f38964baf8517bb094e02e106d4 | [
"MIT"
] | permissive | ChrisLR/BasicDungeonRL | c90bd0866c457557cccbad24e14689d5d6db7b00 | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | refs/heads/master | 2021-06-15T13:56:53.888646 | 2019-08-05T16:33:57 | 2019-08-05T16:33:57 | 104,269,987 | 3 | 0 | MIT | 2019-08-05T16:28:23 | 2017-09-20T21:35:19 | Python | UTF-8 | Python | false | false | 1,909 | py | from collections import Iterable
from inspect import isclass
from core.queries import listing
from core.queries.base import CumulativeQuery
@listing.register_query
class Skills(CumulativeQuery):
"""
This Query will allow registered components to return their special abilities.
"""
name = "skills"
def __init__(self, querier):
super().__init__(querier)
self._result = {}
def do_query(self, skills=None):
"""
:param skills: An Optional filter to specify which type to retrieve
"""
if skills is not None:
if not isinstance(skills, Iterable):
skills = [skills]
self.skills = skills
return super().do_query()
def respond(self, values):
if isinstance(values, Iterable):
for value in values:
if self.valid_type(value):
self.add_cumulative(value)
else:
if self.valid_type(values):
self.add_cumulative(values)
def valid_type(self, value):
if self.skills:
if isclass(value):
if value not in self.skills:
return False
else:
if not any([isinstance(value, skill_type) for skill_type in self.skills]):
return False
return True
def add_cumulative(self, value):
if isclass(value) or not hasattr(value, 'value'):
current = self._result.get(value)
if not current:
self._result[value] = 1
else:
current = self._result.get(value, 0)
self._result[value] = current + value.value
@property
def result(self):
amount_of_filters = len(self.skills)
if amount_of_filters == 1:
return sum(self._result.values())
if not amount_of_filters:
return self._result
| [
"arzhul@gmail.com"
] | arzhul@gmail.com |
0a50bf050033e8687bab10819fdf84400ef10c23 | e450ba913f006d6eae8a62dfc3d18a1535982f30 | /notification_service/tools/__init__.py | 042e6b95e74f2321a014a099423bce4b7d05acd4 | [] | no_license | OneIdea-IRNITU/Smart-schedule-IRNITU | d3c927a3be21aee45ca1195a20bea03e25db3502 | e13d95d7ff4737581203a1804669a2672da92f5a | refs/heads/master | 2022-12-12T03:12:37.779932 | 2021-05-30T01:15:52 | 2021-05-30T01:15:52 | 247,898,585 | 0 | 4 | null | 2022-12-08T11:37:52 | 2020-03-17T06:45:16 | Python | UTF-8 | Python | false | false | 3,515 | py | from datetime import datetime, timedelta
DEBUG = False
def find_week():
"""Определение текущей недели"""
now = datetime.now()
sep = datetime(now.year if now.month >= 9 else now.year - 1, 9, 1)
d1 = sep - timedelta(days=sep.weekday())
d2 = now - timedelta(days=now.weekday())
parity = ((d2 - d1).days // 7) % 2
return 'odd' if parity else 'even'
def forming_user_to_submit(
chat_id: int,
group: str,
notifications: int,
day_now: str,
time_now: datetime,
week: str) -> dict:
"""Формирование информации о пользователе для отправки"""
# определяем фактическое время пары (прибавляем к текущему времени время напоминания)
lesson_time = (time_now + timedelta(minutes=notifications)).strftime('%H:%M')
user = {
'chat_id': chat_id,
'group': group,
'week': week,
'day': day_now,
'notifications': notifications,
'time': lesson_time
}
return user
def check_that_user_has_reminder_enabled_for_the_current_time(time_now, user_day_reminder_time: list) -> bool:
"""Проверка, что у пользователя включено напоминание на текущее время"""
if DEBUG:
return True
hours_now = int(time_now.strftime('%H'))
minutes_now = time_now.strftime('%M')
return user_day_reminder_time and f'{hours_now}:{minutes_now}' in user_day_reminder_time
def get_schedule_from_right_day(schedule, day_now) -> list:
"""Получение расписания из нужного дня"""
for day in schedule:
# находим нужный день
if day['day'] == day_now:
lessons = day['lessons']
return lessons
def check_that_the_lesson_has_the_right_time(time, lesson_time, lesson, week) -> bool:
"""Проверка, что урок имеет нужное время и неделю"""
if DEBUG:
return True
return time in lesson_time and (lesson['week'] == week or lesson['week'] == 'all')
def forming_message_text(lessons, week, time):
"""Формирование текста для сообщения"""
lessons_for_reminders = ''
count = 0
for lesson in lessons:
lesson_time = lesson['time']
# находим нужные пары (в нужное время)
if check_that_the_lesson_has_the_right_time(time, lesson_time, lesson, week):
name = lesson['name']
# пропускаем свободные дни
if name == 'свободно':
continue
# формируем сообщение
lessons_for_reminders += '-------------------------------------------\n'
aud = lesson['aud']
if aud:
aud = f'Аудитория: {",".join(aud)}\n'
time = lesson['time']
info = lesson['info']
prep = lesson['prep']
lessons_for_reminders += f'Начало в {time}\n' \
f'{aud}' \
f'{name}\n' \
f'{info} {",".join(prep)}\n'
count += 1
if count > 0:
lessons_for_reminders += '-------------------------------------------\n'
return lessons_for_reminders
| [
"alexleskov2000@gmail.com"
] | alexleskov2000@gmail.com |
63f30dde45fc2325aca77ff85693ea5060d5fd15 | 7dc502a62dcc4ff39f572040ba180315981e3ba8 | /src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/aio/operations/_collection_region_operations.py | f24252943a4fedad7d1700efed77f9bce9b14500 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | dbradish-microsoft/azure-cli-extensions | 2bec15b90666fee7a0a833b407ca2619e25fed86 | fe44a1bb123a58b7e8248850bdc20555ca893406 | refs/heads/master | 2023-08-31T15:19:35.673988 | 2022-02-09T08:50:18 | 2022-02-09T08:50:18 | 252,317,425 | 0 | 0 | MIT | 2020-04-02T00:29:14 | 2020-04-02T00:29:13 | null | UTF-8 | Python | false | false | 6,847 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CollectionRegionOperations:
    """Async operations for Cosmos DB metrics scoped to a collection within a specific region.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.cosmosdb.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Class-level alias so callers can reach the generated model classes through the operation group.
    models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list_metrics(
        self,
        resource_group_name: str,
        account_name: str,
        region: str,
        database_rid: str,
        collection_rid: str,
        filter: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.MetricListResult"]:
        """Retrieves the metrics determined by the given filter for the given database account, collection
        and region.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name.
        :type account_name: str
        :param region: Cosmos DB region, with spaces between words and each word capitalized.
        :type region: str
        :param database_rid: Cosmos DB database rid.
        :type database_rid: str
        :param collection_rid: Cosmos DB collection rid.
        :type collection_rid: str
        :param filter: An OData filter expression that describes a subset of metrics to return. The
         parameters that can be filtered are name.value (name of the metric, can have an or of multiple
         names), startTime, endTime, and timeGrain. The supported operator is eq.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either MetricListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.cosmosdb.models.MetricListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.MetricListResult"]
        # Map HTTP statuses onto the specific Azure exceptions callers expect.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-10-15-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request: the first page uses the templated URL plus
            # query parameters; later pages follow the server-provided next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_metrics.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
                    'accountName': self._serialize.url("account_name", account_name, 'str', max_length=50, min_length=3, pattern=r'^[a-z0-9]+(-[a-z0-9]+)*'),
                    'region': self._serialize.url("region", region, 'str'),
                    'databaseRid': self._serialize.url("database_rid", database_rid, 'str'),
                    'collectionRid': self._serialize.url("collection_rid", collection_rid, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; the continuation token is always None here
            # (paging is driven by prepare_request's next_link instead).
            deserialized = self._deserialize('MetricListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/region/{region}/databases/{databaseRid}/collections/{collectionRid}/metrics'}  # type: ignore
| [
"noreply@github.com"
] | dbradish-microsoft.noreply@github.com |
242b2ee5fa4ba96ad60bff79a79e45fe3a1a355c | b9d9fee309786ec53d50467b9ecbcc452da56a19 | /luminarApp/migrations/0025_auto_20210401_1518.py | 1e3c641ce02fb3b1db2f54b276d0a6b8db5c8491 | [] | no_license | amalmhn/luminar_crm | 912009b58d8c5672d952bc4615638aa0eceae224 | 0877aa7ed1f1f0400feec89b4de4aff36bcf96c4 | refs/heads/main | 2023-04-26T13:58:56.387577 | 2021-05-18T17:52:08 | 2021-05-18T17:52:08 | 352,746,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | # Generated by Django 3.1.6 on 2021-04-01 15:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: drops EnquiryThree.status and refreshes
    # the choice lists on Batch.batch_status and Enquiry.status.

    dependencies = [
        ('luminarApp', '0024_auto_20210401_1518'),
    ]

    operations = [
        # 'status' is removed from EnquiryThree entirely.
        migrations.RemoveField(
            model_name='enquirythree',
            name='status',
        ),
        # Choice tuples only affect validation/forms; the stored column type
        # is unchanged.
        migrations.AlterField(
            model_name='batch',
            name='batch_status',
            field=models.CharField(choices=[('1', 'Yet to Begin'), ('3', 'Completed'), ('2', 'Ongoing')], max_length=30),
        ),
        migrations.AlterField(
            model_name='enquiry',
            name='status',
            field=models.CharField(choices=[('3', 'Cancel'), ('1', 'Call back'), ('2', 'Admitted')], max_length=20),
        ),
    ]
| [
"amalmhnofficial@gmail.com"
] | amalmhnofficial@gmail.com |
d248153e21a9f17b4ced3b7f2ef56be7bfae25af | eacfc1c0b2acd991ec2cc7021664d8e79c9e58f6 | /ccpnmr2.4/python/ccp/examples/workshop/session2/loadSequence.py | 2a2bc1fd92afce70c5be8f3cc298f5b26deab0ad | [] | no_license | edbrooksbank/ccpnmr2.4 | cfecb0896dcf8978d796e6327f7e05a3f233a921 | f279ca9bb2d972b1ce075dad5fcc16e6f4a9496c | refs/heads/master | 2021-06-30T22:29:44.043951 | 2019-03-20T15:01:09 | 2019-03-20T15:01:09 | 176,757,815 | 0 | 1 | null | 2020-07-24T14:40:26 | 2019-03-20T14:59:23 | HTML | UTF-8 | Python | false | false | 2,000 | py | # Exercise 2.3: Load a fasta sequence from disk.
#
# - Use FormatConverter sub-routines to load a sequence located in
# '../data/seq/fasta.seq'.
#
# - Use an instance of the FastaFormat class located in
# ccpnmr.format.converters.FastaFormat, and then the readSequence()
# method for that object.
#
# - Print out the molSystem and molecule information and navigate some
# of the residue information.
#
# - Hint: create a Tk object from the Tkinter module when making the
# FastaFormat instance and also create the FastaFormat object with
# a newly made CCPN project.
#
import os
# Get Tkinter for popups.
import Tkinter
import memops.api.Implementation as Implementation
# Get FastaFormat class for format conversion.
from ccpnmr.format.converters.FastaFormat import FastaFormat
if __name__ == '__main__':
project = Implementation.MemopsRoot(name = 'readFasta')
# Create a Tkinter object.
guiRoot = Tkinter.Tk()
# Create a FastaFormat object and associate it with the CCPN project and
# Tkinter object.
fastaObj = FastaFormat(project, guiRoot)
# Give the location of the sequence file.
seqDir = os.path.join(os.path.abspath('..'), 'data', 'seq')
seqFile = os.path.join(seqDir, 'fasta.seq')
# Call the readSequence() method from the fastaFormat instance
# and pass in the name of the file to read.
fastaObj.readSequence(seqFile, minimalPrompts = 1)
# Check to see if molSystem and molecule objects are in the CCPN project.
print '\nProject object: [%s]' % project
print "\n'list' of MolSystem objects: [%s]" % project.sortedMolSystems()
print "\n'list' of Molecule objects: [%s]" % project.sortedMolecules()
# Select first available molecule.
molecule = project.findFirstMolecule()
print '\nMolecule type of first molecule: [%s]' % molecule.molType
# Check that the residues have also been made.
res1Type = molecule.findFirstMolResidue(seqCode = 1).ccpCode
print '\nResidue 1 type: [%s]\n' % res1Type
| [
"ejb66@le.ac.uk"
] | ejb66@le.ac.uk |
b053ca28877887977390ace416445a41f15355b9 | c4a0669126f2fbf757ac3b33a8279ef32305bbd7 | /Python Crash Course/Chapter 12/12.3 Roket/game_fuction.py | 7e7dc03569fa8cddf66660a60111e2f726ea9bec | [] | no_license | ezeutno/PycharmProject | 822b5a7da05729c5241a03b7413548a34b12e4a5 | bdb87599885287d2d7cd5cd703b62197563722b8 | refs/heads/master | 2021-07-18T20:55:08.605486 | 2017-10-24T03:14:10 | 2017-10-24T03:14:10 | 105,782,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,106 | py | import sys
import pygame
def check_keydown_event(event, rocket):
    """Switch ON the movement flag matching the pressed arrow key."""
    flag_names = {
        pygame.K_RIGHT: 'moving_right',
        pygame.K_UP: 'moving_up',
        pygame.K_LEFT: 'moving_left',
        pygame.K_DOWN: 'moving_down',
    }
    flag = flag_names.get(event.key)
    if flag is not None:
        setattr(rocket, flag, True)
def check_keyup_event(event, rocket):
    """Switch OFF the movement flag matching the released arrow key."""
    flag_names = {
        pygame.K_RIGHT: 'moving_right',
        pygame.K_UP: 'moving_up',
        pygame.K_LEFT: 'moving_left',
        pygame.K_DOWN: 'moving_down',
    }
    flag = flag_names.get(event.key)
    if flag is not None:
        setattr(rocket, flag, False)
def check_event(rocket):
    """Drain the pygame event queue: exit on QUIT, route key events."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        # sys.exit() raises, so reaching here means the event was not QUIT.
        if event.type == pygame.KEYDOWN:
            check_keydown_event(event, rocket)
        elif event.type == pygame.KEYUP:
            check_keyup_event(event, rocket)
def update_screen(ai_settings, screen, thing):
    """Repaint the background, draw every object, then flip to the new frame."""
    screen.fill(ai_settings.bg_color)
    for drawable in thing:
        drawable.blitme()
    pygame.display.flip()
"ivan.suratno@gmail.com"
] | ivan.suratno@gmail.com |
807dc9f71f52f1478c5acf46032f4716ee88a590 | bb150497a05203a718fb3630941231be9e3b6a32 | /models/PaddleHub/CI/test_senta_lstm.py | 320ebc05e83a087606200a8c027a70e81844bbb4 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 436 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
senta_lstm
"""
import paddlehub as hub
# Load the pretrained LSTM sentiment model from PaddleHub.
senta = hub.Module(name="senta_lstm")

test_text = ["这家餐厅很好吃", "这部电影真的很差劲"]

# Classify the two sample sentences on GPU, two sentences per batch.
classify_results = senta.sentiment_classify(texts=test_text, use_gpu=True, batch_size=2)
print(classify_results)

# Label set and vocabulary file shipped with the module.
label_results = senta.get_labels()
print(label_results)

vocab_path = senta.get_vocab_path()
print(vocab_path)
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
14d1abbb6a9e4191fb1bf0ace776efbc43f5553b | 63b864deda44120067eff632bbb4969ef56dd573 | /PyTorch/learning_note/test_cython/setup3.py | 55cf794a818dfd221758cd1afd87ff440bf8496b | [] | no_license | lizhe960118/Deep-Learning | d134592c327decc1db12cbe19d9a1c85a5056086 | 7d2c4f3a0512ce4bd2f86c9f455da9866d16dc3b | refs/heads/master | 2021-10-29T06:15:04.749917 | 2019-07-19T15:27:25 | 2019-07-19T15:27:25 | 152,355,392 | 5 | 2 | null | 2021-10-12T22:19:33 | 2018-10-10T03:06:44 | Jupyter Notebook | UTF-8 | Python | false | false | 172 | py | from distutils.core import setup
from Cython.Build import cythonize
setup(
    # Compile compute3.pyx into the 'compute_module' extension
    # (build in place with: python setup3.py build).
    name='compute_module',
    ext_modules=cythonize('compute3.pyx'),
)
# python setup3.py build | [
"2957308424@qq.com"
] | 2957308424@qq.com |
f2de12cd086b9826d00fd85b75f9ea0d21254e06 | 1368678277782c10f3823f523ba5fb3d3c5bbc87 | /chatbot_api/venv/Lib/site-packages/pyowm/utils/config.py | af66fa1df895e77efdee780dcfdc6cd2b43c2aec | [
"MIT"
] | permissive | arpitkumar1412/shellhacks-hospitalCheckin | d92177dc8abc028d722af568f7975d1f164feab3 | 39388561e92c94b22f5f25375d37c626b3609992 | refs/heads/main | 2023-08-22T01:14:32.623756 | 2021-09-26T15:24:49 | 2021-09-26T15:24:49 | 410,179,902 | 1 | 2 | MIT | 2021-09-25T16:27:44 | 2021-09-25T04:59:50 | Python | UTF-8 | Python | false | false | 2,302 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import json
import os

from pyowm.commons import exceptions
from pyowm.commons.enums import SubscriptionTypeEnum
from pyowm.config import DEFAULT_CONFIG
def get_config_from(path_to_file):
    """Loads configuration data from the supplied file and returns it.

    :param path_to_file: path to the configuration file
    :type path_to_file: str
    :returns: the configuration `dict`
    :raises: `ConfigurationNotFoundError` when the supplied filepath is not a regular file; `ConfigurationParseError`
        when the supplied file cannot be parsed
    """
    assert path_to_file is not None
    if not os.path.isfile(path_to_file):
        raise exceptions.ConfigurationNotFoundError(
            'Configuration file not found: {}'.format(path_to_file))
    with open(path_to_file, 'r') as cf:
        try:
            config_data = json.load(cf)
            config_data['subscription_type'] = SubscriptionTypeEnum.lookup_by_name(config_data['subscription_type'])
            return config_data
        except Exception as exc:
            # Bug fix: chain the original error so the underlying JSON or
            # enum-lookup failure is not silently discarded.
            raise exceptions.ConfigurationParseError() from exc
def get_default_config():
    """Returns the default PyOWM configuration.

    Bug fix: a deep copy of ``DEFAULT_CONFIG`` is returned so that callers
    (including the ``get_default_config_for_*`` helpers in this module, which
    mutate the returned dict) cannot corrupt the shared module-level default.

    :returns: the configuration `dict`
    """
    return copy.deepcopy(DEFAULT_CONFIG)
def get_default_config_for_subscription_type(name):
    """Returns the PyOWM configuration for a specific OWM API Plan subscription type

    :param name: name of the subscription type
    :type name: str
    :returns: the configuration `dict`
    """
    assert isinstance(name, str)
    result = get_default_config()
    result['subscription_type'] = SubscriptionTypeEnum.lookup_by_name(name)
    return result
def get_default_config_for_proxy(http_url, https_url):
    """Returns the PyOWM configuration to be used behind a proxy server

    :param http_url: URL connection string for HTTP protocol
    :type http_url: str
    :param https_url: URL connection string for HTTPS protocol
    :type https_url: str
    :returns: the configuration `dict`
    """
    assert isinstance(http_url, str)
    assert isinstance(https_url, str)
    result = get_default_config()
    result['connection']['use_proxy'] = True
    result['proxies'].update({'http': http_url, 'https': https_url})
    return result
| [
"86039408+Farewell-SDSk17@users.noreply.github.com"
] | 86039408+Farewell-SDSk17@users.noreply.github.com |
class Empresa:
    """The company issuing the invoices."""

    def __init__(self, nom="El mas barato", ruc="0999999999", tel="042971234", dir="Juan Montalvo"):
        # Parameter names kept for caller compatibility.
        self.nombre, self.ruc = nom, ruc
        self.telefono, self.direccion = tel, dir

    def mostrarEmpresa(self):
        """Print the company name and RUC on a single line."""
        linea = "Empresa: {:17}, RUC: {}".format(self.nombre, self.ruc)
        print(linea)
class Cliente:
    """A generic client identified by cedula (national ID)."""

    def __init__(self, nom, ced, tel):
        self.nombre, self.cedula, self.telefono = nom, ced, tel

    def mostrarCliente(self):
        """Print name, cedula and telephone separated by spaces."""
        print(self.nombre, self.cedula, self.telefono)
class ClienteCorporativo(Cliente):
    """Corporate client: a Cliente plus an associated contract."""

    def __init__(self, nomb, cedu, tele, contrato):
        # Bug fix: the original declared a single fused parameter
        # ``telecontrato`` and then called super() with the undefined names
        # ``tele``/``contrato`` (NameError) and one positional argument too
        # many for Cliente.__init__.
        super().__init__(nomb, cedu, tele)
        self.__contrato = contrato

    @property
    def contrato(self):  # getter: return the private attribute
        return self.__contrato

    @contrato.setter
    def contrato(self, value):  # setter: assign the private attribute
        # Any falsy value is normalized to "Sin contrato".
        if value:
            self.__contrato = value
        else:
            self.__contrato = "Sin contrato"

    def mostrarCliente(self):
        """Print the client's name and contract."""
        print(self.nombre, self.__contrato)
class ClientePersonal(Cliente):
    """Personal client: a Cliente with a read-only promotion flag."""

    def __init__(self, nom, ced, tel, promocion=True):
        super().__init__(nom, ced, tel)
        self.__promocion = promocion

    @property
    def promocion(self):
        """Read-only access to the private promotion flag."""
        return self.__promocion

    def mostrarCliente(self):
        """Print name and cedula in a fixed-width layout."""
        texto = "Cliente: {:13} Cedula:{}".format(self.nombre, self.cedula)
        print(texto)
class Articulo:
    """Inventory article; ``secuencia`` hands out auto-incremented codes."""

    secuencia = 0   # last code assigned (shared counter)
    iva = 0.12      # VAT rate shared by every article (a plain float, not a method)

    def __init__(self, des, pre, sto):
        Articulo.secuencia += 1
        self.codigo = Articulo.secuencia
        self.descripcion = des
        self.precio = pre
        self.stock = sto

    def mostraArticulo(self):
        """Print the article's code and description.

        Bug fix: the original printed ``self.nombre``, an attribute that is
        never set anywhere (AttributeError at runtime); the stored field is
        ``descripcion``.
        """
        print(self.codigo, self.descripcion)
class DetVenta:
    """One line of a sale: an article, its unit price and a quantity."""

    linea = 0  # shared counter used to number detail lines

    def __init__(self, articulo, cantidad):
        DetVenta.linea += 1
        self.lineaDetalle = DetVenta.linea
        self.articulo = articulo
        self.cantidad = cantidad
        # Snapshot of the unit price at the moment the line was created.
        self.precio = articulo.precio
class CabVenta:
    """Header of a sale: invoice number, date, client and detail lines."""

    def __init__(self, fac, fecha, cliente, tot=0):
        self.factura = fac
        self.fecha = fecha
        self.cliente = cliente
        self.total = tot
        self.detalleVen = []  # list of DetVenta lines

    def agregarDetalle(self, articulo, cantidad):
        """Add a detail line for ``articulo`` and update the running total."""
        detalle = DetVenta(articulo, cantidad)
        self.total += detalle.precio * detalle.cantidad
        self.detalleVen.append(detalle)

    def mostrarVenta(self, empNombre, empRuc):
        """Print the full invoice: company, header, client, lines and total.

        Bug fix: the original formatted the detail row with four placeholders
        but passed only one argument (``det.linea,`` -> IndexError), and
        ``det.linea`` is the shared class counter, not this line's number.
        Each row now shows line number, description, price, quantity and
        subtotal, matching the printed column header.
        """
        print("Empresa {:17} Ruc:{} ".format(empNombre, empRuc))
        print("Factura#:{:13} Fecha:{}".format(self.factura, self.fecha))
        self.cliente.mostrarCliente()
        print("Linea Articulo Precio Cantidad Subtotal")
        for det in self.detalleVen:
            print("{:5} {:12} {:6} {:8} {:8}".format(
                det.lineaDetalle, det.articulo.descripcion, det.precio,
                det.cantidad, det.precio * det.cantidad))
        print("Total venta:{:26}".format(self.total))
# Quick smoke test of the classes above.
emp = Empresa()
cli1 = ClientePersonal("Jose", "0912231499", "042567890", True)
art1 = Articulo("Aceite", 2, 100)
art1.mostraArticulo()
art2 = Articulo("Coca Cola", 1, 200)
art2.mostraArticulo()
art3 = Articulo("Leche", 1.5, 200)
art3.mostraArticulo()
# Bug fix: ``Articulo.iva`` is a float class attribute, not a method;
# calling it raised ``TypeError: 'float' object is not callable``.
print(Articulo.iva)
| [
"85761855+Alopezm5@users.noreply.github.com"
] | 85761855+Alopezm5@users.noreply.github.com |
9925babfe5f66825d042b007d07d368b2f35ef27 | d46147c2db8a9e4225f3df4f81cc3f04dd57b03d | /exercise/ex01-pandas-selection/data.py | f177d0bbeac2b4d7d706cb90082299950c733053 | [] | no_license | biychen/machine-learning-training-exercise | 83741dbf182b84e206fc26c3d3b3a83ad9c50416 | 946dcc435142d9b89bd98688d6f957452e46c96a | refs/heads/master | 2021-01-23T08:48:52.607520 | 2017-09-05T17:08:30 | 2017-09-05T17:08:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Six consecutive daily dates indexing a 6x4 frame of standard-normal samples.
dates = pd.date_range('20130101', periods=6)
df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
"zj0512@gmail.com"
] | zj0512@gmail.com |
09aba1132f88729b3686ea03d643b6da871bd6f9 | eadd15064aa74811e7a3718b617636627ef4fd47 | /web/migrations/0022_auto_20210503_1552.py | 47ddebe5764e78697bcbfb57abd80f79cd27bd65 | [] | no_license | topsai/plasrefine_backstage | 262f7bb032daa4d018aac1519e1139cb060c3f91 | 1eb34dd0b13ebdc2a42dd6ed1aaa2d08c18ab5fb | refs/heads/master | 2023-04-12T13:24:22.710108 | 2021-05-08T14:16:41 | 2021-05-08T14:16:41 | 361,993,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | # Generated by Django 3.2 on 2021-05-03 07:52
import ckeditor_uploader.fields
from django.db import migrations, models
import imagekit.models.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the ProductsPage and
    # TechnologyPage models and switches AboutPage.contents to a rich-text
    # (CKEditor) field.

    dependencies = [
        ('web', '0021_aboutpage'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProductsPage',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text1', models.CharField(max_length=30, verbose_name='文本1')),
                ('img1', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片1')),
                ('img2', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片2')),
                ('img3', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片3')),
                ('img4', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片4')),
                # NOTE(review): 'text2' reuses verbose_name '文本1' — looks like
                # a copy/paste slip in the model; confirm before changing.
                ('text2', models.CharField(max_length=30, verbose_name='文本1')),
                ('img5', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片1')),
                ('img6', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片2')),
                ('img7', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片3')),
                ('img8', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片4')),
            ],
            options={
                'verbose_name': 'products',
                'verbose_name_plural': 'products',
            },
        ),
        migrations.CreateModel(
            name='TechnologyPage',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30, verbose_name='标题')),
                ('contents', ckeditor_uploader.fields.RichTextUploadingField(verbose_name='内容')),
                ('img1', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片1')),
                ('img2', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片2')),
                ('img3', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片3')),
                ('img4', imagekit.models.fields.ProcessedImageField(upload_to='DF_goods/Image/%Y/%m', verbose_name='图片4')),
            ],
            options={
                'verbose_name': 'technology',
                'verbose_name_plural': 'technology',
            },
        ),
        migrations.AlterField(
            model_name='aboutpage',
            name='contents',
            field=ckeditor_uploader.fields.RichTextUploadingField(verbose_name='内容'),
        ),
    ]
| [
"hurte@foxmail.com"
] | hurte@foxmail.com |
81e9a2517d8558ee793d6c406576694e307aacc3 | ac95b268c02ea0d3d59b5ef6662a27b2970dc482 | /assessor/checkpoint_and_make_plots.py | baf8109643a3b966fb334510e2c1d735b45ab2e0 | [] | no_license | voletiv/lipreading-in-the-wild-experiments | 3f20b9029f54d2a0229ad6c1a56eb313b1923af5 | 77e5938a1dd66f9ffece668c74dfc738f672bd30 | refs/heads/master | 2023-03-21T22:52:04.083884 | 2021-03-14T21:44:02 | 2021-03-14T21:44:02 | 105,631,097 | 54 | 23 | null | 2021-03-14T21:44:03 | 2017-10-03T08:47:57 | Python | UTF-8 | Python | false | false | 3,753 | py | import os
if 'voleti.vikram' in os.getcwd():
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from keras.callbacks import Callback
#########################################################
# CALLBACK
#########################################################
class CheckpointAndMakePlots(Callback):
    """Keras callback that checkpoints the best model (lowest val_loss) and
    writes loss/accuracy history files plus a summary plot every epoch."""

    # Init
    def __init__(self, file_name_pre="assessor_cnn_adam", save_dir="."):
        self.file_name_pre = file_name_pre  # prefix for every output file
        self.save_dir = save_dir            # directory all outputs go to

    # On train start
    def on_train_begin(self, logs=None):
        # Bug fix: the original used the mutable default argument ``logs={}``.
        self.train_losses = []
        self.val_losses = []
        self.train_accuracies = []
        self.val_accuracies = []
        self.best_val_loss = 1000

    # At every epoch
    def on_epoch_end(self, epoch, logs=None):
        # Bug fix: mutable default ``logs={}`` replaced with None + fallback.
        logs = logs or {}
        # Get
        tl = logs.get('loss')
        ta = logs.get('acc')
        vl = logs.get('val_loss')
        va = logs.get('val_acc')
        print("\n", tl, ta, vl, va)
        # Append losses and accs
        self.train_losses.append(tl)
        self.val_losses.append(vl)
        self.train_accuracies.append(ta)
        self.val_accuracies.append(va)
        # Save model only when validation loss improves
        if vl < self.best_val_loss:
            self.best_val_loss = vl
            self.save_model_checkpoint(epoch, tl, ta, vl, va)
        # Save history
        self.save_history()
        # Plot graphs
        self.plot_and_save_losses_and_accuracies(epoch)

    # Save model checkpoint (weights only), metrics encoded in the file name
    def save_model_checkpoint(self, epoch, tl, ta, vl, va):
        model_file_path = os.path.join(self.save_dir,
            self.file_name_pre + "_epoch{0:03d}_tl{1:.4f}_ta{2:.4f}_vl{3:.4f}_va{4:.4f}.hdf5".format(epoch, tl, ta, vl, va))
        print("Saving model", model_file_path)
        self.model.save_weights(model_file_path)

    def save_history(self):
        """Dump the four metric histories as one-column CSV text files."""
        print("Saving history in", self.save_dir)
        np.savetxt(os.path.join(self.save_dir, self.file_name_pre + "_loss_history.txt"), self.train_losses, delimiter=",")
        np.savetxt(os.path.join(self.save_dir, self.file_name_pre + "_acc_history.txt"), self.train_accuracies, delimiter=",")
        np.savetxt(os.path.join(self.save_dir, self.file_name_pre + "_val_loss_history.txt"), self.val_losses, delimiter=",")
        np.savetxt(os.path.join(self.save_dir, self.file_name_pre + "_val_acc_history.txt"), self.val_accuracies, delimiter=",")

    # Plot and save losses and accuracies side by side
    def plot_and_save_losses_and_accuracies(self, epoch):
        print("Saving plot for epoch", str(epoch), ":",
              os.path.join(self.save_dir, self.file_name_pre + "_plots.png"))
        plt.subplot(121)
        plt.plot(self.train_losses, label='train_loss')
        plt.plot(self.val_losses, label='val_loss')
        leg = plt.legend(loc='upper right', fontsize=11, fancybox=True)
        leg.get_frame().set_alpha(0.3)
        plt.xlabel('epochs')
        plt.ylabel('loss')
        plt.title("Loss")
        plt.subplot(122)
        plt.plot(self.train_accuracies, label='train_acc')
        plt.plot(self.val_accuracies, label='val_acc')
        leg = plt.legend(loc='lower right', fontsize=11, fancybox=True)
        leg.get_frame().set_alpha(0.3)
        plt.xlabel('epochs')
        plt.ylabel('acc')
        plt.yticks(np.arange(0, 1.05, 0.05))
        plt.tick_params(axis='y', which='both',
                        labelleft='on', labelright='on')
        plt.gca().yaxis.grid(True)
        plt.title("Accuracy")
        plt.tight_layout()
        # plt.subplots_adjust(top=0.85)
        plt.suptitle(self.file_name_pre, fontsize=10)
        plt.savefig(os.path.join(self.save_dir,
                                 self.file_name_pre + "_plots_loss_acc.png"))
        plt.close()
| [
"vikky2904@gmail.com"
] | vikky2904@gmail.com |
9dea8f7fe952c9fd9cb255926144c6e3373e628d | 54f6c0e0d1e83d3a7026caaf8837f1fc1d1ec647 | /examples/example06.py | 59048b80d4ad9b3278af81742f509656f7d620fd | [
"BSD-3-Clause"
] | permissive | battyone/mystic | 2faaf0e9112582adb732505167390f4e9d5d3ace | b96905466b0c06dd379c968a23e4c867c609a353 | refs/heads/master | 2023-01-02T12:48:22.014100 | 2020-10-28T00:17:20 | 2020-10-28T00:17:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# Copyright (c) 2016-2020 The Uncertainty Quantification Foundation.
# License: 3-clause BSD. The full license text is available at:
# - https://github.com/uqfoundation/mystic/blob/master/LICENSE
"""
Example:
- Solve 8th-order Chebyshev polynomial coefficients with Powell's method.
- Plot of fitting to Chebyshev polynomial.
Demonstrates:
- standard models
- minimal solver interface
"""
# Powell's Directonal solver
from mystic.solvers import fmin_powell
# Chebyshev polynomial and cost function
from mystic.models.poly import chebyshev8, chebyshev8cost
from mystic.models.poly import chebyshev8coeffs
# tools
from mystic.math import poly1d
from mystic.tools import getch
import matplotlib.pyplot as plt
plt.ion()
# draw the plot
def plot_exact():
    """Draw the exact 8th-order Chebyshev curve into the current figure."""
    plt.title("fitting 8th-order Chebyshev polynomial coefficients")
    plt.xlabel("x")
    plt.ylabel("f(x)")
    import numpy
    xs = numpy.arange(-1.2, 1.2001, 0.01)
    plt.plot(xs, chebyshev8(xs), 'b-')
    plt.legend(["Exact"])
    plt.axis([-1.4, 1.4, -2, 8], 'k-')
    plt.draw()
    plt.pause(0.001)
# plot the polynomial
def plot_solution(params, style='y-'):
    """Overlay the polynomial described by ``params`` on the current figure."""
    import numpy
    xs = numpy.arange(-1.2, 1.2001, 0.01)
    poly = poly1d(params)
    plt.plot(xs, poly(xs), style)
    plt.legend(["Exact","Fitted"])
    plt.axis([-1.4, 1.4, -2, 8], 'k-')
    plt.draw()
    plt.pause(0.001)
if __name__ == '__main__':

    print("Powell's Method")
    print("===============")

    # initial guess (fixed seed keeps the demo reproducible)
    import random
    from mystic.tools import random_seed
    random_seed(123)
    ndim = 9
    x0 = [random.uniform(-100,100) for i in range(ndim)]

    # draw frame and exact coefficients
    plot_exact()

    # use Powell's method to solve 8th-order Chebyshev coefficients
    solution = fmin_powell(chebyshev8cost,x0)

    # use pretty print for polynomials
    print(poly1d(solution))

    # compare solution with actual 8th-order Chebyshev coefficients
    print("\nActual Coefficients:\n %s\n" % poly1d(chebyshev8coeffs))

    # plot solution versus exact coefficients
    plot_solution(solution)

    getch() #XXX: or plt.show() ?
# end of file
| [
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] | mmckerns@968178ea-60bd-409e-af13-df8a517b6005 |
5d8554d713e4e7ddb712d54f02ce3185dd1bd4a8 | b9b06d86d43e738b62ab9289fc13aae4c2b2670b | /nsd1808/devops/day06/myansible/library/mycopy.py | 661f66b6c900d26a216fae53bddf5a4c01d4a8ba | [] | no_license | MrZhangzhg/nsd_2018 | 31a7a8d54e2cb3ff4f4eb5c736fbd76601718356 | 458a1fef40c5e15ba7689fcb3a00baf893ac0218 | refs/heads/master | 2020-04-08T19:08:48.237646 | 2019-09-08T04:31:07 | 2019-09-08T04:31:07 | 159,642,127 | 5 | 7 | null | 2019-01-04T05:33:40 | 2018-11-29T09:37:27 | Python | UTF-8 | Python | false | false | 411 | py | #!/usr/bin/env python3
import shutil
from ansible.module_utils.basic import AnsibleModule
def main():
    """Ansible module entry point: copy file ``yuan`` (src) to ``mudi`` (dest)."""
    module = AnsibleModule(
        argument_spec=dict(
            yuan=dict(required=True, type='str'),
            mudi=dict(required=True, type='str')
        )
    )
    try:
        shutil.copy(module.params['yuan'], module.params['mudi'])
    except (IOError, OSError) as err:
        # Robustness fix: report copy failures through Ansible's JSON
        # protocol instead of crashing with a raw traceback.
        module.fail_json(msg='copy failed: %s' % err)
    # fail_json exits the module, so this is only reached on success.
    module.exit_json(changed=True)


if __name__ == '__main__':
    main()
| [
"zhangzg@tedu.cn"
] | zhangzg@tedu.cn |
ebd53c6a7338e323e438a6dfff82dec038698809 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /homeassistant/components/vulcan/calendar.py | cfa962e51a33b6bea4ad665e80357383054272b2 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 5,622 | py | """Support for Vulcan Calendar platform."""
from __future__ import annotations
from datetime import date, datetime, timedelta
import logging
from aiohttp import ClientConnectorError
from vulcan import UnauthorizedCertificateException
from homeassistant.components.calendar import (
ENTITY_ID_FORMAT,
CalendarEntity,
CalendarEvent,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import DOMAIN
from .fetch_data import get_lessons, get_student_info
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the calendar platform for entity."""
    # Client presumably stored under hass.data[DOMAIN] by the integration's
    # setup (not visible in this file) — keyed by the config entry id.
    client = hass.data[DOMAIN][config_entry.entry_id]
    data = {
        "student_info": await get_student_info(
            client, config_entry.data.get("student_id")
        ),
    }
    # One calendar entity per configured student; the entity_id embeds the
    # student's full name.
    async_add_entities(
        [
            VulcanCalendarEntity(
                client,
                data,
                generate_entity_id(
                    ENTITY_ID_FORMAT,
                    f"vulcan_calendar_{data['student_info']['full_name']}",
                    hass=hass,
                ),
            )
        ],
    )
class VulcanCalendarEntity(CalendarEntity):
    """A calendar entity."""

    def __init__(self, client, data, entity_id) -> None:
        """Create the Calendar entity."""
        self.student_info = data["student_info"]
        self._event: CalendarEvent | None = None
        self.client = client
        self.entity_id = entity_id
        # NOTE(review): ``_unique_id`` duplicates ``_attr_unique_id`` and does
        # not appear to be read anywhere in this file — confirm before removal.
        self._unique_id = f"vulcan_calendar_{self.student_info['id']}"
        self._attr_name = f"Vulcan calendar - {self.student_info['full_name']}"
        self._attr_unique_id = f"vulcan_calendar_{self.student_info['id']}"
        self._attr_device_info = {
            "identifiers": {(DOMAIN, f"calendar_{self.student_info['id']}")},
            "entry_type": DeviceEntryType.SERVICE,
            "name": f"{self.student_info['full_name']}: Calendar",
            "model": f"{self.student_info['full_name']} - {self.student_info['class']} {self.student_info['school']}",
            "manufacturer": "Uonet +",
            "configuration_url": f"https://uonetplus.vulcan.net.pl/{self.student_info['symbol']}",
        }

    @property
    def event(self) -> CalendarEvent | None:
        """Return the next upcoming event."""
        return self._event

    async def async_get_events(
        self, hass: HomeAssistant, start_date: datetime, end_date: datetime
    ) -> list[CalendarEvent]:
        """Get all events in a specific time frame."""
        try:
            events = await get_lessons(
                self.client,
                date_from=start_date,
                date_to=end_date,
            )
        except UnauthorizedCertificateException as err:
            raise ConfigEntryAuthFailed(
                "The certificate is not authorized, please authorize integration again"
            ) from err
        except ClientConnectorError as err:
            # Network problems degrade gracefully: warn once (only while still
            # marked available) and return an empty event list.
            if self.available:
                _LOGGER.warning(
                    "Connection error - please check your internet connection: %s", err
                )
            events = []
        event_list = []
        for item in events:
            # Each lesson dict carries a date plus a time slot exposing
            # ``from_``/``to`` bounds; map it onto a CalendarEvent.
            event = CalendarEvent(
                start=datetime.combine(item["date"], item["time"].from_),
                end=datetime.combine(item["date"], item["time"].to),
                summary=item["lesson"],
                location=item["room"],
                description=item["teacher"],
            )
            event_list.append(event)
        return event_list

    async def async_update(self) -> None:
        """Get the latest data."""
        try:
            events = await get_lessons(self.client)
            if not self.available:
                _LOGGER.info("Restored connection with API")
                self._attr_available = True
            if events == []:
                # No lessons today: look ahead one week before giving up.
                events = await get_lessons(
                    self.client,
                    date_to=date.today() + timedelta(days=7),
                )
                if events == []:
                    self._event = None
                    return
        except UnauthorizedCertificateException as err:
            raise ConfigEntryAuthFailed(
                "The certificate is not authorized, please authorize integration again"
            ) from err
        except ClientConnectorError as err:
            if self.available:
                _LOGGER.warning(
                    "Connection error - please check your internet connection: %s", err
                )
            self._attr_available = False
            return
        # Pick the "current or next" lesson: lessons that have not yet ended
        # sort first (False < True), then by distance of their end from now.
        new_event = min(
            events,
            key=lambda d: (
                datetime.combine(d["date"], d["time"].to) < datetime.now(),
                abs(datetime.combine(d["date"], d["time"].to) - datetime.now()),
            ),
        )
        self._event = CalendarEvent(
            start=datetime.combine(new_event["date"], new_event["time"].from_),
            end=datetime.combine(new_event["date"], new_event["time"].to),
            summary=new_event["lesson"],
            location=new_event["room"],
            description=new_event["teacher"],
        )
| [
"noreply@github.com"
] | Adminiuga.noreply@github.com |
e7b6894c0a50d9f580da4cc238b9578c46f7f057 | 192314e7a28810215e793c12e56bb78d8311f5bc | /enarocanje/accountext/migrations/0015_auto__add_field_user_service_notifications.py | cf43a9a68c172999839f7e48761ae97261271d5d | [] | no_license | km4054/eNarocanje | e07cd02e4bc6fd6b9118a7667562ded70b33e0b8 | 1abf2392fa2357dfd9a61bc5c8ebd6d418bbb9ce | refs/heads/master | 2021-01-18T20:07:33.708188 | 2014-06-08T21:11:50 | 2014-06-08T21:11:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,478 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add boolean column ``service_notifications`` (default 0) to accountext_user."""
        # Adding field 'User.service_notifications'
        db.add_column(u'accountext_user', 'service_notifications',
                      self.gf('django.db.models.fields.BooleanField')(default=0),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'User.service_notifications'
db.delete_column(u'accountext_user', 'service_notifications')
models = {
u'accountext.category': {
'Meta': {'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'accountext.serviceprovider': {
'Meta': {'object_name': 'ServiceProvider'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accountext.Category']", 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'gcal_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'gcal_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'logo_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'reservation_confirmation_needed': ('django.db.models.fields.BooleanField', [], {}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'subscription_end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 5, 7, 0, 0)'}),
'subscription_mail_sent': ('django.db.models.fields.BooleanField', [], {}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'UTC'", 'max_length': '30'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
u'accountext.serviceproviderimage': {
'Meta': {'object_name': 'ServiceProviderImage'},
'delete_image': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'image_height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'image_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'service_provider': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accountext.ServiceProvider']"})
},
u'accountext.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '5'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'referral': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accountext.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'service_notifications': ('django.db.models.fields.BooleanField', [], {}),
'service_provider': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['accountext.ServiceProvider']", 'unique': 'True', 'null': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accountext'] | [
"simonrakovic@gmail.com"
] | simonrakovic@gmail.com |
9033fc4743c174f35f4232041a5fa522cf5511bb | aef1ea6df9f7fa7a812d9330873dca97ef205e53 | /employee/migrations/0004_auto_20200222_2002.py | 1e5910dbb0ebee6f1f1e95c919379ddcec0578fa | [] | no_license | iamshakibulislam/bakery-management | 0033fec1178d24e427ef68d025682501c5ba6320 | 2751b2cc1f76eeb5825bc3133234ba97e1415569 | refs/heads/master | 2023-02-17T17:05:51.078466 | 2021-01-18T08:36:18 | 2021-01-18T08:36:18 | 254,834,024 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | # Generated by Django 3.0.3 on 2020-02-22 14:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('employee', '0003_auto_20200221_1619'),
]
operations = [
migrations.AlterField(
model_name='attendence',
name='date',
field=models.DateField(default='2020-02-22'),
),
migrations.AlterField(
model_name='pay_employee',
name='date',
field=models.DateField(default='2020-02-22'),
),
]
| [
"iamshakibulislam@gmail.com"
] | iamshakibulislam@gmail.com |
488cad88b223d986363dcf96de77845135c84eb4 | 0c3db34634cb85e778c95a4b4ff64514eca0477f | /setup.py | e1230d208fa7d0491432cff41dae05f25a0a1fb6 | [] | no_license | EUREC4A-UK/lagtraj_aux | 4efad4c94bcb9a2a367a6794abe0bc96e99a06af | da39ec1f6afa04a5a808130175b595c9bd9d01af | refs/heads/master | 2023-03-05T17:06:14.938118 | 2021-02-09T12:17:51 | 2021-02-09T14:14:29 | 337,395,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | #!/usr/bin/env python
from setuptools import setup, find_packages
import versioneer
INSTALL_REQUIRES = open("requirements.txt").readlines()
setup(
name="lagtraj_aux",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Utility for sampling auxiliary fields along trajectories",
url="https://github.com/EUREC4A-UK/lagtraj_aux",
maintainer="Leif Denby",
maintainer_email="l.c.denby@leeds.ac.uk",
py_modules=["lagtraj_aux"],
packages=find_packages(include=["lagtraj_aux"]),
package_data={"": ["*.csv", "*.yml", "*.html"]},
include_package_data=True,
install_requires=INSTALL_REQUIRES,
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
zip_safe=False,
)
| [
"leif@denby.eu"
] | leif@denby.eu |
1ba01a3cd767a65624d2e525466317208e434229 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03103/s940880137.py | 9fca731c2da7f6c58cd177394a94b96666210919 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | import sys
input = sys.stdin.readline
def main():
ans = 0
n, m = map(int, input().split())
all = []
for i in range(n):
all.append(list(map(int, input().split())))
all.sort()
for a,b in all:
if m >= b:
m -= b
ans += a*b
else:
ans += a*(m)
break
print(ans)
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
56f56fff61aba3d17455a3528d7e78668f52c10f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/299/67023/submittedfiles/testes.py | 39e00d4dc422e89a752c8c0eabf9431496714064 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | altura=1,82
idade=20
print('a idade do individuo eh %d' %idade)
print('a idade do indivíduo eh %d e a altura é %f' %(idade,altura)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
749ceac199de6c4c428915ea05ff2d723dff0fab | b580fd482147e54b1ca4f58b647fab016efa3855 | /host_im/mount/malware-classification-master/samples/not/sample_good172.py | f755cd22a6c7c294d8b53bab38250c079edae4be | [] | no_license | Barnsa/Dissertation | 1079c8d8d2c660253543452d4c32799b6081cfc5 | b7df70abb3f38dfd446795a0a40cf5426e27130e | refs/heads/master | 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | import math
import random
import array
import difflib
nterms = 740
n1, n2 = 0, 1
if nterms <= 0:
print("Please provide a positive integer.")
elif nterms == 1:
print("Fibonacci sequence upto", nterms, ":")
print(n1)
else:
print("Fibonacci sequence:")
count = 0
while 740 > 0:
print(n1)
nth = n1 + n2
n1 = n2
n2 = nth
count = count + 1
| [
"barnsa@uni.coventry.ac.uk"
] | barnsa@uni.coventry.ac.uk |
c7cb7b9a61a3d200d92536daaf9f2de23c6bca9e | ba86ef56fb2ff1a8bf9be3058b58b9e48e7b50ce | /apps/landing/views.py | 14787b0251c082cc34287281d821d45629b97824 | [] | no_license | robertowest/lubre_homepage | 277f8fc81512b482fbea539234f30ef3eb801480 | 9de02443ba2ee3cd48afd2b7d580a09081fe84f2 | refs/heads/master | 2023-07-14T04:39:38.640155 | 2021-08-30T17:43:56 | 2021-08-30T17:43:56 | 223,473,409 | 0 | 0 | null | 2020-05-07T13:50:46 | 2019-11-22T19:34:22 | Python | UTF-8 | Python | false | false | 4,433 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.views.generic import DetailView, ListView, TemplateView
from django.core.mail import BadHeaderError, EmailMessage, send_mail
from django.template.loader import render_to_string
from apps.homepage.models import Entries, Grupo, Producto
class IndexView(TemplateView):
template_name = 'landing/index.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# opciones de servicios
context['agro'] = [
{'texto': 'Fertilizantes', 'imagen': 'landing/img/fertilizante.svg', 'categoria': 6},
{'texto': 'Cultivos', 'imagen': 'landing/img/cultivo.svg', 'categoria': 2},
{'texto': 'Silo Bolsa', 'imagen': 'landing/img/silo.svg', 'categoria':11},
{'texto': 'Semillas', 'imagen': 'landing/img/semilla.svg', 'categoria':12},
{'texto': 'Lubricantes', 'imagen': 'landing/img/lubricante.svg', 'categoria': 5},
{'texto': 'Combustibles', 'imagen': 'landing/img/combustible.svg', 'categoria': 1},
]
context['eess'] = [
{'texto': 'Combustible', 'imagen': 'landing/img/surtidor.svg', 'categoria': 68,
'descripcion': 'Diseñados para lograr el máximo desempeño, excelente poder de limpieza y óptimo rendimiento.'},
{'texto': 'Full', 'imagen': 'landing/img/coffee.svg', 'categoria': 69,
'descripcion': 'Red con cobertura nacional ubicados en estaciones de servicio YPF.'},
{'texto': 'Boxes', 'imagen': 'landing/img/tools.svg', 'categoria': 70,
'descripcion': 'Espacio de encuentro y permanencia placentera.'},
{'texto': 'Serviclub', 'imagen': 'landing/img/serviclub.svg', 'categoria': 71,
'descripcion': 'Programa de fidelización de clientes en las estaciones de servicio YPF.'},
]
return context
def post(self, request, *args, **kwargs):
email = request.POST.get('email')
name = request.POST.get('name')
phone = request.POST.get('subject')
subject = "enviado desde la web"
message = request.POST.get('message')
message = message + \
"\n\nNombre: " + name + \
"\nCorreo: " + email + \
"\Teléfono: " + phone
# # grabamos el mensaje en la tabla
# from apps.homepage.models import Mensaje
# mensaje = Mensaje()
# mensaje.nombre = name
# mensaje.correo = email
# mensaje.asunto = "contacto desde la web"
# mensaje.mensaje = message
# mensaje.save()
# envío de correo
body = render_to_string(
'email_content.html',
{
'name': name,
'email': email,
'subject': subject,
'message': '<br />'.join(message.splitlines()), # reemplazamos los saltos de línea por html
},
)
send_mail = EmailMessage(
subject = subject,
body = body,
from_email = email,
to = ['info@lubresrl.com.ar']
)
send_mail.content_subtype = 'html'
try:
send_mail.send()
except BadHeaderError:
return HttpResponse('Invalid header found.')
return HttpResponseRedirect(reverse('landing:index'))
class CategoriaListView(ListView):
model = Grupo
template_name = 'landing/categorias.html'
def get_context_data(self, **kwargs):
context = super().get_context_data()
try:
categoria_id = self.kwargs['pk']
except:
categoria_id = None
grupos = None
productos = None
if categoria_id:
grupos = Grupo.objects.filter(activo=1).filter(categoria=categoria_id)
productos = Producto.objects.filter(activo=1).filter(grupo__in=grupos)
context = {
'grupos': grupos,
'productos': productos
}
return context
class ProductoDetail(DetailView):
model = Producto
template_name = 'landing/producto_detalle.html' # grupo/producto_detail.html
class ServicioDetail(DetailView):
model = Entries
template_name = 'landing/servicio_detalle.html' # service/service_detail.html
| [
"roberto.west@gmail.com"
] | roberto.west@gmail.com |
9f137e63c78da3d9c8366edc2ff65fec7817d894 | f5648d8e46f8394e367c33fe5f4272e32950baa7 | /docs/conf.py | adb8eb8928d0016c8a18c3132911e66e7e3a00b8 | [
"MIT"
] | permissive | johnnygreco/gala | ea90f20602e21ffa5c7fd343b2cd2deb9daeb844 | 93ded8a7d79ba0e01cf9e610890d9e38a820a23a | refs/heads/main | 2021-06-25T11:32:32.593330 | 2020-11-12T18:56:45 | 2020-11-12T18:56:45 | 166,590,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,947 | py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import os
import sys
import datetime
from importlib import import_module
try:
from sphinx_astropy.conf.v1 import * # noqa
except ImportError:
print('ERROR: the documentation requires the sphinx-astropy package to be installed')
sys.exit(1)
# Get configuration information from setup.cfg
from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
# By default, highlight as Python 3.
highlight_language = 'python3'
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
check_sphinx_version("1.3")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog = """
"""
# Add sympy to intersphinx mapping
intersphinx_mapping['sympy'] = ('https://docs.sympy.org/latest/', None)
intersphinx_mapping['scipy'] = ('https://docs.scipy.org/doc/scipy/reference',
None)
# Show / hide TODO blocks
todo_include_todos = True
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# Note: For gala, the package name is different from the project name!
package_name = 'gala'
import_module(package_name)
package = sys.modules[package_name]
# TODO: Use Gala style when building docs
mpl_style = None
exec('from {0}.mpl_style import mpl_style'.format(package_name))
if mpl_style is not None:
plot_rcparams = mpl_style
plot_apply_rcparams = True
plot_formats = [('png', 200)]
plot_include_source = False
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
html_theme_options = {
'logotext1': '', # white, semi-bold
'logotext2': 'Gala', # red, light
'logotext3': ':docs' # white, light
}
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
# html_theme_path = ['_themes/sphinx_rtd_theme']
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
# html_theme = "sphinx_rtd_theme"
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '_static'))
html_favicon = os.path.join(path, 'm104.ico')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# Static files to copy after template files
html_static_path = ['_static']
html_style = 'gala.css'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
## -- Options for the edit_on_github extension ----------------------------------------
# show inherited members for classes
automodsumm_inherited_members = True
# Add nbsphinx
extensions += ['nbsphinx', 'IPython.sphinxext.ipython_console_highlighting']
exclude_patterns += ['_build', '**.ipynb_checkpoints']
# Custom setting for nbsphinx - timeout for executing one cell
nbsphinx_timeout = 300
| [
"adrian.prw@gmail.com"
] | adrian.prw@gmail.com |
98b5d2f8925aad7f853420a2261030621736ff65 | 156eeb39549bbe83f004d0439897d4c2b4f3950c | /day0c/w1.py | a66d2563b6fa571d45ae7be6c57ded4eb86bd26e | [] | no_license | zhangzongyan/python20180319 | 235a50609713d436b13207c205fcfedb8250c8f7 | 8335d324b3fcf142316552b9e3774052a5f9d739 | refs/heads/master | 2021-04-12T10:09:36.882132 | 2018-04-25T06:17:42 | 2018-04-25T06:17:42 | 126,292,592 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 614 | py |
'这是一个装饰器(deractor)'
# 函数作为另一个函数的返回值(高阶函数)
def lazy_fun():
def newnow():
print("2018-4-4")
return newnow
import functools
# 装饰器:在函数执行的过程中为函数加功能
def log(fun): # "闭包":内部函数可以保存住外部函数的参数变量
@functools.wraps(fun) # 将fun函数的属性赋值给wrapper 包括__name__
def wrapper(*args, **kw):
print("%s is called" % fun.__name__)
return fun(*args, **kw)
return wrapper
@log # now = log(now)
def now():
print("2018-4-4")
now()
print("%s" % now.__name__)
#print(lazy_fun()())
| [
"zhangzongyan@uplooking.com"
] | zhangzongyan@uplooking.com |
71c9e24b8e56de6df403cbc048b86a89c290ab1b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_143/ch74_2020_04_08_17_31_24_944920.py | cda189c277d634bf91594f056c341b5ccf0223f8 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | def conta_bigramas(s):
dici={}
li=[]
s1=s[0]
s2=s[1]
soma= s1+s2
li.append(soma)
for s1 in range(2, len(s)):
soma=s[s1]+s2
li.append(soma)
s2=s[s1]
i=0
while i<len(li):
b=0
c=0
while b<len(li):
if li[i]==li[b]:
c+=1
b+=1
else:
b+=1
dici[li[0]]=c-1
i+=1
return dici
| [
"you@example.com"
] | you@example.com |
d11bbf20bf9c55706dbd88f3157a5d28fefe56dd | 77eb9612112673c67ef6db412818d0e0d4cd9f0d | /backend/run_from_the_monste_18241/settings.py | fa113d755e4511b3fba83dd63c60b6df389e1aa4 | [] | no_license | crowdbotics-apps/run-from-the-monste-18241 | adbddf3a709023b71144dcd7df1da410c0866272 | 8c40954691cd52ba33fcfce57926c909cffd6eb6 | refs/heads/master | 2022-10-24T14:06:44.947707 | 2020-06-19T01:10:35 | 2020-06-19T01:10:35 | 273,372,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,772 | py | """
Django settings for run_from_the_monste_18241 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"event",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "run_from_the_monste_18241.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "run_from_the_monste_18241.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
408294b5bc212b4736b0b873dc3c448c62d532cc | 735360a55198b02ccfc3f7e57e7089c4a0ce45bd | /UnderstandingWeights/readWWWSMEFTLHE_V1.py | ed68e659ac3c2581830c55004ae5e987b1d559d0 | [] | no_license | sgnoohc/EFTAnalysis | d74991624117c6af42366f171edaf20b9ac56c77 | 7c9e37c71bd055af90f3110708e72357e45da3cd | refs/heads/master | 2023-04-15T07:06:30.808735 | 2021-04-21T17:23:37 | 2021-04-21T17:23:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,551 | py | import sys
import ROOT as rt
import math
from LHEevent import *
from LHEfile import *
import plotTools
if __name__ == '__main__':
    # NOTE: Python 2 script (bare `print` statement below); depends on ROOT
    # (imported as rt) and the local LHEevent/LHEfile helpers.
    # ------------------------------------------------------------------
    # Book output histograms.  Sumw2() enables weighted per-bin errors.
    # ------------------------------------------------------------------
    #Bprime histograms
    MW_jj = rt.TH1D("MW_jj", "MW_jj", 500, 0., 500)
    MW_jj.Sumw2()
    MInvariantMass_mumu = rt.TH1F("MInvariantMass_mumu", "MInvariantMass_mumu", 500, 0., 500);
    MInvariantMass_mumu.Sumw2()
    MInvariantMass_qq = rt.TH1F("MInvariantMass_qq", "MInvariantMass_qq", 500, 0., 1000.0);
    MInvariantMass_qq.Sumw2()
    EW_jj = rt.TH1D("EW_jj", "EW_jj", 500, 0., 1000)
    EW_jj.Sumw2()
    EW_qq = rt.TH1D("EW_qq", "EW_qq", 500, 0., 1000)
    EW_qq.Sumw2()
    pTW_jj = rt.TH1D("pTW_jj", "pTW_jj", 500, 0., 500)
    pTW_jj.Sumw2()
    # Scratch four-vectors for up to three W bosons per event.
    W1_lv = rt.TLorentzVector()
    W2_lv = rt.TLorentzVector()
    W3_lv = rt.TLorentzVector()
    massW_1 = rt.TH1F("massW_1", "massW_1", 500, 0., 500.0)
    massW_1.Sumw2()
    massW_2 = rt.TH1F("massW_2", "massW_2", 500, 0., 500.0)
    massW_2.Sumw2()
    massW_3 = rt.TH1F("massW_3", "massW_3", 500, 0., 500.0)
    massW_3.Sumw2()
    el_lv = rt.TLorentzVector()
    nuel_lv = rt.TLorentzVector()
    h_el_pT = rt.TH1F("h_el_pT", "h_el_pT", 500, 0., 500.0)
    h_el_pT.Sumw2()
    h_nu_pT = rt.TH1F("h_nu_pT", "h_nu_pT", 500, 0., 500.0)
    h_nu_pT.Sumw2()
    # WWW invariant-mass spectra, one histogram per SMEFT cW reweighting point.
    M_www_sm = rt.TH1D("M_www_sm", "M_www_sm", 500, 0.0, 5000.0)
    M_www_sm.Sumw2()
    M_www_cWm05 = rt.TH1D("M_www_cWm05", "M_www_cWm05", 500, 0.0, 5000.0)
    M_www_cWm05.Sumw2()
    M_www_cWm5 = rt.TH1D("M_www_cWm5", "M_www_cWm5", 500, 0.0, 5000.0)
    M_www_cWm5.Sumw2()
    M_www_cWm1 = rt.TH1D("M_www_cWm1", "M_www_cWm1", 500, 0.0, 5000.0)
    M_www_cWm1.Sumw2()
    M_www_cWp1 = rt.TH1D("M_www_cWp1", "M_www_cWp1", 500, 0.0, 5000.0)
    M_www_cWp1.Sumw2()
    M_www_cWp5 = rt.TH1D("M_www_cWp5", "M_www_cWp5", 500, 0.0, 5000.0)
    M_www_cWp5.Sumw2()
    M_www_cWp05 = rt.TH1D("M_www_cWp05", "M_www_cWp05", 500, 0.0, 5000.0)
    M_www_cWp05.Sumw2()
    # find events in file
    # argv[1] is the input LHE file; processing is capped at 100k events.
    myLHEfile = LHEfile(sys.argv[1])
    myLHEfile.setMax(100000)
    #myLHEfile.setMax(2)
    eventsReadIn = myLHEfile.readEvents()
    weightsReadIn = myLHEfile.readWeights()
    # Per-event weight lists, one per EFT reweighting point; entry i is
    # presumed to correspond to event i of eventsReadIn (same file order)
    # -- TODO confirm against LHEfile.
    sm = []
    cWm05 = []
    cWm5 = []
    cWm1 = []
    cWp5 = []
    cWp1 = []
    cWp05 = []
    for oneWeight in weightsReadIn:
        myLHEevent = LHEevent()
        myLHEevent.fillWeight(oneWeight)
        for i in range(0,len(myLHEevent.Weights)):
            p = myLHEevent.Weights[i]
            if(p['weightID']=='EFT_SM'): sm.append(p['weightValue'])
            if(p['weightID']=='EFT_cW_m05'): cWm05.append(p['weightValue'])
            if(p['weightID']=='EFT_cW_m5'): cWm5.append(p['weightValue'])
            if(p['weightID']=='EFT_cW_m1'): cWm1.append(p['weightValue'])
            if(p['weightID']=='EFT_cW_p5'): cWp5.append(p['weightValue'])
            if(p['weightID']=='EFT_cW_p1'): cWp1.append(p['weightValue'])
            if(p['weightID']=='EFT_cW_p05'): cWp05.append(p['weightValue'])
    #print len(sm)
    eventIdx=0
    for oneEvent in eventsReadIn:
        eventIdx += 1
        #print eventIdx
        #print oneEvent
        myLHEevent = LHEevent()
        myLHEevent.fillEvent(oneEvent)
        n_mu = 0
        n_q = 0
        n_el = 0
        n_nuel = 0
        mass = []  # masses of W bosons (|PDG ID| == 24) seen so far in this event
        for i in range(0,len(myLHEevent.Particles)):
            p = myLHEevent.Particles[i]
            if abs(p['ID']) == 24: MW_jj.Fill(p['M'])
            if abs(p['ID']) == 24: EW_jj.Fill(p['E'])
            if (abs(p['ID']) == 24 and rt.TMath.Sqrt(p['Px']*p['Px'] + p['Py']*p['Py']) > 50.0): pTW_jj.Fill(rt.TMath.Sqrt(p['Px']*p['Px'] + p['Py']*p['Py']))
            if abs(p['ID']) == 24:
                mass.append(p['M'])
                mass.sort()
                # Fill the Nth-W histograms keyed on how many Ws were found so far.
                if(len(mass)==1): massW_1.Fill(mass[0])
                if(len(mass)==2): massW_2.Fill(mass[1])
                if(len(mass)==3): massW_3.Fill(mass[2])
                if(len(mass)==1): W1_lv = rt.TLorentzVector(p['Px'], p['Py'], p['Pz'], p['E'])
                if(len(mass)==2): W2_lv = rt.TLorentzVector(p['Px'], p['Py'], p['Pz'], p['E'])
                if(len(mass)==3): W3_lv = rt.TLorentzVector(p['Px'], p['Py'], p['Pz'], p['E'])
                # On the third W, fill M(WWW) for every EFT point; each entry is
                # weighted by (EFT weight)/(SM weight) for this event (ratio is
                # identically 1 for the SM histogram).
                if(len(mass)==3): M_www_sm.Fill((W1_lv+W2_lv+W3_lv).M(), sm[eventIdx-1]/sm[eventIdx-1])
                if(len(mass)==3): M_www_cWm05.Fill((W1_lv+W2_lv+W3_lv).M(), cWm05[eventIdx-1]/sm[eventIdx-1])
                if(len(mass)==3): M_www_cWp05.Fill((W1_lv+W2_lv+W3_lv).M(), cWp05[eventIdx-1]/sm[eventIdx-1])
                if(len(mass)==3): M_www_cWm1.Fill((W1_lv+W2_lv+W3_lv).M(), cWm1[eventIdx-1]/sm[eventIdx-1])
                if(len(mass)==3): M_www_cWp1.Fill((W1_lv+W2_lv+W3_lv).M(), cWp1[eventIdx-1]/sm[eventIdx-1])
                if(len(mass)==3): M_www_cWm5.Fill((W1_lv+W2_lv+W3_lv).M(), cWm5[eventIdx-1]/sm[eventIdx-1])
                if(len(mass)==3): M_www_cWp5.Fill((W1_lv+W2_lv+W3_lv).M(), cWp5[eventIdx-1]/sm[eventIdx-1])
        #print eventIdx
        #print sm[eventIdx-1]
        print cWp5[eventIdx-1]
        del oneEvent, myLHEevent
    # write the histograms
    # argv[2] is the output ROOT file (recreated on every run).  Note that
    # EW_qq, h_el_pT and h_nu_pT are booked but never filled nor written.
    histoFILE = rt.TFile(sys.argv[2],"RECREATE")
    MW_jj.Write()
    EW_jj.Write()
    MInvariantMass_mumu.Write();
    MInvariantMass_qq.Write();
    pTW_jj.Write()
    massW_1.Write()
    massW_2.Write()
    massW_3.Write()
    M_www_sm.Write()
    M_www_cWm05.Write()
    M_www_cWp05.Write()
    M_www_cWm1.Write()
    M_www_cWp1.Write()
    M_www_cWm5.Write()
    M_www_cWp5.Write()
    histoFILE.Close()
| [
"saptaparna@gmail.com"
] | saptaparna@gmail.com |
5a7e8e0caabc73c065826a7358d742e6c72cbcc9 | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/Sudoku_II_004_20180620125837.py | 202fa907284aed5e7c7d3ad3fbc832a22382abf5 | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,675 | py | from random import randint
# Sudoku1 almost solved
sudoku1 = [
    [5, 9, 8, 6, 1, 2, 3, 4, 7],
    [2, 1, 7, 9, 3, 4, 8, 6, 5],
    [6, 4, 3, 5, 8, 7, 1, 2, 9],
    [1, 6, 5, 4, 9, 8, 2, 7, 3],
    [3, 2, 9, 7, 6, 5, 4, 1, 8],
    [7, 8, 4, 3, 2, 1, 5, 9, 6],
    [8, 3, 1, 2, 7, 6, 9, 5, 4],
    [4, 7, 2, 8, 5, 9, 6, 3, 1],
    [9, 5, 6, 1, 4, 3, 7, 8, " "]
]

# Sudoku 2 almost solved (kept for reference, currently unused)
# row1 = [9,8,7,4,3,2,5,6,1]
# row2 = [2,4,3,5,1,6,8,7,9]
# row3 = [5,6,1,7,9,8,4,3,2]
# row4 = [3,9,5,6,4,7,2,1,8]
# row5 = [8,2,4,3,5,1,6,9,7]
# row6 = [1,7,6,2,8,9,3,4,5]
# row7 = [7,1,2,8,6,3,9,5,4]
# row8 = [4,3,8,9,7,5,1,2,6]
# row9 = [' ',5,' ',' ',2,' ',7,' ',' ']


def printSudoku():
    """Print sudoku1 as a 9x9 grid: column header on top, then the nine
    rows with a dashed separator after every third row."""
    print(" 1 2 3 4 5 6 7 8 9")
    print(" -------------------------")
    for row_number in range(1, 10):
        cells = sudoku1[row_number - 1]
        print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(cells, "|", row_number))
        if row_number in (3, 6, 9):
            print(" -------------------------")
while True:  # prompt for moves until the board is solved
    print("Your sudoku to solve:")
    printSudoku()
    print("Input 3 numbers in format a b c, np. 4 5 8")
    print(" a - row number")
    print(" b - column number ")
    print(" c - value")
    x = input("Input a b c: ")
    print("")
    numbers = " 0123456789"  # allowed characters for the a/b/c positions
    # Reject anything that is not exactly "<digit> <digit> <digit>".
    if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
            str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
        if x == "r":  # reset
            print(" Function reset() will be ready in Next Week")
        else:
            print("Error - wrong number format \n ")
        continue
    # Apply the move (1-based row/column from the user, 0-based indexing here).
    sudoku1[int(x[0]) - 1][int(x[2]) - 1] = int(x[4])
    # Bug fix: the original debug section summed into an uninitialized name
    # (`column = column + ...` -> NameError) and called int(" ") on the blank
    # cell (ValueError), neither of which was caught by `except TypeError`,
    # so the program crashed on the first valid move.  The broken column-sum
    # prints were unreachable and have been removed; only the working
    # win check (every row sums to 45) is kept.
    try:
        solved_rows = 0
        for row in sudoku1:
            if sum(row) == 45:
                solved_rows += 1
        if solved_rows == 9:
            print("@@@@@@@@@@ YOU WIN @@@@@@@@@@")
            break
    except TypeError:
        # The board still contains the blank " " cell; keep playing.
        print()
'''
print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
sudoku_number = randint(0, 1)
rows_fill(sudoku_number)
elif int(choice) == 1:
rows_fill(0)
elif int(choice) == 2:
rows_fill(1)
elif int(choice) == 3:
rows_fill(0)
'''
| [
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
345378b2169dca467af4d7231ffe8a84bbd82b45 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /android_webview/tools/cts_archive/3pp/fetch.py | e27e9f3d67727c75e9eaed90b4e0c61b6e16166e | [
"BSD-3-Clause"
] | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 1,656 | py | #!/usr/bin/env python3
# Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
def do_latest():
    """Print the 3pp 'latest' version tag on stdout."""
    # Bump this date whenever the CTS download URLs below are updated.
    print('2023.03.01')
def get_download_url():
    """Print the 3pp download manifest (urls, names, extension) as JSON."""
    cts_host = "https://dl.google.com/dl/android/cts/"
    archives = [
        "android-cts-6.0_r32-linux_x86-arm.zip",
        "android-cts-6.0_r32-linux_x86-x86.zip",
        "android-cts-7.0_r33-linux_x86-arm.zip",
        "android-cts-7.0_r33-linux_x86-x86.zip",
        "android-cts-8.0_R26-linux_x86-arm.zip",
        "android-cts-8.0_R26-linux_x86-x86.zip",
        "android-cts-9.0_r20-linux_x86-arm.zip",
        "android-cts-9.0_r20-linux_x86-x86.zip",
        "android-cts-10_r15-linux_x86-arm.zip",
        "android-cts-10_r15-linux_x86-x86.zip",
        "android-cts-11_r11-linux_x86-arm.zip",
        "android-cts-11_r11-linux_x86-x86.zip",
        "android-cts-12_r7-linux_x86-arm.zip",
        "android-cts-12_r7-linux_x86-x86.zip",
        "android-cts-13_r3-linux_x86-arm.zip",
        "android-cts-13_r3-linux_x86-x86.zip",
    ]
    # Key order (url, name, ext) matters: it is preserved in the JSON output.
    manifest = {
        'url': [cts_host + archive for archive in archives],
        'name': archives,
        'ext': '.zip',
    }
    print(json.dumps(manifest))
def main():
    """CLI entry point: `latest` prints the version, `get_url` the manifest."""
    parser = argparse.ArgumentParser()
    commands = parser.add_subparsers()
    latest_cmd = commands.add_parser("latest")
    latest_cmd.set_defaults(func=lambda _opts: do_latest())
    url_cmd = commands.add_parser("get_url")
    url_cmd.set_defaults(func=lambda _opts: get_download_url())
    opts = parser.parse_args()
    opts.func(opts)


if __name__ == '__main__':
    main()
| [
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] | chromium-scoped@luci-project-accounts.iam.gserviceaccount.com |
406ea6e8a4d0b6f11537b17e9381d3ced513dee4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02691/s400764885.py | 7224a67c3349f540080134940093f8e2859b5465 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | n = int(input())
# Counts index pairs (i, j) with i + a[i-1] == j - a[j-1] in O(n) using a
# frequency map.  NOTE(review): assumes `n` (the element count) was read
# from stdin just above this block -- confirm against the fused first line.
a = list(map(int, input().split()))
dict_diffs = dict()
# Bucket the left-hand values i + a[i-1] by how often each occurs.
for i in range(1, n+1):
    dict_diffs[i+a[i-1]] = dict_diffs.get(i+a[i-1], 0) + 1
total = 0
# For each j, add the number of i whose bucket matches j - a[j-1].
for j in range(1, n+1):
    total += dict_diffs.get(j-a[j-1], 0)
print(total) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3c3757c27fb15ab2ef3b2a5a0211621bd2d3c7cb | 150464efa69db3abf328ef8cd912e8e248c633e6 | /_4.python/__code/Python自學聖經(第二版)/ch03/floor.py | 918f21e2c941654eaabeed6d109074468c10a37c | [] | no_license | bunshue/vcs | 2d194906b7e8c077f813b02f2edc70c4b197ab2b | d9a994e3afbb9ea84cc01284934c39860fea1061 | refs/heads/master | 2023-08-23T22:53:08.303457 | 2023-08-23T13:02:34 | 2023-08-23T13:02:34 | 127,182,360 | 6 | 3 | null | 2023-05-22T21:33:09 | 2018-03-28T18:33:23 | C# | UTF-8 | Python | false | false | 195 | py | n = int(input("請輸入大樓的樓層數:"))
# Print the floor labels of a building that skips the number 4
# (tetraphobia): a building with n storeys uses labels 1..n+1 with "4"
# omitted, so bump n once the label range passes 3.
print("本大樓具有的樓層為:")
if(n > 3):
    n += 1  # compensate for the skipped "4" label
for i in range(1, n+1):
    if(i==4):
        continue  # floor number 4 is never shown
    print(i, end=" ")
print() | [
"david@insighteyes.com"
] | david@insighteyes.com |
98204de8b55b02edefa7274b827be91b2dc91d7a | a33aeaf053fc020d39d75dc55d563e1e40389fcd | /polyglotdb/graph/attributes/__init__.py | e3afecef82b5725599aec7a89e9d7c06ded8aeae | [
"MIT"
] | permissive | orianakc/PolyglotDB | 0589a8248b9b28ce340e4a1bf15af9f03f596814 | 210c6e6cccba692c145422109e5872ed5df4d161 | refs/heads/master | 2021-01-20T23:52:08.755394 | 2016-03-03T18:48:17 | 2016-03-03T18:48:17 | 50,045,371 | 0 | 0 | null | 2016-01-20T17:02:27 | 2016-01-20T17:02:26 | null | UTF-8 | Python | false | false | 398 | py |
from .base import AnnotationAttribute, Attribute
from .aggregate import AggregateAttribute
from .path import (PathAnnotation, PathAttribute, SubPathAnnotation,
PositionalAnnotation, PositionalAttribute)
from .discourse import DiscourseAnnotation
from .speaker import SpeakerAnnotation
from .subannotation import SubAnnotation
from .hierarchical import HierarchicalAnnotation
| [
"michael.e.mcauliffe@gmail.com"
] | michael.e.mcauliffe@gmail.com |
55c2b39333001ba8b4d35ef33a295aab48fd38af | 4c33c000ef250d16f20ded29b8d212a55dbdd625 | /polls/tests.py | 024674198163fa966441e10f571724790741bcc0 | [] | no_license | hansunho/djangoTutorial | 3ec8aee1666c02dc84873ea9646f7e65b3a50772 | 360f16dabc396c1133dfb363569e5038cbd87a7f | refs/heads/master | 2021-01-20T04:08:13.133934 | 2017-05-04T21:45:54 | 2017-05-04T21:45:54 | 89,641,862 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,983 | py | import datetime
from django.utils import timezone
from django.test import TestCase
from django.urls import reverse
from .models import Question
class QuestionMethodTests(TestCase):
    """Unit tests for Question.was_published_recently()."""

    def _question_published_at(self, delta):
        # Helper: build an (unsaved) Question whose pub_date is now + delta.
        return Question(pub_date=timezone.now() + delta)

    def test_was_published_recently_with_future_question(self):
        """
        was_published_recently() should return False for questions whose
        pub_date is in the future.
        """
        question = self._question_published_at(datetime.timedelta(days=30))
        self.assertIs(question.was_published_recently(), False)

    def test_was_published_recently_with_old_question(self):
        """
        was_published_recently() should return False for questions whose
        pub_date is older than 1 day.
        """
        question = self._question_published_at(datetime.timedelta(days=-30))
        self.assertIs(question.was_published_recently(), False)

    def test_was_published_recently_with_recent_question(self):
        """
        was_published_recently() should return True for questions whose
        pub_date is within the last day.
        """
        question = self._question_published_at(datetime.timedelta(hours=-1))
        self.assertIs(question.was_published_recently(), True)
def create_question(question_text, days):
    """
    Create and save a Question whose pub_date is offset `days` from now
    (negative for questions published in the past, positive for questions
    that have yet to be published).
    """
    pub_date = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text, pub_date=pub_date)
class QuestionViewTests(TestCase):
    """Integration tests for the polls index view (URL name 'polls:index'):
    only past questions are listed, newest first."""

    def test_index_view_with_no_questions(self):
        """
        If no questions exist, an appropriate message should be displayed.
        """
        response = self.client.get(reverse('polls:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No polls are available.")
        self.assertQuerysetEqual(response.context['latest_question_list'], [])

    def test_index_view_with_a_past_question(self):
        """
        Questions with a pub_date in the past should be displayed on the
        index page.
        """
        create_question(question_text="Past question.", days=-30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question.>']
        )

    def test_index_view_with_a_future_question(self):
        """
        Questions with a pub_date in the future should not be displayed on
        the index page.
        """
        create_question(question_text="Future question.", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertContains(response, "No polls are available.")
        self.assertQuerysetEqual(response.context['latest_question_list'], [])

    def test_index_view_with_future_question_and_past_question(self):
        """
        Even if both past and future questions exist, only past questions
        should be displayed.
        """
        create_question(question_text="Past question.", days=-30)
        create_question(question_text="Future question.", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question.>']
        )

    def test_index_view_with_two_past_questions(self):
        """
        The questions index page may display multiple questions.
        """
        create_question(question_text="Past question 1.", days=-30)
        create_question(question_text="Past question 2.", days=-5)
        response = self.client.get(reverse('polls:index'))
        # Expected order is newest-first (question 2 was published later).
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question 2.>', '<Question: Past question 1.>']
        )
class QuestionIndexDetailTests(TestCase):
    """Integration tests for the polls detail view (URL name 'polls:detail'):
    future questions 404, past questions render their text."""

    def test_detail_view_with_a_future_question(self):
        """
        The detail view of a question with a pub_date in the future should
        return a 404 not found.
        """
        future_question = create_question(question_text='Future question.', days=5)
        url = reverse('polls:detail', args=(future_question.id,))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    def test_detail_view_with_a_past_question(self):
        """
        The detail view of a question with a pub_date in the past should
        display the question's text.
        """
        past_question = create_question(question_text='Past Question.', days=-5)
        url = reverse('polls:detail', args=(past_question.id,))
        response = self.client.get(url)
        self.assertContains(response, past_question.question_text)
| [
"="
] | = |
74e8e0f1afda986b1f35a1aaf5b0d3dae2295961 | 3e6c0e46a318fd8583f207103e96fff755e0e128 | /10. Interactivity with Mouse/reference_pmouseX/reference_pmouseX.pyde | 62a7cded548d35c503bf90e97b01a6dcd1ed7b8e | [] | no_license | demikaiser/BeautifulCodingWithProcessingPyBasic | c16842df79671baa5282480b35989671c95738c1 | 69d82be9a183ba89598934ebd5de81ed0c83f52a | refs/heads/main | 2023-08-26T10:12:25.761523 | 2021-10-22T17:12:50 | 2021-10-22T17:12:50 | 387,465,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | pyde | def setup():
size(640, 320)
def draw():
background(0)
textSize(64)
text("pmouseX: " + str(pmouseX), 50, 100)
delay(1000) # Delay 1 second.
text("mouseX: " + str(mouseX), 50, 200)
| [
"demikaiser13@gmail.com"
] | demikaiser13@gmail.com |
237f9f6eb29bfa0188f763b7069a79bd69d30dd9 | a54b2e587763e1ef80572f89086ba85f9616e8d9 | /day11/exercise_2_server.py | 5118d0fe93a7cca658862f470f5bfe63bfdf2fc2 | [] | no_license | Linkin-1995/test_code2 | 66848a5c00f747a911929d9a0fe2e869e1d50555 | fae7a7fb36583acbe4a592736acd88823506d548 | refs/heads/master | 2022-12-05T03:08:48.386272 | 2020-08-21T10:42:55 | 2020-08-21T10:42:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | """
练习: 从客户端传递一张图片给服务端,在服务端
以当前日期为名字保存起来
思路: 客户端 将文件内容读取出来发送
服务端 接收文件内容,写入本地
要求 : 文件可能很大,不允许一次性读取
循环读取发送
"""
from socket import *
import time
# Receive an image over a connected socket and save it to disk.
def get_image(connfd):
    """Read bytes from `connfd` until EOF and write them to a file named
    after today's date ("<year>-<month>-<day>.jpg") in the current directory.

    Fix: the file is now opened with a context manager so the handle is
    closed even if recv() raises mid-transfer (the original leaked it).
    """
    filename = "%s-%s-%s.jpg" % time.localtime()[:3]
    with open(filename, 'wb') as image_file:
        # Receive and write chunk by chunk; an empty bytes object means the
        # peer closed the connection.
        while True:
            data = connfd.recv(1024)
            if not data:
                break
            image_file.write(data)
def main():
    """Listen for TCP clients on port 8888 and save one image per connection."""
    # SOCK_STREAM is the default, so this is an ordinary TCP socket.
    listener = socket(AF_INET, SOCK_STREAM)
    listener.bind(("0.0.0.0", 8888))
    listener.listen(5)
    # Serve clients one at a time, forever.
    while True:
        print("等待客户端连接....")
        connfd, addr = listener.accept()
        print("连接:", addr)
        get_image(connfd)
        # The per-connection socket is useless once the client is done.
        connfd.close()
    # Unreachable (the loop above never exits); kept to mirror the original.
    listener.close()
if __name__ == '__main__':
    main()
| [
"1105377455@qq.com"
] | 1105377455@qq.com |
a00e35fa54e961a5f4d211b26701e93759a8c43f | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /K4aKGbfmzgyNNYEcM_23.py | 680be4587d222015a58a405596f277b3cff43d55 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | """
Given the _number_ `n` and a _list of interior angles_ `angles`, return
whether or not it's possible to make a convex polygon with `n` sides with the
`angles` given. Remember that angles must be **under 180°**.
is_shape_possible(3, [80, 70, 30]) ➞ True

A shape with **3** sides and the angles **80°, 70° and 30°** is a possible
shape.
### Examples
is_shape_possible(4, [90, 90, 90, 90]) ➞ True
is_shape_possible(3, [20, 20, 140]) ➞ True
is_shape_possible(1, [21]) ➞ False
# n must be larger than 2
is_shape_possible(5, [500, 10, 10, 10, 10]) ➞ False
# You can't have an interior angle bigger than 180°
### Notes
* Return `False` if `n` is less than 3 (see example #3).
* There will always be an `n` number of positive integers given as `angles`.
* The sum of interior angles is **(n - 2) x 180°**.
"""
def is_shape_possible(n, angles):
    """Return True when `angles` can be the interior angles of a convex
    n-gon: they must sum to (n - 2) * 180 and each be below 180 degrees."""
    expected_total = (n - 2) * 180
    return sum(angles) == expected_total and all(angle < 180 for angle in angles)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
be6c8b4f9a99d07a23538b86eb5a5ecfcf8657e7 | 6c2608bc87b522da77c792e20330989de17b3005 | /Chap-1/ex5.py | 40de0c934600749567fb9d49588b1daca785f6cb | [] | no_license | AleByron/AleByron-The-Python-Workbook-second-edition | 8a0b408c1bbd90c82e6b837fc898ee10341ca8fa | 491b2fd394aa04e29a4b2dbe9a615c547e239028 | refs/heads/main | 2023-01-13T21:01:17.757669 | 2020-11-11T01:29:28 | 2020-11-11T01:29:28 | 306,487,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | small = input("how many one liter bottles you will deposit?: ")
# Bottle-deposit refund: one-litre bottles refund $0.10 each, larger
# bottles $0.25 each.  `small` was read from stdin on the preceding line.
big = input("how many bottles of more than one liter will you deposit?: ")
small = float(small)*0.10  # total refund for the one-litre bottles
big = float(big)*0.25      # total refund for the larger bottles
total = big+small
print("The total value of your refund is:", total, "$" ) | [
"noreply@github.com"
] | AleByron.noreply@github.com |
bfa97789cc8d5de9570bd3dc7395444c75edfcb0 | d7cec9bc446d07dc71708ec8b854dc756f18c417 | /plotting_scripts/plot_rollback_time_box_and_whiskers.py | e9a81156911c806a866fba95de25417b23ccbec0 | [] | no_license | ICGog/falkirk-experiments | 65d9c9ae7e1169a5d80b5fed585382a8aac66d10 | 1e9de3233b93490f4996ed499bc735b6099fec3e | refs/heads/master | 2021-10-23T22:54:36.543899 | 2021-10-13T01:00:00 | 2021-10-13T01:00:00 | 83,070,442 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,913 | py | # Copyright (c) 2016, Ionel Gog
import csv
import gflags
import math
import matplotlib
matplotlib.use("agg")
import os, sys
import matplotlib.pyplot as plt
import numpy as np
from utils import *
from matplotlib import pylab
from scipy.stats import scoreatpercentile
from box_and_whisker import *
# Command-line flags (python-gflags); values become available on FLAGS
# after FLAGS(argv) is called in main().
FLAGS = gflags.FLAGS
gflags.DEFINE_bool('paper_mode', False, 'Adjusts the size of the plots.')
gflags.DEFINE_string('begin_tag', '', 'Name of begin the event')
gflags.DEFINE_string('end_tag', '', 'Name of end the event')
gflags.DEFINE_string('ftmanager_log_paths', '',
                     'Comma-separated list of paths to ftmanager log files.')
gflags.DEFINE_string("xlabels", '', 'list of xlabels')
gflags.DEFINE_string('file_format', 'pdf', 'Plot file format')
def get_action_duration(ftmanager_log_path, begin_tag, end_tag):
    """Return the list of durations (end time - begin time) for every
    begin_tag/end_tag pair found in the given ftmanager log.

    Each line is split on ':'; field 0 is an integer timestamp and field 1
    the event tag -- presumably "<time> : <tag>" -- TODO confirm format.
    Unmatched tags are reported on stdout but do not abort parsing.
    NOTE: Python 2 (bare print statements below).
    """
    logfile = open(ftmanager_log_path)
    last_begin_tag = -1     # timestamp of the last unmatched begin tag
    seen_end_tag = True     # True once the previous begin has been closed
    durations = []
    for row in logfile.readlines():
        fields = [x.strip() for x in row.split(':')]
        time = int(fields[0])
        if fields[1] == begin_tag:
            if seen_end_tag is False:
                # Previous begin was never closed; it gets overwritten below.
                print "Two consecutive begin tags!"
            last_begin_tag = time
            seen_end_tag = False
        if fields[1] == end_tag:
            if last_begin_tag >= 0:
                seen_end_tag = True
                durations.append(time - last_begin_tag)
            else:
                print "Two consecutive end tags!"
    logfile.close()
    print "Durations count: ", len(durations)
    return durations
def plot_rollback_duration(durations, labels, colors):
    """Draw a box-and-whisker plot of rollback-algorithm runtimes and save
    it as "rollback_computation_box_whiskers.<file_format>".

    durations -- list of per-setup duration samples (one list per box)
    labels    -- x-axis tick label per box
    colors    -- per-box colors passed to percentile_box_plot
    NOTE: Python 2 (bare print statements below).
    """
    if FLAGS.paper_mode:
        # Compact figure sized for a two-column paper.
        plt.figure(figsize=(3, 1.5))
        set_paper_rcs()
    else:
        plt.figure()
        set_rcs()
    ax = plt.gca()
    bp = percentile_box_plot(ax, durations, color=colors)
    for duration in durations:
        perc90 = np.percentile(duration, 90)
        perc99 = np.percentile(duration, 99)
        avg = np.mean(duration)
        max_val = np.max(duration)
        # NOTE(review): the label says 795 but the code divides by 759 --
        # one of the two is likely a typo; confirm before trusting output.
        print 'Divided by 795 ', avg / 759
        print perc90, perc99, max_val
    # plt.plot(-1, -1, label='Differential dataflow', color='r', lw=1.0)
    # plt.plot(-1, -1, label='Incremental', color='b', lw=1.0)
    # for i in range(2, len(durations), 2):
    #     plt.axvline(i + 0.5, ls='-', color='r')
    # ax.legend(frameon=False, loc="upper center", ncol=6,
    #           bbox_to_anchor=(0.0, 1.04, 1.0, 0.1), handletextpad=0.2,
    #           columnspacing=0.2)
    #plt.errorbar(range(1, len(setups) + 1), [np.mean(x) for x in runtimes],
    #             yerr=[np.std(x) for x in runtimes], marker="x")
    plt.xlim(0.5, len(durations) + 0.5)
    plt.ylim(0, 5)
    # plt.xticks([x * 2 + 1.5 for x in range(0, len(labels))], labels)
    plt.xticks([x + 1 for x in range(0, len(labels))], labels,
               rotation='vertical')
    # Raw values are in microseconds; relabel the y axis in whole seconds.
    plt.yticks(range(0, 5000001, 1000000), range(0, 6, 1))
    plt.ylabel("Max span algo. runtime [sec]")
    plt.xlabel("Number of processing vertices")
    plt.savefig("rollback_computation_box_whiskers." + FLAGS.file_format,
                format=FLAGS.file_format, bbox_inches="tight")
def main(argv):
    """Parse flags, load one duration series per ftmanager log, and plot.

    Colors start with a fixed palette; logs whose path contains
    'incremental' append red and 'differential' append blue boxes.
    """
    try:
        argv = FLAGS(argv)  # parse command-line flags; returns leftover args
    except gflags.FlagsError as e:
        print('%s\\nUsage: %s ARGS\\n%s' % (e, sys.argv[0], FLAGS))
    manager_paths = FLAGS.ftmanager_log_paths.split(',')
    xlabels = FLAGS.xlabels.split(',')
    colors = ['k', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b']
    durations = []
    for manager_path in manager_paths:
        if 'incremental' in manager_path:
            colors.append('r')
        elif 'differential' in manager_path:
            colors.append('b')
        durations.append(get_action_duration(manager_path, FLAGS.begin_tag, FLAGS.end_tag))
    print len(durations)
    plot_rollback_duration(durations, xlabels, colors)
if __name__ == '__main__':
    main(sys.argv)
| [
"gogionel@gmail.com"
] | gogionel@gmail.com |
7a872db5d31cf765a75d17f76c790fab4cefff14 | 9660ea2e94b1057249a5f86c8535535cb5bbbd06 | /backend/campusmap/apps.py | e9b25a56729b33a29651353757e1ea20540814e3 | [] | no_license | PSU-OIT-ARC/campusmap | 4330cd26671dffb822a5ed9b6894bb4c6db8cd4d | 7505f8fbda9d7f8de6d48539993986c10c44dce3 | refs/heads/develop | 2021-05-01T19:42:57.101201 | 2017-08-28T22:16:46 | 2017-08-28T22:16:46 | 31,921,978 | 0 | 1 | null | 2017-07-18T22:26:20 | 2015-03-09T20:52:38 | Python | UTF-8 | Python | false | false | 487 | py | from django.apps import AppConfig
class DefaultAppConfig(AppConfig):
name = 'campusmap'
def ready(self):
from django.contrib.gis.db.models import GeometryField
from rest_framework.serializers import ModelSerializer
from campusmap.serializers import GeoJSONField
# This makes DRF use our GeoJSON field serializer by default for
# all geometry field types.
ModelSerializer.serializer_field_mapping[GeometryField] = GeoJSONField
| [
"wbaldwin@pdx.edu"
] | wbaldwin@pdx.edu |
eca1e9e1cba026a9170a3dbf45f88970996fd3cc | 1aa340a8de26a83767a2491dca35f0c5dbd0e1f3 | /code/train_2.py | b51e1789922d516d87ef16c65560d46ffaa3f140 | [] | no_license | zhangxu0307/transport-forecasting-tianchi | 023df5bb891781f478add1fa68abe3cdb0aa7acf | d8584a34ee5250d3b846de58835175b5063337d6 | refs/heads/master | 2021-08-19T06:42:14.408395 | 2017-11-25T01:24:33 | 2017-11-25T01:24:45 | 111,967,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,253 | py | import pandas as pd
import numpy as np
from code.util import SMAPE, MAPE, crossValidation, gridSearch, kFoldCV
from code.model import buildTrainModel
from sklearn.externals import joblib
import matplotlib as mpl
mpl.use('Agg')
import xgboost as xgb
import matplotlib.pyplot as plt
def train(features, trainPath, testPath, index, saveName, onehotFeature):
    """Fit the model selected by `index` on the training window and save it.

    features      -- column names used as model input
    trainPath     -- CSV with the labelled training rows ('travel_time')
    testPath      -- CSV with the test rows (concatenated so that one-hot
                     encoding sees every category value)
    index         -- model selector passed to buildTrainModel/kFoldCV
    saveName      -- file name for the pickled model under ../model/
    onehotFeature -- categorical columns to one-hot encode then drop

    Returns the fitted estimator.

    Bug fix: the cross-validation call previously used the module-level
    global ``modelIndex`` instead of the ``index`` argument, so CV silently
    evaluated the wrong model whenever the two differed.
    """
    trainDF = pd.read_csv(trainPath, dtype={'link_ID': str})
    print("original dataset columns:", trainDF.columns)
    testDF = pd.read_csv(testPath, dtype={'link_ID': str})
    print(len(trainDF))
    # Concatenate so get_dummies produces identical columns for train/test.
    totalDF = pd.concat([trainDF, testDF], axis=0)
    print(len(totalDF))
    trainX = totalDF[features]
    for catgoryFeature in onehotFeature:
        Onehot = pd.get_dummies(trainX[catgoryFeature], prefix=catgoryFeature, sparse=True)
        trainX = pd.concat([trainX, Onehot], axis=1)
    # Months before June form the training window (June+ is the test set).
    trainX = trainX[trainX["month"] < 6]
    trainX = trainX.drop(onehotFeature, axis=1)
    print(trainX.columns)
    # log1p-transform the travel-time label (predictions are expm1'd back).
    trainY = trainDF['travel_time']
    trainY = np.log1p(trainY)
    print("trainx shape", trainX.values.shape)
    print("trainY shape", trainY.values.shape)
    rf = buildTrainModel(modelIndex=index)
    rf.fit(trainX, trainY)
    scores = kFoldCV(trainX, trainY, index, k=5)  # was: global modelIndex (bug)
    print("cross validation scores:", scores)
    if index == 1 or index == 2:
        # GBDT / random forest expose feature_importances_ directly.
        print("feature score ", pd.DataFrame(rf.feature_importances_))
    if index == 3:
        # xgboost: dump the importance plot next to the model.
        xgb.plot_importance(rf)
        plt.savefig("../model/importance3.jpg")
    saveSuffix = "../model/"
    joblib.dump(rf, saveSuffix + saveName)
    return rf
def predict(features, testPath, modelPath, resultPath, onehotFeature):
    """Load a saved model, predict travel times for the test window and
    write them to `resultPath` as '#'-separated rows without a header.

    NOTE(review): this function reads the module-level global ``trainPath``
    (it is not a parameter) -- it only works when called after the
    ``__main__`` block defines it.  The one-hot feature construction must
    mirror train() exactly, since column order is what the model expects.
    """
    rf = joblib.load(modelPath)
    trainDF = pd.read_csv(trainPath, dtype={'link_ID': str})
    testDF = pd.read_csv(testPath, dtype={'link_ID': str})
    # Concatenate so get_dummies yields the same dummy columns as training.
    totalDF = pd.concat([trainDF, testDF], axis=0)
    testX = totalDF[features]
    for catgoryFeature in onehotFeature:
        Onehot = pd.get_dummies(testX[catgoryFeature], prefix=catgoryFeature, sparse=True)
        testX = pd.concat([testX, Onehot], axis=1)
    # June onward is the prediction window; presumably these are exactly the
    # testDF rows, which keeps `ans` aligned with testDF below -- TODO confirm.
    testX = testX[testX["month"] >= 6]
    testX = testX.drop(onehotFeature, axis=1)
    print(testX.columns)
    ans = rf.predict(testX)
    # Undo the log1p transform applied to the label during training.
    ans = np.expm1(ans)
    # np.set_printoptions(threshold=np.nan)
    result = pd.DataFrame()
    result["link_ID"] = testDF["link_ID"]
    result["date"] = testDF["date"]
    result['time_interval'] = testDF['time_interval']
    result["travel_time"] = ans
    # submission = pd.read_csv("../data/submission2.txt", delimiter="#")
    # print(submission)
    # submission = pd.merge(submission, result)
    result.to_csv(resultPath, index=False, sep="#", header=False)
if __name__ == "__main__":
    # Feature selection (an earlier candidate feature list is kept below
    # for reference).
    # features = [
    #     # 'link_ID',
    #     # 'date', 'time_interval', 'travel_time',
    #     # 'in_links', 'out_links','link_class',
    #     'encode_link_ID',
    #
    #     'month',
    #     # 'day',
    #     'weekday', 'hour', 'minute',
    #     # 'morning_peak', 'evening_peak',
    #
    #     'length', 'width', 'in_links_num',
    #     'in_length_sum', 'in_length_diff', 'in_width_sum', 'in_width_diff',
    #     'out_links_num', 'out_length_sum', 'out_length_diff', 'out_width_sum',
    #     'out_width_diff',
    #     # #
    #     'mean', 'last_mean_10', 'last_mean_20', 'last_mean_30', 'median', 'min', 'max',
    #     'std', 'range',
    # ]
    # Active feature set: calendar features, road-link geometry/topology and
    # historical travel-time statistics.
    features = [
        # 'link_ID', 'date', 'time_interval', 'travel_time', 'link_class','in_links', 'out_links','satrt_date_time',
        'month',
        # 'day','start_date_time',
        'weekday',
        'is_weekend',
        #'holiday',
        'hour', 'minute',
        'morning_peak', 'evening_peak',
        'length', 'width',
        'in_links_num',
        'in_length_sum',
        'in_length_diff', 'in_width_sum', 'in_width_diff',
        'out_links_num',
        'out_length_sum', 'out_length_diff', 'out_width_sum', 'out_width_diff',
        'encode_link_ID',
        'last_mean_10',
        'last_mean_15', 'last_mean_20',
        'last_mean_5',
        'max', 'max_10',
        'mean', 'median',
        'median_10',
        'min',
        'min_10',
        'range', 'range_10', 'std', 'std_10',
        'trend_1', 'trend_2', 'trend_3']
    # Categorical columns that get one-hot encoded (and dropped) in
    # train()/predict().
    onehotFeature = [
        'month',
        # 'day',
        'encode_link_ID',
        #'is_weekend','holiday',
        'weekday', 'hour', 'minute',]
    # Model index: 1 = GBDT, 2 = random forest, 3 = xgboost,
    # 4 = AdaBoost regression, 5 = multilayer perceptron,
    # 6 = k-NN regression, 7 = LightGBM, 8 = stacking ensemble
    modelIndex = 3
    testPath = "../data/testB_2.csv"
    trainPath = "../data/trainB_2.csv"
    modelPath = "../model/xgboost2.m"
    resultPath = "../result/result2.txt"
    rf = train(features, trainPath, testPath, modelIndex, modelPath, onehotFeature)
    predict(features, testPath, modelPath, resultPath, onehotFeature)
| [
"zhangxu0307@163.com"
] | zhangxu0307@163.com |
ff1f419bd2388e15eb3e5c51d0723e4ef35c6954 | bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75 | /Bit Manipulation/reverse_bits.py | ce72381592a8a7d418c62ecee2a3953ffb737c86 | [] | no_license | harvi7/Leetcode-Problems-Python | d3a5e8898aceb11abc4cae12e1da50061c1d352c | 73adc00f6853e821592c68f5dddf0a823cce5d87 | refs/heads/master | 2023-05-11T09:03:03.181590 | 2023-04-29T22:03:41 | 2023-04-29T22:03:41 | 222,657,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | class Solution:
def reverseBits(self, n: int) -> int:
res = 0
for i in range(32):
res += n & 1
n = n >> 1
if i != 31:
res = res << 1
return res | [
"iamharshvirani7@gmail.com"
] | iamharshvirani7@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.