blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
be345fd726693b30f34806cf9fd8a3132ed73a97
|
73a6fa696dd8934760f3db4881c22f0f25f20a2c
|
/hovatrace.py
|
cb87206b17a74f7400d8b669f9caa3219265cd91
|
[
"MIT"
] |
permissive
|
jeweg/hovatrace
|
4bc4b769b573cd6dfda038b0827eab4537363177
|
80798c612a5e935063786e82c93ca7c72437ea50
|
refs/heads/main
| 2023-01-19T19:38:16.586911
| 2020-12-03T14:30:52
| 2020-12-03T14:30:52
| 315,776,747
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,812
|
py
|
#!/usr/bin/env python3
# MIT License
# Copyright (c) 2020 Jens Weggemann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import namedtuple
import re
from enum import Enum, auto
# All registers tracked per trace row, in parse/display order.
# O1e/O2e carry the *expected* output values the trace prints next to O1/O2.
REGISTERS = ['ALU', 'A', 'B', 'C', 'D', 'F', 'W', 'I1', 'I2', 'O1', 'O1e', 'O2', 'O2e', 'PC']
# One parsed trace row; every field defaults to 0 so a synthetic "previous"
# line can be compared against the first real line without special-casing.
TraceLine = namedtuple('TraceLine', REGISTERS, defaults=[0]*len(REGISTERS))
# Per-register change flags relative to the preceding trace line.
ChangedRegisters = namedtuple('ChangedRegisters', REGISTERS, defaults=[False]*len(REGISTERS))
#====================================================================
# Helpers
def dec_to_2scompl(num, num_bits):
    """Encode a signed integer as an unsigned num_bits-wide two's-complement value.

    Values outside the representable range are clamped to the nearest bound.
    (The original only clamped the positive side; a value below
    -2**(num_bits-1) could fall through and produce a negative/aliased result.)
    """
    lo = -(1 << (num_bits - 1))
    hi = (1 << (num_bits - 1)) - 1
    n = min(max(int(num), lo), hi)
    if n < 0:
        n = (1 << num_bits) + n
    return n
def dec_from_2scompl(num, num_bits):
    """Decode a num_bits-wide two's-complement value into a signed integer.

    Masks off anything above num_bits first (the trace sometimes shows
    sign-extended 64-bit-looking values), then sign-extends.
    Removed the unused local `ceiling` from the original.
    """
    n = int(num) & ((1 << num_bits) - 1)
    if n >= (1 << (num_bits - 1)):
        n -= (1 << num_bits)
    return n
#====================================================================
# Trace log parsing
def parse_trace_file(file_name):
    """Parse the HOVALAAG HTML trace log into parallel lists.

    Returns (trace_lines, changed_registers): one TraceLine per executed
    instruction and, for each, a ChangedRegisters of per-register change
    flags relative to the previous line.
    """
    def parse_trace_number(n, R):
        # Parses a number constant from the trace. Interpretation depends on the specific register.
        if n is None: return None
        n = int(n, 16) # Parse hex
        if R == 'PC': # PC is unsigned
            pass
        elif R == 'ALU': # ALU is 16-bit 2s-complement signed
            n = dec_from_2scompl(n, 16)
        else: # Everything else is 12-bit 2s-complement signed
            # The hovalaag trace shows W as if it was a 64-bit value when it has a negative value.
            # This is contrary to the documentation. Copying to OUT1 drops the upper bits again.
            # Is this a bug? Anyway, the helper function will mask out all but the lower 12 bits which
            # fixed this.
            n = dec_from_2scompl(n, 12)
        return n
    # One table row per trace step; entries look like "A=FFF" or "O1=5(6)"
    # where the parenthesized value is the expected output.
    re_log_line = re.compile(r'<tr.*?</tr>')
    re_log_line_entry = re.compile(r'(ALU|I1|I2|O1|O2|PC|[0-9a-fA-F]+)')
    re_log_line_entry = re.compile(r'(ALU|I1|I2|O1|O2|PC|[ABCDFW])[=:]([0-9a-fA-F]+)(?:\(([0-9a-fA-F]+)\))?')
    trace_lines = []
    changed_registers = []
    # Synthetic all-zero previous line so the first real line gets change flags.
    prev_trace_line = TraceLine()
    with open(file_name) as f:
        counter = -1
        for line in f.readlines():
            counter += 1
            match = re.match(re_log_line, line)
            # Collect in dict first b/c the TraceLine namedtuple is immutable. Not terribly elegant code.
            values = {k:None for k in REGISTERS}
            if match:
                line = match.group(0)
                for match in re.finditer(re_log_line_entry, line):
                    R, reg_value, expected = match.groups()[:3]
                    values[R] = parse_trace_number(reg_value, R)
                    # Expected values only accompany the two output registers.
                    if R == 'O1': values['O1e'] = parse_trace_number(expected, R)
                    elif R == 'O2': values['O2e'] = parse_trace_number(expected, R)
                    elif R == 'PC': break # terminate before the instruction parts.
                trace_line = TraceLine(*[values[R] for R in REGISTERS])
                trace_lines.append(trace_line)
                changed_registers.append(ChangedRegisters(*[values[R] != getattr(prev_trace_line, R) for R in REGISTERS]))
                prev_trace_line = trace_line
    return (trace_lines, changed_registers)
#====================================================================
# Code parsing
CodeLine = namedtuple('CodeLine', ['line_num', 'text'])
# line: CodeLine, comments: CodeLine list, label: CodeLine
Statement = namedtuple('Statement', ['pc_line_num', 'line', 'comments', 'labels'])


def parse_code_file(file_name, strip_comments=None, comments_across_empty_lines=None):
    """Parse an assembly source file into a list of Statements.

    Each Statement carries its program-counter index, the code line itself,
    and the comment/label lines that preceded it.

    :param file_name: path of the source file to read.
    :param strip_comments: drop ';'-comments from code lines. Defaults to the
        global CLI `args.strip_comments` (backward compatible with the old
        implicit global dependency).
    :param comments_across_empty_lines: keep accumulated comment lines across
        blank lines. Defaults to the global CLI setting.
    """
    # Fall back to the CLI flags only when the caller did not decide.
    if strip_comments is None:
        strip_comments = args.strip_comments
    if comments_across_empty_lines is None:
        comments_across_empty_lines = args.comments_across_empty_lines
    re_line_remove_trailing_ws = re.compile(r'^(.*?)[\s\t\n\r]*$')
    re_line_strip_comments = re.compile(r'^(.*?);.*$')
    re_line_empty = re.compile(r'^\s*$')
    re_code_comment_line = re.compile(r'^\s*;.*$')
    re_code_label = re.compile(r'^\s*([^\s]+):.*$')
    statements = []
    curr_comments = []
    curr_labels = []
    file_line_num = 0
    with open(file_name) as f:
        for line in f.readlines():
            file_line_num += 1
            # Optionally strip any comments. Line may become empty.
            if strip_comments:
                match = re_line_strip_comments.match(line)
                if match: line = match.group(1)
            # Remove any trailing WS
            match = re_line_remove_trailing_ws.match(line)
            line = match.group(1)
            # Handle empty line
            match = re_line_empty.match(line)
            if match:
                if not comments_across_empty_lines:
                    # Forget comments but keep a label
                    curr_comments = []
                continue
            code_line = CodeLine(file_line_num, line)
            match = re_code_comment_line.match(line)
            if match:
                curr_comments.append(code_line)
                continue
            match = re_code_label.match(line)
            if match:
                curr_labels.append(code_line)
                continue
            # Everything else must be a statement.
            # We commit it to the result and reset relevant parsing state.
            statements.append(Statement(len(statements), code_line, curr_comments, curr_labels))
            curr_labels = []
            curr_comments = []
    return statements
#====================================================================
# Syntax highlighting machinery
class TokenTag(Enum):
COMMENT = auto()
LABEL = auto()
CONSTANT = auto()
JUMP = auto()
OPERATOR = auto()
FUNCTION = auto()
DSTREGISTER = auto()
SRCREGISTER = auto()
WHITESPACE = auto()
UNKNOWN = auto()
class Tokenizer(object):
def __init__(self):
self.rules = [
(TokenTag.COMMENT, re.compile(r';.*$')),
(TokenTag.LABEL, re.compile(r'[^\s]+:')),
(TokenTag.DSTREGISTER, re.compile(r'([ABCDFW]|IN[12]|OUT[12])(?==)')),
(TokenTag.JUMP, re.compile(r'(JMP[TF]?|DECNZ)\s+([^\s,]+)')),
(TokenTag.FUNCTION, re.compile(r'(ZERO|NEG|POS|DEC)|[()]')),
(TokenTag.SRCREGISTER, re.compile(r'([ABCDFW]|IN[12]|OUT[12])')),
(TokenTag.CONSTANT, re.compile(r'-?(([0-9]+)|(\$[0-9A-F]+))')),
(TokenTag.OPERATOR, re.compile(r'[+->|&^~]')),
(TokenTag.WHITESPACE, re.compile(r'\s+')),
(TokenTag.UNKNOWN, re.compile(r'.')),
]
def tokenize_line(self, text):
tokens = []
last_token = None
pos = 0
while pos < len(text):
for tag, regexp in self.rules:
match = regexp.match(text, pos=pos)
if match:
# Some token types are collapsed right away.
if last_token and last_token[0] == tag and tag in [TokenTag.WHITESPACE, TokenTag.UNKNOWN]:
last_token[1] += match.group(0)
else:
token = [tag, match.group(0)]
tokens.append(token)
last_token = token
pos = match.end(0)
break
return tokens
#====================================================================
if __name__ == '__main__':
    # Script-only imports; the module above stays importable without them.
    import sys
    import argparse
    import math
    from pathlib import Path
    parser = argparse.ArgumentParser(description='generates annotated HOVALAAG traces.')
    parser.add_argument('-c', '--code', metavar='FILE', required=True, help='path to the code input file, required')
    parser.add_argument('-l', '--log', metavar='FILE', default='log.html', help='path to the trace input file, defaults to "log.html"')
    parser.add_argument('-o', '--output', metavar='FILE', help='path to the HTML output file, stdout is used if omitted')
    parser.add_argument('-f', '--force', action='store_true', help='overwrite the output file if it exists')
    parser.add_argument('-n', '--numbers', choices=['s', 'u', 'h', 'b'], default='s', help='display type for numbers: [s]igned decimal, [u]nsigned decimal, [h]exadecimal, or [b]inary, defaults to s')
    parser.add_argument('--strip-comments', action='store_true', help='strip all code comments')
    parser.add_argument('--comments-across-empty-lines', action='store_true', help='if specified, empty lines will not disassociate comment lines with the statement succeeding them')
    parser.add_argument('--always-print-comment-lines', action='store_true', help='if specified, comment lines will be printed for a statement every time, by default they are only printed the first time')
    parser.add_argument('--theme', choices=['dark', 'light'], help='the color theme, defaults to dark')
    args = parser.parse_args()
    def err(*argv):
        # Print to stderr and abort with a non-zero exit code.
        print(*argv, file=sys.stderr)
        sys.exit(-1)
    # NOTE(review): argparse already enforces required=True for --code, so this
    # check appears unreachable — confirm before removing.
    if not args.code:
        err("Must specify a code file!")
#====================================================================
# Output helpers
def as_hex(n, num_bits):
    """Format n as '$'-prefixed uppercase hex, zero-padded to num_bits/4 digits."""
    digits = '{:X}'.format(n)
    return '$' + digits.rjust(num_bits // 4, '0')
def as_binary(n, num_bits):
    """Format n as 'b'-prefixed binary, zero-padded to num_bits digits."""
    digits = '{:b}'.format(n)
    return 'b' + digits.rjust(num_bits, '0')
def make_number_text(n, num_bits):
    """Render a signed value for display according to args.numbers.

    Mode 's' returns the int unchanged; 'u'/'h'/'b' return strings built from
    the two's-complement encoding of n.

    BUG FIX: the 'u' branch previously evaluated `n_2sc + 'u'`, i.e.
    int + str, which raised TypeError whenever -n u was selected.
    """
    if args.numbers == 's':
        return n
    n_2sc = dec_to_2scompl(n, num_bits)
    if args.numbers == 'u':
        return '{}u'.format(n_2sc)
    elif args.numbers == 'h':
        return as_hex(n_2sc, num_bits)
    elif args.numbers == 'b':
        return as_binary(n_2sc, num_bits)
def make_number_tooltip(n, num_bits):
    """Build the tooltip text showing n in all four representations."""
    encoded = dec_to_2scompl(n, num_bits)
    parts = [
        '{}'.format(n),
        '{}u'.format(encoded),
        as_hex(encoded, num_bits).upper(),
        as_binary(encoded, num_bits),
    ]
    return ' '.join(parts)
#====================================================================
# Output HTML
# All colors a theme must provide; field names double as CSS template keys.
ColorTheme = namedtuple("ColorTheme", [
    # page / panel backgrounds
    'bg',
    'bg_regstate',
    'bg_regstate_handle',
    'bg_changedreg',
    # structural markers
    'jump_separator',
    'wrong_output_border',
    'correct_output_border',
    # syntax-highlighting colors
    'syn_dstregister',
    'syn_srcregister',
    'syn_operator',
    'syn_outputline',
    'syn_linenumber',
    'syn_comment',
    'syn_label',
    'syn_jump',
    'syn_function',
    'syn_constant',
    # click-to-cycle line highlight backgrounds
    'line_marker1',
    'line_marker2',
    'line_marker3',
    # tooltip colors
    'bg_tooltip',
    'fg_tooltip',
    'tooltip_elem_bg' ])
# Based on light style from https://github.com/rakr/vim-two-firewatch
LightTheme = ColorTheme(
bg='#FAF8F5',
bg_regstate='#eAe8e5',
bg_regstate_handle='#0003',
bg_changedreg='#ffffff',
jump_separator='#2D2107',
wrong_output_border='#ff9000',
correct_output_border='#20f030',
syn_dstregister='#718ECD',
syn_srcregister='#718ECD',
syn_operator='#896724',
syn_outputline='#E4DBD7',
syn_linenumber='#c2aEa7',
syn_comment='#B6AD9A',
syn_label='#2D2107',
syn_jump='#0A5289',
syn_function='#896724',
syn_constant='#0A5289',
line_marker1='#FDF962',
line_marker2='#B6FB8B',
line_marker3='#FCC8B8',
bg_tooltip='#0A5289d0',
fg_tooltip='#FAF8F5',
tooltip_elem_bg='#ffe04850')
# Based on dark style from https://github.com/rakr/vim-two-firewatch
DarkTheme = ColorTheme(
bg='#282c34',
bg_regstate='#11151D',
bg_regstate_handle='#000000',
bg_changedreg='#32363E',
jump_separator='#8E9DAE',
wrong_output_border='#e02000',
correct_output_border='#20d010',
syn_dstregister='#D6E9FF',
syn_srcregister='#D6E9FF',
syn_operator='#8EBCF2',
syn_outputline='#3D4854',
syn_linenumber='#616C78',
syn_comment='#55606C',
syn_label='#D5E8FD',
syn_jump='#DE6A6F',
syn_function='#C4AB9A',
syn_constant='#eaAE9D',
line_marker1='#181C24',
line_marker2='#0F4909',
line_marker3='#702714',
bg_tooltip='#0A5289ff',
fg_tooltip='#FAF8F5',
tooltip_elem_bg='#ff803050')
    # Dark is the default theme; only an explicit --theme light overrides it.
    if args.theme == 'light':
        color_theme = LightTheme
    else:
        color_theme = DarkTheme
    if args.output and Path(args.output).is_file() and not args.force:
        err("Output file exists, use --force to overwrite!")
    # NOTE(review): when --output is given the file is never explicitly
    # closed; process exit flushes it, but an explicit close/with would be tidier.
    output_file = open(args.output, "w") if args.output else sys.stdout
    def pr(*argv, **kwargv):
        # print() that always targets the chosen output stream.
        print(*argv, **kwargv, file=output_file)
pr('''\
<html>
<head>
<style>
body {{
font-family: monospace;
background-color: {bg};
position: relative;
left: 0;
top: 0;
padding-top: 2ch;
}}
#code {{
white-space: pre;
}}
#regstate {{
white-space: pre;
background-color: {bg_regstate};
opacity: .9;
position: absolute;
left: 50ch;
top: 2ch;
padding-left: 3px;
}}
#regstate_handle {{
width: 1ch;
height: 100%;
text-align: center;
left: 0;
top: 0;
cursor: col-resize;
background: {bg_regstate_handle};
position: absolute;
}}
.reg_unchanged {{
opacity: 0.7;
}}
.reg_changed {{
background-color: {bg_changedreg};
}}
.reg_value_changed {{
}}
.reg_value_unchanged {{
}}
.wrong_output_value {{
box-shadow: 0px 0px 0px 2px {wrong_output_border};
display: inline-block;
z-index: 100;
}}
.correct_output_value {{
box-shadow: 0px 0px 0px 2px {correct_output_border};
display: inline-block;
z-index: 100;
}}
.code_line {{
}}
.linebg0 {{
}}
.linebg1 {{
background-color: {line_marker1};
}}
.linebg2 {{
background-color: {line_marker2};
}}
.linebg3 {{
background-color: {line_marker3};
}}
.syn_outputline {{
color: {syn_outputline};
cursor: hand;
}}
.syn_linenumber {{
color: {syn_linenumber};
cursor: hand;
}}
.syn_comment {{
color: {syn_comment};
font-style: italic;
}}
.syn_label {{
color: {syn_label};
font-weight: bold;
}}
.syn_dstregister {{
color: {syn_dstregister};
}}
.syn_srcregister {{
color: {syn_srcregister};
}}
.syn_jump {{
color: {syn_jump};
}}
.syn_function {{
color: {syn_function};
}}
.syn_constant {{
color: {syn_constant};
}}
.syn_operator {{
color: {syn_operator};
}}
.syn_jump_not_taken {{
text-decoration: line-through;
}}
.jump_separator {{
border-top: 1px solid;
border-color: {jump_separator};
background-color: transparent;
opacity: 0.5;
width: 100%;
position: absolute;
z-index: 5;
}}
/* Tooltip CSS from https://www.w3schools.com/css/css_tooltip.asp */
.tooltip {{
position: relative;
display: inline-block;
z-index: 100;
}}
.tooltip:hover {{
background-color: {tooltip_elem_bg};
}}
.tooltip .tooltiptext {{
visibility: hidden;
pointer-events: none; /* invisible to hover */
background-color: {bg_tooltip};
color: {fg_tooltip};
text-align: center;
border-radius: 3px;
padding: 5px 5px;
bottom: 110%;
left: -100%;
position: absolute;
z-index: 100;
filter: drop-shadow(4px 3px 2px #00000080);
}}
.tooltip:hover .tooltiptext {{
visibility: visible;
}}
</style>
<script>
function line_click(the_id) {{
let elem = document.getElementById(the_id);
let cl = elem.classList;
const CYCLE = ["linebg0", "linebg1", "linebg2", "linebg3"];
for (let i = 0; i < CYCLE.length; ++i) {{
const next_i = (i + 1) % CYCLE.length;
if (cl.contains(CYCLE[i])) {{
cl.remove(CYCLE[i]);
cl.add(CYCLE[next_i]);
return;
}}
}}
cl.add(CYCLE[1]);
}}
</script>
</head>
<body><div id="code">\
'''.format(**color_theme._asdict()), end='')
#====================================================================
# Output code
def css_for_tag(tag):
    """Translate a TokenTag member into its CSS class-name suffix."""
    name = tag.name
    return name.lower()
def padded_line_num(num, digits):
    """Right-align num in a field of `digits` characters (never truncates)."""
    return str(num).rjust(digits)
# Parse both inputs and prepare rendering state.
trace_lines, changed_registers = parse_trace_file(args.log)
statements = parse_code_file(args.code)
tokenizer = Tokenizer()
output_line_digits = 4 # Currently a guess.. we'd have to collect output lines beforehand and prepend afterwards.
output_line_counter = 0
# Width of the file-line-number column, sized from the largest file line
# number actually referenced (the last statement's). The previous
# math.ceil(math.log(len(statements), 10)) crashed on an empty file, was one
# digit short for exact powers of ten, and measured the statement count
# rather than the file line numbers being printed.
file_line_num_digits = len(str(statements[-1].line.line_num)) if statements else 1
def is_jump_taken(token, trace_line):
    """Report whether a (possibly conditional) jump token was actually taken,
    judged by the register state of the line *before* the jump executed.
    Non-jump tokens and unconditional jumps count as taken."""
    m = re.match(r'(JMPT|JMPF|DECNZ)\s', token)
    if m is None:
        return True
    op = m.group(1)
    if op == 'JMPT':
        return trace_line.F != 0
    if op == 'JMPF':
        return trace_line.F == 0
    # DECNZ: taken while the C counter has not reached zero.
    return trace_line.C != 0
def output_line(line, trace_line=None, previous_trace_line=None):
    """Emit one source CodeLine as an HTML span with syntax highlighting.

    :param line: the CodeLine to render.
    :param trace_line: trace state after this line executed (unused here,
        kept for interface stability).
    :param previous_trace_line: trace state *before* this line executed;
        used to strike through jumps that were not taken.
    Side effects: increments the global output_line_counter and writes HTML via pr().
    """
    global output_line_counter
    output_line_counter += 1
    pr('<span class="code_line" id="line{}" onclick="line_click(id)">'.format(output_line_counter), end='')
    pr('<span class="syn_outputline">{}</span> '.format(padded_line_num(output_line_counter, output_line_digits)), end='')
    pr('<span class="syn_linenumber">{}</span> '.format(padded_line_num(line.line_num, file_line_num_digits)), end='')
    tokens = tokenizer.tokenize_line(line.text)
    for tok_tag, tok_value in tokens:
        if tok_tag is TokenTag.CONSTANT:
            # Convert constant text and add tooltip.
            if tok_value.startswith('$'):
                num = int(tok_value[1:], 16)
            elif tok_value.startswith('-$'):
                # BUG FIX: the original dropped the minus sign for negative
                # hex constants; negate the parsed magnitude.
                num = -int(tok_value[2:], 16)
            else:
                num = int(tok_value)
            # (Removed an unused `num_2sc` computation from the original.)
            pr('<div class="tooltip syn_{}">{}<span class="tooltiptext">{}</span></div>'.format(
                css_for_tag(tok_tag), make_number_text(num, 12), make_number_tooltip(num, 12)), end='')
        else:
            extra_class = ''
            if tok_tag is TokenTag.JUMP and previous_trace_line:
                # We can be extra smart about this!
                if not is_jump_taken(tok_value, previous_trace_line):
                    extra_class = " syn_jump_not_taken"
            pr('<span class="syn_{}{}">{}</span>'.format(css_for_tag(tok_tag), extra_class, tok_value), end='')
    pr('</span>')
# NOTE(review): this constant appears unused; --always-print-comment-lines
# controls the behavior instead. Kept for compatibility.
PRINT_COMMENT_BLOCKS_ONLY_ONCE = True
comment_blocks_already_printed_for_pc = set()
# One entry per printed output line: (trace_line, changed_regs) for code
# lines, None for comment/label lines (keeps the register panel aligned).
displayed_trace_info = []
previous_pc = None
previous_trace_line = None
for trace_line, changed_regs in zip(trace_lines, changed_registers):
    pc = trace_line.PC
    statement = statements[pc]
    # BUG FIX: was `if previous_pc and ...`, which skipped the separator
    # whenever the previous instruction sat at PC 0.
    if previous_pc is not None and pc != previous_pc + 1:
        # Deviating program counter -> some jump occured.
        pr('<div class="jump_separator"></div>', end='')
    if not args.always_print_comment_lines and pc in comment_blocks_already_printed_for_pc:
        # Don't show comment again, but do show associated labels
        for label in statement.labels:
            output_line(label)
            displayed_trace_info.append(None)
    else:
        comment_blocks_already_printed_for_pc.add(pc)
        if not statement.comments:
            for label in statement.labels:
                output_line(label)
                displayed_trace_info.append(None)
        else:
            # Display associated comments with label mixed in
            for line in sorted(statement.labels + statement.comments, key=lambda elem: elem.line_num):
                output_line(line)
                displayed_trace_info.append(None)
    output_line(statement.line, trace_line, previous_trace_line)
    displayed_trace_info.append((trace_line, changed_regs))
    previous_pc = pc
    previous_trace_line = trace_line
#====================================================================
# Output register state
pr('\n\n<div id="regstate"><div id="regstate_handle"></div>', end='')
def properly_padded(n, R):
    """Center register R's rendered value in a fixed-width column whose
    width depends on the display mode and on the register (ALU is wider,
    the flag F narrower)."""
    # (ALU width, F width, width for every other register) per display mode.
    widths_by_mode = {
        's': (6, 1, 5),
        'u': (7, 2, 6),
        'h': (4, 1, 3),
        'b': (16, 1, 12),
    }
    alu_w, f_w, other_w = widths_by_mode.get(args.numbers, (6, 6, 6))
    max_chars = alu_w if R == 'ALU' else f_w if R == 'F' else other_w
    # Roughly center the value (extra padding goes to the right), same as
    # the original half-padding arithmetic.
    return str(n).center(max_chars)
# Render the floating register-state panel: one text row per printed code
# line (blank rows keep comment/label lines aligned with the code panel).
for thing in displayed_trace_info:
    if not thing:
        pr()
        continue
    trace_line, changed_regs = thing
    printed_line = ' '
    for R in ['A', 'B', 'C', 'D', 'F', 'W', 'ALU', 'I1', 'I2', 'O1', 'O2']:
        reg_value = getattr(trace_line, R)
        reg_changed = getattr(changed_regs, R)
        # Registers absent from this trace row are simply skipped.
        if reg_value is None: continue
        overall_css_class = 'reg_{}changed'.format('un' if not reg_changed else '')
        value_css_class = 'reg_value_{}changed'.format('un' if not reg_changed else '')
        # ALU is 16-bit, the flag F is 1-bit, everything else is 12-bit.
        reg_bits = 16 if R == 'ALU' else 1 if R == 'F' else 12
        exp_value = None
        # Output registers carry an expected value to compare against.
        if R in ['O1', 'O2']:
            exp_value = getattr(trace_line, 'O1e' if R == 'O1' else 'O2e')
        if exp_value is None:
            # Plain register: value with a tooltip of all representations.
            printed_line += '''\
<div class="tooltip {occ}">\
<span class="syn_dstregister ">{R}</span>\
<span class="syn_operator ">=</span>\
<span class="syn_constant {vcc}">{rv}</span><span class="tooltiptext">{tt}</span></div> '''.format(
                occ=overall_css_class,
                vcc=value_css_class,
                R=R,
                rv=properly_padded(make_number_text(reg_value, reg_bits), R),
                tt=make_number_tooltip(reg_value, reg_bits))
        else:
            # Output register: frame it green/red depending on whether the
            # actual value matches the expected one, and show both.
            printed_line += '''\
<div class="{wrong_or_correct}">\
<div class="tooltip {occ}">\
<span class="syn_dstregister ">{R}</span>\
<span class="syn_operator ">=</span>\
<span class="syn_constant {vcc}">{rv1}</span><span class="tooltiptext">{tt1}</span>\
</div>\
<div class="tooltip {occ}">\
<span class="syn_constant {vcc}">({rv2})</span><span class="tooltiptext">{tt2}</span>\
</div>\
</div> '''.format(
                wrong_or_correct='wrong_output_value' if exp_value != reg_value else 'correct_output_value',
                occ=overall_css_class,
                vcc=value_css_class,
                R=R,
                rv1=properly_padded(make_number_text(reg_value, reg_bits), R),
                tt1=make_number_tooltip(reg_value, reg_bits),
                rv2=make_number_text(exp_value, reg_bits),
                tt2=make_number_tooltip(exp_value, reg_bits))
    pr(printed_line)
pr('''\
</div>
</div>
<script>
// From https://www.w3schools.com/howto/howto_js_draggable.asp
dragElement(document.getElementById("regstate"));
function dragElement(elmnt) {
var pos1 = 0, pos2 = 0, pos3 = 0, pos4 = 0;
if (document.getElementById(elmnt.id + "_handle")) {
/* if present, the header is where you move the DIV from:*/
document.getElementById(elmnt.id + "_handle").onmousedown = dragMouseDown;
} else {
/* otherwise, move the DIV from anywhere inside the DIV:*/
elmnt.onmousedown = dragMouseDown;
}
function dragMouseDown(e) {
e = e || window.event;
e.preventDefault();
// get the mouse cursor position at startup:
pos3 = e.clientX;
pos4 = e.clientY;
document.onmouseup = closeDragElement;
// call a function whenever the cursor moves:
document.onmousemove = elementDrag;
}
function elementDrag(e) {
e = e || window.event;
e.preventDefault();
// calculate the new cursor position:
pos1 = pos3 - e.clientX;
pos2 = pos4 - e.clientY;
pos3 = e.clientX;
pos4 = e.clientY;
// set the element's new position:
//elmnt.style.top = (elmnt.offsetTop - pos2) + "px";
elmnt.style.left = (elmnt.offsetLeft - pos1) + "px";
}
function closeDragElement() {
/* stop moving when mouse button is released:*/
document.onmouseup = null;
document.onmousemove = null;
}
}
</script>
</body>
</html>''')
|
[
"jens@weggemann.de"
] |
jens@weggemann.de
|
258b822919f40b51fca5ac8df50a6ce96448a09d
|
f50bfc3130937c6666c6041641818e841bcc3c4a
|
/debug/um_translate_binary.py
|
d8966c46c74050c1c82219faa23cac4e434ce61e
|
[] |
no_license
|
RagePly/icfp-06
|
c067c43644cc282b28d321f46b667491572185a3
|
1d1c64e439bb69572bba98ef75a54a3489699c20
|
refs/heads/master
| 2023-01-21T16:59:59.438177
| 2020-12-04T16:12:40
| 2020-12-04T16:12:40
| 319,267,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
# Read a file of decimal integers (one per line) and write each value's
# Python binary representation (e.g. '0b101') to out/binaryOutput.txt.
fileName = input("Filepath: ")
with open(fileName, "r") as f:
    tmp = f.readlines()
with open("out/binaryOutput.txt", "w") as f:
    for line in tmp:
        # bin() already returns a str — the original's extra str() wrapper
        # was redundant and has been removed.
        f.write("{}\n".format(bin(int(line.strip()))))
|
[
"the.hugo.lom@gmail.com"
] |
the.hugo.lom@gmail.com
|
8cf8195d1def3ae3a4d1f092592eb10cafb46799
|
b603733d0e2033571f978c204cd27ccb468108b7
|
/src/wai/bynning/extraction/_Extractor.py
|
f132cb6097aea369435a95f713405574b2e0614f
|
[
"MIT"
] |
permissive
|
waikato-datamining/wai-bynning
|
38095edbecd7c65e13417692ad42e7180ea900c3
|
01b7368d4dc1094651d7cbe067576dfb3756a1d3
|
refs/heads/master
| 2020-07-07T20:30:56.565145
| 2020-03-31T03:22:41
| 2020-03-31T03:22:41
| 203,469,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 857
|
py
|
from abc import abstractmethod
from typing import Generic, Iterator, Iterable, TypeVar
InputType = TypeVar("InputType")
OutputType = TypeVar("OutputType")


class Extractor(Generic[InputType, OutputType]):
    """
    Interface for classes which extract data of one type from data of another type.
    """
    @abstractmethod
    def extract(self, item: InputType) -> OutputType:
        """
        Extracts data from a given object.

        :param item: The object to extract from.
        :return: The extracted data.
        """
        pass

    def extract_all(self, items: Iterable[InputType]) -> Iterator[OutputType]:
        """
        Extracts data from all the given items, lazily.

        :param items: The items to extract from.
        :return: An iterator of extracted data.
        """
        return (self.extract(item) for item in items)
|
[
"coreytsterling@gmail.com"
] |
coreytsterling@gmail.com
|
cc2965e2337b8c65a2a78976c6e2ade58df647c6
|
88ff16e29df9ad953584b9ab065077c95f60e350
|
/test/HTTP.py
|
2c3f7b9c1140abab322d889b8ef9e9282c663e92
|
[
"MIT"
] |
permissive
|
Mon-ius/weapp_api
|
e42ba47342eb19d91bf6d1e97c1e8ef9a170a402
|
3e5b5fcfaee7ec01d093cbe5769b599ce4244c24
|
refs/heads/master
| 2021-06-28T22:40:30.109765
| 2019-04-16T01:08:22
| 2019-04-16T01:08:22
| 146,171,294
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 785
|
py
|
import base64
import sys
import requests as r
from avatar_generator import Avatar
from faker import Faker
from requests.auth import HTTPBasicAuth
# Candidate /res endpoints — presumably local dev, production, and a Heroku
# mirror of the same service; confirm which are still live.
url_01 = "http://127.0.0.1:5000/res"
url_02 = "https://weapi.monius.top/res"
url_03 = "https://xideas.herokuapp.com/res"
# Index into this list is chosen interactively in __main__.
urls = [url_01, url_02, url_03]
def get_test(res):
    """GET the endpoint and print its JSON body."""
    response = r.get(res)
    print(response.json())


def post_test(res):
    """POST a fixed JSON payload and print the JSON reply."""
    response = r.post(res, json={"id": "233"})
    print(response.json())


def put_test(res):
    """PUT to the endpoint and print the JSON reply."""
    response = r.put(res)
    print(response.json())


def delete_test(res):
    """DELETE the endpoint and print the JSON reply."""
    response = r.delete(res)
    print(response.json())
if __name__ == '__main__':
    # Interactively pick an endpoint, then exercise every HTTP verb on it.
    n = input("Input number for url : ")
    url = urls[int(n)]
    print(url)
    fucs = [get_test, post_test, put_test, delete_test]
    # (Removed the unused leftover `xxx = [1, 1, 2, 3]` from the original.)
    for f in fucs:
        f(url)
|
[
"m1105930979@gmail.com"
] |
m1105930979@gmail.com
|
66297d4af53a4f0fb39520ef4132934e2585601f
|
c6214ca3fd69299e2ea9c2d2d77b32b6f503a91c
|
/AtCoder/ABC/170's/171/171B.py
|
aa02ff2ec328654fa874c3d58ec9965b7acb9a94
|
[] |
no_license
|
sAto-3/AtCoder
|
efe024b1ca000c1b70471af19f21407f70ba94cb
|
d5ef48bee11371cebffd04ef43d1d4b2027842ab
|
refs/heads/master
| 2023-08-19T17:05:24.759779
| 2020-07-12T13:40:05
| 2020-07-12T13:40:05
| 279,054,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 109
|
py
|
# B - AtCoder ABC171: pick k fruits with minimal total price.
n, k = map(int, input().split())
p = list(map(int, input().split()))
p.sort()
# BUG FIX: removed a stray debug `print(p)` — the judge expects only the
# single sum line, so the extra output caused a wrong-answer verdict.
print(sum(p[:k]))
|
[
"17214@fukushima.kosen-ac.jp"
] |
17214@fukushima.kosen-ac.jp
|
c9ea740e4e72b76cedee92076d445f7e738f14f7
|
8004831758776360a421b6cb458b48a120d1586e
|
/chapter_10/greet_user.py
|
52e505301fe294fe854aedcd6d32515c6337c950
|
[] |
no_license
|
scott-gordon72/python_crash_course
|
025d15952d7372c2a40780b7038008f9b39c42d2
|
605f1f7e8d90534bd9cb63f44098d95dec739e50
|
refs/heads/master
| 2022-06-04T19:29:40.142291
| 2020-04-25T01:13:48
| 2020-04-25T01:13:48
| 255,947,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
import json

# Load the previously stored username and greet the returning user.
filename = 'username.json'
with open(filename) as stored:
    username = json.load(stored)
print(f"Welcome back, {username}")
|
[
"scott.gordon72@outlook.com"
] |
scott.gordon72@outlook.com
|
59ff58e79151fd4fe156f65b1912bd72fbb537f8
|
72d7e53e53d0fd60a5dbc6ece5e6c3a19a1cddc8
|
/Scripts/current-glyph/nudge.py
|
ca453d7f06e1b8cf41f6f64cabfb5fa679cf69ed
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
miguelsousa/hTools2
|
d814d92f79d5f5bdaab16415626dd1f47692cfa9
|
eab400677c1b21bb2519a7354a142e167c2b39ba
|
refs/heads/master
| 2021-01-15T18:36:09.522992
| 2013-09-27T13:48:05
| 2013-09-27T13:48:05
| 6,453,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
# [h] nudge selected points
'''Nudge selected points by a given amount of units, with different modes possible.'''
# debug
import hTools2
# NOTE(review): bare reload() is a Python 2 builtin — this script predates
# Python 3; forces a re-import while developing the library.
reload(hTools2)
if hTools2.DEBUG:
    import hTools2.dialogs.glyph
    reload(hTools2.dialogs.glyph)
# import
from hTools2.dialogs.glyph import nudgePointsDialog
# run
nudgePointsDialog()
|
[
"gustavo@hipertipo.com"
] |
gustavo@hipertipo.com
|
e5a45c2582c32769a4022236b9100eba97fe856d
|
754b18bc08e0417722ba0efa2e4333ecb67b8268
|
/setup.py
|
181874a836193005d29daf6feda19f469dc12ed2
|
[
"MIT"
] |
permissive
|
b-ggs/Twitch-Chat-Downloader
|
0ba26a31d2d176cfb096d935d95f0ddd27a285fe
|
7d8b00d1836cbb804489a75b57d6af131fc2cc55
|
refs/heads/master
| 2021-11-23T04:00:44.567651
| 2021-05-15T11:34:58
| 2021-05-15T11:34:58
| 330,633,877
| 0
| 0
|
MIT
| 2021-01-18T10:41:20
| 2021-01-18T10:41:19
| null |
UTF-8
|
Python
| false
| false
| 1,444
|
py
|
#!/usr/bin/env python
# Packaging script for the Twitch Chat Downloader CLI (`tcd`).
import os
from setuptools import setup, find_packages

# Use the repository readme as the long description on PyPI.
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'readme.md'), encoding='utf-8') as f:
    readme = f.read()

requirements = ['requests>=2.23.0', 'twitch-python>=0.0.18', 'pytz>=2020.1', 'python-dateutil>=2.8.1']
test_requirements = ['twine>=3.1.1', 'wheel>=0.34.2']
setup_requirements = ['pipenv>=2020.5.28', 'setuptools>=47.1.1']

setup(
    author='Petter Kraabøl',
    author_email='petter.zarlach@gmail.com',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        # FIX: dropped the 'Python :: 3.7' classifier, which contradicted
        # python_requires=">=3.8" below.
        'Programming Language :: Python :: 3.8',
    ],
    entry_points=
    '''
    [console_scripts]
    tcd=tcd:main
    ''',
    description='Twitch Chat Downloader',
    install_requires=requirements,
    license='MIT',
    long_description=readme,
    long_description_content_type='text/markdown',
    include_package_data=True,
    keywords='Twitch',
    name='tcd',
    packages=find_packages(),
    python_requires=">=3.8",
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/PetterKraabol/Twitch-Chat-Downloader',
    version='3.2.1',
    zip_safe=True,
)
|
[
"petter.zarlach@gmail.com"
] |
petter.zarlach@gmail.com
|
692a05543507c9a45449365de713ade53d90ef33
|
53c6a4b8fc143399cd12ac03cadd7c51b9d5dec3
|
/my_django_project/my_django_project/settings.py
|
55f620c6b87b974ede90a5ec637707cf23af408f
|
[] |
no_license
|
karan-coder/my-first-blog
|
13002f47cc1fd68edc705128375f799524d31c05
|
d9f207ee72a8aadd2fbcf0f5fb07d39e9a433b53
|
refs/heads/master
| 2020-07-19T19:43:17.829360
| 2019-09-06T09:53:47
| 2019-09-06T09:53:47
| 206,503,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,154
|
py
|
"""
Django settings for my_django_project project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'bqsd++bwo-7(alc2@$y020w=o6k(@cy49m84^mz@(1f#k(g_g8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_django_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'Homestead',
'USER': 'homestead',
'PASSWORD': 'secret',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"karan.artomate@studio"
] |
karan.artomate@studio
|
27b9be34e5cc147280f36f8f4c2142b99745df23
|
ee021f7f3f4986d3d16e8313124537462bea1a37
|
/app/views.py
|
3af7180dc053e000117bc0b767e75c071cd04359
|
[] |
no_license
|
pyprogs/analog
|
2815b61dd43c68656c8f78221e8d217ca85e697d
|
d98956f6a14545ed93a9209c5213b0280f08cf4a
|
refs/heads/master
| 2020-07-27T02:04:47.369662
| 2019-09-13T00:44:00
| 2019-09-13T00:44:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,668
|
py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout, models, forms
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.core.mail import get_connection, send_mail
from django.conf import settings
from catalog.models import DataFile, Manufacturer
from catalog import choices
from catalog.handlers import ProcessingSearchFile
from app.forms import MyAuthenticationForm, MyRegistrationForm, AppSearchForm, SearchFromFile, EmailConfirmationForm
from app.decorators import a_decorator_passing_logs
from app.models import MainLog
import hashlib
@a_decorator_passing_logs
def login_view(request):
auth_form = MyAuthenticationForm(request)
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
print(user)
if user is not None:
if user.is_active:
login(request, user)
return redirect('app:home')
return render(request, 'login.html', {'auth_form': auth_form, 'error': 'Неверно введён логин или пароль'})
return render(request, 'login.html', {'auth_form': auth_form})
@a_decorator_passing_logs
@login_required(login_url='login/')
def search(request):
form = AppSearchForm()
if request.method == 'POST':
form = AppSearchForm(request.POST)
return render(request, 'search.html', {'user': request.user, 'form': form})
@a_decorator_passing_logs
@login_required(login_url='login/')
def search_from_file_view(request):
if request.method == 'POST':
form = SearchFromFile(request.POST, request.FILES)
if form.is_valid():
instance = DataFile(file=request.FILES['file'],
type=choices.TYPES_FILE[1][0],
created_by=request.user,
updated_by=request.user)
instance.save()
file_response = ProcessingSearchFile(request.FILES['file'], instance.file, form, request).csv_processing()
response = HttpResponse(file_response, content_type='text/plain')
response['Content-Disposition'] = 'attachment; filename=' + file_response.name
return response
else:
form = SearchFromFile()
return render(request, 'search_from_file.html', {'form': form})
@a_decorator_passing_logs
@login_required(login_url='login/')
def advanced_search(request):
return redirect('catalog:search')
@a_decorator_passing_logs
def check_in_view(request):
reg_form = MyRegistrationForm()
# reg_form = forms.UserCreationForm()
if request.method == 'POST':
if request.POST['password'] != request.POST['double_password']:
return render(request, 'check_in.html', {'reg_form': reg_form, 'error': "Введённые пароли не совпадают"})
if len(request.POST['password']) < 8:
return render(request, 'check_in.html', {'reg_form': reg_form, 'error': "Введённый пароль слишком короткий. Он должен содержать как минимум 8 символов. "})
user, created = models.User.objects.get_or_create(username=request.POST['username'],
defaults={
# 'password': request.POST['password'],
'email': request.POST['email'],
'is_active': False
})
if not created:
return render(request, 'check_in.html', {'reg_form': reg_form, 'error': 'пользователь уже существует'})
else:
if not user.check_password(request.POST['password']):
user.delete()
return render(request, 'check_in.html', {'reg_form': reg_form, 'error': 'Введённый пароль состоит только из цифр, некорректен.'})
user.set_password(request.POST['password'])
user.save()
try:
connection = get_connection(host=settings.EMAIL_HOST, port=settings.EMAIL_PORT, username=settings.EMAIL_HOST_USER,
password=settings.EMAIL_HOST_PASSWORD, use_tls=settings.EMAIL_USE_TLS)
verif_code = hashlib.md5('{}'.format(user.pk).encode()).hexdigest()
href = 'http://analogpro.ru/email_confirmation/{}-{}/'.format(verif_code, user.pk)
send_mail('Подтверждение почты',
'Ваш верификационный код - {}, введите его или перейдите по ссылке: {}\n'.format(verif_code,
href),
'info@analogpro.ru', [request.POST['email']], connection=connection, fail_silently=False)
# print(response_email, [request.POST['email']])
except:
return render(request, 'check_in.html', {'reg_form': reg_form, 'error': 'Произошла проблема при отправке email'})
# return render(request, 'check_in.html', {'reg_form': reg_form, 'error': 'пользователь успешно создан'})
return redirect('app:email_confirmation', user.pk, user.pk)
return render(request, 'check_in.html', {'reg_form': reg_form})
# @login_required(redirect_field_name='app:login')
@login_required(login_url='login/')
@a_decorator_passing_logs
def home_view(request):
return render(request, 'home.html')
@login_required(login_url='login/')
@a_decorator_passing_logs
def profile_view(request):
return render(request, 'profile.html',
{
'actions_count': MainLog.objects.filter(user=request.user).count(),
'search_count': MainLog.objects.filter(user=request.user, message__icontains='search').filter(
message__icontains='post').count(),
'files_count': DataFile.objects.filter(created_by=request.user).count(),
'files': DataFile.objects.filter(created_by=request.user)})
@login_required(login_url='login/')
@a_decorator_passing_logs
def faq_view(request):
return render(request, 'faq.html')
@login_required(login_url='login/')
@a_decorator_passing_logs
def partners_view(request):
return render(request, 'to_partners.html')
@login_required(login_url='login/')
@a_decorator_passing_logs
def contacts_view(request):
return render(request, 'contacts.html')
@a_decorator_passing_logs
def logout_view(request):
logout(request)
return redirect('app:login')
def email_confirmation(request, verification_code, user_id):
user = get_object_or_404(models.User, pk=user_id)
check_code = hashlib.md5('{}'.format(user.pk).encode()).hexdigest()
msg = 'Подтвердите email'
if verification_code == check_code:
user.is_active = True
user.save()
msg = 'E-mail подтверждён'
return render(request, 'email_confirmation.html', {'confirmation': True, 'msg': msg})
elif verification_code == user_id:
confirmation_form = EmailConfirmationForm()
if request.method == 'POST':
confirmation_form = EmailConfirmationForm(request)
if check_code == request.POST['code']:
user.is_active = True
user.save()
msg = 'E-mail подтверждён'
return render(request, 'email_confirmation.html',
{'confirmation': True, 'msg': msg})
else:
msg = 'Код неверен'
return render(request, 'email_confirmation.html', {'conf_form': confirmation_form, 'msg': msg})
def landing_page_view(request):
# https://ianlunn.github.io/Hover/
manufacturers = Manufacturer.objects.all()
return render(request, 'landing_page.html', {'manufacturers': manufacturers})
|
[
"1v1expert@gmail.com"
] |
1v1expert@gmail.com
|
fe5e2cf35e5bbfda8bb53747c0cbf1bc642f00e1
|
c0dad0d83af72c6e974c5e9626cb0949fa666456
|
/personal_portfolio/projects/urls.py
|
554dc122ff5bccfce374524d34d891a1510edf65
|
[] |
no_license
|
Hontiris1/Django-Portfolio-Resume
|
df3d6513a15fd2753469809a5eaee3590ed0c9f6
|
b0fa968aae6143846e003381be0fb4550cb098e3
|
refs/heads/master
| 2022-12-25T10:28:09.188483
| 2019-12-21T16:35:46
| 2019-12-21T16:35:46
| 229,437,543
| 0
| 1
| null | 2022-12-19T22:54:39
| 2019-12-21T14:16:48
|
Python
|
UTF-8
|
Python
| false
| false
| 226
|
py
|
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path("", views.project_index, name="project_index"),
path("<slug:slug>/", views.post_detail, name="project_detail"),
]
|
[
"56845190+Hontiris1@users.noreply.github.com"
] |
56845190+Hontiris1@users.noreply.github.com
|
15d1c4dc00b96e0980199d3ab17065d53f04ffce
|
cf1e19f7b6354302037bca563b42218df7d79400
|
/삼성 기출/[14499]주사위 굴리기.py
|
02d746b8dd4fc841288e428981068ad16d41a419
|
[] |
no_license
|
kim-kiwon/Baekjoon-Algorithm
|
680565ddeced2d44506ae6720cf32d8004db42f8
|
4699e6551d3e7451648b9256c54ea4318b71bd4d
|
refs/heads/master
| 2023-04-13T11:10:21.031969
| 2021-04-26T10:50:08
| 2021-04-26T10:50:08
| 325,209,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,259
|
py
|
#요구조건에 따라 구현
n, m, x, y, k = map(int, input().split())
data = []
for _ in range(n):
data.append(list(map(int, input().split())))
orders = list(map(int, input().split()))
dice = [0, 0, 0, 0, 0, 0]
dx = [0, 0, 0, -1, 1]
dy = [0, 1, -1, 0, 0]
for o in orders:
#가능하다면 현재 좌표 변화
nx = x + dx[o]
ny = y + dy[o]
if nx < 0 or nx >= n or ny < 0 or ny >= m:
continue
x = nx
y = ny
#주사위 굴림에 따라 각 면의 위치 변화
if o == 1:
temp = dice[5]
dice[5] = dice[3]
dice[3] = dice[2]
dice[2] = dice[1]
dice[1] = temp
elif o == 2:
temp = dice[5]
dice[5] = dice[1]
dice[1] = dice[2]
dice[2] = dice[3]
dice[3] = temp
elif o == 3:
temp = dice[0]
dice[0] = dice[2]
dice[2] = dice[4]
dice[4] = dice[5]
dice[5] = temp
elif o == 4:
temp = dice[0]
dice[0] = dice[5]
dice[5] = dice[4]
dice[4] = dice[2]
dice[2] = temp
#지도값에 따라 바닥값 & 지도값 변화
if data[x][y] == 0:
data[x][y] = dice[5]
else:
dice[5] = data[x][y]
data[x][y] = 0
print(dice[2])
|
[
"76721493+kim-kiwon@users.noreply.github.com"
] |
76721493+kim-kiwon@users.noreply.github.com
|
4a5409dff72262d7e433c20870594b8b03ce19dc
|
18e929c1128d3a1d01336db5d748962625b2435f
|
/intropylab-classifying-images/get_input_args.py
|
13b37ac57cc802d57e89b2eabf8669b42456f80d
|
[
"MIT"
] |
permissive
|
AustraliaAlexandra11/Udacity_AI_Python
|
e4f58fd33101bb3695a293f6dbff71d88356a622
|
8d66f4efabee6cf73674acefdce3c019fa902870
|
refs/heads/master
| 2022-05-22T12:20:47.220506
| 2020-04-24T17:30:41
| 2020-04-24T17:30:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,300
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/get_input_args.py
#
# PROGRAMMER: Alexandra
# DATE CREATED: 14.04.2020
# REVISED DATE:
# PURPOSE: Create a function that retrieves the following 3 command line inputs
# from the user using the Argparse Python module. If the user fails to
# provide some or all of the 3 inputs, then the default values are
# used for the missing inputs. Command Line Arguments:
# 1. Image Folder as --dir with default value 'pet_images'
# 2. CNN Model Architecture as --arch with default value 'vgg'
# 3. Text File with Dog Names as --dogfile with default value 'dognames.txt'
#
##
# Imports python modules
import argparse
def get_input_args():
"""
Retrieves and parses the 3 command line arguments provided by the user when
they run the program from a terminal window. This function uses Python's
argparse module to created and defined these 3 command line arguments. If
the user fails to provide some or all of the 3 arguments, then the default
values are used for the missing arguments.
Command Line Arguments:
1. Image Folder as --dir with default value 'pet_images'
2. CNN Model Architecture as --arch with default value 'vgg'
3. Text File with Dog Names as --dogfile with default value 'dognames.txt'
This function returns these arguments as an ArgumentParser object.
Parameters:
None - simply using argparse module to create & store command line arguments
Returns:
parse_args() -data structure that stores the command line arguments object
"""
def get_input_args():
"""
Retrieves and parses the 3 command line arguments provided by the user when
they run the program from a terminal window. This function uses Python's
argparse module to created and defined these 3 command line arguments. If
the user fails to provide some or all of the 3 arguments, then the default
values are used for the missing arguments.
Command Line Arguments:
1. Image Folder as --dir with default value 'pet_images'
2. CNN Model Architecture as --arch with default value 'vgg'
3. Text File with Dog Names as --dogfile with default value 'dognames.txt'
This function returns these arguments as an ArgumentParser object.
Parameters:
None - simply using argparse module to create & store command line arguments
Returns:
parse_args() -data structure that stores the command line arguments object
"""
# Creates parse
parser = argparse.ArgumentParser()
# Creates 3 command line arguments args.dir for path to images files,
# args.arch which CNN model to use for classification, args.labels path to
# text file with names of dogs.
parser.add_argument('--dir', type=str, default='pet_images/',
help='Image Folder Path')
# --arch - the CNN model architecture
parser.add_argument('--arch', type=str, default = 'vgg', help='CNN Model Architecture')
# --dogfile - text file of names of dog breeds
parser.add_argument('--dogfile', type=str, default = 'dognames.txt', help='Dog Breeds Text File')
# Return parser.parse_args() parsed argument
return parser.parse_args()
|
[
"noreply@github.com"
] |
AustraliaAlexandra11.noreply@github.com
|
1bbbe0b99b9e11cf90de82fdc5527584ed64257a
|
eb89c747244588f5185a42b024ee306263ba6fde
|
/r_eco_mmender.py
|
bbe7c691eee733c969463a00016d843bad288680
|
[] |
no_license
|
mlisovyi/hackzurich2021_eco_recommender
|
4004491c3b0413161406e58d8f6be9f6cb901cff
|
84a118af427acf5fdb7c1ea688946182a347e198
|
refs/heads/main
| 2023-08-30T11:50:42.627905
| 2021-09-26T04:53:07
| 2021-09-26T04:53:07
| 410,081,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,787
|
py
|
from typing import Any, Dict, Union
import numpy as np
import pandas as pd
import abc
from dataclasses import dataclass
class RecommenderError(Exception):
pass
@dataclass
class BaseRecommender(abc.ABC):
col_product_id: str = "id"
_fitted: bool = False
@abc.abstractmethod
def fit(self, X: pd.DataFrame) -> None:
...
def predict(self, X: pd.DataFrame) -> pd.Series:
if not self._fitted:
raise RecommenderError("The recommender must be trained first.")
preds = self._predict(X)
return preds
@abc.abstractmethod
def _predict(self, X: pd.DataFrame) -> pd.Series:
...
def predict_single_instance(self, x: Dict[str, Union[str, int, float]]) -> str:
X = pd.DataFrame.from_records([x])
pred = self.predict(X)
return pred.iloc[0]
@dataclass
class BasicBossThemaRecommender(BaseRecommender):
col_group: str = "boss_thema_id"
def fit(self, X: pd.DataFrame) -> None:
self.groups = (
X.groupby(self.col_group)[self.col_product_id]
.apply(list)
.rename("__preds__")
)
self._fitted = True
def _predict(self, X: pd.DataFrame) -> pd.Series:
if self.col_group not in X:
raise RecommenderError(f"the required column is missing: {self.col_group}")
df_with_preds = X.merge(
self.groups, left_on=self.col_group, right_index=True, validate="m:1"
)
preds = df_with_preds.set_index(self.col_product_id)["__preds__"]
return preds
class BaseSelector(abc.ABC):
col_product_id: str = "id"
@abc.abstractmethod
def select(self, X: pd.Series) -> pd.Series:
...
@dataclass
class TopOneSelector(BaseSelector):
col_target = "carbon_footprint_co2_in_car_km"
def select(self, X: pd.Series, product_data: pd.DataFrame) -> pd.DataFrame:
col_preds = "recommendation_id"
X_ = X.copy()
# expand lists into multiple rows
stacked_preds = (
X_.apply(pd.Series)
.stack()
.reset_index()
.rename(columns={0: col_preds})[[self.col_product_id, col_preds]]
)
# add targets for the tested products
col_target_test = f"{self.col_target}_test"
product_scores = product_data[[self.col_product_id, self.col_target]].rename(
columns={self.col_target: col_target_test}
)
stacked_preds = stacked_preds.merge(
product_scores,
left_on=self.col_product_id,
right_on=self.col_product_id,
)
# add targets for the recommendations
col_target_reco = f"{self.col_target}_reco"
product_scores = product_data[[self.col_product_id, self.col_target]].rename(
columns={self.col_target: col_target_reco, self.col_product_id: col_preds}
)
stacked_preds = stacked_preds.merge(
product_scores,
left_on=col_preds,
right_on=col_preds,
)
# choose the product that is the best
col_diff = "diff_preds"
stacked_preds[col_diff] = (
stacked_preds[col_target_test] - stacked_preds[col_target_reco]
)
idx_best_options = stacked_preds.groupby(self.col_product_id)[col_diff].idxmax()
# drop entries with missing bestalternative
best_options = stacked_preds.loc[idx_best_options.loc[lambda x: x.notnull()]]
# drop entries with the best alternative having the same metric value
best_options = best_options[best_options[col_diff] > 0]
# format the output
best_options = best_options[
[self.col_product_id, col_preds, col_diff]
].reset_index(drop=True)
return best_options
|
[
"mischa.lisovyi@mgb.ch"
] |
mischa.lisovyi@mgb.ch
|
cda5871aaa1cd77fa56ab3b28dc623079fdd31e7
|
7abc50131d92120ebeefe4c5b3f27e94622eb866
|
/001_Python基础/031_pyhton类.py
|
9c1f0d087986d630de8bbe34e97b29fda46e5e4e
|
[] |
no_license
|
dogpi/StudyPython
|
4b94b3b655cf509ad9002ea9da0d48746964bd7d
|
87cabbfe46455e6e8864207cf614d7cd65062f89
|
refs/heads/master
| 2020-08-31T11:30:02.808766
| 2019-10-31T04:16:00
| 2019-10-31T04:16:00
| 218,680,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
# object:基类,超类,所有类的父类
class Person(object):
# 定义属性
# name = ""
# age = 0
# height = 0
# weight = 0
'''
构造函数:
在创建对象时自动调用
'''
def __init__(self, name, age, height, weight):
print("__init__")
print(name, age, height, weight)
self.name = name # self.name 表示具体实例的name
self.age = age
self.height = height
self.weight = weight
# 析构函数
# 对象释放时自动调用
def __del__(self):
print("析构函数")
#定义方法
# 方法的第一个参数必须是self
# self代表类的实例(某个对象)
def run(self):
print("run")
def eat(self,food):
print("eat",food)
def openDoor(self):
print("我已经打开了冰箱门")
def fillEle(self):
print("我已经把大=大象装进了冰箱")
def closeDoor(self):
print("我已经关闭冰箱门")
'''
实例化对象
格式:对象名 = 类名(参数列表)
'''
'''
# 实例化一个对象
per1 = Person()
print(per1)
print(type(per1))
per2 = Person()
print(per2)
print(type(per2))
# 设置属性
per1.name = "tom"
per1.age = 18
per1.height = 180
per1.weight = 140
print(per1.name,per1.age,per1.height,per1.weight)
# 调用方法
per1.openDoor()
per1.fillEle()
per1.closeDoor()
per1.eat("apple")
'''
per = Person("tom",18,180,140)
# 手动释放对象,调用该对象的析构函数
del per
|
[
"noreply@github.com"
] |
dogpi.noreply@github.com
|
241af9ec509daa6b7d7db01173e6c44daffe6716
|
f2946cbfe41fc5cdc16d104af1b1f054553e86fa
|
/dashboard/admin.py
|
1c90b69f5faedb5c7394380ddaa1a633bf6ca069
|
[] |
no_license
|
ahmdkhan-cs/Facebook-Sentiments
|
bb12821ea575ed4ff40d99ca00e8b1542f409267
|
4b46e8f605dacb91061af897fdada633264b06be
|
refs/heads/master
| 2022-12-01T16:31:51.107499
| 2020-08-17T16:10:57
| 2020-08-17T16:10:57
| 288,225,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 118
|
py
|
from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.Page)
|
[
"akhn358@gmail.com"
] |
akhn358@gmail.com
|
861a7a1732edf6f26a65e98a821b9b2502a797ca
|
89ee8d0ba8756a728c64841655bed4b980594f3a
|
/HOMEWORK 02/ZyLabs 8.10.py
|
909c393416af64a7df4716129cb894c21d6f3475
|
[] |
no_license
|
cchan27/CIS2348
|
4fb9e9aad8614f321da47ff35b046a327866f496
|
333891851a65307fe6bb3f0ba43daf2c882c2fd2
|
refs/heads/main
| 2023-08-22T12:13:14.601183
| 2021-10-17T02:23:58
| 2021-10-17T02:23:58
| 401,477,742
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
# Caleb Chan, 1831296
usr_text = input()
word_a = ''
word_b = ''
for i in usr_text:
if i != "":
word_a = word_a + i.replace(' ', '')
word_b = i.replace(' ', '') + word_b
print(word_a)
print(word_b)
if word_a == word_b:
print('{} is a palindrome'.format(usr_text))
else:
print('{} is not a palindrome'.format(usr_text))
|
[
"noreply@github.com"
] |
cchan27.noreply@github.com
|
8c7bbb0bbdab34e88b5bf60a7150a80da745acb5
|
001ce777a39dc272b5cfaec4dd368e984f30df61
|
/Python/DL/Dev/neural/matrixcalc.py
|
59e2f6af47c177313ff598f0e0086221c0348434
|
[] |
no_license
|
ogawa-yu/DevHome
|
d4c5dbce00822c8f19166d4cbf0d0713d928c653
|
6642a48af13fc02edd6e0ffa134f2ded5e89452d
|
refs/heads/master
| 2021-07-07T08:55:45.393209
| 2019-01-06T03:07:56
| 2019-01-06T03:07:56
| 112,095,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def printArray(X):
print("======Print Array=======")
print(X)
print(np.ndim(X))
print(X.shape)
print(X.shape[0])
print("======Print END =======")
A = np.array([1, 2, 3, 4])
B = np.array([[1.0, 2.0],
[3.0, 4.0],
[5.0, 6.0]])
C = np.array([[1.0, 2.0],
[3.0, 4.0]])
D = np.array([[5.0, 6.0],
[7.0, 8.0]])
E = np.array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]])
printArray(A)
printArray(B)
printArray(np.dot(C, D))
printArray(np.dot(D, C))
print(np.dot(C, D) is np.dot(D, C))
printArray(np.dot(B, C))
printArray(np.dot(B, D))
printArray(np.dot(B,E))
|
[
"badm.y.ogawa@gmail.com"
] |
badm.y.ogawa@gmail.com
|
4a041294c21b7209e66526b08ddc74cc9d670592
|
f59baca7628598192b004b9ce69850034e17ffe9
|
/src/posts/urls.py
|
a1d5da2108733cf150eec704fb33e6614aae367b
|
[] |
no_license
|
ihdba/django_ajax
|
8c41946b88f29947e19dde8282a351d736781d93
|
17f7ab9b8d0d80542b36d65de5623404025e3827
|
refs/heads/main
| 2023-03-31T14:27:36.087686
| 2021-04-11T07:43:47
| 2021-04-11T07:43:47
| 356,800,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
from django.urls import path
from .views import (
post_list_and_create,
load_post_data_view,
like_unlike_post,
post_detail,
post_detail_data_view,
delete_post,
update_post,
)
app_name = 'posts'
urlpatterns = [
path('', post_list_and_create, name = 'main-board'),
path('data/<int:num_posts>/', load_post_data_view, name = 'posts-data'),
path('like-unlike/', like_unlike_post, name = 'like-unlike'),
path('<pk>/', post_detail, name = 'post-detail'),
path('<pk>/data/', post_detail_data_view, name = 'post-detail-data'),
path('<pk>/update/', update_post, name = 'post-update'),
path('<pk>/delete/', delete_post, name = 'post-delete'),
]
|
[
"ioannis@Ioanniss-iMac.local"
] |
ioannis@Ioanniss-iMac.local
|
5b1acdbbbc44548e7b618008e2273206942ec172
|
1d867cb4bc3eb7ae0acc66ad002745d717b0ef95
|
/server/models/__init__.py
|
6687ad890fa1d5113ebc80857af0282e48767225
|
[] |
no_license
|
plollaton/treinamento_python1_back
|
641007af6c71902518e04a293dcea2968ad3d64b
|
7932690cd10a75f8794cd87028e359abfb73bab9
|
refs/heads/main
| 2023-02-15T16:03:52.886082
| 2021-01-15T19:52:31
| 2021-01-15T19:52:31
| 330,005,633
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
from .models import Album, Photos
try:
Album.create_table()
except:
pass
try:
Photos.create_table()
except:
pass
|
[
"plollaton@hotmail.com"
] |
plollaton@hotmail.com
|
8b955993a7f52285a6cd43ba2209597c4449cbec
|
958d5bd8cde0c058ae44aad5d108c1adaef9a06a
|
/mysite/polls/migrations/0001_initial.py
|
c518310caddba276db9007b9e8fe573d22147b68
|
[] |
no_license
|
divya2424/django-basic
|
342f83043a598ca4ebf9d59c234dbc4ca63c75c7
|
cdfd96f0667e55a647cd9a70a1e134a7e3f7f247
|
refs/heads/master
| 2022-04-24T16:20:39.662803
| 2020-04-23T23:34:20
| 2020-04-23T23:34:20
| 258,354,225
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-04-23 19:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
),
]
|
[
"divya2401@bitbucket.org"
] |
divya2401@bitbucket.org
|
25489f7c159b13c6cf582874ffd69d3ed7ec89e3
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2126/60604/259679.py
|
502aa7b2a7ea5ca5d11513058f3eccf895093494
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
n=input().split(',')
for i in range(len(n)):
n[i]=int(n[i])
n.sort()
res=[]
res.append(n[0])
for i in range(1,len(n)):
#print(res)
j=True
for j in res:
if n[i]%j!=0 and j%n[i]!=0:
j=False
if j:
res.append(n[i])
print(res)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
22839aa3e04df540b1da346258eed2b11d22b3e4
|
08fe752455b2e8bea36f69bd7eb0d70641691514
|
/12월 3주차/[LEETCODE] 4Sum.py
|
14110fd2126e3b1a5e36f8385f5a5fa01940b85c
|
[] |
no_license
|
2020-ASW/gayoung_yoon
|
69a7a20c073710aaec76261cac59aa6541250588
|
0ad21c5484ed087d704d72b03fc1f14c3fa9e25f
|
refs/heads/main
| 2023-02-14T19:28:38.966502
| 2021-01-10T07:56:10
| 2021-01-10T07:56:10
| 320,287,097
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,016
|
py
|
'''
정렬 후, a,b,c,d를 결정해야한다.
4중for문보다 a, b는 for문으로 결정하고, c,d는 while문 이용
'''
# class Solution:
def fourSum(nums, target):
nums = [1,0,-1,0,-2,2]
target = 0
nums = sorted(nums)
nums_len = len(nums)
# a, b를 설정하고 나서 c, d도 설정해야하기때문에
# for문 돌린 후 2개 값을 설정할 수 있어야한다.
check = set()
for a in range(len(nums) - 3):
for b in range(a + 1, len(nums) - 2):
new_target = target - nums[a] - nums[b]
c, d = b + 1, nums_len - 1
while d > c:
if new_target == nums[c] + nums[d]:
check.add((nums[a], nums[b], nums[c], nums[d]))
c += 1
d -= 1
elif new_target > nums[c] + nums[d]:
c += 1
else:
d -= 1
return list(check)
# nums = [1,0,-1,0,-2,2]
# target = 0
# print(fourSum(nums, target))
|
[
"gyyoon4u@naver.com"
] |
gyyoon4u@naver.com
|
4013c6f42b3aa290fa59fe04f8976bdb1f342c12
|
1abcfe11e193ec840c1ec0969d36e9bd0916f61f
|
/source/helpers.py
|
b37cf20b34a820b7dd7168a9707449e8c91a29cf
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"BSD-3-Clause"
] |
permissive
|
jcushman/password_change_tool
|
ed690070f8de4ceede1a20ac74f62b990c9433d6
|
efbf2968eef9d4e1f9b9053384b65386c918b75d
|
refs/heads/master
| 2020-07-20T18:06:06.311266
| 2014-08-27T13:50:33
| 2014-08-27T13:50:33
| 22,996,875
| 1
| 0
| null | 2015-01-19T18:13:26
| 2014-08-15T17:13:15
|
Python
|
UTF-8
|
Python
| false
| false
| 4,095
|
py
|
import ctypes
from multiprocessing.pool import ThreadPool
import os
import sys
import threading
import wx
from wx.lib.pubsub import pub
import crypto
import platform_tools
from ramdisk import RamDisk
from global_state import GlobalState
def get_data_dir():
if hasattr(sys, "frozen"):
this_module = unicode(sys.executable, sys.getfilesystemencoding())
if sys.platform == 'darwin':
return os.path.dirname(this_module)
else:
raise NotImplementedError("Don't know where to find resources on this OS yet.")
# running as regular script
return os.path.dirname(os.path.dirname(__file__))
def data_path(path):
return os.path.join(get_data_dir(), path)
def bind_click_event(button, message, **kwargs):
button.Bind(wx.EVT_BUTTON,
lambda evt: pub.sendMessage(message, **kwargs))
return button
def show_message(message, title='', options=wx.OK):
dlg = wx.MessageDialog(None, message, title, options)
dlg.ShowModal()
dlg.Destroy()
def show_error(message, title="Error"):
show_message(message, title)
def ask(parent=None, message=''):
dlg = wx.TextEntryDialog(parent, message)
dlg.ShowModal()
result = dlg.GetValue()
dlg.Destroy()
return result
def use_ramdisk():
""" Return True if password managers can use a ramdisk on this platform for file exchange. """
return sys.platform == 'darwin'
def get_password_managers():
# process password manager plugins
# TODO: make this auto-discover and move it somewhere sensible
from managers.onepassword import OnePasswordImporter
return {
'onepassword':OnePasswordImporter
}
def set_up_import_ramdisk(name="FreshPass Secure Disk"):
"""
Mount a ramdisk in a background thread and configure it for file import.
Callers can subscribe to 'ramdisk.loaded' and 'ramdisk.failed' to be alerted when load is complete.
If successful, ramdisk object will be stored in GlobalState.ramdisk.
Callers can subscribe to 'ramdisk.files_added' to be alerted when files are added to the disk.
"""
def load_ramdisk():
try:
ramdisk = RamDisk(name, source_image=data_path('resources/mac_disk_image_compressed.dmg'))
ramdisk.mount()
GlobalState.cleanup_message.send({'action': 'unmount', 'path': ramdisk.path, 'device': ramdisk.device})
if sys.platform == 'darwin':
platform_tools.run_applescript(file=data_path('resources/display disk image.scpt'))
crypto.set_access_control_for_import_folder(ramdisk.path)
ramdisk.watch()
GlobalState.ramdisk = ramdisk
wx.CallAfter(pub.sendMessage, 'ramdisk.loaded')
except NotImplementedError:
wx.CallAfter(pub.sendMessage, 'ramdisk.failed')
ramdisk_loading_thread = threading.Thread(target=load_ramdisk)
ramdisk_loading_thread.start()
def get_first_result_from_threads(calls):
calls = list(enumerate(calls))
def run_func(call):
i, call = call
func = call[0]
args = call[1] if len(call)>1 else []
kwargs = call[2] if len(call)>2 else {}
try:
return i, func(*args, **kwargs)
except Exception as e:
return i, e
pool = ThreadPool(processes=len(calls))
result = pool.imap_unordered(run_func, calls).next()
for thread in pool._pool:
# via http://stackoverflow.com/a/15274929
if not thread.isAlive():
continue
exc = ctypes.py_object(SystemExit)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(thread.ident), exc)
if res == 0:
raise ValueError("nonexistent thread id")
elif res > 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
return result
|
[
"jcushman@gmail.com"
] |
jcushman@gmail.com
|
161d73575db110913f2bf402aa5a22d649d786ff
|
fa37de9fa7699e92cdcd4f753fa54b933830b967
|
/rookery/checkers/ip.py
|
263b11af5629e569d906b3931146a548a29a9328
|
[] |
no_license
|
BorisPlus/otus_webpython_020_021_celery
|
1b37b3dcc3db3c95fc31bdf0136baa877325cbc5
|
68ea6597c19ba525ab52db158318276111a2546c
|
refs/heads/master
| 2022-05-26T10:55:19.997403
| 2018-11-07T11:17:56
| 2018-11-07T11:17:56
| 156,196,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
#!/usr/bin/env python3
from .core.base_checkers import ip
def check(domain_url_or_ip):
return ip.check(domain_url_or_ip)
|
[
"boris-plus@mail.ru"
] |
boris-plus@mail.ru
|
e52b50956b795494549c1453cc42a4e77b02fd23
|
a15ca7096a1de7372569e8defb4fb012f9c00966
|
/inference.py
|
00cd5289945c89d84f00d45f46b0ed0d0cec5257
|
[] |
no_license
|
AniketGurav/Attention-OCR-pytorch
|
9a32412eef2162f4937145304eea35498991251a
|
eacbb2f8f83eb47af08da6a3666715d714565071
|
refs/heads/main
| 2023-07-03T00:52:51.062816
| 2021-07-28T05:54:52
| 2021-07-28T05:54:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,408
|
py
|
# Inference Script --> Given Image, Output Label
import torch
import os
from PIL import Image
from torchvision import transforms
from model.attention_ocr import OCR
from utils.tokenizer import Tokenizer
import json
import matplotlib.pyplot as plt
# Train and infer on same CNN Backbone
# Loading Json File
json_file="config_infer.json"
f = open(json_file, )
data = json.load(f)
cnn_option = data["cnn_option"]
cnn_backbone = data["cnn_backbone_model"][str(cnn_option)] # list containing model, model_weight
# Tokenizer
tokenizer = Tokenizer(list(data["chars"]))
# Model Architecture
model = OCR(data["img_width"], data["img_height"], data["nh"], tokenizer.n_token,
data["max_len"] + 1, tokenizer.SOS_token, tokenizer.EOS_token,cnn_backbone).to(device=data["device"])
# Model checkpoint Load
model.load_state_dict(torch.load(data["model_path"]))
# Image Transformation
img_trans = transforms.Compose([
transforms.Resize((data["img_height"], data["img_width"])),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=(0.229, 0.224, 0.225)),
])
# Inference Function --> Input: Source Image directory, Text File containing name of Image
def _inference(test_dir,filename):
count=0
model.eval()
with torch.no_grad():
with open(os.path.join(test_dir, filename), 'r') as fp:
for img_name in fp.readlines():
img_name = img_name.strip("\n")
img_filename=img_name.split(data["data_file_separator"])[0]
#act_label=img_name.split(data["data_file_separator"])[1]
image_file = os.path.abspath(os.path.join(test_dir, img_filename))
img=Image.open(image_file)
pred = model(img_trans(img).unsqueeze(0).to(device=data["device"]))
pred_label = tokenizer.translate(pred.squeeze(0).argmax(1))
print(pred_label)
plt.imshow(img)
plt.savefig(data["res_output"] + "/" + pred_label + '.png')
count=count+1
print("Saved {} file".format(count))
# plt.title(pred_label)
# plt.imshow(img)
# plt.show()
print("Complete Saving {} files".format(count))
#print(pred_label,act_label)
if __name__=="__main__":
_inference(data["test_dir"],data["inference_file"])
|
[
"ankanroni3@gmail.com"
] |
ankanroni3@gmail.com
|
94622d6fcc9d234946d65e22c3520e93bf586eed
|
f715e2ff4f264cf4bc805a45a5019f527479e5ce
|
/unused/us07.py
|
0e5bae80c851d21eaa2ceedf8c7ec16a9f9135a2
|
[] |
no_license
|
tarikkdiry/spring2018-ssw555
|
4b1207329f120c91fa12c9e9b06b6dfb38b5fede
|
3b452a6479a569119b8d1d32a143bc8b80425419
|
refs/heads/master
| 2021-04-28T19:37:41.410394
| 2018-04-12T17:38:38
| 2018-04-12T17:38:38
| 121,900,740
| 0
| 1
| null | 2018-04-12T17:38:39
| 2018-02-17T23:15:24
|
Python
|
UTF-8
|
Python
| false
| false
| 430
|
py
|
from datetime import datetime
LEVELS = {'INDI':'0', 'NAME':'1', 'SEX':'1', 'BIRT':'1', 'DEAT':'1', 'FAMC':'1', 'FAMS':'1', 'FAM':'0', 'MARR':'1', 'HUSB':'1', 'WIFE':'1', 'CHIL':'1', 'DIV':'1', 'DATE':'2', 'HEAD':'0', 'TRLR':'0', 'NOTE':'0'}
def us07(Children):
'''Check to see if there are fifteen siblings, 15 kids is ridiculous'''
for l in line:
if len(Children) >= 15:
return False
return True
|
[
"noreply@github.com"
] |
tarikkdiry.noreply@github.com
|
b2737dd27d92f56ef1286581da06f988d6e0ac27
|
28986e1726c3f7aed7a444a4d505ed6e62ed29df
|
/robot.py
|
b8f34403a8caec2f2dcd451ca0234e9a0284dfc5
|
[
"MIT"
] |
permissive
|
alexcc4/robot
|
5e66c16a35a4fee8009af61b7f5e77510cd27f0a
|
713b95a12aa7aa47aab97a0a812dcb7cd457e2ff
|
refs/heads/master
| 2021-01-10T23:20:30.741602
| 2016-10-11T14:27:06
| 2016-10-11T14:27:06
| 70,598,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,069
|
py
|
#!usr/bin/env python
# -*- encoding: utf-8 -*-
import os
import time
from slackclient import SlackClient
# bot 的 ID 作为一个环境变量
BOT_ID = os.environ.get("BOT_ID")
# 常量
AT_BOT = "<@" + BOT_ID + ">:"
EXAMPLE_COMMAND = "do"
# 实例化 Slack 和 Twilio 客户端
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
def handle_command(command, channel):
"""
Receives commands directed at the bot and determines if they
are valid commands. If so, then acts on the commands. If not,
returns back what it needs for clarification.
"""
response = "Not sure what you mean. Use the *" + EXAMPLE_COMMAND + \
"* command with numbers, delimited by spaces."
if command.startswith(EXAMPLE_COMMAND):
response = "Sure...write some more code then I can do that!"
slack_client.api_call("chat.postMessage", channel=channel,
text=response, as_user=True)
def parse_slack_output(slack_rtm_output):
"""
The Slack Real Time Messaging API is an events firehose.
this parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'text' in output and AT_BOT in output['text']:
# 返回 @ 之后的文本,删除空格
return output['text'].split(AT_BOT)[1].strip().lower(), \
output['channel']
return None, None
if __name__ == "__main__":
READ_WEBSOCKET_DELAY = 1 # 1 从 firehose 读取延迟 1 秒
if slack_client.rtm_connect():
print("Moutainchicken connected and running!")
while True:
command_, channel_ = parse_slack_output(slack_client.rtm_read())
if command_ and channel_:
handle_command(command_, channel_)
time.sleep(READ_WEBSOCKET_DELAY)
else:
print("Connection failed. Invalid Slack token or bot ID?")
|
[
"liangbinsi@gmail.com"
] |
liangbinsi@gmail.com
|
43ba1376aaedf3c6c736bd7a359121850f6e872d
|
1590478d46b990b79ec9f6ca45f68273cbb0f6e8
|
/oneD.py
|
678515105c9c0fa55afdd468014887041c7a81a0
|
[] |
no_license
|
satyamshree22/oneDkinematicpy
|
8e3362239b1db2b6dc9e2579f1f95ee9e9524486
|
0483f0dd462141d704e5f16a8b2c97d6fa2dffd1
|
refs/heads/master
| 2022-11-13T21:41:13.408759
| 2020-06-29T06:01:01
| 2020-06-29T06:01:01
| 275,740,986
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,678
|
py
|
import math
def final_velocity(initial_velocity=0,acceleration=0,time=0,final_position=0,initial_position=0):
# when time is t given
if time != 0 and acceleration !=0 :
# vx=vox +ax*t
final_velocity = initial_velocity + acceleration * (time)
return final_velocity
elif time == 0 and acceleration != 0:
# vx^2=v0x^2 +2ax(x-xo)
final_velocity =math.sqrt((pow(initial_velocity,2)+ 2*acceleration*(final_position-initial_velocity)))
return final_velocity
elif time != 0 and acceleration == 0:
final_velocity =2*(final_position-initial_position)/time - initial_velocity
return final_velocity
def initial_velocity(final_velocity = 0,acceleration = 0,time = 0,final_position = 0,initial_position = 0):
# when time is t given
if time != 0 and acceleration != 0 :
# vx=vox +ax*t
initial_velocity= acceleration * pow(time,2)-initial_velocity
return initial_velocity
elif time == 0 and acceleration !=0:
# vx^2=v0x^2 +2ax(x-xo)
initial_velocity =math.sqrt((2*acceleration*(final_position-initial_velocity))-pow(final_velocity,2))
return initial_velocity
elif time != 0 and acceleration == 0:
initial_velocity =2*(final_position-initial_position)/time - final_velocity
return initial_velocity
def find_acceleration(initial_velocity=0,final_velocity=0,initial_position=0,final_position=0,time=0):
if time==0:
acceleration=(pow(final_velocity,2)-pow(initial_velocity,2))/(2*(final_position-initial_position))
else:
if final_position == 0 and initial_position ==0:
acceleration=(final_velocity-initial_velocity)/time
return acceleration
else:
acceleration=(2*(final_position-initial_position) - initial_velocity*time)/(time*time)
return acceleration
def find_displacement(initial_velocity=0,final_velocity=0,acceleration=0,time=0):
if time == 0 and acceleration !=0:
displacement= (pow(final_velocity,2)-pow(initial_velocity,2))/(2*acceleration)
return displacement
elif time!=0 and acceleration == 0:
displacement = ((initial_velocity+final_velocity)*time)/2
return displacement
else:
displacement =((initial_velocity*time + .5*acceleration*time*time))
return displacement
def kmph_mps(velocity):
return 5*velocity/18
def mps_kmph(velocity):
return 18*velocity/5
|
[
"satyamshree22@gmail.com"
] |
satyamshree22@gmail.com
|
364d667b5e4354fe5c21f9390a0aa9c02c5bdcae
|
f613efb275bbb74dbee93e9bf5b89175a79bf9ef
|
/tweet.py
|
9fa766d8aa91d6bae44cbeaedbb6718a6c29eeab
|
[] |
no_license
|
plmwa/nittc-bot
|
73321cdbf181c7b0dfad15e9ee8c513bf674ddb3
|
eef9d3e97deeff824a1c75ad4089282855ff1d3d
|
refs/heads/main
| 2023-06-08T03:51:46.642970
| 2020-12-16T08:01:01
| 2020-12-16T08:01:01
| 321,910,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
import os
from dotenv import load_dotenv
from requests_oauthlib import OAuth1Session
def tweet(a,b):
CONSUMER_KEY = os.environ["CONSUMER_KEY"]
CONSUMER_SECRET = os.environ["CONSUMER_SECRET"]
ACCESS_TOKEN = os.environ["ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = os.environ["ACCESS_TOKEN_SECRET"]
url = "https://api.twitter.com/1.1/statuses/update.json"
twitter = OAuth1Session(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
tweet = a+" "+b #ツイート内容
params = {"status" : tweet}
req = twitter.post(url, params = params) #ここでツイート
if req.status_code == 200: #成功
print("Succeed!")
else: #エラー
print("ERROR : %d"% req.status_code)
|
[
"townstgak@gmail.com"
] |
townstgak@gmail.com
|
bbede51527b82585df7b0832bdaf134cac397749
|
86e7542c386aca7386e5a107ee4c93e3480b9e50
|
/kbackend/core/wsgi.py
|
2b949153980f34f44777315de6cc6d8bddf69b9f
|
[] |
no_license
|
patrikturi/kBackend
|
9c07481396a0a2883f6d04c170cd27c0afa5e666
|
e2c3d94251a17db748abe406c8d5b52a7f6d3e19
|
refs/heads/master
| 2023-02-06T21:47:28.346658
| 2020-12-26T14:06:17
| 2020-12-26T14:06:17
| 288,246,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
"""
WSGI config for kbackend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise import WhiteNoise
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings.production')
application = get_wsgi_application()
application = WhiteNoise(application)
application.add_files('./core/static', prefix='static')
|
[
"patrik.turi.0xff@gmail.com"
] |
patrik.turi.0xff@gmail.com
|
dfecf2f243803bdb56737a4fa2c9e671acc37da9
|
2b73d950cfd1a617111eeed37ac58c2fde465a98
|
/src/aihwkit/cloud/converter/definitions/onnx_common_pb2.py
|
1279f04146c04ea97cb80b13677c1f8d17d8150e
|
[
"Apache-2.0"
] |
permissive
|
tunghoang290780/aihwkit
|
218756911dac42190ec504f99672e54eba709d46
|
801aa174f4b808aa6595f1fb2403944d0aad142f
|
refs/heads/master
| 2023-05-06T20:19:40.692587
| 2021-05-17T10:15:31
| 2021-05-17T10:15:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 18,916
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: onnx_common.proto
# Some of the data structures in this file are derivatives of the ONNX
# specification (https://github.com/onnx/onnx), licensed under Apache-2.0
# as per the terms below.
# Copyright (c) ONNX Project Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='onnx_common.proto',
package='aihwx',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x11onnx_common.proto\x12\x05\x61ihwx\"\x91\x03\n\x0e\x41ttributeProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\x04type\x18\x14 \x01(\x0e\x32#.aihwx.AttributeProto.AttributeType\x12\t\n\x01\x66\x18\x02 \x01(\x02\x12\t\n\x01i\x18\x03 \x01(\x03\x12\t\n\x01s\x18\x04 \x01(\x0c\x12\x1d\n\x01t\x18\x05 \x01(\x0b\x32\x12.aihwx.TensorProto\x12\t\n\x01\x62\x18\x65 \x01(\x08\x12\x0e\n\x06\x66loats\x18\x07 \x03(\x02\x12\x0c\n\x04ints\x18\x08 \x03(\x03\x12\x0f\n\x07strings\x18\t \x03(\x0c\x12#\n\x07tensors\x18\n \x03(\x0b\x32\x12.aihwx.TensorProto\x12\r\n\x05\x62ools\x18\x66 \x03(\x08\"\x8f\x01\n\rAttributeType\x12\r\n\tUNDEFINED\x10\x00\x12\t\n\x05\x46LOAT\x10\x01\x12\x07\n\x03INT\x10\x02\x12\n\n\x06STRING\x10\x03\x12\n\n\x06TENSOR\x10\x04\x12\x08\n\x04\x42OOL\x10\x65\x12\n\n\x06\x46LOATS\x10\x06\x12\x08\n\x04INTS\x10\x07\x12\x0b\n\x07STRINGS\x10\x08\x12\x0b\n\x07TENSORS\x10\t\x12\t\n\x05\x42OOLS\x10\x66\"\x89\x02\n\x0bTensorProto\x12\x0c\n\x04\x64ims\x18\x01 \x03(\x03\x12\x11\n\tdata_type\x18\x02 \x01(\x05\x12\x16\n\nfloat_data\x18\x04 \x03(\x02\x42\x02\x10\x01\x12\x16\n\nint32_data\x18\x05 \x03(\x05\x42\x02\x10\x01\x12\x13\n\x0bstring_data\x18\x06 \x03(\x0c\x12\x16\n\nint64_data\x18\x07 \x03(\x03\x42\x02\x10\x01\"|\n\x08\x44\x61taType\x12\r\n\tUNDEFINED\x10\x00\x12\t\n\x05\x46LOAT\x10\x01\x12\t\n\x05UINT8\x10\x02\x12\x08\n\x04INT8\x10\x03\x12\n\n\x06UINT16\x10\x04\x12\t\n\x05INT16\x10\x05\x12\t\n\x05INT32\x10\x06\x12\t\n\x05INT64\x10\x07\x12\n\n\x06STRING\x10\x08\x12\x08\n\x04\x42OOL\x10\t\"\x96\x01\n\x10TensorShapeProto\x12.\n\x03\x64im\x18\x01 \x03(\x0b\x32!.aihwx.TensorShapeProto.Dimension\x1aR\n\tDimension\x12\x13\n\tdim_value\x18\x01 \x01(\x03H\x00\x12\x13\n\tdim_param\x18\x02 \x01(\tH\x00\x12\x12\n\ndenotation\x18\x03 \x01(\tB\x07\n\x05value')
)
_ATTRIBUTEPROTO_ATTRIBUTETYPE = _descriptor.EnumDescriptor(
name='AttributeType',
full_name='aihwx.AttributeProto.AttributeType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNDEFINED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOAT', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRING', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TENSOR', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BOOL', index=5, number=101,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOATS', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTS', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRINGS', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TENSORS', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BOOLS', index=10, number=102,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=287,
serialized_end=430,
)
_sym_db.RegisterEnumDescriptor(_ATTRIBUTEPROTO_ATTRIBUTETYPE)
_TENSORPROTO_DATATYPE = _descriptor.EnumDescriptor(
name='DataType',
full_name='aihwx.TensorProto.DataType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNDEFINED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOAT', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UINT8', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT8', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UINT16', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT16', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT32', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INT64', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRING', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BOOL', index=9, number=9,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=574,
serialized_end=698,
)
_sym_db.RegisterEnumDescriptor(_TENSORPROTO_DATATYPE)
_ATTRIBUTEPROTO = _descriptor.Descriptor(
name='AttributeProto',
full_name='aihwx.AttributeProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='aihwx.AttributeProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='aihwx.AttributeProto.type', index=1,
number=20, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='f', full_name='aihwx.AttributeProto.f', index=2,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='i', full_name='aihwx.AttributeProto.i', index=3,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s', full_name='aihwx.AttributeProto.s', index=4,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='t', full_name='aihwx.AttributeProto.t', index=5,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='b', full_name='aihwx.AttributeProto.b', index=6,
number=101, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='floats', full_name='aihwx.AttributeProto.floats', index=7,
number=7, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ints', full_name='aihwx.AttributeProto.ints', index=8,
number=8, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='strings', full_name='aihwx.AttributeProto.strings', index=9,
number=9, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensors', full_name='aihwx.AttributeProto.tensors', index=10,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bools', full_name='aihwx.AttributeProto.bools', index=11,
number=102, type=8, cpp_type=7, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_ATTRIBUTEPROTO_ATTRIBUTETYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=430,
)
_TENSORPROTO = _descriptor.Descriptor(
name='TensorProto',
full_name='aihwx.TensorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dims', full_name='aihwx.TensorProto.dims', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data_type', full_name='aihwx.TensorProto.data_type', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='float_data', full_name='aihwx.TensorProto.float_data', index=2,
number=4, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='int32_data', full_name='aihwx.TensorProto.int32_data', index=3,
number=5, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='string_data', full_name='aihwx.TensorProto.string_data', index=4,
number=6, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='int64_data', full_name='aihwx.TensorProto.int64_data', index=5,
number=7, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_TENSORPROTO_DATATYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=433,
serialized_end=698,
)
_TENSORSHAPEPROTO_DIMENSION = _descriptor.Descriptor(
name='Dimension',
full_name='aihwx.TensorShapeProto.Dimension',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dim_value', full_name='aihwx.TensorShapeProto.Dimension.dim_value', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dim_param', full_name='aihwx.TensorShapeProto.Dimension.dim_param', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='denotation', full_name='aihwx.TensorShapeProto.Dimension.denotation', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='value', full_name='aihwx.TensorShapeProto.Dimension.value',
index=0, containing_type=None, fields=[]),
],
serialized_start=769,
serialized_end=851,
)
_TENSORSHAPEPROTO = _descriptor.Descriptor(
name='TensorShapeProto',
full_name='aihwx.TensorShapeProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dim', full_name='aihwx.TensorShapeProto.dim', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TENSORSHAPEPROTO_DIMENSION, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=701,
serialized_end=851,
)
_ATTRIBUTEPROTO.fields_by_name['type'].enum_type = _ATTRIBUTEPROTO_ATTRIBUTETYPE
_ATTRIBUTEPROTO.fields_by_name['t'].message_type = _TENSORPROTO
_ATTRIBUTEPROTO.fields_by_name['tensors'].message_type = _TENSORPROTO
_ATTRIBUTEPROTO_ATTRIBUTETYPE.containing_type = _ATTRIBUTEPROTO
_TENSORPROTO_DATATYPE.containing_type = _TENSORPROTO
_TENSORSHAPEPROTO_DIMENSION.containing_type = _TENSORSHAPEPROTO
_TENSORSHAPEPROTO_DIMENSION.oneofs_by_name['value'].fields.append(
_TENSORSHAPEPROTO_DIMENSION.fields_by_name['dim_value'])
_TENSORSHAPEPROTO_DIMENSION.fields_by_name['dim_value'].containing_oneof = _TENSORSHAPEPROTO_DIMENSION.oneofs_by_name['value']
_TENSORSHAPEPROTO_DIMENSION.oneofs_by_name['value'].fields.append(
_TENSORSHAPEPROTO_DIMENSION.fields_by_name['dim_param'])
_TENSORSHAPEPROTO_DIMENSION.fields_by_name['dim_param'].containing_oneof = _TENSORSHAPEPROTO_DIMENSION.oneofs_by_name['value']
_TENSORSHAPEPROTO.fields_by_name['dim'].message_type = _TENSORSHAPEPROTO_DIMENSION
DESCRIPTOR.message_types_by_name['AttributeProto'] = _ATTRIBUTEPROTO
DESCRIPTOR.message_types_by_name['TensorProto'] = _TENSORPROTO
DESCRIPTOR.message_types_by_name['TensorShapeProto'] = _TENSORSHAPEPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AttributeProto = _reflection.GeneratedProtocolMessageType('AttributeProto', (_message.Message,), dict(
DESCRIPTOR = _ATTRIBUTEPROTO,
__module__ = 'onnx_common_pb2'
# @@protoc_insertion_point(class_scope:aihwx.AttributeProto)
))
_sym_db.RegisterMessage(AttributeProto)
TensorProto = _reflection.GeneratedProtocolMessageType('TensorProto', (_message.Message,), dict(
DESCRIPTOR = _TENSORPROTO,
__module__ = 'onnx_common_pb2'
# @@protoc_insertion_point(class_scope:aihwx.TensorProto)
))
_sym_db.RegisterMessage(TensorProto)
TensorShapeProto = _reflection.GeneratedProtocolMessageType('TensorShapeProto', (_message.Message,), dict(
Dimension = _reflection.GeneratedProtocolMessageType('Dimension', (_message.Message,), dict(
DESCRIPTOR = _TENSORSHAPEPROTO_DIMENSION,
__module__ = 'onnx_common_pb2'
# @@protoc_insertion_point(class_scope:aihwx.TensorShapeProto.Dimension)
))
,
DESCRIPTOR = _TENSORSHAPEPROTO,
__module__ = 'onnx_common_pb2'
# @@protoc_insertion_point(class_scope:aihwx.TensorShapeProto)
))
_sym_db.RegisterMessage(TensorShapeProto)
_sym_db.RegisterMessage(TensorShapeProto.Dimension)
_TENSORPROTO.fields_by_name['float_data']._options = None
_TENSORPROTO.fields_by_name['int32_data']._options = None
_TENSORPROTO.fields_by_name['int64_data']._options = None
# @@protoc_insertion_point(module_scope)
|
[
"noreply@github.com"
] |
tunghoang290780.noreply@github.com
|
7d45910362770b2b3ae5bd05ee081e705984bf84
|
b2584b0905f41aa079f7cfa4a4177794e997e6ea
|
/store/admin.py
|
14722e35266d138646e438ee21e5b169aa95ca6e
|
[] |
no_license
|
kazi-akib-abdullah/greatkart-django
|
f4d393b84b19bc57cb98d66b7c3978cb31f19731
|
093ac8d327ae3c81a639b1a7d747fe7ea2422182
|
refs/heads/main
| 2023-06-06T07:30:38.139142
| 2021-06-17T23:24:22
| 2021-06-17T23:24:22
| 377,981,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 357
|
py
|
from django.contrib import admin
from django.contrib import admin
from .models import Product
# Register your models here.
class ProductAdmin(admin.ModelAdmin):
list_display = ('product_name', 'price', 'stock', 'category', 'modified_date', 'is_available')
prepopulated_fields = {'slug':('product_name',)}
admin.site.register(Product, ProductAdmin)
|
[
"45953236+kazi-akib-abdullah@users.noreply.github.com"
] |
45953236+kazi-akib-abdullah@users.noreply.github.com
|
925f4b82b689fec23eb3d316b785b7da9fab7966
|
a05f2d84a3c418c16976a04426eed58488362431
|
/test1.py
|
d1b6555d9b319d2290934f378c2fecbc7e4ad6da
|
[] |
no_license
|
codeproy/newPython
|
8819180d81740c0a7c10f8cb18c1cdb9407ffa93
|
4f2097949d8f8de714aaf77a5d1f145ec9938ef4
|
refs/heads/master
| 2023-05-08T20:25:27.596265
| 2021-05-23T04:48:01
| 2021-05-23T04:48:01
| 69,076,074
| 0
| 0
| null | 2021-05-23T04:14:38
| 2016-09-24T03:01:19
|
Python
|
UTF-8
|
Python
| false
| false
| 60
|
py
|
# test
a = []
for b in a[:]:
else:
print ("not found")
|
[
"partho.ece@gmail.com"
] |
partho.ece@gmail.com
|
96f9ea4e947740bc699e18205ebf697fa111a4f7
|
fd53427af48fa3100e60f91a37285d0c3f1b70d5
|
/jenkins/jenkins_lascon_sql.py
|
bfb880e1f13bc9a2da78ed8ca178eb1fe22043af
|
[] |
no_license
|
devGregA/lascon-demo
|
93da7fdf35fad61392bad1e9f6bd8f3f97de5332
|
7449756b87f459a09c902e7845ee2bbeb8ae155d
|
refs/heads/master
| 2021-01-10T12:00:02.745825
| 2015-10-23T16:04:22
| 2015-10-23T16:04:22
| 44,783,250
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,515
|
py
|
# -*- coding: utf-8 -*-
import imp
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
import subprocess
# Load the project's helper modules directly from their source files.
# NOTE(review): 'ABSOLUTE_PATH' looks like a placeholder that must be replaced
# with the real checkout path before running; `imp` has been deprecated since
# Python 3.4 (importlib is the modern replacement).
report_parser = imp.load_source('out_parse', 'ABSOLUTE_PATH/utils/report_parser.py')
map_wrapper = imp.load_source('map_wrapper', 'ABSOLUTE_PATH/utils/map_wrapper.py')
# Start the report parser before any tests run.
parser = report_parser.out_parse()
parser.start()
# Base path used to locate sqlmap and the saved request file.
path = "ABSOLUTE_PATH"
class Testing(unittest.TestCase):
    """Selenium-driven security test that runs sqlmap against the login page."""

    def setUp(self):
        """Start a Firefox session pointed at the locally running app."""
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(30)
        self.base_url = "http://127.0.0.1:9080/"
        self.verificationErrors = []
        self.accept_next_alert = True

    def tearDown(self):
        """Close the browser so each run does not leak a Firefox process."""
        # The original had no tearDown, leaving the webdriver session open.
        self.driver.quit()

    def test_security(self):
        self.sql_inj_testing()

    def sql_inj_testing(self):
        """Drive sqlmap against login.jsp and fail if an injection is found."""
        driver = self.driver
        driver.get(self.base_url)
        parser.parse_suite("SQL Injection")
        driver.get(self.base_url + 'login.jsp')
        # Run sqlmap non-interactively against the saved request file.
        test_result = map_wrapper.execute('python %s/sqlmap/sqlmap.py -r %s/utils/search-text.txt -p username password --level=3 --risk=3 --flush-session --batch' % (path, path))
        status = test_result == "Not Found"
        if not status:
            # Record the finding before failing the assertion below.
            parser.parse(status, test_result)
        self.assertTrue(status)
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main()
|
[
"you@example.com"
] |
you@example.com
|
95b4a05c7e3d418a43d634c186390312caa0c739
|
54b1eb176245485ea46e89f17b9d4d5ac8e8b0a3
|
/quebap/projects/inferbeddings/sampling.py
|
28d4654eb959b403df1543bef7a53f939e1dfff5
|
[] |
no_license
|
mitchelljeff/modelf
|
1823c5001904c0beeee74c4394632256ae12d3f2
|
2e66a8f26e4d59a8e27c81a52e0e0e6d7ccab410
|
refs/heads/master
| 2021-01-11T18:02:00.436319
| 2017-01-20T16:45:00
| 2017-01-20T16:45:00
| 79,473,771
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,517
|
py
|
import tensorflow as tf
def random_truncated_exponential(w, num_samples):
    """
    Sample from truncated independent exponential distributions using weights w (or inverse exponential distribution for
    positive weight components).
    Args:
        w: a [num_rows, dim] matrix of `num_rows` weight vectors.
        num_samples: the number of samples to produce.
    Returns:
        x,p where `x` is a tensor [num_samples, num_rows, dim] of samples and `p` is a matrix [num_samples, num_rows]
        of the probabilities of each sample with respect to each row.
    """
    # Small constant used to clamp weights away from zero so the divisions
    # and logs below stay finite.
    eps = 0.000001
    # w: [num_rows, dim]
    # returns: [num_samples, num_rows, dim] batch of samples drawn from a truncated exponential over [0,1] using w
    # as parameter
    # NOTE(review): `tf.concat(0, values)` is the pre-TF-1.0 argument order
    # (axis first); this call would need updating for TF >= 1.0 -- confirm the
    # intended TensorFlow version.
    shape = tf.concat(0, ((num_samples,), tf.shape(w))) # [num_samples, num_rows, dim]
    # Replace each weight by +/-eps when it lies inside (-eps, eps), keeping
    # its sign otherwise.
    f1 = tf.minimum(w, -eps)
    f2 = tf.maximum(w, eps)
    is_neg = tf.to_float(tf.less(w, 0.0))
    w_ = is_neg * f1 + (1.0 - is_neg) * f2
    u = tf.random_uniform(shape, 0.0, 1.0) # [num_samples, num_rows, dim]
    z = (tf.exp(w_) - 1.0) # [num_rows, dim]
    # Inverse-CDF transform of the uniform draws.
    x = tf.log(z * u + 1) / w_
    # p = tf.reduce_prod(tf.exp(x * w_) * w_ / z,0)
    # TODO: replace with robuster version
    # p = tf.exp(tf.reduce_sum(tf.log(tf.exp(x * w_) * w_ / z),1))
    # p = tf.exp(tf.reduce_sum(x * w_ + tf.log(w_) - tf.log(z),1))
    # Per-dimension log-densities, summed over dim and exponentiated.
    log_p_components = x * w_ + tf.log(tf.abs(w_)) - tf.log(tf.abs(z))
    p = tf.exp(tf.reduce_sum(log_p_components, 2))
    return x, p
|
[
"sebastian.riedel@gmail.com"
] |
sebastian.riedel@gmail.com
|
8d7a0fb342ea8e2c84a29ddb6e44354ef089308b
|
97f1a7a537865b3b5697100d2b94d5b526d6d1a4
|
/mysite/settings.py
|
573933c0f9f8b068833356345cdb018d24405e9b
|
[] |
no_license
|
farhanulsheikh/Blog-App
|
f6b39e48d02f83df0792dd77764d40dbe7c25165
|
aca10cf9b6ca7eba291cb918492f7a40c582783c
|
refs/heads/main
| 2023-01-01T14:29:48.126782
| 2020-10-28T01:20:54
| 2020-10-28T01:20:54
| 305,951,493
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,306
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from an environment variable before deploying.
SECRET_KEY = '$j3j=7ucx*3pt4@%8xea4w=bqq6-uhu*mruu*gb#z2pvt7exgz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is fine while DEBUG is True; production needs explicit hostnames.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'crispy_forms',
'users.apps.UsersConfig',
'blog.apps.BlogConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# User-uploaded files live under MEDIA_ROOT and are served from MEDIA_URL.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL='/media/'
# django-crispy-forms rendering pack.
CRISPY_TEMPLATE_PACK ='bootstrap4'
# Where to send users after login, and the name of the login URL pattern.
LOGIN_REDIRECT_URL ='blog-home'
LOGIN_URL = 'login'
|
[
"farhan_i@live.ca"
] |
farhan_i@live.ca
|
cda69b02b2d17bd7a70d342fa445f30e0f22a236
|
a5109ed60e25ddaa9cc2488df714f0f19a1b0620
|
/hello.py
|
699d5c38f793fd03829a7d17816bf9fb0c1605ff
|
[] |
no_license
|
sukesuke0718/WebPythonProject01
|
ed6a6e408ef0467db2d4778db99b57a9580cab77
|
a08527e5f45b7cb4049bcad255f8245fbeed7a35
|
refs/heads/master
| 2020-03-31T04:29:27.119163
| 2018-10-07T05:09:04
| 2018-10-07T05:09:04
| 151,907,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
###########################################
# Let's build a web application in Python
# Trying out Flask
###########################################
from flask import Flask
myapp = Flask(__name__)
@myapp.route('/')
def index():
    """Top page: links to the /hello demo page (HTML body is in Japanese)."""
    return '''
    <h2>Flaskの練習ページです</h2>
    <p><a href="/hello">Helloページへ</a></p>
    '''
@myapp.route('/hello')
def hello():
    """Return a plain-text greeting ("Hello" in Japanese)."""
    return 'こんにちは'
@myapp.route('/item/<int:item_id>')
def select_item(item_id):
    """Look up a (name, price) drink tuple by list index and format its price.

    NOTE(review): an out-of-range item_id raises IndexError (HTTP 500).
    """
    items = [('コーヒー',300),('紅茶',300),('ジュース',280),('牛乳',250),('ウーロン茶', 220)]
    item = items[item_id]
    return '{0}は{1}円です'.format(item[0],item[1])
|
[
"sukesuke0718@gmail.com"
] |
sukesuke0718@gmail.com
|
4cd07865f55f378aa5da8e8aa8a16765655310c9
|
886fc24e518143d3602c78be10dfaead1068460b
|
/ios_notifications/__init__.py
|
cb159b8b4efbb1b3c13bf86519957f1425cc406a
|
[
"BSD-3-Clause"
] |
permissive
|
tapanpandita/django-ios-notifications
|
e5558f1c3d7f0ed70afde2b01623005ac4f111ab
|
74b5d03dda770eb2fc2690b0698a2d33e4f29442
|
refs/heads/master
| 2021-01-16T22:35:37.808222
| 2013-02-18T03:49:59
| 2013-02-18T03:49:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 43
|
py
|
# -*- coding: utf-8 -*-
# Package version string.
VERSION = '0.1.3'
|
[
"stephenmuss@gmail.com"
] |
stephenmuss@gmail.com
|
733d810f56a73cb57206d1b5840580439b6f728e
|
32a02fa05cdf63e7c72bcbc3e8b35ec7e2f33130
|
/toutiao_crawler/test_offline_script/filter_hyperlink_subjective.py
|
7627735acd73764bc1f68871e106f63ad1613ad0
|
[] |
no_license
|
TTurn/haozhuo-script
|
f0893279d578ac853829017fcda6039345bb1eea
|
a4b3a927597b02ebc33065ca2a2c55e1673b0369
|
refs/heads/master
| 2020-12-02T21:24:21.616920
| 2017-07-16T05:16:29
| 2017-07-16T05:24:01
| 96,310,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,859
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/6/22 15:30
# @Author : Tuhailong
# @Site :
# @File : url_me_filter.py
# @Software: PyCharm Community Edition
import pymysql
import re
from bs4 import BeautifulSoup
def get_sample():
    """Fetch every row (id, htmls) from the sample table.

    Returns:
        (id_list, content_list, html_list): row ids, plain text extracted from
        each html blob via BeautifulSoup, and the raw html blobs themselves --
        three parallel lists.
    """
    print("获取样本")
    # NOTE(review): database credentials are hard-coded here (and in the other
    # DB helpers) -- they should come from configuration, not source control.
    conn = pymysql.connect(host='116.62.106.69', port=3306, user='datag',passwd='yjkdatag',db='news_crawler',charset='utf8')
    try:
        cursor = conn.cursor()
        # One query is enough: fetch the rows once and count them locally.
        # (The original issued a separate SELECT just to count the rows.)
        cursor.execute("select id, htmls from toutiao_app_combine_unique_20170623")
        results = cursor.fetchall()
        print("一共有{0}个样本".format(len(results)))
        cursor.close()
    finally:
        # Always release the connection, even if the query fails.
        conn.close()
    id_list = [result[0] for result in results]
    content_list = [BeautifulSoup(result[1], 'lxml').get_text() for result in results]
    html_list = [result[1] for result in results]
    return id_list, content_list, html_list
def me_filter(id_list, content_list):
    """Flag articles that read as overly first-person/subjective.

    An article is flagged when occurrences of "我" ("I/me") outnumber
    occurrences of "我们" ("we") by at least 12.  Note that every "我们"
    contains a "我", so both counters include those occurrences.

    Args:
        id_list: article ids; content_list is indexed by position in this list.
        content_list: plain-text article bodies, parallel to id_list.
    Returns:
        The ids of flagged articles, in input order.
    """
    flagged_ids = []
    for idx, article_id in enumerate(id_list):
        text = content_list[idx]
        num_we = len(re.findall('我们', text))
        num_me = len(re.findall('我', text))
        if num_me - num_we >= 12:
            flagged_ids.append(article_id)
    return flagged_ids
def delete_news(id_list):
    """Delete the given article ids from the sample table, committing per row."""
    connection = pymysql.connect(host='116.62.106.69', port=3306, user='datag', passwd='yjkdatag', db='news_crawler',
                           charset='utf8')
    cur = connection.cursor()
    # Parameterized statement: the id is bound by the driver, never interpolated.
    statement = "delete from toutiao_app_combine_unique_20170623 where id = %s"
    for article_id in id_list:
        cur.execute(statement, (article_id,))
        connection.commit()
    cur.close()
    connection.close()
def url_filter(id_list, content_list):
# url识别的正则表达式
pattern = re.compile('[\(|(|\[|①|②|③|:]?(https|http|www){1}(:|\.|[a-z]|\d|_|[A-Z]|/| |\?|%|=|&|#|;)+[\)|)|\]|]]?')
revise_link_id_list = []
revise_link_loc_list = []
revise_link_spec_list = []
for i in range(len(id_list)):
id = id_list[i]
content = content_list[i]
match = re.search(pattern, content)
if match:
spec = match.group()
revise_link_id_list.append(id)
revise_link_loc_list.append(i)
revise_link_spec_list.append(spec)
return revise_link_id_list, revise_link_loc_list, revise_link_spec_list
def sub_html(html, spec):
    """Return *html* with every occurrence of the matched link text removed."""
    return html.replace(spec, "")
def revise_news(revise_link_id_list, revise_link_loc_list, revise_link_spec_list, html_list):
    """Strip the matched link text from each flagged article's html and persist it.

    Args (the first three lists are parallel to each other):
        revise_link_id_list: ids of articles containing a link.
        revise_link_loc_list: each article's index into html_list.
        revise_link_spec_list: the exact substring matched by the URL regex.
        html_list: raw html blobs for the whole sample, indexed by location.
    """
    conn = pymysql.connect(host='116.62.106.69', port=3306, user='datag', passwd='yjkdatag', db='news_crawler',
                           charset='utf8')
    cursor = conn.cursor()
    for i in range(len(revise_link_id_list)):
        id = revise_link_id_list[i]
        # Progress message: "revising article #<i+1> <id>".
        print("正在修正第{0}篇文章 {1}".format(i+1, id))
        loc = revise_link_loc_list[i]
        html = html_list[loc]
        spec = revise_link_spec_list[i]
        # Remove the matched link text from the stored html.
        html_clean = sub_html(html, spec)
        sql = "update toutiao_app_combine_unique_20170623 set htmls = %s where id = %s"
        cursor.execute(sql, (html_clean, id))
        conn.commit()
    cursor.close()
    conn.close()
if __name__ == "__main__":
    # Pipeline: load the sample -> delete overly-subjective articles ->
    # strip hyperlinks from the remaining ones.
    id_list, content_list, html_list = get_sample()
    del_me_id_list = me_filter(id_list, content_list)
    # "Articles containing 我:" followed by "count:".
    print("包含“我”的文章:{0}".format(del_me_id_list))
    print("长度:{0}".format(len(del_me_id_list)))
    delete_news(del_me_id_list)
    revise_link_id_list, revise_link_loc_list, revise_link_spec_list = url_filter(id_list, content_list)
    # "Articles containing links:" followed by "count:".
    print("包含链接的文章:{0}".format(revise_link_id_list))
    print("长度:{0}".format(len(revise_link_id_list)))
    revise_news(revise_link_id_list, revise_link_loc_list, revise_link_spec_list, html_list)
    # Echo each revised article id alongside the removed link text.
    for i in range(len(revise_link_id_list)):
        id = revise_link_id_list[i]
        spec = revise_link_spec_list[i]
        print(str(id)+"-------------"+spec)
|
[
"tuhailong@hotmail.com"
] |
tuhailong@hotmail.com
|
61f3a65a9aec4745f98829c8e888a7d436ff3a81
|
fb344544d2692df41c4186e63cc5b316a47f30ca
|
/pycoinone/response/v2.py
|
c945f22de7ba644dc9892dedecdc8e65c36b5174
|
[] |
no_license
|
jaehong-park-net/python-coinone
|
5aecf101ec1bfe5354292fb781dbcd2c93bb6fe6
|
94a9cdb14f5c8b3b654d6275b3f834a4903b40c9
|
refs/heads/master
| 2020-08-31T02:03:01.370617
| 2020-01-02T06:14:22
| 2020-01-02T06:14:22
| 218,552,972
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,714
|
py
|
from pycoinone.response import (
coinone_api_response,
UserInfoResponse,
BalanceResponse,
LimitOrderResponse,
PendingOrdersResponse,
CompleteOrdersResponse
)
@coinone_api_response
class UserInfoResponseV2(UserInfoResponse):
    """
    {
        "result": "success",
        "errorCode": "0",
        "userInfo": {
            "virtualAccountInfo": {
                "depositor": "John",
                "accountNumber": "0123456789",
                "bankName": "bankName"
            },
            "mobileInfo": {
                "userName": "John",
                "phoneNumber": "0123456789",
                "phoneCorp": "1",
                "isAuthenticated": "true"
            },
            "bankInfo": {
                "depositor": "John",
                "bankCode": "20",
                "accountNumber": "0123456789",
                "isAuthenticated": "true"
            },
            "emailInfo": {
                "isAuthenticated": "true",
                "email": "john@coinone.com"
            },
            "securityLevel": "4",
            "feeRate": {
                "btc": {
                    "maker": "0.001",
                    "taker": "0.001"
                },
                "bch": {
                    "maker": "0.001",
                    "taker": "0.001"
                },
                "eth": {
                    "maker": "0.001",
                    "taker": "0.001"
                }
            }
        }
    }
    """
    # v2 endpoint variant; all behavior is inherited from UserInfoResponse.
    # The docstring above is an example payload this response wraps.
    pass
@coinone_api_response
class BalanceResponseV2(BalanceResponse):
    """
    {
        "result": "success",
        "errorCode": "0",
        "normalWallets": [
            {
                "balance": "6.1151",
                "label": "First Wallet"
            },
            {
                "balance": "6.9448",
                "label": "Second Wallet"
            }
        ],
        "btc": {
            "avail": "344.33703699",
            "balance": "344.33703699"
        },
        "bch": {
            "avail": "1.00001234",
            "balance": "1.00001234"
        },
        "eth": {
            "avail": "1.00001234",
            "balance": "1.00001234"
        },
        "krw": {
            "avail": "6901425",
            "balance": "6901430"
        }
    }
    """
    # v2 endpoint variant; all behavior is inherited from BalanceResponse.
    # The docstring above is an example payload this response wraps.
    pass
@coinone_api_response
class LimitOrderResponseV2(LimitOrderResponse):
    """
    {
        "result": "success",
        "errorCode": "0",
        "orderId": "8a82c561-40b4-4cb3-9bc0-9ac9ffc1d63b"
    }
    """
    # v2 endpoint variant; all behavior is inherited from LimitOrderResponse.
    pass
@coinone_api_response
class PendingOrdersResponseV2(PendingOrdersResponse):
    """
    {
        "result": "success",
        "errorCode": "0",
        "limitOrders": [
            {
                "index": "0",
                "orderId": "68665943-1eb5-4e4b-9d76-845fc54f5489",
                "timestamp": "1449037367",
                "price": "444000.0",
                "qty": "0.3456",
                "type": "ask",
                "feeRate": "-0.0015"
            }
        ]
    }
    """
    # v2 endpoint variant; all behavior is inherited from PendingOrdersResponse.
    pass
@coinone_api_response
class CompleteOrdersResponseV2(CompleteOrdersResponse):
    """
    {
        "result": "success",
        "errorCode": "0",
        "completeOrders": [
            {
                "timestamp": "1416561032",
                "price": "419000.0",
                "type": "bid",
                "qty": "0.001",
                "feeRate": "-0.0015",
                "fee": "-0.0000015",
                "orderId": "E84A1AC2-8088-4FA0-B093-A3BCDB9B3C85"
            }
        ]
    }
    """
    # v2 endpoint variant; all behavior is inherited from CompleteOrdersResponse.
    pass
|
[
"jaehong.park.net@gmail.com"
] |
jaehong.park.net@gmail.com
|
222632780ec42a784bc66d6176316fa5ba923b58
|
f3d40d64a166cc0089efc49ee4477ca50ea27e91
|
/23. Merge k Sorted Lists.py
|
903c4f7fac86aa55ef59c34de0b4c14028d3a951
|
[] |
no_license
|
liuxiaonan1990/leetcode
|
129965efdb2218744fda29a12ea1d25e9170ae8d
|
84f61ec2aa21b27c0d96b1c9a191c2b0c1ba5810
|
refs/heads/master
| 2020-12-14T08:51:45.540202
| 2017-10-12T03:19:10
| 2017-10-12T03:19:10
| 95,516,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def merge2Lists(self, l1, l2):
        """Merge two sorted linked lists and return the head of the result."""
        # Dummy head simplifies appending.  (The original allocated two dummy
        # nodes and immediately discarded one of them.)
        head = ListNode(0)
        p = head
        while l1 is not None and l2 is not None:
            if l1.val < l2.val:
                p.next = l1
                l1 = l1.next
            else:
                p.next = l2
                l2 = l2.next
            p = p.next
        # At most one list is non-empty now; attach the remainder in one step
        # instead of walking it node by node.
        p.next = l1 if l1 is not None else l2
        return head.next

    def mergeKLists_old(self, lists):
        """
        Sequentially fold every list into the first one: O(k*n) total work.
        :type lists: List[ListNode]
        :rtype: ListNode
        """
        if len(lists) == 0:
            return lists
        if len(lists) == 1:
            return lists[0]
        head = lists[0]
        for i in range(1, len(lists)):
            head = self.merge2Lists(head, lists[i])
        return head

    def func(self, lists):
        """Merge adjacent pairs of lists, roughly halving how many remain."""
        if len(lists) < 2:
            return lists
        merged = []
        i = 0
        while i < len(lists):
            if i + 1 < len(lists):
                merged.append(self.merge2Lists(lists[i], lists[i + 1]))
                i += 2
            else:
                # Odd list out: carry it to the next round unmerged.
                merged.append(lists[i])
                break
        return merged

    def mergeKLists(self, lists):
        """
        Divide-and-conquer pairwise merge: O(n log k) total work.
        :type lists: List[ListNode]
        :rtype: ListNode
        """
        ret = lists
        while len(ret) > 1:
            ret = self.func(ret)
        # len(ret) is now 0 or 1 (the original's final `return ret` after the
        # two ifs was unreachable).
        return ret[0] if len(ret) == 1 else ret
|
[
"liuxiaonan@100tal.com"
] |
liuxiaonan@100tal.com
|
034cb6e4a5eff7b56eb07284e9c5c6d26dc1bfe0
|
a78514f0f9f738d8da864622d5ca97fd58e33a17
|
/pysh/tests/test_examples/test_posix.py
|
17eea77e9c44c81501d740f481944a74124cda7c
|
[] |
no_license
|
caervs/pysh
|
2e89d5c0adab849f2bfb9d77ecb1a89ffd312672
|
fa9e2c16b50e587c40b97b93ce012b52c67e13b1
|
refs/heads/master
| 2021-01-10T12:56:50.550520
| 2016-11-12T07:07:13
| 2016-11-15T22:08:15
| 49,346,479
| 18
| 1
| null | 2016-04-19T04:16:06
| 2016-01-09T23:55:35
|
Python
|
UTF-8
|
Python
| false
| false
| 355
|
py
|
import io
import unittest
from pysh.examples import posix
class PosixTestCase(unittest.TestCase):
    """Common base class for the POSIX-example tests; currently just a marker."""
    pass
class GrepWorks(PosixTestCase):
    def test_matched_output(self):
        """grep("Wo") should echo only the matching line, newline-terminated."""
        stdin = io.StringIO("Hello\nWorld!")
        stdout = io.StringIO()
        posix.grep("Wo")(stdin=stdin, stdout=stdout)
        # Use unittest's assertion so the check survives `python -O` and gives
        # a readable diff on failure (the original used a bare `assert`).
        self.assertEqual(stdout.getvalue(), "World!\n")
|
[
"rdabrams@gmail.com"
] |
rdabrams@gmail.com
|
de73ff367943c136bc322b995e41b1487114249c
|
7527086cdf51cef2f7ad6064ffb580011b29cc24
|
/routeme/google/models.py
|
d8417b69652d42057890967c99d30556c95f3538
|
[] |
no_license
|
COMU/routeme
|
41da683867fcd7d88c035dac7dfe2d7f36315043
|
c48fed00f9bb8747effe2109febeb9585488037d
|
refs/heads/master
| 2021-05-26T12:49:40.162932
| 2012-03-07T11:03:41
| 2012-03-07T11:03:41
| 3,165,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 513
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
from routeme.auth.models import LoginProfile
from routeme.google.backend import GoogleBackend
class GoogleProfile(LoginProfile):
    """Login profile backed by a Google account (Django model)."""
    # Basic identity fields captured from the Google account.
    email = models.CharField(max_length=100)
    firstname = models.CharField(max_length=100)
    lastname = models.CharField(max_length=100)
    def getLoginBackend(self, request):
        """Return the authentication backend bound to this profile/request."""
        return GoogleBackend(self, request)
    def __unicode__(self):
        # Python 2-style display name; the email is used as the label.
        return self.email
|
[
"demircan.serhat@gmail.com"
] |
demircan.serhat@gmail.com
|
5fe5b432ef2a1add3ff376f555b355954413d5a4
|
4f381c00eeedaf9d1f2b0acd8b751a484b110d37
|
/studybuddy/urls.py
|
8b76e1607a7cd1a32902c937dbca595bfe16bd20
|
[] |
no_license
|
rafsan99/studdy-buddy
|
acde8df476ab47b3da69a2f6101595c36ffce74e
|
50f3ff3a84d46f35497cdadce6cd2f642a750dcd
|
refs/heads/main
| 2023-08-21T20:49:51.324298
| 2021-10-10T11:48:55
| 2021-10-10T11:48:55
| 415,570,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
"""studybuddy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Only the admin site is wired up so far; app URLconfs can be included later.
urlpatterns = [
    path('admin/', admin.site.urls),
]
|
[
"rafsan.redwan69@gmail.com"
] |
rafsan.redwan69@gmail.com
|
c21fd10cd11e7750a5a0ad15ea4afe2db3c54928
|
32d07c93e6b71fba0fa6318b1e17f3b0952dcfd5
|
/face.py
|
73fa4fe1ebd5ce82dbd5a709fd7d1c2f5492c898
|
[] |
no_license
|
mrabhi05/FaceDetection
|
22906ffb1200388ab1e99cb6caa15cee82172085
|
afb1aac41b56b19536ddb3597645d46f14c9d0c3
|
refs/heads/main
| 2022-12-18T18:05:34.537288
| 2020-10-02T18:31:03
| 2020-10-02T18:31:03
| 300,702,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
import cv2

# Load pre-trained Haar-cascade face data.
# Raw string so the Windows backslashes are not treated as escape sequences.
trained_face_data = cv2.CascadeClassifier(r'D:\Abhi\Study Zone\Projects\Face Detector\haarcascade_frontalface_default.xml')

# Capture video from the default webcam.
webcam = cv2.VideoCapture(0)

# Process frames until the user quits or the camera stops delivering frames.
while True:
    # Read the current frame; stop cleanly if the capture fails.
    successful_capture, frame = webcam.read()
    if not successful_capture:
        break
    # Haar cascades operate on grayscale images.
    grayscale_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect faces.
    face_coordinates = trained_face_data.detectMultiScale(grayscale_img)
    # Draw a green rectangle around each detected face.
    for (x, y, w, h) in face_coordinates:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('Face Detection', frame)
    key = cv2.waitKey(1)
    # Stop when Q (81) or q (113) is pressed.
    if key == 81 or key == 113:
        break

# Release the webcam and close the preview window.
webcam.release()
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
mrabhi05.noreply@github.com
|
873d8b1768d130de659f3272c9a1ce25cb9be07a
|
88107d99bba6f2cc6a0b24b8ec40c0c425079e0f
|
/Punto_1.py
|
2a1d6e89278e8c058a3f303b8c90a176a31c26e1
|
[] |
no_license
|
Magnodev12/Prueba-Tecnica
|
0eae0d85ca3f1b6050611fa658578984f3b3fe74
|
e505a18d4a01abe089ef8ebd3ce11b006ab832cd
|
refs/heads/master
| 2023-07-14T14:14:15.178745
| 2021-08-22T22:48:49
| 2021-08-22T22:48:49
| 398,903,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
n=5
def cruz(n):
    """Return an n x n ASCII cross: 'X' on both diagonals, '_' elsewhere.

    A size of 0 prints "ERROR" and yields the empty string.
    """
    if n == 0:
        print("ERROR")
        return ""
    filas = []
    for i in range(n):
        fila = "".join("X" if (j == i or j == n - i - 1) else "_" for j in range(n))
        filas.append(fila + "\n")
    return "".join(filas)
print(cruz(n))
|
[
"noreply@github.com"
] |
Magnodev12.noreply@github.com
|
f2985c2acb7271795c1237493f9ab033931a22e5
|
28a32fa0a1cbead19bac3f54ace2cb3f2f8b3e23
|
/stream/api/subscription.py
|
f989747db84b3629723cdb4b74ad14d5737da4b2
|
[] |
no_license
|
ethanray123/Channelfix-Internship
|
5fba70b3d5e874e79176c84b05e3ae426d651197
|
5735223344db9f6c86054355e723d3b1795cd94c
|
refs/heads/master
| 2020-03-17T13:36:07.869433
| 2018-06-15T11:30:15
| 2018-06-15T11:30:15
| 133,637,679
| 0
| 0
| null | 2018-06-15T11:30:16
| 2018-05-16T08:53:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
from django.views import generic
from stream.models import Subscription
from django.http import Http404, JsonResponse
class SubscriptionAPI(generic.View):
    """AJAX-only endpoint returning a sliced JSON list of subscriptions."""
    model = Subscription
    def get(self, request, *args, **kwargs):
        """Return subscriptions for the subscriber given in the query string.

        Expects GET params: 'subsciber__pk' (sic), plus 'start' and 'end'
        slice bounds.  Non-AJAX requests get a 404.
        NOTE(review): the GET key 'subsciber__pk' is misspelled (missing 'r')
        relative to the ORM filter 'subscriber__pk' -- confirm what the
        JavaScript caller actually sends before renaming it.
        """
        if not request.is_ajax():
            raise Http404
        # Create queryset.
        queryset = self.model.objects.filter(
            subscriber__pk=request.GET['subsciber__pk'])
        # Get offset.
        queryset = queryset[int(request.GET['start']):int(request.GET['end'])]
        # Generate response.
        response = []
        for obj in queryset:
            data = {}
            data['id'] = obj.pk
            data['subscriber'] = {
                'id': obj.subscriber.pk,
                'username': obj.subscriber.username
            }
            data['publisher'] = {
                'id': obj.publisher.pk,
                'username': obj.publisher.username
            }
            # List all fields here
            response.append(data)
        return JsonResponse(
            response, content_type="application/json", safe=False)
|
[
"rendave.lecciones3@gmail.com"
] |
rendave.lecciones3@gmail.com
|
752dfa2c8ef5b98706578a165c6fd4679eb50c42
|
d8a935d00a9a77aac031cdf497f55a75fb0cb610
|
/Scripts_confg_camera/cap_date_timelapse.py
|
a23a4cb741cab833f6cce86a7434b79a721ceac3
|
[] |
no_license
|
Vihelisa/PiCameraTests
|
b7909d957c834e41853bdc7808a190dad06abd5a
|
3fa88f24acf587aa306342389a0f413c94bebd43
|
refs/heads/main
| 2023-06-30T19:16:42.007947
| 2021-08-09T16:37:25
| 2021-08-09T16:37:25
| 394,365,629
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
from time import sleep
from picamera import PiCamera
from datetime import datetime, timedelta
'''
Captures images with a time gap between shots driven by the local machine
clock, saving each file named with the timestamp at which it was taken.
'''
def wait():
    """Sleep until the start of the next full hour (local time)."""
    # The original passed timedelta(hour=1), but timedelta has no 'hour'
    # keyword (only 'hours'), so every call raised TypeError -- which matches
    # the author's note that this never worked in tests.
    next_hour = (datetime.now() + timedelta(hours=1)).replace(
        minute=0, second=0, microsecond=0)
    delay = (next_hour - datetime.now()).seconds
    sleep(delay)
camera = PiCamera()
camera.start_preview()
# Wait for the top of the hour, then capture one frame per hour, naming each
# file with its capture timestamp.
wait()
for filename in camera.capture_continuous('img{timestamp:%Y-%m-%d-%H-%M}.jpg'):
    print('Captured %s' % filename)
    wait()
|
[
"72274329+Vihelisa@users.noreply.github.com"
] |
72274329+Vihelisa@users.noreply.github.com
|
e31d555c76b6e861f608c827fd4c72afa496f6af
|
7ec9c49fa0f8f283b2c793566276d00f612a3b8b
|
/oldcontrib/media/image/admin.py
|
fadf2f181224895ed2c5ba6f1edcd8e89fe26a39
|
[
"BSD-3-Clause"
] |
permissive
|
servee/django-servee-oldcontrib
|
f432cae589d617d841b79111f6315a1853887a30
|
836447ebbd53db0b53879a35468c02e57f65105f
|
refs/heads/master
| 2021-01-18T22:54:25.062212
| 2016-04-18T20:30:43
| 2016-04-18T20:30:43
| 1,524,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
from django.contrib import admin
from oldcontrib.media.image.models import Image
class ImageAdmin(admin.ModelAdmin):
    """Admin options for the Image model: show only the title column."""
    list_display = ('title',)
admin.site.register(Image, ImageAdmin)
|
[
"issac.kelly@gmail.com"
] |
issac.kelly@gmail.com
|
e9ec1e470a754514330b03be6a7cb0561bc86464
|
5e83d62064ea4fd954820960306fb06cc8f0f391
|
/products/serializers.py
|
365966d6816dda4260eea788289ea5d68d1c05b9
|
[] |
no_license
|
bharatkumarrathod/cfe_ecommerce2_RESTapi
|
eff2fad0cbff7cb3def2c13de282b085aba7291d
|
a081cdbf10c1fbde58e128b9c9b287443c726071
|
refs/heads/master
| 2020-12-25T21:43:44.166109
| 2015-10-27T21:04:19
| 2015-10-27T21:04:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,598
|
py
|
from rest_framework import serializers
from .models import Category, Product, Variation
class VariationSerializer(serializers.ModelSerializer):
    """Nested representation of a product variation (id, title, price)."""
    class Meta:
        model = Variation
        fields = [
            'id',
            'title',
            'price',
        ]
class ProductSerializer(serializers.ModelSerializer):
    """List serializer for products: detail link, first image URL, variations."""
    url = serializers.HyperlinkedIdentityField(view_name='product_detail_api')
    variation_set = VariationSerializer(many=True, read_only=True)
    image = serializers.SerializerMethodField()
    class Meta:
        model = Product
        fields = [
            'url',
            'id',
            'title',
            'image',
            'variation_set',
        ]
    def get_image(self, obj):
        """Return the first product image's URL, or None when there is none."""
        # first() yields None for imageless products, making `.image` raise
        # AttributeError; catch only that (the original bare `except:` hid
        # every other failure too).
        try:
            return obj.productimage_set.first().image.url
        except AttributeError:
            return None
class ProductDetailSerializer(serializers.ModelSerializer):
    """Detail representation of a product, including its first image URL."""
    variation_set = VariationSerializer(many=True, read_only=True)
    image = serializers.SerializerMethodField()
    class Meta:
        model = Product
        fields = [
            'id',
            'title',
            'description',
            'price',
            'image',
            'variation_set',
        ]
    def get_image(self, obj):
        # NOTE(review): unlike the sibling serializers, this has no guard --
        # a product with no images makes first() return None and this line
        # raise AttributeError.  Confirm whether that can occur in practice.
        return obj.productimage_set.first().image.url
class ProductDetailUpdateSerializer(serializers.ModelSerializer):
    """Detail serializer that also supports create/update of products."""
    variation_set = VariationSerializer(many=True, read_only=True)
    image = serializers.SerializerMethodField()
    class Meta:
        model = Product
        fields = [
            'id',
            'title',
            'description',
            'price',
            'image',
            'variation_set',
        ]
    def get_image(self, obj):
        # Return the first image's URL, or None for imageless products.
        # NOTE(review): the bare `except:` also swallows unrelated errors;
        # `except AttributeError:` would be the precise guard.
        try:
            return obj.productimage_set.first().image.url
        except:
            return None
    def create(self, validated_data):
        title = validated_data['title']
        # NOTE(review): this lookup's result is discarded, and get() raises
        # DoesNotExist when no product has this title -- so creating a product
        # with a brand-new title would fail here.  The check looks inverted;
        # confirm whether a uniqueness test was intended.
        Product.objects.get(title=title)
        product = Product.objects.create(**validated_data)
        return product
    def update(self, instance, validated_data):
        # Only the title is applied from the incoming data.
        instance.title = validated_data["title"]
        instance.save()
        return instance
class CategorySerializer(serializers.ModelSerializer):
    """Category with a hyperlink to its detail view and its nested products."""
    url = serializers.HyperlinkedIdentityField(view_name='category_detail_api')
    product_set = ProductSerializer(many=True)
    class Meta:
        model = Category
        fields = [
            'url',
            'id',
            'title',
            'description',
            'product_set',
            # 'default_category',
        ]
|
[
"carlofusiello@gmail.com"
] |
carlofusiello@gmail.com
|
f4c82d74b5f27744442849fdd0b7e0378486fa25
|
2210eb46294c460fc635f4a45a62744f159b7737
|
/backend/uncle_park/spot/admin.py
|
0cc46629cb3a7b2e237d938a7fe5e002ae300d70
|
[] |
no_license
|
Kashish-2001/uncle-park
|
5dc294cd04e8c3eaac96f00f4b29d736dbe9f97b
|
b27e065d565a42e95c81ac45253a4ed8ff632135
|
refs/heads/main
| 2023-03-19T00:09:19.721001
| 2021-03-12T00:49:25
| 2021-03-12T00:49:25
| 346,879,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
from django.contrib import admin
from .models import ParkingSpot
# Expose ParkingSpot in the Django admin with default options.
admin.site.register(ParkingSpot)
|
[
"kashish.agrawal1907@gmail.com"
] |
kashish.agrawal1907@gmail.com
|
84c9a51305ac8e16c0952914654b4baabdc6ebec
|
b05a76d9b05b7dd09a0a0ddcaef041dfb7299247
|
/stackl/__init__.py
|
a3ce7e90ae4d9e48303ba531c5b2cd995595f54b
|
[
"MIT"
] |
permissive
|
ArtOfCode-/stackl
|
5af7d3b8ddc0c4af402ed92bcc51e0dfbbca9193
|
c66c025fa39c5fc5a6dbda7f0ea0628ee526b4b6
|
refs/heads/master
| 2020-04-03T00:48:50.545277
| 2018-10-28T13:19:06
| 2018-10-28T13:19:06
| 154,909,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,865
|
py
|
import sys
import threading
from logging import StreamHandler
import logging
import os.path
import re
import pickle
import json
import requests
from bs4 import BeautifulSoup
from stackl.errors import LoginError, InvalidOperationError
from stackl.models import Room, Message
from stackl.events import Event
from stackl.wsclient import WSClient
VERSION = '0.0.6b0'
class ChatClient:
    def __init__(self, **kwargs):
        """
        Initialise a new ChatClient object. Valid kwargs are:
        :param kwargs['default_server']: one of stackexchange.com, stackoverflow.com, or meta.stackexchange.com,
        depending on where you want the client to default to.
        :param kwargs['log_location']: a logging.Handler object (e.g. StreamHandler or FileHandler) specifying a log
        location
        :param kwargs['log_level']: an integer, usually one of the logging.* constants such as logging.DEBUG, specifying
        the minimum effective log level
        """
        # NOTE(review): the `or` fallbacks mean falsy values (e.g.
        # log_level=0 / logging.NOTSET) are replaced by the defaults.
        self.default_server = kwargs.get('default_server') or 'stackexchange.com'
        log_location = kwargs.get('log_location') or StreamHandler(stream=sys.stdout)
        log_level = kwargs.get('log_level') or logging.DEBUG
        self.logger = logging.getLogger('stackl')
        self.logger.setLevel(log_level)
        self.logger.addHandler(log_location)
        # Single requests session reused for all HTTP calls.
        self.session = requests.Session()
        self.session.headers.update({'User-Agent': 'stackl'})
        # Rooms this client has joined.
        self.rooms = []
        self._handlers = []
        # Per-server state: websocket clients, chat fkeys, and user ids.
        self._sockets = {}
        self._fkeys = {}
        # Servers we have successfully authenticated against.
        self._authed_servers = []
        self._ids = {}
def login(self, email, password, **kwargs):
"""
Log the client instance into Stack Exchange. Will default to logging in using cached cookies, if provided, and
fall back to logging in with credentials.
:param email: the email of the Stack Exchange account you want to log in as
:param password: the corresponding account password
:param kwargs: pass "cookie_file" to specify where cached cookies are located (must be a pickle file)
:return: the logged-in requests.Session if successful
"""
logged_in = False
if 'cookie_file' in kwargs and os.path.exists(kwargs['cookie_file']):
with open(kwargs['cookie_file'], 'rb') as f:
self.session.cookies.update(pickle.load(f))
logged_in = self._verify_login(kwargs.get('servers') or [self.default_server])
if logged_in is False:
self.logger.warn('Cookie login failed. Falling back to credential login.')
for n, v in self.session.cookies.items():
self.logger.info('{}: {}'.format(n, v))
if not logged_in:
logged_in = self._credential_authenticate(email, password, kwargs.get('servers') or [self.default_server])
if not logged_in:
self.logger.critical('All login methods failed. Cannot log in to SE.')
raise LoginError('All available login methods failed.')
else:
self._authed_servers = kwargs.get('servers') or [self.default_server]
return self.session
    def id(self, server):
        """
        Get the ID of the logged-in user on the specified server.
        :param server: the chat server from which to return a user ID
        :return: Integer
        """
        # Raises KeyError if no id has been stored for `server`.
        return self._ids[server]
    def join(self, room_id, server):
        """
        Join a room and start processing events from it.
        :param room_id: the ID of the room you wish to join
        :param server: the server on which the room is hosted
        :return: None
        :raises InvalidOperationError: if the client is not authenticated to *server*
        """
        if server not in self._authed_servers:
            raise InvalidOperationError('Cannot join a room on a host we haven\'t authenticated to!')
        room = Room(server, room_id=room_id)
        self.rooms.append(room)
        self.session.get("https://chat.{}/rooms/{}".format(server, room_id), data={'fkey': self._fkeys[server]})
        # Seed the room with the most recent 100 messages from the events endpoint.
        events = self.session.post("https://chat.{}/chats/{}/events".format(server, room_id), data={
            'fkey': self._fkeys[server],
            'since': 0,
            'mode': 'Messages',
            'msgCount': 100
        }).json()['events']
        event_data = [Event(x, server, self) for x in events]
        room.add_events(event_data)
        # Ask chat for a room-scoped websocket URL.
        ws_auth_data = self.session.post("https://chat.{}/ws-auth".format(server), data={
            'fkey': self._fkeys[server],
            'roomid': room_id
        }).json()
        # The websocket endpoint needs the chat cookies passed explicitly as a header string.
        cookie_string = ''
        for cookie in self.session.cookies:
            if cookie.domain == 'chat.{}'.format(server) or cookie.domain == '.{}'.format(server):
                cookie_string += '{}={};'.format(cookie.name, cookie.value)
        # Resume the event stream from the newest event we already have.
        last_event_time = sorted(events, key=lambda x: x['time_stamp'])[-1]['time_stamp']
        ws_uri = '{}?l={}'.format(ws_auth_data['url'], last_event_time)
        # Only one socket per server: replace any existing open socket.
        if server in self._sockets and self._sockets[server].open:
            self._sockets[server].close()
        self._sockets[server] = WSClient(ws_uri, cookie_string, server, self._on_message)
def send(self, content, room=None, room_id=None, server=None):
"""
Send a message to the specified room.
:param content: the contents of the message you wish to send
:param room: the ID of the room you wish to send it to
:param server: the server on which the room is hosted
:return: None
"""
if (room is None and room_id is None) or server is None:
raise InvalidOperationError('Cannot send a message to a non-existent room or a non-existent server.')
if "\n" not in content and len(content) > 500:
raise ValueError('Single-line messages must be a maximum of 500 chars long.')
room_id = room_id or room.id
for i in range(1, 3):
response = self.session.post('https://chat.{}/chats/{}/messages/new'.format(server, room_id), data={
'fkey': self._fkeys[server],
'text': content
})
if response.status_code == 200:
break
elif i == 3:
raise RuntimeError('Failed to send message. No, I don\'t know why.')
message_data = response.json()
parent_match = re.match(r'^:(\d+) ', content)
message = Message(server, message_id=message_data['id'], timestamp=message_data['time'], content=content,
room_id=room_id, user_id=self._ids[server],
parent_id=None if parent_match is None else parent_match[1])
return message
def add_handler(self, handler, **kwargs):
"""
Add an event handler for messages received from the chat websocket.
:param handler: the handler method to call for each received event
:return: None
"""
self._handlers.append([handler, kwargs])
    def _credential_authenticate(self, email, password, servers):
        """
        Authenticate with Stack Exchange using provided credentials.
        Logs in via StackApps, then uses SE's universal-login endpoint to
        propagate the session to every network site before verifying chat access.
        :param email: the email of the Stack Exchange account you want to log in as
        :param password: the corresponding account password
        :param servers: chat servers whose login state is verified afterwards
        :return: a success boolean
        :raises LoginError: if the fkey cannot be scraped or a CAPTCHA is triggered
        """
        # Scrape the CSRF fkey from the StackApps login form.
        fkey_page = self.session.get("https://stackapps.com/users/login")
        fkey_soup = BeautifulSoup(fkey_page.text, 'html.parser')
        fkey_input = fkey_soup.select('input[name="fkey"]')
        if len(fkey_input) <= 0:
            raise LoginError('Failed to get fkey from StackApps. Wat?')
        fkey = fkey_input[0].get('value')
        login_post = self.session.post("https://stackapps.com/users/login", data={
            'email': email,
            'password': password,
            'fkey': fkey
        })
        # A captcha iframe in the response means automated login was blocked.
        login_soup = BeautifulSoup(login_post.text, 'html.parser')
        iframes = login_soup.find_all('iframe')
        if any(['captcha' in x.get('src') for x in iframes]):
            raise LoginError('Login triggered a CAPTCHA - cannot proceed.')
        # Universal login: request per-site tokens, then hit each site's login pixel.
        tokens = self.session.post("https://stackapps.com/users/login/universal/request", headers={
            'Referer': 'https://stackapps.com/'
        }).json()
        for site_token in tokens:
            self.session.get("https://{}/users/login/universal.gif".format(site_token['Host']), data={
                'authToken': site_token['Token'],
                'nonce': site_token['Nonce']
            }, headers={
                'Referer': 'https://stackapps.com/'
            })
        return self._verify_login(servers)
def _verify_login(self, servers):
"""
Verifies that login with cached cookies has been successful for all the given chat servers.
:param servers: a list of servers to check for successful logins
:return: a success boolean
"""
statuses = []
for server in servers:
chat_home = self.session.get("https://chat.{}/".format(server))
chat_soup = BeautifulSoup(chat_home.text, 'html.parser')
self._fkeys[server] = chat_soup.select('input[name="fkey"]')[0].get('value')
topbar_links = chat_soup.select('.topbar-links span.topbar-menu-links a')
if len(topbar_links) <= 0:
raise LoginError('Unable to verify login because page layout wasn\'t as expected. Wat?')
elif topbar_links[0].text == 'log in':
raise LoginError('Failed to log in to {}'.format(server))
else:
statuses.append(True)
self._ids[server] = int(re.match(r'/users/(\d+)', topbar_links[0].get('href'))[1])
return len(statuses) == 3 and all(statuses)
def _on_message(self, data, server):
"""
Internal. Handler passed to WSClient to handle incoming websocket data before it reaches the client application.
:param data: the raw text data received from the websocket
:param server: the server on which the message was received
:return: None
"""
try:
data = json.loads(data)
except json.JSONDecodeError:
self.logger.warn('Received non-JSON data from WS. Bail!')
return
events = [v['e'] for k, v in data.items() if k[0] == 'r' and 'e' in v]
events = [x for s in events for x in s]
for event_data in events:
event = Event(event_data, server, self)
handlers = [x[0] for x in self._handlers
if all([k in event_data and event_data[k] == v for k, v in x[1].items()])]
for handler in handlers:
def run_handler():
handler(event, server)
threading.Thread(name='handler_runner', target=run_handler).start()
def _chat_post_fkeyed(self, server, path, data=None):
"""
Sends a POST request to chat to perform an action, automatically inserting the chat server and fkey.
:param server: the server on which to perform the action
:param path: the host-less path to send the request to
:return: requests.Response
"""
req_data = {'fkey': self._fkeys[server]}
if data is not None:
req_data.update(data)
return self.session.post('https://chat.{}{}'.format(server, path), data=req_data)
def get_message(self, message_id, server):
soup = BeautifulSoup(self.session.get('https://chat.{}/transcript/message/{}'.format(server, message_id)).text,
'html.parser')
message = soup.select('#message-{}'.format(message_id))
user_id = re.match(r'/users/(\d+)', message.parent.parent.select('.signature .username a')[0].get('href'))[1]
room_id = re.match(r'/rooms/(\d+)', soup.select('.room-name a')[0].get('href'))[1]
content = self.session.get('https://chat.{}/message/{}?plain=true'.format(server, message_id)).text
return Message(server, message_id=message_id, room_id=room_id, user_id=user_id, content=content)
def get_message_source(self, message_id, server):
return self.session.get('https://chat.{}/message/{}?plain=true'.format(server, message_id)).text
    def toggle_star(self, message_id, server):
        """Toggle the logged-in user's star on the given message."""
        self._chat_post_fkeyed(server, '/messages/{}/star'.format(message_id))
def star_count(self, message_id, server):
star_soup = BeautifulSoup(self.session.get('https://chat.{}/transcript/message/{}'.format(server, message_id)),
'html.parser')
counter = star_soup.select('#message-{} .flash .star .times'.format(message_id))
if len(counter) > 0:
return int(counter[0].text)
else:
return 0
    def star(self, message_id, server):
        """Star the message unless the logged-in user has already starred it."""
        if not self.has_starred(message_id, server):
            self.toggle_star(message_id, server)
    def unstar(self, message_id, server):
        """Remove the logged-in user's star from the message, if present."""
        if self.has_starred(message_id, server):
            self.toggle_star(message_id, server)
    def has_starred(self, message_id, server):
        """Return True if the logged-in user has starred the message —
        the transcript marks this with a 'user-star' CSS class."""
        star_soup = BeautifulSoup(self.session.get('https://chat.{}/transcript/message/{}'
                                                   .format(server, message_id)).text,
                                  'html.parser')
        counter = star_soup.select('#message-{} .flash .stars'.format(message_id))
        return len(counter) > 0 and 'user-star' in counter[0].get('class')
    def cancel_stars(self, message_id, server):
        """Clear stars on a message via the /unstar endpoint.
        NOTE(review): presumably requires moderator/room-owner rights — confirm."""
        self._chat_post_fkeyed(server, '/messages/{}/unstar'.format(message_id))
    def delete(self, message_id, server):
        """Delete the given message."""
        self._chat_post_fkeyed(server, '/messages/{}/delete'.format(message_id))
    def edit(self, message_id, server, new_content):
        """Replace the content of the given message with *new_content*."""
        self._chat_post_fkeyed(server, '/messages/{}'.format(message_id), data={'text': new_content})
    def toggle_pin(self, message_id, server):
        """Toggle the owner-star (pin) on the given message."""
        self._chat_post_fkeyed(server, '/messages/{}/owner-star'.format(message_id))
    def pin(self, message_id, server):
        """Pin the message unless it is already pinned."""
        if not self.is_pinned(message_id, server):
            self.toggle_pin(message_id, server)
    def unpin(self, message_id, server):
        """Unpin the message if it is currently pinned."""
        if self.is_pinned(message_id, server):
            self.toggle_pin(message_id, server)
    def is_pinned(self, message_id, server):
        """Return True if the message carries an 'owner-star' (i.e. is pinned)."""
        star_soup = BeautifulSoup(self.session.get('https://chat.{}/transcript/message/{}'
                                                   .format(server, message_id)).text,
                                  'html.parser')
        counter = star_soup.select('#message-{} .flash .stars'.format(message_id))
        return len(counter) > 0 and 'owner-star' in counter[0].get('class')
|
[
"hello@artofcode.co.uk"
] |
hello@artofcode.co.uk
|
41e5021c0e1be2fbcbc2a62efcefce360bddb194
|
da3a0f4a31b31f8c9d4141994f55437fc4dd93f0
|
/todo/settings copy.py
|
8b3a85bb6c7fffbb261cceb09b6910058cd7b9cb
|
[] |
no_license
|
zackington/todo_python
|
b9a39e643a2b280639454dfb9e2542561f97c767
|
f387ee4b52816d4e1a946ac1654ddf4e4da82f04
|
refs/heads/main
| 2023-02-23T05:48:12.302105
| 2021-01-29T15:28:56
| 2021-01-29T15:28:56
| 330,905,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,068
|
py
|
"""
Django settings for todo project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's$_u$u+$695+&w_-yb&$rd!eirhva8$!2y)4yc+og&*h=c%2j@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"zagatol96@gmail.com"
] |
zagatol96@gmail.com
|
894aef2056395bebd4c1c73efa7b7f804b4f5f01
|
284d20180008b09489a48d57180ac0037c09edfa
|
/drinks/mydrinks/views.py
|
fdacb0b5111bfa980705f0032d195b845192ed8d
|
[] |
no_license
|
abki/django-drinks
|
ddbd97685ff84f9d2ad9b7a71275b16d5ca63295
|
827f4aa3365f2fbf507891eb43f6d9d78981ea6f
|
refs/heads/master
| 2020-04-22T21:03:10.757767
| 2009-05-20T19:59:10
| 2009-05-20T19:59:10
| 205,957
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
from django import forms
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
# Bug fix: importing DrinkForm from models shadowed (and was shadowed by) the
# form class defined below; only CommentForm is imported from models now.
from mydrinks.models import CommentForm


class DrinkForm(forms.Form):
    """Form used to submit a new drink."""
    name = forms.CharField(max_length=254)
    # Bug fix: django.forms has no TextField; a CharField with a textarea
    # widget is the multi-line equivalent.
    content = forms.CharField(widget=forms.Textarea)
    # Bug fix: rating was declared as an EmailField (copy-paste slip);
    # a numeric rating is clearly intended.
    rating = forms.IntegerField()
    cc_myself = forms.BooleanField(required=False)


def addDrink(request):
    """Handle a drink submission: validate on POST, otherwise show a blank form.

    :param request: the incoming HttpRequest
    :return: a redirect to /thanks/ on success, else the rendered form page
    """
    if request.method == 'POST':  # If the form has been submitted...
        # Bug fix: the view referenced an undefined ContactForm; it binds
        # the DrinkForm defined above.
        form = DrinkForm(request.POST)  # A form bound to the POST data
        if form.is_valid():  # All validation rules pass
            # Process the data in form.cleaned_data here
            return HttpResponseRedirect('/thanks/')  # Redirect after POST
    else:
        form = DrinkForm()  # An unbound form
    return render_to_response('contact.html', {
        'form': form,
    })
|
[
"boubekki@enseirb.fr"
] |
boubekki@enseirb.fr
|
de42c53734da3da9116fffe9e3e19452e35b07cb
|
09ea0b61ea2df531468e32877ad0e4883a6b812b
|
/csv_converter.py
|
b38af52dbc89e3b48d3dd2a263125e585c5dac24
|
[] |
no_license
|
matthewj301/lastpass_csv_to_dashlane_converter
|
d342d9144e11b84fdafc16b556f0654ed86053eb
|
1919ffca754b2850fe6c1f78a8bbfa237326f8e0
|
refs/heads/master
| 2023-06-19T18:02:40.824618
| 2021-07-22T01:21:52
| 2021-07-22T01:21:52
| 388,290,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,463
|
py
|
import csv
from pathlib import Path
# Input (LastPass export) and output (Dashlane import) CSV locations,
# both under a ./data directory next to this script.
data_dir = Path(__file__).resolve().parent.joinpath('data')
password_csv = data_dir.joinpath('lastpass.csv')
fixed_csv = data_dir.joinpath('dashlane.csv')
# Make sure the output file exists before it is opened for writing below.
if not fixed_csv.exists():
    fixed_csv.touch()
# Sites whose display name does not match their URL host name.
site_name_to_proper_url_name = {'steam': 'store.steampowered'}
def fix_bad_url(_site_name, _url):
    """Build a best-guess https://www.<site>.com URL for a bad/placeholder URL.

    :param _site_name: the record's site name ('' or None allowed)
    :param _url: the original (broken) URL; may be None or a bare scheme
    :return: the reconstructed URL string
    """
    # Bug fix: _url can be None (the caller explicitly checks `url is None`),
    # which previously crashed on .split(); default the scheme to https then.
    http_type = _url.split(':')[0] if _url else 'https'
    if _site_name:
        # Strip apostrophes/spaces and lowercase before the edge-case lookup.
        formatted_site_name = _site_name.replace("'", '').replace(' ', '').lower()
        formatted_site_name = fix_edgecase_site_name(formatted_site_name)
    else:
        formatted_site_name = ''
    fixed_url = f'{http_type}://www.{formatted_site_name}.com'
    return fixed_url
def fix_edgecase_site_name(_site_name):
    """Map special-case site names to their real URL host names.

    :param _site_name: a site name (any casing)
    :return: the mapped host name if known, else the input unchanged
    """
    # Bug fix: the membership test lowercased the name but the lookup used the
    # original casing, raising KeyError for mixed-case input such as 'Steam'.
    key = _site_name.lower()
    if key in site_name_to_proper_url_name:
        _site_name = site_name_to_proper_url_name[key]
    return _site_name
if __name__ == '__main__':
    # Convert the LastPass export row-by-row into Dashlane's column order.
    # The output handle is named out_csv so it no longer shadows the
    # module-level fixed_csv Path.
    with open(password_csv, 'r') as pw_csv, open(fixed_csv, 'w') as out_csv:
        csv_writer = csv.writer(out_csv)
        csv_reader = csv.reader(pw_csv, delimiter=',')
        for row in csv_reader:
            username = row[1]
            password = row[2]
            url = row[0]
            site_name = row[5]
            # Bug fix: csv.reader yields '' (never None) for empty cells, so
            # the old `url is None` branch was dead; treat empty and bare
            # scheme values as broken URLs to reconstruct.
            if not url or url in ('https://', 'http://'):
                url = fix_bad_url(site_name, url)
            csv_writer.writerow([site_name, url, username, password])
|
[
"matthewj301@gmail.com"
] |
matthewj301@gmail.com
|
b8075d05508ad9f1869fce993a34b58e3a860165
|
cde9143ed2535c147a6aafc1ef9c147d48668e9c
|
/Payload/ADLC/ShapeCharacterization/PythonScripts/Matching-2.py
|
97b4903c24e53179042f7f6b57d2b74675063ecd
|
[] |
no_license
|
Gowabby/Payload
|
2eafdc0d1679923592aeed92a4288ecc68f0892b
|
4e20a294afd07d99c1475f1590b25bd2ead1f376
|
refs/heads/master
| 2021-01-25T11:15:06.086970
| 2017-06-16T07:32:14
| 2017-06-16T07:32:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,455
|
py
|
###################################################
##Name: Andrew Olguin
##
##Date: 1-1-15
##
##Functionality: This code is meant to match objects in an image to a objects in training set.
##it uses FLANN and surf features to match two images together. This code is still
##under development.
##See this link for documentation: http://bit.ly/1tH9a6P
##
##Version: 1
##
##Changes log: none
###################################################
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Glyph tile size used by deskew().
SZ=20
bin_n = 16 # Number of bins
# Linear C-SVC parameters for the legacy OpenCV 2.x cv2.SVM API.
svm_params = dict( kernel_type = cv2.SVM_LINEAR,
                    svm_type = cv2.SVM_C_SVC,
                    C=2.67, gamma=5.383 )
affine_flags = cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR
def nothing(x):
    # No-op callback (commonly used as a cv2 trackbar callback).
    pass
def Sobel(gray):
    """Return a combined absolute-gradient (edge-strength) image of *gray*.

    Applies 3x3 Sobel derivatives in x and y, converts each back to uint8,
    then blends the absolute gradients with equal (x5) weights.
    """
    scale = 1
    delta = 0
    depth = cv2.CV_16S
    # Horizontal and vertical first derivatives (Scharr variants left disabled).
    gx = cv2.Sobel(gray, depth, 1, 0, ksize=3, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)
    gy = cv2.Sobel(gray, depth, 0, 1, ksize=3, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT)
    # Back to uint8 before blending.
    abs_gx = cv2.convertScaleAbs(gx)
    abs_gy = cv2.convertScaleAbs(gy)
    return cv2.addWeighted(abs_gx, 5, abs_gy, 5, 0)
def deskew(img):
    """Deskew an SZ x SZ glyph image using its image moments.

    If the image is already (near) upright -- |mu02| < 1e-2 -- a copy is
    returned unchanged; otherwise an affine shear derived from the
    second-order moments straightens it.
    """
    m = cv2.moments(img)
    if abs(m['mu02']) < 1e-2:
        return img.copy()
    skew = m['mu11']/m['mu02']
    # Shear matrix that cancels the measured skew, re-centred on the SZ-sized tile.
    M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
    img = cv2.warpAffine(img,M,(SZ, SZ),flags=affine_flags)
    return img
def Clust(img, K):
    """Colour-quantise *img* to K colours via k-means on its pixels."""
    Z = img.reshape((-1,3))
    # convert to np.float32
    Z = np.float32(Z)
    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret,label,center=cv2.kmeans(Z,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)
    # The commented call below is the older OpenCV 2.x kmeans signature.
    #ret, label, center = cv2.kmeans(Z, K, criteria, 10, 0)
    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))
    return res2
def hog(img):
    """Compute a 64-dim HOG descriptor: 16 orientation bins over 4 quadrants.

    Gradient magnitude is the vote weight for each quantised angle.
    Assumes a 20x20 input tile (quadrant split at row/col 10) -- TODO confirm.
    """
    gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
    mag, ang = cv2.cartToPolar(gx, gy)
    # quantizing binvalues in (0...16)
    bins = np.int32(bin_n*ang/(2*np.pi))
    # Divide to 4 sub-squares
    bin_cells = bins[:10,:10], bins[10:,:10], bins[:10,10:], bins[10:,10:]
    mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
    hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
    hist = np.hstack(hists)
    return hist
#this function is meant to detect surf features in an image and uses an image pyramid to upscale an image
#it uses the image preprocessing methods show in the meanClust-1 script
#Parameters:
#K= number of colors in the clustered output
#lb/ub= the bounds for the canny edge detection
#surfVal= surf feature threshold, higher the number the less feautures
#img- input image
def SURFINPYR(img, K, lb, ub, srfVal):
    """Detect SURF keypoints on Canny edges of a 4x-upscaled chroma image.

    :param img: input colour image
    :param K: number of colours for the k-means clustering step
    :param lb: lower Canny threshold
    :param ub: upper Canny threshold
    :param srfVal: SURF Hessian threshold (higher => fewer features)
    :return: (keypoints, descriptors)
    """
    # NOTE(review): deskew() returns a new image but the result is discarded
    # here -- possibly intended as `img = deskew(img)`; confirm.
    deskew(img)
    img1 = Clust(img, K)
    surf = cv2.SURF(srfVal)
    img1 = cv2.cvtColor(img1, cv2.COLOR_RGB2LUV)
    # NOTE(review): the chroma sum below uses the original `img`, not the
    # clustered/converted img1 -- confirm which was intended.
    uv_img = img[:,:,1]+ img[:,:,2]
    # Two pyrUp calls quadruple each image dimension.
    img2 = cv2.pyrUp(uv_img)
    img2 = cv2.pyrUp(img2)
    grad = Sobel(img2)
    edges = cv2.Canny(grad,lb,ub)
    kp, des = surf.detectAndCompute(edges, None)
    return kp, des
#this function is meant to detect surf features in an image
#it uses the image preprocessing methods show in the meanClust-1 script
#Parameters:
#K= number of colors in the clustered output
#lb/ub= the bounds for the canny edge detection
#surfVal= surf feature threshold, higher the number the less feautures
#img- input image
def SURFIN(img, K, lb, ub, srfVal):
    """Detect SURF keypoints on the Canny edges of a colour-clustered image.

    :param img: input colour image
    :param K: number of colours for the k-means clustering step
    :param lb: lower Canny threshold
    :param ub: upper Canny threshold
    :param srfVal: SURF Hessian threshold (higher => fewer features)
    :return: (keypoints, descriptors)
    """
    img = Clust(img, K)
    surf = cv2.SURF(srfVal)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
    # Sum of the chroma (U+V) channels, as in SURFINPYR.
    uv_img = img[:,:,1] + img[:,:,2]
    # Bug fix: the original called Sobel(img2), but img2 is undefined in this
    # function (it belongs to SURFINPYR) and raised NameError; the gradient is
    # taken of uv_img here (no pyramid upscaling in this variant).
    grad = Sobel(uv_img)
    edges = cv2.Canny(grad, lb, ub)
    kp, des = surf.detectAndCompute(edges, None)
    return kp, des
###########################################################################################
test = cv2.imread('T.png',0)
train1 = cv2.imread('F.png',0)
train2 = cv2.imread('G.png',0)
#cells = [np.hsplit(row,100) for row in np.vsplit(img,50)]
# First half is trainData, remaining is testData
#train_cells = [ i[:50] for i in cells ]
#test_cells = [ i[50:] for i in cells]
###### Now training ########################
train1 = hog(train1)
train2 = hog(train2)
#deskewed = [map(deskew,row) for row in train_cells]
#hogdata = [map(hog,row) for row in deskewed]
trainData1 = np.float32(train1).reshape(-1,64)
trainData2 = np.float32(train2).reshape(-1,64)
responses = np.float32(np.repeat(np.arange(2),1)[:,np.newaxis])
trainData = np.vstack((trainData1, trainData2))
print trainData.shape
print responses.shape
svm = cv2.SVM()
svm.train(trainData,responses, params=svm_params)
svm.save('svm_data.dat')
###### Now testing ########################
#deskewed = [map(deskew,row) for row in test_cells]
#hogdata = [map(hog,row) for row in deskewed]
hogdata = hog(test)
testData = np.float32(hogdata).reshape(-1,bin_n*4)
result = svm.predict_all(testData)
print result
####### Check Accuracy ########################
mask = result==responses
correct = np.count_nonzero(mask)
print correct*100.0/result.size
|
[
"andrew.olguin556@gmail.com"
] |
andrew.olguin556@gmail.com
|
f22c019ebbea833c2a0e0693688c327d115e88f5
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/kuv_02570-0126/sdB_KUV_02570-0126_coadd.py
|
c4cb9b302dad383bd2a7935c0b65bc65388dc997
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 441
|
py
|
from gPhoton.gMap import gMap
def main():
    # Build an NUV count movie (30 s steps) and a count coadd FITS map for
    # sdB_KUV_02570-0126 at RA/Dec (44.897583, -1.235028) over a 2' box.
    # NOTE(review): cntfile lives under .../sdBs/... while cntcoaddfile is
    # under .../sdB/... -- the differing directory names look like a
    # script-generator slip; confirm the intended output paths.
    gMap(band="NUV", skypos=[44.897583,-1.235028], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_KUV_02570-0126/sdB_KUV_02570-0126_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_KUV_02570-0126/sdB_KUV_02570-0126_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
92cf58834da067af6b6f57cf1bb9a5d5f0b835c7
|
a85f4a09b01facb31f2c83005eecf1a7f1a1966b
|
/app/views.py
|
70a5ae46e4ca1d84902aaddee6c5e43dd2eda41b
|
[] |
no_license
|
aogz/flask-mysql-docker
|
2226835ec5d5161ec4164ccede1acf9ce6745318
|
00740ea19277e3ee5e505ce2fde7f1a59742ff29
|
refs/heads/master
| 2021-07-13T06:22:00.537011
| 2017-10-08T09:12:04
| 2017-10-08T09:12:04
| 106,135,369
| 0
| 0
| null | 2017-10-08T09:24:27
| 2017-10-07T22:43:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 389
|
py
|
# -*- coding: utf-8 -*-
from app import app
from flask import render_template
from flask_login import current_user
@app.route('/')
def main():
    """Root route: profile page for authenticated users, landing page otherwise."""
    if current_user.is_authenticated:
        return render_template('main/profile.html')
    else:
        return render_template('main/landing.html')
@app.route('/settings')
def settings():
    """Render the user settings page."""
    return render_template('main/settings.html')
|
[
"aogurzow@gmail.com"
] |
aogurzow@gmail.com
|
486bd69cc197bef259f8a8fa395e948d4c8b003d
|
88cc3cadf2718133eeb8b9ebcec8c6827ba21cb3
|
/Lesson7/oop/example3_namemangling.py
|
ae0a14e892ca545d4f7c01014ededd00f52878e1
|
[] |
no_license
|
misamoylov/otus-qa
|
5324ceb26baec55697f02dcd54d4fd529c098057
|
2ad73495dcedb42e8e4eb7893d0bbd088eed3d2b
|
refs/heads/master
| 2020-05-02T16:06:51.753814
| 2019-07-29T17:03:40
| 2019-07-29T17:03:40
| 178,060,763
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
class Example:
    """Demonstrates class vs. instance attributes and name mangling."""

    # Shared by all instances of the class.
    class_variable = "class_variable"

    def __init__(self, variable):
        # Public instance attribute.
        self.variable = variable
        # Single leading underscore: private by convention only.
        self._variable = " ".join(["private", variable])
        # Double leading underscore: mangled to _Example__variable by the compiler.
        self.__variable = " ".join(["Name mangling", variable])
# Class attributes are reachable through the class object itself.
Example.class_variable
example = Example("test")
example.variable
example._variable
# Bug fix: `example.__variable` raised AttributeError and crashed the script —
# name mangling only rewrites __variable inside the class body, so from module
# scope the attribute exists only under its mangled name. Demonstrate that:
try:
    example.__variable
except AttributeError:
    pass
example._Example__variable
|
[
"msamoylov@mirantis.com"
] |
msamoylov@mirantis.com
|
9e7abd882854f4b323766f990e57fa62d2206544
|
dde4c00075eca23b94b8a5dd23292d82e170b34f
|
/PYTHON/python学习/bs4实战2.py
|
3cb91f837680ba0a96b2d20b2719c31268d1622e
|
[] |
no_license
|
ggw12138/hnu-osdesign-ghw
|
5f405331413f21a958bacb1067c0452f268b34fe
|
8e74c69ba39fa0af799b29d46e58e8ea966b21bb
|
refs/heads/master
| 2023-02-04T23:31:54.575152
| 2020-12-23T05:12:59
| 2020-12-23T05:12:59
| 323,810,403
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
#-*- encoding:UTF-8 -*-
# Yutian summer training camp (誉天暑期训练营)
import requests
from bs4 import BeautifulSoup
def getHTMLText(url):
    """Fetch *url* (15 s timeout) and return its body decoded as UTF-8.

    Raises via raise_for_status() on a non-2xx response.
    """
    r=requests.get(url,timeout=15)
    r.raise_for_status()
    r.encoding='utf-8'
    return r.text
def getSoup(url):
    """Return a BeautifulSoup document for the page at *url*."""
    txt=getHTMLText(url)
    soup=BeautifulSoup(txt,"html.parser")
    return soup
def getContent(soup):
    """Extract article entries from a div.usoft-listview-basic listing.

    :param soup: soup of a listing page
    :return: list of [title, "---", date-string] lists, one per <li> entry
    """
    contents=soup.find('div',{'class':'usoft-listview-basic'})
    articles=[]
    for item in contents.find_all('li'):
        date1=item.find('span',{'class':'usoft-listview-item-date'})
        datestr=date1.string
        title=item.find('a')['title']
        articles.append([title,"---",datestr])
    return articles
if __name__ == '__main__':
    url='http://www.upln.cn/html/Channel_01/Column_0103/2.html'
    soup=getSoup(url)
    articleslist=getContent(soup)
    # Display the scraped article information, one article per line.
    for item in articleslist:
        for i in item:
            print(i,end='')
        print()
        print('-----------------------')
|
[
"893398418@qq.com"
] |
893398418@qq.com
|
4c75dc681050d798e04dbde8ddf1199957ddb3fa
|
7b613836abdfda73e29ca2d7eec9295c0c1c3005
|
/python/misc/modules/jacob/import_module.py
|
e0b67daea8e5b2313713eea4988b4a1d6e4ae517
|
[] |
no_license
|
bennywinefeld/scripts
|
31790202215277d33ad8204655f22227645a4e43
|
753e480849189072eabd705489606a21909cbebb
|
refs/heads/master
| 2020-12-24T11:33:29.467917
| 2017-10-03T23:18:24
| 2017-10-03T23:18:24
| 73,028,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42
|
py
|
# Import the sibling module and exercise its test function.
import my_module
my_module.testFunction()
|
[
"benny.winefeld@gmail.com"
] |
benny.winefeld@gmail.com
|
97aa99c9a48d8eabd5f5ec4369635876825b0609
|
2ce2eb3998445ee6a1383c6828abf1e9afbb5616
|
/impyute/imputation/ts/arima.py
|
b7b896d17b5a39c948981b4dc1e7ddd9d5c7e5dc
|
[
"MIT"
] |
permissive
|
benchpress100/impyute
|
325600ab8858d325dbb665c6dd85b4a44757ce88
|
5214761c627be83dda95b36cf9403fc3ea3d223b
|
refs/heads/master
| 2020-03-27T06:45:46.882343
| 2018-07-06T04:28:54
| 2018-07-06T04:28:54
| 146,133,959
| 0
| 0
|
MIT
| 2018-08-25T22:30:00
| 2018-08-25T22:30:00
| null |
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
""" impyute.imputation.ts.arima """
from impyute.util import find_null
from impyute.util import checks
# pylint: disable=invalid-name
@checks
def arima(data, p, d, q, axis=0):
    """Autoregressive Integrated Moving Average Imputation (stationary model).

    NOTE: the ARIMA fit itself is not implemented yet -- this function only
    validates its arguments, prints the coordinates of each missing value,
    and returns *data* unchanged.

    PARAMETERS
    ----------
    data: numpy.ndarray
        The matrix with missing values that you want to impute
    p: int
        Number of autoregressive terms. Ex (p,d,q)=(1,0,0).
    d: int
        Number of nonseasonal differences needed for stationarity
    q: int
        Number of lagged forecast errors in the prediction equation
    axis: boolean (optional)
        0 if time series is in row format (Ex. data[0][:] is 1st data point).
        1 if time series is in col format (Ex. data[:][0] is 1st data point).
    RETURNS
    -------
    numpy.ndarray
        Currently the input *data*, returned without modification.
    """
    assert isinstance(p, int), "Parameter `p` must be an integer"
    assert isinstance(d, int), "Parameter `d` must be an integer"
    assert isinstance(q, int), "Parameter `q` must be an integer"
    null_xy = find_null(data)
    for x, y in null_xy:
        print(x, y)
    return data
|
[
"eltonlaw296@gmail.com"
] |
eltonlaw296@gmail.com
|
2c77fbb9d5a9a2c09f218a1ad7dca7782fbbee87
|
27cdea0b9826229c75f49f717d07d0dddc023893
|
/Assignment 1 - Simple Python Code/first_module.py
|
d6792e90361e2ff7030befb5e6784c51c2618949
|
[
"MIT"
] |
permissive
|
masher1/SocialMediaMining
|
6132a70a9772609cd1896c7fbcc0a11afee55188
|
615205159f363bffd8d6cd8fd32afd65cdfe4332
|
refs/heads/master
| 2020-12-20T04:44:01.065426
| 2020-03-01T07:36:34
| 2020-03-01T07:36:34
| 235,963,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,017
|
py
|
"""
Assignment 1: Simple Python Program
Author: Malkiel Asher
Requirements:
* Create a Python module with a __main__ , and at least 100 lines of code. You should use: if __name__ == "__main__":
* Define at least 1 class, and at least 1 function for each class you have defined. Your __main__ should instantiate objects of the classes you have designed, and use them to invoke the methods defined in those classes.
* Use list comprehensions to create lists.
* Use dictionary comprehensions to create dictionaries.
* Use at least 1 decision-making statement (if-elif)
* Use at least 1 looping statement (for or while).
* Use at least 1 try-except to catch some exceptions.
* Use the input() function, or command-line arguments, to get some user input
* Produce some, hopefully interesting, output
* Add comments to make your script easy to understand (not counted toward the 100 line requirement)
"""
import time
import WordBank
import random
# REVIEWED: implement if statement for each letter that the user inputs
# REVIEWED: implement try catch for if the user inputs a non-number into the inputs where necessary
#function made to have the player choose a letter that might be in the chosen word
def guess_the_letter(word):
    """Run one interactive hangman round for *word*.

    Repeatedly prompts for single lowercase letters, tracking revealed
    positions in `answer` until the full word is guessed. Space characters
    are pre-filled. Returns 0 if WordBank.letCompare signals a lost game
    (it returns the (0, 0, 0) sentinel).
    """
    print("We have now chosen a word!"
          "\nLet's start the game!")
    wordLen = len(word)
    wordChars = WordBank.split(word)
    answer = []
    # for i in range(wordLen-1):
    #     answer.insert(i,'_')
    answer = ['_' for w in range(wordLen)]
    #print("The default answer is:", answer) #for debugging purposes
    newWord = ""
    usedLetters = []
    spacePositions = []
    num = 0;
    missed = 0;
    print("The word contains:", wordLen, "letters")
    print_(wordLen)
    # Pre-fill space characters so the player does not have to guess them.
    for i in range(wordLen):
        if(" " in wordChars[i]):
            num = num + 1;
            spacePositions.append(i)
    for i in range(num):
        answer[spacePositions[i]] = ' '
    print("\nThere are", num, "spaces in the word")
    #print("\nThe word is", word) #for debugging purposes ONLY
    #REVIEWED: make sure only small case letters are acceptable NO NUMBERS
    # First guess: loop until a single lowercase letter is entered.
    ready = False
    while (not ready):
        try:
            letter = input("What is your first letter choice?")
            if letter.isalpha() and letter.islower() and len(letter) == 1:
                ready = True
            else:
                raise Exception()
        except:
            print(WordBank.font.RED + "ERROR: Please enter lowercase, single character letters only" + WordBank.font.END)
    letCompare updates the revealed answer, used letters and miss count.
    (answer, usedLetters, missed) = WordBank.letCompare(letter, word, answer, usedLetters, missed)
    newWord = ""
    for x in answer:
        newWord += x
    print(WordBank.font.DARKCYAN + "Your Word so far:", newWord + WordBank.font.END)
    #REVIEWED: implement the hangman structure somewhere here and limit it to 7 wrong guesses
    # Keep prompting until the revealed word matches the target.
    while (newWord != word):
        ready = False
        while (not ready):
            try:
                letter = input("What is your next letter choice?")
                if letter.isalpha() and letter.islower() and len(letter) == 1:
                    ready = True
                else:
                    raise Exception()
            except:
                print(WordBank.font.RED + "ERROR: Please enter lowercase, single character letters only" + WordBank.font.END)
        (answer, usedLetters, missed) = WordBank.letCompare(letter, word, answer, usedLetters, missed)
        # (0, 0, 0) is letCompare's "game lost" sentinel.
        if((answer, usedLetters, missed) == (0,0,0)):
            return 0
        newWord = ""
        for x in answer:
            newWord += x
        print(WordBank.font.DARKCYAN + "Your Word so far:", newWord + WordBank.font.END)
    #REVIEWED: make a decision here to display congrats if the word was guessed correctly or if the game finished with a dead man
    print(WordBank.font.DARKCYAN + WordBank.font.BOLD + WordBank.font.UNDERLINE + "Congratulations!" + WordBank.font.END)
def print_(wordLen):
    """Print wordLen underlined blank letter slots, then a blank line."""
    for _ in range(wordLen):
        print(WordBank.font.UNDERLINE + ' ' + WordBank.font.END, end=" ")
    print("\n")
# function made to draw the basic hangman base in ASCII
# REVIEWED: make a switch statement to make a new hangman for each infraction from 1 to wordLen-1
def HangDraw(missed, new_list):
    """Update the gallows drawing in-place for `missed` wrong guesses and print it.

    :param missed: number of wrong guesses so far (1..7)
    :param new_list: mutable list of gallows lines; rows 2-5 are overwritten here
    :return: True when the figure is complete (7 misses = game lost), else False
    """
    print("# of misses:", missed)
    done = False
    # Each threshold adds one body part to the figure. The mix of == and >=
    # mirrors the original draw order: head, torso, arms, leg(s).
    if (missed >= 1):
        new_list[2] = ' | (*_*)'
    if(missed == 2):
        new_list[3] = ' | |'
    if(missed == 3):
        new_list[3] = ' | \|'
    if(missed >= 4):
        new_list[3] = ' | \|/'
    if(missed >= 5):
        new_list[4] = ' | |'
    if(missed == 6):
        new_list[5] = ' | /'
    if (missed == 7):
        new_list[5] = ' | / \\'
        done = True
    print(WordBank.font.BLUE + '\n'.join(new_list) + WordBank.font.END)
    if done:
        print(WordBank.font.RED + WordBank.font.BOLD + "\nUnfortunately you have lost the game :(\n" + WordBank.font.END)
    return done
def hangman():
    """Print the empty gallows drawing, row by row, in blue."""
    frame = (
        " _______",
        " |/ |",
        " | O",
        " |",
        " |",
        " |",
        " |",
        "_|___",
    )
    for row in frame:
        print(WordBank.font.BLUE + row + WordBank.font.END)
#__name__ function to make sure client is starting from the correct module
# if __name__ == "__main__":
#     main()
# Script entry point: greet the player, show the gallows, ask for a word
# genre (1-5, or 6 for a random genre), then start the guessing loop.
if __name__ == "__main__":
    print("Welcome to the Hangman Game!")
    print("Here is the hangman structure:")
    hangman()
    # Pause so the player can look at the drawing before the first prompt.
    time.sleep(3)
    genre = 0
    word = ""
    ready = False
    # Re-prompt until a valid menu number between 1 and 6 is entered.
    while (not ready):
        try:
            genre = int(input("What genre would you like to get a word from? (ENTER NUMBER)"
                              "\n1) Kitchen Utensils"
                              "\n2) Office Supplies"
                              "\n3) Popular Artists"
                              "\n4) Popular Actors"
                              "\n5) Popular Countries"
                              "\n6) RANDOM MODE\n\nChoice: "))
            if (genre < 6 and genre > 0):
                word = WordBank.word_bank(genre);
            elif (genre == 6):
                # Random mode picks one of the five concrete genres.
                print("Entering rAnDoM MoDe!")
                rand_genre = (random.randint(1, 5))
                word = WordBank.word_bank(rand_genre)
            else:
                raise Exception()
            ready = True
        except:
            print(WordBank.font.RED + "ERROR: Please enter a number between 1 and 6" + WordBank.font.END)
    # The game is case-insensitive: the word is lowered before play starts.
    word = word.lower()
    guess_the_letter(word)
|
[
"masher@syr.edu"
] |
masher@syr.edu
|
9a18fcbd5b6a8af3346248cf9bdb59ea679eb5fd
|
3af8bd42cbf1f3a6f275cc7f5299a643511b56ff
|
/sentiment_analysis/bert/scripts/rerun.py
|
953f1c0e8d9f757e14302718ffa26354013ce13e
|
[] |
no_license
|
shravanc/msc_project
|
d54fbf6fda764038ca52d113ec5b582212f9a5bd
|
9d815e2130a9c4c2ad9286a8f3471c2bf860ca93
|
refs/heads/master
| 2022-12-13T21:59:51.269615
| 2020-09-08T10:50:55
| 2020-09-08T10:50:55
| 276,747,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,347
|
py
|
import os
import math
import datetime
from tqdm import tqdm
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import bert
from bert import BertModelLayer
from bert.loader import StockBertConfig, map_stock_config_to_params, load_stock_weights
from bert.tokenization.bert_tokenization import FullTokenizer
import seaborn as sns
import matplotlib.pyplot as plt
# Force TensorFlow onto the CPU by hiding all CUDA devices.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Local directories that hold the training / validation CSV files.
train_base_dir = "/home/shravan/Downloads/train/"
valid_base_dir = "/home/shravan/Downloads/valid/"
def load_datasets():
    """Load the train and validation dataframes from the module-level dirs.

    Each dataframe gets columns 'sentences' and 'polarity'. Only the first
    file returned by os.listdir is read (the loop breaks after one file),
    exactly as in the original implementation.
    """
    def _first_csv(base_dir):
        frame = pd.DataFrame()
        for name in os.listdir(base_dir):
            file_path = os.path.join(base_dir, name)
            frame = pd.concat(
                [frame, pd.read_csv(file_path, sep=',', names=["sentences", "polarity"])],
                ignore_index=True
            )
            break  # deliberately load only the first listed file
        return frame

    return _first_csv(train_base_dir), _first_csv(valid_base_dir)
train, test = load_datasets()
# Location of the pre-trained multilingual BERT checkpoint and its config.
bert_abs_path = '/home/shravan/Downloads/'
bert_model_name = 'multi_cased_L-12_H-768_A-12'
bert_ckpt_dir = os.path.join(bert_abs_path, bert_model_name)
bert_ckpt_file = os.path.join(bert_ckpt_dir, "bert_model.ckpt")
bert_config_file = os.path.join(bert_ckpt_dir, "bert_config.json")
# Preprocessing
class IntentDetectionData:
    """Tokenizes, indexes, and pads the train/test dataframes for BERT.

    After construction, train_x/train_y/test_x/test_y are numpy arrays and
    max_seq_len holds the sequence length actually used for padding.
    """
    # Dataframe column names expected in the input frames.
    DATA_COLUMN = 'sentences'
    LABEL_COLUMN = 'polarity'
    def __init__(self, train, test, tokenizer: FullTokenizer, classes, max_seq_len):
        self.tokenizer = tokenizer
        # Grows while tokenizing; capped by the caller's max_seq_len below.
        self.max_seq_len = 0
        self.classes = classes
        # print(train[IntentDetectionData.DATA_COLUMN].str.len().sort_values().index())
        # Reorder rows by text length so similarly sized sequences are adjacent.
        train, test = map(lambda df: df.reindex(df[IntentDetectionData.DATA_COLUMN].str.len().sort_values().index),
                          [train, test])
        ((self.train_x, self.train_y), (self.test_x, self.test_y)) = map(self._prepare, [train, test])
        print("max seq_len", self.max_seq_len)
        self.max_seq_len = min(self.max_seq_len, max_seq_len)
        self.train_x, self.test_x = map(self._pad, [self.train_x, self.test_x])
    def _prepare(self, df):
        # Tokenize each row, wrap with [CLS]/[SEP], convert to ids, and map
        # labels to integer class indices; tracks the longest sequence seen.
        x, y = [], []
        for _, row in tqdm(df.iterrows()):
            text, label = row[IntentDetectionData.DATA_COLUMN], row[IntentDetectionData.LABEL_COLUMN]
            tokens = self.tokenizer.tokenize(text)
            tokens = ['[CLS]'] + tokens + ['[SEP]']
            token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            self.max_seq_len = max(self.max_seq_len, len(token_ids))
            x.append(token_ids)
            y.append(self.classes.index(label))
        return np.array(x), np.array(y)
    def _pad(self, ids):
        # Truncate to max_seq_len - 2 then right-pad with zeros to max_seq_len.
        # NOTE(review): truncation drops the trailing [SEP] for long sequences
        # and never re-appends it — confirm this is intended.
        x = []
        for input_ids in ids:
            input_ids = input_ids[:min(len(input_ids), self.max_seq_len - 2)]
            input_ids = input_ids + [0] * (self.max_seq_len - len(input_ids))
            x.append(np.array(input_ids))
        return np.array(x)
tokenizer = FullTokenizer(vocab_file=os.path.join(bert_ckpt_dir, 'vocab.txt'))
# Sanity check: tokenize a sample Kannada phrase and show its token ids.
t = tokenizer.tokenize('ಶುಭ ದಿನ')
print(t)
ds = tokenizer.convert_tokens_to_ids(t)
print(ds)
def create_model(max_seq_len, bert_ckpt_file):
    """Build a BERT text classifier: [CLS] embedding -> tanh dense -> softmax.

    Reads the module-level bert_config_file and classes globals; loads the
    pre-trained BERT weights from bert_ckpt_file after the model is built.
    """
    with tf.io.gfile.GFile(bert_config_file, 'r') as reader:
        bc = StockBertConfig.from_json_string(reader.read())
        bert_params = map_stock_config_to_params(bc)
        bert_params.adapter_size = None
        bert = BertModelLayer.from_params(bert_params, name='bert')
    input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name='input_ids')
    print('----intput_ids', input_ids)
    bert_output = bert(input_ids)
    print('bert shape', bert_output.shape)
    # Keep only the [CLS] position (first token) as the sequence summary.
    cls_out = keras.layers.Lambda(lambda seq: seq[:, 0, :])(bert_output)
    cls_out = keras.layers.Dropout(0.5)(cls_out)
    logits = keras.layers.Dense(units=768, activation='tanh')(cls_out)
    logits = keras.layers.Dropout(0.5)(logits)
    # Final layer applies softmax, so the model emits probabilities, not raw logits.
    logits = keras.layers.Dense(units=len(classes), activation='softmax')(logits)
    model = keras.Model(inputs=input_ids, outputs=logits)
    model.build(input_shape=(None, max_seq_len))
    load_stock_weights(bert, bert_ckpt_file)
    return model
# ---- Training driver -------------------------------------------------------
# Prepares the dataset, loads the previously saved model, fine-tunes it for a
# few more epochs, re-saves it, and writes accuracy/loss plots to disk.
classes = train.polarity.unique().tolist()
data = IntentDetectionData(train, test, tokenizer, classes, max_seq_len=128)
print(data.train_x.shape)
# Training:
#model = create_model(data.max_seq_len, bert_ckpt_file)
check_point_path = '/home/shravan/dissertation/bert_model/saved_model/1'
model = tf.keras.models.load_model(check_point_path)
print(model.summary())
model.compile(
    #optimizer=keras.optimizers.Adam(1e-5),
    optimizer=keras.optimizers.Adam(),
    # BUG FIX: the network's final layer is softmax (see create_model), so the
    # model outputs probabilities, not logits. from_logits must be False or the
    # loss is effectively computed on a double-softmax.
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['acc']
)
# BUG FIX: '%s' expands to seconds-since-epoch (platform-dependent extension);
# '%S' (seconds of the minute) was intended for the timestamped log dir name.
log_dir = 'log/intent_detection' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir)
history = model.fit(
    x=data.train_x,
    y=data.train_y,
    validation_data=(data.test_x, data.test_y),
    batch_size=32,
    shuffle=True,
    epochs=5,
    callbacks=[tensorboard_callback]
)
# Re-save the fine-tuned model over the same checkpoint path.
tf.saved_model.save(model, check_point_path)
# model.save(check_point_path)
# Plot the training curves. matplotlib's `label` expects a string, not a
# one-element list, so plain strings are used below.
acc = history.history['acc']
val_acc = history.history['val_acc']
epochs = range(len(acc))
accuracy_file = '/home/shravan/aws_scripts/cronjob/accuracy.png'
plt.figure(figsize=(15, 10))
plt.plot(epochs, acc, label='Training Accuracy')
plt.plot(epochs, val_acc, label='Validation Accuracy')
plt.legend()
plt.savefig(accuracy_file)
loss = history.history['loss']
val_loss = history.history['val_loss']
validation_file = '/home/shravan/aws_scripts/cronjob/validation.png'
plt.figure(figsize=(15, 10))
plt.plot(epochs, loss, label='Training Loss')
plt.plot(epochs, val_loss, label='Validation Loss')
plt.legend()
plt.savefig(validation_file)
|
[
"shravan007.c@gmail.com"
] |
shravan007.c@gmail.com
|
7d6dbf62200acf666c154e067071df74342c1d85
|
745e2421cd2d110c37ec91315567678f50a7647f
|
/Decorators_Pattern.py
|
2a6272a0972432cd8e30af73a5fe8db581cb92cc
|
[] |
no_license
|
TetianaSob/Python-Projects
|
3a5760b21029db2de1123d4aa23a614c3ba41c33
|
c37528db7e8d65e903357e2cdb7fa64e46537f15
|
refs/heads/main
| 2023-03-19T15:30:58.800231
| 2021-03-02T16:51:56
| 2021-03-02T16:51:56
| 310,383,298
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
# Decorators_Pattern.py
from functools import wraps
'''
def my_decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
#do some stuff with fn(*args, **kwargs)
pass
return wrapper
'''
def log_function_data(fn):
    """Decorator: announce the wrapped function and its docstring, then call it.

    BUG FIX: applies functools.wraps (imported at the top of this file but
    previously unused) so the decorated function keeps its original
    __name__ and __doc__ instead of reporting 'wrapper' / None.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        print(f"you are about to call {fn.__name__}")
        print(f"Here'a the documentation: {fn.__doc__}")
        return fn(*args, **kwargs)
    return wrapper
# Demo: the decorated call prints the announcement lines before the result.
@log_function_data
def add(x, y):
    return x + y
print(add(10, 30))
# you are about to call add
# Here'a the documentation: None
# 40
# The two prints below show the classic decorator pitfall: without
# functools.wraps in the decorator, `add` reports the wrapper's metadata.
print(add.__doc__) # None
print(add.__name__) # wrapper
#print(add) # wrappers
|
[
"noreply@github.com"
] |
TetianaSob.noreply@github.com
|
2bd3e8b73a200a47e022e2141e88a0e091cbf31a
|
e7069d85fd4a6fac4958f19b4d14503ffa42b4bb
|
/connecting_silos_kththesis_TCOMK_CINTE/mysite/polls/src/Canvas/unit_test/config.py
|
6266a1209287643b1c764696f43ceb0a29bda5e3
|
[] |
no_license
|
ShivaBP/Bechelor-degree-project
|
cd062ff10e207e380a2c59bc0a50f073c2e866bd
|
9f055d69ec9deabb6bd8ab3768c9d56787eed94d
|
refs/heads/master
| 2022-07-21T01:18:41.893027
| 2018-11-16T14:38:13
| 2018-11-16T14:38:13
| 137,949,087
| 0
| 0
| null | 2022-07-06T19:49:14
| 2018-06-19T21:47:51
|
HTML
|
UTF-8
|
Python
| false
| false
| 454
|
py
|
# Configuration file for Automation Unit Testing Module in Canvas Module
# Please setup this file before start of the main application
#expected answer
#Configure before start of the program
# Base URL of the KTH Canvas instance the tests run against.
kth_canvas="https://kth.instructure.com/"
#course_id 2139(sandbox)
# Sandbox course used by the automated tests.
thesis_course_id = 2139 #sandbox
#assignment_id 24565 testing 1; 24566 testing 2 ; 24567 testing 3;
# Assignment ids used by the individual test cases.
proporsal_assignment_id=24565 #test_1
Thesis_assignment_id=24566 #test_2
# Canvas user id the tests act as.
User_id=11185
|
[
"shivabp@icloud.com"
] |
shivabp@icloud.com
|
dcaa3b2565bd2460e0ebd11c7443bf42bf8255a9
|
1e8b7dcbbc4ed9afcd24127c44c8f6975ce30e45
|
/server/characters/migrations/0028_race_fieldvalues.py
|
243b6894f3268856ae969ccf40cd9fcdfdfaee3d
|
[
"MIT"
] |
permissive
|
Etskh/pathfinder-app
|
8de61c6592f952141da417bcbb54c76937930b92
|
c33dc0d2645d11ff5be3d29fe3167bdc12c61a79
|
refs/heads/master
| 2021-01-20T02:42:04.066255
| 2017-09-26T16:33:37
| 2017-09-26T16:33:37
| 101,333,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 573
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-25 17:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adds the nullable Race.fieldValues FK to FieldValues."""
    dependencies = [
        ('characters', '0027_auto_20170924_1835'),
    ]
    operations = [
        migrations.AddField(
            model_name='race',
            name='fieldValues',
            # Nullable with default None so existing Race rows remain valid.
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='characters.FieldValues'),
        ),
    ]
|
[
"etskh@hotmail.com"
] |
etskh@hotmail.com
|
e9f8308420ff20436e91c925c1895f85cfe0fee1
|
149010198ee955b9aa2d9cae535d20456f566239
|
/static/urls.py
|
111e334aa682c1e2c7a7d22120810f5352646383
|
[] |
no_license
|
Eslam-Elsawy/teigeneratortool
|
01693cef01be1106a577a3877bed107bd5a3af4f
|
ed4e5161bbcbf63ed89f5ea10bb892c03f9b1c88
|
refs/heads/master
| 2020-03-19T01:30:09.547367
| 2018-06-10T03:26:11
| 2018-06-10T03:26:11
| 135,550,553
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
from django.urls import path
from . import views
# URL routes for this app: index page and the markup-generation endpoint.
urlpatterns = [
    path('', views.index, name='index'),
    path('generatemarkup/', views.generatemarkup, name='generatemarkup'),
]
|
[
"eslam@uw.edu"
] |
eslam@uw.edu
|
65333f58eba150374b4791c10dff57bae3783292
|
cf212b536a144ab727d7f76011ec723a1883e119
|
/blog/urls.py
|
b5119a0f0be848e16719a49ad5e380989797c6b0
|
[] |
no_license
|
2011180025/my-first-blog
|
372d1e6507760aead2c983b808bf69d55a2bc1e1
|
788ff320df83c032343248d802818f0ff2bde137
|
refs/heads/master
| 2021-01-13T00:37:02.733309
| 2015-12-23T14:28:53
| 2015-12-23T14:28:53
| 48,492,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from django.conf.urls import patterns, url
from . import views
# NOTE(review): string view references ('blog.views.post_list') were
# deprecated in Django 1.8 and removed in 1.10; these routes only work on
# old Django versions — confirm the project's Django version.
urlpatterns = [
    url(r'^$', 'blog.views.post_list', name='post_list'),
    url(r'^Net_present_value/$', 'blog.views.Net_present_value', name='Net_present_value'),
    url(r'^Annuity/$', 'blog.views.Annuity', name='Annuity'),
    url(r'^Bond/$', 'blog.views.Bond', name='Bond'),
    url(r'^Stock/$', 'blog.views.Stock', name='Stock'),
    url(r'^Portfolio/$', 'blog.views.Portfolio', name='Portfolio'),
]
|
[
"cro@koreatech.ac.kr"
] |
cro@koreatech.ac.kr
|
d2a33da2412d0aa4015ea6b87bad6c8262b51874
|
d1d24981c7c89f76d8db931db64ce7c0f9bfd7e5
|
/viewProfile/apps.py
|
8e2b9e23d54365873586a79a05bd951b268c23b7
|
[] |
no_license
|
Rafia26/Team_STAR_CSE327
|
13e3521b9668f34f12bf63883e0a13206d5d64c6
|
f93f68e5c8ed94c762c74e644ed1018e3488922f
|
refs/heads/main
| 2023-04-20T08:05:57.949141
| 2021-05-10T11:44:27
| 2021-05-10T11:44:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from django.apps import AppConfig
class ViewprofileConfig(AppConfig):
    """AppConfig that registers the ``viewProfile`` app with this project."""
    # Dotted path of the application this config belongs to.
    name = 'viewProfile'
|
[
"shahriar.shovon@northsouth.edu"
] |
shahriar.shovon@northsouth.edu
|
0407d709e31e375d0f7acf25e256100085d8200d
|
af866adac3d72520fee26f23d603c7f76c8aca51
|
/Section3- Functions/returnFunction.py
|
7006180439c77f70fb1070a6564f6d3680790200
|
[] |
no_license
|
SpencerTruett/AutomateTheBoringStuff
|
3fda3efa7959166ceb61161968e178663a139e84
|
94233c6509087a7541dc6fad7751fb5d2bf01953
|
refs/heads/master
| 2023-03-27T20:11:55.139252
| 2021-03-22T23:41:02
| 2021-03-22T23:41:02
| 347,491,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
def plusOne(number):
    """Return *number* incremented by one."""
    incremented = 1 + number
    return incremented
# Demo: increment 5 and show the result (prints 6).
newNumber = plusOne(5)
print(newNumber)
|
[
"srtruett1993@gmail.com"
] |
srtruett1993@gmail.com
|
0cbf0ce46b4fe9c95eb58ea252a95298641b4671
|
1833d12882a9725d7e75494951ce64bb50dccc1f
|
/algo/interesting_problems/python/skyline_problem.py
|
53cbabacdbf35990b96d207dd7729ccb36fd07c8
|
[] |
no_license
|
vivekkumark/BigO
|
c1c4c194a3568e66c6c494d4dfc6c6eb36af3f94
|
0d7ae294588aec1da1590bbb1abae2e8a96d7e3a
|
refs/heads/master
| 2021-09-24T04:36:00.016018
| 2018-10-03T04:10:48
| 2018-10-03T04:10:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,378
|
py
|
from __future__ import print_function
def append_skyline(result, sk):
    """Append strip sk = (x, height) to result unless it is redundant.

    A strip is redundant when the previous strip already has the same
    height, so appending it would not change the outline.
    """
    if not result:
        result.append(sk)
    elif result[-1][1] != sk[1]:
        result.append(sk)


def skyline_merge(s1, s2):
    """Merge two skylines (lists of (x, height) strips sorted by x)."""
    h1 = h2 = 0  # current height contributed by each skyline
    l1 = l2 = 0  # read cursors into s1 / s2
    result = []
    while l1 < len(s1) and l2 < len(s2):
        s1_x, s1_ht = s1[l1]
        s2_x, s2_ht = s2[l2]
        if s1_x <= s2_x:
            h1 = s1_ht
            append_skyline(result, (s1_x, max(h1, h2)))
            l1 += 1
        else:
            h2 = s2_ht
            append_skyline(result, (s2_x, max(h1, h2)))
            l2 += 1
    # One side is exhausted; its final 0-height strip has been consumed, so
    # the leftover strips of the other side can be copied through unchanged.
    while l1 < len(s1):
        result.append(s1[l1])
        l1 += 1
    while l2 < len(s2):
        result.append(s2[l2])
        l2 += 1
    return result


def skyline(buildings):
    """Divide-and-conquer skyline of buildings given as (left, height, right).

    :param buildings: list of (left, height, right) tuples sorted by left edge
    :return: list of (x, height) strips describing the outline
    """
    L = len(buildings)
    if L == 0:
        # Robustness: an empty input previously recursed forever.
        return []
    if L == 1:
        left, ht, right = buildings[0]
        return [(left, ht), (right, 0)]
    # BUG FIX: floor division keeps the split index an int on Python 3,
    # where '/' yields a float and breaks the slicing below.
    mid = L // 2
    s1 = skyline(buildings[:mid])
    s2 = skyline(buildings[mid:])
    return skyline_merge(s1, s2)
if __name__ == '__main__':
    # Sample buildings as (left, height, right); sorted by left edge before
    # running the divide-and-conquer skyline.
    buildings = [(1, 11, 5), (2, 6, 7), (3, 13, 9), (12, 7, 16),
                 (14, 3, 25), (19, 18, 22), (23, 13, 29), (24, 4, 28)]
    buildings.sort(key=lambda k: k[0])
    print(buildings)
    print(skyline(buildings))
|
[
"vivekkumar1987@gmail.com"
] |
vivekkumar1987@gmail.com
|
19579a2894ac64b3ccba0354f93383ac9fa373e8
|
ff0972ad07a72893f1e0aa02844dcc87dec5fa61
|
/sisyphus/migrations/0009_auto_20180613_1054.py
|
ea2cc2b4007f18d9b8b31e1b746c7cac5190a87a
|
[
"MIT"
] |
permissive
|
JessicaNgo/colossus
|
6febcc616e1c8a12df0b0143cc74b368fa18d162
|
c7e4ec9315b1181f200a0a8d0a2326b91136f137
|
refs/heads/master
| 2020-04-27T19:31:28.885998
| 2019-03-08T18:54:08
| 2019-03-08T18:54:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 506
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2018-06-13 17:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alters AnalysisRun.run_status to a 50-char CharField
    with default 'Unknown'."""
    dependencies = [
        ('sisyphus', '0008_auto_20180117_1533'),
    ]
    operations = [
        migrations.AlterField(
            model_name='analysisrun',
            name='run_status',
            field=models.CharField(default='Unknown', max_length=50, verbose_name='Run Status'),
        ),
    ]
|
[
"amcphers@bccrc.ca"
] |
amcphers@bccrc.ca
|
927298017bc99a56cd0be367e27fc3953b24df6c
|
45c636632c7fdd2648f85d4e8a914a8ead4b6512
|
/core/blocks.py
|
616a6e3e6ab5e3ee4ac57cfc30ec1ff23a95b260
|
[] |
no_license
|
Erika1012/DeepFry
|
716cba3f56b71ac0df42ed4406e614557a32e2d0
|
52d1be1553c8a4d7b68fd51ba62037fa31ce17c0
|
refs/heads/main
| 2023-06-03T06:40:34.884707
| 2021-06-18T09:52:54
| 2021-06-18T09:52:54
| 373,422,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,408
|
py
|
from collections import Iterable
import itertools
import tensorflow as tf
from tensorflow.python.keras import layers
class DNN(tf.keras.Model):
    """
    Deep Neural Network
    """
    def __init__(self,
                 units,
                 use_bias=True,
                 use_bn=False,
                 dropout=0,
                 activations=None,
                 kernel_initializers='glorot_uniform',
                 bias_initializers='zeros',
                 kernel_regularizers=tf.keras.regularizers.l2(1e-5),
                 bias_regularizers=None,
                 **kwargs):
        """
        :param units:
            An iterable of hidden layers' neural units' number, its length is the depth of the DNN.
        :param use_bias:
            Iterable/Boolean.
            If this is not iterable, every layer of the DNN will have the same param, the same below.
        :param activations:
            Iterable/String/TF activation class
        :param kernel_initializers:
            Iterable/String/TF initializer class
        :param bias_initializers:
            Iterable/String/TF initializer class
        :param kernel_regularizers:
            Iterable/String/TF regularizer class
        :param bias_regularizers:
            Iterable/String/TF regularizer class
        """
        super(DNN, self).__init__(**kwargs)
        self.units = units
        self.use_bias = use_bias
        self.use_bn = use_bn
        self.dropout = dropout
        self.activations = activations
        self.kernel_initializers = kernel_initializers
        self.bias_initializers = bias_initializers
        self.kernel_regularizers = kernel_regularizers
        self.bias_regularizers = bias_regularizers
        # Broadcast scalar options to one value per layer. Strings are
        # themselves Iterable, hence the explicit str checks for the
        # initializer/regularizer arguments below.
        # NOTE(review): `Iterable` is imported from `collections` at the top
        # of this file; that alias was removed in Python 3.10 — should come
        # from collections.abc.
        if not isinstance(self.use_bias, Iterable):
            self.use_bias = [self.use_bias] * len(self.units)
        if not isinstance(self.use_bn, Iterable):
            self.use_bn = [self.use_bn] * len(self.units)
        if not isinstance(self.dropout, Iterable):
            self.dropout = [self.dropout] * len(self.units)
        if not isinstance(self.activations, Iterable):
            self.activations = [self.activations] * len(self.units)
        if isinstance(self.kernel_initializers, str) or not isinstance(self.kernel_initializers, Iterable):
            self.kernel_initializers = [self.kernel_initializers] * len(self.units)
        if isinstance(self.bias_initializers, str) or not isinstance(self.bias_initializers, Iterable):
            self.bias_initializers = [self.bias_initializers] * len(self.units)
        if isinstance(self.kernel_regularizers, str) or not isinstance(self.kernel_regularizers, Iterable):
            self.kernel_regularizers = [self.kernel_regularizers] * len(self.units)
        if isinstance(self.bias_regularizers, str) or not isinstance(self.bias_regularizers, Iterable):
            self.bias_regularizers = [self.bias_regularizers] * len(self.units)
        # Stack: Dense -> (Dropout if rate > 0) -> (BatchNorm if enabled),
        # once per entry in `units`.
        self.mlp = tf.keras.Sequential()
        for i in range(len(self.units)):
            self.mlp.add(layers.Dense(
                units=self.units[i],
                activation=self.activations[i],
                use_bias=self.use_bias[i],
                kernel_initializer=self.kernel_initializers[i],
                bias_initializer=self.bias_initializers[i],
                kernel_regularizer=self.kernel_regularizers[i],
                bias_regularizer=self.bias_regularizers[i]
            ))
            if self.dropout[i] > 0:
                self.mlp.add(layers.Dropout(self.dropout[i]))
            if self.use_bn[i]:
                self.mlp.add(layers.BatchNormalization())
    def call(self, inputs, **kwargs):
        """Run the input through the stacked MLP."""
        output = self.mlp(inputs)
        return output
class FM(tf.keras.Model):
    """
    Factorization Machine Block
    compute cross features (order-2) and return their sum (without linear term)
    """
    def __init__(self, **kwargs):
        super(FM, self).__init__(**kwargs)
    def call(self, inputs, require_logit=True, **kwargs):
        """
        :param inputs:
            list of 2D tensor with shape [batch_size, embedding_size]
            all the features should be embedded and have the same embedding size
        :return:
            2D tensor with shape [batch_size, 1]
            sum of all cross features
        """
        # [b, n, m]
        inputs_3d = tf.stack(inputs, axis=1)
        # [b, m]
        # (a + b) ^ 2 - (a ^ 2 + b ^ 2) = 2 * ab, we need the cross feature "ab"
        square_of_sum = tf.square(tf.reduce_sum(inputs_3d, axis=1, keepdims=False))
        sum_of_square = tf.reduce_sum(tf.square(inputs_3d), axis=1, keepdims=False)
        if require_logit:
            # Scalar logit per example: sum over the embedding dimension.
            outputs = 0.5 * tf.reduce_sum(square_of_sum - sum_of_square, axis=1, keepdims=True)
        else:
            # Keep the per-dimension interaction vector [b, m].
            outputs = 0.5 * (square_of_sum - sum_of_square)
        return outputs
class InnerProduct(tf.keras.Model):
    """Pairwise inner products between feature embeddings (PNN-style)."""
    def __init__(self, require_logit=True, **kwargs):
        super(InnerProduct, self).__init__(**kwargs)
        # When True, reduce each pair to a scalar inner product; otherwise
        # flatten the element-wise products.
        self.require_logit = require_logit
    def call(self, inputs, **kwargs):
        # NOTE(review): `range(i, len(inputs))` includes j == i, so each
        # feature's inner product with itself is included. OuterProduct and
        # AttentionBasedPoolingLayer below use `i + 1` — confirm intended.
        rows = list()
        cols = list()
        for i in range(len(inputs) - 1):
            for j in range(i, len(inputs)):
                rows.append(i)
                cols.append(j)
        # [batch_size, pairs_num, embedding_size]
        p = tf.stack([inputs[i] for i in rows], axis=1)
        q = tf.stack([inputs[j] for j in cols], axis=1)
        if self.require_logit:
            inner_product = tf.reduce_sum(p * q, axis=-1, keepdims=False)
        else:
            inner_product = tf.keras.layers.Flatten()(p * q)
        return inner_product
class OuterProduct(tf.keras.Model):
    """Kernel-weighted outer products between feature embedding pairs."""
    def __init__(self,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=tf.keras.regularizers.l2(1e-5),
                 **kwargs):
        super(OuterProduct, self).__init__(**kwargs)
        self.kernel_initializer = kernel_initializer
        self.kernel_regularizer = kernel_regularizer
    def call(self, inputs, **kwargs):
        # NOTE(review): add_weight is invoked inside call, once per pair per
        # invocation — this appears to create fresh kernels on every call;
        # confirm weights are meant to be created in build/__init__ instead.
        outer_products_list = list()
        for i in range(len(inputs) - 1):
            for j in range(i + 1, len(inputs)):
                # inp_i: [b, 1, m], inp_j: [b, m, 1] -> scalar per pair.
                inp_i = tf.expand_dims(inputs[i], axis=1)
                inp_j = tf.expand_dims(inputs[j], axis=-1)
                kernel = self.add_weight(shape=(inp_i.shape[2], inp_j.shape[1]),
                                         initializer=self.kernel_initializer,
                                         regularizer=self.kernel_regularizer,
                                         trainable=True)
                product = tf.reduce_sum(tf.matmul(tf.matmul(inp_i, kernel), inp_j), axis=-1, keepdims=False)
                outer_products_list.append(product)
        outer_product_layer = tf.concat(outer_products_list, axis=1)
        return outer_product_layer
class CrossNetwork(tf.keras.Model):
    """Cross network (DCN-style) of explicit feature crossing layers."""
    def __init__(self,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=tf.keras.regularizers.l2(1e-5),
                 bias_initializer='zeros',
                 bias_regularizer=None,
                 **kwargs):
        super(CrossNetwork, self).__init__(**kwargs)
        self.kernel_initializer = kernel_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_initializer = bias_initializer
        self.bias_regularizer = bias_regularizer
    def call(self, inputs, layers_num=3, require_logit=True, **kwargs):
        # x0: [b, d, 1] — the concatenated input, reused by every layer.
        # NOTE(review): add_weight inside call creates new kernel/bias per
        # layer on each invocation — confirm intended.
        x0 = tf.expand_dims(tf.concat(inputs, axis=1), axis=-1)
        x = tf.transpose(x0, [0, 2, 1])
        for i in range(layers_num):
            kernel = self.add_weight(shape=(x0.shape[1], 1),
                                     initializer=self.kernel_initializer,
                                     regularizer=self.kernel_regularizer,
                                     trainable=True)
            bias = self.add_weight(shape=(x0.shape[1], 1),
                                   initializer=self.bias_initializer,
                                   regularizer=self.bias_regularizer,
                                   trainable=True)
            # One cross layer: x0 * (x . w) + b + x (with transposes to
            # keep shapes compatible).
            x = tf.matmul(tf.matmul(x0, x), kernel) + bias + tf.transpose(x, [0, 2, 1])
            x = tf.transpose(x, [0, 2, 1])
        x = tf.squeeze(x, axis=1)
        if require_logit:
            # Project the crossed vector down to a single logit.
            kernel = self.add_weight(shape=(x0.shape[1], 1),
                                     initializer=self.kernel_initializer,
                                     regularizer=self.kernel_regularizer,
                                     trainable=True)
            x = tf.matmul(x, kernel)
        return x
class CIN(tf.keras.Model):
    """Compressed Interaction Network (xDeepFM-style) producing one logit."""
    def __init__(self,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=tf.keras.regularizers.l2(1e-5),
                 **kwargs):
        super(CIN, self).__init__(**kwargs)
        self.kernel_initializer = kernel_initializer
        self.kernel_regularizer = kernel_regularizer
    def call(self, inputs, hidden_width=(128, 64), require_logit=True, **kwargs):
        # hidden_width=(128, 64)
        # [b, n, m]
        x0 = tf.stack(inputs, axis=1)
        x = tf.identity(x0)
        # The field count n is prepended, so the first CIN layer maps n -> n.
        hidden_width = [x0.shape[1]] + list(hidden_width)
        finals = list()
        for h in hidden_width:
            # Index every (x0 row, x row) pair for the element-wise products.
            rows = list()
            cols = list()
            for i in range(x0.shape[1]):
                for j in range(x.shape[1]):
                    rows.append(i)
                    cols.append(j)
            # [b, pair, m]
            x0_ = tf.gather(x0, rows, axis=1)
            x_ = tf.gather(x, cols, axis=1)
            # [b, m, pair]
            p = tf.transpose(tf.multiply(x0_, x_), [0, 2, 1])
            # NOTE(review): add_weight inside call — kernels appear to be
            # re-created on every invocation; confirm intended.
            kernel = self.add_weight(shape=(p.shape[-1], h),
                                     initializer=self.kernel_initializer,
                                     regularizer=self.kernel_regularizer,
                                     trainable=True)
            # [b, h, m]
            x = tf.transpose(tf.matmul(p, kernel), [0, 2, 1])
            # Sum-pool over the embedding dimension for this layer's output.
            finals.append(tf.reduce_sum(x, axis=-1, keepdims=False))
        finals = tf.concat(finals, axis=-1)
        kernel = self.add_weight(shape=(finals.shape[-1], 1),
                                 initializer=self.kernel_initializer,
                                 regularizer=self.kernel_regularizer,
                                 trainable=True)
        logit = tf.matmul(finals, kernel)
        return logit
class AttentionBasedPoolingLayer(tf.keras.Model):
    """Attention-weighted pooling over pairwise interactions (AFM-style)."""
    def __init__(self,
                 attention_factor=4,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=tf.keras.regularizers.l2(1e-5),
                 bias_initializer='zeros',
                 bias_regularizer=None,
                 **kwargs):
        super(AttentionBasedPoolingLayer, self).__init__(**kwargs)
        # Width of the hidden attention projection.
        self.attention_factor = attention_factor
        self.kernel_initializer = kernel_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_initializer = bias_initializer
        self.bias_regularizer = bias_regularizer
        # Two-layer attention net: relu projection then scalar score.
        self.att_layer = layers.Dense(
            units=self.attention_factor,
            activation='relu',
            use_bias=True,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_initializer=self.bias_initializer,
            bias_regularizer=self.bias_regularizer
        )
        self.att_proj_layer = layers.Dense(
            units=1,
            activation=None,
            use_bias=False,
            kernel_initializer=self.kernel_initializer
        )
    def call(self, inputs, **kwargs):
        # Element-wise products of all strict pairs (i < j) of embeddings.
        interactions = list()
        for i in range(len(inputs) - 1):
            for j in range(i + 1, len(inputs)):
                interactions.append(tf.multiply(inputs[i], inputs[j]))
        interactions = tf.stack(interactions, axis=1)
        # Score each pair, softmax over pairs, then weighted sum-pool.
        att_weight = self.att_layer(interactions)
        att_weight = self.att_proj_layer(att_weight)
        att_weight = layers.Softmax(axis=1)(att_weight)
        output = tf.reduce_sum(interactions * att_weight, axis=1)
        return output
class AutoIntInteraction(tf.keras.Model):
    """Multi-head self-attention interaction layer (AutoInt-style)."""
    def __init__(self, att_embedding_size=8, heads=2, use_res=True, seed=2333, **kwargs):
        super(AutoIntInteraction, self).__init__(**kwargs)
        self.att_embedding_size = att_embedding_size
        self.heads = heads
        # When True, adds a learned residual projection of the input.
        self.use_res = use_res
        self.seed = seed
    def call(self, inputs, **kwargs):
        # NOTE(review): the Q/K/V (and residual) weights are created inside
        # call — they appear to be re-created on every invocation; confirm
        # they should be built once in build/__init__.
        m = inputs.shape[-1]
        W_Query = self.add_weight(shape=[m, self.att_embedding_size * self.heads],
                                  initializer=tf.keras.initializers.RandomNormal(seed=self.seed))
        W_key = self.add_weight(shape=[m, self.att_embedding_size * self.heads],
                                initializer=tf.keras.initializers.RandomNormal(seed=self.seed))
        W_Value = self.add_weight(shape=[m, self.att_embedding_size * self.heads],
                                  initializer=tf.keras.initializers.RandomNormal(seed=self.seed))
        queries = tf.matmul(inputs, W_Query)
        keys = tf.matmul(inputs, W_key)
        values = tf.matmul(inputs, W_Value)
        # Split the projection into per-head tensors stacked on a new axis.
        queries = tf.stack(tf.split(queries, self.heads, axis=2))
        keys = tf.stack(tf.split(keys, self.heads, axis=2))
        values = tf.stack(tf.split(values, self.heads, axis=2))
        # Scaled-dot-product style attention (no scaling factor applied here).
        att_score = tf.matmul(queries, keys, transpose_b=True)
        att_score = layers.Softmax(axis=-1)(att_score)
        result = tf.matmul(att_score, values)
        # Re-merge the heads back into the last dimension.
        result = tf.concat(tf.split(result, self.heads), axis=-1)
        result = tf.squeeze(result, axis=0)
        if self.use_res:
            W_Res = self.add_weight(shape=[m, self.att_embedding_size * self.heads],
                                    initializer=tf.keras.initializers.RandomNormal(seed=self.seed))
            result = result + tf.matmul(inputs, W_Res)
        result = tf.keras.activations.relu(result)
        return result
class FGCNNlayer(tf.keras.layers.Layer):
    """FGCNN feature-generation layer: conv + pool, plus recombined features."""
    def __init__(self, filters, kernel_width, new_feat_filters, pool_width, **kwargs):
        super(FGCNNlayer, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_width = kernel_width
        # Number of recombined ("new") feature maps generated per position.
        self.new_feat_filters = new_feat_filters
        self.pool_width = pool_width
    def call(self, inputs, **kwargs):
        # NOTE(review): Conv2D/MaxPooling2D/Dense sub-layers are instantiated
        # inside call, so new layers (and weights) appear to be created on
        # every invocation — confirm they should be built in __init__.
        output = inputs
        output = tf.keras.layers.Conv2D(
            filters=self.filters,
            strides=(1, 1),
            kernel_size=(self.kernel_width, 1),
            padding='same',
            activation='tanh',
            use_bias=True
        )(output)
        output = tf.keras.layers.MaxPooling2D(
            pool_size=(self.pool_width, 1)
        )(output)
        # Recombine the pooled maps into new features via a dense layer,
        # then reshape back to [b, new_rows, embedding].
        new_feat_output = tf.keras.layers.Flatten()(output)
        new_feat_output = tf.keras.layers.Dense(
            units=output.shape[1] * output.shape[2] * self.new_feat_filters,
            activation='tanh',
            use_bias=True
        )(new_feat_output)
        new_feat_output = tf.reshape(new_feat_output,
                                     shape=(-1, output.shape[1] * self.new_feat_filters, output.shape[2]))
        return output, new_feat_output
class BiInteraction(tf.keras.Model):
    """Bilinear interaction of embedding pairs with shared/per-field/per-pair kernels.

    mode='all'         : one kernel W shared by every pair
    mode='each'        : one kernel per left-hand field i
    mode='interaction' : one kernel per (i, j) pair
    """
    def __init__(self, mode='all', **kwargs):
        super(BiInteraction, self).__init__(**kwargs)
        self.mode = mode
    def call(self, inputs, **kwargs):
        # NOTE(review): the inner loops use `range(i, len(inputs))`, which
        # includes j == i (self-interaction) — confirm intended. Kernels are
        # also created via add_weight inside call on every invocation.
        output = list()
        embedding_size = inputs[0].shape[-1]
        if self.mode == 'all':
            W = self.add_weight(
                shape=(embedding_size, embedding_size),
                initializer='glorot_uniform',
                regularizer=tf.keras.regularizers.l2(1e-5),
                trainable=True
            )
            for i in range(len(inputs) - 1):
                for j in range(i, len(inputs)):
                    inter = tf.tensordot(inputs[i], W, axes=(-1, 0)) * inputs[j]
                    output.append(inter)
        elif self.mode == 'each':
            for i in range(len(inputs) - 1):
                W = self.add_weight(
                    shape=(embedding_size, embedding_size),
                    initializer='glorot_uniform',
                    regularizer=tf.keras.regularizers.l2(1e-5),
                    trainable=True
                )
                for j in range(i, len(inputs)):
                    inter = tf.tensordot(inputs[i], W, axes=(-1, 0)) * inputs[j]
                    output.append(inter)
        elif self.mode == 'interaction':
            for i in range(len(inputs) - 1):
                for j in range(i, len(inputs)):
                    W = self.add_weight(
                        shape=(embedding_size, embedding_size),
                        initializer='glorot_uniform',
                        regularizer=tf.keras.regularizers.l2(1e-5),
                        trainable=True
                    )
                    inter = tf.tensordot(inputs[i], W, axes=(-1, 0)) * inputs[j]
                    output.append(inter)
        output = tf.concat(output, axis=1)
        return output
class SENet(tf.keras.Model):
    """Squeeze-and-excitation re-weighting over the feature axis."""
    def __init__(self, axis=-1, reduction=4, **kwargs):
        super(SENet, self).__init__(**kwargs)
        # Axis averaged over in the squeeze step (embedding dim by default).
        self.axis = axis
        # Bottleneck width of the excitation MLP.
        self.reduction = reduction
    def call(self, inputs, **kwargs):
        # inputs [batch_size, feats_num, embedding_size]
        feats_num = inputs.shape[1]
        weights = tf.reduce_mean(inputs, axis=self.axis, keepdims=False)  # [batch_size, feats_num]
        # NOTE(review): both excitation layers use relu here; the original
        # SENet formulation gates with sigmoid in the second — confirm
        # intended. Weights are also created inside call each invocation.
        W1 = self.add_weight(
            shape=(feats_num, self.reduction),
            trainable=True,
            initializer='glorot_normal'
        )
        W2 = self.add_weight(
            shape=(self.reduction, feats_num),
            trainable=True,
            initializer='glorot_normal'
        )
        weights = tf.keras.activations.relu(tf.tensordot(weights, W1, axes=(-1, 0)))
        weights = tf.keras.activations.relu(tf.tensordot(weights, W2, axes=(-1, 0)))
        weights = tf.expand_dims(weights, axis=-1)
        output = tf.multiply(weights, inputs)  # [batch_size, feats_num, embedding_size]
        return output
|
[
"noreply@github.com"
] |
Erika1012.noreply@github.com
|
f32be0db93bb51a51adb1c0ca44d6a393386a88b
|
7c113fd5b6d96d557b43cb4b2b1b12c006a2ab09
|
/mysite/urls.py
|
77f0d7ada20485a08c3bf2558696473f8acfeb7c
|
[] |
no_license
|
upinderawat/mysite
|
2f3bec8059133b059e08908b40f69c9c9fa47371
|
cabafc7c9c9eedaf63e11f708b6e896d09bae4a9
|
refs/heads/master
| 2020-04-14T13:16:43.803304
| 2019-01-13T13:12:49
| 2019-01-13T13:12:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,052
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
# Namespace used when reversing URLs for this project (e.g. 'mysite:index').
app_name = 'mysite'
urlpatterns = [
    path('', TemplateView.as_view(template_name='base.html'), name='index'),
    path('admin/', admin.site.urls),
    path('polls/', include('polls.urls')),
    path('accounts/', include('accounts.urls')),
    # NOTE(review): 'accounts/' is included twice — presumably so custom
    # account views take precedence over Django's built-in auth views for the
    # same prefix; confirm that is intentional.
    path('accounts/', include('django.contrib.auth.urls'))
]
|
[
"upinderawat@gmail.com"
] |
upinderawat@gmail.com
|
6974dd0629b949522de35a36af6999fa371e0f1c
|
4f543fe474c0bab42c14a5e7a1b7531f8d46c2f9
|
/pinclone/settings.py
|
5865cb282ed48d64b8785d8d88b1d2ff9be60544
|
[] |
no_license
|
H-Yazdi/Pin-clone
|
2d04fda82d3380a8ea7e5f9eee9a2eeab1d7d345
|
f92a858d366f6c0baafef10c0c16c33152ab99c6
|
refs/heads/master
| 2022-12-17T22:36:03.193066
| 2020-09-21T06:50:13
| 2020-09-21T06:50:13
| 294,551,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,421
|
py
|
"""
Django settings for Pinterest project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import environ
env = environ.Env(
# set casting, default value
DEBUG=(bool, False)
)
# reading .env file
environ.Env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env('DEBUG')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'account',
'pinterest',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pinclone.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# WSGI_APPLICATION = 'Pinterest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media/')
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGIN_REDIRECT_URL = ('/')
|
[
"hamideh.shaterzadeh@gmail.com"
] |
hamideh.shaterzadeh@gmail.com
|
1ce4ba3eda2733fe1dfb46779210ee607b1a8714
|
b9db91bdb30ba99aad8bbea251e5e1e8c2a7fa45
|
/opt/src/experiment/clear.py
|
ca129566c4e78356bff95128a9f55ee2db030f13
|
[] |
no_license
|
jacoloves/python_tool
|
682c3a91b535f15f1f8c9299e9b4c9ccbd5eea79
|
93ba5de17a727d6ccf9c67e4bca37ea502d06e5d
|
refs/heads/master
| 2021-03-01T04:25:49.581952
| 2021-01-27T13:52:50
| 2021-01-27T13:52:50
| 245,753,773
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,096
|
py
|
# window clear
import os
import time
# os.system('clear')
# print()
# print()
# print(' _ _ _ _ _ _ _ _ ')
# print(' | |')
# print(' - - - - - -')
# print(' | | ')
# print(' | | ')
# print(' | | ')
# print(' | | ')
# print(' | | ')
# print(' - - - - ')
arr = []
# for i in range(10):
# if i != 0:
# arr.append(" ")
# for j in range(i):
# print(arr[j], end="")
str_main = ""
for i in range(10):
if i == 0:
arr.append(i)
print("\r" + str(arr[0]), end="")
if i != 0:
for j in range(i):
arr.append(" ")
arr.append(i)
for j in range(i+1):
str_main = str_main + str(arr[j])
print("\r" + str_main, end="")
str_main= ""
arr.clear()
time.sleep(0.3)
# for i in range(10):
# if i != 0:
# for j in range(i):
# arr.append(" ")
# print("\r" + str(i), end="")
# time.sleep(1)
|
[
"5511068t@gmail.com"
] |
5511068t@gmail.com
|
cb0d787f69d7856b94ffd65143e4b0933445d600
|
f6ba01660d724ce3feb92a89feca952664a42dca
|
/패키지/1학기/모듈/sysmodule.py
|
b7ad3900c46fe31ce27442d2f676fa4d824f14e8
|
[] |
no_license
|
alswl4304/Programming-Python-
|
0724e0681632cf8dc8dd18358458574ea58a7712
|
f6152654efe7302b81913735598d0b60776ac6d1
|
refs/heads/master
| 2020-07-07T06:58:14.514266
| 2019-11-22T05:54:48
| 2019-11-22T05:54:48
| 203,284,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 193
|
py
|
import sys
# Print the invoked script path, then each command-line option with its index.
print("실행 파일명: ", sys.argv[0])
for i in range(1,len(sys.argv)):
    print("옵션",i,":",sys.argv[i])
sys.exit()
# NOTE: everything below sys.exit() is unreachable dead code (demonstrates
# that the loop never runs).
for i in range(1,100):
    print("여기는 실행 ㄴㄴ")
|
[
"alswl4304@naver.com"
] |
alswl4304@naver.com
|
a25838568da6d2eed97bfd499c97bfa94f8743c5
|
6aec6d752ca457ba68e8cd0822a78ccfc034be6f
|
/tests/test_color_triplet.py
|
4a592c867ac1c97dfbb5f5c040a1086bbb0cc03e
|
[
"MIT"
] |
permissive
|
scalabli/quo
|
1d1f399c86e6e9929238864ac3184cb2bde79fe7
|
6dbeb5b471517392d5ebf59e208d0f355dddff5f
|
refs/heads/master
| 2023-09-01T08:55:49.037768
| 2023-08-02T16:10:42
| 2023-08-02T16:10:42
| 327,378,689
| 23
| 4
|
MIT
| 2023-09-08T17:11:32
| 2021-01-06T17:09:32
|
Python
|
UTF-8
|
Python
| false
| false
| 469
|
py
|
from rich.color_triplet import ColorTriplet
def test_hex():
    """The hex property renders as lowercase '#rrggbb'."""
    cases = (
        (ColorTriplet(255, 255, 255), "#ffffff"),
        (ColorTriplet(0, 255, 0), "#00ff00"),
    )
    for triplet, expected in cases:
        assert triplet.hex == expected
def test_rgb():
    """The rgb property renders as 'rgb(r,g,b)' with no spaces."""
    cases = (
        (ColorTriplet(255, 255, 255), "rgb(255,255,255)"),
        (ColorTriplet(0, 255, 0), "rgb(0,255,0)"),
    )
    for triplet, expected in cases:
        assert triplet.rgb == expected
def test_normalized():
    """normalized maps each 0-255 channel onto the 0.0-1.0 range."""
    cases = (
        (ColorTriplet(255, 255, 255), (1.0, 1.0, 1.0)),
        (ColorTriplet(0, 255, 0), (0.0, 1.0, 0.0)),
    )
    for triplet, expected in cases:
        assert triplet.normalized == expected
|
[
"secretum.inc@pm.me"
] |
secretum.inc@pm.me
|
7b7380fb50dcff34baa9f0fcacdaa239ce98e0a5
|
43a78f0bcd94f617d2c55e5019f3f3475580165d
|
/Udemy/Section 24/logging_format.py
|
10c2b7c10e8bd2242f367e0ca3eefbd1a2f9c101
|
[] |
no_license
|
ctramm/Python_Training
|
2c35bd36b7cd1ea6598f915fafcf37ca048cf8ed
|
a0864a82bd6fb002c5f1a9aa7fb5d0b18341e6b0
|
refs/heads/master
| 2022-12-04T14:18:30.477562
| 2022-11-12T09:03:25
| 2022-11-12T09:03:25
| 171,736,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 459
|
py
|
"""
Section 24: Lecture 143
Changing the format of logs
https://docs.python.org/3/library/logging.html#logrecord-attributes
https://docs.python.org/3/library/time.html#time.strftime
"""
import logging
# Configure the root logger: timestamped, level-tagged messages at DEBUG level.
LOG_FORMAT = '%(asctime)s :: %(levelname)s: %(message)s'
DATE_FORMAT = '%m/%d/%Y %I:%M:%S %p'
logging.basicConfig(format=LOG_FORMAT, datefmt=DATE_FORMAT, level=logging.DEBUG)

# Emit one message per level to demonstrate the format.
logging.warning("Warning Message")
logging.info("Info MSG")
logging.error("Error Message")
|
[
"ctramm@wiley.com"
] |
ctramm@wiley.com
|
16fbff869fc8d49814edd99c35aa7c74588db82b
|
84b85961ffec7ed47c20133758d5f27135b21ce9
|
/src/base/baseFile.py
|
3f11495a6dda1b22cdb80ceb65d6bf80b12ee986
|
[] |
no_license
|
hi-cbh/pytest
|
67c70f0374a4d14711db05e337b6388a98c412c1
|
4da8fd4eae4b64e3b235e961238954afae84592e
|
refs/heads/master
| 2021-01-20T22:23:10.253647
| 2017-09-10T12:48:23
| 2017-09-10T12:48:23
| 100,832,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,681
|
py
|
# urs/bin/python
# encoding:utf-8
'''文件操作'''
# urs/bin/python
# encoding:utf-8
import os
import time
import operator
import subprocess
class BaseFile(object):
    """Helpers that manipulate files on an attached Android device via adb."""
    def adbFindFile(self, path, file):
        '''Return True if *file* appears in the `adb shell ls path` output, else False.'''
        try:
            value = os.popen("adb shell ls "+path)
            # txt = value.readlines()
            # print('value: %r' %txt)
            for txt in value.readlines():
                # print('value: %r' %txt)
                if (file in txt) and ("No such file or directory" not in txt) :
                    # file exists
                    return True
            return False
        except BaseException as msg:
            print('msg: %r' %msg)
            return False
    def adbDeleteFile(self, path, file):
        '''Delete *path* on the device.  NOTE(review): the *file* parameter is
        never used — the caller is expected to pass the full path in *path*.'''
        try:
            os.popen("adb shell rm "+path)
        except BaseException as msg:
            print('msg: %r' %msg)
    def adbTouchFile(self, path, file):
        '''Create an empty file at *path* + *file* on the device via `touch`.'''
        try:
            os.popen("adb shell touch "+path + file)
        except BaseException as msg:
            print('msg: %r' %msg)
    def waitforfile(self, path, file, timeout = 10):
        '''Poll until *file* appears under *path*, or *timeout* seconds elapse.'''
        # Convert the deadline to epoch milliseconds.
        timeout = int(round(time.time() * 1000)) + timeout * 1000
        try:
            # NOTE(review): int() here wraps the whole comparison, so the
            # condition is int(round(now_ms) < timeout) — semantically the
            # intended "now < deadline", just confusingly parenthesised.
            while (int(round(time.time() * 1000) < timeout)):
                # print('wait.....')
                if(self.adbFindFile(path, file) == True):
                    # print('find it')
                    return True;
                time.sleep(0.1)
        except BaseException as msg:
            print(msg)
        else:
            # Loop exhausted without finding the file: timed out.
            return False
    def adbMkdirDir(self, path):
        '''Create directory *path* (and parents) on the device via `mkdir -p`.'''
        try:
            os.popen("adb shell mkdir -p " + path)
        except BaseException as msg:
            print('msg: %r' %msg)
    def adbLsFileSize(self, path):
        '''Return the 10th space-delimited token of the `adb shell ls -l path`
        output (presumably the file name/size column — depends on the device's
        ls format; verify on the target toolchain), or None on failure.'''
        try:
            value = os.popen("adb shell ls -l " + path)
            # txt = value.readlines()
            # print('value: %r' %txt)
            for txt in value.readlines():
                if txt not in [None, '\n']:
                    # print(txt)
                    # print(txt.split(' ',13))
                    # print(txt.split(' ',13)[9])
                    return txt.split(' ',13)[9]
            '''使用正则表达式
            s = "sdfdsfis123123#4342#"
            result = re.findall(r".*#(.*)#.*",s)
            for x in result:
                print(x)
            print(result)
            '''
        except BaseException as msg:
            print('msg: %r' %msg)
            return None
    def waitForFileModify(self, timeoutMillis):
        '''Wait until the fixed cache file changes (per adbLsFileSize).
        Despite the name, *timeoutMillis* is interpreted as SECONDS
        (it is multiplied by 1000 before being added to the ms clock).'''
        try:
            # path = "/mnt/sdcard/0/0./t.txt"
            # dirpath = "/mnt/sdcard/0/0./"
            path = "/mnt/sdcard/Android/data/com.cmcc.test/cache/t.txt"
            dirpath = "/mnt/sdcard/Android/data/com.cmcc.test/cache/"
            # If the file is missing, create it first.
            # NOTE(review): the message printed below says "file exists" even
            # though this branch runs when it does NOT — looks mislabeled.
            if self.adbFindFile(path, "t.txt") != True:
                print('文件存在')
                self.adbMkdirDir( dirpath)
                self.adbTouchFile(path, '')
                time.sleep(1)
            if self.adbFindFile(path, "t.txt") != True:
                print('文件不存在')
                return False
            orgsize = self.adbLsFileSize(path)
            timeout = int(round(time.time() * 1000)) + timeoutMillis * 1000
            while (int(round(time.time() * 1000) < timeout)):
                # print('wait.....')
                # Detect modification as a change of the ls -l token.
                if(operator.ne(self.adbLsFileSize(path),orgsize)):
                    print('文件更新了.....')
                    return True;
                time.sleep(0.1)
            else:
                # while/else: loop ran to completion without a change.
                print('time out')
                return False
        except BaseException as msg:
            print('msg: %r' %msg)
            return False
    def adbTailFile(self):
        '''Return the last line of the fixed device cache file via
        `adb shell tail -n 1`, or None if empty/unreadable.'''
        path = "/mnt/sdcard/Android/data/com.cmcc.test/cache/t.txt"
        try:
            value = subprocess.Popen("adb shell tail -n 1 " + path, shell=True,stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            value.wait()
            output = value.stdout.read().decode()
            if output not in ['',None]:
                print ("%s" % output)
                return output
            else:
                return None
        except BaseException as msg:
            print('msg: %r' %msg)
            return None
    def getTime(self):
        try:
            # Extract a time value (seconds, 3 decimals) from the tailed line.
            # NOTE: the local variable `time` shadows the imported time module
            # inside this method.
            '''获取时间值'''
            content = self.adbTailFile()
            time = 0
            # NOTE(review): '\#' is the two characters backslash+hash, so this
            # returns early when the line contains "\#" — possibly '#' was
            # intended; confirm against the file format.
            if len(content) < 60 or (not content.find('\#') == -1) :
                return time
            l = content.split('#')[1]
            # print("times:%s" %l)
            valueTime = str(round((float(l)/1000.0), 3))
            print('时间差: %r' %valueTime)
            return valueTime
        except BaseException:
            return time
# Rebind the class name to a singleton instance (the class itself becomes
# unreachable under this name after this line).
BaseFile = BaseFile()
|
[
"hi_cbh@qq.com"
] |
hi_cbh@qq.com
|
57c9c6238d2f7787d7f5886e1072a184d1795a6b
|
f22ea8455b90cbe4cd7597a3c49c3db6d9e28df0
|
/experiments/experiment_driver_policy.py
|
ef8193a08b7de4761e7d58aaefa6f1a38ea4715b
|
[] |
no_license
|
NVlabs/sim-parameter-estimation
|
7bd1a255987a79f68e43617b9a59914e7c3acb54
|
932d18545e71a649c59fa00c2fe4cfc0da080dee
|
refs/heads/master
| 2023-01-14T07:22:40.768007
| 2020-11-20T05:16:23
| 2020-11-20T05:16:23
| 313,475,770
| 31
| 2
| null | 2020-11-20T01:20:47
| 2020-11-17T01:41:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,670
|
py
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import matplotlib
matplotlib.use('Agg')
import random
import logging
import numpy as np
import torch
import gym
import argparse
import os
from parameter_estimation.utils.logging import reshow_hyperparameters
from parameter_estimation.envs.randomized_vecenv import make_vec_envs
from experiments.args import get_args
from experiments.estimator_helper import get_estimator
from policy.ddpg import DDPG
def run_experiment(args):
    """Run the parameter-estimation loop with a DDPG policy.

    Builds reference/randomized vectorized envs, seeds all RNGs, loads the
    reference trajectory, then iteratively updates the estimator, logging and
    saving the running parameter estimates to `evaluations/`.
    """
    reshow_hyperparameters(args, paths={})

    reference_env = make_vec_envs(args.reference_env_id, args.seed, args.nagents)
    randomized_env = make_vec_envs(args.randomized_env_id, args.seed, args.nagents)

    parameter_estimator = get_estimator(reference_env, randomized_env, args)

    logging.root.setLevel(logging.INFO)

    # Seed every RNG for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    parameter_estimator.load_trajectory(reference_env, f'trajectories/{args.reference_env_id}.npy')
    logging.info('Loaded Trajectories')

    state_dim = randomized_env.observation_space.shape[0]
    action_dim = randomized_env.action_space.shape[0]
    max_action = 1.0

    kwargs = {
        "state_dim": state_dim,
        "action_dim": action_dim,
        "max_action": max_action,
    }
    policy = DDPG(**kwargs)

    evaluations = []
    for iteration in range(args.num_iterations):
        parameter_estimator.update_parameter_estimate(randomized_env, policy, reference_env)

        if iteration % args.log_interval == 0:
            # BUG FIX: the original passed args.jobid as the logging format
            # string and the remaining values as %-args, which breaks message
            # formatting inside the logging machinery. Use an explicit
            # format string with lazy %-style arguments.
            logging.info(
                'job=%s estimator=%s env=%s learned_reward=%s iter=%d estimate=%s',
                args.jobid,
                args.estimator_class,
                args.reference_env_id,
                args.learned_reward,
                iteration,
                parameter_estimator.get_parameter_estimate(randomized_env),
            )

        # TODO: Should be held out, test data
        evaluations.append(parameter_estimator.get_parameter_estimate(randomized_env))
        np.save('evaluations/{}-{}-{}-{}-policy-evals'.format(
            args.estimator_class, args.reference_env_id, args.learned_reward, args.suffix), evaluations)

    reshow_hyperparameters(args, paths={})
if __name__ == '__main__':
args = get_args()
run_experiment(args)
|
[
"noreply@github.com"
] |
NVlabs.noreply@github.com
|
688e3d98ff2cd21d62dac591d75392af61b7d98e
|
71bd065fb12ab2d07d4371f1903b45411b91735d
|
/openstack/neutron_plugin/neutron_plugin_contrail/plugins/opencontrail/loadbalancer/plugin.py
|
21b38b7b77a6adab2038f9b06b1be479ceb183ab
|
[
"Apache-2.0"
] |
permissive
|
syedaliawaissabir/BGPaaS
|
dffc3d07eddec91be3d70d8d226b2192511b61ff
|
71931e462944e39d31ca16167f273ab9fa84145a
|
refs/heads/master
| 2020-06-30T23:38:29.650304
| 2017-07-31T11:13:46
| 2017-07-31T11:13:46
| 74,343,705
| 0
| 1
| null | 2016-12-02T14:15:51
| 2016-11-21T08:46:43
|
C++
|
UTF-8
|
Python
| false
| false
| 1,720
|
py
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
from loadbalancer_db import LoadBalancerPluginDb
try:
from neutron.extensions import loadbalancer
except ImportError:
from neutron_lbaas.extensions import loadbalancer
from neutron.db import servicetype_db as sdb
from neutron.plugins.common import constants
from neutron.services import provider_configuration as pconf
class LoadBalancerPlugin(LoadBalancerPluginDb):
    """OpenContrail implementation of the Neutron LBaaS service plugin.

    Resolves the configured default provider at construction time and injects
    it into pools created without an explicit provider.
    """
    supported_extension_aliases = ["lbaas", "extra_lbaas_opts"]

    if hasattr(loadbalancer, 'LOADBALANCER_PREFIX'):
        path_prefix = loadbalancer.LOADBALANCER_PREFIX

    def __init__(self):
        super(LoadBalancerPlugin, self).__init__()
        self._get_default_provider()

    def _get_default_provider(self):
        """Look up the configured default LBaaS provider.

        Best-effort by design: falls back to "opencontrail" when the service
        type registry is unavailable or has no default.
        """
        self.default_provider = "opencontrail"
        try:
            service_type_manager = sdb.ServiceTypeManager.get_instance()
            provider = (service_type_manager.
                        get_default_service_provider(None,
                                                     constants.LOADBALANCER))
            self.pool_manager.check_provider_exists(provider['name'])
            self.default_provider = provider['name']
        except Exception:
            # BUG FIX: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed; the lookup itself
            # remains deliberately best-effort.
            pass

    def get_plugin_description(self):
        return "OpenContrail LoadBalancer Service Plugin"

    def _pool_update_provider(self, context, pool):
        # `provider.__class__ is object` presumably detects the bare-object
        # "attribute not set" sentinel used by the API layer — confirm against
        # the attribute-population code before changing.
        if 'provider' not in pool or not pool['provider'] or pool['provider'].__class__ is object:
            pool['provider'] = self.default_provider

    def create_pool(self, context, pool):
        """Create a pool, defaulting its provider when none was supplied."""
        self._pool_update_provider(context, pool['pool'])
        return super(LoadBalancerPlugin, self).create_pool(context, pool)
|
[
"awaisalisabir@yahoo.com"
] |
awaisalisabir@yahoo.com
|
81eef8d835d5b1b148857bd544823ae8028dcb3e
|
713fc732a037447897092722647e28cb7a9711a8
|
/app/api_1_0/comments.py
|
6ca08f3510891d5165443f4bd1257386b9c2f393
|
[] |
no_license
|
jkachhadia/StatsBoy
|
9612eec07b44cf34f76c63eddbb085daa7869640
|
ad9bb1f921dcb4c74b1ba842b015445c1e0abe33
|
refs/heads/master
| 2021-01-18T00:46:20.848151
| 2016-07-26T22:09:10
| 2016-07-26T22:09:10
| 64,026,809
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,279
|
py
|
from flask import jsonify, request, g, url_for, current_app
from .. import db
from ..models import Post, Permission, Comment
from . import api
from .decorators import permission_required
@api.route('/comments/')
def get_comments():
    """Return one page of all comments, newest first, with paging links."""
    page = request.args.get('page', 1, type=int)
    page_obj = Comment.query.order_by(Comment.timestamp.desc()).paginate(
        page, per_page=current_app.config['BLOGPOLE_COMMENTS_PER_PAGE'],
        error_out=False)
    prev = (url_for('api.get_comments', page=page-1, _external=True)
            if page_obj.has_prev else None)
    next = (url_for('api.get_comments', page=page+1, _external=True)
            if page_obj.has_next else None)
    return jsonify({
        'comments': [c.to_json() for c in page_obj.items],
        'prev': prev,
        'next': next,
        'count': page_obj.total
    })
@api.route('/comments/<int:id>')
def get_comment(id):
    """Return a single comment as JSON; responds 404 when it does not exist."""
    return jsonify(Comment.query.get_or_404(id).to_json())
@api.route('/posts/<int:id>/comments/')
def get_post_comments(id):
    """Return one page of comments for post *id*, oldest first, with paging links."""
    post = Post.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    page_obj = post.comments.order_by(Comment.timestamp.asc()).paginate(
        page, per_page=current_app.config['BLOGPOLE_COMMENTS_PER_PAGE'],
        error_out=False)
    prev = (url_for('api.get_post_comments', page=page-1, _external=True)
            if page_obj.has_prev else None)
    next = (url_for('api.get_post_comments', page=page+1, _external=True)
            if page_obj.has_next else None)
    return jsonify({
        'comments': [c.to_json() for c in page_obj.items],
        'prev': prev,
        'next': next,
        'count': page_obj.total
    })
@api.route('/posts/<int:id>/comments/', methods=['POST'])
@permission_required(Permission.COMMENT)
def new_post_comment(id):
    """Create a comment on post *id*; 201 with a Location header on success."""
    post = Post.query.get_or_404(id)
    comment = Comment.from_json(request.json)
    comment.post = post
    comment.author = g.current_user
    db.session.add(comment)
    db.session.commit()
    location = url_for('api.get_comment', id=comment.id, _external=True)
    return jsonify(comment.to_json()), 201, {'Location': location}
|
[
"jaykachhadia@hotmail.com"
] |
jaykachhadia@hotmail.com
|
5e83a0394b6546a5dfa08d44e4fc80cc7383122c
|
24122f793b84ef1cd3640128b46c7561ef1aece2
|
/wordcount/settings.py
|
b8c5e0c7f22fe973234b8b7736971ad2fef9ddd0
|
[] |
no_license
|
NikhilRaheja/wordcount-project
|
3a9ae6b07c9a51c3b570eb0e9505961cc280b6c3
|
4d5ad69b5750f9badf5c4b4cc7ecbdec50d0fbdb
|
refs/heads/master
| 2022-11-24T05:32:25.086113
| 2020-07-23T07:47:52
| 2020-07-23T07:47:52
| 282,161,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,108
|
py
|
"""
Django settings for wordcount project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this secret key is committed to version control; load it from
# an environment variable before any non-local deployment.
SECRET_KEY = '!&u75*lfy5*o&%hca$fhw#&tih873p5$h7l9e+(w3w*du7n=#l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'wordcount.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wordcount.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"nikhilraheja078@gmail.com"
] |
nikhilraheja078@gmail.com
|
2e5bf97774d140a22de4789799a566a8307ad69a
|
1b3178461535d5a1b318fa6d978bbbc427a0d1c4
|
/app/utils/seeds.py
|
ce62b4346a53015cb2c18042c7650b53785bc3eb
|
[] |
no_license
|
cyr1z/api-movie-library-
|
29d3cf4a3b56b544fdddf69511651848c8cc1be6
|
b3a021bff8112a3eb81f553b3eb0df751a488adb
|
refs/heads/main
| 2023-06-19T13:09:09.320541
| 2021-07-23T11:46:12
| 2021-07-23T11:46:12
| 381,181,110
| 1
| 0
| null | 2021-07-23T11:46:13
| 2021-06-28T23:01:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,332
|
py
|
"""
Seeds
"""
import json
from faker import Faker
# Country
from app.config import ADMIN_PASSWORD
from app.models.country import Country
from app.models.director import Director
from app.models.genre import Genre
from app.models.movie import Movie
from app.models.user import User
# Seed countries from the bundled JSON fixture, skipping rows already present
# (identified by their short code).
with open("/data/countries.json") as json_file:
    data = json.load(json_file)
for item in data["data"]:
    if Country.find_by_short(item["short"]) is None:
        print(dict(item))
        Country(**dict(item)).save()
# Director
# Seed directors, skipping names already present.
with open("/data/directors.json") as json_file:
    data = json.load(json_file)
for item in data["data"]:
    if Director.find_by_name(item["name"]) is None:
        print(dict(item))
        Director(**dict(item)).save()
# Genre
# Seed genres, skipping names already present.
with open("/data/genres.json") as json_file:
    data = json.load(json_file)
for item in data["data"]:
    if Genre.find_by_name(item["name"]) is None:
        print(dict(item))
        Genre(**dict(item)).save()
# User
# Seed 500 fake users; each fake user's password is set to their username.
fake = Faker()
for _ in range(500):
    data = fake.simple_profile()
    item = {
        "email": data["mail"],
        # NOTE(review): [-2]/[-1] assume the faker full name has at least two
        # space-separated tokens — single-token names would misbehave; confirm.
        "username": data["username"],
        "first_name": data["name"].split()[-2],
        "last_name": data["name"].split()[-1],
        "is_admin": False,
    }
    if (
        User.find_by_username(item["username"]) is None
        and User.find_by_email(item["email"]) is None
    ):
        print(dict(item))
        user = User(**dict(item))
        user.password = item["username"]
        user.save()
# Create the single admin account (password from ADMIN_PASSWORD config).
admin = {
    "email": "admin@gmail.com",
    "username": "admin",
    "first_name": "admin",
    "last_name": "admin",
    "is_admin": True,
}
if User.find_by_username(admin["username"]) is None:
    print(dict(admin))
    user = User(**dict(admin))
    user.password = ADMIN_PASSWORD
    user.save()
# Movie
# Seed movies from a JSON-lines fixture (one film object per line), linking
# each to its country, genres, directors, and a random owning user.
with open("/data/movies.json") as datafile:
    lines = datafile.readlines()
for line in lines:
    film = json.loads(line.strip())
    data = {
        "rate": film["rate"],
        "description": film["description"],
        "name": film["name"],
        "poster_link": film["poster_link"],
        "released": film["released"],
        "production": film["production"],
    }
    if Movie.find_by_name(data["name"]) is None:
        print(dict(data))
        try:
            movie = Movie(**dict(data))
            country = Country.find_by_short(film["country"]["short"])
            if country is not None:
                movie.country = country
            for film_genre in film["genres"]:
                genre = Genre.find_by_name(film_genre["name"])
                if genre is not None:
                    movie.genres.append(genre)
            for film_director in film["directors"]:
                director = Director.find_by_name(film_director["name"])
                if director is not None:
                    movie.directors.append(director)
            user = User.get_random()
            print(user)
            if user.id is not None:
                movie.user = user
            else:
                movie.user = User.find_by_name("admin")
            movie.save()
        # NOTE(review): bare except silently skips any failing row — it also
        # swallows KeyboardInterrupt/SystemExit; consider `except Exception`.
        except:
            continue
|
[
"cyr@zolotarev.pp.ua"
] |
cyr@zolotarev.pp.ua
|
920f8a8bcceed014d6d96e90ad0fc9bdb4314b1b
|
42839d11a80c0fcd9ba715f16c1d77ee0698b148
|
/exercises/1901060015/d11/mymodule/stats_word.py
|
dca455f68f989dda36c0093b0c99fee51014d161
|
[] |
no_license
|
veryspecialdog/selfteaching-python-camp
|
39ed65722f36c5aa99ff8cce6c4e62ea3e10394d
|
8a2191320971d4baeef1307bf82f783e6e12a144
|
refs/heads/master
| 2020-05-09T15:49:18.125465
| 2019-07-28T05:48:38
| 2019-07-28T05:48:38
| 181,246,172
| 1
| 0
| null | 2019-04-14T01:23:00
| 2019-04-14T01:22:59
| null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
def stats_text_en(text):
    """Return the 100 most common whitespace-separated tokens of *text*
    as (token, count) pairs, most frequent first."""
    from collections import Counter
    tokens = text.split()
    return Counter(tokens).most_common(100)
def stats_text_cn(text):
    """Return the 100 most common Chinese words of *text* (jieba
    segmentation) as (word, count) pairs, most frequent first."""
    from collections import Counter
    import jieba
    words = jieba.cut(text)
    return Counter(words).most_common(100)
def stats_text(text):
    """Return combined Chinese + English term frequencies for *text*.

    Raises TypeError when *text* is not a string.
    """
    if not isinstance(text, str):
        # BUG FIX: the original raised TypeError('ValueError') — a TypeError
        # carrying the literal message "ValueError", which is nonsense. Give a
        # real message instead (exception type is unchanged for callers).
        raise TypeError('stats_text expects a str, got %s' % type(text).__name__)
    return stats_text_cn(text) + stats_text_en(text)
|
[
"13141360658@163.com"
] |
13141360658@163.com
|
3e1b166e0b309fd0f0c91377f3988b9cbdea28ad
|
28f2d87c89bb2effa8afee3783160566a8dede31
|
/augmentation.py
|
fa38ea8c9f2601be25ae19b187b8b94d02bb4183
|
[] |
no_license
|
saicharith15/ImageNet-Classification-
|
63f80c1c0389b34cdaf474bd4659e28ffb00fce7
|
a3a6bf8dbab496b4d7fcfee43d43d8fc26e10df5
|
refs/heads/master
| 2022-12-03T19:30:49.787306
| 2020-08-23T18:39:56
| 2020-08-23T18:39:56
| 289,746,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,670
|
py
|
from keras.preprocessing.image import ImageDataGenerator
import cv2
import os
import glob
import numpy as np
import copy
import sys
# Load up to 150 JPEGs of the 'tiger' class into memory with OpenCV.
cv_img = []
count = 0
for img in glob.glob(".\images/tiger\*.jpg"):
    n= cv2.imread(img)
    cv_img.append(n)
    count += 1
    # Stop after 150 images (indices 0..149).
    if count > 149:
        break
print('Length Images:', len(cv_img))
# cv_img = np.reshape(cv_img, (len(cv_img), 224, 224, 3))
def flip_horizontally(image):
    """Return a left-right mirrored copy of *image* (each row reversed).

    Works on nested lists and numpy arrays alike, preserving the input's
    container type; the middle column of an odd-width image stays in place,
    exactly like a plain reversal. Replaces the original's Python-level
    element-by-element swap loop (and its misleading "vertically_flipped"
    naming — the operation is a horizontal flip) with per-row slice reversal.
    """
    flipped = copy.deepcopy(image)
    for row_idx in range(len(image)):
        # Slice reversal works for both list rows and ndarray rows.
        flipped[row_idx] = image[row_idx][::-1]
    return flipped
# Write a horizontally flipped copy of every loaded image, numbered from 851
# (continuing an existing file-numbering scheme in the working directory).
i = 851
for image in cv_img:
    horizontally_flipped_img = flip_horizontally(image)
    cv2.imwrite(f'{i}.jpg', horizontally_flipped_img)
    i += 1
# # I plan of having these commented lines of code in a separate file. This generates the augmented images.
# # Data Augmentation.
# datagen = ImageDataGenerator(
# featurewise_center=False,
# featurewise_std_normalization=False,
# rotation_range=20,
# width_shift_range=0.2,
# height_shift_range=0.2,
# horizontal_flip=True
# )
#
# image_gen = datagen.flow(cv_img, batch_size=1, save_to_dir='./dining_table_augmented',
# save_prefix='image', save_format='jpg')
#
# total = 0
# for image in image_gen:
# total += 1
# if total == 74:
# break
|
[
"saicharith15@gmail.com"
] |
saicharith15@gmail.com
|
ce170db8e933493608120be8ce4bcb840464300d
|
8a71736fd431777fa5b872e9ea7af9e9b52e0290
|
/PythonCookbook/<3>数字日期和时间/14-找出当月的日期范围.py
|
4d696d763c6a9f7d67814cfd7633baf5ee5906d6
|
[] |
no_license
|
Haut-Stone/study_Python
|
54538e56b52c363d57e32846595f1a1427db93e3
|
2442bacb07ceb2f4236b4f62b122c5fc5f7441a8
|
refs/heads/master
| 2021-01-12T04:01:19.828774
| 2018-04-04T11:24:49
| 2018-04-04T11:24:49
| 77,465,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
'''
'''
from datetime import datetime, date, timedelta
import calendar
def get_month_range(start_date=None):
    """Return a (start, end) pair spanning one calendar month.

    *start_date* defaults to the first day of the current month; *end* is
    exclusive — start plus the number of days in start's month.
    """
    if start_date is None:
        start_date = date.today().replace(day=1)
    days_in_month = calendar.monthrange(start_date.year, start_date.month)[1]
    return (start_date, start_date + timedelta(days=days_in_month))
a_day = timedelta(days=1)
first_day, last_day = get_month_range()
# Print every date of the current month (last_day is exclusive).
while first_day < last_day:
    print(first_day)
    first_day += a_day
|
[
"haut.1604.stone@gmail.com"
] |
haut.1604.stone@gmail.com
|
0d943fb3cc8ab009cbbc50ea7818ce733ae8e338
|
53e8f26e057a105fd5751768b0872f3ec345bdca
|
/rockets/HERMES_V1/CONFIGURATION_FILES/config.py
|
08d5dda6ef62c324bc3ae4e8690580e67bcb5231
|
[] |
no_license
|
oygx210/computational-fluid-dynamics
|
85991876a785149ff2001eec6e9469742a520c11
|
977c47761254642b8a319cd30d5a9fd48f7b5d99
|
refs/heads/main
| 2023-03-14T00:39:09.976634
| 2021-03-14T11:50:47
| 2021-03-14T11:50:47
| 355,256,619
| 1
| 0
| null | 2021-04-06T16:24:19
| 2021-04-06T16:24:18
| null |
UTF-8
|
Python
| false
| false
| 4,785
|
py
|
# Top-level simulation configuration: selects and parameterizes the mesher
# (cfMesh or snappyHexMesh), optional dynamic mesh refinement, and the flow
# conditions for the HERMES V1 rocket CFD run.
__author__ = "Omar Kahol"
__email__ = "omar.kahol@skywarder.eu"
__description__ = "simulation setup"
#imports
import pythonCode.meshClasses.cfmesh as cfmesh
import pythonCode.meshClasses.snappyhexmesh as snappy
from pythonCode.meshClasses.point import point
import pythonCode.meshClasses.dynamicMesh as dynamicMesh
#simulation batch name
simulationName = "simulazione0.7"
# --------------------------------------------------------------------------------------------
# MESHING PART
# This utility will "automatically" create the mesh
# Note, not all meshing can be done automatically. Please setup the template files properly
# --------------------------------------------------------------------------------------------
meshToUse = "snappy" #set it to cfmesh or snappy
#-------------------------------------------------CFMESH CONFIGURATION-------------------------------------------------------------------------------------------------------------
# Only used when meshToUse == "cfmesh".
cfmeshConfiguration = cfmesh.cfmeshConfiguration()
cfmeshConfiguration.domainCellSize = 0.3 #cellSize of the domain
cfmeshConfiguration.rocketCellSize = 0.01 #cellSize near the rocket
cfmeshConfiguration.boundaryLayers = 1 #add boundaryLayers
cfmeshConfiguration.thicknessRatio = 1.3 #thickness ratio
cfmeshConfiguration.maxFirstlayerThickness = 0.00001 #maximum allowable thickness of the first layer
cfmeshConfiguration.refinementZones = [
    # cfmeshRefinement(NAME, CELLSIZE, POINT1, POINT2, RADIUS1, RADIUS2) ==> create a refinement zone shaped like a cone
    cfmesh.cfmeshRefinement("wake_superfine", 0.002, point(0.98, 0, 0), point(1.2, 0, 0), 0.05, 0.0692),
    cfmesh.cfmeshRefinement("wake_fine", 0.03, point(-1.4, 0, 0), point(2.5, 0, 0), 0.02, 0.541),
    cfmesh.cfmeshRefinement("wake_medium", 0.06, point(2.5, 0, 0), point(5, 0, 0), 0.76, 1.02),
    cfmesh.cfmeshRefinement("wake_coarse", 0.09, point(5, 0, 0), point(8, 0, 0), 0.76, 1.02),
    cfmesh.cfmeshRefinement("ogive_far", 0.06, point(-1.5, 0, 0), point(0.2, 0, 0), 0.7, 0.848),
    cfmesh.cfmeshRefinement("ogive_area", 0.01, point(-0.6, 0, 0), point(-1.5, 0, 0), 0.15, 0.15),
    cfmesh.cfmeshRefinement("ogive_narrow", 0.005, point(-1.02, 0, 0), point(-0.6, 0, 0), 0.03, 0.07),
    cfmesh.cfmeshRefinement("winglet_zone", 0.01, point(0.8, 0, 0), point(1.5, 0, 0), 0.15, 0.21)
]
#-------------------------------------------------SNAPPY HEX MESH CONFIGURATION-----------------------------------------------------------------------------------------------------
# Only used when meshToUse == "snappy".
snappyConfiguration = snappy.snappyConfiguration()
snappyConfiguration.featureExtractLevel = 8 #refinement precision for feature extraction
snappyConfiguration.rocketRefineLevel = snappy.refinementLevel(7,7) #rocket feature refinement level, specify min and max level
snappyConfiguration.boundaryLayers = 10 #define number of boundary layers
snappyConfiguration.boundaryControl = snappy.boundaryControl() #add a boundary control
snappyConfiguration.boundaryControl.setFirstLayerThickness(2.0e-5)
snappyConfiguration.boundaryControl.setTotalThickness(1.5e-3)
snappyConfiguration.refinementZones = [
    # snappyRefinement(NAME, LEVEL, POINT1, POINT2, RADIUS) -- cylindrical refinement regions.
    snappy.snappyRefinement("wake_coarse", snappy.refinementLevel(2,3), point(-1.4, 0, 0), point(8,0,0), 0.4),
    snappy.snappyRefinement("ogive", snappy.refinementLevel(4,5), point(-1.1, 0, 0), point(-0.55,0,0), 0.15),
    snappy.snappyRefinement("winglet", snappy.refinementLevel(6,7), point(0.85, 0, 0), point(1.05,0,0), 0.15),
    snappy.snappyRefinement("wake_near_1", snappy.refinementLevel(6,7), point(1.0, 0, 0), point(1.2,0,0), 0.075),
    snappy.snappyRefinement("wake_near_2", snappy.refinementLevel(5,6), point(0.95, 0, 0), point(1.5,0,0), 0.1),
    snappy.snappyRefinement("wake_near_3", snappy.refinementLevel(4,5), point(0.8, 0, 0), point(2.5,0,0), 0.15)
]
# -------------------------------------------------------------------------------
#-------------------------------------------------DYNAMIC MESH CONFIGURATION-----------------------------------------------------------------------------------------------------
# Runtime adaptive refinement driven by the "nut" field; disabled by default.
activateDynamicMesh = False
dynamicMeshConfiguration = dynamicMesh.dynamicMeshConfiguration()
dynamicMeshConfiguration.refineInterval = 100
dynamicMeshConfiguration.refineField = "nut"
dynamicMeshConfiguration.lowerRefineLevel = 0.00001
dynamicMeshConfiguration.upperRefineLevel = 0.5
dynamicMeshConfiguration.unrefineLevel = 0.000001
dynamicMeshConfiguration.maxRefinement = 4
dynamicMeshConfiguration.maxCells = 4000000
# -------------------------------------------------------------------------------
#-------------------------------------------------SIMULATIONS--------------------------------------------------------------------------------------------
# Solver/run parameters. NOTE(review): assignment to the name ``type`` shadows
# the builtin within this module -- presumably read by the runner; confirm.
numberOfProcessors = 10
renumberMesh = True
type = "transient" #set to steady or transient
alfa = 0
beta = 0
mach = 0.70
alt = 0.0
|
[
"okahol@yahoo.it"
] |
okahol@yahoo.it
|
9bf0681ad2c55e309a28a300ad5d60fe99371b97
|
bb1a3a5f0f0f55af8ae8f920552fe12b8af2a186
|
/newpassword.py
|
0fbcd342553746a696b5d7eb62ef1cf6fd5193c6
|
[
"MIT"
] |
permissive
|
ninjay997/ProtonMail-Account-Creator
|
710e4067705333d2b118ba0970b670c55e93d844
|
626313afa7c0c043d657ed8082df047a817e52c3
|
refs/heads/master
| 2022-11-24T20:00:54.689605
| 2020-08-01T20:56:27
| 2020-08-01T20:56:27
| 284,333,348
| 0
| 0
|
MIT
| 2020-08-01T20:24:44
| 2020-08-01T20:24:44
| null |
UTF-8
|
Python
| false
| false
| 4,887
|
py
|
# 🚀 This Project is in it's early stages of Development.
# 📌 Working on new features and main menu.
# ⚠️ Any Questions or Suggestions please Mail to: hendriksdevmail@gmail.com
# 🖥 Version: 1.0.0
from selenium import webdriver
from colorama import Fore, Back, Style
import warnings
import time
import random
import string
import urllib.request
import requests
import csv
import sys
from proxyscrape import create_collector
import os
# Interactive driver: asks for credentials, finds a working US/UK HTTPS proxy,
# then drives a Chrome session through ProtonMail's settings page.
# POSIX-only terminal clear (relies on the external ``clear`` binary).
clear = lambda: os.system('clear')
clear()
# Proxy scraper collecting free HTTPS proxies.
collector = create_collector('my-collector', 'https')
print ('\033[31m' + """\
 ____ __ __ ___ _ __
/ __ \_________ / /_____ ____ / |/ /___ _(_) /
/ /_/ / ___/ __ \/ __/ __ \/ __ \/ /|_/ / __ `/ / /
/ ____/ / / /_/ / /_/ /_/ / / / / / / / /_/ / / /
/_/ /_/ \____/\__/\____/_/ /_/_/ /_/\__,_/_/_/
 ___ __
 / | ______________ __ ______ / /_
 / /| |/ ___/ ___/ __ \/ / / / __ \/ __/
 / ___ / /__/ /__/ /_/ / /_/ / / / / /_
/_/ |_\___/\___/\____/\__,_/_/ /_/\__/
 ______ __
 / ____/_______ ____ _/ /_____ _____
 / / / ___/ _ \/ __ `/ __/ __ \/ ___/
/ /___/ / / __/ /_/ / /_/ /_/ / /
\____/_/ \___/\__,_/\__/\____/_/
 """ + '\033[0m')
print ('\033[31m' + "Auto Password Changer Script" + '\033[0m')
# NOTE(review): ``restart`` is never modified, so this loop only terminates via
# the explicit exit() below -- effectively ``while True``.
restart = 2
while (restart > 1):
    emailaddress = input('\033[31m' + "Enter Email Address: " + '\033[0m')
    password = input('\033[31m' + "Enter Password: " + '\033[0m')
    newpassword = input('\033[31m' + "Enter New Password: " + '\033[0m')
    # Keep grabbing proxies until one answers a quick test request.
    proxy_status = "false"
    while (proxy_status == "false"):
        # Retrieve only 'us' proxies
        proxygrab = collector.get_proxy({'code': ('us', 'uk')})
        proxy = ("{}:{}".format(proxygrab.host, proxygrab.port))
        print ('\033[31m' + "Proxy:", proxy + '\033[0m')
        try:
            proxy_host = proxygrab.host
            proxy_port = proxygrab.port
            proxy_auth = ":"
            proxies = {'http':'http://{}@{}:{}/'.format(proxy_auth, proxy_host, proxy_port)}
            requests.get("http://example.org", proxies=proxies, timeout=1.5)
        except OSError:
            # Proxy unreachable: erase the two status lines and retry.
            print ('\033[31m' + "Proxy Connection error!" + '\033[0m')
            time.sleep(1)
            sys.stdout.write("\033[F")
            sys.stdout.write("\033[K")
            sys.stdout.write("\033[F")
            sys.stdout.write("\033[K")
            proxy_status = "false"
        else:
            print ('\033[31m' + "Proxy is working..." + '\033[0m')
            time.sleep(1)
            sys.stdout.write("\033[F")
            sys.stdout.write("\033[K")
            sys.stdout.write("\033[F")
            sys.stdout.write("\033[K")
            proxy_status = "true"
    else:
        # while/else: runs once the proxy loop exits normally (proxy found).
        from selenium.webdriver.chrome.options import Options
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        options = Options()
        options.add_argument('--proxy-server={}'.format(proxy))
        # Change Path to Chrome Driver Path (or move your ChromeDriver into the project folder)
        driver = webdriver.Chrome(executable_path='/Users/hendrik/Development/ProtonMail-Account-Creator/driver/chromedriver', chrome_options=options)
        url = 'http://mail.protonmail.com/login'
        driver.get(url)
        time.sleep(4)
        driver.find_element_by_id('username').send_keys(emailaddress)
        time.sleep(1)
        driver.find_element_by_id('password').send_keys(password)
        time.sleep(1)
        driver.find_element_by_id('login_btn').click()
        time.sleep(3)
        driver.find_element_by_id('tour-settings').click()
        time.sleep(3)
        driver.find_elements_by_css_selector('html.protonmail.gr__mail_protonmail_com.ua-gecko.ua-gecko-70.ua-gecko-70-0.ua-firefox.ua-firefox-70.ua-firefox-70-0.ua-desktop.ua-desktop-macintosh.ua-mac_os_x.ua-mac_os_x-10.ua-mac_os_x-10-14.js body#secured-account.appConfigBody-is-firefox.appConfigBody-is-mac.secure.appConfigBody-is-free div#body div#pm_main div#pm_settings div.settings div.row.pm_grid section.pm_form.col-1-2 p button.pm_button').click()
        # Wait for the user to confirm they finished the manual step.
        complete = "false"
        while (complete == "false"):
            complete_q = input('\033[31m' + "Are you done? y/n: " + '\033[0m')
            if complete_q == "y":
                driver.close()
                print ('\033[31m' + "Ok! The script is exiting now." + '\033[0m')
                time.sleep(1)
                clear()
                exit()
            else:
                print ('\033[31m' + 'Ok. Take your time.' + '\033[0m')
                time.sleep(1)
                complete = "false"
        else:
            # NOTE(review): the confirmation loop only exits via exit(), so this
            # while/else branch is effectively dead code.
            print("something")
|
[
"hendrik@whalar.com"
] |
hendrik@whalar.com
|
d3669f3390ac8a021ee5857a904aa488ac366791
|
b6a31ec10b39a3dbae183ba40c42078cadf88946
|
/152. Maximum Product Subarray.py
|
0b12660ab67d3ee4203445bfded5f15062a8f6c3
|
[] |
no_license
|
QIAOZHIBAO0104/My-Leetcode-Records
|
69fabd11b279f08861cd644973e51bf664da0d90
|
882724c8d50b2f21193c81e5072c31385c5e6b8e
|
refs/heads/main
| 2023-07-11T00:17:02.368441
| 2021-08-07T16:19:45
| 2021-08-07T16:19:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,235
|
py
|
'''
https://leetcode.com/problems/maximum-product-subarray/
Given an integer array nums, find the contiguous subarray within an array (containing at least one number) which has the largest product.
Example 1:
Input: [2,3,-2,4]
Output: 6
Explanation: [2,3] has the largest product 6.
Example 2:
Input: [-2,0,-1]
Output: 0
Explanation: The result cannot be 2, because [-2,-1] is not a subarray.
'''
'''
Time:O(n**2)
Space:O(1)
'''
class Solution(object):
    def maxProduct(self, nums):
        """
        Brute force: compute the product of every contiguous subarray
        and keep the largest one seen. O(n**2) time, O(1) space.
        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        best = nums[0]
        for start in range(len(nums)):
            running = 1
            for end in range(start, len(nums)):
                running *= nums[end]
                best = max(best, running)
        return best
'''
Time:O(n)
Space:O(1)
'''
class Solution(object):
    def maxProduct(self, nums):
        """One pass: track both the max and min product ending at each index
        (a negative value can flip min into max). O(n) time, O(1) space."""
        if not nums:
            return 0
        hi = lo = best = nums[0]
        for val in nums[1:]:
            candidates = (val, hi * val, lo * val)
            hi, lo = max(candidates), min(candidates)
            best = max(best, hi)
        return best
|
[
"noreply@github.com"
] |
QIAOZHIBAO0104.noreply@github.com
|
f68ec7739e968d9957ffa7c842660c9d7bdb8957
|
78cae5eb320ca54d86ac28d4040453d113fd4094
|
/04-Hybrid/lab04_fft2.py
|
f7bc8aeeae6e17ce44bbf3ee29000790830b8d89
|
[] |
no_license
|
oftrujillo10/IBIO4680
|
251e762647f13877e0c733e465b22568bf0ec6bf
|
03a41235291375d31c9b6ab9bca332d903dcd286
|
refs/heads/master
| 2021-05-05T06:50:11.681936
| 2018-05-09T17:24:07
| 2018-05-09T17:24:07
| 118,827,963
| 0
| 0
| null | 2018-01-24T22:04:09
| 2018-01-24T22:04:09
| null |
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
#!/usr/bin/env python
import cv2
import numpy as np
import os
from scipy import misc
# Build a hybrid image: high-frequency content of the cat + low-frequency
# content of the baby, combined in the Fourier domain, then save a pyramid
# of downscaled versions to visualize the effect.
image1 = os.getcwd() + '/Imgs/cat.jpg'
image2 = os.getcwd() + '/Imgs/baby.jpg'
# Common working resolution for both grayscale inputs.
s, z = 500, 500
Cat = cv2.imread(image1, 0)
Cat = cv2.resize(Cat, (s, z))
fcat = np.fft.fft2(Cat)
fscat = np.fft.fftshift(fcat)  # DC component moved to the spectrum centre
Baby = cv2.imread(image2, 0)
Baby = cv2.resize(Baby, (s, z))
fbaby = np.fft.fft2(Baby)
fsbaby = np.fft.fftshift(fbaby)
# High-pass the cat: zero a (2a x 2a) block around the spectrum centre.
a = 2
x, y = Cat.shape
# Bug fix: use integer division. ``x / 2`` yields a float in Python 3 and
# float slice indices raise TypeError.
crow, ccol = x // 2, y // 2
fscat[crow - a:crow + a, ccol - a:ccol + a] = 0
# Low-pass the baby: keep only a (2b x 2b) block around the centre.
b = 15
m = np.zeros((x, y))
m[crow - b:crow + b, ccol - b:ccol + b] = 1
fsbaby = fsbaby * m
# Back to the spatial domain; keep magnitudes only.
f_iscat = np.fft.ifftshift(fscat)
img_cat = np.abs(np.fft.ifft2(f_iscat))
f_isbaby = np.fft.ifftshift(fsbaby)
img_baby = np.abs(np.fft.ifft2(f_isbaby))
Hybrid = img_baby + img_cat
# NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; keep for the
# pinned environment, but imageio.imwrite is the modern replacement.
misc.imsave('hybrid_fft2.png', Hybrid)
# Gaussian pyramid of the hybrid, resized back to a fixed tile and tiled
# horizontally so the scale-dependent perception is visible side by side.
I = cv2.resize(Hybrid, (200, 200))
I1 = cv2.resize(cv2.pyrDown(I), (200, 200))
I2 = cv2.resize(cv2.pyrDown(I1), (200, 200))
I3 = cv2.resize(cv2.pyrDown(I2), (200, 200))
I4 = cv2.resize(cv2.pyrDown(I3), (200, 200))
f = np.concatenate((I, I1, I2, I3, I4), axis=1)
misc.imsave('Pyramid_fft2.png', f)
|
[
"otrujillo10.otp@gmail.com"
] |
otrujillo10.otp@gmail.com
|
f60040fcbd5b9d9540ad556e2406ea4de658879e
|
4940788bb9994cb0616daf32dfcdf622e2c9f14d
|
/server/index.py
|
05653d7585a2bc4155e0152d9d734a7e1f41a9d7
|
[
"Apache-2.0"
] |
permissive
|
damonchen/process-manager-sample
|
9e255062ef36bec276f9e8c3b5f64270c7d9ab29
|
75c6d0468eba6f29f488fd4423c9466801dd8cc4
|
refs/heads/master
| 2022-11-28T18:06:27.902974
| 2020-08-11T10:36:44
| 2020-08-11T10:36:44
| 285,949,348
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 697
|
py
|
#!/usr/bin/env python
#coding=utf-8
import sys
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
    # Root endpoint: fixed plain-text greeting.
    return 'hello sample'
def get_host_ip():
    """Return the ``(host, port)`` pair to serve on.

    ``sys.argv[1]`` may be ``'host'``, ``'host:port'`` or ``':port'``;
    with no argument the default ``('127.0.0.1', 5000)`` is returned.

    Bug fixes vs. the original:
    * the port is converted to ``int`` (it was returned as a string for
      the ``'host:port'`` form);
    * the ``':port'`` form now falls back to the default host instead of
      taking a dead branch that called ``int()`` on the host part.
    """
    if len(sys.argv) <= 1:
        return '127.0.0.1', 5000
    host, sep, port = sys.argv[1].partition(':')
    if not sep:
        # Bare 'host' form: keep the default port.
        return host, 5000
    return (host or '127.0.0.1'), int(port)
if __name__ == '__main__':
    # Parse optional "host[:port]" CLI argument and start the dev server.
    host, port = get_host_ip()
    app.run(host=host, port=port)
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
76053ca28bf3d79768964086ca7f070674960196
|
221d1ad342677d2fac8aa3f8d5c60e059a6316c9
|
/pm4py/algo/discovery/inductive/variants/im/util/base_case.py
|
1d8a90e0221a4a231f5cfe3a821d0742ec49024c
|
[] |
no_license
|
niklasadams/explainable_concept_drift_pm
|
06ff651fbdebece4adf96f94bfb4d1026da14c48
|
6bf84d727ab0bae76716a04ad28c7de73250c89d
|
refs/heads/main
| 2023-08-26T18:21:49.955080
| 2021-10-29T18:53:48
| 2021-10-29T18:53:48
| 314,514,571
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
import logging
def empty_log(log):
    """Return True when the event log contains no traces, else False."""
    if len(log) == 0:
        logging.debug("empty_log")
        return True
    return False
def single_activity(log, activity_key):
    """Return True iff every trace has exactly one event carrying the same
    activity (looked up under *activity_key*); False otherwise."""
    if not log:
        return False
    if len(log[0]) < 1:
        return False
    first_activity = log[0][0][activity_key]
    for trace in log:
        # Any trace of length != 1, or with a different activity, disqualifies.
        if len(trace) != 1 or trace[0][activity_key] != first_activity:
            return False
    logging_output = "single_activity: " + str(first_activity)
    logging.debug(logging_output)
    return True
|
[
"niklas.adams@pads.rwth-aachen.de"
] |
niklas.adams@pads.rwth-aachen.de
|
78fe6c044adbbb0784feb9ac8473c43335b6086d
|
c7db31b1067f570e5dbb1b031800c80d04518bfb
|
/src/robotic_arm/recognition/face_face_recognition_impl.py
|
c2f9a89a01f36fa9179498269087b16e65110d1f
|
[] |
no_license
|
kxxt/robotic-arm
|
d4735d0e872cf9f107a6d1bb3fbe8dbd8cbc1814
|
65a08d96277be3706484c7b185e6d12ce7d0f332
|
refs/heads/master
| 2023-06-16T12:57:52.806526
| 2021-07-12T04:09:21
| 2021-07-12T04:09:21
| 356,072,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,158
|
py
|
from robotic_arm.recognition.base import ImageRecognitionService
import logging
import numpy as np
from robotic_arm.input.camera import get_frame
from datetime import datetime
class FaceRecognitionService(ImageRecognitionService):
    """Recognizes known faces in camera frames via the ``face_recognition`` package."""

    def __init__(self):
        ImageRecognitionService.__init__(self, 'face-recognition')
        self.logger = logging.getLogger('face-recognition-facerec')
        self.service = None  # lazily imported ``face_recognition`` module
        self.process_this_frame = True  # process only every other frame
        # Known-face database, consulted by recognize(). Bug fix: these were
        # assigned as locals inside load(), so recognize()'s reads of
        # self.known_face_encodings / self.known_face_names raised AttributeError.
        self.known_face_encodings = []
        self.known_face_names = []

    def load(self):
        """Import ``face_recognition`` lazily and reset the known-face database."""
        self.service = __import__("face_recognition")
        self.known_face_encodings = []
        self.known_face_names = []

    def recognize(self, frame):
        """Return ``(names, locations, encodings)`` for the faces in *frame*.

        Every other call is skipped (returns ``None``) to halve the workload.
        """
        if self.process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = self.service.face_locations(frame)
            face_encodings = self.service.face_encodings(frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = self.service.compare_faces(self.known_face_encodings, face_encoding)
                name = "Unknown"
                # Use the known face with the smallest distance to the new face.
                face_distances = self.service.face_distance(self.known_face_encodings, face_encoding)
                # Robustness fix: np.argmin raises ValueError on an empty array
                # (no known faces yet) -- keep "Unknown" in that case.
                if len(face_distances):
                    best_match_index = np.argmin(face_distances)
                    if matches[best_match_index]:
                        name = self.known_face_names[best_match_index]
                face_names.append(name)
            self.process_this_frame = not self.process_this_frame
            return face_names, face_locations, face_encodings
        self.process_this_frame = not self.process_this_frame

    def real_work(self):
        """Grab one camera frame, run recognition, queue any non-None result."""
        result = self.recognize(get_frame())
        if result is not None:
            self.output_queue.put(result)
|
[
"18085551+kxxt@users.noreply.github.com"
] |
18085551+kxxt@users.noreply.github.com
|
a1fd630926e347c4a083d6d0779339bdce46e1dc
|
ba3a61adf0130b087b8934b1c8e45f171b027949
|
/Individual1.py
|
5308e5973458bee753c21602bcdfb83e3975221f
|
[
"MIT"
] |
permissive
|
ZyryanovAV/lb10
|
5f7512a21c030ac41a0e52d00b7d36ded38a3ea9
|
8fd9708a0b6ae72fe2e65ab1a22495b51f81803f
|
refs/heads/main
| 2023-01-19T14:51:08.969125
| 2020-12-07T01:05:35
| 2020-12-07T01:05:35
| 319,163,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,474
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Использовать словарь, содержащий следующие ключи: название пункта назначения; номер
# поезда; время отправления. Написать программу, выполняющую следующие действия:
# ввод с клавиатуры данных в список, состоящий из словарей заданной структуры; записи должны
# быть упорядочены по номерам поездов;
# вывод на экран информации о поезде, номер которого введен с клавиатуры; если таких поездов нет,
# выдать на дисплей соответствующее сообщение.
import sys
import json
def add(trains, name, num, time):
    """Append a train record to *trains* and keep the list ordered by number."""
    trains.append({
        'name': name,
        'num': num,
        'time': time,
    })
    if len(trains) > 1:
        trains.sort(key=lambda rec: rec.get('num', ''))
def list(trains):
    """Print the train roster as an ASCII table (shadows the builtin ``list``)."""
    divider = '+-{}-+-{}-+-{}-+-{}-+'.format(
        '-' * 4,
        '-' * 30,
        '-' * 20,
        '-' * 17
    )
    header = '| {:^4} | {:^30} | {:^20} | {:^17} |'.format(
        "№",
        "Пункт назначения",
        "Номер поезда",
        "Время отправления"
    )
    print(divider)
    print(header)
    print(divider)
    for row_no, rec in enumerate(trains, 1):
        print(
            '| {:>4} | {:<30} | {:<20} | {:>17} |'.format(
                row_no,
                rec.get('name', ''),
                rec.get('num', ''),
                rec.get('time', 0)
            )
        )
    print(divider)
def select(trains):
    # Print every train whose number equals the module-global ``number``.
    # NOTE(review): relies on the global ``number`` being set by the command
    # loop in __main__ before each call -- fragile; it should be passed as a
    # parameter instead.
    count = 0
    for train in trains:
        if train.get('num') == number:
            count += 1
            print('Номер поезда:', train.get('num', ''))
            print('Пункт назначения:', train.get('name', ''))
            print('Время отправления:', train.get('time', ''))
    # No matching train number found.
    if count == 0:
        print("Таких поездов нет!")
if __name__ == '__main__':
    # Interactive REPL: add/list/select trains until the user types "exit".
    trains = []
    while True:
        command = input(">>> ").lower()
        if command == 'exit':
            break
        elif command == 'add':
            name = input("Название пункта назначения: ")
            num = int(input("Номер поезда: "))
            time = input("Время отправления: ")
            add(trains, name, num, time)
        elif command == 'list':
            # list() prints the table itself and returns None ("None" is echoed).
            print(list(trains))
        elif command.startswith('select '):
            parts = command.split(' ', maxsplit=2)
            # ``number`` is a module global read by select().
            number = int(parts[1])
            select(trains)
        elif command == 'help':
            print("Список команд:\n")
            print("add - добавить поезд;")
            print("list - вывести список поездов;")
            print("select <номер поезда> - запросить информацию о выбранном поезде;")
            print("help - отобразить справку;")
            print("exit - завершить работу с программой.")
        else:
            print(f"Неизвестная команда {command}", file=sys.stderr)
|
[
"ar200103zeranov@yandex.ru"
] |
ar200103zeranov@yandex.ru
|
d4520d5f769f14642e5d090a5bb8e3b12d5ed2af
|
a2ceca8af01ef1b33c10f015e0ea84677ea411b7
|
/myfirstproject/settings.py
|
1a2a7ba052171e693233dd51cac4e474c987a311
|
[] |
no_license
|
alzol1/myfirstproject-root
|
b0f6dd9d470ec97cc5cefeb2ea4d5be1dbe65162
|
9c7523e7eba498ccc6f413aafce855a12313ad81
|
refs/heads/master
| 2022-12-23T23:37:49.296560
| 2020-09-22T07:31:03
| 2020-09-22T07:31:03
| 297,360,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,097
|
py
|
"""
Django settings for myfirstproject project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and load
# it from the environment before deploying.
SECRET_KEY = '1gc8gq@!mjx!19-aqj7r5t%p$w+(6-v)!rirayl&6r*dc#*ews'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myfirstproject.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app templates.
        'DIRS': ['templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'myfirstproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"55974898+alzol1@users.noreply.github.com"
] |
55974898+alzol1@users.noreply.github.com
|
f68e302debebc088152347cebcb5c94201df4492
|
5795e731efade5a3754f5973015206d42567142b
|
/mylists/listapp/migrations/0009_auto__add_field_link_read.py
|
9a7b362c29dc15b9cbeeaaa13d1c612f176d6345
|
[] |
no_license
|
joaopimentel/mylists
|
c9f0a7ccd24d1615bfc2178062f4289a1229ec90
|
31cdadf28176cfb2f6906c72fd85dcf4d3ebf27f
|
refs/heads/master
| 2021-01-20T12:41:39.757084
| 2013-02-12T22:42:14
| 2013-02-12T22:42:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,862
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the boolean ``read`` flag to ``Link``."""

    def forwards(self, orm):
        """Apply: add ``Link.read`` (BooleanField, default False)."""
        # Adding field 'Link.read'
        db.add_column('listapp_link', 'read',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop ``Link.read``."""
        # Deleting field 'Link.read'
        db.delete_column('listapp_link', 'read')

    # Frozen ORM snapshot used by South at migration time -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'listapp.category': {
            'Meta': {'object_name': 'Category'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '63'}),
            'tag': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
        },
        'listapp.link': {
            'Meta': {'object_name': 'Link'},
            'category': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['listapp.Category']", 'symmetrical': 'False', 'blank': 'True'}),
            'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'listapp.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mailbox': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['mailfetcher.MailBox']", 'unique': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'mailfetcher.mailbox': {
            'Meta': {'object_name': 'MailBox'},
            'host': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        }
    }

    complete_apps = ['listapp']
|
[
"mail@joaopimentel.net"
] |
mail@joaopimentel.net
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.