blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
885efec9df259eb8f15d01f19deacc378290ee8f
|
4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97
|
/sols/trapping_rain_water.py
|
1bcddf8200efcff10f2e1c9a95121a651a6a9e03
|
[] |
no_license
|
hayeonk/leetcode
|
5136824838eb17ed2e4b7004301ba5bb1037082f
|
6485f8f9b5aa198e96fbb800b058d9283a28e4e2
|
refs/heads/master
| 2020-04-28T03:37:16.800519
| 2019-06-01T14:34:45
| 2019-06-01T14:34:45
| 174,943,756
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
class Solution(object):
    def trap(self, height):
        """Compute how much rain water the elevation map can trap (LeetCode 42).

        Water above bar i is min(highest bar to its left, highest bar to
        its right) - height[i]; prefix/suffix running maxima give both in
        O(n) time, O(n) space.

        :param height: list of non-negative int bar heights
        :return: total units of trapped water (int)
        """
        n = len(height)
        if n < 3:
            return 0  # fewer than three bars cannot form a bowl

        # left_max[i] = tallest bar in height[0..i]
        left_max = [0] * n
        left_max[0] = height[0]
        for i in range(1, n):
            left_max[i] = max(left_max[i - 1], height[i])

        # right_max[i] = tallest bar in height[i..n-1], filled right-to-left.
        # (The original built this with list.insert(0, ...), which is O(n)
        # per call and made the pass quadratic.)
        right_max = [0] * n
        right_max[n - 1] = height[n - 1]
        for i in range(n - 2, -1, -1):
            right_max[i] = max(right_max[i + 1], height[i])

        total = 0
        for i in range(1, n - 1):
            total += min(left_max[i], right_max[i]) - height[i]
        return total
|
[
"31617695+hayeonk@users.noreply.github.com"
] |
31617695+hayeonk@users.noreply.github.com
|
2ecef8bf5e66a3811d5954b32463e10dce32e217
|
a55f45cc3768383d0ee5b53b8bdf5b1f0ad5fff0
|
/turhousemanager.py
|
9ea29c810c1ce8a31cd6f0d556c06f8944323998
|
[
"Apache-2.0"
] |
permissive
|
BuloZB/turhouse
|
e6efd6e997f754fbdf62f3fb35409f179a556fc6
|
e76db0cdc96d9c9acfc5bd99ed94d9ad1dfecfa1
|
refs/heads/master
| 2021-06-26T15:24:26.870536
| 2015-10-07T19:52:32
| 2015-10-07T19:52:32
| 74,385,386
| 0
| 0
|
Apache-2.0
| 2021-01-21T03:49:48
| 2016-11-21T16:56:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,345
|
py
|
# -*- coding: utf-8 -*-
import time
import datetime
import schedule
import logging
from pprint import pprint
from glob import Glob
from alarm.alarmmanager import AlarmManager
from event.notification import ezs_email
class TurhouseManager(AlarmManager):
    """Turhouse-specific alarm manager.

    Extends AlarmManager with dongle beeping, alarm start/stop with
    e-mail notification, and handling of remote-controller button codes.
    """

    def __init__(self):
        # Remember whether an alarm is currently sounding so alarmStop()
        # only notifies when there really was an alarm.
        self._alarm = False
        AlarmManager.__init__(self)

    def beep(self):
        """Emit a short acoustic acknowledgement on the dongle."""
        self.device('dongle_1').beep()

    def alarm(self, sender, event_type, params):
        """Start the alarm sound and send a notification e-mail."""
        self._alarm = True
        self.device('dongle_1').alarm()
        ezs_email('Alarm: ', sender, event_type, params)

    def alarmStop(self, sender, event_type, params):
        """Silence a running alarm; a no-op when no alarm is active."""
        if not self._alarm:
            return
        ezs_email('Alarm Stop: ', sender, event_type, params)
        self.device('dongle_1').alarmStop()
        self._alarm = False

    def controllerEventHandler(self, sender, event_type, params):
        """Dispatch remote-controller button codes: 1=arm, 2=disarm, 3=toggle socket."""
        code = params['code']
        Glob.logger.info(
            "Controller %s code: %s " % (sender, code))
        if code == 1:
            self.zone('dum').arm()
            self.beep()
        if code == 2:
            self.zone('dum').disarm()
            self.alarmStop(sender, event_type, params)
            self.beep()
        if code == 3:
            self.device('zasuvka_1').toggle()
            self.beep()
|
[
"jan.redl@vizionet.cz"
] |
jan.redl@vizionet.cz
|
fd00da65cc9832503776c473d4bb563f72c5ed8c
|
e14ccc485b835c07c86717aa097e1f8bfd8d35b8
|
/work/time/generate_sine_wave.py
|
01c579509872e3e7cc17e13cb0a94a3f15a9778f
|
[] |
no_license
|
LukeJaffe/trader
|
1e0f39f4db39048f1ee5e9ef57e54854a372b84a
|
c92a6fe4b6588a30282f3f66108b913a88ca9305
|
refs/heads/master
| 2021-01-06T20:35:42.780569
| 2017-10-13T06:37:56
| 2017-10-13T06:37:56
| 99,526,854
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 280
|
py
|
import math
import numpy as np
import torch
# Synthetic training data: N randomly phase-shifted sine waves of length L.
T = 20    # sine period, in samples
L = 1000  # sequence length
N = 100   # number of sequences

np.random.seed(2)  # fixed seed -> reproducible phase shifts

# Each row is 0..L-1 offset by a random integer phase drawn from [-4T, 4T).
x = np.empty((N, L), 'int64')
x[:] = np.arange(L) + np.random.randint(-4 * T, 4 * T, N).reshape(N, 1)
data = np.sin(x / 1.0 / T).astype('float64')

# Use a context manager so the file handle is closed even if torch.save
# fails (the original opened the file inline and leaked the handle).
with open('traindata.pt', 'wb') as f:
    torch.save(data, f)
|
[
"lukejaffe1@gmail.com"
] |
lukejaffe1@gmail.com
|
3cc04765c66f4277a2adfe6dd975ae9a46c7e373
|
7ba9a63b02d1abd8bae028f8703ed7da9941a3a3
|
/antlr/old/TParser.py
|
a08a183ccbd1cb74f81545db5ba5135c94240229
|
[] |
no_license
|
apnorton/ir-project
|
40bd8ea9a52f07262b63ff1c0af3bb3c5e1dfdc1
|
47f487dd76125d923da5fb960a76ed33f87d8213
|
refs/heads/master
| 2021-01-10T13:56:10.635630
| 2016-03-02T16:36:01
| 2016-03-02T16:36:01
| 46,683,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,246
|
py
|
# Generated from T by ANTLR 4.5.1
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
def serializedATN():
    # Serialized ATN (augmented transition network) for the "T" grammar,
    # emitted by the ANTLR 4.5.1 tool.  The escaped unicode characters are
    # a compact binary encoding, not human-readable text; do not edit by
    # hand -- regenerate from the grammar instead.
    with StringIO() as buf:
        buf.write(u"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3")
        buf.write(u"\n\36\4\2\t\2\3\2\3\2\3\2\3\2\3\2\3\2\5\2\13\n\2\3\2")
        buf.write(u"\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\3\2\7\2\31\n")
        buf.write(u"\2\f\2\16\2\34\13\2\3\2\2\3\2\3\2\2\2!\2\n\3\2\2\2\4")
        buf.write(u"\5\b\2\1\2\5\6\7\3\2\2\6\7\5\2\2\2\7\b\7\4\2\2\b\13\3")
        buf.write(u"\2\2\2\t\13\7\n\2\2\n\4\3\2\2\2\n\t\3\2\2\2\13\32\3\2")
        buf.write(u"\2\2\f\r\f\b\2\2\r\16\7\5\2\2\16\31\5\2\2\t\17\20\f\7")
        buf.write(u"\2\2\20\21\7\6\2\2\21\31\5\2\2\b\22\23\f\6\2\2\23\24")
        buf.write(u"\7\7\2\2\24\31\5\2\2\7\25\26\f\5\2\2\26\27\7\b\2\2\27")
        buf.write(u"\31\5\2\2\6\30\f\3\2\2\2\30\17\3\2\2\2\30\22\3\2\2\2")
        buf.write(u"\30\25\3\2\2\2\31\34\3\2\2\2\32\30\3\2\2\2\32\33\3\2")
        buf.write(u"\2\2\33\3\3\2\2\2\34\32\3\2\2\2\5\n\30\32")
        return buf.getvalue()
class TParser ( Parser ):
    # ANTLR-4.5.1-generated parser for the "T" arithmetic grammar: a single
    # left-recursive rule `r` covering integers, parenthesised groups and
    # the binary operators * + / - (handled via precedence climbing).
    # Generated code -- regenerate from the grammar rather than editing.

    grammarFileName = "T"

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    sharedContextCache = PredictionContextCache()

    literalNames = [ u"<INVALID>", u"'('", u"')'", u"'*'", u"'+'", u"'/'",
                     u"'-'" ]

    symbolicNames = [ u"<INVALID>", u"<INVALID>", u"<INVALID>", u"MUL",
                      u"ADD", u"DIV", u"SUB", u"WS", u"INT" ]

    RULE_r = 0

    ruleNames = [ u"r" ]

    # Token type constants, as assigned by the ANTLR tool.
    EOF = Token.EOF
    T__0=1
    T__1=2
    MUL=3
    ADD=4
    DIV=5
    SUB=6
    WS=7
    INT=8

    def __init__(self, input):
        super(TParser, self).__init__(input)
        self.checkVersion("4.5.1")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None

    # Base parse-tree context for rule `r`; the labeled alternatives below
    # (Add, Div, Group, Sub, Mul, Int) each specialize it.
    class RContext(ParserRuleContext):

        def __init__(self, parser, parent=None, invokingState=-1):
            super(TParser.RContext, self).__init__(parent, invokingState)
            self.parser = parser

        def getRuleIndex(self):
            return TParser.RULE_r

        def copyFrom(self, ctx):
            super(TParser.RContext, self).copyFrom(ctx)

    class AddContext(RContext):

        def __init__(self, parser, ctx): # actually a TParser.RContext)
            super(TParser.AddContext, self).__init__(parser)
            self.copyFrom(ctx)

        def r(self, i=None):
            if i is None:
                return self.getTypedRuleContexts(TParser.RContext)
            else:
                return self.getTypedRuleContext(TParser.RContext,i)

        def ADD(self):
            return self.getToken(TParser.ADD, 0)

        def enterRule(self, listener):
            if hasattr(listener, "enterAdd"):
                listener.enterAdd(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitAdd"):
                listener.exitAdd(self)

        def accept(self, visitor):
            if hasattr(visitor, "visitAdd"):
                return visitor.visitAdd(self)
            else:
                return visitor.visitChildren(self)

    class DivContext(RContext):

        def __init__(self, parser, ctx): # actually a TParser.RContext)
            super(TParser.DivContext, self).__init__(parser)
            self.copyFrom(ctx)

        def r(self, i=None):
            if i is None:
                return self.getTypedRuleContexts(TParser.RContext)
            else:
                return self.getTypedRuleContext(TParser.RContext,i)

        def DIV(self):
            return self.getToken(TParser.DIV, 0)

        def enterRule(self, listener):
            if hasattr(listener, "enterDiv"):
                listener.enterDiv(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitDiv"):
                listener.exitDiv(self)

        def accept(self, visitor):
            if hasattr(visitor, "visitDiv"):
                return visitor.visitDiv(self)
            else:
                return visitor.visitChildren(self)

    class GroupContext(RContext):

        def __init__(self, parser, ctx): # actually a TParser.RContext)
            super(TParser.GroupContext, self).__init__(parser)
            self.copyFrom(ctx)

        def r(self):
            return self.getTypedRuleContext(TParser.RContext,0)

        def enterRule(self, listener):
            if hasattr(listener, "enterGroup"):
                listener.enterGroup(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitGroup"):
                listener.exitGroup(self)

        def accept(self, visitor):
            if hasattr(visitor, "visitGroup"):
                return visitor.visitGroup(self)
            else:
                return visitor.visitChildren(self)

    class SubContext(RContext):

        def __init__(self, parser, ctx): # actually a TParser.RContext)
            super(TParser.SubContext, self).__init__(parser)
            self.copyFrom(ctx)

        def r(self, i=None):
            if i is None:
                return self.getTypedRuleContexts(TParser.RContext)
            else:
                return self.getTypedRuleContext(TParser.RContext,i)

        def SUB(self):
            return self.getToken(TParser.SUB, 0)

        def enterRule(self, listener):
            if hasattr(listener, "enterSub"):
                listener.enterSub(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitSub"):
                listener.exitSub(self)

        def accept(self, visitor):
            if hasattr(visitor, "visitSub"):
                return visitor.visitSub(self)
            else:
                return visitor.visitChildren(self)

    class MulContext(RContext):

        def __init__(self, parser, ctx): # actually a TParser.RContext)
            super(TParser.MulContext, self).__init__(parser)
            self.copyFrom(ctx)

        def r(self, i=None):
            if i is None:
                return self.getTypedRuleContexts(TParser.RContext)
            else:
                return self.getTypedRuleContext(TParser.RContext,i)

        def MUL(self):
            return self.getToken(TParser.MUL, 0)

        def enterRule(self, listener):
            if hasattr(listener, "enterMul"):
                listener.enterMul(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitMul"):
                listener.exitMul(self)

        def accept(self, visitor):
            if hasattr(visitor, "visitMul"):
                return visitor.visitMul(self)
            else:
                return visitor.visitChildren(self)

    class IntContext(RContext):

        def __init__(self, parser, ctx): # actually a TParser.RContext)
            super(TParser.IntContext, self).__init__(parser)
            self.copyFrom(ctx)

        def INT(self):
            return self.getToken(TParser.INT, 0)

        def enterRule(self, listener):
            if hasattr(listener, "enterInt"):
                listener.enterInt(self)

        def exitRule(self, listener):
            if hasattr(listener, "exitInt"):
                listener.exitInt(self)

        def accept(self, visitor):
            if hasattr(visitor, "visitInt"):
                return visitor.visitInt(self)
            else:
                return visitor.visitChildren(self)

    def r(self, _p=0):
        # Parse rule `r`.  `_p` is the minimum operator precedence that may
        # be consumed here (ANTLR's encoding of the left-recursive rule).
        _parentctx = self._ctx
        _parentState = self.state
        localctx = TParser.RContext(self, self._ctx, _parentState)
        _prevctx = localctx
        _startState = 0
        self.enterRecursionRule(localctx, 0, self.RULE_r, _p)
        try:
            self.enterOuterAlt(localctx, 1)
            self.state = 8
            # Primary alternative: parenthesised group or bare integer.
            token = self._input.LA(1)
            if token in [TParser.T__0]:
                localctx = TParser.GroupContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 3
                self.match(TParser.T__0)
                self.state = 4
                self.r(0)
                self.state = 5
                self.match(TParser.T__1)

            elif token in [TParser.INT]:
                localctx = TParser.IntContext(self, localctx)
                self._ctx = localctx
                _prevctx = localctx
                self.state = 7
                self.match(TParser.INT)

            else:
                raise NoViableAltException(self)

            self._ctx.stop = self._input.LT(-1)
            self.state = 24
            self._errHandler.sync(self)
            # Loop consuming trailing binary-operator tails while the
            # adaptive prediction says another one follows.
            _alt = self._interp.adaptivePredict(self._input,2,self._ctx)
            while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
                if _alt==1:
                    if self._parseListeners is not None:
                        self.triggerExitRuleEvent()
                    _prevctx = localctx
                    self.state = 22
                    la_ = self._interp.adaptivePredict(self._input,1,self._ctx)
                    if la_ == 1:
                        localctx = TParser.MulContext(self, TParser.RContext(self, _parentctx, _parentState))
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_r)
                        self.state = 10
                        if not self.precpred(self._ctx, 6):
                            from antlr4.error.Errors import FailedPredicateException
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 6)")
                        self.state = 11
                        self.match(TParser.MUL)
                        self.state = 12
                        self.r(7)
                        pass

                    elif la_ == 2:
                        localctx = TParser.AddContext(self, TParser.RContext(self, _parentctx, _parentState))
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_r)
                        self.state = 13
                        if not self.precpred(self._ctx, 5):
                            from antlr4.error.Errors import FailedPredicateException
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
                        self.state = 14
                        self.match(TParser.ADD)
                        self.state = 15
                        self.r(6)
                        pass

                    elif la_ == 3:
                        localctx = TParser.DivContext(self, TParser.RContext(self, _parentctx, _parentState))
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_r)
                        self.state = 16
                        if not self.precpred(self._ctx, 4):
                            from antlr4.error.Errors import FailedPredicateException
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
                        self.state = 17
                        self.match(TParser.DIV)
                        self.state = 18
                        self.r(5)
                        pass

                    elif la_ == 4:
                        localctx = TParser.SubContext(self, TParser.RContext(self, _parentctx, _parentState))
                        self.pushNewRecursionContext(localctx, _startState, self.RULE_r)
                        self.state = 19
                        if not self.precpred(self._ctx, 3):
                            from antlr4.error.Errors import FailedPredicateException
                            raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
                        self.state = 20
                        self.match(TParser.SUB)
                        self.state = 21
                        self.r(4)
                        pass

                self.state = 26
                self._errHandler.sync(self)
                _alt = self._interp.adaptivePredict(self._input,2,self._ctx)

        except RecognitionException as re:
            localctx.exception = re
            self._errHandler.reportError(self, re)
            self._errHandler.recover(self, re)
        finally:
            self.unrollRecursionContexts(_parentctx)
        return localctx

    def sempred(self, localctx, ruleIndex, predIndex):
        # Dispatch a semantic predicate check to the per-rule handler
        # (only rule 0, `r`, has predicates in this grammar).
        if self._predicates == None:
            self._predicates = dict()
            self._predicates[0] = self.r_sempred
        pred = self._predicates.get(ruleIndex, None)
        if pred is None:
            raise Exception("No predicate with index:" + str(ruleIndex))
        else:
            return pred(localctx, predIndex)

    def r_sempred(self, localctx, predIndex):
        # Precedence predicates for the left-recursive alternatives of `r`:
        # MUL(6) > ADD(5) > DIV(4) > SUB(3), as encoded by the grammar.
        if predIndex == 0:
            return self.precpred(self._ctx, 6)

        if predIndex == 1:
            return self.precpred(self._ctx, 5)

        if predIndex == 2:
            return self.precpred(self._ctx, 4)

        if predIndex == 3:
            return self.precpred(self._ctx, 3)
|
[
"hskrus@gmail.com"
] |
hskrus@gmail.com
|
437b387c6980f53268b75171dcdcc65a88269569
|
691f3bf0e15f5bec4cf70ac451e7b4a3e92b6dbb
|
/setup.py
|
031e600ceadd1dee4a0f42be3ec90e937ab915f8
|
[
"BSD-3-Clause"
] |
permissive
|
a-tal/pyweet
|
ac90699468bd8b3c450492068a50b93f321d1667
|
61bd9ab7a28ca8e917051977bfcf84904dd06c77
|
refs/heads/master
| 2021-01-17T10:22:10.966528
| 2018-03-10T18:21:58
| 2018-03-10T18:21:58
| 17,307,288
| 1
| 1
|
BSD-3-Clause
| 2018-03-05T00:49:25
| 2014-03-01T03:59:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,523
|
py
|
"""Pyweet's setup/installer."""
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """Bridge `python setup.py test` to pytest."""

    def finalize_options(self):
        """Configure pytest arguments (per pytest's good-practices guide)."""
        TestCommand.finalize_options(self)
        self.test_args = ["-v", "-rf", "--cov", "pyweet", "test"]
        self.test_suite = True

    def run_tests(self):
        """Run pytest and exit with its status code."""
        # Imported lazily: the test dependencies ("eggs") are only on the
        # path once setup.py has resolved them.
        import pytest
        raise SystemExit(pytest.main(self.test_args))
# Distribution metadata; `python setup.py test` is wired to pytest via the
# PyTest command class defined in this module.
setup(
    name="pyweet",
    version="0.0.6",
    author="Adam Talsma",
    author_email="adam@talsma.ca",
    packages=["pyweet"],
    install_requires=["twitter", "blessings"],
    scripts=["bin/pyweet"],
    url="https://github.com/a-tal/pyweet",
    description="Twitter command line util",
    long_description="Yet another Twitter command line utility.",
    download_url="https://github.com/a-tal/pyweet",
    tests_require=["pytest", "mock", "pytest-cov", "coverage"],
    cmdclass={"test": PyTest},
    license="BSD",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: BSD License",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python",
    ],
)
|
[
"github@talsma.ca"
] |
github@talsma.ca
|
097a632a53b4401a64a6fe58723296b12a6b7b74
|
a2cb0b841519db1b8442ce2bb100572d8683e861
|
/ToggleBreakpoint.py
|
acacb88e03eb06c9460f83b82b037a2221875373
|
[
"MIT"
] |
permissive
|
Falven/Debug
|
ea159e3beeb98325ae43f3157f7c459252457671
|
bf45898789d3539dd104a42ce33e18747dd3300b
|
refs/heads/master
| 2016-09-05T17:14:17.974818
| 2015-04-19T01:28:42
| 2015-04-19T01:28:42
| 34,187,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
import sublime, sublime_plugin
def plugin_loaded():
    """Sublime Text lifecycle hook: mark this plugin as fully loaded."""
    global pluginLoaded
    pluginLoaded = True
class ToggleBreakpointCommand(sublime_plugin.TextCommand):
    """Text command that logs a message once the plugin has loaded."""

    def run(self, edit):
        # `pluginLoaded` only exists after plugin_loaded() has fired; look
        # it up with a default so an early invocation of the command cannot
        # raise NameError (the original read the bare global directly).
        if globals().get("pluginLoaded", False):
            print("Toggled A Breakpoint.")
|
[
"Falven2000@Hotmail.com"
] |
Falven2000@Hotmail.com
|
f6010c075c7e7f753c9b7e31b4b2e7208d97e792
|
4ec73d7b22ef19c0d1301b055a5bbaf2a0931807
|
/main_app/migrations/0002_auto_20181204_0014.py
|
7f834505cb2def39e1fe804ae1e31e8a3987da8d
|
[] |
no_license
|
nicklogin/realec_testing_platform
|
89138160f100d2028520f8ea6325863a82623113
|
541951ec8169120b66232a0c85015c2063be5a3d
|
refs/heads/master
| 2023-01-28T02:10:53.205266
| 2020-04-18T09:27:29
| 2020-04-18T09:27:29
| 227,206,248
| 0
| 0
| null | 2023-01-04T13:57:06
| 2019-12-10T20:08:41
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 399
|
py
|
# Generated by Django 2.1.3 on 2018-12-03 21:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes Question.quiz to a
    # ManyToManyField against main_app.Quizz.
    # NOTE(review): null=True has no effect on a ManyToManyField (Django
    # flags it as fields.W340); kept as-is because editing a historical
    # migration would change its deconstructed state and trigger a new
    # auto-detected migration -- confirm before cleaning up.

    dependencies = [
        ('main_app', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='question',
            name='quiz',
            field=models.ManyToManyField(null=True, to='main_app.Quizz'),
        ),
    ]
|
[
"niklogin10v@yandex.ru"
] |
niklogin10v@yandex.ru
|
ae16dc3d50f09ea7a2439484841531bbbd5513dc
|
5e715d3ab6769af0e16de3784b72ffc20b834c34
|
/dags/milkyway/exotel_responses.py
|
bc181b2dc423da4b4d5e4555572d954a818e0e5e
|
[
"MIT"
] |
permissive
|
goodhamgupta/mars
|
6943483b428cffc77e1f1aa2e7d7ee3aa1d04ba5
|
06371dd35a94a5e3605e9256972f6d2a5197459f
|
refs/heads/master
| 2023-03-25T17:09:30.780427
| 2020-03-06T15:42:01
| 2020-03-06T15:42:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
# -*- coding: utf-8 -*-
# Airflow DAG for exotel responses
from operators.sqoop_emr_workflow import SqoopEmrWorkflow
from airflow import DAG
# Daily (13:00) Sqoop-to-EMR export of exotel responses from the milkyway app.
params = dict(
    schedule_interval='0 13 * * *',
    source_app='milkyway',
    destination='exotel_responses',
)

# Module-level `dag` is what Airflow's DAG discovery picks up.
dag = SqoopEmrWorkflow.create(params)
|
[
"shubhamg2208@live.com"
] |
shubhamg2208@live.com
|
413db050c8202c87e9e287faeda03fcb45def5a5
|
2e7617666070ad05dab301069cfbe63d83d448b0
|
/states/__init__.py
|
43ad1580f1cb83ff8ddb4dd560c0126231b98c12
|
[] |
no_license
|
dmartinchick/tbot
|
92f44e4bdb091ae759cfa0234ead699b2902709d
|
094b5ef003b96278ee001b273a913309011f94f7
|
refs/heads/master
| 2023-06-03T02:30:04.298770
| 2021-06-17T16:54:48
| 2021-06-17T16:54:48
| 369,304,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24
|
py
|
from . import add_result
|
[
"dmartinchick@gmail.com"
] |
dmartinchick@gmail.com
|
d5901153c01b4dc40f38bb9626832f79d17b7ca7
|
eb3ebcd52ef137fc1560b6b65197627533eddf41
|
/Utilize/web_scraping/6_bs4.py
|
d1c4650dc227c8d3d92e2616489def567cc3e2cc
|
[] |
no_license
|
Kor-KTW/PythonWorkSpace
|
a0d14b3be9af1266b35440261fa2ec0a11534288
|
e23ce4a4d104ff665d6a5544ce556b469c4d9d89
|
refs/heads/master
| 2023-01-04T05:10:38.362965
| 2020-11-06T05:11:09
| 2020-11-06T05:11:09
| 304,473,110
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,913
|
py
|
import requests
from bs4 import BeautifulSoup

# Fetch the Naver webtoon weekday listing and look up one episode link.
url = "https://comic.naver.com/webtoon/weekday.nhn"
res = requests.get(url)
res.raise_for_status()

soup = BeautifulSoup(res.text, "lxml")  # parse the fetched HTML into a BeautifulSoup tree using the lxml parser

# When you already understand the page structure:
# print(soup.title)
# print(soup.title.get_text())
# print(soup.a)  # first <a> element found in the document
# print(soup.a.attrs)  # attribute dict of that <a> element
# print(soup.a["href"])  # value of that <a>'s href attribute

# When you do not know the page layout, search by class, tag name, etc.:
# print(soup.find("a", attrs={"class":"Nbtn_upload"}))  # <a> element whose class is Nbtn_upload
# print(soup.find(attrs={"class":"Nbtn_upload"}))  # first element of any tag whose class is Nbtn_upload
# print(soup.find("li", attrs={"class":"rank01"}))  # tag name li, class name rank01
# rank1 = soup.find("li", attrs={"class":"rank01"})
# rank2 = rank1.next_sibling.next_sibling
# rank3 = rank2.next_sibling.next_sibling
# rank2 = rank3.previous_sibling.previous_sibling
# print(rank2.get_text())
# print(rank1.a.get_text())  # the <a> tag inside it
# print(rank1.next_sibling)
# print(rank1.next_sibling.next_sibling)
# print(rank1.parent)
# rank2 = rank1.find_next_sibling("li")
# print(rank2.a.get_text())
# rank3 = rank2.find_next_sibling("li")
# print(rank3.a.get_text())
# rank3 = rank2.find_previous_sibling("li")
# print(rank2.a.get_text())
# print(rank1.find_next_siblings("li"))

# Find the episode anchor by its exact (Korean) link text.
webtoon = soup.find("a", text="연애혁명-323. 마음의 저울")
print(webtoon)
#<a onclick="nclk_v2(event,'rnk*p.cont','570503','2')" href="/webtoon/detail.nhn?titleId=570503&no=327" title="연애혁명-323. 마음의 저울">연애혁명-323. 마음의 저울</a>
|
[
"xodn5492@naver.com"
] |
xodn5492@naver.com
|
116d0392934aa11564552e963f5f466650dfd304
|
b30d74626b0e3de17507655b1afbc73e3b81720e
|
/TurretController.py
|
83cec4f3c12c2ccd8439a52b38320272f30b1edd
|
[] |
no_license
|
CRATOS-360NoScope/CRATOS-Server
|
4d114c12c76781a1796a885e78974e9a11e2a178
|
841c017881a20d8a41cbaa69265519f5f22b2aff
|
refs/heads/master
| 2021-01-20T00:58:40.123543
| 2015-12-02T20:33:47
| 2015-12-02T20:33:47
| 42,613,814
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,481
|
py
|
import RPi.GPIO as GPIO
import time
import threading
class TurretController:
GPIO_PIN_YAW = 11
GPIO_PIN_PITCH = 12
GPIO_PIN_FIRE = 13
pwm_yaw = None
pwm_pitch = None
pwm_fire = None
min_pitch = 6.8 #was 0
max_pitch = 10.0 #was 12.5
current_pitch = 8.4
stopPitchFlag = False
DEBUG = False
pitchDelta = False
pitchingActive = False
pitchThread = None
def __init__(self, pin_yaw, pin_pitch, pin_fire, debug=False):
self.GPIO_PIN_YAW = pin_yaw
self.GPIO_PIN_PITCH = pin_pitch
self.GPIO_PIN_FIRE = pin_fire
GPIO.setmode(GPIO.BOARD)
GPIO.setup(self.GPIO_PIN_YAW, GPIO.OUT)
GPIO.setup(self.GPIO_PIN_PITCH, GPIO.OUT)
GPIO.setup(self.GPIO_PIN_FIRE, GPIO.OUT)
self.pwm_yaw = GPIO.PWM(self.GPIO_PIN_YAW, 53)
self.pwm_pitch = GPIO.PWM(self.GPIO_PIN_PITCH, 50)
self.pwm_fire = GPIO.PWM(self.GPIO_PIN_FIRE, 25)
self.pwm_yaw.start(7.5)
self.pwm_pitch.start(self.current_pitch)
self.pwm_fire.start(2)
self.DEBUG = debug
self.triggerThread = threading.Thread(target=self.triggerWork)
self.pitchThread = threading.Thread(target=self.pitchWorker)
time.sleep(0.5)
self.pwm_yaw.ChangeDutyCycle(0)
self.pwm_pitch.ChangeDutyCycle(0)
self.pwm_fire.ChangeDutyCycle(0)
self.triggerLock = threading.Lock();
def __del__(self):
self.pwm_yaw.stop()
self.pwm_pitch.stop()
GPIO.cleanup()
def triggerWork(self):
if (self.triggerLock.locked()):
return
self.triggerLock.acquire()
self.pwm_fire.ChangeDutyCycle(5) #pull trigger(not tested)
time.sleep(0.5)
self.pwm_fire.ChangeDutyCycle(2) #return to original spot
time.sleep(0.5)
self.triggerLock.release()
if self.DEBUG:
print "Trigger Pulled"
return
def pullTrigger(self, sensitivity=1):
self.pitchThread = threading.Thread(target=self.triggerWork)
self.pitchThread.start()
# direction +1 for clockwise, -1 for reverse
def startYaw(self, direction, sensitivity=1):
modifier = 2.5*sensitivity
if self.DEBUG:
print "** startYaw **"
print "Direction: "+str(direction)
print "Sensitivity: "+str(sensitivity)
print "Duty Cycle: "+str(7.5+(modifier*direction))
self.pwm_yaw.ChangeDutyCycle(7.5+(modifier*direction))
def stopYaw(self):
if self.DEBUG:
print "** stopYaw **"
self.pwm_yaw.ChangeDutyCycle(0) #was 7.5 -> 0 is off
def startPitch(self, direction, sensitivity=100):
self.pitchDelta = -float(sensitivity)/10000.0
if self.DEBUG:
print "** startPitch **"
print "pitchDelta: "+str(self.pitchDelta)
print "Sensitivity: "+str(sensitivity)
if not self.pitchingActive:
if self.DEBUG:
print "pitchThread run"
self.pitchThread = threading.Thread(target=self.pitchWorker)
self.stopPitchFlag = False
self.pitchThread.start()
print "pitchThread running"
def pitchWorker(self):
while not self.stopPitchFlag:
self.current_pitch += self.pitchDelta
if self.current_pitch > self.max_pitch:
self.current_pitch = self.max_pitch
break
if self.current_pitch < self.min_pitch:
self.current_pitch = self.min_pitch
break
#if self.DEBUG:
#print "Duty Cycle: "+str(self.current_pitch)
self.pwm_pitch.ChangeDutyCycle(self.current_pitch)
time.sleep(0.01)
time.sleep(0.2)
self.pitchingActive = False
#self.pwm_pitch.ChangeDutyCycle(0)
print "pitchWorker exit"
return
def stopPitch(self):
if self.DEBUG:
print "** stopPitch **"
print "currentPitch: "+str(self.current_pitch)
self.stopPitchFlag = True
|
[
"cwirt@purdue.edu"
] |
cwirt@purdue.edu
|
5542bd603d27b031d107e5285aa6a8d9e6700e91
|
436c9ee595ab3dc6f6b0dc2cd695c62de81c8ecd
|
/core/utils/data.py
|
803e05c480370eaee1f1233e96384912e144b6cd
|
[
"Apache-2.0"
] |
permissive
|
viep/cmdbac
|
f186768754bddededa15c1692c2882c18af65b18
|
2df4b7980d22bd42a8128d9468de101307fc52ac
|
refs/heads/master
| 2021-01-18T00:21:43.040368
| 2016-05-09T12:23:38
| 2016-05-09T12:23:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,270
|
py
|
#!/usr/bin/env python
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
import json
import logging
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cmudbac.settings")
import django
django.setup()
import library
from library.models import *
import utils
## =====================================================================
## LOGGING CONFIGURATION
## =====================================================================
LOG = logging.getLogger()
def get_crawler(crawler_status, repo_source):
    """Instantiate the crawler class named by `repo_source.crawler_class`.

    The class is imported dynamically from the `crawlers` package.
    GitHub credentials are read from secrets/secrets.json when present;
    a missing or unparsable file simply means anonymous crawling.

    :param crawler_status: CrawlerStatus row passed through to the crawler
    :param repo_source: source row whose `crawler_class` names the class
    :return: new crawler instance
    """
    moduleName = "crawlers.%s" % (repo_source.crawler_class.lower())
    moduleHandle = __import__(moduleName, globals(), locals(), [repo_source.crawler_class])
    klass = getattr(moduleHandle, repo_source.crawler_class)
    # FOR GITHUB: optional auth token.  Only swallow I/O and JSON-parse
    # errors -- the original bare `except:` hid every failure, including
    # KeyboardInterrupt.  The `with` block also closes the handle reliably.
    secrets_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, "secrets", "secrets.json")
    try:
        with open(secrets_path, 'r') as auth_file:
            auth = json.load(auth_file)
    except (IOError, OSError, ValueError):
        auth = None
    crawler = klass(crawler_status, auth)
    return crawler
def add_module(module_name, package_name, package_type_id, package_version):
    """Create a Module row linked to the (package_name, package_version) Package.

    The Package row is created on demand.  Raises ProjectType.DoesNotExist
    when `package_type_id` is invalid.
    """
    # Kept for its validation side effect only: fail fast on a bad type id
    # (the fetched row itself was never used by the original either).
    ProjectType.objects.get(id=package_type_id)
    # get_or_create already returns the row; the original issued a
    # redundant second query to re-fetch it.
    package, _ = Package.objects.get_or_create(name=package_name, version=package_version)
    module = Module()
    module.name = module_name
    module.package = package
    module.save()
def add_repo(repo_name, crawler_status_id, repo_setup_scripts):
    """Register repository `repo_name` with the crawler configured for
    `crawler_status_id`, passing `repo_setup_scripts` along.

    Raises CrawlerStatus.DoesNotExist for an unknown id.
    """
    cs = CrawlerStatus.objects.get(id=crawler_status_id)
    # (The original also read cs.project_type into an unused local.)
    crawler = get_crawler(cs, cs.source)
    crawler.add_repository(repo_name, repo_setup_scripts)
def deploy_repo(repo_name, database = 'PostgreSQL'):
repo = Repository.objects.get(name=repo_name)
print 'Attempting to deploy {} using {} ...'.format(repo, repo.project_type.deployer_class)
try:
result = utils.vagrant_deploy(repo, 0, database)
except Exception, e:
LOG.exception(e)
raise e
return result
def delete_repo(repo_name):
    """Delete every Repository row whose name matches `repo_name`."""
    matching = Repository.objects.filter(name=repo_name)
    for repository in matching:
        repository.delete()
|
[
"zeyuanxy@gmail.com"
] |
zeyuanxy@gmail.com
|
14346f5e4229b348c50f7dc83dd2a7143bad9b2d
|
2ba64b9b4b91af5f4730a1617cb0329f766690e2
|
/ML_uniform/network_test_nonuniform.py
|
03ad230969f925ee628d285e75c9d38a9948df29
|
[] |
no_license
|
RalphKang/nonuniformity-effect-on-LAS--temperature-measurement
|
601f7ffd4287839aa6895694c15b7e390ac74c29
|
f35e30be38ffd37cd0d9db9e069c63e9fd040fe8
|
refs/heads/master
| 2023-04-08T07:14:11.752060
| 2022-11-08T08:38:09
| 2022-11-08T08:38:09
| 563,204,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,559
|
py
|
from network_archive.vgg import *
from dataset import dataset_all, dataset_all_2
from sklearn.metrics import mean_squared_error
import torch
import numpy as np
"""
This code is used to test network models on nonuniform twins, and
record the predict temperature in temp_dif_aug_1e-x_xxx.csv
"""
#%% data reading
data_dir='./input/data_nonuniform/file'
label_dir='./input/data_nonuniform/label'
order_dir='./file_reading_order.csv'
spec,_,__,___=dataset_all_2(data_dir,label_dir, order_dir)
"""
read spectra twins, the information inside the temp_dens_comp.csv are as follows:
first column: 0,index of current spectra
second column: 1, the counrterpart index to make up spectra twin
third column: 2, the temperature difference
fourth column: 3, the average temperature for current spectra
fifth column: 4, the average temperature for the counterpart spectra
sixth column: 5, the similarity level
"""
temp_dif=np.loadtxt('input/temp_dif_data/1e-3/temp_dens_comp.csv')
#%% data check, random pick some data to check whether temperature can match or not
column=5
if temp_dif[column,1]==np.where(temp_dif[:,3]==temp_dif[column,4])[0]:
print("true",temp_dif[column,4])
else:
print('cannot match')
#%% read normalization item
spec_max=np.loadtxt('input/norm_case/spec_max.csv')
spec_min=np.loadtxt('input/norm_case/spec_min.csv')
temp_bound=np.loadtxt('input/norm_case/temp_bound.csv')
#%% data normalization
spec_norm=(spec-spec_min)/(spec_max-spec_min)
spec_norm=np.expand_dims(spec_norm,1)
test_tcc=torch.from_numpy(spec_norm).float()
#%% load model
model= VGG(make_layers(cfg['B'], batch_norm=False),1)
model.to("cuda")
model_save_dir = './model/vgg_B_uniform.pt'
model.load_state_dict(torch.load(model_save_dir))
#%%
pred_test_norm=[]
for test in test_tcc:
test_res=test.reshape([1,1,-1])
test_res=test_res.to('cuda')
pred_test=model(test_res)
pred_test_norm.append(pred_test.detach().cpu().numpy().squeeze())
#%%
pred_test_norm=np.array(pred_test_norm)
#%%
temp_pred=pred_test_norm*(temp_bound[1]-temp_bound[0])+temp_bound[0]
temp_main_order = temp_dif[:, 0].astype(np.int)
temp_pred_main = temp_pred[temp_main_order]
temp_sim_order = temp_dif[:, 1].astype(np.int)
temp_pred_sim = temp_pred[temp_sim_order]
# %%
temp_dif_temp_pred = np.hstack((temp_dif, np.reshape(temp_pred_main, [-1, 1])))
temp_dif_aug = np.hstack((temp_dif_temp_pred, np.reshape(temp_pred_sim, [-1, 1])))
#%%
save_dir='out_save/temp_dif_aug_1e-3_vgg.csv'
np.savetxt(save_dir, temp_dif_aug)
#%%
# np.savetxt('./input/data_10p/dens_temp_10p_pred.csv',pred_test)
|
[
"kang1528530671@gmail.com"
] |
kang1528530671@gmail.com
|
89f93a8916b99c5fd18697c7ee126f07ead61d2a
|
acbf4390b5892ea7c9e1095c5cc19a0227dbc8f7
|
/src/utils/ticktock.py
|
d5a4acdaf22eca24d8b8ab203e739e1ffb594214
|
[] |
no_license
|
gongzhitaao/adversarial-text
|
5d68d26bcac46b69b2df0f231bd642837cb9848c
|
0b2009a504492edbb2a7f0c7f972b9efd7aa60f2
|
refs/heads/master
| 2021-10-23T09:12:49.032948
| 2019-03-16T15:15:23
| 2019-03-16T15:15:23
| 116,075,349
| 36
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,533
|
py
|
from timeit import default_timer
from functools import wraps
import logging
__all__ = ['Timer', 'tick']
logger = logging.getLogger(__name__)
info = logger.info
class Timer(object):
    """Context-manager stopwatch.

    Logs `msg` on entry and the formatted elapsed time on exit; `factor`
    scales the reported duration and `fmt` controls its rendering.
    """

    def __init__(self, msg='timer starts', timer=default_timer, factor=1,
                 fmt='elapsed {:.4f}s'):
        self.timer = timer
        self.factor = factor
        self.fmt = fmt
        self.end = None
        self.msg = msg

    def __call__(self):
        """Sample the underlying clock."""
        return self.timer()

    def __enter__(self):
        """Log the start message and record the start time."""
        info(self.msg)
        self.start = self()
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Record the end time and log the formatted elapsed duration."""
        self.end = self()
        info(str(self))

    def __repr__(self):
        return self.fmt.format(self.elapsed)

    @property
    def elapsed(self):
        """Scaled elapsed time: live while inside the context manager,
        frozen once the context has exited."""
        stop = self() if self.end is None else self.end
        return (stop - self.start) * self.factor
def tick(f):
    """Decorator that logs the wall-clock run time of each call to *f*."""
    @wraps(f)
    def wrapper(*args, **kw):
        t0 = default_timer()
        result = f(*args, **kw)
        t1 = default_timer()
        info('{0} elapsed: {1:.4f}s'.format(f.__name__, t1 - t0))
        return result
    return wrapper
|
[
"zhitaao.gong@gmail.com"
] |
zhitaao.gong@gmail.com
|
078327eb8cbffb63249d9b201aee71f0e8110bbb
|
84326f32d61d983826d7ae1d40951c43c47b0842
|
/gy4/client_gyak3_f4.py
|
5aae8cef28b6efcf35017f4451c828c514a1622d
|
[] |
no_license
|
bkiac/ELTE.computer-networks
|
749c82b92f83476ab714b3ae2f04da139a19debe
|
ee9b57359b28203e32acf471c64b409cef78a9aa
|
refs/heads/master
| 2021-10-08T03:59:02.640867
| 2018-12-07T12:36:17
| 2018-12-07T12:36:17
| 149,986,172
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
import socket
import struct
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 10000)
connection.connect(server_address)
values = (7.0, '-', 2.0)
packer = struct.Struct('f c f')
packed_data = packer.pack(*values)
print '%f %s %f' % values
connection.sendall(packed_data)
result = connection.recv(16)
print result
connection.close()
|
[
"benceknab.iac@gmail.com"
] |
benceknab.iac@gmail.com
|
0049ef5dfb5967e446a1664834ff89d8811bf15d
|
1cd8c5548a5c1b6b0885f27f11b3f2300abbc563
|
/index.py
|
24753fa2dd05d305cd38cc208dbfaa49d98117ff
|
[] |
no_license
|
KiroCarllos/Problem_Solving
|
5cd0efd73009d47c4c1e7afdb3a5e7192c475efc
|
73c87515bf297843ef9c095da0e9d749c015c7e2
|
refs/heads/master
| 2022-11-11T14:50:26.407643
| 2020-07-01T16:43:23
| 2020-07-01T16:43:23
| 276,431,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,292
|
py
|
# 1- print Hello World
# employee = input('Enter Your Employee');
# print("Hello, "+ employee);
###################################
# 2- DataTypes
# ints,longs,chars,floats,doubles = input().split();
# print(int(ints))
# print(longs)
# print(str(chars))
# print(float(floats))
# print(doubles);
###################################
# 3- Simple Calculator
# x,y = input().split();
# z = int(x) + int(y);
# c = int(x) * int(y);
# b = int(x) - int(y);
# print(str(x)+" + "+str(y) +" = " + str(z))
# print(str(x)+" * "+str(y) +" = " + str(c))
# print(str(x)+" - "+str(y) +" = " + str(b))
###################################
# 4- D. Difference
# A,B,C,D = input().split();
# X = (int(A) * int(B)) - (int(C) * int(D));
# print("Difference"+" = ",int(X));
###################################
# 5- Area Of Circle
# R = input();
# Area = 3.141592653 * float(R) * float(R);
# print(Area)
###################################
# 6- F. Digits Summation
# A,B = input().split();
# x = int(A) % 10;
# y = int(B) % 10;
# z = x + y;
# print(z)
###################################
# 7- G. Summation from 1 to N
# 641009859
# num = int(input())
# sum =0
# while num > 0:
# sum = sum + num
# num = num - 1;
# print(sum)
# n = int(input())
# sum = (n*(n+1)/2)
# print(int(sum))
###################################
# 8 - Tow Numbers Floor - Ceil - Round
# import math
# x,y = (input()).split()
# z= float(x) / float(y);
# print ("floor",x,"/",y ,"=",math.floor(z))
# print ("ceil",x,"/",y ,"=",math.ceil(z))
# print ("round",x,"/",y ,"=",(round(z)))
###################################
# 9- Welcome for you with Conditions
# x,y = (input()).split()
# if(int(x) >= int(y)):
# print("Yes")
# else:
# print("No")
###################################
# 10- J. Multiples
# x,y = input().split()
# if((int(x) % int(y)) == 0 or (int(y) % int(x)) == 0):
# print("Multiples")
# else:
# print("No Multiples")
###################################
# 11- Min Vs Max
# x,y,z = input().split()
# if(int(x) <= int(y) and int(x) <= int(z) and int(y) >= int(z)):
# print(x,y)
# elif (int(x) <= int(y) and int(x) <= int(z) and int(z) >= int(y)):
# print(x, z)
# elif (int(y) <= int(x) and int(y) <= int(z) and int(z) >= int(x)):
# print(y, z)
# elif (int(y) <= int(x) and int(y) <= int(z) and int(x) >= int(z)):
# print(y,x)
# elif (int(z) <= int(x) and int(z) <= int(y) and int(y) >= int(x)):
# print(z, y)
# elif (int(z) <= int(x) and int(z) <= int(y) and int(x) >= int(y)):
# print(z,x)
###################################
# 12- M. Capital or Small or Digit
# x = (input())
# try:
# val = int(x)
# print("IS DIGIT")
# except ValueError:
# try:
# val = float(x)
# print("IS DIGIT")
# except ValueError:
# if(x.isupper()== True):
# print("ALPHA")
# print("IS CAPITAL")
# else:
# print("ALPHA")
# print("IS SMALL")
###################################
# 13- Calculator
# x,s,y = input().split(" ")
# if(str(s) == "+"):
# print(int(x[0]) + int(y[0]))
# elif(str(s) == "-"):
# print(int(x) - int(y))
# elif(str(s) == "*"):
# print(int(x) * int(y))
# elif(str(s) == "/"):
# print(int(x) / int(y))
###################################
# 14- First digit !
# x = input().split()[0]
# if(int(x[0]) % 2 == 0):
# print("EVEN")
# else:
# print("ODD")
###################################
# 15- Q. Coordinates of a Point
# x,y = input().split()
# if(float(x) > 0 and float(y) > 0):
# print("Q1")
# elif (float(x) < 0 and float(y) > 0):
# print("Q2")
# elif(float(x) > 0 and float(y) < 0):
# print("Q4")
# elif (float(x) < 0 and float(y) < 0):
# print("Q3")
# elif(float(x) > 0 and float(y) == 0):
# print("Eixo X")
# elif(float(y) > 0 and float(x) == 0):
# print("Eixo Y")
# elif(float(x) < 0 and float(y) == 0):
# print("Eixo X")
# elif(float(y) < 0 and float(x) == 0):
# print("Eixo Y")
# elif(float(y) ==0 and float(x) == 0):
# print("Origem")
###################################
# 16- Q. R. Age in Days
# x = input()
# year = int(x) / 365;
# monthes = ((int(x)) - int(year)*365)/30
# days = int(x) - ((int(year)*365) + int(monthes) * 30)
# print(int(year) ,"years")
# print(int(monthes) ,"months")
# print(int(days) ,"days")
###################################
# 17- S. Interval
# x = input()
# if(float(x) >= 0 and float(x) <= 25):
# print("Interval [0,25]")
# elif(float(x) >= 25 and float(x) <= 50):
# print("Interval (25,50]")
# elif(float(x) >= 50 and float(x) <= 75):
# print("Interval (50,75]")
# elif(float(x) >= 75 and float(x) <= 100):
# print("Interval (75,100]")
# else:
# print("Out of Intervals")
###################################
# 18- U. Float or int
# x = input().split(".")
# if(int(x[1]) == 0 ):
# print("int "+x[0])
# else:
# print("float " + x[0]+" 0."+x[1])
###################################
# 19- V. Comparison
# A,s,B = input().split()
# if(str(s) == ">" and int(A) > int(B)):
# print("Right")
# elif(str(s) == "<" and int(A) < int(B)):
# print("Right")
# elif(str(s) == "=" and int(A) == int(B)):
# print("Right")
# else:
# print("Wrong")
###################################
# 20- W. Mathematical Expression
# A,s,B,Q,C = input().split(" ")
# x = 0
# if(str(s) == "+"):
# x= (int(A) + int(B))
# elif(str(s) == "-"):
# x= (int(A) - int(B))
# elif(str(s) == "*"):
# x= (int(A) * int(B))
# elif(str(s) == "/"):
# x= (int(A) / int(B))
# else:
# print("Operator Not Suported")
#
# if(int(C) == int(x)):
# print("Yes")
# else:
# print(int(x))
###################################
# 21- X. Two intervals
# A,B,C,D = input().split()
# if(int(C) > int(A) and int(D) < int(B)):
# print((C) + " " + (D))
# elif(int(B) - int(C) > 0):
# print((C) + " " + (B))
# elif(int(B) - int(C) == 0):
# print((C) + " " + (B))
# elif(int(C) < int(A) and int(D) <= int(B)):
# print((C) + " " + (A))
# else:
# print("-1")
# ##################################### Complete Level A
# Get Start With Level B
# 1- A. 1 to N
# x= input()
# for i in range(1,int(x)+1):
# print(i)
###################################
# 2- B. Even Numbers
# x= input()
# for i in range(1,int(x)+1):
# if(i % 2 == 0):
# print(i)
###################################
# 3- C. Even, Odd, Positive and Negative
# y= input()
# x= input().split(" ",int(y))
# Even = 0
# Odd = 0
# Positive= 0
# Negative= 0
# i =0
# for i in x:
# if(int(i) % 2 == 0 and int(i) > 0):
# Even+=1
# Positive += 1
# if(int(i) == 0 ):
# Even+=1
# if(int(i) % 2 == 0 and int(i) < 0):
# Even+=1
# Negative += 1
# if(int(i) % 2 and int(i) >= 0):
# Odd+=1
# Positive += 1
# if(int(i) % 2 and int(i) <= 0):
# Odd+=1
# Negative += 1
# print("Even:",Even)
# print("Odd:",Odd)
# print("Positive:",Positive)
# print("Negative:",Negative)
###################################
# 4- D. Fixed Password
# d= input()
# y= input()
# a= input()
# b= input()
# c= input()
# x = [int(d),int(y),int(a),int(b),int(c)]
# correct_Pass = 1999;
# for i in x:
# if( int(i) == correct_Pass):
# print("Correct")
# break
# if (int(i) != correct_Pass):
# print("Wrong")
###################################
# 5- F. Multiplication table
# x= input()
# for i in range(1,13):
# z = int(x) * int(i)
# print(int(x),"*",int(i),"=",int(z))
###################################
# 6- E. Max
# y= input()
# x= input().split(" ",int(y))
# for i in range(0, len(x)):
# x[i] = int(x[i])
# print(max(x))
###################################
# 7- G. Factorial
# y= input()
# x =[]
# factorial =1
# for i in range(0,int(y)):
# x+=input()
# for a in x:
# if int(a) >= 1:
# for j in range(1, int(a) + 1):
# factorial = factorial * j
# print(factorial)
# factorial = 1
# if(int(a) == 0):
# print("1")
###################################
# 8- G. Factorial
# x= input()
# if int(x) > 1:
# # check for factors
# for i in range(2, int(x)):
# if (int(x) % i) == 0:
# print("NO")
# break
# else:
# print("YES")
# else:
# print("NO")
###################################
# 9- I. Palindrome
|
[
"carloskiro217@gmail.com"
] |
carloskiro217@gmail.com
|
269fbdd3f5abe741ec26a396538010638017122d
|
256c5100914832915192fb1958444389eff4a85f
|
/scripts/online/readStatusPyRoot.py
|
169b70d5aa9093dd5e077de193930daf3269176e
|
[] |
no_license
|
jetatar/snowShovel
|
ea9a45ddcb182ca6776752e08df9799ac6cd7439
|
69678a46cd8c20a57e4b8d8a8cc19b8900173775
|
refs/heads/master
| 2020-05-30T19:42:33.193932
| 2014-08-18T06:37:56
| 2014-08-18T06:37:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,417
|
py
|
import sys
import struct
from AriUtils import *
def readStatus(msg):
    """Parse a raw station message buffer into its component frames.

    Walks *msg* (a ROOT TSnIOBuffer) header-by-header and decodes any
    status, event and power frames found.

    Returns a tuple of:
        br     -- total bytes read from the buffer
        status -- TSnStatusUpdate or None
        power  -- TSnPowerReading or None
        evt    -- TSnEvent or None
        ssvers, prv, erv -- frame version numbers (ROOT.Long holders)

    Raises ValueError on an unrecognized header code.
    """
    status = None
    evt = None
    power = None
    # The ROOT I/O routines fill these by reference, hence ROOT.Long holders.
    ssvers = ROOT.Long()
    erv = ROOT.Long()
    prv = ROOT.Long()
    mcode = ROOT.Long()
    mlen = ROOT.Long()
    totb = msg.size()
    br = 0
    while ( br<totb ):
        # Each block starts with a header frame giving its type (mcode)
        # and payload length (mlen).
        br += ROOT.TSnIOHeaderFrame.PyReadFrom(msg, mcode, mlen)
        if (mcode==castHeaderCode(ROOT.TSnIOHeaderFrame.kStatusCode)):
            status = ROOT.TSnStatusUpdate()
            try:
                # First try the standalone status decode ...
                br += ROOT.TSnIOStatusFrame.BytesReadFrom(msg,
                                                          status, ssvers)
            except:
                # ... older frame versions bundle an event with the status.
                printout(vtype.kInfo,"Read without event failed. "
                         "Trying with event.")
                evt = ROOT.TSnEvent()
                br += ROOT.TSnIOStatusFrame.BytesReadFrom(msg,
                                                          status,
                                                          evt,
                                                          ssvers)
        elif (mcode==castHeaderCode(ROOT.TSnIOHeaderFrame.kEventCode)):
            # NOTE(review): this assumes a status frame preceded the event;
            # if not, 'status' is still None and the Get* calls below fail.
            evt = ROOT.TSnEvent()
            br += ROOT.TSnIOEventFrame.BytesReadFrom(msg,
                                                     evt,
                                                     status.GetWvLoseLSB(),
                                                     status.GetWvLoseMSB(),
                                                     status.GetWvBaseline(),
                                                     erv)
        elif (mcode==castHeaderCode(ROOT.TSnIOHeaderFrame.kPowerCode)):
            power = ROOT.TSnPowerReading()
            br += ROOT.TSnIOPowerFrame.BytesReadFrom(msg,
                                                     power, prv)
        else:
            raise ValueError("Unhandled block type {0:02x}".format(mcode))
    return br, status, power, evt, ssvers, prv, erv
def main():
if (len(sys.argv)<2):
print 'Need filename'
sys.exit()
infn = sys.argv[1]
with open(infn,'rb') as inf:
msg = ROOT.TSnIOBuffer(inf.read())
br, status, power, evt, ssvers, prv, erv = readStatus(msg)
if (status!=None):
status.Print()
if (power!=None):
power.Print()
if (evt!=None):
evt.Print()
if __name__=="__main__":
main()
|
[
"jtatar@uw.edu"
] |
jtatar@uw.edu
|
10ae3cef498bf99f949b001c20ee00663182d0d4
|
843ed1bff5e3184ae9428e40354d726341418572
|
/core/tests/providers_test.py
|
9e6b67868f50a75c3e6e15b031331f6c81935d3d
|
[] |
no_license
|
marquesds/juggler
|
22f0bdbefe780c75ea20b17aa5c527441763169d
|
de2252d5ac37db968af4e9cc74ca890575bf5ffd
|
refs/heads/master
| 2021-01-10T07:16:25.789252
| 2015-06-16T02:51:37
| 2015-06-16T02:51:37
| 36,475,538
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
from unittest import TestCase
from core.providers import *
class TestCassandraProvider(TestCase):
    """Smoke tests for CassandraProvider -- mostly unimplemented stubs."""

    def test_connection(self):
        # Construction is the whole test: it passes as long as connecting
        # to the configured cluster does not raise.
        # NOTE(review): the result is unused and there is no assertion.
        cassandra = CassandraProvider(host=['192.168.0.7'], port=9042, database='system')

    def test_close_connection(self):
        # TODO: not implemented yet
        pass

    def test_show_tables(self):
        # TODO: not implemented yet
        pass
|
[
"lucasmarquesds@gmail.com"
] |
lucasmarquesds@gmail.com
|
af5886042c006dc14b35138fd937d584761d7c01
|
8039370c0c6ad3b26fc8a3eed35a33df6cf4a00a
|
/example_optimization.py
|
d6422626ec4369035b73dd9c15feb999f77bb1c3
|
[] |
no_license
|
whateverforever/Differentiable-Polygons
|
13d22f6a8fb62306cf91647a14b435e5334b1a44
|
c18d262bb526584911c60c138d1b504ecca9d015
|
refs/heads/master
| 2022-12-25T05:20:25.371340
| 2020-10-02T16:11:53
| 2020-10-02T16:11:53
| 252,662,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,323
|
py
|
"""
End-to-end example of a simple optimization problem that makes use of the gradients
computed by the library. The problem is basically a very small inverse kinematics
problem with a unique solution.
"""
import timeit
import numpy as np # type:ignore
import matplotlib.pyplot as plt # type:ignore
from scipy import optimize # type: ignore
from diff_polygons import Point, Param, Vector
def parametric_pt(l, theta):
    """
    Outputs the distance of the end of a kinematic chain from a predetermined target
    point. Has parameters l and theta. Analytical solution is for l=1.23, theta=34deg
    """
    # Wrap the raw numbers as differentiable parameters.
    l = Param("l", l)
    theta = Param("theta", theta)

    # Chain: translate by l along x, rotate about the origin by theta,
    # then translate by l again.
    step = Vector(l, 0)
    origin = Point(0, 0)
    end_pt = origin.translate(step).rotate(origin, theta).translate(step)

    # Distance of the chain's end point from the fixed target.
    target = Point(2.24972, 0.6878)
    return (end_pt - target).norm()
def f(x):
    """Objective: distance to the target for parameters x = (l, theta)."""
    # NOTE: the original unpacked x into unused locals (l, theta); removed.
    dist = parametric_pt(*x)
    return dist.value
def jac(x):
    """Gradient of f with respect to (l, theta), as a flat array."""
    l, theta = x
    dist = parametric_pt(l, theta)
    # Collect the partials in the same (l, theta) order as x.
    return np.squeeze([dist.grads[name] for name in ("l", "theta")])
def main():
    """Solve the toy inverse-kinematics problem with CG -- once with the
    library's analytical Jacobian and once with SciPy's finite-difference
    gradients -- then plot the solution space / convergence and benchmark
    both variants."""
    print("Go ####################\n\n")
    # Initial guess: l = 1.0, theta = 40 degrees (optimum: l=1.23, 34deg).
    x0 = [1.0, np.radians(40)]
    xs = []
    def reporter(xk):
        # Callback: record every CG iterate for the plots below.
        xs.append(xk)
    # Solve with analytical gradients (jac=) and record the path ...
    res_jacced = optimize.minimize(f, x0, method="CG", jac=jac, callback=reporter)
    length_reached = parametric_pt(*res_jacced.x)
    # ... and the same solve with numerically estimated gradients.
    res_numeric = optimize.minimize(f, x0, method="CG")
    print(f"Analytical gradients needed {res_jacced.nfev} fun evals")
    print(f"Numerical gradients needed {res_numeric.nfev} fun evals")
    print("\n")
    print("x initial: {}".format(x0))
    print("x final: {}".format(res_jacced.x))
    print("\n")
    print("Initial distance: {}".format(f(x0)))
    print(
        "Final distance: {}, gradient norm: l={:.2f}, theta={:.2f}".format(
            length_reached.value,
            np.linalg.norm(length_reached.grads["l"]),
            np.linalg.norm(length_reached.grads["theta"]),
        )
    )
    print("\n")
    ## Plotting
    # Evaluate objective and Jacobian on a 50x50 grid spanning the iterates.
    xs = np.array(xs)
    xxs, yys = np.meshgrid(
        np.linspace(np.min(xs[:, 0]), np.max(xs[:, 0]), 50),
        np.linspace(np.min(xs[:, 1]), np.max(xs[:, 1]), 50),
    )
    zzs = np.zeros_like(xxs)
    jjs = np.zeros((xxs.shape[0], xxs.shape[1], 2))
    for ix, x in enumerate(np.linspace(np.min(xs[:, 0]), np.max(xs[:, 0]), 50)):
        for iy, y in enumerate(np.linspace(np.min(xs[:, 1]), np.max(xs[:, 1]), 50)):
            zzs[iy, ix] = f([x, y])
            jjs[iy, ix] = jac([x, y])
    fig, axes = plt.subplots(ncols=3)
    # Panel 1: objective landscape, optimization path and gradient field.
    a = axes[0].contourf(xxs, yys, zzs, levels=50)
    axes[0].contour(xxs, yys, zzs, levels=20, colors="k", linewidths=0.5)
    axes[0].plot(xs[:, 0], xs[:, 1], "-o")
    axes[0].quiver(xxs[:, ::6], yys[:, ::6], jjs[:, ::6, 0], jjs[:, ::6, 1], scale=20)
    plt.colorbar(a)
    axes[0].set_title("Solution Space")
    axes[0].set_xlabel("l")
    axes[0].set_ylabel("theta")
    # Panel 2: objective value per iteration.
    axes[1].plot(range(len(xs)), [f(x) for x in xs])
    axes[1].set_title("Convergence Plot")
    axes[1].set_ylabel("Objective Fun.")
    axes[1].set_xlabel("Iteration #")
    # Panel 3: theta-component of the Jacobian per iteration.
    # NOTE(review): despite the title, this plots jac(x)[1], not an
    # infinity norm -- confirm which was intended.
    axes[2].plot(range(len(xs)), [jac(x)[1] for x in xs])
    axes[2].set_title("Infty Norm of Jacobian")
    axes[2].set_ylabel("Norm of Jac.")
    axes[2].set_xlabel("Iteration #")
    plt.tight_layout()
    plt.show()
    # Benchmark: nexecs solves per timing sample, nrepeats samples for stats.
    nexecs = 3
    nrepeats = 50
    print("Going for statistical run time evaluation...")
    print(f"Runs of {nexecs}, times {nrepeats} repeats for std...")
    testcode_jacced = lambda: optimize.minimize(f, x0, method="CG", jac=jac)
    testcode_numeric = lambda: optimize.minimize(f, x0, method="CG")
    times_analytical = timeit.repeat(testcode_jacced, number=nexecs, repeat=nrepeats)
    times_numeric = timeit.repeat(testcode_numeric, number=nexecs, repeat=nrepeats)
    print(
        "Analytic grads take {:.3f}s (min: {:.3f}, std: {:.3f})".format(
            np.mean(times_analytical),
            np.min(times_analytical),
            np.std(times_analytical),
        )
    )
    print(
        "Numerical grads take {:.3f}s (min: {:.3f}, std: {:.3f})".format(
            np.mean(times_numeric), np.min(times_numeric), np.std(times_numeric)
        )
    )
if __name__ == "__main__":
main()
|
[
"max@thousandyardstare.de"
] |
max@thousandyardstare.de
|
a7fb99e11451b0915e084a6a5fae76d23a581732
|
fecaed6824eb6ed09c33f335c52410a6fe8fe212
|
/kata-2/test_kata2_simple.py
|
5476f7befde7be97d74aae45a26beddfbe969c09
|
[
"MIT"
] |
permissive
|
Laoujin/osherove-kata
|
d39876830d3f20e651da87bed07344a67f1ef967
|
0177bc3de5db1096ab2f47fc03189397cdcaf400
|
refs/heads/master
| 2021-05-16T03:06:48.004211
| 2017-11-02T21:45:45
| 2017-11-02T21:45:45
| 30,038,477
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
#!/usr/bin/python -tt
# String Calculator - Interactions
# http://osherove.com/tdd-kata-2/
import pytest
from mock import Mock
from Calculator import Calculator
####################### SIMPLE KATA-2 TESTS
# ex 1
def test_logger_writes_result():
    """The calculator reports every computed total through its logger."""
    log = Mock()
    result = Calculator(log).add("1,2,3")
    log.write.assert_called_with(result)
# ex 2
class ThrowingLogger:
    """Logger test double whose write() always fails, simulating an IO error."""

    def write(self, input):
        # BUG FIX: a bare ``raise`` with no active exception is itself an
        # error (RuntimeError on Py3, TypeError on Py2) rather than a
        # deliberate simulated failure; raise an explicit IO error instead
        # (matching the "IO error" intent of the commented-out mock above).
        raise IOError("IO error")
def test_failing_log_calls_service():
    """When logging blows up, the calculator notifies the web service."""
    service = Mock()
    calcer = Calculator(ThrowingLogger(), service)
    calcer.add("1,2,3")
    assert service.error.called
|
[
"woutervs@hotmail.com"
] |
woutervs@hotmail.com
|
ffce33738782672fb95e5b549b244d8f3d6b347a
|
ad901f2ecba8be5101a865f719460bd3daee4c3d
|
/euler12.py
|
87c6846d271c2bb3b1852fc793a9930b016cc33d
|
[] |
no_license
|
ajsabesirovic/EULER-EXERCISES
|
9f1893f2f6aacedaab8d8fe899fe5036acde69ba
|
61427ad60288b5fac6015208b6233898c7c7a7f1
|
refs/heads/main
| 2023-07-14T19:41:47.106809
| 2021-08-25T12:17:48
| 2021-08-25T12:17:48
| 397,927,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
def first_triangular_with_divisors(limit=500):
    """Return (t, divisors) for the first triangular number t with more
    than *limit* divisors.

    Divisors are found in O(sqrt(t)) by pairing d with t // d, instead of
    trial-dividing every integer up to t as the original loop did (which
    made the search intractable for limit=500).
    """
    n = 0
    t = 0
    while True:
        n += 1
        t += n  # t is now the n-th triangular number
        small, large = [], []
        d = 1
        while d * d <= t:
            if t % d == 0:
                small.append(d)
                if d != t // d:
                    large.append(t // d)
            d += 1
        divisors = small + large[::-1]  # ascending order
        if len(divisors) > limit:
            return t, divisors


if __name__ == "__main__":
    # Project Euler 12: first triangular number with over 500 divisors.
    t, divisors = first_triangular_with_divisors(500)
    print(t, divisors)
|
[
"besirovicajsa@gmail.com"
] |
besirovicajsa@gmail.com
|
39e8dfce80fabe8b3a7947dee64d4c360a3025f5
|
54de11abc7bf3ce3f89e5f502106c76bb14316bd
|
/manage.py
|
7bfbed5b9383f6b08617db783ce7bbdfc2b43cf7
|
[] |
no_license
|
lordofhell-666/energymitra
|
937058c753f73b4e1c2358cdc12484c4bf6a5061
|
855f7ddbe3787ee00f83da16e3272b7eefce87c5
|
refs/heads/main
| 2023-02-21T01:33:04.956955
| 2021-01-24T19:31:47
| 2021-01-24T19:31:47
| 332,535,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module unless the caller has
    # already configured one in the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'emitra.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command given on the command line.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"lordsaurabh.tripathy@gmail.com"
] |
lordsaurabh.tripathy@gmail.com
|
86692b6fe947d56b3a0c15ab8ec18205dc0077b1
|
fa95ad476b1393de3d3cf16cdefbe0c2dc1ce437
|
/DataCamp/02-intermediate-python-for-data-science/5-case-study-hacker-statistics/determine-your-next-move.py
|
d4b69da12e467b7dba2603af37375481d77d9444
|
[] |
no_license
|
vijaykumar79/Data-Science-Python
|
95a6f6ba5f112cceeaf2fbfe8be3e7185d67ce3d
|
0ed142ca1e9aaef19e11621bd9c54d29c1abe152
|
refs/heads/master
| 2020-03-29T19:22:09.814218
| 2020-01-02T08:23:14
| 2020-01-02T08:23:14
| 150,259,437
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
'''
Determine your next move
100xp
In the Empire State Building bet, your next move depends on the number of eyes you
throw with the dice. We can perfectly code this with an if-elif-else construct!
The sample code assumes that you're currently at step 50. Can you fill in the
missing pieces to finish the script?
Instructions
-Roll the dice. Use randint() to create the variable dice.
-Finish the if-elif-else construct by replacing ___:
-If dice is 1 or 2, you go one step down.
-if dice is 3, 4 or 5, you go one step up.
-Else, you throw the dice again. The number of eyes is the number of steps you go up.
-Print out dice and step. Given the value of dice, was step updated correctly?
'''
# Import numpy and seed the generator so the walk is reproducible.
import numpy as np

np.random.seed(123)

# Current position on the staircase.
step = 50

# Roll the dice once.
dice = np.random.randint(1, 7)

# Move: 1-2 -> one step down, 3-5 -> one step up, 6 -> roll again and
# climb that many steps.
if dice in (1, 2):
    step -= 1
elif dice in (3, 4, 5):
    step += 1
else:
    step += np.random.randint(1, 7)

# Show the roll and the resulting position.
print(dice)
print(step)
|
[
"noreply@github.com"
] |
vijaykumar79.noreply@github.com
|
e6c23d11dee1a24118f976e70873545bf5209e7c
|
04514f86523d393a56960b07eadb28a2ab83eee7
|
/old/09.06.00- jadi.py
|
e04923eedee58001af08c45d6434e433d70d20ad
|
[] |
no_license
|
nceh/my_First
|
6426162199cb42ecabe0fb309d19fd48e0b1276a
|
0e4b1128fe270da9d864ee725068767a9444ad08
|
refs/heads/main
| 2023-08-04T01:43:20.301945
| 2021-09-17T19:03:56
| 2021-09-17T19:03:56
| 401,407,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,089
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[3]:
# String task: lowercase the input, drop the vowels, and print each
# remaining character prefixed with a dot.
reshte = input()
# BUG FIX: the original called reshte.lower() without assigning the
# result, so uppercase vowels slipped past the replace() calls below.
reshte = reshte.lower()
for vowel in "aeiou":
    reshte = reshte.replace(vowel, "")
# Build ".x.y.z" with a join instead of quadratic += concatenation.
khoroji = "".join("." + ch for ch in reshte)
print(khoroji)
# In[53]:
# (translated from Persian) "trouble -- couldn't solve it"
# NOTE(review): this cell is broken: after split(), ``vorodi`` is a list,
# so ``int(vorodi)`` raises TypeError. The commented code below shows the
# abandoned attempts at sorting/printing the parts.
vorodi = (input())
print(vorodi)
vorodi=vorodi.replace("+",",")
vorodi=vorodi.split(",")
a =int(vorodi)
a
# a=[]
# a.append((vorodi))
# a
# a = []
# a.append(vorodi)
# a.sort()
# for i in a:
#     print(i)
# In[17]:
# Capitalize the first character of a name (rest of the string untouched).
def standard(name):
    sta=name[0].upper()+name[1:]
    return sta
# standard("ali")
# Read 10 names and echo each one capitalized.
for i in range(0,10):
    name = input("please enter your name: ")
    print(standard(name))
# In[ ]:
# Variant: lower-case the whole name first, then capitalize the first
# letter ('edame' is Persian for "rest/continuation").
def estandard(a):
    a=a.lower()
    first= a[0]
    edame= a[1::]
    first=first.upper()
    return first+edame
# NOTE(review): 'list' shadows the built-in list type in this cell.
list= []
for i in range(0,10):
    temp=estandard(input())
    list.append(temp)
for j in range(0,10):
    print(list[j])
# In[19]:
# Duplicate of the first cell, but collect all 10 names before printing.
def standard(name):
    sta=name[0].upper()+name[1:]
    return sta
list= []
for i in range(0,10):
    temp=standard(input())
    list.append(temp)
for j in range(0,10):
    print(list[j])
# In[ ]:
# Check whether the input contains 'h', 'e', "ll", 'o' in that order
# (a rough "hello" test based on FIRST occurrences via find()).
a= input()
a=a.lower()
vaziat = "NO"
# NOTE(review): ("ll") here is a bare non-empty string and is always
# truthy -- the membership test ("ll" in a) was almost certainly meant;
# the duplicate cell below has the corrected form.
if ('h' in a) and ("e" in a) and ("ll") and ("o" in a):
    vaziat="YES"
else:
    vaziat = "NO"
h=a.find('h')
e=a.find('e')
ll=a.find('ll')
o=a.find('o')
if vaziat=='YES' and o>ll and ll>e and e>h:
    vaziat= "YES"
else:
    vaziat="NO"
print(vaziat)
# In[21]:
# Corrected duplicate of the cell above ("ll" in a is a real membership test).
a = input()
a=a.lower()
vaziat = "NO"
if ("h" in a) and ("e" in a) and ("ll" in a) and ("o" in a):
    vaziat = "YES"
else:
    vaziat = "NO"
h = a.find("h")
e = a.find("e")
ll = a.find("ll")
o = a.find("o")
if vaziat =="YES" and o>ll and ll>e and e>h:
    vaziat = "YES"
else:
    vaziat = "NO"
print(vaziat)
# In[41]:
# Print YES when the text contains both "AB" and "BA" as substrings.
string = input()
# string = string.replace("AB","@")
# string = string.replace("BA","$")
# vaziat =""
if ("AB" in string) and ("BA" in string):
    vaziat = "YES"
else:
    vaziat = "NO"
# NOTE(review): if the task requires NON-overlapping occurrences,
# input "ABA" would wrongly print YES here -- confirm the problem spec.
print(vaziat)
# In[48]:
# Print index/value pairs; enumerate() replaces the range(len(...)) loop.
l = [1,3,4,5,6]
for i, value in enumerate(l):
    print(i, value)
# In[55]:
# Read three space-separated integers and print max - min.
s = input()
x1,x2,x3 = s.split(" ")
# print(x1,x2,x3)
x1= int(x1)
x2=int(x2)
x3=int(x3)
print(max(x1,x2,x3) - min(x1,x2,x3))
# In[2]:
# First line (length) is read but unused; second line is the numbers.
# Count how many entries are 0, 1 or 2, then print that count // 3.
a = input()
b = [int(l) for l in input().split(" ")]
# NOTE(review): 'temp' is just an element-by-element copy of b.
temp=[]
for i in range (0,len(b)):
    temp.append(b[i])
count=0
for j in range(0,len(temp)):
    if temp[j]==0 or temp[j]==1 or temp[j]==2:
        count+=1
print(int(count/3))
# ###### dictionary
# In[5]:
# Count character frequencies in a fixed sample string.
string = "salam nceh, halet chetore?"
count = dict()
for letter in string:
    if letter in count:
        count[letter] +=1
    else:
        count[letter] =1
# NOTE(review): this print sits AFTER the loop, so it shows only the last
# character seen plus the whole frequency dict -- probably meant to be
# inside the loop or simply print(count).
print(letter,count)
# In[6]:
# Same counting, but report one line per distinct character.
string = "nceh salam, chetorii? khobi?"
count = dict()
for letter in string:
    if letter in count:
        count[letter] +=1
    else:
        count[letter] = 1
for this_one in list(count.keys()):
    print("%s appeard %s times" % (this_one,count[this_one]))
# In[7]:
# Frequency count using dict.get with a default instead of membership tests.
string = "nceh salam, chetorii? khobi?"
count = {}
for ch in string:
    count[ch] = count.get(ch, 0) + 1
# Report one line per distinct character (insertion order).
for key in count:
    print("%s appeard %s times" % (key, count[key]))
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[33]:
def lo_counter(a):
    """Count characters of *a* that are unchanged by lower-casing.

    NOTE(review): characters with no case (digits, punctuation, spaces)
    compare equal to themselves and are counted by BOTH counters --
    confirm that is the intended tie behaviour.
    """
    l_counter = 0
    lo = a.lower()
    for i in range(len(a)):
        if a[i] == lo[i]:
            l_counter += 1
    return l_counter


def up_counter(a):
    """Count characters of *a* that are unchanged by upper-casing."""
    u_counter = 0
    up = a.upper()
    for j in range(len(a)):
        if a[j] == up[j]:
            # BUG FIX: the original incremented a misspelled name
            # ('u_counetr'), which raised NameError on the first match.
            u_counter += 1
    return u_counter


# Rewrite the input in its dominant case (lower case wins ties, matching
# the original's ==/>/else branches which all favoured lower).
a = input()
if lo_counter(a) >= up_counter(a):
    a = a.lower()
else:
    a = a.upper()
print(a)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
|
[
"nceh.mousavinezhad@gmail.com"
] |
nceh.mousavinezhad@gmail.com
|
24b8ac225cb3cb0a355715ff7bcf224953641e30
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_tropes.py
|
1f0a9f2ffa1386e6dabbb080f223e38e329f992c
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
#class header
class _TROPES():
def __init__(self,):
self.name = "TROPES"
self.definitions = trope
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['trope']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
b1bdecdd030b7788d2e7c1815e23128df224612e
|
af90ae97d8dd90663beedd81539604242e677bbc
|
/vivo_api.py
|
079f949c9c3a241ca093b15112d149ee002f1991
|
[] |
no_license
|
naomidb/api_tests
|
e9d1104a6d45995ded2dbd85da489d9995f8063f
|
52ad7429c5b92b76a1ce5f964018880566c685aa
|
refs/heads/develop
| 2022-12-11T16:40:39.535348
| 2019-04-02T17:54:21
| 2019-04-02T17:54:21
| 151,307,248
| 0
| 0
| null | 2022-12-08T02:55:34
| 2018-10-02T18:57:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,652
|
py
|
'''
A self-contained means of testing queries for the VIVO API.
You will need to edit this program with the query you want to run.
Usage:
python vivo_api.py (-q | -i | -d) <config_file>
Options:
-q Use the query endpoint
-i Use the update endpoint to insert (requires an account with admin rights)
-d Use the update endpoint to delete (requires an account with admin rights)
'''
import requests
import sys
import yaml
def get_config(config_path):
    """Load the YAML config file at *config_path* and return it as a dict.

    Prints a short message and exits the program if the file cannot be
    read or parsed.
    """
    try:
        with open(config_path, 'r') as config_file:
            # safe_load refuses to construct arbitrary Python objects;
            # yaml.load without an explicit Loader is unsafe/deprecated.
            config = yaml.safe_load(config_file.read())
    except (OSError, yaml.YAMLError):
        # Narrowed from a bare ``except``, which also swallowed things
        # like KeyboardInterrupt and SystemExit.
        print("Error: Check config file")
        exit()
    return config
def do_query(payload, endpoint):
    """Run a SPARQL SELECT against the query *endpoint*; return the response.

    *payload* must contain 'email', 'password' and the SPARQL under 'query'.
    """
    print("Query:\n:" + payload['query'])
    headers = {'Accept': 'application/sparql-results+json'}
    # BUG FIX: the original passed the undefined name ``endpoint2``,
    # raising NameError on every call.
    response = requests.get(endpoint, params=payload, headers=headers, verify=False)
    print(response)
    print(response.json())
    return response
def do_update(payload, endpoint):
    """POST a SPARQL UPDATE to *endpoint*; return the response.

    Callers (see main) build *payload* with the SPARQL under the 'update'
    key, so the original's print of payload['query'] raised KeyError.
    """
    # BUG FIX: print the key that update payloads actually carry.
    print("Query:\n" + payload['update'])
    response = requests.post(endpoint, params=payload, verify=False)
    print(response)
    return response
def main(q_type, config_path):
    """Dispatch to query/insert/delete based on the CLI flag *q_type*.

    q_type: '-q' (query endpoint), '-i' (insert) or '-d' (delete); the
    update flags require an account with admin rights.
    config_path: YAML file holding vivo_email / vivo_password and the
    q_endpoint / u_endpoint URLs.
    """
    config = get_config(config_path)
    email = config.get('vivo_email')
    password = config.get('vivo_password')
    if q_type == '-i':
        endpoint = config.get('u_endpoint')
        # Write insert query below
        query = """
        INSERT DATA {
            GRAPH <http://vitro.mannlib.cornell.edu/default/vitro-kb-2> {
            }
        }
        """
        # Update payloads carry the SPARQL under the 'update' key.
        payload = {
            'email': email,
            'password': password,
            'update': query,
        }
        do_update(payload, endpoint)
    elif q_type == '-d':
        endpoint = config.get('u_endpoint')
        # Write delete query below
        query = """
        DELETE DATA {
            GRAPH <http://vitro.mannlib.cornell.edu/default/vitro-kb-2> {
            }
        }
        """
        payload = {
            'email': email,
            'password': password,
            'update': query,
        }
        do_update(payload, endpoint)
    elif q_type == '-q':
        endpoint = config.get('q_endpoint')
        # Write query below
        query = """
        SELECT
        WHERE{
        }
        """
        # Read payloads carry the SPARQL under the 'query' key.
        payload = {
            'email': email,
            'password': password,
            'query': query,
        }
        do_query(payload, endpoint)
    else:
        print("Incorrect flag.")
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
|
[
"looseymoose@Naomis-Mistress.local"
] |
looseymoose@Naomis-Mistress.local
|
d8d87fc53e50b4ab9c6c03dcd73340f244b82016
|
0622f984a5094ec0d005a08eeeff79748941c316
|
/users/urls.py
|
51270d85aa3435a934749b5e532c0a5992191b4e
|
[] |
no_license
|
maximgamolin/hw04_tests
|
a1e16a22f1774f88e22685aec6de18b63ae5678d
|
549956cfccffedbb8b2d28013c3cce0bee63a3d6
|
refs/heads/master
| 2023-03-08T06:39:46.306143
| 2021-01-22T11:23:10
| 2021-01-22T11:23:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
from django.urls import path
from . import views
# User-account URLs: just the registration (sign-up) page for now.
urlpatterns = [
    path("signup/", views.SignUp.as_view(), name="signup")
]
|
[
"artur.g.r@yandex.ru"
] |
artur.g.r@yandex.ru
|
bfa138c92d7cd595defd96b5cb36d943a9d4d195
|
ccf3793329233d8407ca669683f59760c02817b1
|
/forum/migrations/0005_auto_20181214_1448.py
|
2ece165e4d4bfbe252b33aa08d34979a81a1306e
|
[] |
no_license
|
clavos/cartoonWar
|
44ae7ebf07496e452f3a94c74aaeb405e58fb50b
|
7e9fa84d4a502a33b1f2bebf8936cddf5d830079
|
refs/heads/master
| 2020-03-28T14:39:29.569402
| 2019-01-22T16:00:09
| 2019-01-22T16:00:09
| 148,508,662
| 0
| 0
| null | 2019-01-22T16:04:21
| 2018-09-12T16:21:23
|
Python
|
UTF-8
|
Python
| false
| false
| 790
|
py
|
# Generated by Django 2.1.3 on 2018-12-14 13:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django makemigrations; avoid hand-editing.

    Adds two optional foreign keys to Comment: a link to the Article it
    belongs to and a link to a parent Comment (for threaded replies).
    Both use DO_NOTHING on delete, so referential cleanup is manual.
    """

    dependencies = [
        ('forum', '0004_article_publish_date'),
    ]

    operations = [
        migrations.AddField(
            model_name='comment',
            name='article',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='comment_article', to='forum.Article'),
        ),
        migrations.AddField(
            model_name='comment',
            name='comment',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='comment_comment', to='forum.Comment'),
        ),
    ]
|
[
"flores28.elodie@gmail.com"
] |
flores28.elodie@gmail.com
|
a71d029a16dd788af6d70cf6594d5e4458b21fe2
|
7bed99cbe12386739a292b1495e7f4d85ffac8b2
|
/PasswordAE/architecture.py
|
b3699b82509d206f2eb77b73d913968b0070222a
|
[] |
no_license
|
w0r1dhe110/PLR
|
88e1d7562ed249a0a8958bd45341765d81fbb567
|
fcd8bee1e612da50349ceb1176f929c4bc2dc2a6
|
refs/heads/master
| 2023-06-30T14:48:40.568349
| 2021-08-02T08:32:19
| 2021-08-02T08:32:19
| 391,857,147
| 0
| 0
| null | 2021-08-02T07:43:09
| 2021-08-02T07:43:08
| null |
UTF-8
|
Python
| false
| false
| 16,668
|
py
|
import tensorflow as tf
FILTER_SIZE = 3
def enc0_16(x, latent_size, training):
    """Encoder: five ReLU conv1d layers, flatten, linear head of latent_size.

    ``training`` is accepted for interface compatibility but unused here.
    """
    # (filters, stride) per layer; strides 2 downsample 16 -> 8 -> 4.
    conv_plan = ((64, 1), (128, 2), (128, 1), (128, 1), (256, 2))
    for n_filters, stride in conv_plan:
        x = tf.layers.conv1d(x, n_filters, FILTER_SIZE, strides=stride,
                             padding='same', activation=tf.nn.relu)
    x = tf.layers.flatten(x)
    return tf.layers.dense(x, latent_size, activation=None)
def dec0_16(x, output_shape, training):
output_size = output_shape[0] * output_shape[1]
x = tf.layers.dense(x, 4 * 256, activation=tf.nn.relu)
x = tf.reshape(x, (-1, 4, 1, 256))
x = tf.layers.conv2d_transpose(x, 256, FILTER_SIZE, strides=(2, 1), padding='same', activation=tf.nn.relu)
x = tf.layers.conv2d_transpose(x, 128, FILTER_SIZE, strides=(1, 1), padding='same', activation=tf.nn.relu)
x = tf.layers.conv2d_transpose(x, 128, FILTER_SIZE, strides=(1, 1), padding='same', activation=tf.nn.relu)
x = tf.layers.conv2d_transpose(x, 128, FILTER_SIZE, strides=(2, 1), padding='same', activation=tf.nn.relu)
x = tf.layers.conv2d_transpose(x, 64, FILTER_SIZE, strides=(1, 1), padding='same', activation=tf.nn.relu)
x = tf.reshape(x, (-1, 16, 64))
x = tf.layers.conv1d(x, output_shape[1], FILTER_SIZE, strides=1, padding='same')
return x
ARCH0_16 = [enc0_16, dec0_16]
def enc1_16(x, latent_size, training):
    """Strided-conv encoder for length-16 sequences (16 -> 8 -> 4 -> 2),
    followed by a bias-free dense projection to `latent_size`.
    `training` is accepted for interface symmetry but unused here.
    """
    x = tf.layers.conv1d(x, 64, kernel_size=5, strides=2, padding='same', activation=tf.nn.relu)
    print(x.shape)
    x = tf.layers.conv1d(x, 128, kernel_size=3, strides=2, padding='same', activation=tf.nn.relu)
    print(x.shape)
    x = tf.layers.conv1d(x, 256, kernel_size=3, strides=2, padding='same', activation=tf.nn.relu)
    print(x.shape)
    x = tf.layers.flatten(x)
    print(x.shape)
    x = tf.layers.dense(x, latent_size, use_bias=False)
    return x


def dec1_16(x, output_shape, training):
    """Transposed-conv decoder mirroring enc1_16 (4 -> 16 along time).

    output_shape: (sequence_length, channels); only the channel count is
    consumed here. Returns per-position logits.
    """
    # Fix: removed an unused `output_size` local computed from output_shape.
    x = tf.layers.dense(x, 4 * 256, activation=tf.nn.relu)
    x = tf.reshape(x, (-1, 4, 1, 256))
    print(x.shape)
    x = tf.layers.conv2d_transpose(x, 256, 3, strides=(2, 1), padding='same', activation=tf.nn.relu)
    print(x.shape)
    x = tf.layers.conv2d_transpose(x, 128, 3, strides=(2, 1), padding='same', activation=tf.nn.relu)
    print(x.shape)
    x = tf.reshape(x, (-1, 16, 128))
    x = tf.layers.conv1d(x, output_shape[1], 5, strides=1, padding='same')
    print(x.shape)
    return x


ARCH1_16 = [enc1_16, dec1_16]
########################################################################################
def ResBlockDeepBNK(inputs, dim, with_batch_norm=True, training=True):
    """Pre-activation bottleneck residual block for 1-D features.

    Applies three (optional BN) -> ReLU -> conv stages — 1x1 down to
    dim//2 channels, 5-wide at dim//2, 1x1 back up to `dim` — and adds
    the result, scaled by 0.3, onto the input.
    """
    bottleneck = dim // 2

    def _bn_relu_conv(t, filters, kernel):
        # One pre-activation stage; BN is applied only when requested.
        if with_batch_norm:
            t = tf.layers.batch_normalization(t, training=training)
        t = tf.nn.relu(t)
        return tf.layers.conv1d(t, filters, kernel, padding='same')

    out = _bn_relu_conv(inputs, bottleneck, 1)
    out = _bn_relu_conv(out, bottleneck, 5)
    out = _bn_relu_conv(out, dim, 1)
    return inputs + (0.3 * out)
def encResnetBNK(x, latent_size, training=False):
    """Residual encoder: input conv, four bottleneck blocks, dense head."""
    use_bn = False
    width = 128
    x = tf.layers.conv1d(x, width, 5, padding='same')
    print(x.shape)
    for _ in range(4):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    x = tf.layers.flatten(x)
    print(x.shape)
    return tf.layers.dense(x, latent_size)


def decResnetBNK(x, output_shape, training=False):
    """Residual decoder: dense up-projection, four bottleneck blocks, then
    a 1x1 conv emitting output_shape[1] channels per position."""
    use_bn = False
    width = 128
    x = tf.layers.dense(x, output_shape[0] * width)
    print(x.shape)
    x = tf.reshape(x, [-1, output_shape[0], width])
    print(x.shape)
    for _ in range(4):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    logits = tf.layers.conv1d(x, output_shape[1], 1, padding='same')
    print(logits.shape)
    return logits


ARCH_resnetBNK0 = [encResnetBNK, decResnetBNK]
########################################################################################
def encResnetBNK1(x, latent_size, training=False):
    """Residual encoder: input conv, five bottleneck blocks, dense head."""
    use_bn = False
    width = 128
    x = tf.layers.conv1d(x, width, 5, padding='same')
    print(x.shape)
    for _ in range(5):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    x = tf.layers.flatten(x)
    print(x.shape)
    return tf.layers.dense(x, latent_size)


def decResnetBNK1(x, output_shape, training=False):
    """Residual decoder: dense up-projection, five bottleneck blocks, then
    a 1x1 conv emitting output_shape[1] channels per position."""
    use_bn = False
    width = 128
    x = tf.layers.dense(x, output_shape[0] * width)
    print(x.shape)
    x = tf.reshape(x, [-1, output_shape[0], width])
    print(x.shape)
    for _ in range(5):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    logits = tf.layers.conv1d(x, output_shape[1], 1, padding='same')
    print(logits.shape)
    return logits


ARCH_resnetBNK1 = [encResnetBNK1, decResnetBNK1]
########################################################################################
def encResnetBNK2(x, latent_size, training=False):
    """Residual encoder: input conv, six bottleneck blocks, dense head."""
    use_bn = False
    width = 128
    x = tf.layers.conv1d(x, width, 5, padding='same')
    print(x.shape)
    for _ in range(6):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    x = tf.layers.flatten(x)
    print(x.shape)
    return tf.layers.dense(x, latent_size)


def decResnetBNK2(x, output_shape, training=False):
    """Residual decoder: dense up-projection, six bottleneck blocks, then
    a 1x1 conv emitting output_shape[1] channels per position."""
    use_bn = False
    width = 128
    x = tf.layers.dense(x, output_shape[0] * width)
    print(x.shape)
    x = tf.reshape(x, [-1, output_shape[0], width])
    print(x.shape)
    for _ in range(6):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    logits = tf.layers.conv1d(x, output_shape[1], 1, padding='same')
    print(logits.shape)
    return logits


ARCH_resnetBNK2 = [encResnetBNK2, decResnetBNK2]
########################################################################################
def encResnetBNK3(x, latent_size, training):
    """Residual encoder with batch norm: input conv, six bottleneck blocks,
    dense head. `training` gates the BN statistics."""
    use_bn = True
    width = 128
    x = tf.layers.conv1d(x, width, 5, padding='same')
    print(x.shape)
    for _ in range(6):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    x = tf.layers.flatten(x)
    print(x.shape)
    return tf.layers.dense(x, latent_size)


def decResnetBNK3(x, output_shape, training):
    """Residual decoder with batch norm: dense up-projection, six bottleneck
    blocks, then a 1x1 conv emitting output_shape[1] channels."""
    use_bn = True
    width = 128
    x = tf.layers.dense(x, output_shape[0] * width)
    print(x.shape)
    x = tf.reshape(x, [-1, output_shape[0], width])
    print(x.shape)
    for _ in range(6):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    logits = tf.layers.conv1d(x, output_shape[1], 1, padding='same')
    print(logits.shape)
    return logits


ARCH_resnetBNK3 = [encResnetBNK3, decResnetBNK3]
########################################################################################
def encResnetBNK4(x, latent_size, training=False):
    """Residual encoder: input conv, seven bottleneck blocks, dense head."""
    use_bn = False
    width = 128
    x = tf.layers.conv1d(x, width, 5, padding='same')
    print(x.shape)
    for _ in range(7):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    x = tf.layers.flatten(x)
    print(x.shape)
    return tf.layers.dense(x, latent_size)


def decResnetBNK4(x, output_shape, training=False):
    """Residual decoder: dense up-projection, seven bottleneck blocks, then
    a 1x1 conv emitting output_shape[1] channels per position."""
    use_bn = False
    width = 128
    x = tf.layers.dense(x, output_shape[0] * width)
    print(x.shape)
    x = tf.reshape(x, [-1, output_shape[0], width])
    print(x.shape)
    for _ in range(7):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    logits = tf.layers.conv1d(x, output_shape[1], 1, padding='same')
    print(logits.shape)
    return logits


ARCH_resnetBNK4 = [encResnetBNK4, decResnetBNK4]
########################################################################################
def INAE_enc(x, latent_size, training=False):
    """Fully-convolutional encoder: input conv plus seven residual blocks.

    NOTE(review): `latent_size` is unused — the code keeps the sequence
    layout instead of projecting to a flat latent vector.
    """
    use_bn = False
    width = 128
    x = tf.layers.conv1d(x, width, 5, padding='same')
    print(x.shape)
    for _ in range(7):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    return x


def INAE_dec(x, output_shape, training=False):
    """Fully-convolutional decoder: seven residual blocks, then a 1x1 conv
    emitting output_shape[1] channels per position."""
    use_bn = False
    width = 128
    for _ in range(7):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    logits = tf.layers.conv1d(x, output_shape[1], 1, padding='same')
    print(logits.shape)
    return logits


ARCH_INAE = [INAE_enc, INAE_dec]
########################################################################################
def INAE_enc1(x, latent_size, training=False):
    """Shallower fully-convolutional encoder: input conv plus four residual
    blocks. `latent_size` is unused (sequence layout is kept)."""
    use_bn = False
    width = 128
    x = tf.layers.conv1d(x, width, 5, padding='same')
    print(x.shape)
    for _ in range(4):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    return x


def INAE_dec1(x, output_shape, training=False):
    """Shallower fully-convolutional decoder: four residual blocks, then a
    1x1 conv emitting output_shape[1] channels per position."""
    use_bn = False
    width = 128
    for _ in range(4):
        x = ResBlockDeepBNK(x, width, with_batch_norm=use_bn, training=training)
        print(x.shape)
    logits = tf.layers.conv1d(x, output_shape[1], 1, padding='same')
    print(logits.shape)
    return logits


ARCH_INAE2 = [INAE_enc1, INAE_dec1]
########################################################################################
|
[
"pasquini@di.uniroma1.it"
] |
pasquini@di.uniroma1.it
|
4eda93ef31bc1a3fd2991cda9afc50a579a4812f
|
dcfec1645e18e83383c82282e0064d16d57f2917
|
/athospy/top_fcns.py
|
173b542a3fff2b732e06e6bc19cb973a19209c23
|
[] |
no_license
|
Saynah/AthosPy
|
4986d3174d3e445d086aaf6a1128388ab2b51e0c
|
65841e7a39c68f27c770de30e57d579b3df32b1e
|
refs/heads/master
| 2020-05-19T11:26:29.843050
| 2015-08-08T17:07:17
| 2015-08-08T17:07:17
| 40,409,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,907
|
py
|
from __future__ import division
import os
import pandas as pd
import fnmatch
import difflib
import shutil
import numpy as np
# athospy packages
import visualization as viz
import calcs as clc
import fileops as fop
# ML
from sklearn import svm, metrics
def label_folders(basepath, write_dst):
    '''Return dataframe containing labels parsed from sub-folders directly
    under basepath. Write a record of what was done to disk.
    '''
    parsed_rows = []
    folder_paths = []
    for entry in os.listdir(basepath):
        full_path = os.path.join(basepath, entry)
        if not os.path.isdir(full_path) or entry.startswith('.'):
            continue  # skip plain files and hidden folders
        folder_paths.append(full_path)
        row, names = fop.parse_folder_name(entry)
        parsed_rows.append(row)
    # NOTE: `names` deliberately leaks from the last loop iteration, as in
    # the original — all folders are assumed to yield the same columns.
    df = _to_dataframe(parsed_rows, names)
    df['Path'] = folder_paths
    df['Person_id'] = _name_to_id(df.First_Last)
    return _select_parsed(df, write_dst)
def label_csvfiles_by_folder(basepath, df_folders, write_dst):
    '''Return dataframe for all csv files under each folder in df_folders,
    with a column linking back to the folder's row index.
    Write a record of what was done to disk.
    '''
    # NOTE(review): `basepath` is unused; each folder row carries its path.
    per_folder = []
    for folder_path, folder_ix in zip(df_folders.Path, df_folders.index):
        per_folder.append(label_csvfiles(folder_path, folder_ix))
    combined = pd.concat(per_folder, ignore_index=True)
    return _select_parsed(combined, write_dst)
def label_csvfiles(basepath, id=-1):
    '''Return dataframe containing labels parsed from every csv file under
    basepath (recursively), plus a Folder_id column indexing the top-level
    folder the files came from.
    '''
    rows = []
    csv_paths = []
    for root, _dirnames, filenames in os.walk(basepath):
        for name in fnmatch.filter(filenames, '*.csv'):
            csv_paths.append(os.path.join(root, name))
            row, colnames = fop.parse_csv_name(name)
            rows.append(row)
    # `colnames` comes from the last parsed file, as in the original.
    df = _to_dataframe(rows, colnames)
    df['Path'] = csv_paths
    df['Folder_id'] = [id] * len(df)  # link back to the parent folder row
    return df
def join_and_anonymize(df_files, df_folders, write_dst):
    '''Join file and folder tables and remove person names.
    '''
    folder_cols = df_folders[['Person_id', 'Trial']]
    file_cols = df_files[['Exercise', 'Legside', 'Resistance', 'Path', 'Folder_id']]
    joined = pd.merge(folder_cols, file_cols, left_index=True, right_on='Folder_id')
    joined = _rename_csvfiles(joined, write_dst)  # copy files; rewrite Path
    # the joining index is no longer needed once the merge is done
    return joined.drop('Folder_id', axis=1)
def get_best_match(item, possible):
    '''Return the closest fuzzy match to `item` from the `possible` list,
    or None when no candidate reaches the similarity cutoff (0.5).'''
    matches = difflib.get_close_matches(item, possible, n=1, cutoff=0.5)
    return matches[0] if matches else None
def load_and_plot(df_files, write_dir='', plot_on=True):
    '''Loads and plots the csv files in df_files. Optionally saves a pdf per
    file (named by row index) into `write_dir`. The row's labels are used
    as the plot title.
    '''
    # NOTE(review): df.ix is removed in modern pandas; this code targets an
    # older pandas release.
    for i in df_files.index:
        row = df_files.ix[i]
        df = fop.load_emg(row.Path)
        title = str(row.tolist())
        fig, _ = viz.plot_emg(df, title=title)
        if write_dir:
            # Fix: was a bare `except: pass`, which also hid real failures
            # (permissions, bad path); only tolerate an existing directory.
            try:
                os.mkdir(write_dir)
            except OSError:
                pass
            fig.savefig(os.path.join(write_dir, str(i) + '.pdf'))
        if not plot_on:
            viz.plt.close()
def check_quality(df_files):
    '''Compile quality metrics into a dataframe and plot their distribution.
    '''
    metrics_per_file = [clc.quality(p) for p in df_files.Path]
    df_quality = pd.DataFrame(metrics_per_file, index=df_files.index)
    viz.plot_qc(df_quality)
    return df_quality
def exclude_by_quality(df_files, df_quality, write_dir):
    '''Remove files that don't match the quality criteria.
    Also keep records of the removed files in the `write_dir` folder
    '''
    n_orig_files = len(df_files)
    # Rejection masks; thresholds look empirical — confirm with the data.
    short = df_quality.Length < 500            # too few samples
    repeats = df_quality.MaxFrac_repeat > 60   # dominated by one repeated value
    zeros = df_quality.MaxFrac_zero > 30       # dominated by zeros
    noisy = df_quality.Median > 100            # high baseline level
    # Recreate the record directory from scratch; the bare except tolerates
    # the directory not existing yet (NOTE(review): it also hides other
    # errors such as permission problems).
    try:
        shutil.rmtree(write_dir)
    except:
        pass
    os.mkdir(write_dir)
    # write list of bad files as a record
    df_files[short].to_csv(os.path.join(write_dir, 'files_short.csv'))
    df_files[repeats].to_csv(os.path.join(write_dir, 'files_repeats.csv'))
    df_files[zeros].to_csv(os.path.join(write_dir, 'files_zeros.csv'))
    df_files[noisy].to_csv(os.path.join(write_dir, 'files_noisy.csv'))
    # A file failing any single criterion is excluded.
    is_bad = short | repeats | zeros | noisy
    df_files = df_files[~is_bad]
    print 'excluded %d files of %d' % (n_orig_files - len(df_files), n_orig_files)
    return df_files
def split_by_personid(files, frac_apprx):
    '''Split files into two parts by person id.

    Person ids below the cut point go left; all remaining ids (including
    the boundary id) go right, so every row ends up in exactly one part.
    '''
    n_persons = len(files.Person_id.unique())
    n_left = int(frac_apprx * n_persons)
    files_left = files[files.Person_id < n_left]
    # Fix: was `> n_left`, which silently dropped every row whose
    # Person_id equals the cut point.
    files_right = files[files.Person_id >= n_left]
    return files_left, files_right
def get_features(files, n_sec, standardize=False):
    '''Sample data, calculate features, and collapse into a data frame.

    For each file: spectral peak frequencies (reversed so f1 < f2 —
    presumably; confirm against clc.fft_df), mean signal peaks per channel,
    and per-channel phase. Optionally z-score each feature column.
    '''
    index = files.index
    data_dict = fop.sample_data(files, n_sec)
    freq, peaks, phase = [], [], []
    for ix in index:
        df = data_dict[ix]
        _, _, fc = clc.fft_df(df)
        freq.append(fc[::-1])
        peaks.append(clc.meanpeaks_df(df, 0.5))
        phase.append(clc.phase_df(df))
    # `df` deliberately leaks from the loop: channel names are taken from
    # the last sampled file and assumed identical across files.
    peak_cols = df.columns.values.tolist()
    phase_cols = ['p_%s' % s for s in peak_cols]
    columns = peak_cols + ['f1', 'f2'] + phase_cols
    arr = np.concatenate((peaks, freq, phase), axis=1)
    feat = pd.DataFrame(arr, index=index, columns=columns)
    if standardize:
        # column-wise z-scoring
        feat = (feat - feat.mean()) / feat.std()
    return feat
def prediction_report(predicted, labels, classes, plot_on=True, print_mat=''):
    '''Print accuracy and a per-class report; optionally plot or print the
    confusion matrix ('mat' = raw counts, 'frac' = row-normalized).
    Returns the overall percent correct.
    '''
    avg_correct = sum(predicted==labels) / len(predicted) * 100
    print '\npercent correct:', avg_correct
    # per-class sample counts, used to normalize the confusion matrix rows
    counts = labels.groupby(labels.values).count().values
    mat = metrics.confusion_matrix(labels, predicted)
    # print mat
    print metrics.classification_report(labels, predicted)
    # fraction of each true class predicted as each label
    frac_predicted = (mat.T / counts).T
    if plot_on:
        viz.plot_confusion(frac_predicted, classes)
    if print_mat == 'mat':
        print mat
    elif print_mat == 'frac':
        print frac_predicted
    return avg_correct
# Helper functions
###################################################
def _to_dataframe(labels, names):
if len(labels) == 0:
return []
else:
return pd.DataFrame(labels, columns=names)
def _select_parsed(df, write_dst):
    '''Save record of all items in DataFrame.
    Return only items that were successfully parsed.
    '''
    df.to_csv(write_dst)
    # A row counts as parsed when its second column is non-null (the first
    # parsed field after the index).
    parsed = df[df.iloc[:, 1].notnull()]
    print 'Parsed %d of %d items. See record at "%s"' % (
        len(parsed), len(df), write_dst)
    return parsed
def _name_to_id(S_name):
'''replace names in series with numberical identifiers
'''
name_unq = S_name.unique()
left = pd.DataFrame({'name': S_name})
right = pd.DataFrame({'name': name_unq,
'id': range(len(name_unq))})
return pd.merge(left, right)['id']
def _rename_csvfiles(df_files, write_dir):
    '''Copy each csv into `write_dir`, renamed to "<row index>.csv", and
    rewrite the Path column to point at the copies.
    '''
    # Recreate the output directory from scratch; the bare except tolerates
    # it not existing (NOTE(review): it also hides other OS errors).
    try:
        shutil.rmtree(write_dir)
    except:
        pass
    os.mkdir(write_dir)
    path_new = []
    ix__path = zip(df_files.index, df_files.Path)
    for ix, src in ix__path:
        dst = os.path.join(write_dir, str(ix) + '.csv')
        shutil.copy(src, dst)
        path_new.append(dst)
    print 'Copied %d files and renamed by file_id. See "%s"' % (
        len(df_files), write_dir)
    df_files.Path = path_new
    return df_files
|
[
"msena505+git@gmail.com"
] |
msena505+git@gmail.com
|
dc8d52706c9e04a981453256b24c9e2111cb8438
|
29a0d522bc4caeb7efad8c59f9b2bab88e7cb274
|
/Practical1/electricitybillestimator2.py
|
fd605d1f04af85d76ac1a63337f2e5862b3e0777
|
[] |
no_license
|
Kphoo/Programming2
|
20662a3594394d4b524d33adf149d6d4f18bf940
|
6ca3591d2e461b34dc2aeccb32706a6559a2b877
|
refs/heads/master
| 2020-04-09T11:27:49.498005
| 2019-01-16T17:50:27
| 2019-01-16T17:50:27
| 160,310,417
| 0
| 0
| null | 2018-12-04T07:36:21
| 2018-12-04T06:33:28
|
Python
|
UTF-8
|
Python
| false
| false
| 840
|
py
|
def bill_estimator():
    """Prompt for an electricity tariff and usage, then print the estimated bill.

    Re-displays the menu and re-prompts until a valid tariff (11 or 31) is
    entered. Non-numeric input still raises ValueError, as before.
    """
    # Fix: replaced the dead `while 1: ... break` construct plus recursion
    # with a plain retry loop; the two tariff branches were identical
    # except for the rate, so they are collapsed via a lookup table.
    tariffs = {11: 0.244618, 31: 0.136928}
    menu = """11 - TARIFF_11 = 0.244618
    31 - TARIFF_31 = 0.136928
    """
    while True:
        print(menu)
        choice = int(input("Which tariff? 11 or 31: "))
        if choice in tariffs:
            daily_use = float(input("Enter daily use in kWh: "))
            billing_days = int(input("Enter number of billing days: "))
            bill = tariffs[choice] * daily_use * billing_days
            print("Estimated bill:$ {:.2f}".format(bill))
            return
        print("Invalid input")
bill_estimator()
|
[
"kphoothawnaing@my.jcu.edu.au"
] |
kphoothawnaing@my.jcu.edu.au
|
e6b01ab9367986908a5c83dd98fa4dde404f632f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03379/s654666202.py
|
c7f75f4f497f62efa5e1b77c9ad3d1f24f1abba9
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
# For each element, print the median of the other N-1 elements: values in
# the lower half of the sorted order get the upper median, and vice versa.
n = int(input())
values = list(map(int, input().split()))
ordered = sorted(values)
lower_median = ordered[n // 2 - 1]
upper_median = ordered[n // 2]
for v in values:
    print(upper_median if v <= lower_median else lower_median)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5faaccfffd1b0b28344509c56aeff8b0c12956bb
|
808707d86f2cb92fc4b0c235b3b2e10d81d578da
|
/src/all_offense_month_from.py
|
0bb72831d99803809841f9d07f25750bef4227d1
|
[] |
no_license
|
HoodPanther/crimeproject
|
3eba91be240b17c87b1e2333ead5a34a9564bc6b
|
87f55fcbc375d7c75df10f7bc0017444d28d015f
|
refs/heads/master
| 2021-06-16T13:00:56.988862
| 2017-05-20T18:52:09
| 2017-05-20T18:52:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
from __future__ import print_function
import sys
from operator import add
from pyspark import SparkContext
from csv import reader
if __name__ == "__main__":
    sc = SparkContext()
    lines = sc.textFile(sys.argv[1], 1)
    # Parse CSV rows, dropping the header (first field 'CMPLNT_NUM').
    lines = lines.mapPartitions(lambda x: reader(x)).filter(lambda x: x[0] != 'CMPLNT_NUM')
    # Count complaints per month (first two chars of a 10-char date field);
    # '-1' means invalid or missing.
    # Fix: the original left a trailing backslash after the final .map(),
    # which continued the expression into the saveAsTextFile line — a
    # syntax error.
    results = lines.map(lambda x: (x[1][0:2], 1) if len(x[1]) == 10 else ('-1', 1)) \
        .reduceByKey(add) \
        .sortBy(lambda x: x[0]) \
        .map(lambda x: x[0] + '\t' + str(x[1]))
    results.saveAsTextFile('all_offense_month_from.out')
    sc.stop()
|
[
"da1933@nyu.edu"
] |
da1933@nyu.edu
|
8ed511529f375e6018227358ffe009f42d6fbfa0
|
8a0e14299d8b915c0a909cf9fa9a86589dc63d76
|
/python/ray/rllib/agents/qmix/__init__.py
|
a5a1e4993b675531147b7cbe073dad0d4907c3e3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
natashamjaques/ray
|
795e4271c3c5f3e261327afea40b81ffe6f362ac
|
aca9dd5ee7a8fef508a5383fdd26ad8ccdcb16e4
|
refs/heads/master
| 2020-04-12T05:58:15.680359
| 2019-03-06T22:08:10
| 2019-03-06T22:08:10
| 162,337,948
| 3
| 2
|
Apache-2.0
| 2018-12-18T19:47:02
| 2018-12-18T19:47:01
| null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ray.rllib.agents.qmix.qmix import QMixAgent, DEFAULT_CONFIG
from ray.rllib.agents.qmix.apex import ApexQMixAgent
__all__ = ["QMixAgent", "ApexQMixAgent", "DEFAULT_CONFIG"]
|
[
"noreply@github.com"
] |
natashamjaques.noreply@github.com
|
0a72d6e577ced1da22d4cbd21c1a035716957133
|
ce776194946ac395bde4d494e56a09a62dcd575f
|
/week4/RL8.py
|
5a8186e9e4e6595df89a4e137e40edffe1b02f6a
|
[] |
no_license
|
ZhangLiangyu5411/DRL-GWZQ
|
f607590852bdbc27558f0f8446e19a8ba3d79794
|
4b7a93bce6de676f1b54d27e2bb6df2f3561c829
|
refs/heads/master
| 2022-01-26T19:14:10.739940
| 2019-05-23T08:53:31
| 2019-05-23T08:53:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,236
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 23 23:06:00 2019
@author: Xianglin ZHOU
Q learning agent with fake goal and observer
"""
import numpy as np
import pandas as pd
import time
import copy
import tkinter as tk
# Movement actions; ID_ACTIONS maps them to the integer indices the agents
# and the maze use.
ACTIONS = ['LEFT', 'RIGHT', 'UP', 'DOWN']
ID_ACTIONS = list(range(len(ACTIONS)))  # 0:left, 1:right, 2:up, 3:down
# Q-learning hyper-parameters.
GAMMA = 0.9    # discount factor
ALPHA = 0.1    # learning rate
EPSILON = 0.9  # probability of choosing the greedy action
LAMBDA = 0.9   # eligibility-trace decay (n-step Sarsa agent)
EPISODES = 30  # training episodes
# Grid-world geometry: SIZE is the cell edge in pixels, X/Y the grid extent.
SIZE = 100
X = 9
Y = 9
# Wall cells surrounding the grid: four corners, then each border cell.
# NOTE(review): the first corner is [-X, -1]; [-1, -1] looks intended —
# confirm (harmless either way, both lie outside the playable grid).
WALLS = [[-X, -1], [-1, Y], [X, Y], [X, -1]]
for i in range(0, X):
    WALLS = WALLS + [[i , -1]] + [[-1, i]] + [[i, Y]] + [[X, i]]
BARRIERS = [] + WALLS
FAKE = [[6,4]]   # decoy ("fake") goal the agent is first steered toward
GOALS = [[4,6]]  # real goal
class Maze(tk.Tk, object):
    """Tkinter visualisation of the grid world.

    Draws the cell grid, the real goal (red square), the decoy goal
    (orange square) and the agent (black oval), and animates agent moves.
    """
    def __init__(self, size, x, y):
        # size: cell edge in pixels; x, y: grid extent in cells.
        super(Maze, self).__init__()
        self.title('maze')
        self.goals = GOALS
        self.fake = FAKE
        self.barriers = BARRIERS
        self.size = size
        self.x_total = x
        self.y_total = y
        self.geometry('1800x900')
        self._build_maze()
    def _build_maze(self):
        # Build the canvas, grid lines, goal markers and the agent marker.
        self.canvas = tk.Canvas(self, bg = 'white', height = self.size * self.y_total,
                                width = self.size * self.x_total)
        # Vertical grid lines, 5 px thick (j offsets five 1-px lines).
        for i in range(self.x_total):
            for j in range(5):
                self.canvas.create_line(self.size - 2 + j + self.size*i, 0,
                                        self.size - 2 + j + self.size*i, self.size*self.y_total)
        # Horizontal grid lines.
        # NOTE(review): the x-extent below uses y_total; correct for this
        # square 9x9 grid, wrong for non-square grids — confirm.
        for i in range(self.y_total):
            for j in range(5):
                self.canvas.create_line(0, self.size - 2 + j + self.size*i,
                                        self.size*self.y_total, self.size - 2 + j + self.size*i)
        # Goal markers and agent, each inset 10 px inside its cell.
        self.food = self.canvas.create_rectangle(10 + self.goals[0][0]*self.size, 10 + self.goals[0][1]*self.size,(self.goals[0][0] + 1)*self.size - 10, (self.goals[0][1] + 1)*self.size - 10,fill = 'red')
        self.fakefood = self.canvas.create_rectangle(10 + self.fake[0][0]*self.size, 10 + self.fake[0][1]*self.size,(self.fake[0][0] + 1)*self.size - 10, (self.fake[0][1] + 1)*self.size - 10,fill = 'orange')
        self.mouse = self.canvas.create_oval(10, 10, 10 + self.size - 20, 10 + self.size - 20, fill = 'black')
        # pack all
        self.canvas.pack()
    def reset(self):
        # Put the agent marker back in the top-left start cell.
        self.update()
        time.sleep(0.5)
        self.canvas.delete(self.mouse)
        self.mouse = self.canvas.create_oval(10, 10,
                                             10 + self.size - 20, 10 + self.size - 20,
                                             fill = 'black')
    def move_to(self, action):
        # Animate one step; action indices: 0:left, 1:right, 2:up, 3:down.
        self.update()
        time.sleep(0.05)
        if action == ID_ACTIONS[0]:
            self.canvas.move(self.mouse, -self.size, 0)
        elif action == ID_ACTIONS[1]:
            self.canvas.move(self.mouse, self.size, 0)
        elif action == ID_ACTIONS[2]:
            self.canvas.move(self.mouse, 0, -self.size)
        else:
            self.canvas.move(self.mouse, 0, self.size)
    def render(self):
        # Refresh the window, rate-limited to ~10 fps.
        time.sleep(0.1)
        self.update()
class Observer(object):
    """Environment mediator: applies an action to a grid state and returns
    the next state plus a shaped reward.

    Reward phases (see env_reaction): while ``RL.simulation`` is False the
    agent is rewarded for approaching/reaching the decoy goal (FAKE);
    afterwards, for approaching/reaching the real goal (GOALS). Relies on
    the module-level ``env`` (Maze) and ``RL`` (agent) objects existing.
    """

    def __init__(self, init_state=None, current_state=None, goals=None, barriers=None):
        # Fix: the original used mutable default arguments ([]), which are
        # shared across every Observer instance; default to fresh lists.
        self.init_state = [] if init_state is None else init_state
        self.current_state = [] if current_state is None else current_state
        self.goals = [] if goals is None else goals
        self.barriers = [] if barriers is None else barriers

    # give reward based on the difference of distance to the fake goal and
    # the real goal
    def distance(self, position1, position2):
        """Manhattan (L1) distance between two [x, y] grid positions."""
        return abs(position1[0] - position2[0]) + abs(position1[1] - position2[1])

    def env_reaction(self, state, action):
        """Apply `action` (index into ID_ACTIONS) to `state`.

        Returns (new_state, reward). Moves into BARRIERS leave the state
        unchanged; legal moves also animate the maze via env.move_to().
        """
        new_state = copy.copy(state)
        # 0:left, 1:right, 2:up, 3:down
        if action == ID_ACTIONS[0]:
            new_state[0] -= 1
        elif action == ID_ACTIONS[1]:
            new_state[0] += 1
        elif action == ID_ACTIONS[2]:
            new_state[1] -= 1
        else:
            new_state[1] += 1
        if new_state in BARRIERS:
            new_state = state
        else:
            env.move_to(action)
        if not RL.simulation:
            # Phase 1: shaped reward toward the decoy goal.
            if new_state in FAKE:
                r = 0.1
                RL.simulation = True  # permanently switch to phase 2
            elif self.distance(state, FAKE[0]) >= self.distance(new_state, FAKE[0]):
                r = 0.01
            else:
                r = -0.01
        else:
            # Phase 2: shaped reward toward the real goal.
            if new_state in GOALS:
                r = 1
            elif self.distance(state, GOALS[0]) >= self.distance(new_state, GOALS[0]):
                r = 0.1
            else:
                r = -0.1
        return new_state, r
class RL_Agent(object):
    """Base class for tabular agents: epsilon-greedy action selection over
    a pandas-backed Q-table that grows lazily as new states are seen.
    """
    def __init__(self, actions = ID_ACTIONS, learning_rate = ALPHA, reward_decay = GAMMA, e_greedy = EPSILON):
        self.actions = actions
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon = e_greedy
        # Rows are states (added on demand by check_state_exist), columns
        # are action ids.
        self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)
    def choose_action(self, state):
        """Epsilon-greedy: exploit with probability epsilon (ties broken
        uniformly at random), otherwise pick a uniformly random action."""
        self.check_state_exist(state)
        if np.random.uniform() < self.epsilon:
            scores_of_actions = self.q_table.loc[state, :]
            action = np.random.choice(scores_of_actions[scores_of_actions
                                      == np.max(scores_of_actions)].index)
        else:
            action = np.random.choice(self.actions)
        return action
    def check_state_exist(self, state):
        # Lazily add an all-zero row for unseen states.
        # NOTE(review): DataFrame.append was removed in pandas 2.x; this
        # code targets an older pandas release.
        if state not in self.q_table.index:
            self.q_table = self.q_table.append(
                pd.Series(
                    [0]*len(self.actions),
                    index = self.q_table.columns,
                    name = state,
                )
            )
    def learn(self, *args):
        # Overridden by the concrete agent subclasses.
        pass
class Qlearning_Agent(RL_Agent):
    """Off-policy TD(0) agent using the Q-learning (max-bootstrap) update."""

    def __init__(self, actions=ID_ACTIONS, learning_rate=ALPHA, reward_decay=GAMMA, e_greedy=EPSILON):
        super(Qlearning_Agent, self).__init__(actions, learning_rate, reward_decay, e_greedy)
        # Flipped to True by the observer once the decoy goal is reached.
        self.simulation = False

    def learn(self, state, action, r, new_state):
        """One Q-learning backup for the (s, a, r, s') transition."""
        self.check_state_exist(new_state)
        current = self.q_table.loc[state, action]
        if new_state in GOALS:
            target = r  # terminal state: no bootstrapping
        else:
            target = r + self.gamma * self.q_table.loc[new_state, :].max()
        self.q_table.loc[state, action] = current + self.lr * (target - current)
class Sarsa_Agent(RL_Agent):
    """On-policy SARSA agent: bootstraps from the action actually taken next."""
    def __init__(self, actions = ID_ACTIONS, learning_rate = ALPHA, reward_decay = GAMMA, e_greedy = EPSILON):
        super(Sarsa_Agent, self).__init__(actions, learning_rate, reward_decay, e_greedy)

    def learn(self, state, action, r, new_state, new_action):
        """Apply one SARSA update for (state, action, r, new_state, new_action)."""
        self.check_state_exist(new_state)
        current = self.q_table.loc[state, action]
        if new_state in GOALS:
            # Terminal transition: no bootstrapping.
            target = r
        else:
            target = r + self.gamma * self.q_table.loc[new_state, new_action]
        self.q_table.loc[state, action] = current + self.lr * (target - current)
class N_Step_Sarsa_Agent(RL_Agent):
    """
    SARSA agent with eligibility traces (Sarsa(lambda), despite the name).

    Keeps a trace table parallel to the Q-table; every update adjusts all
    state-action pairs in proportion to their decayed trace, which propagates
    credit backwards along the episode.
    """
    def __init__(self, actions = ID_ACTIONS, learning_rate = ALPHA, reward_decay = GAMMA,
                 e_greedy = EPSILON, trace_decay = LAMBDA):
        super(N_Step_Sarsa_Agent, self).__init__(actions, learning_rate, reward_decay, e_greedy)
        self.lambd = trace_decay
        self.eligibility_trace = self.q_table.copy()

    def check_state_exist(self, state):
        """Insert a zero row for *state* in both the Q-table and the traces."""
        if state not in self.q_table.index:
            # DataFrame.append was removed in pandas 2.0; concatenate a
            # one-row frame instead (pd.concat copies, so the two tables do
            # not share data).
            new_row = pd.Series(
                [0] * len(self.actions),
                index=self.q_table.columns,
                name=state,
                dtype=np.float64,
            ).to_frame().T
            self.q_table = pd.concat([self.q_table, new_row])
            self.eligibility_trace = pd.concat([self.eligibility_trace,
                                                new_row.copy()])

    def learn(self, state, action, r, new_state, new_action):
        """Apply one Sarsa(lambda) update with replacing traces."""
        self.check_state_exist(new_state)
        q_predict = self.q_table.loc[state, action]
        # NOTE(review): new_state arrives as a string while GOALS presumably
        # holds [x, y] lists — confirm this membership test can ever be True.
        if new_state not in GOALS:
            q_target = r + self.gamma * self.q_table.loc[new_state, new_action]
        else:
            q_target = r
        # Replacing traces: zero the row, then mark the taken action.
        self.eligibility_trace.loc[state, :] *= 0
        self.eligibility_trace.loc[state, action] = 1
        # Update the WHOLE table, weighted by each pair's eligibility.
        self.q_table += self.lr * (q_target - q_predict) * self.eligibility_trace
        # Decay all traces toward zero.
        self.eligibility_trace *= self.gamma * self.lambd
def run_agent():
    """
    Run one training episode: reset the environment, step until a real goal
    is reached, learning after every transition, then print the Q-table.

    Uses the module-level ``env``, ``Obs``, ``RL`` and ``GOALS`` objects.
    Commented lines are the SARSA variant of the loop; toggle them together
    with the agent selection in the ``__main__`` block.
    """
    env.reset()
    state = [0, 0]
    #action = RL.choose_action(str(state)) #Sarsa
    RL.simulation = False # Q learning: restart in the pre-fake-goal phase
    while state not in GOALS:
        action = RL.choose_action(str(state)) #Qlearning
        new_state, r = Obs.env_reaction(state, action)
        #new_action = RL.choose_action(str(new_state))
        RL.learn(str(state), action, r, str(new_state)) #Qlearning
        #RL.learn(str(state), action, r, str(new_state), new_action) #Sarsa
        state = new_state
        #action = new_action #Sarsa
    print(RL.q_table)
def training():
    """Train for EPISODES episodes, rendering the maze before each run."""
    for episode in range(EPISODES):
        env.reset()
        env.render()
        run_agent()
    # Leave the maze in its starting configuration once training finishes.
    env.reset()
if __name__ == "__main__":
    # Build the maze environment and the observer that scores transitions.
    env = Maze(SIZE, X, Y)
    Obs = Observer(goals = env.goals, barriers = env.barriers)
    # Select the agent. The SARSA alternatives below also require toggling
    # the SARSA-style call sites inside run_agent().
    RL = Qlearning_Agent()
    #RL = Sarsa_Agent()
    #RL = N_Step_Sarsa_Agent()
    training()
    #env.after(100, update)
    # Hand control to the Tk event loop so the maze window stays open.
    env.mainloop()
|
[
"noreply@github.com"
] |
ZhangLiangyu5411.noreply@github.com
|
a2fe085654a21fee526d110b779f48dfeb6fe9d5
|
a1fe62f072b68e64be0a761ea2fe93d2690dd8f9
|
/store/urls.py
|
2279630df5c9f2227850213bd38911aaed7bc5ef
|
[] |
no_license
|
1nonlyabhi/dukaan-assignment
|
3ff9167ca98be6964ed9f9f3feebaa0cb776d797
|
5a4c14e4c167a5b025995bb44e67a4a5ea7f5c17
|
refs/heads/main
| 2023-08-15T14:57:57.538284
| 2021-09-26T16:52:29
| 2021-09-26T16:52:29
| 410,544,483
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
# URL configuration for the store app.
from django.urls import path
# NOTE(review): this wildcard import appears unused — every view referenced
# below comes from store.views; confirm nothing depends on it before removal.
from account.views import *
from store.views import ProductView, StoreView, detail_store_cat_view, detail_store_view

# Namespace for reversing, e.g. reverse('store:detail').
app_name = "store"

urlpatterns = [
    path('', StoreView.as_view(), name="store"),
    path('<slug>/', detail_store_view, name="detail"),
    # NOTE(review): unlike the sibling patterns these two lack a trailing
    # slash — confirm whether that is intentional (APPEND_SLASH redirects
    # behave differently for them).
    path('<slug>/product', ProductView.as_view(), name="product"),
    path('<slug>/category', detail_store_cat_view, name="category"),
]
|
[
"gabhishek0407@gmail.com"
] |
gabhishek0407@gmail.com
|
6308246f1f21cd53a6eb7e44e01b80b0e9f21ff8
|
6ce2982e30e9c14e0e71291879fe9c2dd81776dc
|
/jouaan/main/migrations/0001_initial.py
|
e2b35520529c67eac8a42f62d6871cb07b12e889
|
[] |
no_license
|
yacoublambaz/Jouaan
|
6f3c88d32d3317dc3e713032e94d54c04138fb58
|
c424029a765f5c2ae20cda7b354b68251e0fc143
|
refs/heads/main
| 2023-04-27T16:46:47.775709
| 2021-05-06T14:00:41
| 2021-05-06T14:00:41
| 352,722,587
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,733
|
py
|
# Generated by Django 3.1.7 on 2021-03-30 17:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """
    Initial schema: Customer, Review, Restaurant and Announcement models.

    Auto-generated by Django; do not edit the operations by hand — create a
    follow-up migration for schema changes instead.
    """

    initial = True

    dependencies = [
        # The OneToOne links below reference the configured user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # A site user profile linked 1:1 to the auth user.
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('email', models.CharField(max_length=200)),
                ('signup_date', models.DateTimeField(auto_now_add=True)),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # A customer's review with per-category integer ratings.
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('review_date', models.DateTimeField(auto_now_add=True)),
                ('cleanliness', models.IntegerField(null=True)),
                ('taste', models.IntegerField(null=True)),
                ('environment', models.IntegerField(null=True)),
                ('price', models.IntegerField(null=True)),
                ('comments', models.TextField(null=True)),
                ('review_score', models.IntegerField(null=True)),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.customer')),
            ],
        ),
        # A restaurant account, also linked 1:1 to the auth user.
        migrations.CreateModel(
            name='Restaurant',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('profile_pic', models.ImageField(null=True, upload_to='')),
                ('address', models.CharField(max_length=200)),
                ('phone_number', models.CharField(max_length=8)),
                ('what_we_serve', models.CharField(max_length=200)),
                ('signup_date', models.DateTimeField(auto_now_add=True)),
                ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Free-text announcements published by a restaurant.
        migrations.CreateModel(
            name='Announcement',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(null=True)),
                ('restaurant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.restaurant')),
            ],
        ),
    ]
|
[
""
] | |
11adadb04fae1383c61d747eea5e932bd6384471
|
1532c4117246d61be9f4e8fa0d283f579d884874
|
/social/urls.py
|
cc523a8a58ab2c0be93e050fc7c5e1ad65394303
|
[] |
no_license
|
jkaalexkei/redsocial
|
ae11ab17aec6453416810166c840d1dee9d0f4ed
|
25bb9141d0f730fa389bdefd259fd961b5f3c3ab
|
refs/heads/master
| 2023-07-06T09:50:55.850379
| 2021-07-29T16:09:50
| 2021-07-29T16:09:50
| 389,832,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth.views import LoginView, LogoutView
# Routes for the social app; MEDIA files are served via static() in dev.
urlpatterns = [
    path('',views.feed,name='feed'),
    # NOTE(review): the next two routes share the name 'profile' — reversing
    # resolves by argument match (no-arg vs username); confirm intentional.
    path('profile/',views.profile,name='profile'),
    path('profile/<str:username>/',views.profile,name='profile'),
    path('register/',views.register,name='register'),
    path('login/',LoginView.as_view(template_name='social/login.html'),name='login'),
    path('logout/',LogoutView.as_view(template_name='social/logout.html'),name='logout'),
    path('post/',views.post, name='post'),
    path('follow/<str:username>/',views.follow,name='follow'),
    path('unfollow/<str:username>/',views.unfollow,name='unfollow'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"jkaalexkei@gmail.com"
] |
jkaalexkei@gmail.com
|
2fc951b9431678e1bdc9ccdae97502170f3490b3
|
c10af00ed8ec3ffe10f2a65720d9e2aca4c4486c
|
/venv/Lib/site-packages/sportsreference/mlb/boxscore.py
|
f0a79f5dc347a007d3c0436e61e2cd7bd11d6c57
|
[] |
no_license
|
afornaca/nflstats
|
c590801d6415648bba2f6f88a0d66076cd5c1b20
|
4d7504be29724653ee09ac31ad60b906b216b927
|
refs/heads/master
| 2020-07-04T21:21:54.696209
| 2020-02-04T23:20:52
| 2020-02-04T23:20:52
| 202,420,370
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68,009
|
py
|
import pandas as pd
import re
from datetime import timedelta
from pyquery import PyQuery as pq
from .. import utils
from ..constants import AWAY, HOME
from ..decorators import float_property_decorator, int_property_decorator
from .constants import (BOXSCORE_ELEMENT_INDEX,
BOXSCORE_SCHEME,
BOXSCORE_URL,
BOXSCORES_URL,
DOUBLE_HEADER_INDICES)
from .player import (AbstractPlayer,
_float_property_decorator,
_int_property_decorator)
from sportsreference import utils
from sportsreference.constants import AWAY, HOME
from sportsreference.mlb.constants import DAY, NIGHT
from six.moves.urllib.error import HTTPError
class BoxscorePlayer(AbstractPlayer):
    """
    Get player stats for an individual game.

    Given a player ID, such as 'altuvjo01' for Jose Altuve, their full name,
    and all associated stats from the Boxscore page in HTML format, parse the
    HTML and extract only the relevant stats for the specified player and
    assign them to readable properties.

    This class inherits the ``AbstractPlayer`` class. As a result, all
    properties associated with ``AbstractPlayer`` can also be read directly
    from this class.

    As this class is instantiated from within the Boxscore class, it should not
    be called directly and should instead be queried using the appropriate
    players properties from the Boxscore class.

    Parameters
    ----------
    player_id : string
        A player's ID according to baseball-reference.com, such as 'altuvjo01'
        for Jose Altuve. The player ID can be found by navigating to the
        player's stats page and getting the string between the final slash and
        the '.html' in the URL. In general, the ID is in the format 'LLLLLFFNN'
        where 'LLLLL' are the first 5 letters in the player's last name, 'FF',
        are the first 2 letters in the player's first name, and 'NN' is a
        number starting at '01' for the first time that player ID has been used
        and increments by 1 for every successive player.
    player_name : string
        A string representing the player's first and last name, such as 'Jose
        Altuve'.
    player_data : string
        A string representation of the player's HTML data from the Boxscore
        page. If the player appears in multiple tables, all of their
        information will appear in one single string concatenated together.
    """
    def __init__(self, player_id, player_name, player_data):
        # Single-game stats only have one data point, hence a fixed index.
        self._index = 0
        self._player_id = player_id
        # Fields below are populated from player_data by AbstractPlayer.
        self._average_leverage_index = None
        self._base_out_runs_added = None
        self._earned_runs_against = None
        self._innings_pitched = None
        self._pitches_thrown = None
        self._strikes = None
        self._home_runs_thrown = None
        self._strikes_thrown = None
        self._strikes_contact = None
        self._strikes_swinging = None
        self._strikes_looking = None
        self._grounded_balls = None
        self._fly_balls = None
        self._line_drives = None
        self._unknown_bat_types = None
        self._game_score = None
        self._inherited_runners = None
        self._inherited_score = None
        self._win_probability_added_pitcher = None
        self._average_leverage_index_pitcher = None
        self._base_out_runs_saved = None
        self._win_probability_added = None
        self._win_probability_for_offensive_player = None
        self._win_probability_subtracted = None
        AbstractPlayer.__init__(self, player_id, player_name, player_data)

    @property
    def dataframe(self):
        """
        Returns a ``pandas DataFrame`` containing all other relevant class
        properties and values for the specified game.
        """
        fields_to_include = {
            'assists': self.assists,
            'at_bats': self.at_bats,
            'average_leverage_index': self.average_leverage_index,
            'average_leverage_index_pitcher':
                self.average_leverage_index_pitcher,
            'bases_on_balls': self.bases_on_balls,
            'bases_on_balls_given': self.bases_on_balls_given,
            'base_out_runs_added': self.base_out_runs_added,
            'base_out_runs_saved': self.base_out_runs_saved,
            'batters_faced': self.batters_faced,
            'batting_average': self.batting_average,
            'earned_runs_allowed': self.earned_runs_allowed,
            'earned_runs_against': self.earned_runs_against,
            'fly_balls': self.fly_balls,
            'game_score': self.game_score,
            'grounded_balls': self.grounded_balls,
            'hits': self.hits,
            'hits_allowed': self.hits_allowed,
            'home_runs_thrown': self.home_runs_thrown,
            'inherited_runners': self.inherited_runners,
            'inherited_score': self.inherited_score,
            'innings_pitched': self.innings_pitched,
            'line_drives': self.line_drives,
            'name': self.name,
            'on_base_percentage': self.on_base_percentage,
            'on_base_plus_slugging_percentage':
                self.on_base_plus_slugging_percentage,
            'pitches_thrown': self.pitches_thrown,
            'plate_appearances': self.plate_appearances,
            'putouts': self.putouts,
            'runs': self.runs,
            'runs_allowed': self.runs_allowed,
            'runs_batted_in': self.runs_batted_in,
            'slugging_percentage': self.slugging_percentage,
            'strikes': self.strikes,
            'strikes_contact': self.strikes_contact,
            'strikes_looking': self.strikes_looking,
            'strikes_swinging': self.strikes_swinging,
            'strikes_thrown': self.strikes_thrown,
            'strikeouts': self.strikeouts,
            'times_struck_out': self.times_struck_out,
            'unknown_bat_types': self.unknown_bat_types,
            'win_probability_added': self.win_probability_added,
            'win_probability_added_pitcher':
                self.win_probability_added_pitcher,
            'win_probability_for_offensive_player':
                self.win_probability_for_offensive_player,
            'win_probability_subtracted': self.win_probability_subtracted
        }
        return pd.DataFrame([fields_to_include], index=[self._player_id])

    @_float_property_decorator
    def average_leverage_index(self):
        """
        Returns a ``float`` of the amount of pressure the player faced during
        the game. 1.0 denotes average pressure while numbers less than 0 denote
        lighter pressure.
        """
        return self._average_leverage_index

    @_float_property_decorator
    def base_out_runs_added(self):
        """
        Returns a ``float`` of the number of base out runs added by the player.
        """
        return self._base_out_runs_added

    @_float_property_decorator
    def earned_runs_against(self):
        """
        Returns a ``float`` of the player's overall Earned Runs Against average
        as calculated by 9 * earned_runs / innings_pitched.
        """
        return self._earned_runs_against

    # NOTE(review): innings pitched is commonly fractional (e.g. '6.1'); this
    # uses the int decorator — confirm partial innings are not truncated.
    @_int_property_decorator
    def innings_pitched(self):
        """
        Returns an ``int`` of the number of innings the player pitched in.
        """
        return self._innings_pitched

    @_int_property_decorator
    def home_runs_thrown(self):
        """
        Returns an ``int`` of the number of home runs the player threw.
        """
        return self._home_runs_thrown

    @_int_property_decorator
    def pitches_thrown(self):
        """
        Returns an ``int`` of the number of pitches the player threw.
        """
        return self._pitches_thrown

    @_int_property_decorator
    def strikes(self):
        """
        Returns an ``int`` of the number of times a strike was called against
        the player.
        """
        return self._strikes

    @_int_property_decorator
    def strikes_thrown(self):
        """
        Returns an ``int`` of the number of strikes the player threw.
        """
        return self._strikes_thrown

    @_int_property_decorator
    def strikes_contact(self):
        """
        Returns an ``int`` of the number of times the player threw a strike
        when the player made contact with the ball.
        """
        return self._strikes_contact

    @_int_property_decorator
    def strikes_swinging(self):
        """
        Returns an ``int`` of the number of times the player threw a strike
        with the batter swinging.
        """
        return self._strikes_swinging

    @_int_property_decorator
    def strikes_looking(self):
        """
        Returns an ``int`` of the number of times the player threw a strike
        with the batter looking.
        """
        return self._strikes_looking

    @_int_property_decorator
    def grounded_balls(self):
        """
        Returns an ``int`` of the number of grounded balls the player allowed.
        """
        return self._grounded_balls

    @_int_property_decorator
    def fly_balls(self):
        """
        Returns an ``int`` of the number of fly balls the player allowed.
        """
        return self._fly_balls

    @_int_property_decorator
    def line_drives(self):
        """
        Returns an ``int`` of the number of line drives the player allowed.
        """
        return self._line_drives

    @_int_property_decorator
    def unknown_bat_types(self):
        """
        Returns an ``int`` of the number of balls the player allowed with an
        unknown batted-ball type. (Original docstring said 'line drives' — a
        copy-paste error from the property above.)
        """
        return self._unknown_bat_types

    @_int_property_decorator
    def game_score(self):
        """
        Returns an ``int`` of the pitcher's score determined by many factors,
        such as number of runs scored against, number of strikes, etc.
        """
        return self._game_score

    @_int_property_decorator
    def inherited_runners(self):
        """
        Returns an ``int`` of the number of runners a relief pitcher inherited.
        """
        return self._inherited_runners

    @_int_property_decorator
    def inherited_score(self):
        """
        Returns an ``int`` of the number of runners on base when a relief
        pitcher entered the game that ended up scoring.
        """
        return self._inherited_score

    @_float_property_decorator
    def win_probability_added_pitcher(self):
        """
        Returns a ``float`` of the total positive influence the pitcher's
        offense had on the outcome of the game.
        """
        return self._win_probability_added_pitcher

    @_float_property_decorator
    def average_leverage_index_pitcher(self):
        """
        Returns a ``float`` of the amount of pressure the pitcher faced during
        the game. 1.0 denotes average pressure while numbers less than 0 denote
        lighter pressure.
        """
        return self._average_leverage_index_pitcher

    @_float_property_decorator
    def base_out_runs_saved(self):
        """
        Returns a ``float`` of the number of runs saved by the pitcher based on
        the number of players on bases. 0.0 denotes an average value.
        """
        return self._base_out_runs_saved

    @_float_property_decorator
    def win_probability_added(self):
        """
        Returns a ``float`` of the total positive influence the player's
        offense had on the outcome of the game.
        """
        return self._win_probability_added

    @_float_property_decorator
    def win_probability_subtracted(self):
        """
        Returns a ``float`` of the total negative influence the player's
        offense had on the outcome of the game.
        """
        return self._win_probability_subtracted

    @_float_property_decorator
    def win_probability_for_offensive_player(self):
        """
        Returns a ``float`` of the overall influence the player's offense had
        on the outcome of the game where 0.0 denotes no influence and 1.0
        denotes the offense was solely responsible for the outcome.
        """
        return self._win_probability_for_offensive_player
class Boxscore(object):
"""
Detailed information about the final statistics for a game.
Stores all relevant information for a game such as the date, time,
location, result, and more advanced metrics such as the number of strikes,
a pitcher's influence on the game, the number of putouts and much more.
Parameters
----------
uri : string
The relative link to the boxscore HTML page, such as
'BOS/BOS201806070'.
"""
def __init__(self, uri):
self._uri = uri
self._date = None
self._time = None
self._attendance = None
self._venue = None
self._time_of_day = None
self._duration = None
self._away_name = None
self._home_name = None
self._winner = None
self._winning_name = None
self._winning_abbr = None
self._losing_name = None
self._losing_abbr = None
self._losing_abbr = None
self._away_at_bats = None
self._away_runs = None
self._away_hits = None
self._away_rbi = None
self._away_earned_runs = None
self._away_bases_on_balls = None
self._away_strikeouts = None
self._away_plate_appearances = None
self._away_batting_average = None
self._away_on_base_percentage = None
self._away_slugging_percentage = None
self._away_on_base_plus = None
self._away_pitches = None
self._away_strikes = None
self._away_win_probability_for_offensive_player = None
self._away_average_leverage_index = None
self._away_win_probability_added = None
self._away_win_probability_subtracted = None
self._away_base_out_runs_added = None
self._away_putouts = None
self._away_assists = None
self._away_innings_pitched = None
self._away_home_runs = None
self._away_strikes_by_contact = None
self._away_strikes_swinging = None
self._away_strikes_looking = None
self._away_grounded_balls = None
self._away_fly_balls = None
self._away_line_drives = None
self._away_unknown_bat_type = None
self._away_game_score = None
self._away_inherited_runners = None
self._away_inherited_score = None
self._away_win_probability_by_pitcher = None
self._away_base_out_runs_saved = None
self._home_at_bats = None
self._home_runs = None
self._home_hits = None
self._home_rbi = None
self._home_earned_runs = None
self._home_bases_on_balls = None
self._home_strikeouts = None
self._home_plate_appearances = None
self._home_batting_average = None
self._home_on_base_percentage = None
self._home_slugging_percentage = None
self._home_on_base_plus = None
self._home_pitches = None
self._home_strikes = None
self._home_win_probability_for_offensive_player = None
self._home_average_leverage_index = None
self._home_win_probability_added = None
self._home_win_probability_subtracted = None
self._home_base_out_runs_added = None
self._home_putouts = None
self._home_assists = None
self._home_innings_pitched = None
self._home_home_runs = None
self._home_strikes_by_contact = None
self._home_strikes_swinging = None
self._home_strikes_looking = None
self._home_grounded_balls = None
self._home_fly_balls = None
self._home_line_drives = None
self._home_unknown_bat_type = None
self._home_game_score = None
self._home_inherited_runners = None
self._home_inherited_score = None
self._home_win_probability_by_pitcher = None
self._home_base_out_runs_saved = None
self._parse_game_data(uri)
def _retrieve_html_page(self, uri):
"""
Download the requested HTML page.
Given a relative link, download the requested page and strip it of all
comment tags before returning a pyquery object which will be used to
parse the data.
Parameters
----------
uri : string
The relative link to the boxscore HTML page, such as
'BOS/BOS201806070'.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed.
"""
url = BOXSCORE_URL % uri
try:
url_data = pq(url)
except HTTPError:
return None
return pq(utils._remove_html_comment_tags(url_data))
def _parse_game_date_and_location(self, boxscore):
"""
Retrieve the game's date and location.
The game's meta information, such as date, location, attendance, and
duration, follow a complex parsing scheme that changes based on the
layout of the page. The information should be able to be parsed and set
regardless of the order and how much information is included. To do
this, the meta information should be iterated through line-by-line and
fields should be determined by the values that are found in each line.
Parameters
----------
boxscore : PyQuery object
A PyQuery object containing all of the HTML data from the boxscore.
"""
scheme = BOXSCORE_SCHEME["game_info"]
items = [i.text() for i in boxscore(scheme).items()]
game_info = items[0].split('\n')
attendance = None
date = None
duration = None
time = None
time_of_day = None
venue = None
if len(game_info) > 0:
date = game_info[0]
for line in game_info:
if 'Start Time: ' in line:
time = line.replace('Start Time: ', '')
if 'Attendance: ' in line:
attendance = line.replace('Attendance: ', '').replace(',', '')
if 'Venue: ' in line:
venue = line.replace('Venue: ', '')
if 'Game Duration: ' in line:
duration = line.replace('Game Duration: ', '')
if 'Night Game' in line or 'Day Game' in line:
time_of_day = line
setattr(self, '_attendance', attendance)
setattr(self, '_date', date)
setattr(self, '_duration', duration)
setattr(self, '_time', time)
setattr(self, '_time_of_day', time_of_day)
setattr(self, '_venue', venue)
def _parse_name(self, field, boxscore):
"""
Retrieve the team's complete name tag.
Both the team's full name (embedded in the tag's text) and the team's
abbreviation are stored in the name tag which can be used to parse
the winning and losing team's information.
Parameters
----------
field : string
The name of the attribute to parse
boxscore : PyQuery object
A PyQuery object containing all of the HTML data from the boxscore.
Returns
-------
PyQuery object
The complete text for the requested tag.
"""
scheme = BOXSCORE_SCHEME[field]
return boxscore(scheme)
def _find_boxscore_tables(self, boxscore):
"""
Find all tables with boxscore information on the page.
Iterate through all tables on the page and see if any of them are
boxscore pages by checking if the ID is prefixed with 'box_'. If so,
add it to a list and return the final list at the end.
Parameters
----------
boxscore : PyQuery object
A PyQuery object containing all of the HTML data from the boxscore.
Returns
-------
list
Returns a ``list`` of the PyQuery objects where each object
represents a boxscore table.
"""
tables = []
for table in boxscore('table').items():
try:
if 'pitching' in table.attr['id'] or \
'batting' in table.attr['id']:
tables.append(table)
except (KeyError, TypeError):
continue
return tables
def _find_player_id(self, row):
"""
Find the player's ID.
Find the player's ID as embedded in the 'data-append-csv' attribute,
such as 'altuvjo01' for Jose Altuve.
Parameters
----------
row : PyQuery object
A PyQuery object representing a single row in a boxscore table for
a single player.
Returns
-------
str
Returns a ``string`` of the player's ID, such as 'altuvjo01' for
Jose Altuve.
"""
return row('th').attr('data-append-csv')
def _find_player_name(self, row):
"""
Find the player's full name.
Find the player's full name, such as 'Jose Altuve'. The name is the
text displayed for a link to the player's individual stats.
Parameters
----------
row : PyQuery object
A PyQuery object representing a single row in a boxscore table for
a single player.
Returns
-------
str
Returns a ``string`` of the player's full name, such as 'Jose
Altuve'.
"""
return row('a').text()
def _extract_player_stats(self, table, player_dict, home_or_away):
"""
Combine all player stats into a single object.
Since each player generally has a couple of rows worth of stats (one
for basic stats and another for advanced stats) on the boxscore page,
both rows should be combined into a single string object to easily
query all fields from a single object instead of determining which row
to pull metrics from.
Parameters
----------
table : PyQuery object
A PyQuery object of a single boxscore table, such as the home
team's advanced stats or the away team's basic stats.
player_dict : dictionary
A dictionary where each key is a string of the player's ID and each
value is a dictionary where the values contain the player's name,
HTML data, and a string constant indicating which team the player
is a member of.
home_or_away : string constant
A string constant indicating whether the player plays for the home
or away team.
Returns
-------
dictionary
Returns a ``dictionary`` where each key is a string of the player's
ID and each value is a dictionary where the values contain the
player's name, HTML data, and a string constant indicating which
team the player is a member of.
"""
for row in table('tbody tr').items():
player_id = self._find_player_id(row)
# Occurs when a header row is identified instead of a player.
if not player_id:
continue
name = self._find_player_name(row)
try:
player_dict[player_id]['data'] += str(row).strip()
except KeyError:
player_dict[player_id] = {
'name': name,
'data': str(row).strip(),
'team': home_or_away
}
return player_dict
def _instantiate_players(self, player_dict):
"""
Create a list of player instances for both the home and away teams.
For every player listed on the boxscores page, create an instance of
the BoxscorePlayer class for that player and add them to a list of
players for their respective team.
Parameters
----------
player_dict : dictionary
A dictionary containing information for every player on the
boxscores page. Each key is a string containing the player's ID
and each value is a dictionary with the player's full name, a
string representation of their HTML stats, and a string constant
denoting which team they play for as the values.
Returns
-------
tuple
Returns a ``tuple`` in the format (away_players, home_players)
where each element is a list of player instances for the away and
home teams, respectively.
"""
home_players = []
away_players = []
for player_id, details in player_dict.items():
player = BoxscorePlayer(player_id,
details['name'],
details['data'])
if details['team'] == HOME:
home_players.append(player)
else:
away_players.append(player)
return away_players, home_players
def _find_players(self, boxscore):
"""
Find all players for each team.
Iterate through every player for both teams as found in the boxscore
tables and create a list of instances of the BoxscorePlayer class for
each player. Return lists of player instances comprising the away and
home team players, respectively.
Parameters
----------
boxscore : PyQuery object
A PyQuery object containing all of the HTML data from the boxscore.
Returns
-------
tuple
Returns a ``tuple`` in the format (away_players, home_players)
where each element is a list of player instances for the away and
home teams, respectively.
"""
player_dict = {}
table_count = 0
tables = self._find_boxscore_tables(boxscore)
for table in tables:
home_or_away = HOME
# There are two tables per team with the odd tables belonging to
# the away team.
if table_count % 2 == 1:
home_or_away = AWAY
player_dict = self._extract_player_stats(table,
player_dict,
home_or_away)
table_count += 1
away_players, home_players = self._instantiate_players(player_dict)
return away_players, home_players
def _parse_game_data(self, uri):
"""
Parses a value for every attribute.
This function looks through every attribute and retrieves the value
according to the parsing scheme and index of the attribute from the
passed HTML data. Once the value is retrieved, the attribute's value is
updated with the returned result.
Note that this method is called directly once Boxscore is invoked and
does not need to be called manually.
Parameters
----------
uri : string
The relative link to the boxscore HTML page, such as
'BOS/BOS201806070'.
"""
boxscore = self._retrieve_html_page(uri)
# If the boxscore is None, the game likely hasn't been played yet and
# no information can be gathered. As there is nothing to grab, the
# class instance should just be empty.
if not boxscore:
return
for field in self.__dict__:
# Remove the '_' from the name
short_field = str(field)[1:]
if short_field == 'winner' or \
short_field == 'winning_name' or \
short_field == 'winning_abbr' or \
short_field == 'losing_name' or \
short_field == 'losing_abbr' or \
short_field == 'uri' or \
short_field == 'date' or \
short_field == 'time' or \
short_field == 'venue' or \
short_field == 'attendance' or \
short_field == 'time_of_day' or \
short_field == 'duration':
continue
if short_field == 'away_name' or \
short_field == 'home_name':
value = self._parse_name(short_field, boxscore)
setattr(self, field, value)
continue
index = 0
if short_field in BOXSCORE_ELEMENT_INDEX.keys():
index = BOXSCORE_ELEMENT_INDEX[short_field]
value = utils._parse_field(BOXSCORE_SCHEME,
boxscore,
short_field,
index)
setattr(self, field, value)
self._parse_game_date_and_location(boxscore)
self._away_players, self._home_players = self._find_players(boxscore)
@property
def dataframe(self):
"""
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string URI that is used to
instantiate the class, such as 'BOS201806070'.
"""
if self._away_runs is None and self._home_runs is None:
return None
fields_to_include = {
'date': self.date,
'time': self.time,
'venue': self.venue,
'attendance': self.attendance,
'duration': self.duration,
'time_of_day': self.time_of_day,
'winner': self.winner,
'winning_name': self.winning_name,
'winning_abbr': self.winning_abbr,
'losing_name': self.losing_name,
'losing_abbr': self.losing_abbr,
'away_at_bats': self.away_at_bats,
'away_runs': self.away_runs,
'away_hits': self.away_hits,
'away_rbi': self.away_rbi,
'away_earned_runs': self.away_earned_runs,
'away_bases_on_balls': self.away_bases_on_balls,
'away_strikeouts': self.away_strikeouts,
'away_plate_appearances': self.away_plate_appearances,
'away_batting_average': self.away_batting_average,
'away_on_base_percentage': self.away_on_base_percentage,
'away_slugging_percentage': self.away_slugging_percentage,
'away_on_base_plus': self.away_on_base_plus,
'away_pitches': self.away_pitches,
'away_strikes': self.away_strikes,
'away_win_probability_for_offensive_player':
self.away_win_probability_for_offensive_player,
'away_average_leverage_index': self.away_average_leverage_index,
'away_win_probability_added': self.away_win_probability_added,
'away_win_probability_subtracted':
self.away_win_probability_subtracted,
'away_base_out_runs_added': self.away_base_out_runs_added,
'away_putouts': self.away_putouts,
'away_assists': self.away_assists,
'away_innings_pitched': self.away_innings_pitched,
'away_home_runs': self.away_home_runs,
'away_strikes_by_contact': self.away_strikes_by_contact,
'away_strikes_swinging': self.away_strikes_swinging,
'away_strikes_looking': self.away_strikes_looking,
'away_grounded_balls': self.away_grounded_balls,
'away_fly_balls': self.away_fly_balls,
'away_line_drives': self.away_line_drives,
'away_unknown_bat_type': self.away_unknown_bat_type,
'away_game_score': self.away_game_score,
'away_inherited_runners': self.away_inherited_runners,
'away_inherited_score': self.away_inherited_score,
'away_win_probability_by_pitcher':
self.away_win_probability_by_pitcher,
'away_base_out_runs_saved': self.away_base_out_runs_saved,
'home_at_bats': self.home_at_bats,
'home_runs': self.home_runs,
'home_hits': self.home_hits,
'home_rbi': self.home_rbi,
'home_earned_runs': self.home_earned_runs,
'home_bases_on_balls': self.home_bases_on_balls,
'home_strikeouts': self.home_strikeouts,
'home_plate_appearances': self.home_plate_appearances,
'home_batting_average': self.home_batting_average,
'home_on_base_percentage': self.home_on_base_percentage,
'home_slugging_percentage': self.home_slugging_percentage,
'home_on_base_plus': self.home_on_base_plus,
'home_pitches': self.home_pitches,
'home_strikes': self.home_strikes,
'home_win_probability_for_offensive_player':
self.home_win_probability_for_offensive_player,
'home_average_leverage_index': self.home_average_leverage_index,
'home_win_probability_added': self.home_win_probability_added,
'home_win_probability_subtracted':
self.home_win_probability_subtracted,
'home_base_out_runs_added': self.home_base_out_runs_added,
'home_putouts': self.home_putouts,
'home_assists': self.home_assists,
'home_innings_pitched': self.home_innings_pitched,
'home_home_runs': self.home_home_runs,
'home_strikes_by_contact': self.home_strikes_by_contact,
'home_strikes_swinging': self.home_strikes_swinging,
'home_strikes_looking': self.home_strikes_looking,
'home_grounded_balls': self.home_grounded_balls,
'home_fly_balls': self.home_fly_balls,
'home_line_drives': self.home_line_drives,
'home_unknown_bat_type': self.home_unknown_bat_type,
'home_game_score': self.home_game_score,
'home_inherited_runners': self.home_inherited_runners,
'home_inherited_score': self.home_inherited_score,
'home_win_probability_by_pitcher':
self.home_win_probability_by_pitcher,
'home_base_out_runs_saved': self.home_base_out_runs_saved
}
return pd.DataFrame([fields_to_include], index=[self._uri])
    @property
    def away_players(self):
        """
        Returns a ``list`` of ``BoxscorePlayer`` class instances for each
        player on the away team.
        """
        return self._away_players
    @property
    def home_players(self):
        """
        Returns a ``list`` of ``BoxscorePlayer`` class instances for each
        player on the home team.
        """
        return self._home_players
    @property
    def date(self):
        """
        Returns a ``string`` of the date the game took place.
        """
        return self._date
    @property
    def time(self):
        """
        Returns a ``string`` of the time the game started.
        """
        return self._time
    @property
    def venue(self):
        """
        Returns a ``string`` of the name of the ballpark where the game was
        played.
        """
        return self._venue
    @int_property_decorator
    def attendance(self):
        """
        Returns an ``int`` of the game's listed attendance.
        """
        return self._attendance
    @property
    def duration(self):
        """
        Returns a ``string`` of the game's duration in the format 'H:MM'.
        """
        return self._duration
@property
def time_of_day(self):
"""
Returns a ``string`` constant indicated whether the game was played
during the day or at night.
"""
if 'night' in self._time_of_day.lower():
return NIGHT
return DAY
@property
def winner(self):
"""
Returns a ``string`` constant indicating whether the home or away team
won.
"""
if self.home_runs > self.away_runs:
return HOME
return AWAY
@property
def winning_name(self):
"""
Returns a ``string`` of the winning team's name, such as 'Houston
Astros'.
"""
if self.winner == HOME:
return self._home_name.text()
return self._away_name.text()
@property
def winning_abbr(self):
"""
Returns a ``string`` of the winning team's abbreviation, such as 'HOU'
for the Houston Astros.
"""
if self.winner == HOME:
return utils._parse_abbreviation(self._home_name)
return utils._parse_abbreviation(self._away_name)
@property
def losing_name(self):
"""
Returns a ``string`` of the losing team's name, such as 'Los Angeles
Dodgers'.
"""
if self.winner == HOME:
return self._away_name.text()
return self._home_name.text()
@property
def losing_abbr(self):
"""
Returns a ``string`` of the losing team's abbreviation, such as 'LAD'
for the Los Angeles Dodgers.
"""
if self.winner == HOME:
return utils._parse_abbreviation(self._away_name)
return utils._parse_abbreviation(self._home_name)
    # Away-team statistics parsed from the boxscore. Batting figures refer to
    # the away team's offense; pitching/fielding figures (putouts, strikes
    # thrown, etc.) refer to the away team's defense.
    @int_property_decorator
    def away_at_bats(self):
        """
        Returns an ``int`` of the number of at bats the away team had.
        """
        return self._away_at_bats
    @int_property_decorator
    def away_runs(self):
        """
        Returns an ``int`` of the number of runs the away team scored.
        """
        return self._away_runs
    @int_property_decorator
    def away_hits(self):
        """
        Returns an ``int`` of the number of hits the away team had.
        """
        return self._away_hits
    @int_property_decorator
    def away_rbi(self):
        """
        Returns an ``int`` of the number of runs batted in the away team
        registered.
        """
        return self._away_rbi
    @float_property_decorator
    def away_earned_runs(self):
        """
        Returns a ``float`` of the number of runs the away team earned.
        """
        return self._away_earned_runs
    @int_property_decorator
    def away_bases_on_balls(self):
        """
        Returns an ``int`` of the number of bases the away team registered as
        a result of balls.
        """
        return self._away_bases_on_balls
    @int_property_decorator
    def away_strikeouts(self):
        """
        Returns an ``int`` of the number of times the away team was struck out.
        """
        return self._away_strikeouts
    @int_property_decorator
    def away_plate_appearances(self):
        """
        Returns an ``int`` of the number of plate appearances the away team
        made.
        """
        return self._away_plate_appearances
    @float_property_decorator
    def away_batting_average(self):
        """
        Returns a ``float`` of the batting average for the away team.
        """
        return self._away_batting_average
    @float_property_decorator
    def away_on_base_percentage(self):
        """
        Returns a ``float`` of the percentage of at bats that result in the
        batter getting on base.
        """
        return self._away_on_base_percentage
    @float_property_decorator
    def away_slugging_percentage(self):
        """
        Returns a ``float`` of the slugging percentage for the away team based
        on the number of bases gained per at-bat with bigger plays getting more
        weight.
        """
        return self._away_slugging_percentage
    @float_property_decorator
    def away_on_base_plus(self):
        """
        Returns a ``float`` of the on base percentage plus the slugging
        percentage. Percentage ranges from 0-1.
        """
        return self._away_on_base_plus
    @int_property_decorator
    def away_pitches(self):
        """
        Returns an ``int`` of the number of pitches the away team faced.
        """
        return self._away_pitches
    @int_property_decorator
    def away_strikes(self):
        """
        Returns an ``int`` of the number of times a strike was called against
        the away team.
        """
        return self._away_strikes
    @float_property_decorator
    def away_win_probability_for_offensive_player(self):
        """
        Returns a ``float`` of the overall influence the away team's offense
        had on the outcome of the game where 0.0 denotes no influence and 1.0
        denotes the offense was solely responsible for the outcome.
        """
        return self._away_win_probability_for_offensive_player
    @float_property_decorator
    def away_average_leverage_index(self):
        """
        Returns a ``float`` of the amount of pressure the away team's pitcher
        faced during the game. 1.0 denotes average pressure while numbers less
        than 0 denote lighter pressure.
        """
        return self._away_average_leverage_index
    @float_property_decorator
    def away_win_probability_added(self):
        """
        Returns a ``float`` of the total positive influence the away team's
        offense had on the outcome of the game.
        """
        return self._away_win_probability_added
    @float_property_decorator
    def away_win_probability_subtracted(self):
        """
        Returns a ``float`` of the total negative influence the away team's
        offense had on the outcome of the game.
        """
        return self._away_win_probability_subtracted
    @float_property_decorator
    def away_base_out_runs_added(self):
        """
        Returns a ``float`` of the number of base out runs added by the away
        team.
        """
        return self._away_base_out_runs_added
    @int_property_decorator
    def away_putouts(self):
        """
        Returns an ``int`` of the number of putouts the away team registered.
        """
        return self._away_putouts
    @int_property_decorator
    def away_assists(self):
        """
        Returns an ``int`` of the number of assists the away team registered.
        """
        return self._away_assists
    @float_property_decorator
    def away_innings_pitched(self):
        """
        Returns a ``float`` of the number of innings the away team pitched.
        """
        return self._away_innings_pitched
    @int_property_decorator
    def away_home_runs(self):
        """
        Returns an ``int`` of the number of times the away team gave up a home
        run.
        """
        return self._away_home_runs
    @int_property_decorator
    def away_strikes_by_contact(self):
        """
        Returns an ``int`` of the number of times the away team struck out a
        batter who made contact with the pitch.
        """
        return self._away_strikes_by_contact
    @int_property_decorator
    def away_strikes_swinging(self):
        """
        Returns an ``int`` of the number of times the away team struck out a
        batter who was swinging.
        """
        return self._away_strikes_swinging
    @int_property_decorator
    def away_strikes_looking(self):
        """
        Returns an ``int`` of the number of times the away team struck out a
        batter who was looking.
        """
        return self._away_strikes_looking
    @int_property_decorator
    def away_grounded_balls(self):
        """
        Returns an ``int`` of the number of grounded balls the away team
        allowed.
        """
        return self._away_grounded_balls
    @int_property_decorator
    def away_fly_balls(self):
        """
        Returns an ``int`` of the number of fly balls the away team allowed.
        """
        return self._away_fly_balls
    @int_property_decorator
    def away_line_drives(self):
        """
        Returns an ``int`` of the number of line drives the away team allowed.
        """
        return self._away_line_drives
    @int_property_decorator
    def away_unknown_bat_type(self):
        """
        Returns an ``int`` of the number of away at bats that were not properly
        tracked and therefore cannot be safely placed in another statistical
        category.
        """
        return self._away_unknown_bat_type
    @int_property_decorator
    def away_game_score(self):
        """
        Returns an ``int`` of the starting away pitcher's score determined by
        many factors, such as number of runs scored against, number of strikes,
        etc.
        """
        return self._away_game_score
    @int_property_decorator
    def away_inherited_runners(self):
        """
        Returns an ``int`` of the number of runners a pitcher inherited when he
        entered the game.
        """
        return self._away_inherited_runners
    @int_property_decorator
    def away_inherited_score(self):
        """
        Returns an ``int`` of the number of scorers a pitcher inherited when he
        entered the game.
        """
        return self._away_inherited_score
    @float_property_decorator
    def away_win_probability_by_pitcher(self):
        """
        Returns a ``float`` of the amount of influence the away pitcher had on
        the game's result with 0.0 denoting zero influence and 1.0 denoting he
        was solely responsible for the team's win.
        """
        return self._away_win_probability_by_pitcher
    @float_property_decorator
    def away_base_out_runs_saved(self):
        """
        Returns a ``float`` of the number of runs saved by the away pitcher
        based on the number of players on bases. 0.0 denotes an average value.
        """
        return self._away_base_out_runs_saved
    # Home-team statistics parsed from the boxscore. Batting figures refer to
    # the home team's offense; pitching/fielding figures (putouts, strikes
    # thrown, etc.) refer to the home team's defense.
    @int_property_decorator
    def home_at_bats(self):
        """
        Returns an ``int`` of the number of at bats the home team had.
        """
        return self._home_at_bats
    @int_property_decorator
    def home_runs(self):
        """
        Returns an ``int`` of the number of runs the home team scored.
        """
        return self._home_runs
    @int_property_decorator
    def home_hits(self):
        """
        Returns an ``int`` of the number of hits the home team had.
        """
        return self._home_hits
    @int_property_decorator
    def home_rbi(self):
        """
        Returns an ``int`` of the number of runs batted in the home team
        registered.
        """
        return self._home_rbi
    @float_property_decorator
    def home_earned_runs(self):
        """
        Returns a ``float`` of the number of runs the home team earned.
        """
        return self._home_earned_runs
    @int_property_decorator
    def home_bases_on_balls(self):
        """
        Returns an ``int`` of the number of bases the home team registered as
        a result of balls.
        """
        return self._home_bases_on_balls
    @int_property_decorator
    def home_strikeouts(self):
        """
        Returns an ``int`` of the number of times the home team was struck out.
        """
        return self._home_strikeouts
    @int_property_decorator
    def home_plate_appearances(self):
        """
        Returns an ``int`` of the number of plate appearances the home team
        made.
        """
        return self._home_plate_appearances
    @float_property_decorator
    def home_batting_average(self):
        """
        Returns a ``float`` of the batting average for the home team.
        """
        return self._home_batting_average
    @float_property_decorator
    def home_on_base_percentage(self):
        """
        Returns a ``float`` of the percentage of at bats that result in the
        batter getting on base.
        """
        return self._home_on_base_percentage
    @float_property_decorator
    def home_slugging_percentage(self):
        """
        Returns a ``float`` of the slugging percentage for the home team based
        on the number of bases gained per at-bat with bigger plays getting more
        weight.
        """
        return self._home_slugging_percentage
    @float_property_decorator
    def home_on_base_plus(self):
        """
        Returns a ``float`` of the on base percentage plus the slugging
        percentage. Percentage ranges from 0-1.
        """
        return self._home_on_base_plus
    @int_property_decorator
    def home_pitches(self):
        """
        Returns an ``int`` of the number of pitches the home team faced.
        """
        return self._home_pitches
    @int_property_decorator
    def home_strikes(self):
        """
        Returns an ``int`` of the number of times a strike was called against
        the home team.
        """
        return self._home_strikes
    @float_property_decorator
    def home_win_probability_for_offensive_player(self):
        """
        Returns a ``float`` of the overall influence the home team's offense
        had on the outcome of the game where 0.0 denotes no influence and 1.0
        denotes the offense was solely responsible for the outcome.
        """
        return self._home_win_probability_for_offensive_player
    @float_property_decorator
    def home_average_leverage_index(self):
        """
        Returns a ``float`` of the amount of pressure the home team's pitcher
        faced during the game. 1.0 denotes average pressure while numbers less
        than 0 denote lighter pressure.
        """
        return self._home_average_leverage_index
    @float_property_decorator
    def home_win_probability_added(self):
        """
        Returns a ``float`` of the total positive influence the home team's
        offense had on the outcome of the game.
        """
        return self._home_win_probability_added
    @float_property_decorator
    def home_win_probability_subtracted(self):
        """
        Returns a ``float`` of the total negative influence the home team's
        offense had on the outcome of the game.
        """
        return self._home_win_probability_subtracted
    @float_property_decorator
    def home_base_out_runs_added(self):
        """
        Returns a ``float`` of the number of base out runs added by the home
        team.
        """
        return self._home_base_out_runs_added
    @int_property_decorator
    def home_putouts(self):
        """
        Returns an ``int`` of the number of putouts the home team registered.
        """
        return self._home_putouts
    @int_property_decorator
    def home_assists(self):
        """
        Returns an ``int`` of the number of assists the home team registered.
        """
        return self._home_assists
    @float_property_decorator
    def home_innings_pitched(self):
        """
        Returns a ``float`` of the number of innings the home team pitched.
        """
        return self._home_innings_pitched
    @int_property_decorator
    def home_home_runs(self):
        """
        Returns an ``int`` of the number of times the home team gave up a home
        run.
        """
        return self._home_home_runs
    @int_property_decorator
    def home_strikes_by_contact(self):
        """
        Returns an ``int`` of the number of times the home team struck out a
        batter who made contact with the pitch.
        """
        return self._home_strikes_by_contact
    @int_property_decorator
    def home_strikes_swinging(self):
        """
        Returns an ``int`` of the number of times the home team struck out a
        batter who was swinging.
        """
        return self._home_strikes_swinging
    @int_property_decorator
    def home_strikes_looking(self):
        """
        Returns an ``int`` of the number of times the home team struck out a
        batter who was looking.
        """
        return self._home_strikes_looking
    @int_property_decorator
    def home_grounded_balls(self):
        """
        Returns an ``int`` of the number of grounded balls the home team
        allowed.
        """
        return self._home_grounded_balls
    @int_property_decorator
    def home_fly_balls(self):
        """
        Returns an ``int`` of the number of fly balls the home team allowed.
        """
        return self._home_fly_balls
    @int_property_decorator
    def home_line_drives(self):
        """
        Returns an ``int`` of the number of line drives the home team allowed.
        """
        return self._home_line_drives
    @int_property_decorator
    def home_unknown_bat_type(self):
        """
        Returns an ``int`` of the number of home at bats that were not properly
        tracked and therefore cannot be safely placed in another statistical
        category.
        """
        return self._home_unknown_bat_type
    @int_property_decorator
    def home_game_score(self):
        """
        Returns an ``int`` of the starting home pitcher's score determined by
        many factors, such as number of runs scored against, number of strikes,
        etc.
        """
        return self._home_game_score
    @int_property_decorator
    def home_inherited_runners(self):
        """
        Returns an ``int`` of the number of runners a pitcher inherited when he
        entered the game.
        """
        return self._home_inherited_runners
    @int_property_decorator
    def home_inherited_score(self):
        """
        Returns an ``int`` of the number of scorers a pitcher inherited when he
        entered the game.
        """
        return self._home_inherited_score
    @float_property_decorator
    def home_win_probability_by_pitcher(self):
        """
        Returns a ``float`` of the amount of influence the home pitcher had on
        the game's result with 0.0 denoting zero influence and 1.0 denoting he
        was solely responsible for the team's win.
        """
        return self._home_win_probability_by_pitcher
    @float_property_decorator
    def home_base_out_runs_saved(self):
        """
        Returns a ``float`` of the number of runs saved by the home pitcher
        based on the number of players on bases. 0.0 denotes an average value.
        """
        return self._home_base_out_runs_saved
class Boxscores:
    """
    Search for MLB games taking place on a particular day.
    Retrieve a dictionary which contains a list of all games being played on a
    particular day. Output includes a link to the boxscore, and the names and
    abbreviations for both the home teams. If no games are played on a
    particular day, the list will be empty.
    Parameters
    ----------
    date : datetime object
        The date to search for any matches. The month, day, and year are
        required for the search, but time is not factored into the search.
    end_date : datetime object (optional)
        Optionally specify an end date to iterate until. All boxscores
        starting from the date specified in the 'date' parameter up to and
        including the boxscores specified in the 'end_date' parameter will be
        pulled. If left empty, or if 'end_date' is prior to 'date', only the
        games from the day specified in the 'date' parameter will be saved.
    """
    def __init__(self, date, end_date=None):
        # Populated by _find_games() as {'M-D-YYYY': [game_info, ...]}.
        self._boxscores = {}
        self._find_games(date, end_date)
    @property
    def games(self):
        """
        Returns a ``dictionary`` object representing all of the games played on
        the requested day. Dictionary is in the following format::
            {
                'date': [  # 'date' is the string date in format 'MM-DD-YYYY'
                    {
                        'home_name': Name of the home team, such as 'New York
                                     Yankees' (`str`),
                        'home_abbr': Abbreviation for the home team, such as
                                     'NYY' (`str`),
                        'away_name': Name of the away team, such as 'Houston
                                     Astros' (`str`),
                        'away_abbr': Abbreviation for the away team, such as
                                     'HOU' (`str`),
                        'boxscore': String representing the boxscore URI, such
                                    as 'SLN/SLN201807280' (`str`),
                        'winning_name': Full name of the winning team, such as
                                        'New York Yankees' (`str`),
                        'winning_abbr': Abbreviation for the winning team, such
                                        as 'NYY' (`str`),
                        'losing_name': Full name of the losing team, such as
                                       'Houston Astros' (`str`),
                        'losing_abbr': Abbreviation for the losing team, such
                                       as 'HOU' (`str`),
                        'home_score': Integer score for the home team (`int`),
                        'away_score': Integer score for the away team (`int`)
                    },
                    { ... },
                    ...
                ]
            }
        If no games were played on 'date', the list for ['date'] will be empty.
        """
        return self._boxscores
    def _create_url(self, date):
        """
        Build the URL based on the passed datetime object.
        In order to get the proper boxscore page, the URL needs to include the
        requested month, day, and year.
        Parameters
        ----------
        date : datetime object
            The date to search for any matches. The month, day, and year are
            required for the search, but time is not factored into the search.
        Returns
        -------
        string
            Returns a ``string`` of the boxscore URL including the requested
            date.
        """
        return BOXSCORES_URL % (date.year, date.month, date.day)
    def _get_requested_page(self, url):
        """
        Get the requested page.
        Download the requested page given the created URL and return a PyQuery
        object.
        Parameters
        ----------
        url : string
            The URL containing the boxscores to find.
        Returns
        -------
        PyQuery object
            A PyQuery object containing the HTML contents of the requested
            page.
        """
        # PyQuery fetches the page contents itself when given a URL string.
        return pq(url)
    def _get_boxscore_uri(self, url):
        """
        Find the boxscore URI.
        Given the boxscore tag for a game, parse the embedded URI for the
        boxscore.
        Parameters
        ----------
        url : PyQuery object
            A PyQuery object containing the game's boxscore tag which has the
            boxscore URI embedded within it.
        Returns
        -------
        string
            Returns a ``string`` containing the link to the game's boxscore
            page.
        """
        # Strip everything before '/boxes/' and after '.shtml' leaving only
        # the relative URI such as 'SLN/SLN201807280'.
        uri = re.sub(r'.*/boxes/', '', str(url))
        uri = re.sub(r'\.shtml.*', '', uri).strip()
        return uri
    def _parse_abbreviation(self, abbr):
        """
        Parse a team's abbreviation.
        Given the team's HTML name tag, parse their abbreviation.
        Parameters
        ----------
        abbr : string
            A string of a team's HTML name tag.
        Returns
        -------
        string
            Returns a ``string`` of the team's abbreviation.
        """
        # The abbreviation is the path segment following '/teams/' in the
        # anchor's href.
        abbr = re.sub(r'.*/teams/', '', str(abbr))
        abbr = re.sub(r'/.*', '', abbr)
        return abbr
    def _get_name(self, name):
        """
        Find a team's name and abbreviation.
        Given the team's HTML name tag, determine their name, and abbreviation.
        Parameters
        ----------
        name : PyQuery object
            A PyQuery object of a team's HTML name tag in the boxscore.
        Returns
        -------
        tuple
            Returns a tuple containing the name and abbreviation for a team.
            Tuple is in the following order: Team Name, Team Abbreviation.
        """
        team_name = name.text()
        abbr = self._parse_abbreviation(name)
        return team_name, abbr
    def _get_score(self, score_link):
        """
        Find a team's final score.
        Given an HTML string of a team's boxscore, extract the integer
        representing the final score and return the number.
        Parameters
        ----------
        score_link : string
            An HTML string representing a team's final score in the format
            '<td class="right">NN</td>' where 'NN' is the team's score.
        Returns
        -------
        int
            Returns an int representing the team's final score in runs.
        """
        # Strip the surrounding table-cell tags, leaving only the digits.
        score = score_link.replace('<td class="right">', '')
        score = score.replace('</td>', '')
        return int(score)
    def _get_team_details(self, game):
        """
        Find the names and abbreviations for both teams in a game.
        Using the HTML contents in a boxscore, find the name and abbreviation
        for both teams.
        Parameters
        ----------
        game : PyQuery object
            A PyQuery object of a single boxscore containing information about
            both teams.
        Returns
        -------
        tuple
            Returns a tuple containing the names and abbreviations of both
            teams in the following order: Away Name, Away Abbreviation, Away
            Score, Home Name, Home Abbreviation, Home Score.
        """
        links = [i for i in game('td a').items()]
        # The away team is the first link in the boxscore
        away = links[0]
        # The home team is the last (3rd) link in the boxscore
        home = links[-1]
        scores = re.findall(r'<td class="right">\d+</td>', str(game))
        away_score = None
        home_score = None
        # If the game hasn't started or hasn't been updated on sports-reference
        # yet, no score will be shown and therefore can't be parsed.
        if len(scores) == 2:
            away_score = self._get_score(scores[0])
            home_score = self._get_score(scores[1])
        away_name, away_abbr = self._get_name(away)
        home_name, home_abbr = self._get_name(home)
        return (away_name, away_abbr, away_score, home_name, home_abbr,
                home_score)
    def _get_team_results(self, team_result_html):
        """
        Extract the winning or losing team's name and abbreviation.
        Depending on which team's data field is passed (either the winner or
        loser), return the name and abbreviation of that team to denote which
        team won and which lost the game.
        Parameters
        ----------
        team_result_html : PyQuery object
            A PyQuery object representing either the winning or losing team's
            data field within the boxscore.
        Returns
        -------
        tuple
            Returns a tuple of the team's name followed by the abbreviation.
        """
        link = [i for i in team_result_html('td a').items()]
        # If there are no links, the boxscore is likely misformed and can't be
        # parsed. In this case, the boxscore should be skipped.
        if len(link) < 1:
            return None
        name, abbreviation = self._get_name(link[0])
        return name, abbreviation
    def _extract_game_info(self, games):
        """
        Parse game information from all boxscores.
        Find the major game information for all boxscores listed on a
        particular boxscores webpage and return the results in a list.
        Parameters
        ----------
        games : generator
            A generator where each element points to a boxscore on the parsed
            boxscores webpage.
        Returns
        -------
        list
            Returns a ``list`` of dictionaries where each dictionary contains
            the name and abbreviations for both the home and away teams, and a
            link to the game's boxscore.
        """
        all_boxscores = []
        for game in games:
            details = self._get_team_details(game)
            away_name, away_abbr, away_score, home_name, home_abbr, \
                home_score = details
            boxscore_url = game('td[class="right gamelink"] a')
            boxscore_uri = self._get_boxscore_uri(boxscore_url)
            # NOTE(review): two rows marked 'loser' presumably indicates the
            # game has no recorded winner yet (unfinished or tied) — confirm
            # against the sports-reference markup.
            losers = [l for l in game('tr[class="loser"]').items()]
            winner = self._get_team_results(game('tr[class="winner"]'))
            loser = self._get_team_results(game('tr[class="loser"]'))
            # Occurs when the boxscore format is invalid and the game should be
            # skipped to avoid conflicts populating the game information.
            if (len(losers) != 2 and loser and not winner) or \
               (len(losers) != 2 and winner and not loser):
                continue
            # Occurs when information couldn't be parsed from the boxscore or
            # the game hasn't occurred yet. In this case, the winner should be
            # None to avoid conflicts.
            if not winner or len(losers) == 2:
                winning_name = None
                winning_abbreviation = None
            else:
                winning_name, winning_abbreviation = winner
            # Occurs when information couldn't be parsed from the boxscore or
            # the game hasn't occurred yet. In this case, the winner should be
            # None to avoid conflicts.
            if not loser or len(losers) == 2:
                losing_name = None
                losing_abbreviation = None
            else:
                losing_name, losing_abbreviation = loser
            game_info = {
                'boxscore': boxscore_uri,
                'away_name': away_name,
                'away_abbr': away_abbr,
                'away_score': away_score,
                'home_name': home_name,
                'home_abbr': home_abbr,
                'home_score': home_score,
                'winning_name': winning_name,
                'winning_abbr': winning_abbreviation,
                'losing_name': losing_name,
                'losing_abbr': losing_abbreviation
            }
            all_boxscores.append(game_info)
        return all_boxscores
    def _find_games(self, date, end_date):
        """
        Retrieve all major games played on a given day.
        Builds a URL based on the requested date and downloads the HTML
        contents before parsing any and all games played during that day. Any
        games that are found are added to the boxscores dictionary with
        high-level game information such as the home and away team names and a
        link to the boxscore page.
        Parameters
        ----------
        date : datetime object
            The date to search for any matches. The month, day, and year are
            required for the search, but time is not factored into the search.
        end_date : datetime object (optional)
            Optionally specify an end date to iterate until. All boxscores
            starting from the date specified in the 'date' parameter up to and
            including the boxscores specified in the 'end_date' parameter will
            be pulled. If left empty, or if 'end_date' is prior to 'date', only
            the games from the day specified in the 'date' parameter will be
            saved.
        """
        # Set the end date to the start date if the end date is before the
        # start date.
        if not end_date or date > end_date:
            end_date = date
        date_step = date
        while date_step <= end_date:
            url = self._create_url(date_step)
            page = self._get_requested_page(url)
            games = page('table[class="teams"]').items()
            boxscores = self._extract_game_info(games)
            # Keys use the un-padded 'M-D-YYYY' form of the current date.
            timestamp = '%s-%s-%s' % (date_step.month, date_step.day,
                                      date_step.year)
            self._boxscores[timestamp] = boxscores
            date_step += timedelta(days=1)
|
[
"ajfornaca00@gmail.com"
] |
ajfornaca00@gmail.com
|
cc29071f7afda7eb0d4e1f64e8ee6ec5a02e93fd
|
1375f57f96c4021f8b362ad7fb693210be32eac9
|
/kubernetes/test/test_v2alpha1_horizontal_pod_autoscaler_list.py
|
0ec877e8359999bd271c0535d574564c381630b9
|
[
"Apache-2.0"
] |
permissive
|
dawidfieluba/client-python
|
92d637354e2f2842f4c2408ed44d9d71d5572606
|
53e882c920d34fab84c76b9e38eecfed0d265da1
|
refs/heads/master
| 2021-12-23T20:13:26.751954
| 2017-10-06T22:29:14
| 2017-10-06T22:29:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,023
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v2alpha1_horizontal_pod_autoscaler_list import V2alpha1HorizontalPodAutoscalerList
class TestV2alpha1HorizontalPodAutoscalerList(unittest.TestCase):
    """ V2alpha1HorizontalPodAutoscalerList unit test stubs """
    # Swagger-generated stub: only verifies the model class can be
    # instantiated; no attribute values are asserted.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testV2alpha1HorizontalPodAutoscalerList(self):
        """
        Test V2alpha1HorizontalPodAutoscalerList
        """
        # Smoke test: constructing the model with no arguments must not raise.
        model = kubernetes.client.models.v2alpha1_horizontal_pod_autoscaler_list.V2alpha1HorizontalPodAutoscalerList()
if __name__ == '__main__':
    unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
3fbdf0023c9ffb85017d96d5d5f3397f36af5f0c
|
2c6f4f3afbf4ddb4df79877ff3f4fa7748819117
|
/MiniFlash/MiniFlash/wsgi.py
|
b74f588ce0874b2af52db1b99442bc84f5b0461d
|
[] |
no_license
|
juree/MiniFlash
|
ccf8064b3b40fe9a7fa43baee3a8f4c32ba57eed
|
4fc1b957f473c9dc22e3cbeb55c1eaa39f15454d
|
refs/heads/master
| 2020-04-06T05:13:37.637203
| 2014-09-24T13:41:32
| 2014-09-24T13:41:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
WSGI config for MiniFlash project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MiniFlash.settings")
from django.core.wsgi import get_wsgi_application
# Module-level WSGI entry point picked up by the web server.
application = get_wsgi_application()
|
[
"nejc.il92@gmail.com"
] |
nejc.il92@gmail.com
|
e2f9404bcc14f3e91ba1077a654b24afc62b635f
|
12e1fcbeb0bb0c3866e9aa863016ebf5b8cf6fa9
|
/torch/torch_net.py
|
5e72c05b9578ebd1f08c20941d7e85bfb42276f8
|
[] |
no_license
|
Grid-Gudx/sound_classification
|
0eee6c523e5c6732ce4456a297757ef20015753c
|
c79a83b5882c1b386254a33b2ac9ac44d0546f7b
|
refs/heads/main
| 2023-08-18T14:51:33.181996
| 2021-09-15T07:54:53
| 2021-09-15T07:54:53
| 403,004,685
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,857
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 8 13:39:05 2021
@author: gdx
"""
import torch
import torch.nn as nn
import torchvision.models as models
from torchsummary import summary
import torch.nn.functional as F
class CNN(nn.Module):
    """1-D convolutional feature extractor.

    input_shape:  batchsize * 1 * 640
    output_shape: batchsize * 128 * (length // 16)

    Four identical stages, each: Conv -> BN -> Conv -> BN -> ReLU -> MaxPool(2),
    doubling the channel count (16, 32, 64, 128) and halving the length.
    """

    def __init__(self):
        super(CNN, self).__init__()
        layers = []
        # Channel plan per stage; each stage halves the temporal length.
        for c_in, c_out in ((1, 16), (16, 32), (32, 64), (64, 128)):
            layers += [
                nn.Conv1d(in_channels=c_in, out_channels=c_out,
                          kernel_size=3, stride=1, padding=1),
                nn.BatchNorm1d(c_out),
                nn.Conv1d(c_out, c_out, 3, 1, 1),
                nn.BatchNorm1d(c_out),
                nn.ReLU(True),
                nn.MaxPool1d(kernel_size=2, stride=2),
            ]
        self.stage1 = nn.Sequential(*layers)

    def forward(self, x):
        # Cast to float32 so integer/double inputs are accepted, then run the stack.
        return self.stage1(x.float())
class My_resnet18(nn.Module):
    """ImageNet-pretrained ResNet-18 backbone with a small classifier head.

    The backbone keeps all resnet18 children except the final fc layer; the
    head is Dropout -> Linear(512, 128) -> ReLU -> Dropout -> Linear(128, num_class).

    NOTE(review): this class is shadowed by a second ``My_resnet18`` defined
    later in the same module — consider renaming one of them.
    """
    def __init__(self, num_class):
        super(My_resnet18, self).__init__()
        resnet18 = models.resnet18(pretrained=True)
        # BUG FIX: the original referenced the undefined name `mobilenet` here
        # (copy-paste from the mobilenet variant below), raising NameError at
        # construction time. Use the resnet18 instance created above.
        resnet_layer = nn.Sequential(*list(resnet18.children())[:-1])
        self.resnet = resnet_layer
        # print(self.resnet)
        self.fc = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(512, 128),   # 512 matches the backbone's feature width
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
            nn.Linear(128, num_class))

    def forward(self, x):
        x = self.resnet(x)
        x = x.view(x.size(0), -1)  # flatten backbone output to (N, features)
        x = self.fc(x)
        return x
class My_resnet18(nn.Module):
    """MobileNetV2 backbone with adaptive max-pool and a small classifier head.

    NOTE(review): despite its name, this variant wraps mobilenet_v2 (1280-dim
    features), and it redefines/shadows the earlier ``My_resnet18`` class in
    this module — consider renaming.
    """
    def __init__(self, num_class):
        super(My_resnet18, self).__init__()
        # NOTE(review): self.conv is defined but never used in forward();
        # presumably intended to map 1-channel input to 3 channels — confirm.
        self.conv = nn.Conv2d(1, 3, kernel_size=1)
        # resnet18 = models.resnet18(pretrained=True)
        mobilenet = models.mobilenet_v2(pretrained=True)
        # Drop the final classifier, keep the convolutional feature extractor.
        resnet_layer = nn.Sequential(*list(mobilenet.children())[:-1])
        self.resnet = resnet_layer
        # print(self.resnet)
        # Collapse spatial dims so arbitrary input sizes yield (N, 1280, 1, 1).
        self.amg = nn.AdaptiveMaxPool2d((1,1))
        self.fc = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(1280, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
            nn.Linear(128, num_class))
    def forward(self, x):
        x = self.resnet(x)
        x = self.amg(x)
        x = x.view(x.size(0), -1)  # flatten to (N, 1280)
        x = self.fc(x)
        return x
        # out = F.softmax(x,dim=1)
        # if self.training is True: #no activation in training
        #     return x
        # else:
        #     return out
if __name__ == '__main__':
    # Smoke test: build the model, freeze everything except the "fc" head
    # (transfer-learning setup), and run a dummy forward pass.
    net = My_resnet18(num_class=50)
    for name, param in net.named_parameters():
        # print(name)
        param.requires_grad = False
        if "fc" in name:
            param.requires_grad = True
    # Print the bare mobilenet feature extractor for inspection.
    mobilenet = models.mobilenet_v2(pretrained=True)
    resnet_layer = nn.Sequential(*list(mobilenet.children())[:-1])
    print(resnet_layer)
    # fc_features = resnet18.classifier[1].in_features
    # resnet18.classifier[1] = nn.Linear(fc_features, 2)
    dummy_input = torch.randn(5,3,100,100)
    out = net(dummy_input)
    # out = nn.AdaptiveMaxPool2d((1,1))(out)
    print(out.shape)
    summary(net, input_size=(3,100,100))
    # torch.onnx.export(resnet18, dummy_input, "./model_struct/resnet18.onnx")
|
[
"56808862+Grid-Gudx@users.noreply.github.com"
] |
56808862+Grid-Gudx@users.noreply.github.com
|
71428d1c474c76168a12ff0b2d4978909e64c792
|
d0ae2b5919a2b679a62d70069b5b1531771266d7
|
/data/bin/pip3.5
|
853217990c8a2e885ecbc5eae7cee8ec1c301367
|
[] |
no_license
|
felexkemboi/teamdata
|
9ac60c8d7f7fba39afd9ce7ff81075f1771d46ac
|
df683f3561e87c24275967e6ae82fd6da0dc8642
|
refs/heads/master
| 2020-03-24T20:53:50.788654
| 2019-02-05T06:20:36
| 2019-02-05T06:20:36
| 143,002,423
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
5
|
#!/home/limo/Desktop/Django/teamdata/data/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated virtualenv launcher for pip.
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so pip sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"felokemboi10@gmail.com"
] |
felokemboi10@gmail.com
|
07e2328efa9184c76f36bc7feaf15a3d6ab7ec09
|
a2736eb0f99b9316fba5194c8b2cc4b084f3bf13
|
/src/behavior/normal.py
|
421a90f43bcd2bddb876e8b79d11561b7bbe2ab5
|
[] |
no_license
|
tlelepvriercussol/primitiveWS
|
e9c1205821bd5689b66c986024c3027d783b0143
|
67cfa60b60441df11bd8a797f0fc16caf1f0a57c
|
refs/heads/master
| 2021-01-18T13:28:25.076762
| 2016-01-16T10:11:46
| 2016-01-16T10:11:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
import time
import pypot.primitive
class NormalBehave(pypot.primitive.Primitive):
    "Primitive that moves the Poppy robot into its neutral rest posture."
    def run(self):
        poppy = self.robot
        # Engage all motors so position commands take effect.
        for m in poppy.motors:
            m.compliant = False
        # Torso and head go straight to zero (2-second motion).
        for m in poppy.torso + poppy.head:
            m.goto_position(0, 2)
        # Arms take a slightly bent, symmetric rest pose.
        poppy.l_shoulder_y.goto_position(-8, 2)
        poppy.l_shoulder_x.goto_position(10, 2)
        poppy.l_arm_z.goto_position(20, 2)
        poppy.l_elbow_y.goto_position(-25, 2)
        poppy.r_shoulder_y.goto_position(-8, 2)
        poppy.r_shoulder_x.goto_position(-10, 2)
        poppy.r_arm_z.goto_position(-20, 2)
        # wait=True blocks until the last motion finishes before releasing.
        poppy.r_elbow_y.goto_position(-25, 2, wait=True)
        # Leave the arms compliant (hand-movable) afterwards.
        for m in poppy.arms:
            m.compliant = True
|
[
"tom.lelep@free.fr"
] |
tom.lelep@free.fr
|
7954f5dc67cd69db54a0bcb6a07e2a4c98ca869f
|
5e1385521729efb8a5e90af19638dc43c2fadb88
|
/day14/p1.py
|
a052874a1d91c68ab508602bb2193bac03186e6e
|
[] |
no_license
|
pwicks86/adventofcode2017
|
d8557f1496af0393b58e669f7f3c78a95565e871
|
11b5bd06ed900b857e726649c8ad2b8d619c2172
|
refs/heads/master
| 2021-08-30T16:17:15.609771
| 2017-12-18T15:59:21
| 2017-12-18T15:59:21
| 112,669,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
from functools import reduce
def knot_hash(s):
    """Compute the Advent of Code 2017 "knot hash" of string *s*.

    Returns the hash as a binary string: the concatenation of the binary
    representations (no leading-zero padding) of the 16 dense-hash bytes.
    """
    skip_size = 0
    cur_pos = 0
    # Lengths are the ASCII codes of the input plus the standard suffix.
    lens = [ord(c) for c in s]
    lens.extend([17, 31, 73, 47, 23])
    nums = list(range(256))
    for _ in range(64):  # 64 rounds of the knot-tying pass
        for l in lens:
            start = cur_pos
            end = start + l
            # Reverse the circular span of length l in place.
            # BUG FIX: use floor division — `l / 2` is a float under Python 3
            # and made range() raise TypeError.
            for i in range(l // 2):
                b = (end - i - 1) % len(nums)
                a = (start + i) % len(nums)
                nums[a], nums[b] = nums[b], nums[a]
            # Advance the cursor and grow the skip size.
            cur_pos = cur_pos + l + skip_size
            skip_size += 1
    # Condense the 256 sparse values into 16 bytes: XOR over blocks of 16.
    dense = []
    for i in range(16):
        block = nums[i * 16: (i + 1) * 16]
        dense.append(reduce(lambda a, b: a ^ b, block, 0))
    return "".join(("{0:b}".format(b) for b in dense))
instr = "ugkiagan"
full = 0
# AoC 2017 day 14: hash 128 rows keyed "<input>-<row>" and count used (1) squares.
for i in range(128):
    hashstr = instr + "-" + str(i)
    hashed = knot_hash(hashstr)
    full += hashed.count("1")
print(full)
|
[
"pwicks86@gmail.com"
] |
pwicks86@gmail.com
|
76af1bb31f3ea7a072427fe48eccc9c878f5d40d
|
34f508116dc30ebefc5b7c604e28396296dfc438
|
/real_time_curriculum.py
|
99d362f9d25f5d3be461cd1f2d34c4863cfe0567
|
[] |
no_license
|
mlaico/cbas
|
8038a1b1e09da21910b1019f5c5840814e7dad74
|
b1fa2829568e0b9d672930a27181642c494af16f
|
refs/heads/master
| 2020-03-14T04:08:09.334524
| 2018-05-10T15:46:10
| 2018-05-10T15:46:10
| 131,435,161
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,030
|
py
|
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data.sampler import Sampler
import matplotlib.pyplot as plt
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
class MySampler(Sampler):
    """Random-permutation sampler that remembers the order it produced.

    Each call to ``__iter__`` draws a fresh permutation of the dataset
    indices; ``get_idx`` returns the most recent one (``None`` before the
    first iteration).
    """

    def __init__(self, data_source):
        self.data_source = data_source
        self.random_list = None  # last permutation handed out

    def __iter__(self):
        permutation = torch.randperm(len(self.data_source)).tolist()
        self.random_list = permutation
        return iter(permutation)

    def get_idx(self):
        return self.random_list

    def __len__(self):
        return len(self.data_source)
class MyWeightedSampler(Sampler):
    """Sampler drawing ``num_samples`` indices according to ``weights``.

    Indices come from ``torch.multinomial`` (with replacement by default);
    ``get_idx`` reports the draw produced by the latest ``__iter__`` call
    as a plain Python list.
    """

    def __init__(self, weights, num_samples, replacement=True):
        self.weights = torch.DoubleTensor(weights)
        self.num_samples = num_samples
        self.replacement = replacement
        self.random_list = None  # last draw, recorded for get_idx()

    def __iter__(self):
        draw = torch.multinomial(self.weights, self.num_samples, self.replacement)
        self.random_list = draw.numpy().tolist()
        # NB: iterate the tensor itself (elements are 0-dim tensors),
        # matching the original behaviour.
        return iter(draw)

    def get_idx(self):
        return self.random_list

    def __len__(self):
        return self.num_samples
def normal_weights(losses, mu=None):
    """Evaluate a normal pdf over *losses*.

    The variance is always the empirical variance of *losses*; the mean is
    *mu* when given, otherwise the empirical mean.

    losses: array-like of per-sample loss values.
    mu: optional explicit mean for the distribution.
    Returns an ndarray of (unnormalized) pdf values, one per loss.
    """
    losses = np.asarray(losses)
    # BUG FIX: test `mu is None`, not truthiness — an explicit mu of 0.0 was
    # previously ignored and silently replaced by the mean of the losses.
    if mu is None:
        mu = np.mean(losses)
    var = np.var(losses)
    return (1 / (np.sqrt(np.pi * 2 * var))) * np.exp(-((losses - mu) ** 2) / (2 * var))
def real_time(training_set, model, loss_fn, optimizer, deviations):
    """
    training_set: class type 'torchvision.datasets.ImageFolder'
    deviations: a sequence of standard deviations scalars to be applied to the sampling distribution's
    mean to determine the probability of sampling an image with a given loss value. If set to [0...0],
    the probability of sampling each image (based on loss value) will be determined by the normal
    distribution's pdf. If deviation = -1, probability will be dictated by a normal dist with shifted mean
    mean(loss) -1*std(loss). This in effect allows us to shift the difficulty of training images over
    each epoch. Images are sampled with replacement, so we can shift the focus from easy to hard. For
    example: [-1, 0, 1] samples from a normal distribution centered at mean(loss) -1*std(loss),
    mean(loss), then mean(loss) + 1*std(loss) for the training epochs.
    Note: number of epochs == len(deviations) + 1 (+1 for the initial training epoch)
    """
    # Train one epoch and return each sample's negative log-probability of its
    # true class, re-ordered back into dataset index order via the sampler.
    def real_time_curriculum(sampler, loader, net, criterion, optimizer):
        orderings = []
        running_loss = 0.0
        for i, data in enumerate(loader, 0):
            # get the inputs
            inputs, labels = data
            # NOTE(review): bare except — presumably distinguishing tensors
            # from Variables on old PyTorch; consider catching the specific
            # exception type instead.
            try:
                numpy_labels = labels.numpy()
            except:
                numpy_labels = labels.data.numpy()
            # wrap them in Variable
            inputs, labels = Variable(inputs), Variable(labels)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            try:
                numpy_outputs = outputs.numpy()
            except:
                numpy_outputs = outputs.data.numpy()
            # Row-wise softmax negative log-probabilities.
            log_probs = -np.log(np.exp(numpy_outputs)
                                / np.reshape(np.sum(np.exp(numpy_outputs), axis=1), (numpy_labels.shape[0], 1)))
            # NOTE(review): this fancy-indexing takes a (batch, batch) slice and
            # keeps only row 0 — verify it picks each sample's own-label loss.
            orderings += log_probs[:, numpy_labels].tolist()[0]
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # print statistics
            # NOTE(review): loss.data[0] is the pre-0.4 PyTorch idiom
            # (loss.item() on modern versions).
            running_loss += loss.data[0]
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('%5d loss: %.3f' %
                      (i + 1, running_loss / 2000))
                running_loss = 0.0
        # Undo the sampler's shuffling so losses line up with dataset order.
        idx = np.argsort(np.array(sampler.get_idx()))
        culmulative_orderings = np.array(orderings)[idx]
        return culmulative_orderings
    # Epoch 1: uniform random order, recording per-sample losses.
    my_sampler = MySampler(training_set)
    trainloader = torch.utils.data.DataLoader(
        training_set, batch_size=4, shuffle=False, sampler=my_sampler, num_workers=4)
    print("epoch #1")
    real_time_curr = \
        real_time_curriculum(my_sampler, trainloader, model, loss_fn, optimizer)
    epoch = 1
    num_samples = real_time_curr.shape[0]
    # Subsequent epochs: weight sampling by a normal pdf centered at
    # mean(loss) + deviation * std(loss), shifting the difficulty focus.
    for deviation in deviations:
        epoch += 1
        print("epoch #%d" % epoch)
        weights = normal_weights(real_time_curr, np.mean(real_time_curr) + deviation * np.std(real_time_curr))
        weight_denom = np.sum(weights)
        # Guard against an all-zero weight vector (degenerate pdf).
        weight_denom = weight_denom if weight_denom != 0 else (1/1e30)
        weights = weights / weight_denom
        sampler = MyWeightedSampler(weights, num_samples, replacement=True)
        real_time_curriculum_loader = \
            torch.utils.data.DataLoader(training_set, batch_size=4, shuffle=False, sampler=sampler, num_workers=4)
        real_time_curr = \
            real_time_curriculum(sampler, real_time_curriculum_loader, model, loss_fn, optimizer)
|
[
"airmanfair@Davids-MacBook-Pro-5.local"
] |
airmanfair@Davids-MacBook-Pro-5.local
|
044b5038b2dc6e08eec058794d0aab40e361ee05
|
a24be055d6df91f7d9df033b6986c228162b2e39
|
/setup.py
|
4775973d0d595192477681ffb0320c56267fc8da
|
[
"Apache-2.0"
] |
permissive
|
mbukatov/tox-sitepackages-reproducer
|
38c1542665258fbcc7e23080d26608443d1541a2
|
91fe327f5f8d725f70f24598540577f02e9fd559
|
refs/heads/master
| 2021-01-17T17:26:25.033849
| 2017-02-27T10:12:43
| 2017-02-27T10:39:55
| 82,937,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Minimal package definition used to reproduce tox issue #461
# (see the project URL below for the reproducer repository).
setup(
    name='reproducer',
    version='0.0.1',
    author='Martin Bukatovič',
    author_email='mbukatov@redhat.com',
    license='Apache 2.0',
    url='https://github.com/mbukatov/tox-sitepackages-reproducer',
    description='Reproducer for tox issue #461',
    packages=find_packages(exclude=['tests']),
    install_requires=["lxml", "kernelconfig"],
)
|
[
"mbukatov@redhat.com"
] |
mbukatov@redhat.com
|
ec7444a5624a66b29237d928f9130d95128e3637
|
f5bf18ababaa8ca5bed2ab1af4cb0349ec5284e1
|
/Python/urlshortener/url/models.py
|
a889d3b1278ea6469961baa2f143fa7d2f67eccb
|
[] |
no_license
|
rgsriram/Personal-Projects
|
c0377524f8d7e8f56940b24bfeacf243bb603917
|
44c634080e270bdee375cf6fee9ca6c313b80a1b
|
refs/heads/master
| 2023-03-03T16:34:05.542497
| 2023-02-20T13:57:00
| 2023-02-20T13:57:00
| 125,975,423
| 0
| 0
| null | 2022-01-21T19:36:05
| 2018-03-20T07:08:27
|
Java
|
UTF-8
|
Python
| false
| false
| 519
|
py
|
from django.utils import timezone
from mongoengine import *
class URL(Document):
    """MongoDB document mapping a long URL to its shortened form,
    with click statistics and optional expiry."""
    long_url = StringField(max_length=500)
    short_url = StringField(max_length=500)
    # BUG FIX: pass the callable `timezone.now` (not `timezone.now()`), so the
    # timestamp is evaluated per document at save time rather than once at
    # class-definition (import) time.
    created_at = DateTimeField(default=timezone.now, help_text='Url added at')
    no_of_clicks = IntField(default=0)
    domain = StringField(max_length=200, default=None, null=True)
    # In case of having expiring time for urls.
    expire_at = DateTimeField(default=None, help_text='Url expire at')
    is_purged = BooleanField(default=False)
|
[
"srignsh22@gmail.com"
] |
srignsh22@gmail.com
|
2c4990087a05486cd48b5d4446694d10cd2be69c
|
044967f71c1a7b21163eb995165ea1bfcfc9c0fa
|
/text_processor.py
|
490773c6fe588e00b776d0af836de5d60dbe0b21
|
[] |
no_license
|
H-Yin/owl
|
bc12d5388a3408c3c1b4d4048654a9f0770d6697
|
990f50cc083bf3baec01f846edf7b42449e58401
|
refs/heads/master
| 2021-06-23T17:05:30.299153
| 2017-08-17T01:15:25
| 2017-08-17T01:15:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,602
|
py
|
# *--coding:utf-8--*
from datetime import datetime
import traceback;
import nltk
from nltk.stem.lancaster import LancasterStemmer
from nltk.corpus import stopwords, brown
class TextProcessor(object):
    '''
    Extract keyword phrases (noun-phrase chunks) from user-supplied text.
    '''
    def __init__(self, text=""):
        # Raw text supplied by the user.
        self.text = text
    def __get_corrected_pos__(self, words_tagged=[]):
        '''
        Correct word POS tags using the Brown corpus.

        words_tagged: list of (word, pos) pairs from nltk.pos_tag.
        Returns a list of (word, pos) pairs with corrected tags.
        NOTE(review): mutable default argument — harmless here because the
        list is only read, but `None` would be safer.
        '''
        def get_word_pos(word_set=None, word=""):
            '''
            Return the most frequent POS for *word* in *word_set*,
            or "" if the word does not occur there.
            '''
            tags = [_tag for (_word, _tag) in word_set if _word == word.lower()]
            # Word exists in the Brown corpus.
            if tags:
                # Count how often each tag occurs for this word.
                frequent = nltk.FreqDist(tags)
                # Use the most probable POS.
                return frequent.max()
            else:
                return ""
        # Corrected (word, pos) pairs.
        words_corrected_tag = []
        # Tagged words from the Brown corpus (review + editorial categories).
        brown_tagged = brown.tagged_words(categories=['reviews', 'editorial'])
        # English stopword list.
        stopwords_list = stopwords.words('english')
        # Correct each word's POS one by one.
        for word, word_pos in words_tagged:
            # if word_pos[:2] in ['JJ', 'NN', 'VB']:
            # Skip stopwords and verbs/adjectives/numbers.
            if word not in stopwords_list and word_pos[0:2] not in ['VB', 'JJ', 'CD']:
                # Look the word up in the Brown corpus.
                temp_word_pos = get_word_pos(word_set=brown_tagged, word=word)
                if temp_word_pos:
                    # Use the corpus-derived POS.
                    words_corrected_tag.append((word, temp_word_pos))
                else:
                    # Fall back to suffix-based heuristics (medical-term
                    # endings are tagged as proper nouns).
                    patterns = [
                        (r'.*[ts]ion$', 'NNP'),
                        (r'.*om[ae]$', 'NNP'),
                        (r'.*[tsl]is$', 'NNP'),
                        (r'.*[cd]er$', 'NNP'),
                        (r'.*[mnpsxd]ia$', 'NNP'),
                        (r'.*[pt]hy$', 'NNP'),
                        (r'.*asm$', 'NNP'),
                        (r'.*mor$', 'NNP'),
                        (r'.*ncy$', 'NNP'),
                        (r'.*', 'NN') # nouns (default)
                    ]
                    # Create a regexp tagger from the patterns.
                    regexp_tagger = nltk.RegexpTagger(patterns)
                    # Tag the single word with the regexp tagger.
                    temp_word, temp_word_pos = regexp_tagger.tag([word, ])[0]
                    words_corrected_tag.append((temp_word, temp_word_pos))
            else:
                words_corrected_tag.append((word, word_pos))
        return words_corrected_tag
    def __get_chunck__(self, words_tagged=[]):
        '''
        Extract NP (noun-phrase) chunks from the tagged words.
        Returns a list of chunk strings.
        '''
        chunck = []
        if words_tagged:
            try:
                # Grammar: optional adjectives/participles/nouns, at least
                # one noun, optionally followed by a number.
                basic_grammar = r'''NP: {<JJ|VBD|VBG|NN.*>*<NN.*>+<CD>?}'''
                # Build the chunk parser.
                regexp_parser = nltk.RegexpParser(basic_grammar)
                # Parse into a chunk tree.
                result_tree = regexp_parser.parse(words_tagged)
                # Collect every 'NP' subtree.
                for subtree in result_tree.subtrees():
                    if subtree.label() == 'NP':
                        # Join the leaf words into one chunk string.
                        chunck.append(" ".join([word for (word, pos) in subtree.leaves()]))
            except:
                # Best-effort: log the parse failure and return what we have.
                traceback.print_exc()
        return chunck
    def get_keywords(self):
        '''
        Return the de-duplicated keyword list extracted from self.text.
        '''
        keywords = []
        # Tokenize into words.
        words = nltk.word_tokenize(self.text)
        # POS-tag the tokens.
        words_tagged = nltk.pos_tag(words)
        #print words_tagged
        # Correct POS tags against the Brown corpus.
        words_corrected_tag = self.__get_corrected_pos__(words_tagged=words_tagged)
        #print words_corrected_tag
        # Extract NP chunks as keywords.
        keywords.extend(self.__get_chunck__(words_corrected_tag))
        # Remove duplicates (order is not preserved).
        return list(set(keywords))
if __name__ == "__main__":
    # Demo / timing run. NOTE: Python 2 print statements below.
    text = "What made you want to look up Waardenburg syndrome?"
    text2 = "Waardenburg syndrome is usually inherited in an autosomal dominant pattern, which means one copy of the altered gene is sufficient to cause the disorder. "
    start = datetime.now()
    text_processor = TextProcessor(text = text)
    print text_processor.get_keywords()
    print "Total:", datetime.now()-start
|
[
"nywzyinhao@163.com"
] |
nywzyinhao@163.com
|
aed4106be15ee2f60d1b655d64c873bd9310833d
|
31900bdf5648061a3093230711c5394e20b90436
|
/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/additions/porn/voyeurhit.py
|
f04bb7583010ba617ea271edcd837c8a6296914a
|
[] |
no_license
|
linuxbox10/enigma2-plugin-extensions-mediaportal
|
aa6f14ecfc42ce91e22c487070541459a1ab820c
|
e6b388918c186442718e7200e03c83d0db260831
|
refs/heads/master
| 2021-05-01T18:50:50.332850
| 2018-02-10T11:33:48
| 2018-02-10T11:33:48
| 121,009,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,738
|
py
|
# -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2018
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitely stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding execution on hardware, you are permitted to execute this plugin on VU+ hardware
# which is licensed by satco europe GmbH, if the VTi image is used on that hardware.
#
# As an exception regarding modifcations, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
default_cover = "file://%s/voyeurhit.png" % (config.mediaportal.iconcachepath.value + "logos")
class voyeurhitGenreScreen(MPScreen):
    """Genre-selection screen: scrapes the site's category page and lists
    the genres plus three fixed sort entries."""
    def __init__(self, session):
        MPScreen.__init__(self, session, skin='MP_PluginDescr', default_cover=default_cover)
        self["actions"] = ActionMap(["MP_Actions"], {
            "ok" : self.keyOK,
            "0" : self.closeAll,
            "cancel" : self.keyCancel,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft
        }, -1)
        self['title'] = Label("VoyeurHit.com")
        self['ContentTitle'] = Label("Genre:")
        # Block key handling until the genre list has loaded.
        self.keyLocked = True
        self.suchString = ''
        self.genreliste = []
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml
        self.onLayoutFinish.append(self.layoutFinished)
    def layoutFinished(self):
        self.keyLocked = True
        # Fetch the categories page asynchronously.
        url = 'http://voyeurhit.com/categories/'
        getPage(url).addCallback(self.genreData).addErrback(self.dataError)
    def genreData(self, data):
        # Scrape (url, id, title, cover image) tuples from the category HTML.
        kats = re.findall('<a class="tooltip".*?href="(.*?)".*?id="(.*?)">.*?<strong>(.*?)</strong>.*?<span>.*?<img src="(.*?)".*?height=".*?".*?width=".*?">',data, re.S)
        if kats:
            for (url, id, title, img) in kats:
                Title = title.replace(' ','').replace('\n','')
                self.genreliste.append((Title, url, img))
            self.genreliste.sort()
            # Fixed sort views are pinned to the top of the list.
            self.genreliste.insert(0, ("Most Popular", "http://www.voyeurhit.com/most-popular/", default_cover))
            self.genreliste.insert(0, ("Top Rated", "http://voyeurhit.com/top-rated/", default_cover))
            self.genreliste.insert(0, ("Most Recent", "http://voyeurhit.com/latest-updates/", default_cover))
            self.ml.setList(map(self._defaultlistcenter, self.genreliste))
            self.ml.moveToIndex(0)
            self.keyLocked = False
            self.showInfos()
    def showInfos(self):
        # Update the cover art for the currently highlighted genre.
        Image = self['liste'].getCurrent()[0][2]
        CoverHelper(self['coverArt']).getCover(Image)
    def keyOK(self):
        if self.keyLocked:
            return
        # Open the film list for the selected genre.
        Name = self['liste'].getCurrent()[0][0]
        Link = self['liste'].getCurrent()[0][1]
        self.session.open(voyeurhitFilmScreen, Link, Name)
class voyeurhitFilmScreen(MPScreen, ThumbsHelper):
    """Paged film list for one genre; resolves a video URL on selection
    and hands it to the SimplePlayer."""
    def __init__(self, session, Link, Name):
        self.Link = Link
        self.Name = Name
        MPScreen.__init__(self, session, skin='MP_PluginDescr', default_cover=default_cover)
        ThumbsHelper.__init__(self)
        self["actions"] = ActionMap(["MP_Actions"], {
            "ok" : self.keyOK,
            "0" : self.closeAll,
            "cancel" : self.keyCancel,
            "5" : self.keyShowThumb,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft,
            "nextBouquet" : self.keyPageUp,
            "prevBouquet" : self.keyPageDown,
            "green" : self.keyPageNumber
        }, -1)
        self['title'] = Label("VoyeurHit.com")
        self['ContentTitle'] = Label("Genre: %s" % self.Name)
        self['F2'] = Label(_("Page"))
        self['Page'] = Label(_("Page:"))
        # Block key handling until the first page has loaded.
        self.keyLocked = True
        self.page = 1
        self.lastpage = 1
        self.filmliste = []
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml
        self.onLayoutFinish.append(self.loadPage)
    def loadPage(self):
        self.keyLocked = True
        self['name'].setText(_('Please wait...'))
        self.filmliste = []
        cat = self.Link
        # Page URLs are "<genre-url><page-number>/".
        url = "%s%s/" % (self.Link, str(self.page))
        getPage(url).addCallback(self.loadData).addErrback(self.dataError)
    def loadData(self, data):
        # Determine the last page from the pagination markup.
        self.getLastPage(data, 'lass="pagination">(.*?)</ul>')
        # Scrape (url, thumbnail, description, duration) for each film.
        parse = re.search('<div class="block-thumb">(.*?)<div class="pagination">', data, re.S)
        videos = re.findall('<a href="(.*?)" class="thumb">.*?<span class="image"><img.*?src="(.*?)"\s{0,1}alt="(.*?)".*?<span class="dur_ovimg">(.*?)</span>', parse.group(0), re.S)
        for (url,img,desc,dur) in videos:
            self.filmliste.append((decodeHtml(desc), url, img))
        if len(self.filmliste) == 0:
            self.filmliste.append((_('No movies found!'), None, None))
        self.ml.setList(map(self._defaultlistleft, self.filmliste))
        self.ml.moveToIndex(0)
        self.keyLocked = False
        self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1)
        self.showInfos()
    def showInfos(self):
        # Show title and cover art for the highlighted film.
        Title = self['liste'].getCurrent()[0][0]
        Image = self['liste'].getCurrent()[0][2]
        self['name'].setText(Title)
        CoverHelper(self['coverArt']).getCover(Image)
    def keyOK(self):
        if self.keyLocked:
            return
        Link = self['liste'].getCurrent()[0][1]
        if Link == None:
            return
        self.keyLocked = True
        # Fetch the film's detail page to extract the stream URL.
        getPage(Link).addCallback(self.getVideoPage).addErrback(self.dataError)
    def getVideoPage(self, data):
        # The stream URL is embedded as `video_url="..."`; the last match
        # is used (presumably the highest quality — confirm).
        videoPage = re.findall('video_url="(.*?)";', data, re.S)
        if videoPage:
            self.keyLocked = False
            Title = self['liste'].getCurrent()[0][0]
            self.session.open(SimplePlayer, [(Title, videoPage[-1])], showPlaylist=False, ltype='voyeurhit')
|
[
"jaysmith940@hotmail.co.uk"
] |
jaysmith940@hotmail.co.uk
|
0c72b38b1807ffdf0c0bbad486813abdc8249805
|
50dc7b063ca860d89717866ff6f844fef9164683
|
/pwndbg/info.py
|
4bdf15f2b6cc7acb34d683493bd756b46f006cca
|
[
"MIT"
] |
permissive
|
sigma-random/pwndbg
|
3519a0411dd95d254cda9f83ad1faf85c20c335a
|
da88e347f7e1b828f6ebee2753117c666f4dd97d
|
refs/heads/master
| 2021-01-22T13:08:11.112154
| 2015-05-23T01:25:41
| 2015-05-23T01:25:41
| 36,412,538
| 2
| 0
| null | 2015-05-28T03:32:10
| 2015-05-28T03:32:10
|
Python
|
UTF-8
|
Python
| false
| false
| 649
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Runs a few useful commands which are available under "info".
We probably don't need this anymore.
"""
import gdb
import pwndbg.memoize
@pwndbg.memoize.reset_on_exit
def proc_mapping():
    """Return the output of gdb's `info proc mapping`, or '' if unavailable.

    Cached until the inferior exits.
    """
    try:
        return gdb.execute('info proc mapping', to_string=True)
    except gdb.error:
        return ''
@pwndbg.memoize.reset_on_exit
def auxv():
    """Return the output of gdb's `info auxv`, or '' if unavailable.

    Cached until the inferior exits.
    """
    try:
        return gdb.execute('info auxv', to_string=True)
    except gdb.error:
        return ''
@pwndbg.memoize.reset_on_stop
def files():
    """Return the output of gdb's `info files`, or '' if unavailable.

    NOTE(review): unlike the other helpers this cache resets on each stop,
    not on exit — presumably because loaded files can change while the
    inferior runs; confirm this asymmetry is intentional.
    """
    try:
        return gdb.execute('info files', to_string=True)
    except gdb.error:
        return ''
|
[
"riggle@google.com"
] |
riggle@google.com
|
2810e7ca25da235d2ad6cda17b2a2e24a0dbfc5c
|
93d62f8f6525010cf25127e4cc9b8aba7cf03a77
|
/authDjango/authApp/urls.py
|
535af82d63d1b22f1a9b9c3416e2310a046926d5
|
[] |
no_license
|
pourmirza/DRF-React-Auth
|
fcc5e14dfe90d63bd6b236a72630c4f3edd73a2a
|
5a545aac3d52c4415b5010ae47e8df081d80fdae
|
refs/heads/master
| 2023-07-03T03:01:53.554414
| 2021-08-08T15:43:36
| 2021-08-08T15:43:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
from django.urls import path
from .views import set_csrf_token, login_view, CheckAuth
# Auth API routes: CSRF cookie bootstrap, session login, and an
# authenticated "am I logged in" check.
urlpatterns = [
    path('set-csrf/', set_csrf_token, name='Set-CSRF'),
    path('login/', login_view, name='Login'),
    path('test-auth/', CheckAuth.as_view(), name='check-auth')
]
|
[
"53014897+kieronjmckenna@users.noreply.github.com"
] |
53014897+kieronjmckenna@users.noreply.github.com
|
80831e3880c0732805d920c03fde0582f8b7f7b8
|
0afed628ce737295ce370d4c397888e822937b66
|
/expenses/migrations/0024_auto_20190203_1712.py
|
701b6e67d5437e2760ab9f301b0d99b0446b8321
|
[] |
no_license
|
newbusox/expensetracker
|
4cdae836d65604ba9f051bd8da40f0cd3acc0363
|
008cdc75962351e4271358575e5c8007c8263f1d
|
refs/heads/master
| 2022-12-14T04:03:25.207888
| 2020-08-09T04:34:30
| 2020-08-09T04:34:30
| 169,085,053
| 0
| 0
| null | 2022-12-08T01:35:12
| 2019-02-04T13:48:16
|
Python
|
UTF-8
|
Python
| false
| false
| 662
|
py
|
# Generated by Django 2.1.5 on 2019-02-03 22:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Replaces ConstructionDivision.division_choices with a single
    # `division_choice` CharField restricted to three division codes.
    # default=1 (coerced to a string) only back-fills existing rows and is
    # dropped afterwards via preserve_default=False.
    dependencies = [
        ('expenses', '0023_constructiondivision'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='constructiondivision',
            name='division_choices',
        ),
        migrations.AddField(
            model_name='constructiondivision',
            name='division_choice',
            field=models.CharField(choices=[('01', 'Plans/Permits'), ('02', 'Demolition'), ('03', 'Foundation')], default=1, max_length=2),
            preserve_default=False,
        ),
    ]
|
[
"john.errico@gmail.com"
] |
john.errico@gmail.com
|
8f8e62220d50fa2f672496e56c1d1c509e1a83eb
|
8a135e5e41b0648aaf10641705b1f0a83b5e144a
|
/test/core/045-hierarchy-sharedfs-b/local_hierarchy.py
|
d5f4f00e9f49c1a3ec55e1c47d6926deab1811e7
|
[
"Apache-2.0"
] |
permissive
|
fengggli/pegasus
|
43fb182cf02d4545693b9500dc4fd7d08ac61ebc
|
402bdbc67438afb0cdcc5868419cf28b4d229ff4
|
refs/heads/master
| 2020-11-26T16:57:06.577507
| 2019-12-19T18:15:42
| 2019-12-19T18:15:42
| 229,146,362
| 0
| 0
|
Apache-2.0
| 2019-12-19T22:07:49
| 2019-12-19T22:07:47
| null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
#!/usr/bin/env python
import os
import sys
import subprocess
# NOTE: Python 2 script (print statement below).
if len(sys.argv) != 2:
    print "Usage: %s CLUSTER_PEGASUS_HOME" % (sys.argv[0])
    sys.exit(1)
cluster_pegasus_home=sys.argv[1]
# to setup python lib dir for importing Pegasus PYTHON DAX API
#pegasus_config = os.path.join("pegasus-config") + " --noeoln --python"
#lib_dir = subprocess.Popen(pegasus_config, stdout=subprocess.PIPE, shell=True).communicate()[0]
#Insert this directory in our search path
#os.sys.path.insert(0, lib_dir)
from Pegasus.DAX3 import *
# Create a abstract dag
adag = ADAG('local-hierarchy')
daxfile = File('blackdiamond.dax')
dax1 = DAX (daxfile)
#DAX jobs are called with same arguments passed, while planning the root level dax
dax1.addArguments('--output-site local')
dax1.addArguments( '-vvv')
adag.addJob(dax1)
# this dax job uses a pre-existing dax file
# that has to be present in the replica catalog
daxfile2 = File('sleep.dax')
dax2 = DAX (daxfile2)
dax2.addArguments('--output-site local')
dax2.addArguments( '-vvv')
adag.addJob(dax2)
# Add control-flow dependencies
#adag.addDependency(Dependency(parent=dax1, child=dax2))
# Write the DAX to stdout
adag.writeXML(sys.stdout)
|
[
"vahi@isi.edu"
] |
vahi@isi.edu
|
ed390fc5eb173e6c52436f931d003dbf4f303955
|
9c9b908a4697491c040b8d7877c1dacf253b836a
|
/venv/Scripts/easy_install-script.py
|
a145659108160ec633efca8aff4a1045c92dda70
|
[] |
no_license
|
aj1218/SeleniumTest
|
37e796f42417326386002abdd7d7dedc70d4f203
|
cd9375269335133978395aaad9cc24ffc9f35551
|
refs/heads/main
| 2023-01-25T04:54:16.380816
| 2020-12-03T07:29:25
| 2020-12-03T07:29:25
| 319,197,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
#!E:\SeleniumTest\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
# Auto-generated virtualenv launcher for the easy_install console script.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so the tool sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
|
[
"nnczstar@NNCZStar-iMac.local"
] |
nnczstar@NNCZStar-iMac.local
|
71dd61e2b8ce662766a38021747136d4753a097b
|
faf2c4dca39486207868c74f13f69e62fd88e9b1
|
/utilities/generate-header.py
|
0046313cdd1b04984f619ad926cde31e20adab47
|
[] |
no_license
|
Unco3892/FaceRunner
|
922f96fb99d5ea7d2dfff079f6367b64c4e1e262
|
f862262dd19c6c41b510e862b6f7baf8602b5528
|
refs/heads/main
| 2023-02-26T02:28:41.990543
| 2021-02-01T21:06:54
| 2021-02-01T21:06:54
| 335,082,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
# This file was used to generate the header which was not palced directly in the main body as this method leaves some newlines which we desired to remove.
from pyfiglet import Figlet
header = Figlet(font='big')
subheader = Figlet(font='digital')
print(header.renderText("FaceRunner"))
print(subheader.renderText("By Ilia Azizi & Emile Evers"))
|
[
"unco3892@gmail.com"
] |
unco3892@gmail.com
|
1c1078806866c1170018081bf291d0fcc2d4ffe2
|
61f33d36c86e9961800976b927df6597ec47aa87
|
/constant.py
|
6320082fed6404ae75e38bca3c5c3aebac090aa6
|
[
"MIT"
] |
permissive
|
29527/OKExPyWebsocket
|
accfbeca7fbdf75b16d14122023765410bfb6309
|
d084373e0bf18ca533bcc8f4fc1ba051d6be0209
|
refs/heads/main
| 2023-07-17T08:29:12.956397
| 2021-09-02T07:49:16
| 2021-09-02T07:49:16
| 398,232,201
| 0
| 0
|
MIT
| 2021-08-20T10:00:50
| 2021-08-20T10:00:50
| null |
UTF-8
|
Python
| false
| false
| 1,583
|
py
|
class Operation(str):
# 操作
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
ERROR = "error"
# 登陆
LOGIN = "login"
# 下单、撤单、改单相关
ORDER = "order"
CANCEL_ORDER = "cancel-order"
AMEND_ORDER = "amend-order"
class Channel(str):
"""
私有频道中的channel
"""
ACCOUNT = "account" # 账号情况
POSITIONS = "positions" # 持仓情况
BALANCE_AND_POSITION = "balance_and_position" # 账户余额和持仓频道
ORDERS = "orders" # 获取订单信息
ORDERS_ALGO = "orders-algo" # 获取策略委托订单
"""
公共频道中的channel
"""
INSTRUMENTS = "instruments" # 产品数据
TICKERS = "tickers" # 产品行情
OPEN_INTEREST = "open-interest" # 持仓总量
CANDLE1D = "candle1D" # K线
TRADES = "trades" # 交易频道, 获取最近的成交数据
ESTIMATED_PRICE = "estimated-price" # 获取交割合约和期权预估交割/行权价。
MARK_PRICE = "mark-price" # 标记价格频道
MARK_PRICE_CANDLE1D = "mark-price-candle1D" # 标记价格K线频道
PRICE_LIMIT = "price-limit" # 限价频道, 获取交易的最高买价和最低卖价
BOOKS = "books" # 深度频道
OPT_SUMMARY = "opt-summary" # 期权定价频道
FUNDING_RATE = "funding-rate" # 资金费率频道
INDEX_CANDLE30M = "index-candle30m" # 指数K线频道
INDEX_TICKERS = "index-tickers" # 指数行情频道
STATUS = "status" # Status 频道
class Currency(str):
"""
货币
"""
BTC = "BTC"
|
[
"cl193931"
] |
cl193931
|
16d466faf8885448bc782e7158f0b12d78f1ed4c
|
72af42076bac692f9a42e0a914913e031738cc55
|
/Do it! 자료구조와 함께 배우는 알고리즘 입문 – 파이썬 편/chap02/reverse.py
|
36203133733209c57a24c7b4927b5f17ee2d9f8f
|
[] |
no_license
|
goareum93/Algorithm
|
f0ab0ee7926f89802d851c2a80f98cba08116f6c
|
ec68f2526b1ea2904891b929a7bbc74139a6402e
|
refs/heads/master
| 2023-07-01T07:17:16.987779
| 2021-08-05T14:52:51
| 2021-08-05T14:52:51
| 376,908,264
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
# [Do it! 실습 2-6] 뮤터블 시퀀스 원소를 역순으로 정렬
from typing import Any, MutableSequence
def reverse_array(a: MutableSequence) -> None:
"""뮤터블 시퀀스형 a의 원소를 역순으로 정렬"""
n = len(a)
for i in range(n // 2):
a[i], a[n - i - 1] = a[n - i - 1], a[i]
if __name__ == '__main__':
print('배열 원소를 역순으로 정렬합니다.')
nx = int(input('원소 수를 입력하세요.: '))
x = [None] * nx # 원소 수가 nx인 리스트를 생성
for i in range(nx):
x[i] = int(input(f'x[{i}] : '))
reverse_array(x) # x를 역순으로 정렬
print('배열 원소를 역순으로 정렬했습니다.')
for i in range(nx):
print(f'x[{i}] = {x[i]}')
|
[
"goareum7@gmail.com"
] |
goareum7@gmail.com
|
c411bd03168f2f7f1423730ee2d476bd59141dae
|
62b7a34776b851692ee7d9c18070e74f7ffbe13a
|
/app.py
|
4a3ff116facd599a62b4e7e57fa4883b0b4b17af
|
[] |
no_license
|
cathyann/flask-task-manager-project
|
a0153e0d95ce7fc6a18551dad17e7258cc7f0599
|
f344df5422b1f3916a5e6b3e15b40aa5691eb0f1
|
refs/heads/master
| 2023-02-10T20:09:53.277767
| 2021-01-04T09:29:15
| 2021-01-04T09:29:15
| 322,547,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,529
|
py
|
import os
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
@app.route("/")
@app.route("/get_tasks")
def get_tasks():
tasks = list(mongo.db.tasks.find())
print(tasks)
return render_template("tasks.html", tasks=tasks)
@app.route("/register", methods=["GET", "POST"])
def register():
if request.method == "POST":
# check if username already exist in db
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
flash("User already exists")
return redirect(url_for("register"))
register = {
"username": request.form.get("username").lower(),
"password": generate_password_hash(request.form.get("password"))
}
mongo.db.users.insert_one(register)
# put the new user into 'session' cookie
session["user"] = request.form.get("username"). lower()
flash("Registration Successful!")
return redirect(url_for("profile", username=session["user"]))
return render_template("register.html")
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == "POST":
# check if username exists in db
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username"). lower()})
if existing_user:
# ensure hashed password matches user input
if check_password_hash(
existing_user["password"], request.form.get("password")):
session["user"] = request.form.get("username").lower()
flash("Welcome, {}".format(
request.form.get("username")))
return redirect(url_for(
"profile", username=session["user"]))
else:
# invalid password match
flash("Incorrect Username and/or Password")
return redirect(url_for("login"))
else:
# username doesn't exist
flash("Incorrect Username and/or Password")
return redirect(url_for("login"))
return render_template("login.html")
@app.route("/profile/<username>", methods=["GET", "POST"])
def profile(username):
# grab the session user's username from db
username = mongo.db.users.find_one(
{"username": session["user"]})["username"]
if session["user"]:
return render_template("profile.html", username=username)
return redirect(url_for("login"))
@app.route("/logout")
def logout():
# remove user from session cookies
flash("You have been logged out")
session.pop("user")
return redirect(url_for("login"))
@app.route("/add_task", methods=["GET", "POST"])
def add_task():
if request.method == "POST":
is_urgent = "on" if request.form.get("is_urgent") else "off"
task = {
"category_name": request.form.get("category_name"),
"task_name": request.form.get("task_name"),
"task_description": request.form.get("task_description"),
"is_urgent": is_urgent,
"due_date": request.form.get("due_date"),
"created_by": session["user"]
}
mongo.db.tasks.insert_one(task)
flash("Task Successfully Added")
return redirect(url_for("get_tasks"))
categories = mongo.db.categories.find().sort("category_name", 1)
return render_template("add_task.html", categories=categories)
@app.route("/edit_task/<task_id>", methods=["GET", "POST"])
def edit_task(task_id):
if request.method == "POST":
is_urgent = "on" if request.form.get("is_urgent") else "off"
submit = {
"category_name": request.form.get("category_name"),
"task_name": request.form.get("task_name"),
"task_description": request.form.get("task_description"),
"is_urgent": is_urgent,
"due_date": request.form.get("due_date"),
"created_by": session["user"]
}
mongo.db.tasks.update({"_id": ObjectId(task_id)}, submit)
flash("Task Successfully Updated")
task = mongo.db.tasks.find_one({"_id": ObjectId(task_id)})
categories = mongo.db.categories.find().sort("category_name", 1)
return render_template("edit_task.html", task=task, categories=categories)
@app.route("/delete_task/<task_id>")
def delete_task(task_id):
mongo.db.tasks.remove({"_id": ObjectId(task_id)})
flash("Task Successfully Deleted")
return redirect(url_for("get_tasks"))
@app.route("/get_categories")
def get_categories():
categories = list(mongo.db.categories.find().sort("category_name", 1))
return render_template("categories.html", categories=categories)
@app.route("/add_category", methods=["GET", "POST"])
def add_category():
if request.method == "POST":
category = {
"category_name": request.form.get("category_name")
}
mongo.db.categories.insert_one(category)
flash("New Category Added")
return redirect(url_for("get_categories"))
return render_template("add_category.html")
@app.route("/edit_category<category_id>", methods=["GET", "POST"])
def edit_category(category_id):
if request.method == "POST":
submit = {
"category_name": request.form.get("category_name")
}
mongo.db.categories.update({"_id": ObjectId(category_id)}, submit)
flash("Category Successfully Updated")
return redirect(url_for("get_categories"))
category = mongo.db.categories.find_one({"_id": ObjectId(category_id)})
return render_template("edit_category.html", category=category)
@app.route("/delete_category/<category_id>")
def delete_category(category_id):
mongo.db.categories.remove({"_id": ObjectId(category_id)})
flash("Category Successfully Deleted")
return redirect(url_for("get_categories"))
if __name__ == "__main__":
app.run(host=os.environ.get("IP"),
port=int(os.environ.get("PORT")),
debug=True)
|
[
"cathyannsy@gmail.com"
] |
cathyannsy@gmail.com
|
9b5e5c683798764ef15363fbd2fdf392b0afaf11
|
d4a6c3ae3b911cc1f3866c75e5934165d2fafb49
|
/setup.py
|
d3633613e45988ed063dd11f2eb62cc7f8ab5a8e
|
[] |
no_license
|
arthur-a/nativeview
|
a7ab4f7dac9a28f7c253dd5a882fe6b9ad2a6974
|
6fa5a5bff56a36783edb5b4dabc32c386498f2ea
|
refs/heads/master
| 2016-09-01T23:25:05.197254
| 2015-11-10T06:14:48
| 2015-11-10T06:14:48
| 25,459,839
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
from setuptools import setup, find_packages
requires = [
'arrow>=0.4.4',
'translationstring'
]
setup(name='nativeview',
version='0.0',
description='nativeview',
classifiers=[
"Programming Language :: Python",
],
author='Arthur Aminov',
author_email='',
url='',
packages=find_packages('nativeview'),
zip_safe=False,
install_requires=requires
)
|
[
"aminov.a.r@gmail.com"
] |
aminov.a.r@gmail.com
|
23f7e2bcf92de1dc1282410eb1cb672dcdcaf44f
|
66ba6a582d8fd5ed7ba01742ca19b658a59cc28a
|
/crawlers/server/DianPing.py
|
ab1d169cc6c11e502e7692a7b2f83b8b9269ee65
|
[] |
no_license
|
BaymaxGroot/DataCrawler
|
84417a470db3d425296214d0f524668e7bf61f5c
|
43044ba6a51114518b22abcc8c082062c4507d6f
|
refs/heads/master
| 2020-03-19T02:48:38.131595
| 2018-08-13T01:52:50
| 2018-08-13T01:52:50
| 135,662,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,350
|
py
|
import os
import json
import requests
import random
import time
from crawlers.common.db_operation import batch_insert_update_delete,db_query
pro = ['192.155.185.43', '192.155.185.153', '192.155.185.171', '192.155.185.197']
head = {
'user-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
'Cookie': ''
}
def update_domestic_city_info():
"""
update local database domestic city info from dinaping.com
:return:
"""
try:
province_exist = get_exist_province_list()
city_exist = get_exist_city_list()
url = "http://www.dianping.com/ajax/citylist/getAllDomesticCity"
time.sleep(random.random() * 3)
print("DianPing: start crawler city info from {}".format(url))
response = requests.get(url)
province_list_remote = response.json()['provinceList']
city_list_remote = response.json()['cityMap']
sql_list = []
for province_remote in province_list_remote:
isExisted = False
for province_local in province_exist:
if str(province_local['provinceId']) == str(province_remote['provinceId']):
isExisted = True
break
if not isExisted:
sql = """
insert into dp_province
(areaId, provinceId, provinceName)
values
('{areaId}','{provinceId}','{provinceName}')
""".format(areaId=province_remote['areaId'],
provinceId=province_remote['provinceId'],
provinceName=province_remote['provinceName'])
sql_list.append(sql)
batch_insert_update_delete(sql_list)
print("DianPing: success update the province info for local database")
for k in city_list_remote:
sql_list = []
city_list = city_list_remote[k]
for city_remote in city_list:
isExisted = False
for city_local in city_exist:
if str(city_local['cityId']) == str(city_remote['cityId']):
isExisted = True
break
if not isExisted:
sql = """
insert into dp_city
(activeCity, appHotLevel, cityAbbrCode,
cityEnName, cityId, cityLevel, cityName, cityOrderId,
cityPyName, gLat, gLng, overseasCity,
parentCityId, provinceId, scenery, tuanGouFlag)
values
('{activeCity}', '{appHotLevel}', '{cityAbbrCode}',
'{cityEnName}','{cityId}','{cityLevel}', '{cityName}','{cityOrderId}',
'{cityPyName}','{gLat}', '{gLng}','{overseasCity}',
'{parentCityId}', '{provinceId}', '{scenery}','{tuanGouFlag}')
""".format(activeCity=1 if city_remote['activeCity'] else 0,
appHotLevel=city_remote['appHotLevel'],
cityAbbrCode=city_remote['cityAbbrCode'],
cityEnName=city_remote['cityEnName'],
cityId=city_remote['cityId'], cityLevel=city_remote['cityLevel'],
cityName=city_remote['cityName'], cityOrderId=city_remote['cityOrderId'],
cityPyName=city_remote['cityPyName'],
gLat=float(city_remote['gLat']), gLng=float(city_remote['gLng']),
overseasCity=1 if city_remote['overseasCity'] else 0,
parentCityId=city_remote['parentCityId'],
provinceId=city_remote['provinceId'],
scenery=1 if city_remote['scenery'] else 0,
tuanGouFlag=city_remote['tuanGouFlag'])
sql_list.append(sql)
batch_insert_update_delete(sql_list)
print("DianPing: success update the city for provinceId {}".format(k))
print("DianPing: success update the city info for local database")
except:
print("DianPing: Failed to update the domestic city info in local databse")
raise
def get_exist_province_list():
"""
get existed province list information
:return:
"""
sql = """
select provinceId from dp_province
"""
province_list = []
try:
province_list = db_query(sql)
print("DianPing: success to get the province existed in local database")
return province_list
except:
print("DianPing: failed to query the database to get the existed province info list")
raise
def get_exist_city_list():
"""
get existed city list from local database
:return:
"""
sql = """
select cityId from dp_city
"""
city_list = []
try:
city_list = db_query(sql)
print("DianPing: success to get the city list from local database")
return city_list
except:
print("DianPing: failed to query the database to get the city info ")
raise
pass
|
[
"lele.zheng@citrix.com"
] |
lele.zheng@citrix.com
|
21732b6a55f993398d367ce33a847b4d240dc182
|
9db06f1464974bbe20c87009b3f1b345a778df85
|
/test/tiaoxingtu.py
|
398664baf6b9f830683fe1100d2e0fee5d784495
|
[] |
no_license
|
wangyuntao1990/data_analysis
|
e83547404c8bc09acc6d371c035e7eac8218db77
|
380ab4a911773997e7745e470c1715808648545f
|
refs/heads/main
| 2023-03-13T01:40:41.686533
| 2021-03-06T14:09:35
| 2021-03-06T14:09:35
| 341,168,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
# -*- coding: UTF-8 -*-
import matplotlib.pyplot as plt
import seaborn as sns
# 数据准备
x = ['Cat1', 'Cat2', 'Cat3', 'Cat4', 'Cat5']
y = [5, 4, 8, 12, 7]
# 用Matplotlib画条形图
plt.bar(x, y)
plt.show()
# 用Seaborn画条形图
sns.barplot(x, y)
plt.show()
|
[
"253782489@qq.com"
] |
253782489@qq.com
|
a572ce463403833a173873f566d1d844feb927d8
|
4ddd5aafb68cfdfd1afbf8a481711da00a674e13
|
/Some Python Libraries for Data Science/Numpy Basics/numpyBasics.py
|
3da9198138db6a0f1c313e9dc9c3472240e8efc3
|
[] |
no_license
|
MertUsenmez/Some-Python-Libraries-for-Data-Science-
|
b1a1c09a860ebcdc6cb3c09f4c0817975f738c46
|
a7f4a12132b3af5985ec714d5d7a98a81f455244
|
refs/heads/master
| 2020-04-25T17:49:07.225840
| 2019-02-27T17:52:26
| 2019-02-27T17:52:26
| 172,962,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,718
|
py
|
# -*- coding: utf-8 -*-
"""
@author: User
"""
#%% Numpy
import numpy as np
array = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]) #1*15 matrix(vector)
print(array.shape)
a = array.reshape(3,5)
print("Shape:",a.shape)
print("Dimension:",a.ndim)
print("Data type:",a.dtype.name)
print("Size:",a.size)
print(type(a))
array1 = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]])
zeros = np.zeros((3,4)) # memory allocation
zeros[0,0]=5
print(zeros)
zeros[2,3]=8
print(zeros)
np.ones((3,4))
np.empty((3,2))
a = np.arange(10,50,5) # From 10 until 50, +5 a array of increasing.
print(a)
a = np.linspace(5,50,20) # Print 20 numbers between 5 and 50.
print(a)
#%%
#%% Numpy Basic Operations
a = np.array([1,2,3])
b = np.array([3,4,5])
print(a+b)
print(a-b)
print(a**2)
print(np.sin(a))
print(a<2)
a = np.array([[1,2,3],[4,5,6]])
b = np.array([[1,2,3],[4,5,6]])
# Element wise product.
print(a*b)
# Matrix product.
print(a.dot(b.T)) #b.T b'nin transpozudur.
# Exponential of a.
print(np.exp(a))
# Creates a random matrix(5x5) of numbers between 0 to 1.
a = np.random.random((5,5))
print(a.sum())
print(a.max())
print(a.min())
# sum of columns
print(a.sum(axis=0))
# sum of rows
print(a.sum(axis=1))
# square root
print(np.sqrt(a))
# squared
print(a**2)
print(np.add(a,a))
#%% Shape Manipulation
array = np.array([[1,2,3],[4,5,6],[7,8,9]]) # 3x3 matrix
# Let's turn this matrix into a vector, that is, to a 1-dimensional array.
# flatten
a = array.ravel()
# we want to make a 3-dimensional array again.
array2 = a.reshape(3,3)
# Transpose of array2
arrayT = array2.T
print(arrayT)
print(arrayT.shape)
#%% stacking array
array1 = np.array([[1,2],[3,4]])
array2 = np.array([[-1,-2],[-3,-4]])
# arrays(matrix) horizontal merge
array3 = np.hstack((array1,array2))
# arrays(matrix) vertical merge
array4 = np.vstack((array1,array2))
#%% convert array and copy array
# convert
liste = [1,2,3,4]
print(type(liste))
array1 = np.array(liste)
print(type(array1))
liste1 = list(array1)
# copy
a = np.array([1,2,3,4])
b = a
c = a
b[0] = 5 # In this case, a, b, c will change because those are kept as an area in memory. Those are not kept as a value in memory.
# If we want no change those.
d = np.array([1,2,3,4])
e = d.copy()
f = d.copy()
# Using the copy() method, we created new fields for e and d so that the changes do not depend on each other.
e[0] = 5
|
[
"noreply@github.com"
] |
MertUsenmez.noreply@github.com
|
3f21409f67a329d44c7e3650cbed075f5fb08512
|
0bc81c8742e6b7cd4bd3a804ac41cedee637c921
|
/portalweb/services/instancegroupservice.py
|
94912cd53481aa718386c5a0539d490129c83f52
|
[] |
no_license
|
TPAC-MARVL/portal
|
c7ff9445ea340774aaa1890e2b847001e6564379
|
b9660d7b771f105360c814e1a861fb16dc036c2b
|
refs/heads/master
| 2016-09-16T11:25:25.742221
| 2014-11-07T04:44:19
| 2014-11-07T04:44:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,361
|
py
|
from portalweb.system.message import MessageManager
from portalweb.system.message import Message
from portalweb.decorators.transaction import transaction
from portalweb.system.util import Util
from baseservice import BaseService
from portalweb.cloud.entities.instance import InstanceState
class InstanceGroupService(BaseService):
def is_authorized(self, instance, user):
if instance.state == InstanceState.STOPPED:
return False
else:
if user.is_admin:
return True
else:
instances = self.getAllowedToViewInstancesByUser(user)
for ins in instances:
if ins.id == instance.id:
return True
return False
def get_total_instance_number_by_group(self, group):
instance_groups = self._instance_group_manager.get_instance_groups_by_group(group)
return instance_groups.count()
def get_instances_by_group(self, group=None, group_id=None):
if group_id:
group = self._group_manager.getGroupById(group_id)
instance_groups = self._instance_group_manager.get_instance_groups_by_group(group)
instances = []
if instance_groups:
for instance_group in instance_groups:
instances.append(instance_group.instance)
return instances
def get_group_ids_by_insance(self, instance_id=None, instance=None):
instance_groups = self.get_instance_groups_by_instance(instance_id, instance=instance)
group_ids = []
if instance_groups:
for instance_group in instance_groups:
group_ids.append(instance_group.group.id)
return group_ids
def getAllowedToViewInstancesByUser(self, user):
instances = []
instance_groups = self._instance_group_manager.getAllInstancesByUser(user)
public_instances = self._instance_manager.get_public_instances()
old_instances = self._instance_manager.getAllInstancesByUser(user)
if instance_groups:
for instance_group in instance_groups:
instances.append(instance_group.instance)
if public_instances:
for public_instance in public_instances:
instances.append(public_instance)
if old_instances:
for old_instance in old_instances:
instances.append(old_instance)
refined_instances = []
for instance in instances:
if instance.state != InstanceState.STOPPED:
refined_instances.append(instance)
return refined_instances
def get_instance_groups_by_instance(self, instance_id=None, instance=None):
if instance_id:
instance = self._instance_manager.getInstanceById(instance_id)
if instance:
return self._instance_group_manager.get_instance_groups_by_instance(instance)
else:
return []
def edit_instance_groups(self, instance_id, group_ids, creator):
success = self._edit_instance_groups(instance_id, group_ids, creator)
token = ''
if success:
message = Util.get_replaced_text("1 2 been changed successfully.", group_ids, [('1', 'Group'), ('2', 'has')])
token = MessageManager.setMessage(message, Message.SUCCESS)
else:
message = Util.get_replaced_text("Error occurred when changing the 1. Please try again later.", group_ids, [('1', 'group')])
token = MessageManager.setMessage(message, Message.ERROR)
return token
@transaction
def _edit_instance_groups(self, instance_id, group_ids, creator):
instance = self._instance_manager.getInstanceById(instance_id)
groups = self._group_manager.getGroupsByIds(group_ids)
success1 = self._delete_instance_group(instance)
success2 = self._instance_group_manager.create_instance_groups(instance, groups, creator)
if success1 and success2:
return True
else:
return False
def delete_instance_group(self, instance_id=None, instance=None, group_ids=None):
success = self._delete_instance_group(instance_id, instance)
if success:
message = Util.get_replaced_text("1 2 been removed successfully.", group_ids, [('1', 'Group'), ('2', 'has')])
token = MessageManager.setMessage(message, Message.SUCCESS)
else:
message = Util.get_replaced_text("Error occurred when removing the 1. Please try again later.", group_ids, [('1', 'group')])
token = MessageManager.setMessage(message, Message.ERROR)
return token
def _delete_instance_group(self, instance_id=None, instance=None):
if instance_id:
instance = self._instance_manager.getInstanceById(instance_id)
return self._instance_group_manager.remove_instance_group(instance=instance)
def create_instance_group(self, instance_id, group_id, creator):
instance = self._instance_manager.getInstanceById(instance_id)
group = self._group_manager.getGroupById(group_id)
success = self._instance_group_manager.create_instance_group(instance, group, creator)
if success:
if group_id:
token = MessageManager.setMessage("Group has been changed successfully.", Message.SUCCESS)
else:
token = MessageManager.setMessage("Group has been removed successfully.", Message.SUCCESS)
else:
if group_id:
token = MessageManager.setMessage("Error occurred when changing the group. Please try again later.", Message.ERROR)
else:
token = MessageManager.setMessage("Error occurred when removing the group. Please try again later.", Message.ERROR)
return token
|
[
"fxmzb123@gmail.com"
] |
fxmzb123@gmail.com
|
77cc1bcafc3a1e60f67633790891d345b694ea52
|
af4830183cc22bc93b392a8acea72f51a34c3103
|
/genome.py
|
91e7c4d000902763ba3d6865160184ee3e7b2b2e
|
[] |
no_license
|
siekmanj/genetic-code-generation
|
e5e6d0dcc50a777584756ca52c261d87e0daada7
|
ee713ad3f311de681ea3f711b1a901fa8dcda41e
|
refs/heads/master
| 2021-09-20T23:41:57.486326
| 2018-08-16T21:34:59
| 2018-08-16T21:34:59
| 116,884,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
import random
#The only thing that this class should do is generate random 1s and 0s
#The other stuff should be in an intermediary class
#character interpretations
class Genome:
def __init__(self, genome_length):
self.genome = []
self.length = genome_length
random.seed()
for i in range(genome_length):
self.genome.append(random.randint(0, 1))
def sequence(self, bytelength):
nums = []
for i in range(0, len(self.genome), bytelength):
codon = int("".join(str(self.genome[x]) for x in range(i, i+bytelength)), 2)
nums.append(codon)
return nums
|
[
"siekmanj@oregonstate.edu"
] |
siekmanj@oregonstate.edu
|
6ad58bb1abb0f28a69b2fda6bb4a2263fb8ebefd
|
669fb7909b023a5315f3f20edffc9b40d57e28bb
|
/questionnaire_site/urls.py
|
2d80a31a02f571a77eec0358376bdaa11c68d84d
|
[] |
no_license
|
alinacristea/questionnaire_site
|
cc6dd8df56ef085c713f73a8fd49c9510389839e
|
f5302a468d36fbf3fad7004d9b27bd4292bea3de
|
refs/heads/master
| 2020-05-02T02:39:06.841693
| 2016-01-28T18:57:53
| 2016-01-28T18:57:53
| 21,604,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from questionnaire_site import views
# the URLs created for the application
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^admin/', include(admin.site.urls)),
# when the regular expression "r'^view_survey/$'" is matched then
# the "views.viewSurvey" function will be called
url(r'^view_survey/$', views.viewSurvey, name='view_survey'),
url(r'^view_answers/$', views.viewAnswers, name='view_answers'),
url(r'^add_survey/$', views.add_survey, name='add_survey'),
url(r'^add_question/$', views.add_question, name='add_question'),
url(r'^add_participant/$', views.add_participant, name='add_participant'),
url(r'^add_likert_scale_answer/$', views.add_likert_scale_answer, name='add_likert_scale_answer'),
url(r'^add_text_answer/$', views.add_text_answer, name='add_text_answer'),
url(r'^add_boolean_answer/$', views.add_boolean_answer, name='add_boolean_answer'),
url(r'^add_response/$', views.add_response, name='add_response'),
url(r'^survey_stats/$', views.survey_stats, name='survey_stats'),
url(r'^login/$', views.user_login, name='login'),
url(r'^logout/$', views.user_logout, name='logout'),
# url needed to handle the AJAX request
url(r'^delete_question/$', views.delete_question, name='delete-question'),
)
|
[
"alina.andreea.cristea@gmail.com"
] |
alina.andreea.cristea@gmail.com
|
04ae14073ae09d78e57d714a6833da4011531657
|
3108ebd916033991e8a6178e418d9caf2248056c
|
/media.py
|
355d4e0ffefc5d48a2449611cde023c06cb97dcf
|
[] |
no_license
|
demesvardestin/project-movie-trailer
|
ec7eb658e89ec3ed531df7df63b0ac241d5d8194
|
b7ca0d56f37ca4fa59e06c6e4dc96f6baf12e51a
|
refs/heads/master
| 2021-07-10T18:52:54.881675
| 2017-10-12T03:28:13
| 2017-10-12T03:28:13
| 106,588,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
import webbrowser
class Movie():
""" TrailFlix website source-code
Author: Demesvar D. Destin
"""
def __init__(self, title, movie_id, description, poster_image_url, trailer_youtube_url, genre, actors, box_office, release):
# initiate movie attributes
self.title = title
self.movie_id = movie_id
self.description = description
self.poster_image_url = poster_image_url
self.trailer_youtube_url = trailer_youtube_url
self.genre = genre
self.actors = actors
self.box_office = box_office
self.release = release
|
[
"dddemesvar07@gmail.com"
] |
dddemesvar07@gmail.com
|
07d8d878ca7856a8bb99ddf171a7f718602f8ca1
|
718b810b7c8103795f54d934116c60715cbfb44b
|
/pixivlink.py
|
23152c98e2050a4946e5ac8fe99ea09a9171d54b
|
[] |
no_license
|
godofalb/PixivPicGet
|
816c12f741a10477b7a63e0df10a10e7c8fc748f
|
47f65036da75de49a64551ff0de0c49996287b04
|
refs/heads/master
| 2021-01-19T09:31:38.510983
| 2018-10-09T11:05:14
| 2018-10-09T11:05:14
| 100,656,805
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,075
|
py
|
#-*- coding:utf-8 -*-
from httplib import HTTPException
from urllib2 import HTTPError
import cookielib, urllib2,urllib
from Cookie import CookieError
import re
import time
import types
import os
#import ssl
#关闭ssl验证
#ssl._create_default_https_context = ssl._create_unverified_context
false=False
true=True
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class PixivLinker():
def __init__(self,filepath="G:\\Hp"):
print "InitStart"
#初始化
self.filePath=filepath
self.newPath=filepath+'\\New'
self.mynewPath=filepath+'\\NewMy'
self.recommonedPath=filepath+'\\Recommoned'
self.authorPath=filepath+'\\Author'
#self.searchPath=filepath+'\\Search'
self.mkDir(self.mynewPath)
self.mkDir(self.newPath)
self.mkDir(self.recommonedPath)
self.mkDir(self.authorPath)
#self.mkDir(self.searchPath)
self.mainUrl="https://www.pixiv.net/"
self.newUrl="https://www.pixiv.net/bookmark_new_illust.php?p={0}"
self.authorUrl="https://www.pixiv.net/member_illust.php?id={0}&type=all&p={1}"
#self.searchUrl="https://www.pixiv.net/search.php?word={0}&order=date_d&p={1}"
self.size=r'600x600'
self.orgsize=r'150x150'
#self.delete=re.compile(r'_master\d*')
self.OrigingalUrl="https://www.pixiv.net/member_illust.php?mode=medium&illust_id={0}"
#读取cookie
self.cookie=cookielib.MozillaCookieJar()
self.handle=urllib2.HTTPCookieProcessor(self.cookie)
self.opener = urllib2.build_opener(self.handle)
#用来获得文件名的正则表达式
self.namefinder=re.compile('/[a-z,A-Z,_,0-9]*?.jpg')
self.sizeF=re.compile(self.orgsize)
self.findfilename=re.compile(r'/.*?\..*?', re.S)
self.findworkplace=re.compile(r'<div class="_layout-thumbnail">(.*?)</div>', re.S)
self.finder=re.compile(r'(https://i.pximg.net/img-original/.*?)"', re.S)
#self.finder=re.compile(r'<img.*?data-src="(https://i.pximg.net/img-original/.*?)".*?class="original-image".*?>', re.S)
self.Header= { 'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'
,'Host': 'i.pximg.net'
,'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3'
,'Accept-Encoding': 'gzip, deflate, br'
,'Referer': 'https://www.pixiv.net/'
,'DNT': '1'
,'Connection': 'keep-alive'
,'Accept':'*/*'
}
self.domainfinder=re.compile(r'://(.*?)/')
self.username=''
self.password=''
self.maxList=50
def UrlChange(self,url):
domain=re.search(self.domainfinder,url)
if domain:
domain=domain.group(1)
return url.replace(domain,self.pixivDNS[domain])
return url
#登入
def LoginIn(self):
url="https://accounts.pixiv.net/api/login?lang=zh"
loginUrl="https://accounts.pixiv.net/login"
Header= { 'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'
,'Host': 'accounts.pixiv.net'
,'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3'
,'Accept-Encoding': 'gzip, deflate, br'
,'Referer': 'https://accounts.pixiv.net/login'
,'DNT': '1'
,'Connection': 'keep-alive'
,'Accept':'*/*'
}
req1=self.opener.open(loginUrl)
content=req1.read()
pattern = re.compile(r'<input.*?"post_key".*?value="(.*?)"')
match = pattern.search(content)
if match:
datas={'pixiv_id':self.username,'password':self.password
,'post_key':match.group(1)
,'ref':'wwwtop_accounts_index'
,'return_to':'https://www.pixiv.net/'
,'source':'pc'
}
postdata = urllib.urlencode(datas)
req=urllib2.Request(url,headers=Header,data=postdata)
res=self.opener.open(req)
#创建新目录
def mkDir(self,path):
path = path.strip()
#注意要添加设置文件编码格式
isExists=os.path.exists(path.decode('utf-8'))
if not isExists:
os.makedirs(path.decode('utf-8'))
return True
else:
print "Exists"
return False
def dealpic(self,pid):
print 'saving'
reallink=[]
filename=[]
try:
print 'finding'
tempres=urllib2.Request(self.OrigingalUrl.format(pid))
print self.OrigingalUrl.format(pid)
time.sleep(1)
res = self.opener.open(tempres)
content=self.finder.search(res.read()).group(1)
reallink.append(content)
filename.append(self.findfilename.search(content))
except Exception,e:
print e.message
    #Save an image (translated from: 保存图片)
    def savePic(self,path,filename,link,name,pid='',date=''):
        """Download the image behind *link* into *path*.

        The thumbnail URL is rewritten to the original-resolution URL; the
        ``.jpg`` download is retried as ``.png`` on HTTPError (pixiv stores
        originals in either format).  The file is first written under *name*;
        if that fails (e.g. illegal characters in the title) it falls back to
        *filename*.  Returns the path of the written file.
        """
        #reallink=link.replace('24-x24-')
        '''
        if pid:
            try:
                print 'finding'
                tempres=urllib2.Request(self.OrigingalUrl.format(pid))
                print self.OrigingalUrl.format(pid)
                time.sleep(3)
                res = self.opener.open(tempres)
                reallink=self.finder.search(res.read()).group(1)
                name+="."+reallink[-3:]
            except Exception,e:
                print e.message
        else:
            reallink=self.sizeF.sub(self.size,link)
            name+='.jpg'
        if not reallink:
            reallink=self.sizeF.sub(self.size,link)
            name+='.jpg'
        '''
        # Rewrite the 240x240 thumbnail URL into the original-image URL.
        reallink=link.replace('c/240x240/img-master','img-original').replace('_master1200','')
        print reallink,name
        try:
            name=name+'.jpg'
            request=urllib2.Request(reallink,headers=self.Header)
            response = self.opener.open(request)
        except HTTPError,e:
            # Original was not a JPEG -- retry the same path as PNG.
            reallink=reallink.replace('jpg','png')
            name=name.replace('jpg','png')
            print('Try PNG')
            request=urllib2.Request(reallink,headers=self.Header)
            response = self.opener.open(request)
        print reallink,name
        try:
            print path+'\\'+date+name
            file=open((path+'\\'+date+name),"wb")
           # for byte in response.read():
            file.write( response.read())
            file.close()
            return path+'\\'+date+name
        except Exception,e:
            # Fallback: the artwork title may not be a valid filename;
            # use the server-side filename instead.
            print e.message
            print path+'\\'+date+filename+'.jpg'
            file=open((path+'\\'+date+filename+'.jpg'),"wb")
           # for byte in response.read():
            file.write( response.read())
            file.close()
            return path+'\\'+date+filename+'.jpg'
    #Save a metadata text file (translated from: 保存文本)  example image URL: https://i.pximg.net/img-original/img/2017/09/15/19/41/41/64969252_p0.jpg
    def saveTxt(self,path,name,linkname,tag,author,aid,pid,date=''):
        """Write a small .txt file describing one artwork (title, filename,
        artwork id, author, author id, tags) next to the downloaded image.

        Falls back to *linkname* as the filename when *name* contains
        characters the filesystem rejects.
        """
        try:
            file=open((path+'\\'+date+name+'.txt').decode('utf-8'),'w')
            file.write("作品名:{0}\n文件名:{1}\n作品id:{2}\n作者:{3} \n作者id:{4}\n标签:{5}\n".format(name,linkname,pid,author,aid,tag))
            file.close()
        except:
            file=open((path+'\\'+date+linkname+'.txt').decode('utf-8'),'w')
            file.write("作品名:{0}\n文件名:{1}\n作品id:{2}\n作者:{3} \n作者id:{4}\n标签:{5}\n".format(name,linkname,pid,author,aid,tag))
            file.close()
    #Save the "recommended" works (translated from: 保存推荐内容)
    def saveRec(self,contents,NewDate=True):
        """Parse recommended-illustration <li> items out of each HTML chunk in
        *contents* and save every image plus its metadata text file.

        When *NewDate* is true, files go into a dated subfolder of
        self.recommonedPath.
        """
        #0-url 1-pid 2-tag 3-aid 4-title 5-username
        pattern=re.compile(r'<li.*?class="image-item".*?data-src="(.*?)".*?data-id="(.*?)".*?data-tags="(.*?)".*?data-user-id="(.*?)".*?<h1 class="title gtm-recommended-illusts" title="(.*?)">.*?data-user_name="(.*?)".*?</li>',re.S)
        FPath= self.recommonedPath
        if NewDate:
            FPath=FPath+'\\'+time.strftime('%Y-%m-%d',time.localtime(time.time()))
            self.mkDir(FPath)
        for content in contents:
            for s in re.findall(pattern,content):
                print s[0],s[1],s[2],s[3],s[4],s[5]
                self.saveTxt(
                    FPath,
                    s[4],
                    self.namefinder.search(s[0]).group()[1:],
                    s[2],
                    s[5],
                    s[3],
                    s[1])
                self.savePic(
                    FPath,
                    self.namefinder.search(s[0]).group()[1:],
                    s[0],
                    s[4],
                    s[1])
    #Save the "everyone's new works" section (translated from: 保存大家更新内容)
    def saveNew(self,contents,NewDate=True):
        """Parse everyone-new-illustration <li> items out of each HTML chunk
        in *contents* and save every image plus its metadata text file.

        When *NewDate* is true, files go into a dated subfolder of
        self.newPath.  Same structure as saveRec, only the regex class name
        and target folder differ.
        """
        #0-url 1-pid 2-tag 3-aid 4-title 5-username
        pattern=re.compile(r'<li.*?class="image-item".*?data-src="(.*?)".*?data-id="(.*?)".*?data-tags="(.*?)".*?data-user-id="(.*?)".*?<h1 class="title gtm-everyone-new-illusts" title="(.*?)">.*?data-user_name="(.*?)".*?</li>',re.S)
        FPath=self.newPath
        if NewDate:
            FPath=FPath+'\\'+time.strftime('%Y-%m-%d',time.localtime(time.time()))
            self.mkDir(FPath)
        for content in contents:
            for s in re.findall(pattern,content):
                print s[0],s[1],s[2],s[3],s[4],s[5]
                self.saveTxt(
                    FPath,
                    s[4],
                    self.namefinder.search(s[0]).group()[1:],
                    s[2],
                    s[5],
                    s[3],
                    s[1])
                self.savePic(
                    FPath,
                    self.namefinder.search(s[0]).group()[1:],
                    s[0],
                    s[4],
                    s[1])
    #Save updates from followed artists (translated from: 保存订阅更新内容)
    def saveMyNew(self,content):
        """Parse followed-artist <li> items out of the HTML string *content*
        and save every image plus its metadata text file into self.mynewPath.
        """
        #0-url 1-pid 2-tag 3-aid 4-title 5-username
        pattern=re.compile(r'<li.*?class="image-item".*?data-src="(.*?)".*?data-id="(.*?)".*?data-tags="(.*?)".*?data-user-id="(.*?)".*?<h1 class="title" title="(.*?)">.*?data-user_name="(.*?)".*?</li>',re.S)
        for s in re.findall(pattern,content):
            print s[0],s[1],s[2],s[3],s[4],s[5]
            self.saveTxt(self.mynewPath,
                s[4],
                self.namefinder.search(s[0]).group()[1:],
                s[2],
                s[5],
                s[3],
                s[1])
            self.savePic(self.mynewPath,
                self.namefinder.search(s[0]).group()[1:],
                s[0],
                s[4],
                s[1])
    #Save one artist's works (translated from: 保存某作家的内容)
    def saveAuthor(self,content,path,aname):
        """Parse the artwork <li> items of one artist's listing page *content*
        and save every image plus a metadata text file into *path*.

        *aname* is the artist's display name (the listing markup has no
        data-user_name attribute, so it is passed in by the caller).
        """
        #0-url 1-pid 2-tag 3-aid 4-title 5-username
        pattern=re.compile(r'<li.*?class="image-item".*?data-src="(.*?)".*?data-id="(.*?)".*?data-tags="(.*?)".*?data-user-id="(.*?)".*?<h1 class="title" title="(.*?)">.*?</li>',re.S)
        for s in re.findall(pattern,content):
            print s[0],s[1],s[2],s[3],s[4]
            self.saveTxt(path,
                s[4],
                self.namefinder.search(s[0]).group()[1:],
                s[2],
                aname,
                s[3],
                s[1])
            self.savePic(path,
                self.namefinder.search(s[0]).group()[1:],
                s[0],
                s[4],
                s[1])
    #Fetch the homepage (translated from: 获得主页信息)
    def getMain(self,save=False,wantNew=False,wantRec=True,NewDate=True):
        """Download the logged-in pixiv homepage and save the sections asked
        for: recommended works (*wantRec*) and/or everyone's new works
        (*wantNew*).  With *save* the raw HTML is dumped to HtmlTmp.txt.
        """
        try:
            print "GetMain..."
            print self.mainUrl
            req = urllib2.Request(self.mainUrl)
            response = self.opener.open(req)
            # Persist session cookies for subsequent runs.
            self.cookie.save(filename='cookies.txt', ignore_discard=True, ignore_expires=True)
            content=response.read()
            if save:
                file=open('HtmlTmp.txt','w')
                file.write(content)
                file.close()
            if wantRec:
                recommonedpattern=re.compile(r'<section class="item recommended-illusts " data-name="recommended_illusts">.*?</section>',re.S)
                self.saveRec(re.findall(recommonedpattern,content),NewDate)#recommonedpattern.search(content).group())
            if wantNew:
                newpattern=re.compile(r'<section class="item everyone-new-illusts" data-name="everyone_new_illusts">.*?</section>',re.S)
                self.saveNew(re.findall(newpattern,content),NewDate)#newpattern.search(content).group())
            print "Over"
        except CookieError,e:
            print e.reason
        except Exception, e:
            print e.message
    def saveList(self,ids,path,header,tt):
        """Fetch illust_list.php for the given illustration *ids* and save
        each returned work (image + metadata text) into *path*.

        *tt* is the CSRF token scraped from the discovery page; *header* the
        HTTP headers to send.  Unicode-escaped tag/title/user strings in the
        response are decoded before saving.
        """
        # Example request URL shape:
        # https://www.pixiv.net/rpc/illust_list.php?illust_ids=<id>%2C<id>%2C...&page=discover&exclude_muted_illusts=1&tt=<token>
        listurl='https://www.pixiv.net/rpc/illust_list.php?illust_ids='
        b=True
        for id in ids:
            if b:
                b=False
                listurl+='{0}'.format(id)
            else:
                # %2C is a URL-encoded comma separating the ids.
                listurl+='%2C{0}'.format(id)
        listurl+='&page=discover&exclude_muted_illusts=1&tt=%s'%(tt)
        req=urllib2.Request(listurl,headers=header)
        response=self.opener.open(req)
        content=response.read()
        # (an abandoned regex-based parse of the JSON response used to live here)
        for match in re.findall(r'{.*?}',content,re.S):
            try:
                time.sleep(1)
                # SECURITY(review): eval() on server-supplied text can execute
                # arbitrary code -- this should be json.loads instead.
                jsons=eval(match)
                '''
                for k in jsons:
                    if type(jsons[k])==types.StringType:
                        jsons[k]=jsons[k].decode('unicode-escape')
                    if types(jsons[k])==types.ListType:
                '''
                tags=''
                for tag in jsons['tags']:
                    if tag[0]=='\\' and tag[1]=='u' :
                        tags+=tag.decode('unicode-escape')+' , '
                    else:
                        tags+=tag+' , '
                jsons['tags']=tags
                for k in jsons:
                    if type(jsons[k])==types.StringType:
                        if jsons[k][0]=='\\' and jsons[k][1]=='u' :
                            jsons[k]=jsons[k].decode('unicode-escape')
                #path,name,linkname,tag,author,aid,pid,date=''
                '''
                print jsons['illust_title']
                print self.namefinder.search(jsons['url']).group()[1:]
                print jsons['tags']
                print jsons['user_name']
                print jsons['illust_user_id']
                print jsons['illust_id']
                print jsons['url']
                print jsons['illust_page_count']
                print re.sub(r'\\/',r'/',jsons['url'])
                '''
                self.saveTxt(path,jsons['illust_title'],self.namefinder.search(jsons['url']).group()[1:],jsons['tags'],jsons['user_name'],jsons['illust_user_id'],jsons['illust_id'])
                #self,path,filename,link,name,pid='',date='' path,filename,link,name,pid='',date=''
                #savePic(self,path,filename,link,name,pid='',date=''):
                self.savePic(path, self.namefinder.search(jsons['url']).group()[1:], re.sub(r'\\/','/',jsons['url']),jsons['illust_title'],pid=jsons['illust_id'])
            except Exception,e:
                print e
    def getRecommend(self,num=10):
        """Fetch up to *num* recommended illustration ids from the discovery
        recommender endpoint, then download them in batches of self.maxList
        via saveList into a dated subfolder of self.recommonedPath.
        """
        req=urllib2.Request('https://www.pixiv.net/discovery')
        response=self.opener.open(req)
        content=response.read()
        # The discovery page embeds the CSRF token needed by the RPC endpoints.
        tokenfinder=re.compile(r'pixiv.context.token = "(.*?)"', re.S)
        tokenmatch = re.search(tokenfinder, content)
        Header= { 'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0'
                 ,'Host': 'www.pixiv.net'
                 ,'Referer': 'https://www.pixiv.net/discovery'
                 ,'DNT': '1'
                 ,'Accept':'*/*'
                 }
        tt=tokenmatch.group(1)
        datareq=urllib2.Request('https://www.pixiv.net/rpc/recommender.php?type=illust&sample_illusts=auto&num_recommendations={0}&page=discovery&mode=all&tt={1}'.format(num,tt),headers=Header)
        datasresponse=self.opener.open(datareq)
        data=datasresponse.read()
        FPath=self.recommonedPath+'\\'+time.strftime('%Y-%m-%d',time.localtime(time.time()))
        self.mkDir(FPath)
        # Batch the returned numeric ids so each illust_list request stays
        # under self.maxList entries; the trailing partial batch is flushed
        # after the loop.
        i=0
        L=[]
        for match in re.findall(r'\d+',data,re.S):
            i+=1
            L.append(match)
            if i>=self.maxList:
                print 'sending-------------------'
                self.saveList(L,FPath,Header,tt)
                i=0
                L=[]
        self.saveList(L,FPath,Header,tt)
    #Fetch updates from followed artists (translated from: 获得我的更新)
    def getMyNew(self,save=False,MaxPage=1):
        """Download pages 1..MaxPage of the followed-artists feed and save
        every work via saveMyNew.  With *save* each page's raw HTML is
        dumped to HtmlTmp<i>.txt.
        """
        try:
            print "GetMyNew..."
            for i in range(1,MaxPage+1):
                req = urllib2.Request(self.newUrl.format(i))
                print self.newUrl.format(i)
                response = self.opener.open(req)
                self.cookie.save(filename='cookies.txt', ignore_discard=True, ignore_expires=True)
                content=response.read()
                if save:
                    file=open('HtmlTmp{0}.txt'.format(i),'w')
                    file.write(content)
                    file.close()
                self.saveMyNew(content)
            print "Over"
        except CookieError,e:
            print e.reason
    #Fetch one artist's works (translated from: 获得某作者的信息)
    def getAuthor(self,aid,save=False,MaxPage=1):
        """Download pages 1..MaxPage of artist *aid*'s listing and save all
        works into self.authorPath\\<artist name>.

        The artist's display name is scraped from page 1 and used as the
        folder name.  With *save* each page's raw HTML is dumped to
        HtmlTmp<i>.txt.
        """
        try:
            print 'getAuthor...'
            Aname='UnKnown'
            path=''
            for i in range(1,MaxPage+1):
                req = urllib2.Request(self.authorUrl.format(aid,i))
                print self.authorUrl.format(aid,i)
                response = self.opener.open(req)
                self.cookie.save(filename='cookies.txt', ignore_discard=True, ignore_expires=True)
                content=response.read()
                if save:
                    file=open('HtmlTmp{0}.txt'.format(i),'w')
                    file.write(content)
                    file.close()
                if i==1:
                    # The artist name only needs to be scraped once.
                    pattern=re.compile(r'<a.*?class="user-name".*?>(.*?)</a>',re.S)
                    Aname=pattern.search(content).group(1)
                    print Aname
                    path=self.authorPath+'\\'+Aname
                    print path
                    self.mkDir(path)
                self.saveAuthor(content,path,Aname)
            print "Over"
        except CookieError,e:
            print e.reason
if __name__=='__main__':
    # Script entry point: log-in is skipped here (cookies are loaded in
    # __init__); fetch the homepage and save only the recommended section.
    pass
    p=PixivLinker()
    #p.getAuthor('4239212',False, 9)#'8189060'
    #p.getMyNew(False, 1)
    p.getMain(save=True,wantNew=False,wantRec=True)
|
[
"xwl992365231@163.com"
] |
xwl992365231@163.com
|
34b03cd2b0561302a442746036964d970147b488
|
aff33d74832ac5c4ba271d1735f41bf9f7048e9e
|
/JustesSite/wsgi.py
|
f2363782918d913441d82df70285eab7934af771
|
[] |
no_license
|
JustinaSavickaite/DjangoGirlsBlog
|
1daa0277f5f77fe1a62591687ed11e22426cbc8b
|
edcd5340f7cb167000c7492966d289922b1cd588
|
refs/heads/master
| 2020-05-27T08:13:16.380610
| 2019-05-25T13:02:49
| 2019-05-25T13:02:49
| 188,541,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for JustesSite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "JustesSite.settings")
application = get_wsgi_application()
|
[
"justinaa.savickaite@gmail.com"
] |
justinaa.savickaite@gmail.com
|
59b9c718b121c393ba492ef0fab66fd9222c0f25
|
4c0b1c2477a1c1d9f35d3d1cdaccde8d11c5bf0c
|
/oz/bandit/actions.py
|
6e441bf15b15f997cae021d8030b3884fd0c00b3
|
[
"BSD-3-Clause"
] |
permissive
|
dailymuse/oz
|
5307af4066c054d5f027ba370ec18e4db3b91d22
|
f4fec5078bba3258a15504247394339a100487de
|
refs/heads/develop
| 2021-01-17T01:21:24.048074
| 2020-09-25T20:15:06
| 2020-09-25T20:15:06
| 12,148,880
| 36
| 4
|
BSD-3-Clause
| 2020-09-26T00:32:12
| 2013-08-16T02:28:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,270
|
py
|
from __future__ import absolute_import, division, print_function, with_statement, unicode_literals
import oz
import datetime
import oz.redis
import oz.bandit
from tornado import escape
@oz.action
def add_experiment(experiment):
    """Create a brand-new bandit experiment named *experiment*."""
    conn = oz.redis.create_connection()
    oz.bandit.add_experiment(conn, experiment)
@oz.action
def archive_experiment(experiment):
    """Mark the experiment named *experiment* as archived."""
    conn = oz.redis.create_connection()
    oz.bandit.Experiment(conn, experiment).archive()
@oz.action
def add_experiment_choice(experiment, choice):
    """Register *choice* as a new arm of the experiment *experiment*."""
    conn = oz.redis.create_connection()
    oz.bandit.Experiment(conn, experiment).add_choice(choice)
@oz.action
def remove_experiment_choice(experiment, choice):
    """Drop the arm *choice* from the experiment *experiment*."""
    conn = oz.redis.create_connection()
    oz.bandit.Experiment(conn, experiment).remove_choice(choice)
@oz.action
def get_experiment_results():
    """Compute, persist (in redis) and print the results of every experiment."""
    conn = oz.redis.create_connection()
    for exp in oz.bandit.get_experiments(conn):
        # Recompute the winning choice before reporting it.
        exp.compute_default_choice()
        chi_squared, is_confident = exp.confidence()
        print("%s:" % exp.name)
        print("- creation date: %s" % exp.metadata["creation_date"])
        print("- default choice: %s" % exp.default_choice)
        print("- chi squared: %s" % chi_squared)
        print("- confident: %s" % is_confident)
        print("- choices:")
        for arm in exp.choices:
            print("  - %s: plays=%s, rewards=%s, performance=%s" % (arm.name, arm.plays, arm.rewards, arm.performance))
@oz.action
def sync_experiments_from_spec(filename):
    """Reconcile the experiments stored in redis with a JSON spec file.

    *filename* points at a JSON document mapping experiment names to their
    choice lists, e.g.::

        {
            "experiment 1": ["choice 1", "choice 2", "choice 3"],
            "experiment 2": ["choice 1", "choice 2"]
        }
    """
    conn = oz.redis.create_connection()
    with open(filename, "r") as spec_file:
        spec = escape.json_decode(spec_file.read())
    oz.bandit.sync_from_spec(conn, spec)
|
[
"simonson@gmail.com"
] |
simonson@gmail.com
|
73e6a623d85f20031eb6facf7519a7542786ec9f
|
f59246a0b83df52c4a8d53f350e16c74333eb56c
|
/bin/epylint
|
65b9cc758eea3c753eb0f53d23a166437dcf16a5
|
[] |
no_license
|
Angel-Chang/HSDBS
|
bcc4ffe679cd56898354f8d4e0aa21bd94fc2844
|
df14e42beb17ad3f6262b3fdc50052b73d99fd25
|
refs/heads/master
| 2023-07-10T22:19:34.965001
| 2021-08-24T14:42:58
| 2021-08-24T14:42:58
| 399,503,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
#!/Users/dualwings/Projects/HappyCityDB/HSDBS/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper for pylint's "epylint" entry point.
import re
import sys
from pylint import run_epylint
if __name__ == '__main__':
    # setuptools convention: strip a "-script.pyw"/".exe" suffix so the tool
    # sees a clean program name in sys.argv[0] (relevant on Windows).
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_epylint())
|
[
"huichi.chang@gmail.com"
] |
huichi.chang@gmail.com
|
|
49418e89eb996edd410c9e539d7f89196d1c80c0
|
7a0adfa02066795cf426434f36469560e9731448
|
/TokenExtract.py
|
82faec4da27067fe03eb1f735690fb41508c5960
|
[] |
no_license
|
kevinkoo001/malClassifier
|
b5b7dc4287903eae6a69eb547d60f5d3e966f8d0
|
9c25bad29069afa030443284042e8d1fcbeb7233
|
refs/heads/master
| 2021-01-10T19:36:53.793962
| 2015-03-09T19:24:06
| 2015-03-09T19:24:06
| 30,928,405
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
__author__ = 'HEEYOUNG'
import csv
import numpy as np
import sklearn
import sys
from os import listdir
from os.path import isfile, join
import os
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.Malware
BASEDIR = "H:/Malware_Data/"
if __name__ == '__main__':
    # Build a distinct-argument collection (CallArg) from every key seen in
    # CallInfo documents, then number the arguments sequentially.
    TrainList = []
    AC = db.AssemCnt
    AK = db.AssemKeys
    CI = db.CallInfo
    CA = db.CallArg
    Files = CI.find(timeout=False)
    for f in Files:
        for k in f.keys():
            # Skip Mongo's own id fields.
            if k == '_id' or k == 'Id':
                continue
            # Insert each argument name only once.
            if CA.find_one({"arg":k}):
                continue
            CA.insert({"arg": k})
    arg = CA.find(timeout=False)
    arg_id = 0
    for c in arg:
        # NOTE(review): this updates AssemKeys using _id values taken from the
        # CallArg cursor -- presumably CA.update was intended; confirm before
        # relying on the arg_id numbering.
        AK.update({"_id":c['_id']}, {"$set": {"arg_id": arg_id}})
        arg_id += 1
    sys.exit()
|
[
"kevinkoo001@gmail.com"
] |
kevinkoo001@gmail.com
|
dc7eb1f17fb333249a13d48e83e81c6eb1c15213
|
684a4b1b43810ed6ebe6bbb29b3de90038ee90a6
|
/Decision_Trees/LMT/calculate_diff_sum_with_pos_neg_weights.py
|
1e3b1bafee3c3991cb4d9c1d7de194bcc2a4385e
|
[] |
no_license
|
liuyejia/Yeti-Thesis-Project
|
47735120f3314e0041fe978d16ba2c922775b0fd
|
3ce083c5f871b6efe0a581d8ce8db18116ea6ba4
|
refs/heads/master
| 2021-10-09T12:25:23.067022
| 2018-12-28T06:02:41
| 2018-12-28T06:02:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,085
|
py
|
import csv
import pandas as pd
import numpy as np
# Per-leaf-node feature averages, precomputed with the SQL below and pasted in
# by hand (index 0 = leaf node 1, ..., index 4 = leaf node 5):
# get the average value with sql "SELECT LeafNode, avg(DraftAge_norm), avg(Weight_norm), avg(CSS_rank_norm), avg(rs_A_norm), avg(rs_GP_norm), avg(rs_P_norm), avg(rs_PIM_norm), avg(rs_PlusMinus_norm), avg(po_P_norm), avg(po_PIM_norm), avg(po_PlusMinus_norm), avg(po_A_norm), avg(country_EURO), avg(country_USA), avg(country_CAN), avg(position_R) FROM chao_draft.lmt_testYears_CSS_null_norm_prob_for_points where DraftYear in (2001,2002)/(2007,2008) group by LeafNode;"
# NOTE(review): df1 is loaded here but never used afterwards.
df1 = pd.read_csv('Desktop/lmt_10years_CSS_null_norm_prob_01_02.csv')
DraftAge_norm_avg = [0.042207792207792194, 0.05088062622309201,0.10545556805399334, 0.06395348837209304, 0.10204081632653063]
Weight_norm_avg = [0.4145541958041958, 0.4143835616438356, 0.4140672319806176, 0.39020572450804997, 0.3820970695970695]
CSS_rank_norm_avg = [0.5431709438886473, 0.5189093530838303, 0.5073842444335611, 0.47947034605541355, 0.5593529277739803]
rs_A_norm_avg = [0.19184491978609627, 0.15028203062046727, 0.212289640265555, 0.2045143638850889, 0.20891690009337066]
rs_GP_norm_avg = [0.6603535353535354, 0.5652968036529681, 0.5430446194225723, 0.5740310077519379, 0.6825396825396827]
rs_P_norm_avg = [0.20497698504027628, 0.1596150511531126, 0.2299661118309578, 0.22188695908154255, 0.24035563592525613]
rs_PIM_norm_avg = [0.18898272917062062, 0.14762490348042442, 0.1367432150313153, 0.13191241442928583, 0.17447062332239785]
rs_PlusMinus_norm_avg = [0.566, 0.47331506849315086, 0.5520000000000023, 0.634790697674419, 0.6540952380952382]
po_P_norm_avg = [0.05039525691699606, 0.022036926742108394, 0.07300581992468333, 0.06648129423660265, 0.14906832298136646]
po_PIM_norm_avg = [0.06079545454545455, 0.023972602739726033, 0.05692257217847772, 0.04544573643410852, 0.12539682539682542]
po_PlusMinus_norm_avg = [0.28693181818181823, 0.375, 0.375, 0.375, 0.5674603174603176]
po_A_norm_avg = [0.05506993006993008, 0.02370916754478399, 0.0772259236826166, 0.06663685152057243, 0.16300366300366298]
country_EURO_avg = [0.4091, 0.5342, 0.4291, 0.6163, 0.5714]
country_USA_avg = [0.0682, 0.1370, 0.2402, 0.0814, 0.0238]
country_CAN_avg = [0.5227, 0.3288, 0.3307, 0.3023, 0.4048]
position_R_avg = [0.1818, 0.2329, 0.2126, 0.1047, 0.2143]
# First pass over the CSV: for every player row, compute each feature's
# deviation from its leaf-node average, and the per-leaf positive/negative
# weighted sums (coefficients come from a trained logistic model tree).
with open('Desktop/lmt_10years_CSS_null_norm_prob_01_02.csv', 'rb') as csvfile:
    d_reader = csv.DictReader(csvfile)
    vals_positive = []
    vals_negative = []
    DraftAge_norm_diff_list = []
    Weight_norm_diff_list = []
    CSS_rank_norm_diff_list = []
    rs_A_norm_diff_list = []
    rs_P_norm_diff_list = []
    country_EURO_diff_list = []
    rs_GP_norm_diff_list = []
    rs_PIM_norm_diff_list = []
    rs_PlusMinus_norm_diff_list = []
    po_A_norm_diff_list = []
    po_P_norm_diff_list = []
    po_PIM_norm_diff_list = []
    po_PlusMinus_norm_diff_list = []
    position_R_diff_list = []
    country_CAN_diff_list = []
    for row in d_reader:
        # LeafNode is 1-based; the *_avg lists are 0-based.
        DraftAge_norm_diff = float(row['DraftAge_norm']) - DraftAge_norm_avg[int(row['LeafNode'])-1]
        DraftAge_norm_diff_list.append(DraftAge_norm_diff)
        Weight_norm_diff = float(row['Weight_norm']) - Weight_norm_avg[int(row['LeafNode'])-1]
        Weight_norm_diff_list.append(Weight_norm_diff)
        CSS_rank_norm_diff = float(row['CSS_rank_norm']) - CSS_rank_norm_avg[int(row['LeafNode'])-1]
        CSS_rank_norm_diff_list.append(CSS_rank_norm_diff)
        rs_A_norm_diff = float(row['rs_A_norm']) - rs_A_norm_avg[int(row['LeafNode'])-1]
        rs_A_norm_diff_list.append(rs_A_norm_diff)
        rs_P_norm_diff = float(row['rs_P_norm']) - rs_P_norm_avg[int(row['LeafNode'])-1]
        rs_P_norm_diff_list.append(rs_P_norm_diff)
        country_EURO_diff = float(row['country_EURO']) - country_EURO_avg[int(row['LeafNode'])-1]
        country_EURO_diff_list.append(country_EURO_diff)
        rs_GP_norm_diff = float(row['rs_GP_norm']) - rs_GP_norm_avg[int(row['LeafNode'])-1]
        rs_GP_norm_diff_list.append(rs_GP_norm_diff)
        rs_PIM_norm_diff = float(row['rs_PIM_norm']) - rs_PIM_norm_avg[int(row['LeafNode'])-1]
        rs_PIM_norm_diff_list.append(rs_PIM_norm_diff)
        rs_PlusMinus_norm_diff = float(row['rs_PlusMinus_norm']) -rs_PlusMinus_norm_avg[int(row['LeafNode'])-1]
        rs_PlusMinus_norm_diff_list.append(rs_PlusMinus_norm_diff)
        po_A_norm_diff = float(row['po_A_norm']) - po_A_norm_avg[int(row['LeafNode'])-1]
        po_A_norm_diff_list.append(po_A_norm_diff)
        po_P_norm_diff = float(row['po_P_norm']) - po_P_norm_avg[int(row['LeafNode'])-1]
        po_P_norm_diff_list.append(po_P_norm_diff)
        po_PIM_norm_diff = float(row['po_PIM_norm']) - po_PIM_norm_avg[int(row['LeafNode'])-1]
        po_PIM_norm_diff_list.append(po_PIM_norm_diff)
        po_PlusMinus_norm_diff = float(row['po_PlusMinus_norm']) - po_PlusMinus_norm_avg[int(row['LeafNode'])-1]
        po_PlusMinus_norm_diff_list.append(po_PlusMinus_norm_diff)
        position_R_diff = float(row['position_R']) - position_R_avg[int(row['LeafNode'])-1]
        position_R_diff_list.append(position_R_diff)
        country_CAN_diff = float(row['country_CAN']) - country_CAN_avg[int(row['LeafNode'])-1]
        country_CAN_diff_list.append(country_CAN_diff)
        # Each leaf has its own set of hard-coded model coefficients,
        # split into negatively- and positively-weighted terms.
        if int(row['LeafNode']) == 1:
            val_neg = CSS_rank_norm_diff *-1.256756345 + rs_A_norm_diff * -1.3622035894
            val_pos = DraftAge_norm_diff* 1.5253614015 + Weight_norm_diff * 1.4919620265 + rs_P_norm_diff * 1.5706334136
        elif int(row['LeafNode']) == 2:
            val_neg = country_EURO_diff *-0.1901788186 + CSS_rank_norm_diff * -1.4750414248 + po_PlusMinus_norm_diff * -7.1433895008
            val_pos = DraftAge_norm_diff * 2.2681273438 + Weight_norm_diff *1.5706434401 + rs_GP_norm_diff *1.1639704533 + rs_P_norm_diff * 1.5706334136 + rs_PlusMinus_norm_diff * 1.8081514932 + po_P_norm_diff * 1.4172451085 + po_PIM_norm_diff *22.3783855159
        elif int(row['LeafNode']) == 3:
            val_neg = CSS_rank_norm_diff * -2.2145122397 + rs_PIM_norm_diff * -1.0367303956 + rs_PlusMinus_norm_diff * -10.8803616145 + po_A_norm_diff * -0.9534784316 + po_PlusMinus_norm_diff * -7.1433895008
            val_pos = DraftAge_norm_diff * 2.2681273438 + country_EURO_diff * 0.0056782221 + country_CAN_diff * 0.332912836 + Weight_norm_diff * 1.5706434401 + rs_GP_norm_diff * 2.4135766868 + rs_P_norm_diff * 1.5706334136 + po_P_norm_diff * 1.4172451085 + po_PIM_norm_diff * 1.5097963896
        elif int(row['LeafNode']) == 4:
            val_neg = country_EURO_diff * -0.1901788186 + CSS_rank_norm_diff * -1.4750414248 + rs_PIM_norm_diff * -1.0367303956 + po_PlusMinus_norm_diff * -7.1433895008
            val_pos = DraftAge_norm_diff * 2.2681273438 + country_CAN_diff * 0.332912836 + Weight_norm_diff * 1.5706434401 + rs_GP_norm_diff * 1.8403281774 + rs_P_norm_diff * 1.5706334136 + rs_PlusMinus_norm_diff * 0.7777586333 + po_P_norm_diff * 4.3659576659 + po_PIM_norm_diff * 5.2983383711
        elif int(row['LeafNode']) == 5:
            val_neg = position_R_diff * -0.1171557532 + CSS_rank_norm_diff * -1.4750414248 + po_PlusMinus_norm_diff * -7.1433895008
            val_pos = DraftAge_norm_diff * 0.7210529086 + Weight_norm_diff * 1.5706434401 + rs_GP_norm_diff * 1.1639704533 + rs_P_norm_diff * 1.5706334136 + po_P_norm_diff * 1.4172451085
        vals_positive.append(val_pos)
        vals_negative.append(val_neg)
# Second pass: re-read the input CSV and write it back out with the computed
# weighted sums and per-feature diffs appended as new columns (the iterators
# walk the result lists in the same row order as the first pass).
with open('Desktop/lmt_10years_CSS_null_norm_prob_01_02.csv', 'rb') as input, open('Desktop/output_lmt_points_01_02_pos_neg_points_with_diff.csv', 'wb') as output:
    reader = csv.reader(input, delimiter = ',')
    writer = csv.writer(output, delimiter = ',')
    row = next(reader) # read title line
    row.append('weight_pos_val')
    row.append('weight_neg_val')
    row.append('DraftAge_norm_diff')
    row.append('Weight_norm_diff')
    row.append('CSS_rank_norm_diff')
    row.append('rs_A_norm_diff')
    row.append('rs_P_norm_diff')
    row.append('country_EURO_diff')
    row.append('rs_GP_norm_diff')
    row.append('rs_PIM_norm_diff')
    row.append('rs_PlusMinus_norm_diff')
    row.append('po_A_norm_diff')
    row.append('po_P_norm_diff')
    row.append('po_PIM_norm_diff')
    row.append('po_PlusMinus_norm_diff')
    row.append('position_R_diff')
    row.append('country_CAN_diff')
    writer.writerow(row) # write enhanced title line
    # One iterator per appended column (there is no it_8 -- the numbering
    # simply skips it; no list is missed).
    it_pos = vals_positive.__iter__() # create an iterator on the result
    it_neg = vals_negative.__iter__()
    it_1 = DraftAge_norm_diff_list.__iter__()
    it_2 = Weight_norm_diff_list.__iter__()
    it_3 = CSS_rank_norm_diff_list.__iter__()
    it_4 = rs_A_norm_diff_list.__iter__()
    it_5= rs_P_norm_diff_list.__iter__()
    it_6 = country_EURO_diff_list.__iter__()
    it_7 = rs_GP_norm_diff_list.__iter__()
    it_9 = rs_PIM_norm_diff_list.__iter__()
    it_10 = rs_PlusMinus_norm_diff_list.__iter__()
    it_11 = po_A_norm_diff_list.__iter__()
    it_12 = po_P_norm_diff_list.__iter__()
    it_13 = po_PIM_norm_diff_list.__iter__()
    it_14 = po_PlusMinus_norm_diff_list.__iter__()
    it_15 = position_R_diff_list.__iter__()
    it_16 = country_CAN_diff_list.__iter__()
    for row in reader:
        if row: # avoid empty lines that usually lurk undetected at the end of the files
            try:
                row.append(next(it_pos)) # add a result to current row
                row.append(next(it_neg))
                row.append(next(it_1))
                row.append(next(it_2))
                row.append(next(it_3))
                row.append(next(it_4))
                row.append(next(it_5))
                row.append(next(it_6))
                row.append(next(it_7))
                row.append(next(it_9))
                row.append(next(it_10))
                row.append(next(it_11))
                row.append(next(it_12))
                row.append(next(it_13))
                row.append(next(it_14))
                row.append(next(it_15))
                row.append(next(it_16))
            except StopIteration:
                row.append("N/A") # not enough results: pad with N/A
            writer.writerow(row)
|
[
"noreply@github.com"
] |
liuyejia.noreply@github.com
|
81b6956e40e1cd7df9d676045abb0fb86f48d988
|
3e08cffe70e114cdee33469c5b95e95e31dc668b
|
/common_tools/units.py
|
a9175bce0ff93735177f3d02fdda1232fc3321fa
|
[] |
no_license
|
mzoll/common_tools
|
7d5433daed4284b11df6eb5de6112d48eb9ac094
|
1a679f152727d52c9ce9e718f1d609c6f3d3fca4
|
refs/heads/master
| 2021-06-02T01:34:47.206657
| 2019-02-21T23:35:32
| 2019-02-21T23:35:32
| 125,834,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,777
|
py
|
'''
Created on Sep 28, 2017
@author: marcel.zoll
define some commonly used units as multiplicative values, so that we wont have to guess what kind of factor they entail to standard definitions.
'''
class units:
    """Multiplicative conversion factors from common units to native units.

    Multiply a literal by one of these factors to record which unit it was
    written in; the product is then expressed in the quantity's native unit
    (named by ``units._native_name``).

    Examples
    --------
    ```
    t = 1. * units.time.milliseconds
    print('t equal to ', t, units._native_name.time)
    ```
    """
    class time:
        # Native unit: seconds. Short aliases reuse the long definitions.
        seconds = 1.
        sec = 1
        minutes = 60.
        m = minutes
        hours = 3600.
        h = hours
        days = 24.*3600.
        d = days
        milliseconds = 1E-3
        ms = milliseconds
        microseconds = 1E-6
        mus = microseconds
        nanoseconds = 1E-9
        ns = nanoseconds
    class distance:
        # Native unit: meters.
        meters = 1.
        m = meters
        kilometers = 1E3
        km = kilometers
    class weight:
        # Native unit: kilograms.
        kilograms = 1.
        kg = kilograms
        grams = 1E-3
        g = grams
        tons = 1E3
        t = tons
    class factors:
        # Dimensionless scale prefixes.
        percent = 1E-2
        kilo = 1E3
        mega = 1E6
        giga = 1E9
        tera = 1E12
        deci = 1E-1
        centi = 1E-2
        milli = 1E-3
        micro = 1E-6
        nano = 1E-9
    class abstract:
        money = 1.
    class money:
        # All currencies are treated as identity factors (no FX conversion).
        Kronor = 1.
        SEK = Kronor
        Dollar = 1.
        USD = Dollar
        Euro = 1.
        EUR = Euro
    class _native_name:
        # Human-readable name of each quantity's native unit.
        time = 'seconds'
        distance = 'meters'
        weight = 'kilogram'
weight = 'kilogram'
def impl(value):
    """Mark a value's unit as implicit.

    Always returns the multiplicative identity (1.0), so multiplying by
    ``impl('milliseconds')`` changes nothing numerically -- it merely
    documents, at the call site, which unit the literal is written in.

    Examples
    --------
    ```
    t_ms = 1. * impl('milliseconds')
    ```
    """
    return 1.
|
[
"marcel.zoll@nowinteract.com"
] |
marcel.zoll@nowinteract.com
|
7a9a5b0d61016058241e1c30827b44586ea150a4
|
7649d4420c729eb1114d1159b35f9b7e05f40c42
|
/dog.py
|
aa05e8daff6e94930b21c7a8244682bcb393f74f
|
[] |
no_license
|
ninankkim/python-functions
|
6492bfe5dc9151092179367effde1a4d733c2270
|
4b872849d7d527620b08e762536ca8b24d30f7f5
|
refs/heads/master
| 2022-12-23T09:00:23.183486
| 2020-09-24T02:31:52
| 2020-09-24T02:31:52
| 289,960,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
class Dog:
    """A simple dog with a name and an age."""

    # Class attribute shared by every Dog instance.
    species = 'mammal'

    def __init__(self, name, age):
        # BUG FIX: the parameter was named `Nelson` while the body assigned
        # the undefined name `name`, raising NameError on every construction.
        self.name = name
        self.age = age

    def description(self):
        """Return a human-readable summary of this dog."""
        # BUG FIX: the original had a stray `.` after the f-string (a
        # SyntaxError) and the method was indented after module-level code,
        # leaving it outside the class.
        return f'{self.name} is {self.age} years old'


# Demo: construct a dog and introduce it.
nelson = Dog("Nelson", 3)
print(f"{nelson.name} is {nelson.age}.")
|
[
"noreply@github.com"
] |
ninankkim.noreply@github.com
|
ad2413067fa7f22525ccaa649449bf4eb0f75ac0
|
6633410f0b0922acc53a452a4fb379d322be60ae
|
/trydjango19/manage.py
|
394fc3ba1c3c0abd172f8409d04987e6abc650f1
|
[] |
no_license
|
miradouro/django_rest
|
40cf71eb26abe7da509aa417104503563295dca7
|
1250eed5860ee6e2b87ca1c0f7b90899e0eb7d44
|
refs/heads/master
| 2023-04-09T12:03:33.059053
| 2021-03-31T11:55:07
| 2021-03-31T11:55:07
| 352,301,531
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Let an externally-set DJANGO_SETTINGS_MODULE win; otherwise default to
    # this project's settings module.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'trydjango19.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; `from exc` keeps the original
        # traceback chained for debugging.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
|
[
"rafaelmiradouro@gmail.com"
] |
rafaelmiradouro@gmail.com
|
7215c3ea37b0c050243a3ad04035d80c0d10a10d
|
a5d42f157dc642888564bca72d9ebfa18b7e8ee7
|
/05-condicionales/ejemplo_if.py
|
896d6dd4d392358a1077f0e341a8acfb3ce6546f
|
[
"MIT"
] |
permissive
|
tulcas/master-python
|
5cbc17881e4b2f3f3e3134f3edc61ecc73898f68
|
ddac8aa1bf3e6448fe237eac239d27ce3fda705c
|
refs/heads/master
| 2023-01-08T17:17:12.616654
| 2020-11-14T09:38:51
| 2020-11-14T09:38:51
| 275,884,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
# Example 1 (Ejemplo 1): simple if/else on a string
print ("########################## EJEMPLO 1 ########################## ")
# NOTE(review): the first assignment is immediately overwritten, so the
# else-branch below always runs -- presumably intentional for the demo.
color = "rojo"
color = "verde"
if color == "rojo":
    print("Enhorabuena!!!")
    print("El color es: ROJO ")
else:
    print("El color NO es: ROJO ")
# Example 2 (Ejemplo 2): compare user-entered year against the current year
print ("\n########################## EJEMPLO 2 ########################## ")
year = int(input("En que año estamos ?: "))
anioActual = 2020
if year >= anioActual:
    print("Estamos de 2020 en adelante...")
else:
    print("Es un año anterior a 2020! ")
# Example 3 (Ejemplo 3)
print ("\n########################## EJEMPLO 3 ########################## ")
# NOTE(review): unused variable -- this example appears unfinished.
nombre = "Victor Robles"
|
[
"andllanes@gmail.com"
] |
andllanes@gmail.com
|
cee26de311e1547ddda332a99951c846aae0f3a4
|
655f463e8a357a649972782b2a338be03044b119
|
/h_index_P3.py
|
86a598e5917d3e50c46750deb15af0c599528919
|
[] |
no_license
|
covy92/algorithmPrac
|
01c29476ebe869c1bcd15133f95e44ba7e9b59b2
|
86c9691d86b6275026195e3c2fe0e34778f03159
|
refs/heads/master
| 2023-09-04T16:49:09.994580
| 2021-11-05T01:54:28
| 2021-11-05T01:54:28
| 422,837,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
# Brute-force h-index computation for a sample citation list: for every
# candidate value `count`, tally how many papers have >= count citations and
# keep the candidate if it is feasible; the answer is the largest feasible one.
citations = [0, 1, 1]
citations.sort()
print(citations)
count = 0
over_num = 0
answer_list = [0]
while count < len(citations)+1:
    for i in range((len(citations))):
        if citations[i] >= count:
            over_num += 1
    print(count, over_num)
    # Candidate h=count is feasible when at least `count` papers reach it.
    if count <= over_num:
        answer_list.append(count)
    over_num = 0
    count += 1
answer = max(answer_list)
print(answer)
# Reference answer (translated from Korean: 모범답안)
def solution(citations):
    """Return the h-index of *citations* (a list of citation counts).

    The h-index is the largest h such that at least h papers have
    h or more citations each.
    """
    ranked = sorted(citations, reverse=True)
    h = 0
    for cited in ranked:
        # Can we extend h by one more paper?
        if cited < h + 1:
            break
        h += 1
    return h
# Print the h-index computed by the reference solution for the sample data.
print(solution(citations))
|
[
"tjddlf1232@gmail.com"
] |
tjddlf1232@gmail.com
|
3984eab721b23a8156156fad53f978306b280051
|
9275f3919f80c3d1525afa890a9422d86e2c745b
|
/coderbyte-TimeConvert.py
|
4b13ee7d9dd3b5cd26688273943992491c5e4486
|
[] |
no_license
|
Jandeh7/coderbyte-solution
|
3b97c1d6c9be5c00607b1d1d86b0f413c975f3d4
|
7614bc9a00787e92be67dc745f5c4b6de90aa27e
|
refs/heads/master
| 2022-01-07T19:41:08.443776
| 2019-07-05T09:15:31
| 2019-07-05T09:15:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
'''
Have the function TimeConvert(num) take the num parameter being passed and return the number of hours and minutes the parameter converts to (ie. if num = 63 then the output should be 1:3). Separate the number of hours and minutes with a colon.
Use the Parameter Testing feature in the box below to test your code with different arguments.
'''
def TimeConvert(num):
    """Return *num* minutes formatted as 'hours:minutes' (e.g. 63 -> '1:3').

    Fixes: replaces `int(num/60)` plus manual remainder with a single
    divmod(), and stops shadowing the built-in `min` with a local name.
    """
    hours, minutes = divmod(num, 60)
    return str(hours) + ':' + str(minutes)
# keep this function call here
# NOTE: Python 2 syntax (print statement + raw_input). raw_input returns a
# string; presumably the Coderbyte harness supplies an int — TODO confirm.
print TimeConvert(raw_input())
|
[
"liangmanping@outlook.com"
] |
liangmanping@outlook.com
|
5c304c1203ea75a46905540701399371d11cb521
|
1f2cd2042e3ad8a3866a01ee6dcc9735537ffa28
|
/Task 3.py
|
eed4903ddca70e5a5901d032feb5f256a160ffaa
|
[] |
no_license
|
ADRITA-PARIA/TSF-tasks
|
f6613b68d55813ca14805f36a2a60c61a264d04d
|
26975dd63096eb7d2750eff380cf0345c72b8d3c
|
refs/heads/main
| 2023-02-27T05:46:57.990106
| 2021-02-10T13:30:55
| 2021-02-10T13:30:55
| 336,715,941
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,373
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Data Science And Business Analytics Intern At The Sparks Foundation
# # GRIPFEB21
# # Author: Adrita Paria
# # Task 3=Exploratory Data Analysis(Sample Superstore)
# # Step 1: Importing standard ML libraries
# In[132]:
# Notebook export: cells run top-to-bottom with plotting side effects.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
# # Step 2: Importing the data set
# In[52]:
# NOTE(review): hard-coded absolute Windows path — portable only on the
# author's machine.
df=pd.read_csv("C:\\Users\\adrit\\Desktop\\SampleSuperstore.csv")
df.head(5)
# # 3. Data Analysis
# In[53]:
df.describe()
# In[54]:
df.shape
# In[55]:
df.columns
# In[56]:
df.nunique()
# In[57]:
df.isnull().sum()
# # 4. Data Visualization Using Correlation Matrix
# In[58]:
# Pairwise correlations of the numeric columns.
correlation=df.corr()
# In[59]:
plt.figure(figsize=(10,8))
sns.heatmap(correlation,annot=True)
plt.xticks(fontsize=13)
plt.yticks(fontsize=13)
plt.show()
# In[60]:
plt.figure(figsize=(15,7))
sns.boxplot(data=df)
# # There are no outliers present here
# In[61]:
df.groupby(['Category','Sub-Category'])['Quantity'].count()
# In[62]:
sns.catplot(x='Quantity',kind='box',data=df)
# In[79]:
sns.pairplot(df)
# In[75]:
# Subset reused below for the shipping-mode vs sales bar chart.
y=df[['Ship Mode','Sales']]
print(y)
# # 5. Individual visualisation of the categories
# In[93]:
plt.figure(figsize=(10,8))
sns.countplot(x=df['Segment'])
print(df['Segment'].value_counts())
# ## The section consumer shows the highest consumption rather than corporate and home office
# In[94]:
plt.figure(figsize=(10,8))
sns.countplot(x=df['Region'])
print(df['Region'].value_counts())
# ## The section west shows the highest count
# In[90]:
plt.figure(figsize=(10,8))
sns.countplot(x=df['Category'])
print(df['Category'].value_counts())
# In[92]:
plt.figure(figsize=(15,9))
sns.countplot(x=df['Sub-Category'])
print(df['Sub-Category'].value_counts())
# In[97]:
plt.figure(figsize=(20,5))
sns.countplot(x=df['City'])
print(df['City'].value_counts())
# In[78]:
plt.figure(figsize=(10,7))
plt.bar(x=y['Ship Mode'],height=y['Sales'])
plt.title('Shipping mode vs Sales',fontsize=14)
plt.xlabel('Shipping mode',fontsize=13)
plt.ylabel('Sales',fontsize=13)
plt.show()
# In[100]:
plt.figure(figsize=(19,7))
# NOTE(review): plotting one categorical column against another here —
# verify this chart renders as intended.
plt.bar('Sub-Category','Category',data=df,color='y')
# In[105]:
plt.figure(figsize=(15,8))
sns.countplot(x='Sub-Category',hue='Region',data=df)
print(df['Profit'].value_counts())
# In[109]:
plt.figure(figsize=(15,8))
sns.countplot(x='Sub-Category',hue='Segment',data=df)
# In[130]:
plt.figure(figsize=(10,8))
df['Category'].value_counts().plot.pie()
plt.show()
# In[126]:
plt.figure(figsize=(10,8))
df['Sub-Category'].value_counts().plot.pie()
plt.show()
# In[139]:
# Interactive drill-down: Country -> Category -> Sub-Category, sized by Sales.
fig=px.sunburst(df,path=['Country','Category','Sub-Category'],values='Sales',color='Category',hover_data=['Sales','Quantity','Profit'])
fig.update_layout(height=800)
fig.show()
# # Interpretation
# ### The dataset is about a superstore’s sales.
# 2) The shape of the dataset is 9994, 13(Rows, Columns).
# 3) The max profit on a single sale is 8399.976.
# 4) Different Types of Shipping Mode is (Standard, Second, First Class and Same Day).
# 5) Categories of the Products are (Office Supplies, Technology, and Furniture).
# 6) Segment of the Customers are (Consumer, Home Office, Corporate).
# 7) Products are delivered in 39 states.
# 8) Region of services is (East, West, South, and Central).
# 9) There are several Sub-Categories of Products.
# 10) Total profit made: 286397.0217.
# 11) Total Sales made: 2297200.8603.
# 12) Therefore Profit Percentage is: 12.46.
# 13) Many Discounts are also given.
# 14) As California has the highest number of sales let’s take a look its stats.
# 15) In California Office Supplies has the highest number of sales.
# 16) In Office Supplies Paper is the highest Sold.
# 17) Most people in California prefer Standard Class of shipping mode.
# 18) As most of the sales are in the quantity of 3, company should provide a special discount on a bundle of 3, so sales may increase.
# 19) In west also Office supplies has the highest sales.
# 20) But a noticeable thing to see is that instead of paper in west highest sales is of binders.
# 21) In the technology category Phones has the highest sales in California.
# 22) Sales in Furniture : 74199.7953
# 23) Sales in Office Supplies: 719047.032
# 24) Sales in Technology: 836154.033
# 25) Though highest no.of sales were of Office Supplies still Technology’s Sales are greater in number.
# 26) Profit in Furniture: 18451.272
# 27) Profit in Office Supplies: 122490.8008
# 28) Profit in Technology: 145454.9481
# 29) There is highest loss in the Office Supplies Category.
# 30) California is the state where there is highest profit in the art category so art related ads should be run in California.
# 31) Company should work on the Central region because that region has the highest losses.
# 32) And state wise company should work on Texas.
|
[
"noreply@github.com"
] |
ADRITA-PARIA.noreply@github.com
|
b91f6ce958f69e9dfef51a067e91a094aa42930b
|
99de523ddc847b29db4236ef87d39eb0c1f6ac1c
|
/python/searchMethodUI.py
|
a5e2497ec6aa9e548e9d45302a2fb5c00bd5efc7
|
[] |
no_license
|
sanfx/searchMethod
|
28ffcbcee5c8e7adaed1ae410c033965b1dbf875
|
38392d0101c2f3c1435e50e52857df13e50a9943
|
refs/heads/master
| 2016-09-06T03:36:55.479814
| 2013-11-04T14:01:33
| 2013-11-04T14:01:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,091
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/searchMethod.ui'
#
# Created: Sat Nov 2 02:20:15 2013
# by: pyside-uic 0.2.13 running on PySide 1.1.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_searchMethodMainWidget(object):
    """Auto-generated UI layout class (uic output — do not hand-edit widgets).

    NOTE(review): the header says this was generated by pyside-uic, but the
    file imports PyQt4 — confirm which binding the project actually targets.
    """

    def setupUi(self, searchMethodMainWidget):
        """Build all widgets/layouts and wire them onto the given widget."""
        searchMethodMainWidget.setObjectName("searchMethodMainWidget")
        searchMethodMainWidget.resize(553, 414)
        self.gridLayout_2 = QtGui.QGridLayout(searchMethodMainWidget)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        # Top input row: path entry, module/prefix entry, action buttons.
        self.searchBtn = QtGui.QPushButton(searchMethodMainWidget)
        self.searchBtn.setObjectName("searchBtn")
        self.gridLayout.addWidget(self.searchBtn, 1, 2, 1, 1)
        self.lookInsideLbl = QtGui.QLabel(searchMethodMainWidget)
        self.lookInsideLbl.setObjectName("lookInsideLbl")
        self.gridLayout.addWidget(self.lookInsideLbl, 1, 0, 1, 1)
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        # Custom line edit with completion (project-local widget).
        self.lookInsideEdit = CompleterLineEdit(searchMethodMainWidget)
        self.lookInsideEdit.setAutoFillBackground(True)
        self.lookInsideEdit.setDragEnabled(True)
        self.lookInsideEdit.setObjectName("lookInsideEdit")
        self.horizontalLayout_3.addWidget(self.lookInsideEdit)
        self.label = QtGui.QLabel(searchMethodMainWidget)
        self.label.setObjectName("label")
        self.horizontalLayout_3.addWidget(self.label)
        self.lineEdit = QtGui.QLineEdit(searchMethodMainWidget)
        self.lineEdit.setCursorMoveStyle(QtCore.Qt.VisualMoveStyle)
        self.lineEdit.setObjectName("lineEdit")
        self.horizontalLayout_3.addWidget(self.lineEdit)
        self.gridLayout.addLayout(self.horizontalLayout_3, 1, 1, 1, 1)
        self.addPathEdit = AddPathLineEdit(searchMethodMainWidget)
        self.addPathEdit.setInputMask("")
        self.addPathEdit.setObjectName("addPathEdit")
        self.gridLayout.addWidget(self.addPathEdit, 0, 1, 1, 1)
        self.addPathlbl = QtGui.QLabel(searchMethodMainWidget)
        self.addPathlbl.setObjectName("addPathlbl")
        self.gridLayout.addWidget(self.addPathlbl, 0, 0, 1, 1)
        self.browseBtn = QtGui.QPushButton(searchMethodMainWidget)
        self.browseBtn.setObjectName("browseBtn")
        self.gridLayout.addWidget(self.browseBtn, 0, 2, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        # Results area: label + list of matching modules.
        self.resultlbl = QtGui.QLabel(searchMethodMainWidget)
        self.resultlbl.setEnabled(True)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.resultlbl.sizePolicy().hasHeightForWidth())
        self.resultlbl.setSizePolicy(sizePolicy)
        self.resultlbl.setMinimumSize(QtCore.QSize(0, 9))
        font = QtGui.QFont()
        font.setPointSize(11)
        self.resultlbl.setFont(font)
        self.resultlbl.setObjectName("resultlbl")
        self.verticalLayout.addWidget(self.resultlbl)
        self.searchListView = QtGui.QListView(searchMethodMainWidget)
        self.searchListView.setMaximumSize(QtCore.QSize(16777215, 150))
        self.searchListView.setTabKeyNavigation(True)
        self.searchListView.setProperty("isWrapping", True)
        self.searchListView.setResizeMode(QtGui.QListView.Adjust)
        self.searchListView.setObjectName("searchListView")
        self.verticalLayout.addWidget(self.searchListView)
        # Bottom pane: method list on the left, help text on the right.
        self.gridLayout_3 = QtGui.QGridLayout()
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.methodListView = QtGui.QListView(searchMethodMainWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.methodListView.sizePolicy().hasHeightForWidth())
        self.methodListView.setSizePolicy(sizePolicy)
        self.methodListView.setMinimumSize(QtCore.QSize(20, 0))
        self.methodListView.setMaximumSize(QtCore.QSize(125, 16777215))
        self.methodListView.setObjectName("methodListView")
        self.gridLayout_3.addWidget(self.methodListView, 1, 0, 1, 1)
        self.helpOnSelMethodTxtEdit = QtGui.QTextEdit(searchMethodMainWidget)
        font = QtGui.QFont()
        font.setUnderline(False)
        self.helpOnSelMethodTxtEdit.setFont(font)
        self.helpOnSelMethodTxtEdit.setProperty("cursor", QtCore.Qt.IBeamCursor)
        self.helpOnSelMethodTxtEdit.setFrameShape(QtGui.QFrame.StyledPanel)
        self.helpOnSelMethodTxtEdit.setFrameShadow(QtGui.QFrame.Raised)
        self.helpOnSelMethodTxtEdit.setTabChangesFocus(True)
        self.helpOnSelMethodTxtEdit.setReadOnly(True)
        self.helpOnSelMethodTxtEdit.setObjectName("helpOnSelMethodTxtEdit")
        self.gridLayout_3.addWidget(self.helpOnSelMethodTxtEdit, 1, 1, 1, 1)
        self.methodlbl = QtGui.QLabel(searchMethodMainWidget)
        self.methodlbl.setMinimumSize(QtCore.QSize(0, 9))
        font = QtGui.QFont()
        font.setPointSize(11)
        self.methodlbl.setFont(font)
        self.methodlbl.setObjectName("methodlbl")
        self.gridLayout_3.addWidget(self.methodlbl, 0, 0, 1, 1)
        self.helplbl = QtGui.QLabel(searchMethodMainWidget)
        self.helplbl.setMinimumSize(QtCore.QSize(0, 12))
        font = QtGui.QFont()
        font.setPointSize(11)
        font.setWeight(50)
        font.setBold(False)
        self.helplbl.setFont(font)
        self.helplbl.setObjectName("helplbl")
        self.gridLayout_3.addWidget(self.helplbl, 0, 1, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout_3)
        self.gridLayout_2.addLayout(self.verticalLayout, 0, 0, 1, 1)
        self.retranslateUi(searchMethodMainWidget)
        QtCore.QMetaObject.connectSlotsByName(searchMethodMainWidget)
        # Tab order: module entry -> prefix -> search -> results -> path -> browse.
        searchMethodMainWidget.setTabOrder(self.lookInsideEdit, self.lineEdit)
        searchMethodMainWidget.setTabOrder(self.lineEdit, self.searchBtn)
        searchMethodMainWidget.setTabOrder(self.searchBtn, self.searchListView)
        searchMethodMainWidget.setTabOrder(self.searchListView, self.addPathEdit)
        searchMethodMainWidget.setTabOrder(self.addPathEdit, self.browseBtn)

    def retranslateUi(self, searchMethodMainWidget):
        """Set all user-visible strings (titles, labels, tooltips, placeholders)."""
        searchMethodMainWidget.setWindowTitle(QtGui.QApplication.translate("searchMethodMainWidget", "Search Method with help", None, QtGui.QApplication.UnicodeUTF8))
        self.searchBtn.setText(QtGui.QApplication.translate("searchMethodMainWidget", "Search", None, QtGui.QApplication.UnicodeUTF8))
        self.lookInsideLbl.setText(QtGui.QApplication.translate("searchMethodMainWidget", "Look Inside", None, QtGui.QApplication.UnicodeUTF8))
        self.lookInsideEdit.setToolTip(QtGui.QApplication.translate("searchMethodMainWidget", "modules or package names separated by comma", None, QtGui.QApplication.UnicodeUTF8))
        self.lookInsideEdit.setPlaceholderText(QtGui.QApplication.translate("searchMethodMainWidget", "Enter module name", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("searchMethodMainWidget", "Prefix", None, QtGui.QApplication.UnicodeUTF8))
        self.lineEdit.setToolTip(QtGui.QApplication.translate("searchMethodMainWidget", "prefix to filter from all methods", None, QtGui.QApplication.UnicodeUTF8))
        self.lineEdit.setPlaceholderText(QtGui.QApplication.translate("searchMethodMainWidget", "Enter starting letter/s or leave empty and hit enter", None, QtGui.QApplication.UnicodeUTF8))
        self.addPathEdit.setToolTip(QtGui.QApplication.translate("searchMethodMainWidget", "location of module or package not in sys.path", None, QtGui.QApplication.UnicodeUTF8))
        self.addPathEdit.setPlaceholderText(QtGui.QApplication.translate("searchMethodMainWidget", "Add path of the module or package not in sys.path list by default", None, QtGui.QApplication.UnicodeUTF8))
        self.addPathlbl.setText(QtGui.QApplication.translate("searchMethodMainWidget", "Add Path", None, QtGui.QApplication.UnicodeUTF8))
        self.browseBtn.setText(QtGui.QApplication.translate("searchMethodMainWidget", "Browse", None, QtGui.QApplication.UnicodeUTF8))
        self.resultlbl.setText(QtGui.QApplication.translate("searchMethodMainWidget", "Results", None, QtGui.QApplication.UnicodeUTF8))
        self.methodlbl.setText(QtGui.QApplication.translate("searchMethodMainWidget", "Methods", None, QtGui.QApplication.UnicodeUTF8))
        self.helplbl.setText(QtGui.QApplication.translate("searchMethodMainWidget", "Help", None, QtGui.QApplication.UnicodeUTF8))
from autoComplete import CompleterLineEdit
from utils import AddPathLineEdit
|
[
"skysan@gmail.com"
] |
skysan@gmail.com
|
939c889e2b58279d75e55db716e9309be7d5298d
|
c1a67f7650e7949ec66d0109c4a05326cfa7e976
|
/travel_budy/urls.py
|
278cd14802a1c21928a3ec5ecb46e93b193aa265
|
[] |
no_license
|
SaralynOgden/Travel_budy
|
7dd7c12bf121996771291c418e72ed84d441aa00
|
c57c39e8822febfa64deed1672dffc457a5f6037
|
refs/heads/master
| 2021-01-22T09:54:34.193167
| 2016-09-07T04:28:00
| 2016-09-07T04:28:00
| 81,977,291
| 0
| 0
| null | 2017-02-14T18:41:28
| 2017-02-14T18:41:28
| null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
"""dashboard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
urlpatterns = [
    # Both app URLconfs are mounted at the site root (r'^'); Django tries
    # loginreg's patterns first, then falls through to main's.
    url(r'^', include('apps.loginreg.urls')),
    url(r'^', include('apps.main.urls')),
]
|
[
"lynam.emily@gmail.com"
] |
lynam.emily@gmail.com
|
81ae8a68197e4435c336fff6212a256e7cc3d667
|
174942d21c29300d79b03be2d43f7b8635d38b07
|
/lesson_05/task_1.py
|
471274cf7c804e4372102453ce1f46d23320edd0
|
[] |
no_license
|
HelenMaksimova/algorithms
|
43a546295888470838bea8301db9acab7b58496b
|
9b3676f2312cd79b28c984aa11dfeb4868237e46
|
refs/heads/main
| 2023-07-03T11:21:26.576778
| 2021-08-12T20:06:06
| 2021-08-12T20:06:06
| 395,431,444
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,893
|
py
|
"""
1. Пользователь вводит данные о количестве предприятий, их наименования и прибыль
за 4 квартала (т.е. 4 отдельных числа) для каждого предприятия.
Программа должна определить среднюю прибыль (за год для всех предприятий)
и вывести наименования предприятий, чья прибыль выше среднего и отдельно
вывести наименования предприятий, чья прибыль ниже среднего.
Подсказка:
Для решения задачи обязательно примените какую-нибудь коллекцию из модуля collections
Для лучшее освоения материала можете даже сделать несколько решений этого задания,
применив несколько коллекций из модуля collections
Пример:
Введите количество предприятий для расчета прибыли: 2
Введите название предприятия: Рога
через пробел введите прибыль данного предприятия
за каждый квартал(Всего 4 квартала): 235 345634 55 235
Введите название предприятия: Копыта
через пробел введите прибыль данного предприятия
за каждый квартал(Всего 4 квартала): 345 34 543 34
Средняя годовая прибыль всех предприятий: 173557.5
Предприятия, с прибылью выше среднего значения: Рога
Предприятия, с прибылью ниже среднего значения: Копыта
"""
from collections import namedtuple, defaultdict
def get_firms_data(number):
    """Collect quarterly profit data for *number* firms from stdin.

    Returns a defaultdict mapping firm name -> namedtuple with the four
    quarterly profits (fields I..IV) plus their precomputed total
    (sum_profit). Prompts and error messages are user-facing Russian text.
    """
    FIRM_PROFIT = namedtuple('Profits', 'I II III IV sum_profit')
    firms_dict = defaultdict(FIRM_PROFIT)
    try:
        for _ in range(number):
            firm_name = input('\nВведите название предприятия: ')
            firm_profit = [float(elem) for elem in input(
                'Через пробел введите прибыль данного предприятия поквартально: ').split()]
            sum_profit = sum(firm_profit)
            # A wrong number of values raises TypeError here (handled below).
            firms_dict[firm_name] = FIRM_PROFIT(*firm_profit, sum_profit)
    except ValueError:
        # Non-numeric profit entered.
        print('\nНеобходимо ввести числа в качестве значений прибыли!')
        # NOTE(review): recursing restarts input for ALL firms, not just the
        # one that failed — confirm this is the intended UX.
        return get_firms_data(number)
    except TypeError:
        # Not exactly four quarterly values were supplied.
        print('\nНеобходимо ввести прибыль за четыре квартала!')
        return get_firms_data(number)
    return firms_dict
def average_profit(firms_dct):
    """Return the mean annual profit across all firms in *firms_dct*."""
    totals = [firm.sum_profit for firm in firms_dct.values()]
    return sum(totals) / len(totals)
def below_profit(firms_dct, profit):
    """Yield the names of firms whose annual profit is below *profit*."""
    return (name for name, data in firms_dct.items() if data.sum_profit < profit)
def over_profit(firms_dct, profit):
    """Yield the names of firms whose annual profit is above *profit*."""
    return (name for name, data in firms_dct.items() if data.sum_profit > profit)
def firms_count():
    """Prompt until the user enters a valid integer firm count and return it.

    Fix: the original re-prompted via recursion, which can exhaust the
    recursion limit on repeated bad input; a loop is equivalent and safe.
    Prompt/error strings are unchanged.
    """
    while True:
        try:
            return int(input('\nВведите количество фирм: '))
        except ValueError:
            print('\nНеобходимо ввести целое число!')
# Script entry: gather firm data, compute the average annual profit, then
# report the firms above and below that average.
firms_num = firms_count()
firms_data = get_firms_data(firms_num)
av_profit = average_profit(firms_data)
print(f'\nСредняя годовая прибыль всех предприятий: {av_profit}')
print('\nПредприятия с прыбылью выше средней:', *over_profit(firms_data, av_profit), sep='\n')
print('\nПредприятия с прыбылью ниже средней:', *below_profit(firms_data, av_profit), sep='\n')
|
[
"noreply@github.com"
] |
HelenMaksimova.noreply@github.com
|
5df80fcc727c5509aada718e3f6c68c69dd3230e
|
6187eda12d02fbf8e309c9efdcead58dacc35b6f
|
/sdk/cogscale/client/service_client.py
|
f8cd9be0f71252ee6f0a50fc2c2c5001c347a4e6
|
[
"Apache-2.0"
] |
permissive
|
CognitiveScale/industry-models
|
3f08fc8417357a19b78bef95a084d5aa7a983c93
|
9afc26f0b209fbec7bfd41e37d6ff7d86ea7d1c3
|
refs/heads/master
| 2021-01-20T20:14:28.253187
| 2016-07-21T18:29:54
| 2016-07-21T18:29:54
| 63,868,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,946
|
py
|
#
# Copyright 2016 CognitiveScale, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from cogscale.client.resource import Resource
from cogscale.client.results import Success, create_error, Error
class Service(Resource):
    """A catalog service entry as returned by the /services API."""
    def __init__(self, client, attributes):
        Resource.__init__(self, client, attributes)
class Activation(Resource):
    """A service activation as returned by the /activations API."""
    def __init__(self, client, attributes):
        Resource.__init__(self, client, attributes)
class ServiceClient(object):
    """Client for the /services and /activations REST endpoints.

    Wraps a low-level HTTP client and converts responses into
    Success / Error result objects. NOTE: this module is Python 2
    (print statements below).
    """
    def __init__(self, client):
        # client exposes get_as_json / post / put / delete.
        self.client = client
    def find_services_of_type(self, service_type):
        """Return Success({'services': [...]}) for services matching *service_type*."""
        r = self.client.get_as_json("services", {'typeExpr': service_type})
        if "services" in r:
            return Success({"services": [Service(self.client, s) for s in r["services"]]})
        return Error({"error": "Error listing services of type %s: %s" % (service_type, r)})
    def get_service_of_type(self, service_type, service_id):
        """Return the single service matching type and id, or an Error."""
        r = self.client.get_as_json("services", {'typeExpr': service_type, 'idExpr': service_id})
        if "services" in r:
            if len(r["services"]) == 0:
                return Error({"error": "Service of type %s with ID %s not found" % (service_type, service_id)})
            # Only the first match is returned.
            return Success({"service": Service(self.client, r["services"][0])})
        return create_error(r)
    def find_activations(self):
        """List all activations."""
        r = self.client.get_as_json("activations")
        if "activations" in r:
            return Success({'activations': [Activation(self.client, a) for a in r["activations"]]})
        return Error({"error": "Error listing activations: %s" % r})
    def get_activation(self, slug):
        """Fetch one activation by slug (assumes the request succeeds)."""
        r = self.client.get_as_json("activations/%s" % slug)
        return Success({'activation': Activation(self.client, r)})
    def activate_service(self, activation):
        """POST a new activation; on 201 return its slug from the Location header."""
        r = self.client.post('activations', activation)
        if r.status_code == 201:
            if os.getenv("CS_DEBUG"):
                print r.headers
            slug = r.headers['Location']
            return Success({'message': 'Created activation %s' % slug, 'slug': slug})
        return create_error(r)
    def save_activation(self, slug, activation):
        """PUT an updated activation document for *slug*."""
        if os.getenv("CS_DEBUG"):
            print slug, activation
        r = self.client.put('activations/%s' % slug, activation)
        if r.status_code == 200:
            return Success({'message': 'Saved activation %s' % slug})
        return create_error(r)
    def disable_activation(self, slug):
        """Set the activation state to 'disabled' (any 2xx counts as success)."""
        r = self.client.put('activations/%s/state' % slug, "disabled")
        if r.status_code // 100 == 2:
            return Success({'message': 'disabled activation %s' % slug})
        return create_error(r)
    def resume_activation(self, slug):
        """Set the activation state back to 'enabled' (any 2xx counts as success)."""
        r = self.client.put('activations/%s/state' % slug, "enabled")
        if r.status_code // 100 == 2:
            return Success({'message': 'resumed activation %s' % slug})
        return create_error(r)
    def drop_activation(self, slug):
        """DELETE an activation by slug."""
        r = self.client.delete('activations/%s' % slug)
        if r.status_code == 200:
            return Success({'message': 'Activation %s dropped successfully' % slug})
        return create_error(r)
    def service_status(self):
        """Return the service status field, or an Error."""
        r = self.client.get_as_json('status')
        if 'status' in r:
            return Success({'status': r['status']})
        return Error({'error': 'Error getting service status: %s' % r})
|
[
"msanchez@cognitivescale.com"
] |
msanchez@cognitivescale.com
|
9aa9512ec6fb84ba8b0f2c3e7cff52f2a54cf02c
|
8c11893f10a1eef5deec2a5cb16eaeeaf5d5ef01
|
/src/100818/042719/nnn_disorder_spinup_gf_QSHlead.py
|
be85edaef513c86cbb2a2a864e3d3c4b29d9fd06
|
[] |
no_license
|
yuhao12345/topological_propagation
|
a2b325f11d4e6117581f94d2339c3fba3190271d
|
ef4bf7ee9fdb8a0e0bd04115250a131f919fdac9
|
refs/heads/master
| 2023-07-19T14:55:00.224328
| 2021-09-17T05:05:45
| 2021-09-17T05:05:45
| 307,262,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,421
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 14 15:34:19 2019
@author: ykang
"""
import numpy as np
from matplotlib import pyplot
import scipy.io as sio
import os.path
import random
import kwant
from kwant.digest import uniform
from random import choices
from time import time
from joblib import Parallel, delayed
import multiprocessing
t_ini=time()
# Geometry and disorder parameters for the honeycomb (graphene) system.
width=30
length=200
dis=0  # disorder strength; 0 disables the random on-site term below
graphene = kwant.lattice.general([[1, 0], [1/2, np.sqrt(3)/2]], # lattice vectors
                                 [[0, 0], [0, 1/np.sqrt(3)]]) # Coordinates of the sites
a, b = graphene.sublattices
m2 = .1 #spin 3*np.sqrt(3)*m2
# Next-nearest-neighbour hoppings per sublattice (Haldane-style imaginary
# hopping 1j*m2 is assigned below) — TODO confirm sign conventions.
nnn_hoppings_a = (((-1, 0), a, a), ((0, 1), a, a), ((1, -1), a, a))
nnn_hoppings_b = (((1, 0), b, b), ((0, -1), b, b), ((-1, 1), b, b))
nnn_hoppings = nnn_hoppings_a + nnn_hoppings_b
def make_system(width, length, salt):
    """Build the scattering region; `salt` seeds the on-site disorder."""
    def disk(pos):
        x,y=pos
        return abs(y)<width and abs(x)<length #25.1
    def onsite(site):
        # Random on-site energy only in the strip y > width-10.
        x,y=site.pos
        if y>width-10:
            return (uniform(repr(site),salt)-0.5)*dis
        else:
            return 0
    sys=kwant.Builder()
    sys[graphene.shape(disk,(0,0))]= onsite #0 #
    sys[graphene.neighbors()]=1 # comment it, when has rashba
    sys[[kwant.builder.HoppingKind(*hopping) for hopping in nnn_hoppings]] = 1j*m2
    return sys
def attach_lead(sys):
    """Attach a translationally invariant lead on both ends of the system."""
    def lead_shape(pos):
        x,y=pos
        return abs(y)<width
    sym = kwant.TranslationalSymmetry((-1,0))
    sym.add_site_family(graphene.sublattices[0], other_vectors=[(-1, 2)])
    sym.add_site_family(graphene.sublattices[1], other_vectors=[(-1, 2)])
    lead = kwant.Builder(sym)
    lead[graphene.shape(lead_shape, (0, width-1))] = 0
    lead[graphene.neighbors()]=1
    # lead[[kwant.builder.HoppingKind(*hopping) for hopping in nnn_hoppings]]=1j *m2
    sys.attach_lead(lead)
    sys.attach_lead(lead.reversed())
#def mount_vlead(sys, vlead_interface, norb):
#    """Mounts virtual lead to interfaces provided.
#
#    :sys: kwant.builder.Builder
#        An unfinalized system to mount leads
#    :vlead_interface: sequence of kwant.builder.Site
#        Interface of lead
#    :norb: integer
#        Number of orbitals in system hamiltonian.
#    """
#    dim = len(vlead_interface)*norb
#    zero_array = np.zeros((dim, dim), dtype=float)
#    def selfenergy_func(energy, args=()):
#        return zero_array
#
#    vlead = kwant.builder.SelfEnergyLead(selfenergy_func, vlead_interface)
#    sys.leads.append(vlead)
########### for one configuration
#en=0.4
syst=make_system(width, length, '9') # whole system as virtual lead
attach_lead(syst)
##kwant.plot(syst0,fig_size=(25, 10))
#
##greens_function_sites = syst0.sites()
##mount_vlead(syst0, greens_function_sites, 1)
sys = syst.finalized()
#kwant.plot(sys)
en=0.4008
# Transmission from lead 0 to lead 1 and scattering wave function at `en`.
G=kwant.smatrix(sys,en).transmission(1,0)
wf=kwant.wave_function(sys,en)(0)
kwant.plotter.map(sys, (abs(wf[0])**2),num_lead_cells=5,fig_size=(15, 10),colorbar=False)
### step 1, gf spectrum
energies=np.linspace(0.35,0.45,1000)
def gf_01(cishu):
    """Compute the lead 0 -> 1 Green's function at energies[cishu] and save it.

    NOTE(review): writes to a hard-coded absolute path on drive E: —
    portable only on the author's machine.
    """
    en=energies[cishu]
    gf=kwant.greens_function(sys,en).submatrix(1,0)
    myDict = {'gf':gf} #,'ld':ld
    completeName = os.path.join('E:/dwell3/751/69/', str(cishu)+'.mat')
    sio.savemat(completeName,myDict,oned_as='row')
    return gf
Parallel(n_jobs=10)(delayed(gf_01)(cishu) for cishu in np.arange(0,1000,1))
elapsed=time()-t_ini
|
[
"yuhaok@uchicago.edu"
] |
yuhaok@uchicago.edu
|
ed16fecfd1164b433a4d9af495e35147b69f90a2
|
20a2789ddd20ee35c069cd72724b1a0a5df76194
|
/alte Klausur/3. Mal PK.py
|
96736ffd3d48f401262035cabbc415a7e2012e02
|
[] |
no_license
|
KKainz/Uebungen
|
9c63e691b37fa678ed578f38668b278ad82837d5
|
372473e91a44e8ec244e8a9312b5464ffe36e147
|
refs/heads/master
| 2023-02-09T18:12:47.220168
| 2021-01-10T17:26:42
| 2021-01-10T17:26:42
| 325,608,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,670
|
py
|
from typing import List, Dict
import abc
import math
class Instrument:
    """A musical instrument with a display name and a loudness value."""

    def __init__(self, name: str, lautstaerke: float):
        # Plain data holder: both attributes are public and stored as given.
        self.lautstaerke = lautstaerke
        self.name = name
class Musikant(abc.ABC):
    """Abstract base class for a town-musician animal.

    Subclasses must define how many robbers the animal scares away and
    how loudly it plays.
    """

    def __init__(self, anzahl_beine: int, instrument: Instrument):
        self.__anzahl_beine = anzahl_beine
        self.__instrument = instrument

    @property
    def anzahl_beine(self):
        """Number of legs (read-only)."""
        return self.__anzahl_beine

    @property
    def instrument(self):
        """The instrument this musician plays (read-only)."""
        return self.__instrument

    @abc.abstractmethod
    def verscheuche_raeuber(self) -> int:
        """Return how many robbers this musician scares away."""

    @abc.abstractmethod
    def spiele_musik(self) -> float:
        """Return this musician's playing volume."""

    def __repr__(self):
        scared = self.verscheuche_raeuber()
        volume = self.spiele_musik()
        return f'Verscheucht: {scared}, Musiziert: {volume}'
class Esel(Musikant):
    """Donkey: scares robbers by kicking with all of its legs."""

    def __init__(self, anzahl_beine: int, instrument: Instrument, tritt_kraft: float):
        super().__init__(anzahl_beine, instrument)
        self.__tritt_kraft = tritt_kraft

    def __repr__(self):
        return f'{type(self).__name__} {self.__tritt_kraft}: {super().__repr__()}'

    def verscheuche_raeuber(self) -> int:
        # Kick power scales with the number of legs, rounded down.
        total_kick = self.__tritt_kraft * self.anzahl_beine
        return math.floor(total_kick)

    def spiele_musik(self) -> float:
        # The donkey plays exactly as loudly as its instrument allows.
        return self.instrument.lautstaerke
class Hund(Musikant):
    """Dog: the louder of bark and instrument scares, the average of both plays."""

    def __init__(self, anzahl_beine: int, instrument: Instrument, bell_lautstaerke: float):
        super().__init__(anzahl_beine, instrument)
        self.__bell_lautstaerke = bell_lautstaerke

    def __repr__(self):
        return f'{type(self).__name__} {self.__bell_lautstaerke}: {super().__repr__()}'

    def verscheuche_raeuber(self) -> int:
        # Whichever is louder — bark or instrument — determines the effect.
        louder = max(self.__bell_lautstaerke, self.instrument.lautstaerke)
        return math.floor(louder)

    def spiele_musik(self) -> float:
        # Mean of bark volume and instrument volume.
        return (self.__bell_lautstaerke + self.instrument.lautstaerke) / 2
class Katze(Musikant):
    """Cat: scratch effectiveness depends on how many legs it has left."""

    def __init__(self, anzahl_beine: int, instrument: Instrument, kratz_kraft: float):
        super().__init__(anzahl_beine, instrument)
        self.__kratz_kraft = kratz_kraft

    def __repr__(self):
        return f'{type(self).__name__} {self.__kratz_kraft}: {super().__repr__()}'

    def verscheuche_raeuber(self) -> int:
        legs = self.anzahl_beine
        # Exactly three legs: half effectiveness; two or fewer: barely any.
        if legs == 3:
            return math.floor(self.__kratz_kraft / 2)
        if legs <= 2:
            return 1
        return math.floor(self.__kratz_kraft)

    def spiele_musik(self) -> float:
        return self.instrument.lautstaerke
class Hahn(Musikant):
    """Rooster: scare power shrinks as its flight range grows."""

    def __init__(self, anzahl_beine: int, instrument: Instrument, flug_weite: int):
        super().__init__(anzahl_beine, instrument)
        self.__flug_weite = flug_weite

    def __repr__(self):
        return f'{type(self).__name__} {self.__flug_weite}: {super().__repr__()}'

    def verscheuche_raeuber(self) -> int:
        # Barely airborne: fall back to the instrument's loudness.
        if self.__flug_weite < 2:
            return math.floor(self.instrument.lautstaerke)
        # Ranges 2..6 map linearly 6, 5, 4, 3, 2 — i.e. 8 minus the range
        # (same values as the original elif ladder).
        if self.__flug_weite <= 6:
            return 8 - self.__flug_weite
        return 1

    def spiele_musik(self) -> float:
        return (self.instrument.lautstaerke + 2) / self.__flug_weite
class Quartett:
    """A band of Musikant members with aggregate queries.

    Improvements over the original: idiomatic boolean return in
    ist_quartett, sum()/comprehensions instead of manual accumulator
    loops, and removal of the dead commented-out alternative for the
    leg-count histogram. External behavior is unchanged.
    """

    def __init__(self):
        # Internal member list; filled via add().
        self.__musikanten_liste = []

    def add(self, m: Musikant):
        """Add one musician to the band."""
        self.__musikanten_liste.append(m)

    def ist_quartett(self) -> bool:
        """Return True when exactly four musicians are present."""
        return len(self.__musikanten_liste) == 4

    def gemeinsam_raeuber_verscheucht(self) -> int:
        """Total number of robbers scared off by all members combined."""
        return sum(m.verscheuche_raeuber() for m in self.__musikanten_liste)

    def durchschnittliche_lautstaerke(self) -> float:
        """Mean playing volume over all members.

        Raises ZeroDivisionError for an empty band (same as the original).
        """
        total = sum(m.spiele_musik() for m in self.__musikanten_liste)
        return total / len(self.__musikanten_liste)

    def get_musikanten_in_laustaerke_bereich(self, von: float, bis: float) -> List[Musikant]:
        """Return members whose volume lies in the inclusive range [von, bis]."""
        return [m for m in self.__musikanten_liste
                if von <= m.spiele_musik() <= bis]

    def get_anzahl_musikanten_mit_bein_anzahl(self) -> Dict[int, int]:
        """Map leg count -> number of members with that many legs."""
        bein_dict = {}
        for m in self.__musikanten_liste:
            bein_dict[m.anzahl_beine] = bein_dict.get(m.anzahl_beine, 0) + 1
        return bein_dict
if __name__ == '__main__':
    # Smoke test: build four instruments, one musician of each species,
    # assemble them into a quartet and print all aggregate queries.
    chello = Instrument("Chello", 5)
    klavier = Instrument("Klavier", 6)
    floete = Instrument("Floete", 4)
    drum = Instrument("Schlagzeug", 8)
    esel = Esel(4, chello, 6.5)
    hund = Hund(4, klavier, 5.9)
    katze = Katze(3, floete, 8.2)
    hahn = Hahn(2, drum, 3)
    q = Quartett()
    q.add(esel)
    q.add(hahn)
    q.add(hund)
    q.add(katze)
    print(q.ist_quartett())
    print(q.gemeinsam_raeuber_verscheucht())
    print(q.durchschnittliche_lautstaerke())
    print(q.get_musikanten_in_laustaerke_bereich(5, 10))
    print(q.get_anzahl_musikanten_mit_bein_anzahl())
|
[
"kristina.kainz@edu.campus02.at"
] |
kristina.kainz@edu.campus02.at
|
9da49f42c6d8aaedf68d9d5f35f09e8d3c89dbf6
|
f201a096b9c74bccd13648ae0137e41397e3c0a2
|
/ec_curves.py
|
d13df3fee4e4750ebe995930c93e5cbb06724669
|
[] |
no_license
|
vickraj/Elliptic-Curve-Crypto
|
48344286307fb54bb186c01ff97eaa654ce608e5
|
5d01f0c9498b040f62a8e6d93d677fbef0e1fc77
|
refs/heads/master
| 2020-03-21T08:23:06.516227
| 2018-06-24T00:58:16
| 2018-06-24T00:58:16
| 138,339,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,128
|
py
|
from ec_fields import f_add, f_mult, f_inv, f_sqrt, f_rand
from sympy import *
import numpy as np
###A module containing the two classes, Curves and Points, and related methods.
class Curve:
    """Elliptic curve y^2 = a*x^3 + b*x + c over a chosen base field.

    Attributes:
        q: field selector - 0 for the rationals, -1 for the reals,
           otherwise the size of the finite field F_q.
        a, b, c: curve coefficients.

    All curves initialize as the trivial elliptic curve.
    """

    def __init__(self):
        self.q = 0  # 0 for rationals, -1 for reals, else finite-field size
        self.a = 0
        self.b = 0
        self.c = 0

    def add(self, x, y, fac=0):
        """Add two points on this curve by the standard chord-and-flip rule.

        Params:
            x, y: points to add; both must lie on this curve.
            fac: flag forwarded to f_inv - when set, stop as soon as a
                 nontrivial gcd is found in any inversion.
        Return:
            A Point equal to x + y on success, 0 on failure.
        """
        # BUGFIX: the original called ec.verify(...) where `ec` is an
        # undefined global name; the checks belong to this curve (self).
        if self.verify(x) != True:
            print("This is not on the curve.")
            return 0
        if self.verify(y) != True:
            print("This is not on the curve.")
            return 0
        if y.z == 0:  # y is the point at infinity (group identity)
            return x
        elif x.z == 0:  # x is the point at infinity
            return y
        else:
            q = self.q
            x_1 = x.plane()[0]
            y_1 = x.plane()[1]
            x_2 = y.plane()[0]
            y_2 = y.plane()[1]
            ret = Point()
            if x_1 != x_2:
                # Distinct x: secant slope m = (y2 - y1) / (x2 - x1).
                m = f_add(self.q, y_2, -y_1)
                m = f_mult(self.q, m, f_inv(self.q, f_add(self.q, x_2, -x_1), fac))
                x_3 = f_mult(q, m, m)
                x_3 = f_add(q, x_3, -x_1)
                x_3 = f_add(q, x_3, -x_2)
                y_3 = f_add(q, x_1, -x_3)
                y_3 = f_mult(q, m, y_3)
                y_3 = f_add(q, y_3, -y_1)
            elif y_1 != y_2:
                # Same x, different y: the points are inverses; sum is infinity.
                ret.x = 0
                ret.y = 1
                ret.z = 0
                return ret
            elif y_1 != 0:  # P_1 = P_2, y_1 != 0: tangent-line doubling
                m = f_mult(q, 3, x_1)
                m = f_mult(q, m, x_1)
                m = f_add(q, m, self.b)
                div = f_mult(q, 2, y_1)
                m = f_mult(q, m, f_inv(q, div, fac))
                x_3 = f_mult(q, m, m)
                x_3 = f_add(q, x_3, -x_1)
                x_3 = f_add(q, x_3, -x_1)
                y_3 = f_add(q, x_1, -x_3)
                y_3 = f_mult(q, m, y_3)
                y_3 = f_add(q, y_3, -y_1)
            elif y_1 == 0:
                # Doubling a 2-torsion point (y == 0) yields infinity.
                ret.x = 0
                ret.y = 1
                ret.z = 0
                return ret
            ret.x = x_3
            ret.y = y_3
            ret.z = 1
            return ret

    def scale(self, point, n, fac=0):
        """Return n * point via recursive double-and-add.

        Params:
            point: the point to scale.
            n: the integer multiplier.
            fac: flag forwarded to add() for gcd-aware inversions.
        Return:
            The scaled point, or 0 when n == 0.
        """
        if n == 0:
            return 0
        elif n == 1:
            return point
        elif n % 2 == 1:
            return self.add(point, self.scale(point, n - 1, fac), fac)
        else:
            # BUGFIX: use integer halving; `n / 2` is a float in Python 3.
            return self.scale(self.add(point, point, fac), n // 2, fac)

    def rand(self):
        """Return a random point on this curve over F_q.

        Repeatedly samples x until x^3 + b*x + c is a square in F_q.
        TODO: prime power support (inherited from the original).
        """
        y_0 = False
        while y_0 == False:
            x_0 = np.random.randint(0, self.q)
            # y^2 = x^3 + b*x + c
            y_square = f_mult(self.q, x_0, x_0)
            y_square = f_mult(self.q, x_0, y_square)
            temp = f_mult(self.q, x_0, self.b)
            y_square = f_add(self.q, temp, y_square)
            y_square = f_add(self.q, y_square, self.c)
            y_0 = f_sqrt(self.q, y_square)  # falsy when not a square
        r = Point()
        r.x = x_0
        r.y = y_0
        r.z = 1
        return r

    def verify(self, p):
        """Return True iff point p satisfies this curve's equation.

        NOTE(review): the coefficient `a` is not used here (the equation is
        treated as y^2 = x^3 + b*x + c), matching add()'s doubling formula.
        """
        x = p.plane()[0]
        y = p.plane()[1]
        if self.q > 0:
            x = int(x)
            y = int(y)
            y = pow(y, 2, self.q)
            rhs = pow(x, 3, self.q)
            rhs += (self.b * x) % (self.q)
            rhs += self.c
            rhs = rhs % (self.q)
            return y == rhs
        else:
            return y ** 2 == (x ** 3 + self.b * x + self.c)

    def __str__(self):
        """Debugging string that says exactly what this elliptic curve is."""
        stra = str(self.a)
        strb = str(self.b)
        if self.b < 0:
            strb = str(-1 * self.b)
        strc = str(self.c)
        if self.c < 0:
            strc = str(-1 * self.c)
        field = ""
        if self.q == 0:
            field = "rationals"
        elif self.q == -1:
            field = "reals"
        else:
            field = "finite field with " + str(self.q) + " elements"
        if self.a == 0:
            stra = ""
        elif self.a == 1:
            stra = "x^3"
        elif self.a == -1:
            stra = "-x^3"
        else:
            stra += "x^3"
        if self.b == 0:
            strb = ""
        elif self.b == 1 or self.b == -1:
            strb = "x"
        else:
            strb = strb + "x"
        string = stra
        if self.a == 0:
            string = ""
        if self.b < 0 and self.a != 0:
            string += " - " + strb
        elif self.b > 0 and self.a != 0:
            string += " + " + strb
        elif self.a == 0 and self.b < 0:
            string += "-" + strb
        elif self.a == 0 and self.b > 0:
            string += strb
        if string == "":
            if self.c < 0:
                string += "-" + strc
            else:
                string += strc
        else:
            if self.c < 0:
                string += " - " + strc
            elif self.c > 0:
                string += " + " + strc
        return ("This curve is over the " + field + " with equation" +
                " y^2 = " + string)
class Point:
    """A curve point in projective coordinates (x : y : z).

    z == 0 represents the point at infinity; all points initialize as the
    point at infinity (0 : 1 : 0).
    """

    def __init__(self):
        self.x = 0
        self.y = 1
        self.z = 0

    def neg(self):
        """Negate the point in place (valid for curves in Weierstrass form)."""
        self.y = -self.y
        if self.z == 0:
            # Keep the canonical representation of the point at infinity.
            self.y = 1

    def plane(self):
        """Return the point in affine (plane) coordinates as a tuple.

        The point at infinity is reported as (0, 1).
        """
        if self.z == 0:
            return (0, 1)
        else:
            return (self.x / self.z, self.y / self.z)

    def equiv_c(self, ec, m=0):
        """Return True iff this point equals point m on curve ec.

        Params:
            ec: the elliptic curve supplying the base field (ec.q).
            m: the other point; default 0 means the point at infinity.
        """
        if m == 0:
            p = Point()
            p.x = 0
            p.y = 1
            p.z = 0
            return self.equiv_c(ec, p)
        if self.z == 0 or m.z == 0:
            # Points at infinity only match each other.
            return self.z == 0 and m.z == 0
        temp = f_inv(ec.q, m.z)
        s_temp = f_inv(ec.q, self.z)
        self_x = f_mult(ec.q, int(self.x), s_temp)
        self_y = f_mult(ec.q, int(self.y), s_temp)
        # BUGFIX: normalize m by the inverse of m.z (temp); the original
        # multiplied by m.z itself and left `temp` unused.
        m_x = f_mult(ec.q, m.x, temp)
        m_y = f_mult(ec.q, m.y, temp)
        if ec.q > 0:
            return m_x % (ec.q) == self_x % (ec.q) and m_y % (ec.q) == self_y % (ec.q)
        else:
            return m_x == self_x and m_y == self_y

    def __str__(self):
        """Show the point in both plane and projective coordinates."""
        if self.z != 0:
            return "Plane: " + str((self.x / self.z, self.y / self.z)) + "\n" + "Projective: " + str((self.x, self.y, self.z))
        else:
            return "Point at infinity: (0, 1, 0)"
|
[
"noreply@github.com"
] |
vickraj.noreply@github.com
|
bad518a75ce094b830ad4c3e9d91bb3752671f3e
|
8a4556e3ea8aa24e795fb8c55f4e518dd748e4b9
|
/src/networks/unet_mini.py
|
c435723b7fd3f509efcf525432088b0859fef733
|
[] |
no_license
|
MostafaGazar/lines-segmentation-pytorch
|
673f74fa2e5b45afdebad0d4dfd3e079541e174f
|
6a2dfff4c72913479653af4aed0b5cd2d3d16333
|
refs/heads/master
| 2022-11-23T08:58:19.705910
| 2019-09-14T19:49:00
| 2019-09-14T19:49:00
| 208,118,988
| 4
| 3
| null | 2022-11-22T04:16:45
| 2019-09-12T18:29:38
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,646
|
py
|
import torch
from torch.nn import Module
from torch.nn import Sequential
from torch.nn import Conv2d, Dropout2d, MaxPool2d, ReLU, UpsamplingNearest2d
# Based on https://github.com/divamgupta/image-segmentation-keras/blob/master/keras_segmentation/models/unet.py#L19
class UNetMini(Module):
    """A small U-Net: two down-sampling stages, a bottleneck, and two
    up-sampling stages with skip connections, for single-channel input.

    Args:
        num_classes: number of output channels (one score map per class).
    """

    def __init__(self, num_classes):
        super(UNetMini, self).__init__()

        # Use padding 1 to mimic `padding='same'` in keras; see
        # https://ezyang.github.io/convolution-visualizer/index.html
        self.block1 = Sequential(
            Conv2d(1, 32, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(32, 32, kernel_size=3, padding=1),
            ReLU(),
        )
        self.pool1 = MaxPool2d((2, 2))

        self.block2 = Sequential(
            Conv2d(32, 64, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(64, 64, kernel_size=3, padding=1),
            ReLU(),
        )
        self.pool2 = MaxPool2d((2, 2))

        # Bottleneck.
        self.block3 = Sequential(
            Conv2d(64, 128, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(128, 128, kernel_size=3, padding=1),
            ReLU()
        )

        self.up1 = UpsamplingNearest2d(scale_factor=2)
        # 192 = 128 (upsampled bottleneck) + 64 (skip from block2).
        self.block4 = Sequential(
            Conv2d(192, 64, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(64, 64, kernel_size=3, padding=1),
            ReLU()
        )

        self.up2 = UpsamplingNearest2d(scale_factor=2)
        # 96 = 64 (upsampled block4) + 32 (skip from block1).
        self.block5 = Sequential(
            Conv2d(96, 32, kernel_size=3, padding=1),
            ReLU(),
            Dropout2d(0.2),
            Conv2d(32, 32, kernel_size=3, padding=1),
            ReLU()
        )

        # Final 1x1 convolution producing per-class score maps.
        self.conv2d = Conv2d(32, num_classes, kernel_size=1)

    def forward(self, x):
        out1 = self.block1(x)
        out_pool1 = self.pool1(out1)

        out2 = self.block2(out_pool1)
        # BUGFIX: use pool2 here (the original reused self.pool1 and left
        # pool2 unused; both are MaxPool2d((2, 2)), so outputs are identical).
        out_pool2 = self.pool2(out2)

        out3 = self.block3(out_pool2)
        out_up1 = self.up1(out3)

        # Skip connection: concatenate along the channel dimension.
        out4 = torch.cat((out_up1, out2), dim=1)
        out4 = self.block4(out4)
        out_up2 = self.up2(out4)

        out5 = torch.cat((out_up2, out1), dim=1)
        out5 = self.block5(out5)

        out = self.conv2d(out5)
        return out
if __name__ == '__main__':
    # Quick manual sanity check: print a layer-by-layer summary of the model.
    # NOTE: requires the third-party `torchsummary` package.
    from torchsummary import summary

    device = torch.device("cpu")
    number_of_classes = 3
    model = UNetMini(number_of_classes).to(device)
    summary(model, input_size=(1, 256, 256))  # (channels, H, W)
|
[
"mmegazar@gmail.com"
] |
mmegazar@gmail.com
|
268f582b8020642e0d04afced2cadbd446bcd957
|
d0dce965ee97847397e12c741d247941b72a086d
|
/platform_msg/AppAllInfo/AppAllInfo/spiders/feixiaohao_notice.py
|
fee9a49f44ad8534773849c17954a40d427a2fad
|
[] |
no_license
|
xuxin8911/wechat_robot
|
21e04b53c34cd34148767ba0bfa1bf076271bea5
|
49b6ed2d452144913c78ec457bcea0def14beb43
|
refs/heads/master
| 2021-05-05T21:49:37.019674
| 2018-01-24T08:07:01
| 2018-01-24T08:07:01
| 116,029,119
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,334
|
py
|
# -*- coding:utf-8 -*-
import scrapy
from scrapy.selector import Selector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.http import Request
from AppAllInfo.items import *
from AppAllInfo.settings import APP_NAME
import codecs
class feixiaohao_notice(scrapy.Spider):
    # Scrapy spider that crawls www.feixiaohao.com currency pages and
    # extracts per-coin market data into FeiXiaoHaoItem objects.
    # NOTE: uses the Python 2 print statement and the legacy
    # scrapy.contrib SgmlLinkExtractor import - this module targets
    # Python 2 / an old Scrapy release.
    name = "feixiaohao_notice_spider"
    allowed_domains = ["www.feixiaohao.com"]
    urls = [
        #"http://www.wandoujia.com/tag/视频",
        #"http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
        "https://www.feixiaohao.com/notice/"
        # "https://www.feixiaohao.com/currencies/bitcoin/"
    ]
    #urls.extend([ "http://www.feixiaohao.com/list_%d.html" % x for x in range(2,17) ])
    start_urls = urls
    #rules = [Rule(LinkExtractor(allow=['/apps/.+']), 'parse')]

    def parse(self, response):
        # Listing page: follow every link that points at a currency detail
        # page (/currencies/...).
        page = Selector(response)
        for link in page.xpath("//a/@href"):
            href = link.extract()
            if href.startswith("/currencies/"):
                yield Request("http://www.feixiaohao.com" + href, callback=self.parse_curr_page)

    def parse_curr_page(self, response):
        # Currency detail page: scrape the info panel (#baseInfo) into an item.
        # The XPaths below are tightly coupled to the site's 2018 layout.
        #for sel in response.xpath('//ul/li'):
        #    title = sel.xpath('a/text()').extract()
        #    link = sel.xpath('a/@href').extract()
        #    desc = sel.xpath('text()').extract()
        #print title, link, desc
        item = FeiXiaoHaoItem()
        sel = Selector(response)
        # Symbol and Chinese name come from different text nodes of the <h1>.
        name = sel.xpath('//*[@id="baseInfo"]/div[1]/div[1]/h1/node()').extract()[2].strip()
        chineseName = sel.xpath('//*[@id="baseInfo"]/div[1]/div[1]/h1/node()').extract()[-1].strip()
        engName = sel.xpath('//*[@id="baseInfo"]/div[2]/ul/li[1]/span[2]/text()').extract()[0]
        cnyPrice = sel.xpath('//*[@id="baseInfo"]/div[1]/div[1]/div[1]/text()').extract()[0]
        # Strip the leading "approximately equal" sign (U+2248) from prices.
        usdtPrice = sel.xpath('//*[@id="baseInfo"]/div[1]/div[1]/div[3]/span[1]/text()').extract()[0].replace(u'\u2248', '')
        btcPrice = sel.xpath('//*[@id="baseInfo"]/div[1]/div[1]/div[3]/span[2]/text()').extract()[0].replace(u'\u2248', '')
        # "家" (counter word for exchanges) is stripped to leave the number.
        upMarkets = sel.xpath('//*[@id="baseInfo"]/div[2]/ul/li[3]/span[2]/a/text()').extract()[0].strip().replace("家","")
        releaseTime = sel.xpath('//*[@id="baseInfo"]/div[2]/ul/li[4]/span[2]/text()').extract()[0]
        whitePaper = sel.xpath('//*[@id="baseInfo"]/div[2]/ul/li[5]/span[2]/a/@href').extract()[0]
        # site/blockite keep the whole extracted list as its repr string.
        site = repr(sel.xpath('//*[@id="baseInfo"]/div[2]/ul/li[6]/span[2]/a/@href').extract())
        blockite = repr(sel.xpath('//*[@id="baseInfo"]/div[2]/ul/li[7]/span[2]/a/@href').extract())
        concept = sel.xpath('//*[@id="baseInfo"]/div[2]/ul/li[8]/span[2]/a/text()').extract()[0]
        print name,chineseName
        item['name'] = name
        item['chineseName'] = chineseName
        item['engName'] = engName
        item['cnyPrice'] = cnyPrice
        item['usdtPrice'] = usdtPrice
        item['btcPrice'] = btcPrice
        item['upMarkets'] = upMarkets
        item['releaseTime'] = releaseTime
        item['whitePaper'] = whitePaper
        item['site'] =site
        item['blockite'] = blockite
        # NOTE(review): `concept` is extracted above but deliberately not
        # stored - the field is set to an empty string. Confirm intent.
        item['concept'] = "" #concept
        yield item

    def process_item(self,item):
        # NOTE(review): returns item[0] stripped when item is truthy -
        # presumably intended as an extracted-list post-processor; it is
        # not wired into the parse methods above.
        return item and item[0].strip() or ""

    def process_name(self,item):
        # NOTE(review): same pattern as process_item but takes item[1];
        # also unused by the parse methods above.
        return item and item[1].strip() or ""
|
[
"xuxin@gdbigdata.com"
] |
xuxin@gdbigdata.com
|
ac1afcab6467a272667166b1b5f392fec0281043
|
116d445b32192a4ae441c4a345d8562b2ecbfead
|
/Chapter_04/prg_01.py
|
76f41a0b19ed2a55bac6f2b64fb94b649df52f3c
|
[
"MIT"
] |
permissive
|
lokeshvishwakarma/computer-vision
|
f7003feb677b60ae3a194b31330a210f784ebaa4
|
af4bff3b10e11f6ffca4468378a13c9313328226
|
refs/heads/master
| 2022-07-16T03:42:49.607690
| 2020-05-18T13:02:28
| 2020-05-18T13:02:28
| 262,029,607
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
# Shapes and Texts
import cv2
import numpy as np

# Start from an all-black 512 x 512 BGR canvas.
canvas = np.zeros((512, 512, 3), np.uint8)
print('Original Image', canvas.shape)

# Paint a blue patch over rows 200:300, columns 100:300.
# OpenCV stores channels as BGR, not RGB.
canvas[200:300, 100:300] = 255, 0, 0

# Yellow line from the top-left corner across the canvas.
cv2.line(canvas, (0, 0), (canvas.shape[0], canvas.shape[1]), (0, 255, 255))

# Red rectangle outline, thickness 5 (pass cv2.FILLED for a solid one).
cv2.rectangle(canvas, (20, 20), (250, 300), (0, 0, 255), 5)

# Yellow circle: center (400, 400), radius 60, thickness 3.
cv2.circle(canvas, (400, 400), 60, (0, 255, 255), 3)

# Text label drawn on top of everything else.
cv2.putText(canvas, 'OpenCV', (300, 300), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 150, 230), 1)

# Show the result for 3 seconds.
cv2.imshow('Original Image', canvas)
cv2.waitKey(3000)
|
[
"uvishwakarma3@gmail.com"
] |
uvishwakarma3@gmail.com
|
a063e35a31c7a672b1914db46992b226b0604ce9
|
b21cc7127132b5b3abbbea7aabd43eb8c89f91d8
|
/TP3/redblacktree.py
|
f287c4c76eab2067fbfad0ebf79fdeeec4ca37a3
|
[] |
no_license
|
LucianoAlbanes/AyEDII
|
4f338c67fc7b2fbb4b1373c74145c0a88f58c294
|
ceba670f6b2b4df46b2edb695e82ccba69d8257d
|
refs/heads/master
| 2023-06-16T22:10:32.968978
| 2021-07-08T17:28:20
| 2021-07-08T17:28:20
| 375,737,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,082
|
py
|
# Red-Black Tree implementation
from mybinarytree import getNode, insertAux, searchAux, moveNode, search, access, update, traverseInPreOrder
# Define classes
class RedBlackTree:
    # Root node of the tree; None represents the empty tree.
    # NOTE(review): this is a class attribute used as a default - insert()
    # assigns an instance attribute that shadows it on first insertion.
    root = None
class RedBlackNode:
    # Tree links; None means "absent" (no parent / no child).
    parent = None
    leftnode = None
    rightnode = None
    # Search key used for BST ordering.
    key = None
    # Color flag: True = red, False = black (None until assigned).
    red = None
    # Payload stored at this node.
    value = None
# Shared sentinel "nil" node (CLRS-style), temporarily linked into the tree
# during deletion by createTempNode() and detached by removeTempNode().
nilNode = RedBlackNode()
# Define functions
def insert(RBTree, RBNode):
    '''
    Explanation:
        Insert RBNode into the Red-Black Tree and restore the red-black
        invariants afterwards.
    Params:
        RBTree: The Red-Black Tree to insert into.
        RBNode: The node to be inserted.
    Return:
        The RBTree pointer with the inserted node.
        'None' if another node with the same key already exists.
    '''
    # Refuse duplicate keys.
    if getNode(RBTree, RBNode.key):
        return None
    if RBTree.root is None:
        # Empty tree: the new node becomes the root.
        RBTree.root = RBNode
    else:
        # Standard BST insertion below the existing root.
        insertAux(RBTree.root, RBNode)
    # Repair any red-red violations introduced by the insertion.
    fixup(RBTree, RBNode)
    return RBTree
def insertAlt(RBTree, value, key):
    '''
    Explanation:
        Insert a value under the given key into a Red-Black Tree,
        fixing violations.
    Params:
        RBTree: The Red-Black Tree to insert into.
        value: The payload to store.
        key: The key for the new node.
    Return:
        The key of the inserted node, or 'None' when a node with the
        same key already exists.
    '''
    node = RedBlackNode()
    node.key = key
    node.red = True  # freshly inserted nodes start red
    node.value = value
    # insert() performs the duplicate check and the fixup.
    return key if insert(RBTree, node) else None
def fixup(RBTree, RBNode):
    '''
    Explanation:
        Correct all possible RBNode violations according to the Red-Black tree specification.
    Params:
        RBTree: The Red-Black Tree on which you want to perform the operation.
        RBNode: The RBNode to check if violations exists.
    '''
    # Classic CLRS insert-fixup: while the parent is red we have a red-red
    # violation; resolve it by recoloring (red uncle) or rotating (black uncle).
    while RBNode.parent and RBNode.parent.parent and RBNode.parent.red:
        if RBNode.parent is RBNode.parent.parent.leftnode:
            uncle = RBNode.parent.parent.rightnode
            if uncle and uncle.red:
                # Red uncle: recolor parent/uncle/grandparent, climb two levels.
                RBNode.parent.red = False  # Case 1
                uncle.red = False  # Case 1
                RBNode.parent.parent.red = True  # Case 1
                RBNode = RBNode.parent.parent
            else:
                if RBNode is RBNode.parent.rightnode:
                    # Black uncle, "inner" child: rotate into the outer case.
                    RBNode = RBNode.parent  # Case 2
                    rotateLeft(RBTree, RBNode)  # Case 2
                # Black uncle, "outer" child: recolor and rotate grandparent.
                RBNode.parent.red = False  # Case 3
                RBNode.parent.parent.red = True  # Case 3
                rotateRight(RBTree, RBNode.parent.parent)  # Case 3
        else:
            # Mirror image of the branch above (parent is a right child).
            uncle = RBNode.parent.parent.leftnode
            if uncle and uncle.red:
                RBNode.parent.red = False  # Case 1
                uncle.red = False  # Case 1
                RBNode.parent.parent.red = True  # Case 1
                RBNode = RBNode.parent.parent
            else:
                if RBNode is RBNode.parent.leftnode:
                    RBNode = RBNode.parent  # Case 2
                    rotateRight(RBTree, RBNode)  # Case 2
                RBNode.parent.red = False  # Case 3
                RBNode.parent.parent.red = True  # Case 3
                rotateLeft(RBTree, RBNode.parent.parent)  # Case 3
    # Invariant: the root is always black.
    RBTree.root.red = False
def rotateLeft(RBTree, RBNode):
    '''
    Explanation:
        Rotate left: RBNode becomes the left child of its right child.
    Params:
        RBTree: The Red-Black Tree on which you want to perform the rotation
                (its root pointer may be updated).
        RBNode: The unbalanced RBNode 'root' to be rotated.
    Return:
        The pointer of the new balanced 'root' RBNode. When RBNode has no
        right child the rotation is impossible: a message is printed and
        RBNode is returned unchanged.
    '''
    # BUGFIX: the original only printed the message and then crashed with
    # an AttributeError on None; bail out and leave the tree untouched.
    if not RBNode.rightnode:
        print("Can't rotate left, no rightnode")
        return RBNode
    newRoot = RBNode.rightnode
    # Move newRoot's left subtree over to RBNode's right side.
    if newRoot.leftnode:
        RBNode.rightnode = newRoot.leftnode
        RBNode.rightnode.parent = RBNode
    else:
        RBNode.rightnode = None
    # Splice newRoot into RBNode's former position.
    newRoot.parent = RBNode.parent
    if RBNode is RBTree.root:
        RBTree.root = newRoot
    else:
        if RBNode is RBNode.parent.rightnode:
            RBNode.parent.rightnode = newRoot
        else:
            RBNode.parent.leftnode = newRoot
    # Finish the parent/child links between the two rotated nodes.
    newRoot.leftnode = RBNode
    RBNode.parent = newRoot
    return newRoot
def rotateRight(RBTree, RBNode):
    '''
    Explanation:
        Rotate right: RBNode becomes the right child of its left child.
    Params:
        RBTree: The Red-Black Tree on which you want to perform the rotation
                (its root pointer may be updated).
        RBNode: The unbalanced RBNode 'root' to be rotated.
    Return:
        The pointer of the new balanced 'root' RBNode. When RBNode has no
        left child the rotation is impossible: a message is printed and
        RBNode is returned unchanged.
    '''
    # BUGFIX: the original only printed the message and then crashed with
    # an AttributeError on None; bail out and leave the tree untouched.
    if not RBNode.leftnode:
        print("Can't rotate right, no leftnode")
        return RBNode
    newRoot = RBNode.leftnode
    # Move newRoot's right subtree over to RBNode's left side.
    if newRoot.rightnode:
        RBNode.leftnode = newRoot.rightnode
        RBNode.leftnode.parent = RBNode
    else:
        RBNode.leftnode = None
    # Splice newRoot into RBNode's former position.
    newRoot.parent = RBNode.parent
    if RBNode is RBTree.root:
        RBTree.root = newRoot
    else:
        if RBNode is RBNode.parent.leftnode:
            RBNode.parent.leftnode = newRoot
        else:
            RBNode.parent.rightnode = newRoot
    # Finish the parent/child links between the two rotated nodes.
    newRoot.rightnode = RBNode
    RBNode.parent = newRoot
    return newRoot
def delete(RBTree, RBNode):
    '''
    Explanation:
        Delete a node from a Red-Black Tree, fixing violations.
    Params:
        RBTree: The Red-Black Tree on which you want to perform the deletion.
        RBNode: The node of the tree to be deleted.
    Return:
        The pointer of the tree, or 'None' when either argument is missing.
    '''
    if not (RBTree and RBNode):
        return None
    if RBTree.root.leftnode or RBTree.root.rightnode:
        # General case: remove the node and rebalance.
        deleteAux(RBTree, RBNode)
    else:
        # The root is the only node in the tree.
        RBTree.root = None
    # Make sure the shared nil sentinel is no longer linked into the tree.
    removeTempNode()
    return RBTree
def deleteValue(RBTree, value):
    '''
    Explanation:
        Delete the node holding a given value from a Red-Black Tree,
        fixing violations.
    Info:
        If more than one node holds the value, only the first one found
        in preorder is deleted.
    Params:
        RBTree: The Red-Black Tree on which you want to perform the delete.
        value: The value of the node to be deleted.
    Return:
        The key of the deleted node, or 'None' when no node holds the value.
    '''
    target = searchAux(RBTree.root, value)
    # delete() returns None when target is None.
    return target.key if delete(RBTree, target) else None
def deleteKey(RBTree, key):
    '''
    Explanation:
        Delete the node with a given key from a Red-Black Tree,
        fixing violations.
    Params:
        RBTree: The tree on which you want to perform the delete.
        key: The key of the node to be deleted.
    Return:
        The key of the deleted node, or 'None' when no node has the key.
    '''
    target = getNode(RBTree, key)
    # delete() returns None when target is None.
    return target.key if delete(RBTree, target) else None
def deleteAux(RBTree, RBNode):
    '''
    Perform the deletion of the RBNode,
    and prepares the tree for the deleteFixup() that will be called inside.
    '''
    # successorNode: the node physically removed or moved;
    # successorColor: its color before removal (black removal needs fixup);
    # fixupNode: the node from which deleteFixup() starts repairing.
    successorNode = RBNode
    successorColor = successorNode.red
    fixupNode = None
    # Case leaf node
    if not (RBNode.leftnode or RBNode.rightnode):
        if RBNode is RBNode.parent.leftnode:
            if RBNode.red:
                # Removing a red leaf never breaks black-height.
                RBNode.parent.leftnode = None
            else:
                # Removing a black leaf: park the nil sentinel in its place
                # so deleteFixup() has a node to work from.
                RBNode.parent.leftnode = createTempNode(RBNode.parent, True)
                fixupNode = RBNode.parent.leftnode
        else:
            if RBNode.red:
                RBNode.parent.rightnode = None
            else:
                RBNode.parent.rightnode = createTempNode(RBNode.parent, False)
                fixupNode = RBNode.parent.rightnode
    # Case right branch
    elif not RBNode.leftnode:
        # Only a right child: splice it into RBNode's position.
        fixupNode = RBNode.rightnode
        moveNode(RBTree, RBNode.rightnode, RBNode)
    # Case left branch
    elif not RBNode.rightnode:
        # Only a left child: splice it into RBNode's position.
        fixupNode = RBNode.leftnode
        moveNode(RBTree, RBNode.leftnode, RBNode)
    # Case both branches
    else:
        # The in-order successor is the minimum of the right subtree.
        successorNode = RBNode.rightnode
        while successorNode.leftnode:
            successorNode = successorNode.leftnode
        successorColor = successorNode.red
        fixupNode = successorNode.rightnode
        if not fixupNode:
            # Successor is a leaf: park the nil sentinel where it was.
            if successorNode.parent.rightnode is successorNode:
                fixupNode = createTempNode(successorNode.parent, False)
            else:
                fixupNode = createTempNode(successorNode.parent, True)
        # Reassign pointers so the successor takes RBNode's place.
        if successorNode.parent is RBNode:
            if fixupNode:
                fixupNode.parent = successorNode
        else:
            if successorNode.rightnode:
                moveNode(RBTree, successorNode.rightnode, successorNode)
            successorNode.rightnode = RBNode.rightnode
            if successorNode.rightnode:
                successorNode.rightnode.parent = successorNode
        moveNode(RBTree, successorNode, RBNode)
        successorNode.leftnode = RBNode.leftnode
        successorNode.leftnode.parent = successorNode
        # The successor inherits the deleted node's color.
        successorNode.red = RBNode.red
    # A removed black node breaks the black-height invariant: repair it.
    if not successorColor:
        deleteFixup(RBTree, fixupNode)
def deleteFixup(RBTree, RBNode):
    '''
    This function fixes the possible violations from the RBNode after a
    black node has been removed (CLRS delete-fixup): RBNode carries an
    "extra black" that is pushed up or resolved via recoloring/rotations
    depending on the sibling's color.
    '''
    # CLRS <3
    while RBNode.parent and not RBNode.red:
        if RBNode is RBNode.parent.leftnode:  # RBNode is leftnode case
            siblingNode = RBNode.parent.rightnode
            if isRed(siblingNode):  # Case 1
                siblingNode.red = False
                RBNode.parent.red = True
                rotateLeft(RBTree, RBNode.parent)
                siblingNode = RBNode.parent.rightnode
            if not (isRed(siblingNode.leftnode) or isRed(siblingNode.rightnode)):  # Case 2
                siblingNode.red = True
                RBNode = RBNode.parent
            else:
                if not isRed(siblingNode.rightnode):  # Case 3
                    siblingNode.leftnode.red = False
                    siblingNode.red = True
                    rotateRight(RBTree, siblingNode)
                    siblingNode = RBNode.parent.rightnode
                siblingNode.red = RBNode.parent.red  # Case 4
                RBNode.parent.red = False
                siblingNode.rightnode.red = False
                rotateLeft(RBTree, RBNode.parent)
                RBNode = RBTree.root
        else:  # RBNode is rightnode case
            siblingNode = RBNode.parent.leftnode
            # BUGFIX: use isRed() like the mirrored branch above; the
            # original read siblingNode.red directly and would crash with
            # an AttributeError when the sibling is absent (None).
            if isRed(siblingNode):  # Case 1
                siblingNode.red = False
                RBNode.parent.red = True
                rotateRight(RBTree, RBNode.parent)
                siblingNode = RBNode.parent.leftnode
            if not (isRed(siblingNode.leftnode) or isRed(siblingNode.rightnode)):  # Case 2
                siblingNode.red = True
                RBNode = RBNode.parent
            else:
                if not isRed(siblingNode.leftnode):  # Case 3
                    siblingNode.rightnode.red = False
                    siblingNode.red = True
                    rotateLeft(RBTree, siblingNode)
                    siblingNode = RBNode.parent.leftnode
                siblingNode.red = RBNode.parent.red  # Case 4
                RBNode.parent.red = False
                siblingNode.leftnode.red = False
                rotateRight(RBTree, RBNode.parent)
                RBNode = RBTree.root
    # The node absorbing the extra black becomes (stays) black.
    RBNode.red = False
def isRed(RBNode):
    '''
    Return the color of RBNode; absent nodes (None) count as black,
    so this is None-safe.
    '''
    return RBNode.red if RBNode else False
def createTempNode(parent, isLeftChild):
    '''
    Link the shared 'Nil Node' sentinel (like CLRS) under `parent` - on the
    left when isLeftChild is true, otherwise on the right. Useful in some
    cases of the deletion; removeTempNode() detaches it again afterwards.
    The nilNode object is global; the sentinel is always black.
    '''
    nilNode.parent = parent
    nilNode.red = False
    setattr(parent, 'leftnode' if isLeftChild else 'rightnode', nilNode)
    return nilNode
def removeTempNode():
    '''
    Remove (unlink) the sentinel possibly attached by createTempNode().
    nilNode object is global.

    BUGFIX: only clear a child slot that actually holds the sentinel, and
    reset nilNode.parent afterwards. The original unconditionally cleared
    the parent's right child whenever the left one did not match, which
    could detach a real subtree on a second call after the sentinel had
    already been removed.
    '''
    if nilNode.parent:
        if nilNode.parent.leftnode is nilNode:
            nilNode.parent.leftnode = None
        elif nilNode.parent.rightnode is nilNode:
            nilNode.parent.rightnode = None
        nilNode.parent = None
|
[
"lucianoalbanes@gmai.com"
] |
lucianoalbanes@gmai.com
|
8fb50491315368de4dda83ceec98b10bce12cc65
|
90fb6f665e6c8c83f1c8207e0a2e793833fc3c6d
|
/NLP/chapter01.py
|
b4c083d4041d757f9e866a99623ce48e01eae74f
|
[] |
no_license
|
hello-wangjj/Introduction-to-Programming-Using-Python
|
8698ced3067714ab639328f5e3e78ccbbb4905c4
|
ff24118d1779d589e37aa6ab88002176e8f475c3
|
refs/heads/master
| 2021-09-13T22:29:00.355794
| 2018-05-05T07:28:29
| 2018-05-05T07:28:29
| 57,047,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
# -*- coding:utf-8 -*-
"""
用途,文档说明
"""
from nltk.book import *
def lexical_diversity(text):
    """Return the average number of occurrences per distinct token in *text*
    (total token count divided by vocabulary size).

    BUGFIX: returns 0.0 for empty input instead of raising
    ZeroDivisionError.
    """
    if not text:
        return 0.0
    return len(text) / len(set(text))
def percentage(count, tatal):
    """Return *count* expressed as a percentage of *tatal*.

    Note: the misspelled parameter name is kept for call compatibility.
    """
    return (100 * count) / tatal
|
[
"576988736@qq.com"
] |
576988736@qq.com
|
7cbb4ec069723b56e751ab29d7c238522f85aab6
|
a65bff31e28a6abd615dee40a0d44b8a31004045
|
/templator.py
|
bcae71c821bf6a6346f734eaf8d9938729c0e3f2
|
[] |
no_license
|
artmikh/Patterns.Framework
|
138b4a241ffa9a3b568672f5236cb92c3ae01898
|
2e9df931ad1ebd5417fc2d8db8a57aa976f4c738
|
refs/heads/main
| 2023-04-21T08:52:01.960031
| 2021-05-14T20:20:01
| 2021-05-14T20:20:01
| 362,932,020
| 0
| 0
| null | 2021-05-16T21:43:39
| 2021-04-29T19:56:05
|
Python
|
UTF-8
|
Python
| false
| false
| 499
|
py
|
import settings
from jinja2 import FileSystemLoader
from jinja2.environment import Environment
def render(template_name, **kwargs):
    """Render the named template from the project's template folder.

    Extra keyword arguments are passed straight through to the template;
    the configured CSS file is always injected as `css`.
    """
    env = Environment()
    # Templates are looked up relative to the configured template root.
    env.loader = FileSystemLoader(settings.TEMPLATES_ROOT)
    # Open the template by name and render it with the parameters.
    tpl = env.get_template(template_name)
    return tpl.render(css=settings.css_file, **kwargs)
|
[
"artmikh@yandex.ru"
] |
artmikh@yandex.ru
|
9e51363e2a976dbbdb4bd24a0d2cec4b15627703
|
ecbc9493c2584b926845276fd31aeeb7363c5c0f
|
/textutils/views.py
|
1e59596d62e5bf8ce93ea60af4f74dca833d3b51
|
[] |
no_license
|
AsifNasim/Publish
|
72c717e15814ecc6a03c927a980f31b8f0ab00ed
|
4713db85c6179793999693bdf9b5ec4970c834b6
|
refs/heads/master
| 2023-04-28T03:32:08.107524
| 2020-04-05T06:22:15
| 2020-04-05T06:22:15
| 253,167,987
| 0
| 0
| null | 2023-04-21T20:54:14
| 2020-04-05T06:11:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,062
|
py
|
from django.contrib import admin
from django.urls import path
from django.http import HttpResponse
from django.shortcuts import render
def index(Request):
    # Landing page: just renders the static index template.
    return render(Request,'index.html')
def analyze(Request):
    """Apply the text transformations selected in the form to the posted
    text and render the result.

    Reads `text` plus one "on"/"off" checkbox flag per transformation from
    POST. Transformations are applied in a fixed order, each feeding its
    output (via djtext) into the next; `params` holds the label and result
    of the last transformation that ran.
    """
    #Get the text
    djtext = Request.POST.get('text', 'default')
    # Check checkbox values
    removePunc = Request.POST.get('removePunc', 'off')
    fullcaps = Request.POST.get('fullcaps', 'off')
    newLineRemover = Request.POST.get('newLineRemover', 'off')
    extraSpaceRemover = Request.POST.get('extraSpaceRemover', 'off')
    # NOTE(review): characterCount is read but never used below, and it is
    # not part of the "no operation chosen" check - the character-count
    # feature appears unfinished. TODO confirm against analyze.html.
    characterCount = Request.POST.get('characterCounter', 'off')
    #Check which checkbox is on
    if removePunc == "on":
        punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
        analyzed = ""
        # Keep every character that is not in the punctuation set.
        for char in djtext:
            if char not in punctuations:
                analyzed = analyzed + char
        params = {'purpose':' Punctuations Removed ', 'analyzed_text': analyzed}
        djtext = analyzed
    if(fullcaps == "on"):
        analyzed =""
        for char in djtext:
            analyzed += char.upper()
        params = {'purpose':' Converted String to Upper Case ', 'analyzed_text': analyzed}
        djtext = analyzed
    if(extraSpaceRemover == "on"):
        analyzed =""
        # Collapse runs of spaces by skipping any space followed by another.
        # NOTE(review): djtext[index + 1] raises IndexError when the text
        # ends with a space - confirm and guard.
        for index,char in enumerate(djtext):
            if djtext[index] == " " and djtext[index +1] == " ":
                pass
            else:
                analyzed = analyzed + char
        params = {'purpose':' Extra Spaces have been removed ', 'analyzed_text': analyzed}
        djtext = analyzed
    if(newLineRemover == "on"):
        analyzed = ""
        # Drop both LF and CR characters.
        for char in djtext:
            if char != "\n" and char != "\r":
                analyzed = analyzed + char
        params = {'purpose':' New Line has been removed ', 'analyzed_text': analyzed}
        # NOTE(review): unlike the branches above, djtext is not updated
        # here; harmless today because this is the last transformation.
    if(removePunc != "on" and fullcaps != "on" and extraSpaceRemover != "on" and newLineRemover != "on"):
        return HttpResponse("Please Choose any operations!")
    return render(Request, 'analyze.html', params)
|
[
"asifnasimofficial@gmail.com"
] |
asifnasimofficial@gmail.com
|
7621c014a8edf7806405f6fc76c6539e4aef5ef0
|
0cc61991d526eb761ba603438647561cc1337ef4
|
/astar_multi.py
|
cb1b67b77bc0f35bbcb8eedd1ecfd11b6fb5acdb
|
[] |
no_license
|
etahirovic1/gmstar
|
0d6d4e9e9cace84ddded070ed115657398ee6301
|
2e93c4c20bde0df881e83e83e0ac5eb11a273921
|
refs/heads/main
| 2023-08-11T11:47:05.133278
| 2021-09-16T16:17:02
| 2021-09-16T16:17:02
| 404,886,915
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,954
|
py
|
import heapq
import random
class Node:
    """A* search node: a grid position plus path-cost bookkeeping and a
    parent link for path reconstruction."""

    def __init__(self, parent=None, position=None, value=0):
        self.parent = parent
        self.position = position
        self.value = value
        # g: cost from start, h: heuristic estimate, f: total (g + h).
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, other):
        # Two nodes describe the same search state when positions match.
        return self.position == other.position

    def __repr__(self):
        return f"{self.position} - g: {self.g} h: {self.h} f: {self.f}"

    def __lt__(self, other):
        # Heap ordering: the node with the lower f-score wins.
        return self.f < other.f

    def __gt__(self, other):
        return self.f > other.f
class Astar:
    """Multi-robot A* planner on a square grid workspace.

    Each robot gets its own open/closed heap and path; obstacles are cells
    with value 1 in `workspace`. `given_choice`/`type_obs` select how random
    obstacles are (re)generated before planning.
    """

    def __init__(self, workspace, robots, given_choice, num_randoms=0, type_obs=0, num_seed=0, difficulty=0):
        # Per-robot bookkeeping, indexed in lockstep with `robots`.
        self.starts = [[] for i in range(len(robots))]
        self.paths = [[] for i in range(len(robots))]
        self.children = [[] for i in range(len(robots))]
        self.open_lists = []
        self.closed_lists = []
        self.given_choice = given_choice
        self.num_randoms = num_randoms
        self.type_obs = type_obs
        self.num_seed = num_seed
        self.difficulty = difficulty
        for num_rob in range(len(robots)):
            self.open_list_i = []
            heapq.heapify(self.open_list_i)
            self.open_lists.append(self.open_list_i)
            self.closed_list_i = []
            heapq.heapify(self.closed_list_i)
            self.closed_lists.append(self.closed_list_i)
        # 8-connected moves plus "stay in place" (0, 0).
        self.neighbors = ((-1, -1), (1, -1), (1, 1), (-1, 1), (-1, 0), (0, 1), (1, 0), (0, -1), (0, 0))
        self.workspace = workspace
        self.robots = robots

    def randomise(self):
        """Scatter `num_randoms` obstacles on free cells, deterministically
        seeded by `num_seed`, avoiding all robot starts/goals and a
        difficulty-specific forbidden list."""
        start_positions = []
        end_positions = []
        for robot in self.robots:
            start_positions.append(robot.start)
            end_positions.append(robot.goal)
        randoms = []
        random.seed(int(self.num_seed))
        while len(randoms) != self.num_randoms:
            # Draw a flat cell index and convert it to (row, col).
            num = random.randint(0, (len(self.workspace)-1) * (len(self.workspace)-1))
            w = num % (len(self.workspace) - 1)
            h = int(num / (len(self.workspace)-1))
            tapl = (h, w)
            # Cells that must stay free per difficulty level
            # (presumably map choke points - TODO confirm).
            forbidden = [[], [(12, 8), (12, 15)], [(15, 11), (15, 12)]]
            if num not in randoms and tapl not in start_positions and tapl not in end_positions and \
                    self.workspace[h][w] != 1 and tapl not in forbidden[int(self.difficulty)]:
                randoms.append(num)
                self.workspace[h][w] = 1
            else:
                continue

    def choose_course(self):
        """Prepare the workspace obstacles according to given_choice/type_obs:
        '0' adds random obstacles; '1' with type '0' wipes the map first,
        '1' keeps it as-is, '2' adds random obstacles on top."""
        if self.given_choice == '0':
            self.randomise()
        elif self.given_choice == '1':
            if self.type_obs == '0':
                for h in range(len(self.workspace)):
                    for w in range(len(self.workspace)):
                        self.workspace[h][w] = 0
                self.randomise()
            elif self.type_obs == '1':
                return
            elif self.type_obs == '2':
                self.randomise()

    def return_path(self, current_node):
        """Walk parent links back to the start and return the path in
        start-to-goal order."""
        path = []
        current = current_node
        while current is not None:
            path.append(current)
            current = current.parent
        return path[::-1]

    def run(self):
        """Plan a path for every robot; returns the list of per-robot paths
        (a path is empty when none was found)."""
        self.choose_course()
        for ri in range(len(self.robots)):
            start = Node(None, self.robots[ri].start, 0)
            heapq.heappush(self.open_lists[ri], start)
            self.starts[ri] = start
            iteration = 0
            while self.open_lists[ri]:
                # Give up after a budget proportional to map and fleet size.
                if iteration > 3 * len(self.robots) * len(self.workspace):
                    self.paths[ri] = self.return_path(vk)
                    print('Put za robota broj', ri, 'nije pronađen unutar prihvatljivog vremena.')
                    break
                iteration = iteration + 1
                vk = heapq.heappop(self.open_lists[ri])
                heapq.heappush(self.closed_lists[ri], vk)
                if vk.position == self.robots[ri].goal:
                    self.paths[ri] = self.return_path(vk)
                    break
                for new_position in self.neighbors:
                    node_position = (vk.position[0] + new_position[0], vk.position[1] + new_position[1])
                    # Skip out-of-bounds cells.
                    if node_position[0] > (len(self.workspace) - 1) or node_position[0] < 0 or node_position[1] > (
                            len(self.workspace[len(self.workspace)-1]) - 1) or node_position[1] < 0:
                        continue
                    # Skip obstacles.
                    if self.workspace[node_position[0]][node_position[1]] != 0:
                        continue
                    new_node = Node(vk, node_position, 0)
                    self.children[ri].append(new_node)
                    # BUGFIX: a diagonal step is one where BOTH offsets are
                    # non-zero. The original tested `!= 1`, which priced
                    # e.g. (1, -1) as a straight move and (-1, 0) as a
                    # diagonal one.
                    if new_position[0] != 0 and new_position[1] != 0:
                        new_node.g = vk.g + 1.44
                    else:
                        new_node.g = vk.g + 1
                    # Euclidean-distance heuristic to this robot's goal.
                    new_node.h = (((new_node.position[0] - self.robots[ri].goal[0]) ** 2) + (
                            (new_node.position[1] - self.robots[ri].goal[1]) ** 2)) ** (1/2)
                    new_node.f = new_node.g + new_node.h
                    if new_node in self.closed_lists[ri]:
                        continue
                    # If an equal node is already open, keep the cheaper g.
                    for open_node in self.open_lists[ri]:
                        if new_node.position == open_node.position:
                            if new_node.g < open_node.g:
                                open_node.g = new_node.g
                                open_node.parent = new_node.parent
                            break
                    if new_node not in self.open_lists[ri]:
                        heapq.heappush(self.open_lists[ri], new_node)
        for path_index in range(len(self.paths)):
            if len(self.paths[path_index]) == 0:
                print('No path for robot', path_index+1)
        return self.paths
|
[
"noreply@github.com"
] |
etahirovic1.noreply@github.com
|
9884cae2f8e8ccc4f6c642f95ef5bf620d4a2864
|
fabcd19d61549304dd4127c7227dc49974a9d628
|
/app/import_fide.py
|
edaea39a1b31e2392475f0f6e01e9edd25ffecaa
|
[] |
no_license
|
JulesCourtois/mychesshub
|
94614ee2d98db99a67e9c7a492199cda665da982
|
d9989276b75f83e6a9343375f01e856fddeced52
|
refs/heads/master
| 2020-04-14T23:57:22.657988
| 2019-01-11T12:03:01
| 2019-01-11T12:03:01
| 164,222,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
import os
import urllib.request
import zipfile
from app import db
from app.models import Federation, Ranking
def import_fide(anonymous_user):
    """Import the FIDE standard rating list into the Ranking table.

    Downloads the official zip archive, extracts the fixed-width rating file,
    and upserts one Ranking row per player (creating the "FID" federation on
    first run).  Newly created rankings are owned by ``anonymous_user``.

    The temporary text file and the downloaded archive are removed even when
    parsing or the database commit fails.
    """
    temp_txt = "standard_rating_list.txt"
    url = "http://ratings.fide.com/download/standard_rating_list.zip"
    downloaded_zip, headers = urllib.request.urlretrieve(url)
    try:
        # Ensure the world federation row exists before linking rankings to it.
        fide = db.session.query(Federation).filter(Federation.initials == "FID").one_or_none()
        if fide is None:
            fide = Federation(name="World Chess Federation", initials="FID")
            db.session.add(fide)
            db.session.commit()
            fide = db.session.query(Federation).filter(Federation.initials == "FID").one_or_none()
        with zipfile.ZipFile(downloaded_zip, 'r') as zip_ref:
            zip_ref.extractall()
        # Fixed-width file; first line holds the column names, so skip it.
        with open(temp_txt) as file:
            content = file.readlines()[1:]
        rankings = []
        for line in content:
            # Column offsets follow the published FIDE fixed-width layout.
            player_id = line[0:9].strip()
            elo = int(line[113:117].strip())
            # name = line[15:75].strip()
            # federation_initials = line[76:79]
            # birth = int(line[126:130])
            ranking = db.session.query(Ranking)\
                .filter(Ranking.federation == fide.id)\
                .filter(Ranking.player_id == player_id)\
                .first()
            if ranking is None:
                ranking = Ranking(user_id=anonymous_user.id, federation=fide.id, player_id=player_id)
            ranking.elo = elo
            rankings.append(ranking)
        db.session.add_all(rankings)
        db.session.commit()
    finally:
        # Always clean up temporary files, even on failure.
        if os.path.exists(temp_txt):
            os.remove(temp_txt)
        os.remove(downloaded_zip)
|
[
"jules.courtois@epfl.ch"
] |
jules.courtois@epfl.ch
|
c0370459eef6f39c24d1f261241c974acc5577f3
|
b00c2a7c74c46b3d0ad3af4b57c19b7edbb0f61f
|
/proxy.py
|
dbc720d98bf87d41a01d61a275cacaeb24aa417d
|
[] |
no_license
|
rauldoe/cpsc551proj3
|
1cce0b7c4a3851ff370ed6dac63693d56a332fac
|
68e1d645563cb3e741cce0e58f089d24ed07e16c
|
refs/heads/master
| 2020-09-23T15:18:32.242618
| 2019-12-18T11:51:13
| 2019-12-18T11:51:13
| 225,529,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
import re
import typing
import xmlrpc.client
# Credit to Yu Kou (<yuki.coco@csu.fullerton.edu>)
# for making this suggestion and working on type mappings.
class TupleSpaceAdapter:
    """XML-RPC adapter that maps Python tuple templates to Ruby Rinda form.

    Template wildcards are translated on the way out:
    a bare class (e.g. ``str``) becomes ``{'class': <RubyName>}``, a compiled
    regex becomes ``{'regexp': pattern}``, and a ``range`` becomes an
    inclusive ``{'from': ..., 'to': ...}`` pair.  Everything else passes
    through unchanged.
    """

    # Python type name -> Ruby class name used in tuple templates.
    PYTHON_TO_RUBY = {
        'str': 'String',
        'int': 'Numeric',
        'float': 'Numeric'
    }
    RANGE_TYPE = type(range(0))

    def __init__(self, uri):
        self.uri = uri
        self.ts = xmlrpc.client.ServerProxy(self.uri, allow_none=True)

    def map_template_out(self, item):
        """Translate one template element into its wire representation."""
        # BUGFIX: ``isinstance(item, typing.Type)`` is not a valid runtime
        # check (deprecated, and typing aliases reject isinstance on modern
        # Python); the builtin ``type`` is the correct "is a class" test.
        if isinstance(item, type):
            # ``.get`` avoids the KeyError the old subscript raised for
            # classes with no Ruby mapping; such items now pass through.
            ruby_type = self.PYTHON_TO_RUBY.get(item.__name__)
            if ruby_type is not None:
                return {'class': ruby_type}
        # BUGFIX: ``typing.Pattern`` was removed in Python 3.12; the runtime
        # pattern type lives in the ``re`` module.
        elif isinstance(item, re.Pattern):
            return {'regexp': item.pattern}
        elif isinstance(item, self.RANGE_TYPE):
            # Python ranges are half-open; Ruby from/to is inclusive.
            return {'from': item.start, 'to': item.stop - 1}
        return item

    def map_templates_out(self, tupl):
        """Translate every element of a tuple template."""
        return [self.map_template_out(item) for item in tupl]

    def _in(self, tupl):
        # Blocking take (timeout None).
        return self.ts._in(self.map_templates_out(tupl), None)

    def _inp(self, tupl):
        # Non-blocking take (timeout 0).
        return self.ts._in(self.map_templates_out(tupl), 0)

    def _rd(self, tupl):
        # Blocking read.  BUGFIX: the tuple must go through the plural
        # ``map_templates_out`` (element-wise), as ``_in``/``_inp`` do.
        return self.ts._rd(self.map_templates_out(tupl), None)

    def _rdp(self, tupl):
        # Non-blocking read; same element-wise mapping fix as ``_rd``.
        return self.ts._rd(self.map_templates_out(tupl), 0)

    def _out(self, tupl):
        # Write a concrete tuple; no template mapping needed.
        self.ts._out(tupl)

    def _rd_all(self, tupl):
        # Read all matches; same element-wise mapping fix as ``_rd``.
        return self.ts._rd_all(self.map_templates_out(tupl))
|
[
"khoado@csu.fullerton.edu"
] |
khoado@csu.fullerton.edu
|
131c53910d35f10cf9f46abe44064b5c63681d5d
|
781029dcc468a7d1467a17727870d526da1df985
|
/django/crud_form/articles/urls.py
|
20033f90e9434504b066dd2f73e7934e94e26923
|
[] |
no_license
|
Huijiny/TIL
|
5f0edec5ad187029e04ed2d69e85ae4d278e048d
|
d1a974b3cacfb45b2718f87d5c262a23986c6574
|
refs/heads/master
| 2023-09-03T15:28:11.744287
| 2021-10-21T12:38:10
| 2021-10-21T12:38:10
| 335,220,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
from django.urls import path
from . import views
# Namespace for URL reversing, e.g. ``{% url 'articles:index' %}``.
app_name = 'articles'
urlpatterns = [
    path('index/', views.index, name='index'),    # list all articles
    path('create/', views.create, name='create'),  # new-article form + submit
    # CRUD routes keyed by primary key; pk is captured as a string.
    path('detail/<str:pk>', views.detail, name='detail'),
    path('update/<str:pk>', views.update, name='update'),
    path('delete/<str:pk>', views.delete, name='delete'),
]
|
[
"jiin20803@gmail.com"
] |
jiin20803@gmail.com
|
074260b13dd38e71d53c4becd4a84f776db80e2b
|
a003919560c569114a54182e1d977bd2cd3e67dd
|
/cs231n-assignment2/cs231n/classifiers/fc_net.py
|
087ccbbb0810102b9e9a9a191980552e30a0d92e
|
[] |
no_license
|
MohdElgaar/ML-assignments
|
282bd73d35e171dbf305451c9c0b7049e75382e8
|
7c68452906e68b8d3e6ba75fe65b59e6660053df
|
refs/heads/master
| 2020-04-18T17:33:20.557874
| 2019-01-26T06:28:34
| 2019-01-26T06:30:50
| 167,657,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,086
|
py
|
from builtins import range
from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
class TwoLayerNet(object):
    """Two-layer fully-connected classifier: affine - relu - affine - softmax.

    The model assumes an input dimension of D, a hidden dimension of H, and
    classifies over C classes.  Learnable parameters live in ``self.params``
    (a dict mapping names to numpy arrays); optimization is delegated to a
    separate Solver object, so no gradient descent is implemented here.
    """

    def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,
                 weight_scale=1e-3, reg=0.0):
        """Initialize weights and biases.

        Inputs:
        - input_dim: size of the input.
        - hidden_dim: size of the hidden layer.
        - num_classes: number of classes to classify.
        - weight_scale: standard deviation for Gaussian weight init.
        - reg: L2 regularization strength.

        Weights are drawn from N(0, weight_scale); biases start at zero.
        Keys: 'W1'/'b1' for the first layer, 'W2'/'b2' for the second.
        """
        self.reg = reg
        self.params = {
            'W1': np.random.normal(0, weight_scale, (input_dim, hidden_dim)),
            'b1': np.zeros(hidden_dim),
            'W2': np.random.normal(0, weight_scale, (hidden_dim, num_classes)),
            'b2': np.zeros(num_classes),
        }

    def loss(self, X, y=None):
        """Compute loss and gradients for a minibatch.

        Inputs:
        - X: input data of shape (N, d_1, ..., d_k).
        - y: labels of shape (N,), or None for test-time scoring.

        Returns:
        - scores of shape (N, C) when y is None; otherwise the tuple
          (loss, grads) where grads maps each parameter name to the
          gradient of the loss w.r.t. that parameter.
        """
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']

        # Forward pass: affine-relu into the hidden layer, affine to scores.
        hidden, hidden_cache = affine_relu_forward(X, W1, b1)
        scores, scores_cache = affine_forward(hidden, W2, b2)

        # Test mode: report raw class scores only.
        if y is None:
            return scores

        grads = {}
        # Softmax data loss plus 0.5 * reg * ||W||^2 on each weight matrix
        # (the 0.5 factor keeps the gradient expression simple).
        loss, dscores = softmax_loss(scores, y)
        loss += 0.5 * self.reg * (np.linalg.norm(W1) ** 2 + np.linalg.norm(W2) ** 2)

        # Backward pass in reverse order; add the L2 term to each W gradient.
        dhidden, grads['W2'], grads['b2'] = affine_backward(dscores, scores_cache)
        grads['W2'] += self.reg * W2
        _, grads['W1'], grads['b1'] = affine_relu_backward(dhidden, hidden_cache)
        grads['W1'] += self.reg * W1

        return loss, grads
class FullyConnectedNet(object):
    """
    A fully-connected neural network with an arbitrary number of hidden layers,
    ReLU nonlinearities, and a softmax loss function. This will also implement
    dropout and batch normalization as options. For a network with L layers,
    the architecture will be

    {affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax

    where batch normalization and dropout are optional, and the {...} block is
    repeated L - 1 times.

    Similar to the TwoLayerNet above, learnable parameters are stored in the
    self.params dictionary and will be learned using the Solver class.
    """

    def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
                 dropout=0, use_batchnorm=False, reg=0.0,
                 weight_scale=1e-2, dtype=np.float32, seed=None):
        """
        Initialize a new FullyConnectedNet.

        Inputs:
        - hidden_dims: A list of integers giving the size of each hidden layer.
        - input_dim: An integer giving the size of the input.
        - num_classes: An integer giving the number of classes to classify.
        - dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
          the network should not use dropout at all.
        - use_batchnorm: Whether or not the network should use batch normalization.
        - reg: Scalar giving L2 regularization strength.
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - dtype: A numpy datatype object; all computations will be performed using
          this datatype. float32 is faster but less accurate, so you should use
          float64 for numeric gradient checking.
        - seed: If not None, then pass this random seed to the dropout layers. This
          will make the dropout layers deterministic so we can gradient check the
          model.
        """
        self.use_batchnorm = use_batchnorm
        self.use_dropout = dropout > 0
        self.reg = reg
        self.num_layers = 1 + len(hidden_dims)
        self.dtype = dtype
        self.params = {}

        ############################################################################
        # Parameters of the network, stored in self.params: weights/biases for    #
        # layer i are 'Wi'/'bi'; batchnorm scale/shift are 'gammai'/'betai'.      #
        # Weights ~ N(0, weight_scale); biases 0; gammas 1; betas 0.              #
        ############################################################################
        # Layer i maps current_D -> current_M; the last layer maps to num_classes.
        current_D = input_dim
        current_M = hidden_dims[0]
        for i in range(1, self.num_layers + 1):
            W = "W" + str(i)
            b = "b" + str(i)
            gamma = "gamma" + str(i)
            beta = "beta" + str(i)
            self.params[W] = np.random.normal(0, weight_scale, (current_D, current_M))
            self.params[b] = np.zeros(current_M)
            # Batchnorm parameters exist only for hidden layers, never for the
            # output layer.
            if not i == self.num_layers and self.use_batchnorm:
                self.params[gamma] = np.ones(current_M)
                self.params[beta] = np.zeros(current_M)
            current_D = current_M
            # Past the last hidden layer the fan-out is the class count.
            if i >= len(hidden_dims):
                current_M = num_classes
                continue
            current_M = hidden_dims[i]

        # When using dropout we need to pass a dropout_param dictionary to each
        # dropout layer so that the layer knows the dropout probability and the mode
        # (train / test). You can pass the same dropout_param to each dropout layer.
        self.dropout_param = {}
        if self.use_dropout:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed

        # With batch normalization we need to keep track of running means and
        # variances, so we need to pass a special bn_param object to each batch
        # normalization layer. You should pass self.bn_params[0] to the forward pass
        # of the first batch normalization layer, self.bn_params[1] to the forward
        # pass of the second batch normalization layer, etc.
        self.bn_params = []
        if self.use_batchnorm:
            self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]

        # Cast all parameters to the correct datatype
        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)

    def loss(self, X, y=None):
        """
        Compute loss and gradient for the fully-connected net.

        Input / output: Same as TwoLayerNet above.
        """
        X = X.astype(self.dtype)
        mode = 'test' if y is None else 'train'

        # Set train/test mode for batchnorm params and dropout param since they
        # behave differently during training and testing.
        if self.use_dropout:
            self.dropout_param['mode'] = mode
        if self.use_batchnorm:
            for bn_param in self.bn_params:
                bn_param['mode'] = mode

        scores = None
        ############################################################################
        # Forward pass: affine - [batchnorm] - relu - [dropout] per hidden layer,  #
        # plain affine for the output layer.  Each sub-layer's cache is appended   #
        # to its list in ``caches`` so the backward pass can index by layer - 1.   #
        ############################################################################
        intermediate = X.copy()
        caches = {'affine': [], 'relu': [], 'batchnorm': [], 'dropout': []}
        for i in range(1, self.num_layers + 1):
            output_layer = i == self.num_layers
            W = "W" + str(i)
            b = "b" + str(i)
            gamma = "gamma" + str(i)
            beta = "beta" + str(i)
            if output_layer:
                # Final layer: affine only, produces the class scores.
                scores, cache = affine_forward(intermediate, self.params[W], self.params[b])
                caches['affine'].append(cache)
            else:
                intermediate, cache = affine_forward(intermediate, self.params[W], self.params[b])
                caches['affine'].append(cache)
                if self.use_batchnorm:
                    # bn_params is 0-indexed per hidden layer, hence i - 1.
                    intermediate, cache = batchnorm_forward(intermediate, self.params[gamma],
                                                            self.params[beta], self.bn_params[i-1])
                    caches['batchnorm'].append(cache)
                intermediate, cache = relu_forward(intermediate)
                caches['relu'].append(cache)
                if self.use_dropout:
                    intermediate, cache = dropout_forward(intermediate, self.dropout_param)
                    caches['dropout'].append(cache)

        # If test mode return early
        if mode == 'test':
            return scores

        loss, grads = 0.0, {}
        ############################################################################
        # Backward pass: softmax data loss plus 0.5 * reg * ||Wi||^2 for every     #
        # layer; gradients flow through the layers in reverse of the forward       #
        # order (dropout -> relu -> batchnorm -> affine).  Batchnorm gamma/beta    #
        # are not regularized.                                                     #
        ############################################################################
        loss, intermediate = softmax_loss(scores, y)
        for i in range(1, self.num_layers + 1):
            W = "W" + str(i)
            loss += 0.5*self.reg*(np.linalg.norm(self.params[W])**2)
        for i in range(self.num_layers, 0, -1):
            W = "W" + str(i)
            b = "b" + str(i)
            gamma = "gamma" + str(i)
            beta = "beta" + str(i)
            output_layer = i == self.num_layers
            if output_layer:
                intermediate, grads[W], grads[b] = affine_backward(intermediate, caches['affine'][i-1])
            else:
                # Reverse of the forward sub-layer order within a hidden layer.
                if self.use_dropout:
                    intermediate = dropout_backward(intermediate, caches['dropout'][i-1])
                intermediate = relu_backward(intermediate, caches['relu'][i-1])
                if self.use_batchnorm:
                    intermediate, grads[gamma], grads[beta] = batchnorm_backward(intermediate, caches['batchnorm'][i-1])
                intermediate, grads[W], grads[b] = affine_backward(intermediate, caches['affine'][i-1])
            # L2 regularization gradient for every layer's weights.
            grads[W] += self.reg * self.params[W]

        return loss, grads
|
[
"mohamed@kaist.ac.kr"
] |
mohamed@kaist.ac.kr
|
048e35e12a0d93a9ac53cb672c5f2ec95f0eae85
|
15421e75dab5775e53bf8a20da50f0de6d78d410
|
/ascii_chan/ascii_chan.py
|
8c0b1ca0b4e7fd287e6b5aff170d38912118ddcf
|
[] |
no_license
|
etmoore/intro-to-backend-udacity
|
292dd98d0fb9ebed95d295b41d5abde2e2610544
|
dc3435e43a6a6c89f0c832431667bb0bc4ad99e1
|
refs/heads/master
| 2020-12-26T03:43:15.067050
| 2017-02-13T23:43:35
| 2017-02-13T23:43:35
| 68,500,043
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,573
|
py
|
import os
import jinja2
import webapp2
from google.appengine.ext import db
# Configuration for jinja: load templates from the sibling ``templates``
# directory; autoescape guards rendered pages against HTML injection.
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
                               autoescape = True)
class Art(db.Model):
    """Datastore model for one ASCII-art submission."""
    title = db.StringProperty(required = True)  # submission title (required)
    art = db.TextProperty(required = True)  # the ASCII art body (required)
    created = db.DateTimeProperty(auto_now_add = True)  # set automatically on first put()
class Handler(webapp2.RequestHandler):
    """Base request handler with jinja2 rendering helpers."""

    def write(self, *a, **kw):
        # Proxy straight through to the response stream.
        self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        # Look up the template and render it with the given params.
        return jinja_env.get_template(template).render(params)

    def render(self, template, **kw):
        rendered = self.render_str(template, **kw)
        self.write(rendered)
class MainPage(Handler):
    """Front page: lists all art pieces and accepts new submissions."""

    def render_front(self, title="", art="", error=""):
        # Newest submissions first.
        arts = db.GqlQuery("Select * from Art ORDER BY created DESC ")
        self.render("front.html", title=title, art=art, error=error, arts=arts)

    def get(self):
        self.render_front()

    def post(self):
        title = self.request.get("title")
        art = self.request.get("art")
        # Both fields are required; re-render the form with an error otherwise.
        if not (title and art):
            self.render_front(title, art, "We need both a title and some artwork!")
            return
        Art(title = title, art = art).put()
        self.redirect("/")
# WSGI entry point: route root requests to MainPage.
app = webapp2.WSGIApplication([
    ('/', MainPage),
], debug=True)
|
[
"etmoore@gmail.com"
] |
etmoore@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.