blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c9dd418e88b4feef3c0a136c42c15bd3d73d52c9 | 0309bd25cdd8e89297f507be202634b07f5f6e85 | /LeetCode/Easy/Python3/tests/test_longestuncommonsubsequencei.py | cb5150abb748e1a5134781a8ef1fd36956eb3615 | [] | no_license | AmyShackles/algo-practice | 10fc4a5c5926232ff2b0aed6183cec9f21bf15f3 | 876e3be57357651348465f70ab312d4ac98d667a | refs/heads/main | 2023-04-03T08:29:33.725236 | 2021-04-13T19:20:18 | 2021-04-13T19:20:18 | 338,672,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | import unittest
from Python3.longestuncommonsubsequencei import Solution
class TestfindLUSlength(unittest.TestCase):
    # Unit tests for Solution.findLUSlength (LeetCode 521,
    # "Longest Uncommon Subsequence I").

    def setUp(self):
        # No per-test fixtures are needed.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def test_1(self):
        # Different strings: either whole string is an uncommon
        # subsequence, so the answer is the (common) length 3.
        result = Solution.findLUSlength("aba", "cdc")
        self.assertEqual(result, 3)

    def test_2(self):
        # Equal-length but distinct strings also yield length 3.
        result = Solution.findLUSlength("aaa", "bbb")
        self.assertEqual(result, 3)

    def test_3(self):
        # Identical strings share every subsequence, so -1 is expected.
        result = Solution.findLUSlength("aaa", "aaa")
        self.assertEqual(result, -1)
# Run this module's test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"amyshackles@gmail.com"
] | amyshackles@gmail.com |
4f822fd322243a4b318e391561e07b0443f484a5 | 30867fa220859bb5291627e2a219dd25aa4ddfdc | /chapter6/heap_sort.py | 1c9df5a7bcbaa2949dcc4187489bea17749507d8 | [] | no_license | ANDYsGUITAR/IntroduceToAlgorithm | 4c0c1b110b85c5e19811138c1cab0268d41385e5 | e86bb48047e08337d11b036b412b211f67f54eaf | refs/heads/master | 2020-07-25T11:53:46.710330 | 2019-09-27T15:40:48 | 2019-09-27T15:40:48 | 208,280,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | def left(i):
return i * 2
def right(i):
    # Index of the right child of heap node i (1-based child arithmetic).
    return 2 * i + 1
def parent(i):
    # Index of the parent of heap node i (floor halving).
    return i >> 1
def max_heapify(A, i, heap_size):
    # Sift the value at index i down until the subtree rooted there
    # satisfies the max-heap property; only indices < heap_size count.
    last = heap_size - 1
    lc, rc = left(i), right(i)
    biggest = lc if lc <= last and A[lc] > A[i] else i
    if rc <= last and A[rc] > A[biggest]:
        biggest = rc
    if biggest == i:
        return
    A[i], A[biggest] = A[biggest], A[i]
    max_heapify(A, biggest, heap_size)
def build_max_heap(A):
    """Rearrange list A in place into a max-heap.

    Sifts down every potential internal node, from the middle of the
    array back to the root, so each subtree is heapified before its
    parent (the CLRS BUILD-MAX-HEAP scheme).
    """
    heap_size = len(A)
    for i in range(len(A) // 2, -1, -1):
        # Removed a stray debugging ``print(i)`` that was left in this loop.
        max_heapify(A, i, heap_size)
def heap_sort(A):
    # In-place heapsort: heapify once, then repeatedly swap the current
    # maximum (root) to the end and shrink the active heap region.
    build_max_heap(A)
    remaining = len(A)
    for end in range(len(A) - 1, 0, -1):
        A[end], A[0] = A[0], A[end]
        remaining -= 1
        max_heapify(A, 0, remaining)
    return A
# Demo: sort a small sample list and show the result.
A = [5, 2, 4, 6, 1, 3]
A = heap_sort(A)
print(A)
"andyandwei@163.com"
] | andyandwei@163.com |
2577b32165abb593756c8e8427c8c59a9de32fb0 | 3a093f6a40e8fb24957d277ad8f4b097d08c6d04 | /result/tools/remaplabelsincards/remaphistograms_sig_only.py | b28edd68b6f626f213764ba39a35c3d909b9a651 | [] | no_license | dlont/FourTops2016 | ab9e953760e93b0e777b23478938efd30d640286 | 88c929bf98625735a92a31210f7233f799c5a10c | refs/heads/master | 2021-01-18T22:23:52.796080 | 2019-07-31T12:34:03 | 2019-07-31T12:34:03 | 72,439,490 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,072 | py | #!/usr/bin/python
import sys, getopt, json, re
from ROOT import gROOT
from ROOT import TKey, TClass
from ROOT import TFile, TObject, TList, TIter
from ROOT import TH1
def parseArgs(argv):
    """
    Parse commandline arguments.

    Returns (rootfile, dic): the ROOT file path given with -r/--rootfile
    and the label map parsed from the JSON given with -d/--dictionary.
    """
    # NOTE(review): hard-coded name differs from this file
    # (remaphistograms_sig_only.py) -- confirm which usage text is intended.
    scriptName = "remaphistograms.py"
    rootfile = ''
    dic = {}
    try:
        # -u/--usage takes no value; -r/--rootfile and -d/--dictionary require one.
        opts, args = getopt.getopt(argv,"ur:d:",["usage","rootfile=","dictionary="])
    except getopt.GetoptError:
        print scriptName + ' -r <rootfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-u', "--usage"):
            print scriptName + ' -r <rootfile>'
            sys.exit()
        elif opt in ("-r", "--rootfile"):
            rootfile = arg
        elif opt in ("-d", "--dictionary"):
            print arg
            # The -d value must be a JSON object mapping old histogram
            # names to their replacement names.
            dic = json.loads(arg)
    print 'ROOT file is "', rootfile, '"'
    return rootfile, dic
def remaphistnames(rootFileName,labelMap):
    """
    Rename TH1-derived histograms in *rootFileName* (opened in UPDATE
    mode) according to *labelMap* (old name -> new name).
    """
    rootKeyList = ''
    rootFile = TFile.Open(rootFileName,"UPDATE")
    if rootFile is None:
        sys.exit("Can't open root file: "+rootFileName+" Terminating...")
    # Build one alternation regex that matches any labelMap key exactly.
    matchstring = r'(^' + '$|^'.join(labelMap.keys()) + r'$)'
    print 'search pattern: ' + matchstring
    pattern = re.compile(matchstring)
    rootKeyList = rootFile.GetListOfKeys()
    for key in rootKeyList:
        cl = gROOT.GetClass(key.GetClassName());
        # Only histogram objects are candidates for renaming.
        if not cl.InheritsFrom("TH1"):
            continue
        hist = key.ReadObj();
        origHistName = hist.GetName()
        # find any key in the labelMap dic
        # replace matching key with its value in the dic
        resultHistName = pattern.sub(lambda x: labelMap[x.group()], origHistName )
        print origHistName + ':' + resultHistName
        if origHistName != resultHistName:
            hist.Write("",TObject.kOverwrite)
            hist.Write(resultHistName,TObject.kOverwrite)
        # NOTE(review): this break stops after the first TH1 key, so only
        # one histogram is ever processed -- possibly intentional for the
        # "_sig_only" variant, but confirm it is not a debugging leftover.
        break
    rootFile.Close()
def main(argv):
    # Parse CLI options, echo the parsed label map, then do the renaming.
    rootFileName, labelMapDic = parseArgs(argv)
    print labelMapDic
    remaphistnames(rootFileName, labelMapDic)
if __name__ == "__main__":
    # Drop argv[0] (the script name) before option parsing.
    main(sys.argv[1:])
| [
"denys.lontkovskyi@cern.ch"
] | denys.lontkovskyi@cern.ch |
f14cff4951f929b4bb2934cb376743983b718784 | 6d051f9348d547fa9e49d63f53afd7967b4f8043 | /djangobmf/contrib/quotation/migrations/0004_optional_quotation_customer.py | 462093dbebb53e0761c004b779395902817960b8 | [
"BSD-3-Clause"
] | permissive | caputomarcos/django-bmf | 39d7f1284ebf58e12e914f0a217368f8a664ef6b | 0d07a7d3f6a3ecfaca6c9376e764add1715cfd33 | refs/heads/develop | 2022-02-09T08:23:52.983948 | 2015-11-16T01:48:22 | 2015-11-16T01:48:22 | 46,672,438 | 0 | 0 | NOASSERTION | 2022-02-02T10:46:15 | 2015-11-22T17:57:11 | Python | UTF-8 | Python | false | false | 673 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('djangobmf_quotation', '0003_optional_quotation_employee'),
migrations.swappable_dependency(settings.BMF_CONTRIB_CUSTOMER),
]
operations = [
migrations.AddField(
model_name='quotation',
name='customer',
field=models.ForeignKey(to=settings.BMF_CONTRIB_CUSTOMER, on_delete=django.db.models.deletion.SET_NULL, null=True),
preserve_default=True,
),
]
| [
"sebastian@elmnt.de"
] | sebastian@elmnt.de |
76b221415354349ee78355c384fd34d3022f9069 | 4b00a2fd9f926bab452ede4319ee948dddc84215 | /wechatApp/application.py | dc68c146ea7c48e124aeeca5a4ea0b74113265d5 | [] | no_license | maketubu7/make_python3 | 64c00b37600f90d6543f919446cfe7479e883841 | 70405be146cfb83250c51d7211c06a4b6db6db93 | refs/heads/master | 2020-11-29T17:15:46.312929 | 2020-07-27T09:45:29 | 2020-07-27T09:45:29 | 230,176,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | # -*- coding: utf-8 -*-
# @Time : 2020/2/10 22:54
# @Author : Deng Wenxing
# @Email : dengwenxingae86@163.com
# @File : application.py.py
# @Software: PyCharm
import sys
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask import Flask
class Application(Flask):
    # Flask subclass that loads base settings and binds the SQLAlchemy
    # extension as soon as the app object is created.
    def __init__(self, import_name):
        super(Application,self).__init__(import_name)
        # Base settings file is resolved relative to the application root.
        self.config.from_pyfile('config/base_setting.py')
        # Bind the module-level SQLAlchemy instance (defined below) to this
        # app; works because instantiation happens after ``db`` is created.
        db.init_app(self)
# Module-level singletons: DB extension, the Flask app, and the CLI manager.
db = SQLAlchemy()
app = Application(__name__)
manager = Manager(app)
class Demo(object):
    # Placeholder class; currently unused.
    pass
# No behaviour yet when run directly.
if __name__ == "__main__":
    pass
"601176930@qq.com"
] | 601176930@qq.com |
04835ada0c213e3afdd480c774caeb0e6334a3b1 | 3cdb4faf34d8375d6aee08bcc523adadcb0c46e2 | /web/env/lib/python3.6/site-packages/django/core/management/commands/runserver.py | 0e0fd1ca2c0dc9eadbce38fabd6f392fbb06e452 | [
"MIT",
"GPL-3.0-only"
] | permissive | rizwansoaib/face-attendence | bc185d4de627ce5adab1cda7da466cb7a5fddcbe | 59300441b52d32f3ecb5095085ef9d448aef63af | refs/heads/master | 2020-04-25T23:47:47.303642 | 2019-09-12T14:26:17 | 2019-09-12T14:26:17 | 173,157,284 | 45 | 12 | MIT | 2020-02-11T23:47:55 | 2019-02-28T17:33:14 | Python | UTF-8 | Python | false | false | 6,274 | py | import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import (
WSGIServer, get_internal_wsgi_application, run,
)
from django.utils import autoreload
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
# Django's ``runserver`` management command: parses an optional addr:port,
# validates settings, then serves the WSGI app with optional autoreload.
class Command(BaseCommand):
    help = "Starts a lightweight Web server for development."
    # Validation is called explicitly each time the server is reloaded.
    requires_system_checks = False
    stealth_options = ('shutdown_message',)
    default_addr = '127.0.0.1'
    default_addr_ipv6 = '::1'
    default_port = '8000'
    protocol = 'http'
    server_cls = WSGIServer
    # Register the command-line options understood by runserver.
    def add_arguments(self, parser):
        parser.add_argument(
            'addrport', nargs='?',
            help='Optional port number, or ipaddr:port'
        )
        parser.add_argument(
            '--ipv6', '-6', action='store_true', dest='use_ipv6',
            help='Tells Django to use an IPv6 address.',
        )
        parser.add_argument(
            '--nothreading', action='store_false', dest='use_threading',
            help='Tells Django to NOT use threading.',
        )
        parser.add_argument(
            '--noreload', action='store_false', dest='use_reloader',
            help='Tells Django to NOT use the auto-reloader.',
        )
    def execute(self, *args, **options):
        if options['no_color']:
            # We rely on the environment because it's currently the only
            # way to reach WSGIRequestHandler. This seems an acceptable
            # compromise considering `runserver` runs indefinitely.
            os.environ["DJANGO_COLORS"] = "nocolor"
        super().execute(*args, **options)
    def get_handler(self, *args, **options):
        """Return the default WSGI handler for the runner."""
        return get_internal_wsgi_application()
    # Validate settings and the addr:port argument, then start the server.
    def handle(self, *args, **options):
        if not settings.DEBUG and not settings.ALLOWED_HOSTS:
            raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
        self.use_ipv6 = options['use_ipv6']
        if self.use_ipv6 and not socket.has_ipv6:
            raise CommandError('Your Python does not support IPv6.')
        self._raw_ipv6 = False
        if not options['addrport']:
            self.addr = ''
            self.port = self.default_port
        else:
            m = re.match(naiveip_re, options['addrport'])
            if m is None:
                raise CommandError('"%s" is not a valid port number '
                                   'or address:port pair.' % options['addrport'])
            self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
            if not self.port.isdigit():
                raise CommandError("%r is not a valid port number." % self.port)
            if self.addr:
                if _ipv6:
                    # Strip the surrounding brackets from a literal IPv6 address.
                    self.addr = self.addr[1:-1]
                    self.use_ipv6 = True
                    self._raw_ipv6 = True
                elif self.use_ipv6 and not _fqdn:
                    raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
        if not self.addr:
            self.addr = self.default_addr_ipv6 if self.use_ipv6 else self.default_addr
            self._raw_ipv6 = self.use_ipv6
        self.run(**options)
    def run(self, **options):
        """Run the server, using the autoreloader if needed."""
        use_reloader = options['use_reloader']
        if use_reloader:
            autoreload.main(self.inner_run, None, options)
        else:
            self.inner_run(None, **options)
    # The actual serving loop; runs (possibly) inside the autoreloader child.
    def inner_run(self, *args, **options):
        # If an exception was silenced in ManagementUtility.execute in order
        # to be raised in the child process, raise it now.
        autoreload.raise_last_exception()
        threading = options['use_threading']
        # 'shutdown_message' is a stealth option.
        shutdown_message = options.get('shutdown_message', '')
        quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
        self.stdout.write("Performing system checks...\n\n")
        self.check(display_num_errors=True)
        # Need to check migrations here, so can't use the
        # requires_migrations_check attribute.
        self.check_migrations()
        now = datetime.now().strftime('%B %d, %Y - %X')
        self.stdout.write(now)
        self.stdout.write((
            "Django version %(version)s, using settings %(settings)r\n"
            "Starting development server at %(protocol)s://%(addr)s:%(port)s/\n"
            "Quit the server with %(quit_command)s.\n"
        ) % {
            "version": self.get_version(),
            "settings": settings.SETTINGS_MODULE,
            "protocol": self.protocol,
            "addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
            "port": self.port,
            "quit_command": quit_command,
        })
        try:
            handler = self.get_handler(*args, **options)
            run(self.addr, int(self.port), handler,
                ipv6=self.use_ipv6, threading=threading, server_cls=self.server_cls)
        except socket.error as e:
            # Use helpful error messages instead of ugly tracebacks.
            ERRORS = {
                errno.EACCES: "You don't have permission to access that port.",
                errno.EADDRINUSE: "That port is already in use.",
                errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
            }
            try:
                error_text = ERRORS[e.errno]
            except KeyError:
                error_text = e
            self.stderr.write("Error: %s" % error_text)
            # Need to use an OS exit because sys.exit doesn't work in a thread
            os._exit(1)
        except KeyboardInterrupt:
            if shutdown_message:
                self.stdout.write(shutdown_message)
            sys.exit(0)
# Kept for backward compatibility
BaseRunserverCommand = Command
| [
"rizwansoaib@gmail.com"
] | rizwansoaib@gmail.com |
422ade437e3e4afc6cab2ea04be5ae59d5d4d2ff | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/jedi/third_party/typeshed/stdlib/2and3/token.pyi | a806a466b8eaa65b05e25b80d76118f18ddb69f4 | [
"MIT",
"Apache-2.0"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 1,468 | pyi | import sys
from typing import Dict
ENDMARKER: int
NAME: int
NUMBER: int
STRING: int
NEWLINE: int
INDENT: int
DEDENT: int
LPAR: int
RPAR: int
LSQB: int
RSQB: int
COLON: int
COMMA: int
SEMI: int
PLUS: int
MINUS: int
STAR: int
SLASH: int
VBAR: int
AMPER: int
LESS: int
GREATER: int
EQUAL: int
DOT: int
PERCENT: int
if sys.version_info < (3,):
BACKQUOTE: int
LBRACE: int
RBRACE: int
EQEQUAL: int
NOTEQUAL: int
LESSEQUAL: int
GREATEREQUAL: int
TILDE: int
CIRCUMFLEX: int
LEFTSHIFT: int
RIGHTSHIFT: int
DOUBLESTAR: int
PLUSEQUAL: int
MINEQUAL: int
STAREQUAL: int
SLASHEQUAL: int
PERCENTEQUAL: int
AMPEREQUAL: int
VBAREQUAL: int
CIRCUMFLEXEQUAL: int
LEFTSHIFTEQUAL: int
RIGHTSHIFTEQUAL: int
DOUBLESTAREQUAL: int
DOUBLESLASH: int
DOUBLESLASHEQUAL: int
AT: int
if sys.version_info >= (3,):
RARROW: int
ELLIPSIS: int
if sys.version_info >= (3, 5):
ATEQUAL: int
if sys.version_info < (3, 7):
# These were removed in Python 3.7 but added back in Python 3.8
AWAIT: int
ASYNC: int
if sys.version_info >= (3, 8):
AWAIT: int
ASYNC: int
OP: int
ERRORTOKEN: int
N_TOKENS: int
NT_OFFSET: int
tok_name: Dict[int, str]
if sys.version_info >= (3, 7):
COMMENT: int
NL: int
ENCODING: int
if sys.version_info >= (3, 8):
TYPE_COMMENT: int
TYPE_IGNORE: int
COLONEQUAL: int
EXACT_TOKEN_TYPES: Dict[str, int]
def ISTERMINAL(x: int) -> bool: ...
def ISNONTERMINAL(x: int) -> bool: ...
def ISEOF(x: int) -> bool: ...
| [
"joao.a.severgnini@gmail.com"
] | joao.a.severgnini@gmail.com |
f48b5176aa76b1545ac19b4b842a6d837687cc1b | 5c19d0cd1ad792d7e4dc619d4a359a9b73187f85 | /exile/exile/celery.py | 856d204484f0dcd442921e9a2754499e98208463 | [
"MIT"
] | permissive | wahello/Exile-Web | c6545b41472c5da7368d0194918cd853f426eb34 | f8d8dc18e44c65d12a45e0821002f26ca07cf516 | refs/heads/master | 2020-03-29T01:51:29.393527 | 2018-09-19T03:53:27 | 2018-09-19T03:53:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,158 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
from celery.decorators import task
from celery import schedules
import datetime
import os
# set the default Django settings module for the 'celery' program.
# Bootstrap: point Celery at the Django settings before creating the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'exile.settings')
app = Celery('exile')
# Using a string here means the worker will not have to
# pickle the objects when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
# Project root, used below to locate the notix.txt marker file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@task(name="notification")
def notification(tarea):
    """Create a Notificacion (plus one SubNotificacion per SubTarea) for
    the Tarea with primary key *tarea*.

    Unknown primary keys are silently ignored.  A timestamp is always
    written to ``notix.txt`` as a cheap execution marker.
    """
    # Imported lazily so the task module can load before Django app registry.
    from operacion.models import Tarea, Notificacion, SubTarea, SubNotificacion
    tarea = Tarea.objects.filter(pk=tarea).first()
    if tarea:
        notificacion = Notificacion.objects.create(
            tarea=tarea
        )
        subtareas = SubTarea.objects.filter(tarea=tarea)
        for subtarea in subtareas:
            SubNotificacion.objects.create(
                # BUG FIX: the original passed the task function
                # ``notification`` here instead of the freshly created
                # ``notificacion`` model instance.
                notificacion = notificacion,
                subtarea = subtarea,
                nombre = subtarea.nombre,
                descripcion = subtarea.descripcion
            )
        # end for
    # end if
    # Record the execution time so the run can be verified externally.
    # ``with`` guarantees the handle is closed even on write failure.
    with open(os.path.join(BASE_DIR, "notix.txt"), "w+") as marker:
        marker.write(str(datetime.datetime.now()))
# end def
@task(name="ejecutar")
def ejecutar(tarea):
    """Schedule (or immediately fire) the ``notification`` task for the
    Tarea with primary key *tarea*.

    If the tarea has an interval or crontab schedule, a PeriodicTask is
    (get-or-)created for it; otherwise the notification runs right away.
    """
    from djcelery.models import PeriodicTask
    from operacion.models import Tarea
    tarea = Tarea.objects.filter(pk=tarea).first()
    if tarea:
        if tarea.interval or tarea.crontab:
            tsk, created = PeriodicTask.objects.get_or_create(
                interval = tarea.interval,
                crontab = tarea.crontab,
                name = 'Tarea #%d' % (tarea.pk, ),
                task = 'notification',
                args = [tarea.pk],
                # Stop firing after the task's end (or execution) date.
                expires = tarea.fecha_finalizacion or tarea.fecha_ejecucion
            )
        else:
            # BUG FIX: the original called ``notification.dalay`` (typo),
            # which raises AttributeError; ``delay`` is Celery's async call.
            notification.delay(tarea.pk)
    # end if
# end if
# end def
| [
"luismiguel.mopa@gmail.com"
] | luismiguel.mopa@gmail.com |
416b91d86239b21827ade38d832cf48c85d4b70d | d57fe8c5e9fd91091999c293f17a35e310a7b56d | /testblock.py | 2f942ce55336fcc921dad329b6f8b3ca8b2bfb48 | [] | no_license | barrystyle/dcrypt_hash | 814d9d15079a9c895ce57ddafb9aba2f6b62156d | f75ae2e74f9dee2257d9509b67d14e3a71120e4e | refs/heads/master | 2020-03-19T00:08:36.920197 | 2018-05-30T14:43:06 | 2018-05-30T14:43:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,632 | py | import dcrypt_hash
import binascii
import os, sys, time, datetime, random
def send(cmd):
    # Write text (typically an ANSI control sequence) to stdout unbuffered.
    sys.stdout.write(cmd)
    sys.stdout.flush()
def pos(line, column):
    # Move the terminal cursor to (line, column) via an ANSI escape code.
    send('\033[%s;%sf' % (line, column))
def hexifynonce(nonce):
    # Lowercase hex rendering of the nonce, zero-padded on the left to at
    # least eight digits (wider nonces keep their full width).
    return '%08x' % nonce
def rand64byte():
    # Despite the name, this returns a fixed 64-character run of '0'
    # (placeholder hash field; original note: "should solve at 511a").
    return '0' * 64
def returnheader():
    # Assemble the fixed hex block-header prefix: version, prev-hash
    # placeholder, merkle placeholder, fixed timestamp, fixed nBits.
    parts = ["02000000", rand64byte(), rand64byte(), "814cdb52", "f0ff0f1e"]
    return ''.join(parts)
# main: brute-force nonces until the dcrypt hash beats the fixed target.
nonce = 0
print ''
# Compact difficulty target; a hash is a solution when numerically below it.
target = "0000ffff00000000000000000000000000000000000000000000000000000000"
prehash = returnheader()
starthash = time.time()
while True:
    # NOTE(review): the header is only rebuilt every 7th nonce, so six of
    # every seven iterations re-hash the same header -- confirm whether the
    # rebuild was meant to be unconditional.
    if nonce % 7 == 0:
        header = prehash + hexifynonce(nonce)
    pos(1,1)
    print 'nonce: ' + hexifynonce(nonce)
    pos(3,1)
    print 'header: ' + header
    # hash fn with timers
    pretimer = datetime.datetime.now()
    hashbin = binascii.unhexlify(header)
    posthash = dcrypt_hash.getPoWHash(hashbin)
    posthashhex = binascii.hexlify(posthash[:32])
    posttimer = datetime.datetime.now()
    hashtime = posttimer - pretimer
    pos(7,1)
    print 'hashed: ' + posthashhex
    pos (10,1)
    # Rough hashrate from the per-hash wall time.
    print '%d h/s ' % (1000000/hashtime.microseconds)
    # Byte-wise comparison of raw digests decides whether target is beaten.
    if posthash < binascii.unhexlify(target):
        pos(12,1)
        print posthashhex
        print target
        finishhash = time.time()
        print 'BLOCK (took ' + str(int(finishhash-starthash)) + 's to solve)'
        sys.exit()
    nonce=nonce+1
| [
"barrystyle@westnet.com.au"
] | barrystyle@westnet.com.au |
6592281fb24a2149ff5343af27e9394d9e210a1b | f0f9d9b384e13c69700a9dfaa4163b272828eca4 | /lambda_tiler/ogc.py | fffcf6484478b799c0428229837334328ed0e776 | [
"BSD-3-Clause"
] | permissive | mwengren/lambda-tiler | 739ebade32182aef26cc70d8da1d170ca382e14c | adbf119ee0289bfe6ad7fa9e88a2968aa22a65f2 | refs/heads/master | 2021-02-04T07:27:45.504403 | 2020-01-29T17:45:12 | 2020-01-29T17:45:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,411 | py | """OCG wmts template."""
from typing import Tuple
def wmts_template(
    endpoint: str,
    layer_name: str,
    query_string: str = "",
    minzoom: int = 0,
    maxzoom: int = 25,
    # BUG FIX: the default was [-180, -85.051129, 85.051129, 180], i.e.
    # (west, south, north, east), which rendered an invalid UpperCorner
    # of "85.051129 180".  WGS84 bounds are (west, south, east, north).
    # Also an immutable tuple now instead of a mutable default list.
    bounds: Tuple = (-180, -85.051129, 180, 85.051129),
    tile_scale: int = 1,
    tile_format: str = "png",
    title: str = "Cloud Optimizied GeoTIFF",
) -> str:
    """
    Create WMTS XML template.

    Attributes
    ----------
    endpoint : str, required
        lambda tiler endpoint.
    layer_name : str, required
        Layer name.
    query_string : str, optional
        Endpoint querystring.
    minzoom : int, optional (default: 0)
        Mosaic min zoom.
    maxzoom : int, optional (default: 25)
        Mosaic max zoom.
    bounds : tuple, optional (default: (-180, -85.051129, 180, 85.051129))
        WGS84 layer bounds as (west, south, east, north).
    tile_scale : int, optional (default: 1 -> 256px)
        Tile endpoint size scale.
    tile_format: str, optional (default: png)
        Tile image type.
    title: str, optional (default: "Cloud Optimizied GeoTIFF")
        Layer title.

    Returns
    -------
    xml : str
        OGC Web Map Tile Service (WMTS) XML template.

    """
    # "tif" is the only extension whose MIME subtype differs from its name.
    media_type = "tiff" if tile_format == "tif" else tile_format
    content_type = f"image/{media_type}"
    tilesize = 256 * tile_scale
    # One <TileMatrix> entry per zoom level of the GoogleMapsCompatible grid.
    tileMatrix = []
    for zoom in range(minzoom, maxzoom + 1):
        tm = f"""<TileMatrix>
                <ows:Identifier>{zoom}</ows:Identifier>
                <ScaleDenominator>{559082264.02872 / 2 ** zoom / tile_scale}</ScaleDenominator>
                <TopLeftCorner>-20037508.34278925 20037508.34278925</TopLeftCorner>
                <TileWidth>{tilesize}</TileWidth>
                <TileHeight>{tilesize}</TileHeight>
                <MatrixWidth>{2 ** zoom}</MatrixWidth>
                <MatrixHeight>{2 ** zoom}</MatrixHeight>
            </TileMatrix>"""
        tileMatrix.append(tm)
    tileMatrix = "\n".join(tileMatrix)
    xml = f"""<Capabilities
        xmlns="http://www.opengis.net/wmts/1.0"
        xmlns:ows="http://www.opengis.net/ows/1.1"
        xmlns:xlink="http://www.w3.org/1999/xlink"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xmlns:gml="http://www.opengis.net/gml"
        xsi:schemaLocation="http://www.opengis.net/wmts/1.0 http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd"
        version="1.0.0">
       <ows:ServiceIdentification>
            <ows:Title>{title}</ows:Title>
            <ows:ServiceType>OGC WMTS</ows:ServiceType>
            <ows:ServiceTypeVersion>1.0.0</ows:ServiceTypeVersion>
        </ows:ServiceIdentification>
        <ows:OperationsMetadata>
            <ows:Operation name="GetCapabilities">
                <ows:DCP>
                    <ows:HTTP>
                        <ows:Get xlink:href="{endpoint}/wmts?{query_string}">
                            <ows:Constraint name="GetEncoding">
                                <ows:AllowedValues>
                                    <ows:Value>RESTful</ows:Value>
                                </ows:AllowedValues>
                            </ows:Constraint>
                        </ows:Get>
                    </ows:HTTP>
                </ows:DCP>
            </ows:Operation>
            <ows:Operation name="GetTile">
                <ows:DCP>
                    <ows:HTTP>
                        <ows:Get xlink:href="{endpoint}/wmts?{query_string}">
                            <ows:Constraint name="GetEncoding">
                                <ows:AllowedValues>
                                    <ows:Value>RESTful</ows:Value>
                                </ows:AllowedValues>
                            </ows:Constraint>
                        </ows:Get>
                    </ows:HTTP>
                </ows:DCP>
            </ows:Operation>
        </ows:OperationsMetadata>
        <Contents>
            <Layer>
                <ows:Title>{title}</ows:Title>
                <ows:Identifier>{layer_name}</ows:Identifier>
                <ows:Abstract>cogeo-mosaic</ows:Abstract>
                <ows:WGS84BoundingBox crs="urn:ogc:def:crs:OGC:2:84">
                    <ows:LowerCorner>{bounds[0]} {bounds[1]}</ows:LowerCorner>
                    <ows:UpperCorner>{bounds[2]} {bounds[3]}</ows:UpperCorner>
                </ows:WGS84BoundingBox>
                <Style isDefault="true">
                    <ows:Identifier>default</ows:Identifier>
                </Style>
                <Format>{content_type}</Format>
                <TileMatrixSetLink>
                    <TileMatrixSet>GoogleMapsCompatible</TileMatrixSet>
                </TileMatrixSetLink>
                <ResourceURL
                    format="{content_type}"
                    resourceType="tile"
                    template="{endpoint}/tiles/{{TileMatrix}}/{{TileCol}}/{{TileRow}}@{tile_scale}x.{tile_format}?{query_string}"/>
            </Layer>
            <TileMatrixSet>
                <ows:Title>GoogleMapsCompatible</ows:Title>
                <ows:Abstract>GoogleMapsCompatible EPSG:3857</ows:Abstract>
                <ows:Identifier>GoogleMapsCompatible</ows:Identifier>
                <ows:SupportedCRS>urn:ogc:def:crs:EPSG::3857</ows:SupportedCRS>
                {tileMatrix}
            </TileMatrixSet>
        </Contents>
        <ServiceMetadataURL xlink:href='{endpoint}/wmts?{query_string}'/>
    </Capabilities>"""
    return xml
| [
"vincent.sarago@gmail.com"
] | vincent.sarago@gmail.com |
a9c2854fbabe62e833ebb36095fe9dd126dc6fdf | 4e96f383d4703ad8ee58869ed91a0c8432c8a051 | /Cura/Cura/run_mypy.py | 6be424bda8a3a84b0a2f8e8d1e9d238a72d67068 | [
"GPL-3.0-only",
"LGPL-3.0-only"
] | permissive | flight7788/3d-printing-with-moveo-1 | b2dba26010c4fa31815bc1d2d0966161a8600081 | 7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0 | refs/heads/Feature_Marlin_with_AlanBoy | 2022-08-30T18:36:44.785058 | 2020-05-30T07:52:58 | 2020-05-30T07:52:58 | 212,583,912 | 0 | 0 | MIT | 2020-05-16T07:39:47 | 2019-10-03T13:13:01 | C | UTF-8 | Python | false | false | 2,893 | py | #!/usr/bin/env python
import os
import sys
import subprocess
# A quick Python implementation of unix 'where' command.
def where(exe_name: str, search_path: str = os.getenv("PATH")) -> str:
    """Locate *exe_name* on *search_path* (plus the Python install dirs);
    return the first matching full path, or '' when nothing is found."""
    candidates = (search_path or "").split(os.pathsep)
    print(" -> sys.executable location: %s" % sys.executable)
    exec_dir = os.path.dirname(sys.executable)
    base_dir = os.path.dirname(exec_dir)
    candidates += [exec_dir,
                   os.path.join(base_dir, "bin"),
                   os.path.join(base_dir, "scripts"),
                   ]
    for directory in sorted(set(candidates)):
        print(" -> Searching %s" % directory)
        full_path = os.path.join(directory, exe_name)
        if os.path.exists(full_path):
            return full_path
    return ""
def findModules(path):
    """Return names of direct subdirectories of *path* that are Python
    packages (i.e. contain an __init__.py)."""
    return [
        entry.name
        for entry in os.scandir(path)
        if entry.is_dir() and os.path.exists(os.path.join(path, entry.name, "__init__.py"))
    ]
def main():
    """Run mypy over the cura package and every plugin package; return 0
    when all modules pass, 1 otherwise (used as the process exit code)."""
    # Find Uranium via the PYTHONPATH var
    uraniumUMPath = where("UM", os.getenv("PYTHONPATH"))
    # BUG FIX: where() returns "" (never None) when nothing is found, so
    # the original ``is None`` check could never trigger the fallback.
    if not uraniumUMPath:
        uraniumUMPath = os.path.join("..", "Uranium")
    uraniumPath = os.path.dirname(uraniumUMPath)
    mypy_path_parts = [".", os.path.join(".", "plugins"), os.path.join(".", "plugins", "VersionUpgrade"),
                       uraniumPath, os.path.join(uraniumPath, "stubs")]
    # MYPYPATH uses the platform's path-list separator.
    if sys.platform == "win32":
        os.putenv("MYPYPATH", ";".join(mypy_path_parts))
    else:
        os.putenv("MYPYPATH", ":".join(mypy_path_parts))
    # Mypy really needs to be run via its Python script otherwise it can't find its data files.
    mypy_exe_name = "mypy.exe" if sys.platform == "win32" else "mypy"
    mypy_exe_dir = where(mypy_exe_name)
    mypy_module = os.path.join(os.path.dirname(mypy_exe_dir), mypy_exe_name)
    print("Found mypy exe path: %s" % mypy_exe_dir)
    print("Found mypy module path: %s" % mypy_module)
    plugins = findModules("plugins")
    plugins.sort()
    mods = ["cura"] + plugins + findModules("plugins/VersionUpgrade")
    success_code = 0
    for mod in mods:
        print("------------- Checking module {mod}".format(**locals()))
        # On non-Windows, run the mypy script through the current interpreter.
        if sys.platform == "win32":
            result = subprocess.run([mypy_module, "-p", mod, "--ignore-missing-imports"])
        else:
            result = subprocess.run([sys.executable, mypy_module, "-p", mod, "--ignore-missing-imports"])
        if result.returncode != 0:
            print("\nModule {mod} failed checking. :(".format(**locals()))
            success_code = 1
    if success_code:
        print("\n\nSome modules failed checking!")
    else:
        print("\n\nDone checking. All is good.")
    return success_code
if __name__ == "__main__":
    sys.exit(main())
| [
"t106360212@ntut.org.tw"
] | t106360212@ntut.org.tw |
9981fe4f38e89a388281ac15a1fb30d420655380 | 5bdd9c4eccf7e43f7766cfd64fe847eb3f62ad99 | /devel/.private/camera_calibration_parsers/lib/python2.7/dist-packages/camera_calibration_parsers/__init__.py | 0cba4d6f187c250c65f80e6dc22bf6777a4b24de | [] | no_license | UAVMasterLabs/vision_ws | 3467a223ec0d7012207978f803e3cd4b554c9ef6 | d0a6052b9f60d0ecc52033fd1caa5e1de5606b35 | refs/heads/master | 2021-04-29T03:59:57.903441 | 2017-01-04T17:13:31 | 2017-01-04T17:13:31 | 78,033,751 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | # -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
# Catkin-generated package __init__: prepends the devel-space source dirs to
# sys.path, extends the package __path__, then exec()s any source-space
# __init__ so its definitions appear in this module's namespace.
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/home/uav_master/vision_ws/src/image_common/camera_calibration_parsers/src".split(";")
for p in reversed(__extended_path):
    sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
    # Either a module file <pkg>.py or a package dir <pkg>/__init__.py.
    src_init_file = os_path.join(p, __name__ + '.py')
    if os_path.isfile(src_init_file):
        __execfiles.append(src_init_file)
    else:
        src_init_file = os_path.join(p, __name__, '__init__.py')
        if os_path.isfile(src_init_file):
            __execfiles.append(src_init_file)
    del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
    # Generated boilerplate: executes trusted, locally generated files only.
    with open(__execfile, 'r') as __fh:
        exec(__fh.read())
    del __fh
del __execfile
del __execfiles
| [
"sputnick1124@comcast.net"
] | sputnick1124@comcast.net |
cda00c7141b39975d7776a233880dd653bc965dd | 8fa938eddcc75eb7dff1f2055c49cb3817a00c63 | /Basic - Part1/ex103.py | 761d67ac4c07d80674c52d517a2e7bba681011cc | [] | no_license | jayhebe/w3resource_exercises | f27109759d112b0611574aa70eb378ace447c2a0 | b29aa7c806f6021a8988e83bb9f674522a41380d | refs/heads/master | 2020-05-07T09:23:24.039271 | 2020-01-30T15:05:06 | 2020-01-30T15:05:06 | 180,374,062 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | import os
def get_filenname(path):
    # Return just the file-name component of *path* (the text after the
    # last separator).  Name keeps the original spelling used by callers.
    head_tail = os.path.split(path)
    return head_tail[1]
# Demo: print the file-name component of a sample path.
if __name__ == '__main__':
    print(get_filenname(r"C:/Study/Programming/Python/w3resource_exercises/Basic - Part1/ex102.py"))
| [
"jayhebe1983@sina.com"
] | jayhebe1983@sina.com |
1a86d8e204b5968a4983f1b99e3e0f617047c089 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/machinelearningservices/v20200601/get_private_endpoint_connection.py | 44e976b4acafd06471374d33c9291151ea432b91 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,477 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Names exported by this generated module.
__all__ = [
    'GetPrivateEndpointConnectionResult',
    'AwaitableGetPrivateEndpointConnectionResult',
    'get_private_endpoint_connection',
    'get_private_endpoint_connection_output',
]
# Auto-generated by the Pulumi SDK generator -- do not edit by hand.
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
    """
    The Private Endpoint Connection resource.
    """
    # Each argument is validated only when truthy (generated pattern) and
    # stored via pulumi.set for the @pulumi.getter properties below.
    def __init__(__self__, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if private_endpoint and not isinstance(private_endpoint, dict):
            raise TypeError("Expected argument 'private_endpoint' to be a dict")
        pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
            raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
        pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        ResourceId of the private endpoint connection.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Friendly name of the private endpoint connection.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
        """
        The resource of private end point.
        """
        return pulumi.get(self, "private_endpoint")
    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
        """
        A collection of information about the state of the connection between service consumer and provider.
        """
        return pulumi.get(self, "private_link_service_connection_state")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the private endpoint connection resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type of private endpoint connection.
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this a generator function (as
        # __await__ must return a generator); awaiting it immediately
        # produces a copy of the already-resolved result.
        if False:
            yield self
        return GetPrivateEndpointConnectionResult(
            id=self.id,
            name=self.name,
            private_endpoint=self.private_endpoint,
            private_link_service_connection_state=self.private_link_service_connection_state,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    workspace_name: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
    """
    The Private Endpoint Connection resource.
    :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the workspace
    :param str resource_group_name: Name of the resource group in which workspace is located.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    __args__ = dict()
    __args__['privateEndpointConnectionName'] = private_endpoint_connection_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function and re-wrap the typed result so callers
    # may also `await` it.
    __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20200601:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
    return AwaitableGetPrivateEndpointConnectionResult(
        id=__ret__.id,
        name=__ret__.name,
        private_endpoint=__ret__.private_endpoint,
        private_link_service_connection_state=__ret__.private_link_service_connection_state,
        provisioning_state=__ret__.provisioning_state,
        type=__ret__.type)
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                                           resource_group_name: Optional[pulumi.Input[str]] = None,
                                           workspace_name: Optional[pulumi.Input[str]] = None,
                                           opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
    """
    The Private Endpoint Connection resource.
    :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the workspace
    :param str resource_group_name: Name of the resource group in which workspace is located.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    # Body intentionally empty: lift_output_func derives this
    # Output-accepting variant from the plain function above.
    ...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
193d277299210bfaf8367349dfbc2e841745a28c | 3e5ecad4d2f681f2f4f749109cc99deea1209ea4 | /keras/keras20_boston_keras2.py | 31b6dc77e79773729c7cda8127ecb3db118a9c2e | [] | no_license | SunghoonSeok/Study | f41ede390079037b2090e6df20e5fb38f2e59b8f | 50f02b9c9bac904cd4f6923b41efabe524ff3d8a | refs/heads/master | 2023-06-18T06:47:55.545323 | 2021-07-05T00:47:55 | 2021-07-05T00:47:55 | 324,866,762 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | # 2개의 파일을 만드시오.
# 1. EarlyStopping을 적용하지 않은 최고의 모델
# 2. EarlyStopping을 적용한 최고의 모델
import numpy as np
from tensorflow.keras.datasets import boston_housing
# Load the Boston housing regression dataset (13 features per sample,
# see Input(shape=(13,)) below).
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
from sklearn.model_selection import train_test_split
# Carve a 20% validation split out of the training set.
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=0.8, shuffle=True, random_state = 66)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# Fit the scaler on the training portion only, then apply the same
# transform to validation and test sets (avoids data leakage).
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_val = scaler.transform(x_val)
x_test = scaler.transform(x_test)
#2. Build the model (Keras functional API)
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input
# 13 input features -> stack of relu Dense layers -> single linear
# regression output.
inputs = Input(shape=(13,))
dense1 = Dense(64, activation='relu')(inputs)
dense1 = Dense(128, activation='relu')(dense1)
dense1 = Dense(128, activation='relu')(dense1)
dense1 = Dense(256, activation='relu')(dense1)
dense1 = Dense(128, activation='relu')(dense1)
dense1 = Dense(64, activation='relu')(dense1)
outputs = Dense(1)(dense1)
model = Model(inputs=inputs, outputs=outputs)
model.summary()
#3. Compile and train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
from tensorflow.keras.callbacks import EarlyStopping
# NOTE(review): monitors the *training* loss ('loss'), not 'val_loss',
# even though a validation set is supplied -- confirm this is intended.
early_stopping = EarlyStopping(monitor='loss', patience=20, mode='auto')
model.fit(x_train, y_train, batch_size=8, epochs=1000, validation_data=(x_val, y_val),callbacks=[early_stopping])
#4. Evaluate and predict on the held-out test set
loss, mae = model.evaluate(x_test, y_test, batch_size=8)
y_predict = model.predict(x_test)
print("loss, mae : ", loss, mae)
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
    """Root-mean-squared error between true and predicted values.

    Computed directly with numpy instead of delegating to
    sklearn.metrics.mean_squared_error; for the default (uniform-weight)
    case the result is numerically identical and the extra dependency
    is avoided.
    """
    diff = np.asarray(y_test, dtype=float) - np.asarray(y_predict, dtype=float)
    return np.sqrt(np.mean(diff ** 2))
print("RMSE : ", RMSE(y_test, y_predict))
#print("mse : ", mean_squared_error(y_test, y_predict))
from sklearn.metrics import r2_score
# R^2 (coefficient of determination) on the held-out test set.
r2 = r2_score(y_test, y_predict)
print("R2 : ", r2)
# Build it with this (keras' bundled dataset) -- don't pull the data
# from scikit-learn as before.
# Before early stopping:
# loss, mae :  11.031598091125488 2.355877637863159
# RMSE :  3.3213850693202898
# R2 :  0.8674785107264735
# After early stopping:
# loss, mae :  15.436517715454102 2.6361069679260254
# RMSE :  3.9289335323222736
# R2 :  0.8145626496625189
"76455292+SunghoonSeok@users.noreply.github.com"
] | 76455292+SunghoonSeok@users.noreply.github.com |
cc49651177ddaf4f27d72fbd15c45825d0a9aaad | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/track/management/tracked_command.py | 36f725a3e5e70bc307bce4534ec945c707c19579 | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 2,220 | py | """Provides management command calling info to tracking context."""
from django.core.management.base import BaseCommand
from eventtracking import tracker
class TrackedCommand(BaseCommand):  # lint-amnesty, pylint: disable=abstract-method
    """
    Management-command base class that tags tracking events with the
    invoking command line.

    While the command runs, a tracking context named ``edx.mgmt.command``
    is active whose single value, ``command``, holds the program name and
    subcommand (e.g. ``./manage.py create_user``).  Every tracking event
    emitted during the run therefore records how it was invoked.  The
    context name itself is only used to scope the additions and does not
    appear in the emitted events.
    """
    prog_name = 'unknown'

    def create_parser(self, prog_name, subcommand):  # lint-amnesty, pylint: disable=arguments-differ
        """Capture the full command line, then defer to the base class."""
        self.prog_name = '{} {}'.format(prog_name, subcommand)
        return super().create_parser(prog_name, subcommand)

    def execute(self, *args, **options):
        """Run the command inside the ``edx.mgmt.command`` tracking context."""
        with tracker.get_tracker().context('edx.mgmt.command',
                                           {'command': self.prog_name}):
            super().execute(*args, **options)
| [
"rafael.luque@osoco.es"
] | rafael.luque@osoco.es |
1673ced94afb1eea71c1e0a25c40e309d7ddaafe | 9c337ea179ce44d1570219647b60b78c84ed54a7 | /ad_donation/admin.py | 118cd343ec30421963b3eb931adf643d0abcf2c7 | [] | no_license | timshin43/donaddcom | aa9cb5616907f145b388f290429dd9d1fd61f73b | d372b6677a15d699abc634f8f49581b8aa23f9a0 | refs/heads/master | 2022-12-10T10:10:26.494884 | 2020-12-08T02:10:48 | 2020-12-08T02:10:48 | 180,018,996 | 0 | 0 | null | 2022-12-08T02:31:05 | 2019-04-07T20:20:16 | Python | UTF-8 | Python | false | false | 636 | py | from django.contrib import admin
from .models import Total_donation, Video, Donations, Project_for_donations
from embed_video.admin import AdminVideoMixin
from modeltranslation.admin import TranslationAdmin, TranslationTabularInline, TranslationGenericStackedInline
# Register your models here.
# this video class is required to show video field in an admin panel
class VideoAdmin(AdminVideoMixin, admin.ModelAdmin):
    """Admin for Video models; AdminVideoMixin makes the video field render in the admin panel."""
    pass
class BlogAdmin(TranslationAdmin):
    """Admin based on django-modeltranslation's TranslationAdmin for models with translated fields."""
    pass
# Expose the donation models through the Django admin site.
admin.site.register(Total_donation)
admin.site.register(Donations)
admin.site.register(Video, VideoAdmin)
admin.site.register(Project_for_donations,BlogAdmin)
| [
"timshin.sergei@gmail.com"
] | timshin.sergei@gmail.com |
801e2e1987071a7ff34f924cc7f2f8111ed54324 | e4bb415dbdf69325b129dac01f3caf0a403fc00f | /fb_post_v2/interactors/add_comment_interactor.py | 72e591a20400c70609c7fd15dcbe28a765832418 | [] | no_license | bharatmudragada/fb_post_data | d67d7cd4be6eabc76091fc9ff86daebb7101ce5e | 58ee0b3581851b8ca1a963ee1aa1d1dd263c8e03 | refs/heads/master | 2022-05-11T16:10:20.220419 | 2019-07-29T05:37:17 | 2019-07-29T05:37:17 | 197,405,858 | 0 | 0 | null | 2022-04-22T21:56:53 | 2019-07-17T14:30:23 | Python | UTF-8 | Python | false | false | 1,178 | py | from fb_post_v2.interactors.presenters.json_presenter import JsonPresenter
from fb_post_v2.interactors.storages.post_storage import PostStorage
class AddCommentInteractor:
    """Use-case layer for adding comments and replies to posts."""

    def __init__(self, post_storage: PostStorage, presenter: JsonPresenter):
        self.post_storage = post_storage
        self.presenter = presenter

    def add_comment_to_post(self, post_id: int,
                            comment_user_id: int, comment_text: str):
        """Persist a new comment on a post and return the presenter's response."""
        dto = self.post_storage.add_comment_to_post(
            post_id, comment_user_id, comment_text)
        return self.presenter.get_add_comment_to_post_response(dto)

    def add_reply_to_comment(self, comment_id: int,
                             reply_user_id: int, reply_text: str):
        """Attach a reply to a comment and return the presenter's response."""
        if self.post_storage.is_reply(comment_id):
            # Replying to a reply: re-anchor the reply on the root comment.
            comment_id = self.post_storage.get_comment_id_for_reply(comment_id)
        dto = self.post_storage.add_reply_to_comment(
            comment_id, reply_user_id, reply_text)
        return self.presenter.get_add_reply_to_comment_response(dto)
| [
"bharathmudragada123@gmail.com"
] | bharathmudragada123@gmail.com |
a221119c934898812823905d08bbfc4e752cd7be | 92d8bf41165e5e19c165af12715a5fed9ee5599e | /rpc_rabbitmq_caesarlinsa/config.py | fa3e04f83892c3d797bd8f7a3a4cf59eaf9a8ca2 | [] | no_license | CaesarLinsa/rpc_rabbitmq_caesarlinsa | 40e436445d9160c02c06298f9c0fce5195e43ad6 | 100b8606486eb71537ac48ab98b6b78ba76d0d86 | refs/heads/master | 2022-12-13T07:24:06.887750 | 2020-09-18T06:18:25 | 2020-09-18T06:18:25 | 287,790,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from rpc_rabbitmq_caesarlinsa.options import define, options
define(name="connection_pool_size", default=100, type=int)
define(name="rabbit_user", default="guest", type=str)
define(name="rabbit_password", default="guest", type=str)
define(name="rabbit_host", default="localhost", type=str)
define(name="rabbit_port", default="5672", type=str)
| [
"Caesar_Linsa@163.com"
] | Caesar_Linsa@163.com |
4704d8ec3de4a73dc42240a747eb6728b5dc9979 | 4dd695521343d56ff943e8c1768343d7680714e3 | /experiments/scripts_auto_alldataset/config_GMM_256_fold4.py | a02894381fb036048231cc18763890d4e42ebcfe | [] | no_license | natharb/environment | ea659ee541f6473e92b5b30c549e52b66f47b280 | 86e6cee6e01d2370abeb7c55a2c8a15001735919 | refs/heads/master | 2021-09-28T02:39:02.222966 | 2018-11-13T12:03:34 | 2018-11-13T12:03:34 | 139,762,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#Nathália Alves Rocha Batista (nathbapt@decom.fee.unicamp.br)
import sys
sys.path.insert(0, '.')
import bob.bio.spear
import bob.bio.gmm
import numpy
import scipy.spatial
# Working/output directories for fold 4 of the 256-Gaussian experiment.
temp_directory = './results/all_dataset/GMM/256/fold_4/temp/'
result_directory = './results/all_dataset/GMM/256/fold_4/results/'
sub_directory = 'subdirectory'
# Database definition script for this fold.
database = 'database_GMM_256_fold4.py'
groups = ['dev']
#groups = ['dev', 'eval']
# Voice-activity detection: 2-Gaussian energy-based preprocessor.
preprocessor = bob.bio.spear.preprocessor.Energy_2Gauss(max_iterations = 10, convergence_threshold = 0.0005, variance_threshold = 0.0005, win_length_ms = 20., win_shift_ms = 10., smoothing_window = 10)
# Cepstral feature extraction (19 cepstra + energy, with delta and
# delta-delta features on a 24-filter mel-scale bank).
extractor = bob.bio.spear.extractor.Cepstral(win_length_ms = 25, win_shift_ms = 10, n_filters = 24 , dct_norm = False, f_min = 0, f_max = 4000, delta_win = 2, mel_scale = True, with_energy = True, with_delta = True, with_delta_delta = True, n_ceps = 19, pre_emphasis_coef = 0.97)
# 256-Gaussian GMM back-end (matches the '256' in the output paths).
algorithm = bob.bio.gmm.algorithm.GMMRegular(number_of_gaussians = 256, kmeans_training_iterations = 10, gmm_training_iterations = 10,
    training_threshold = 5e-4, variance_threshold = 5e-4, update_weights = True, update_means = True, update_variances = True, relevance_factor = 4, gmm_enroll_iterations = 1, responsibility_threshold = 0, INIT_SEED = 5489)
parallel = 40
verbose = 2
| [
"nathbapt@decom.fee.unicamp.br"
] | nathbapt@decom.fee.unicamp.br |
93ecddb61429984b724d60a3b07287aab5abfee7 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-cloudauth/aliyunsdkcloudauth/request/v20190307/InitFaceVerifyRequest.py | b1177a3d4b6c4a11a33d80c494634a736c3486c5 | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 4,540 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudauth.endpoint import endpoint_data
class InitFaceVerifyRequest(RpcRequest):
    """RPC request for Cloudauth ``InitFaceVerify`` (API version 2019-03-07).

    Each get_*/set_* pair below maps one request field onto either the
    query string (add_query_param) or, for the face picture payload and
    the model, the POST body (add_body_params).
    """
    def __init__(self):
        RpcRequest.__init__(self, 'Cloudauth', '2019-03-07', 'InitFaceVerify','cloudauth')
        self.set_method('POST')
        # Attach endpoint resolution data when the base class supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
    def get_ProductCode(self):
        return self.get_query_params().get('ProductCode')
    def set_ProductCode(self,ProductCode):
        self.add_query_param('ProductCode',ProductCode)
    def get_FaceContrastPicture(self):
        return self.get_body_params().get('FaceContrastPicture')
    def set_FaceContrastPicture(self,FaceContrastPicture):
        self.add_body_params('FaceContrastPicture', FaceContrastPicture)
    def get_UserId(self):
        return self.get_query_params().get('UserId')
    def set_UserId(self,UserId):
        self.add_query_param('UserId',UserId)
    def get_CertifyId(self):
        return self.get_query_params().get('CertifyId')
    def set_CertifyId(self,CertifyId):
        self.add_query_param('CertifyId',CertifyId)
    def get_CertNo(self):
        return self.get_query_params().get('CertNo')
    def set_CertNo(self,CertNo):
        self.add_query_param('CertNo',CertNo)
    def get_OuterOrderNo(self):
        return self.get_query_params().get('OuterOrderNo')
    def set_OuterOrderNo(self,OuterOrderNo):
        self.add_query_param('OuterOrderNo',OuterOrderNo)
    def get_CertType(self):
        return self.get_query_params().get('CertType')
    def set_CertType(self,CertType):
        self.add_query_param('CertType',CertType)
    def get_FaceContrastPictureUrl(self):
        return self.get_query_params().get('FaceContrastPictureUrl')
    def set_FaceContrastPictureUrl(self,FaceContrastPictureUrl):
        self.add_query_param('FaceContrastPictureUrl',FaceContrastPictureUrl)
    def get_Model(self):
        return self.get_body_params().get('Model')
    def set_Model(self,Model):
        self.add_body_params('Model', Model)
    def get_MetaInfo(self):
        return self.get_query_params().get('MetaInfo')
    def set_MetaInfo(self,MetaInfo):
        self.add_query_param('MetaInfo',MetaInfo)
    def get_OssObjectName(self):
        return self.get_query_params().get('OssObjectName')
    def set_OssObjectName(self,OssObjectName):
        self.add_query_param('OssObjectName',OssObjectName)
    def get_CertName(self):
        return self.get_query_params().get('CertName')
    def set_CertName(self,CertName):
        self.add_query_param('CertName',CertName)
    def get_Ip(self):
        return self.get_query_params().get('Ip')
    def set_Ip(self,Ip):
        self.add_query_param('Ip',Ip)
    def get_Mobile(self):
        return self.get_query_params().get('Mobile')
    def set_Mobile(self,Mobile):
        self.add_query_param('Mobile',Mobile)
    def get_SceneId(self):
        return self.get_query_params().get('SceneId')
    def set_SceneId(self,SceneId):
        self.add_query_param('SceneId',SceneId)
    def get_OssBucketName(self):
        return self.get_query_params().get('OssBucketName')
    def set_OssBucketName(self,OssBucketName):
        self.add_query_param('OssBucketName',OssBucketName)
    def get_CallbackToken(self):
        return self.get_query_params().get('CallbackToken')
    def set_CallbackToken(self,CallbackToken):
        self.add_query_param('CallbackToken',CallbackToken)
    def get_ReturnUrl(self):
        return self.get_query_params().get('ReturnUrl')
    def set_ReturnUrl(self,ReturnUrl):
        self.add_query_param('ReturnUrl',ReturnUrl)
    def get_CallbackUrl(self):
        return self.get_query_params().get('CallbackUrl')
    def set_CallbackUrl(self,CallbackUrl):
        self.add_query_param('CallbackUrl',CallbackUrl)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
f5aca996f408f570a42e2439bd7d2000942fe68a | 660e35c822423685aea19d038daa8356722dc744 | /ldap_authentication/__init__.py | 90d1804c171db1b34d0a654f8f9f8d6d8d2e308e | [] | no_license | saifkazi/tryton_modules | a05cb4a90ae2c46ba39d60d2005ffc18ce5e44bb | 94bd3a4e3fd86556725cdff33b314274dcb20afd | refs/heads/main | 2023-05-05T12:20:02.059236 | 2021-05-19T10:46:37 | 2021-05-19T10:46:37 | 368,768,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from trytond.pool import Pool
from . import res
def register():
    """Register this module's model classes in the Tryton pool."""
    Pool.register(
        res.User,
        module='ldap_authentication', type_='model')
| [
"saif.kazi76@gmail.com"
] | saif.kazi76@gmail.com |
cea9b5cfb24276d6803124aa50bdb466cefde694 | 3fccfdc82e5150f99708794eba07f51467f9ecaa | /mysite/settings.py | c2bc1e85fa007401c819c7815a9c1250a4a31028 | [] | no_license | tanveerahmad1517/blogchapter2 | c1a23b5533982933fe8cd58b3b09109f795b6288 | 374711eef4ba624c935fabccd59f2d03610c2eda | refs/heads/master | 2020-03-23T20:19:39.004218 | 2018-07-23T15:42:23 | 2018-07-23T15:42:23 | 142,035,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,340 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and
# load it from an environment variable before any production deployment.
SECRET_KEY = 'k9x1mot@(^vb9!*%020&eyr+v*o77mrt-5kl@3(ukii8atx_jc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog.apps.BlogConfig',
    'taggit',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
# Outgoing mail via Gmail SMTP (TLS on port 587).
# NOTE(review): a plaintext Gmail password is committed here; revoke it
# and load the credentials from environment variables instead.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_PASSWORD = 'tanveer1236'
EMAIL_HOST_USER = 'tanveerobjects@gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
"tanveerobjects@gmail.com"
] | tanveerobjects@gmail.com |
fb99997e8135db71490449bab0d57e5898d525fe | ed5629376d293b7dbda9f53ef1b57e38cd52d655 | /lstelementcheck.py | 419b728fb7f185706eecc650ca0ff15dca273659 | [] | no_license | sabariks/pythonpgm | eb46172b8ffd17b945f6ccd8241015c9874e37e7 | 1bb9264b6f219b69b9a782591d526fc7adb891cd | refs/heads/master | 2021-01-08T23:22:15.891688 | 2020-02-27T17:37:34 | 2020-02-27T17:37:34 | 242,174,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | n=int(input())
a=list(map(int,input().strip().split()))[:n]
b=list(map(int,input().strip().split()))[:n]
lst=[]
ans = []
for i in range(0,n):
if a[i] in b:
lst.append(a[i])
if len(lst) == 0:
print(-1)
else:
for i in lst:
if i not in ans:
ans.append(i)
print(*ans)
| [
"noreply@github.com"
] | sabariks.noreply@github.com |
5203b5a3a174a6ec97dbfd71224c43900e1d12cc | 3bbbdf87b52c49dc9dcf5d91b9ed58e3e1d161cb | /my_electric_car.py | 61b5b5d433770ae4743a0bb26f06929c395cf0aa | [] | no_license | Weeeendi/Python | 2d5348c5aee48b8e24c12b392f1f8061d9107351 | 9dfd014b0606a9ba82cfc8d02a1bf51be43e7129 | refs/heads/master | 2020-04-20T14:32:53.927902 | 2019-03-05T15:34:06 | 2019-03-05T15:34:06 | 168,901,906 | 2 | 0 | null | 2019-03-05T15:34:09 | 2019-02-03T02:36:19 | HTML | UTF-8 | Python | false | false | 2,327 | py | #!/usr/bin/env python
# coding: utf-8
class Car():
    """A simple model of a car with an odometer."""

    def __init__(self, make, model, year):
        """Store the car's basic attributes and zero the odometer."""
        self.make = make
        self.model = model
        self.year = year
        self.odometer_reading = 0

    def get_desciptive_name(self):
        """Return a title-cased one-line description of the car."""
        return f"{self.year} {self.make} {self.model}".title()

    def read_odometer(self):
        """Print the mileage currently shown on the odometer."""
        print(f"This car has {self.odometer_reading} mile on it.")

    def update_odometer(self, milage):
        """Set the odometer to `milage`, refusing to roll it backwards."""
        if milage < self.odometer_reading:
            print("You can't roll back the odometer!")
        else:
            self.odometer_reading = milage

    def increment_odometer(self, miles):
        """Add `miles` to the odometer; negative increments are rejected."""
        if miles >= 0:
            self.odometer_reading += miles
        else:
            print("You can't roll back the odometer!")

    def fill_gas_tank(self):
        """Report that the gas tank has been filled."""
        print("This car's gas tank is full.")
class ElectricCar(Car):
    """A car powered by a battery instead of gasoline."""

    def __init__(self, make, model, year):
        """Initialise the base-class attributes, then attach a battery."""
        super().__init__(make, model, year)
        self.battery = Battery()

    def fill_gas_tank(self):
        """Override: electric cars have no gas tank to fill."""
        print("This car doesn't need a gas tank.")
class Battery():
    """A simple model of an electric-car battery."""

    # Known battery capacities (kWh) mapped to their approximate range
    # in miles (70 -> 240, 85 -> 270, as in the original branches).
    _RANGES = {70: 240, 85: 270}

    def __init__(self, battery_size=70):
        """Store the battery capacity in kWh (defaults to 70)."""
        self.battery_size = battery_size

    def describe_battery(self):
        """Print a statement describing the battery capacity."""
        print("This car has a " + str(self.battery_size) + "-KWh battery.")

    def get_range(self):
        """Print the approximate range this battery provides.

        Raises:
            ValueError: if the battery size has no known range estimate.
                (The original code crashed with UnboundLocalError for any
                size other than 70 or 85; the local was also named
                ``range``, shadowing the builtin. The output typo
                "mlies" is fixed to "miles".)
        """
        try:
            mileage = self._RANGES[self.battery_size]
        except KeyError:
            raise ValueError(
                "No range estimate for battery size: "
                + str(self.battery_size)) from None
        message = "This car can go approximately " + str(mileage)
        message += " miles on a full charge."
        print(message)
| [
"wendi1078771091@gmail.com"
] | wendi1078771091@gmail.com |
544e1d3e53703b21219ea08608d86dc5c507b3a6 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/talent/v4beta1/talent-v4beta1-py/google/cloud/talent_v4beta1/services/tenant_service/transports/__init__.py | bdfa62764038823c5990cbe7a72f795db70bb258 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import TenantServiceTransport
from .grpc import TenantServiceGrpcTransport
from .grpc_asyncio import TenantServiceGrpcAsyncIOTransport
# Compile a registry of transports.
# Presumably keyed by the transport name requested by the client
# (e.g. transport='grpc') -- verify against the client module.
_transport_registry = OrderedDict()  # type: Dict[str, Type[TenantServiceTransport]]
_transport_registry['grpc'] = TenantServiceGrpcTransport
_transport_registry['grpc_asyncio'] = TenantServiceGrpcAsyncIOTransport
__all__ = (
    'TenantServiceTransport',
    'TenantServiceGrpcTransport',
    'TenantServiceGrpcAsyncIOTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
2ac83f3869109cbe8e35999dfbe0d94f3ade12c1 | 92c516858c6cff00365555782d3037ff028ec28f | /build/robotika_x2_sensor_config_1/catkin_generated/generate_cached_setup.py | 013c3965f56c34f105cb83a11d27f2fd585bb9de | [] | no_license | bobusfil/aloam_ws | 8a9bdedbcc2fa54155950761772e671ff48f3ea3 | 43b5fc6d0086cb79df688a771f26b972696cf5c4 | refs/heads/master | 2023-03-29T13:33:02.677422 | 2021-03-24T17:35:44 | 2021-03-24T17:35:44 | 351,167,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/admin-vras/aloam_ws/devel;/home/admin-vras/workspace/cras_subt/devel;/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/admin-vras/aloam_ws/devel/.private/robotika_x2_sensor_config_1/env.sh')
output_filename = '/home/admin-vras/aloam_ws/build/robotika_x2_sensor_config_1/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"bobusfil@fel.cvut.cz"
] | bobusfil@fel.cvut.cz |
da8a2f5c35da076ac327193b2efc9d17d613163f | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-cosmosdb/azure/mgmt/cosmosdb/models/mongo_db_database_create_update_parameters.py | b0fce2563681071b835401427c6454f3f0ecd9a9 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,557 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MongoDBDatabaseCreateUpdateParameters(Model):
    """Request body for creating/updating a Cosmos DB MongoDB database.

    Both ``resource`` (the database definition in standard JSON format) and
    ``options`` (request options, sent along as headers) are mandatory and
    must be populated before the payload is sent to Azure.

    :param resource: Required. The standard JSON format of a MongoDB database
    :type resource: ~azure.mgmt.cosmosdb.models.MongoDBDatabaseResource
    :param options: Required. Key-value request options.
    :type options: dict[str, str]
    """

    # msrest uses these tables for validation and (de)serialization.
    _validation = {
        'resource': {'required': True},
        'options': {'required': True},
    }

    _attribute_map = {
        'resource': {'key': 'properties.resource', 'type': 'MongoDBDatabaseResource'},
        'options': {'key': 'properties.options', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(MongoDBDatabaseCreateUpdateParameters, self).__init__(**kwargs)
        self.resource = kwargs.get('resource')
        self.options = kwargs.get('options')
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
c965ada61d36d3b6df57e862c30ed3558819a500 | ab4f552d894678542eb9ad772955088868be61d6 | /tests/vi/test_find_paragraph_text_object.py | 21f661e4d74c6d666bf638708d888c0e6109c571 | [
"MIT"
] | permissive | kemiller/Vintageous | edf6e080cd75621f09583a42376911993e66e760 | 7961b435163117609269866199b765e396799721 | refs/heads/master | 2021-01-22T02:39:58.565311 | 2014-03-26T18:21:41 | 2014-03-26T18:21:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,345 | py | import unittest
from Vintageous.vi.constants import MODE_NORMAL
from Vintageous.vi.constants import _MODE_INTERNAL_NORMAL
from Vintageous.tests import BufferTest
from Vintageous.tests import set_text
from Vintageous.tests import add_sel
from Vintageous.vi.text_objects import find_paragraph_text_object
class Test_find_paragraph_text_object_InInternalNormalMode_Inclusive(BufferTest):
    """find_paragraph_text_object (inclusive form): the region must span the
    paragraph around the caret plus the blank separator line before it."""
    def testReturnsFullParagraph_CountOne(self):
        paragraphs = [
            '\n'.join('line {0} in paragraph {1}'.format(line_no, par_no)
                      for line_no in (1, 2, 3))
            for par_no in (1, 2, 3)
        ]
        set_text(self.view, '\n\n'.join(paragraphs))
        caret = self.R((4, 2), (4, 2))
        add_sel(self.view, caret)
        region = find_paragraph_text_object(self.view, caret)
        # Inclusive: paragraph 2 plus the blank line preceding it.
        self.assertEqual('\n{0}\n'.format(paragraphs[1]), self.view.substr(region))
# def testReturnsWordAndPrecedingWhiteSpace_CountOne(self):
# set_text(self.view, '(foo bar) baz\n')
# r = self.R(5, 5)
# add_sel(self.view, r)
# reg = a_word(self.view, r.b)
# self.assertEqual(' bar', self.view.substr(reg))
# def testReturnsWordAndAllPrecedingWhiteSpace_CountOne(self):
# set_text(self.view, '(foo bar) baz\n')
# r = self.R(8, 8)
# add_sel(self.view, r)
# reg = a_word(self.view, r.b)
# self.assertEqual(' bar', self.view.substr(reg))
class Test_find_paragraph_text_object_InInternalNormalMode_Exclusive(BufferTest):
    """find_paragraph_text_object with inclusive=False: the region must span
    only the paragraph around the caret, without the leading blank line."""
    def testReturnsFullParagraph_CountOne(self):
        paragraphs = [
            '\n'.join('line {0} in paragraph {1}'.format(line_no, par_no)
                      for line_no in (1, 2, 3))
            for par_no in (1, 2, 3)
        ]
        set_text(self.view, '\n\n'.join(paragraphs))
        caret = self.R((4, 2), (4, 2))
        add_sel(self.view, caret)
        region = find_paragraph_text_object(self.view, caret, inclusive=False)
        # Exclusive: paragraph 2 only (trailing newline kept).
        self.assertEqual('{0}\n'.format(paragraphs[1]), self.view.substr(region))
# def testReturnsWordAndPrecedingWhiteSpace_CountOne(self):
# set_text(self.view, '(foo bar) baz\n')
# r = self.R(5, 5)
# add_sel(self.view, r)
# reg = a_word(self.view, r.b)
# self.assertEqual(' bar', self.view.substr(reg))
# def testReturnsWordAndAllPrecedingWhiteSpace_CountOne(self):
# set_text(self.view, '(foo bar) baz\n')
# r = self.R(8, 8)
# add_sel(self.view, r)
# reg = a_word(self.view, r.b)
# self.assertEqual(' bar', self.view.substr(reg))
| [
"guillermo.lopez@outlook.com"
] | guillermo.lopez@outlook.com |
e1c119237af11cc410ea14a8cfaeee5ba13bb0d6 | f1961c86e6da14f35c21d7235f4fc8a89fabdcad | /DailyProgrammer/DP20150909B.py | adb016baa3e8625282dbf878a5baf0903e126cad | [
"MIT"
] | permissive | DayGitH/Python-Challenges | d4930bdd85cd1a977d8f6192775ca956a375fcde | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | refs/heads/master | 2021-01-17T13:01:03.784523 | 2018-06-29T23:49:04 | 2018-06-29T23:49:04 | 58,497,683 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,220 | py | """
[2015-09-09] Challenge #231 [Intermediate] Set Game Solver
https://www.reddit.com/r/dailyprogrammer/comments/3ke4l6/20150909_challenge_231_intermediate_set_game/
Our apologies for the delay in getting this posted, there was some technical difficulties behind the scenes.
# Description
Set is a card game where each card is defined by a combination of four attributes: shape (diamond, oval, or squiggle),
color (red, purple, green), number (one, two, or three elements), and shading (open, hatched, or filled). The object of
the game is to find sets in the 12 cards drawn at a time that are distinct in every way or identical in just one way
(e.g. all of the same color). From Wikipedia: A set consists of three cards which satisfy all of these conditions:
* They all have the same number, or they have three different numbers.
* They all have the same symbol, or they have three different symbols.
* They all have the same shading, or they have three different shadings.
* They all have the same color, or they have three different colors.
The rules of Set are summarized by: If you can sort a group of three cards into "Two of ____ and one of _____," then it
is not a set.
See the [Wikipedia page for the Set game](http://en.wikipedia.org/wiki/Set_(game\))
for for more background.
# Input Description
A game will present 12 cards described with four characters for shape, color, number, and shading: (D)iamond, (O)val,
(S)quiggle; (R)ed, (P)urple, (G)reen; (1), (2), or (3); and (O)pen, (H)atched, (F)illed.
# Output Description
Your program should list all of the possible sets in the game of 12 cards in sets of triplets.
# Example Input
SP3F
DP3O
DR2F
SP3H
DG3O
SR1H
SG2O
SP1F
SP3O
OR3O
OR3H
OR2H
# Example Output
SP3F SR1H SG2O
SP3F DG3O OR3H
SP3F SP3H SP3O
DR2F SR1H OR3O
DG3O SP1F OR2H
DG3O SP3O OR3O
# Challenge Input
DP2H
DP1F
SR2F
SP1O
OG3F
SP3H
OR2O
SG3O
DG2H
DR2H
DR1O
DR3O
# Challenge Output
DP1F SR2F OG3F
DP2H DG2H DR2H
DP1F DG2H DR3O
SR2F OR2O DR2H
SP1O OG3F DR2H
OG3F SP3H DR3O
"""
def main():
    """Entry point placeholder -- the Set-game solver is not implemented."""
    return None
if __name__ == "__main__":
    # Run the (currently empty) solver when invoked as a script.
    main()
| [
"akber91@gmail.com"
] | akber91@gmail.com |
fb5a1c9efd1ca790261ba44636b050b5497a04a2 | 1bf48ef7e7e2bff6f9f7d629282f50281579051e | /compute/wps/tests/test_user_views.py | 962d296c9d3b5128d6e9a6449161e09ef62a2e4d | [] | no_license | maheshguru/esgf-compute-wps | 4316b5f3a0f11912e97d7d4624100d39331675e9 | f1aeaaaacb31f4d1956aef671e22f56862015004 | refs/heads/master | 2021-07-15T06:00:36.283521 | 2017-10-20T11:43:22 | 2017-10-20T11:43:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,498 | py | import random
from wps import models
from .common import CommonTestCase
class UserViewsTestCase(CommonTestCase):
    """Exercises the /auth/user/* endpoints for authenticated and anonymous
    clients.  ``setUp`` provisions a dedicated ``update`` user with ten
    random file and ten random process associations."""
    def setUp(self):
        # Dedicated user (username == password == 'update') with an Auth row.
        self.update = models.User.objects.create_user('update', 'update@gmail.com', 'update')
        models.Auth.objects.create(user=self.update)
        # Attach 10 (possibly repeated) files/processes -- self.files and
        # self.processes are presumably fixtures from CommonTestCase.
        for _ in xrange(10):
            file_obj = random.choice(self.files)
            models.UserFile.objects.create(user=self.update, file=file_obj)
        for _ in xrange(10):
            process_obj = random.choice(self.processes)
            models.UserProcess.objects.create(user=self.update, process=process_obj)
    def test_user_stats_process_auth(self):
        # Authenticated process-stats request reports the 10 associations.
        self.client.login(username='update', password='update')
        response = self.client.get('/auth/user/stats/', {'stat': 'process'})
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['status'], 'success')
        self.assertIn('processes', data['data'])
        self.assertEqual(len(data['data']['processes']), 10)
    def test_user_stats_process(self):
        # Anonymous request to the same endpoint is rejected.
        # NOTE(review): sends {'type': ...} while the authenticated variant
        # sends {'stat': ...}; irrelevant here since auth fails first, but
        # worth confirming which parameter the view actually expects.
        response = self.client.get('/auth/user/stats/', {'type': 'process'})
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['status'], 'failed')
        self.assertEqual(data['error'], 'Unauthorized access')
    def test_user_stats_files_auth(self):
        # Without a stat parameter the endpoint reports file statistics.
        self.client.login(username='update', password='update')
        response = self.client.get('/auth/user/stats/')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['status'], 'success')
        self.assertIn('files', data['data'])
        self.assertEqual(len(data['data']['files']), 10)
    def test_user_stats_files(self):
        # Anonymous file-stats request is rejected.
        response = self.client.get('/auth/user/stats/')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['status'], 'failed')
        self.assertEqual(data['error'], 'Unauthorized access')
    def test_user_details_auth(self):
        # Authenticated detail view exposes the expected account fields.
        self.client.login(username='update', password='update')
        response = self.client.get('/auth/user/')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['status'], 'success')
        expected = ('username', 'openid', 'admin', 'local_init', 'api_key', 'type', 'email')
        for exp in expected:
            self.assertIn(exp, data['data'])
    def test_user_details(self):
        # Anonymous detail request is rejected.
        response = self.client.get('/auth/user/')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['status'], 'failed')
        self.assertEqual(data['error'], 'Unauthorized access')
    def test_update_auth(self):
        # Posted fields must be echoed back in the updated detail payload.
        self.client.login(username='update', password='update')
        params = {
            'email': 'imdifferent@hello.com',
            'openid': 'http://test',
            'password': 'test2'
        }
        response = self.client.post('/auth/update/', params)
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['status'], 'success')
        expected = ('username', 'openid', 'admin', 'local_init', 'api_key', 'type', 'email')
        for exp in expected:
            self.assertIn(exp, data['data'])
            if exp in params:
                self.assertEqual(params[exp], data['data'][exp])
    def test_update(self):
        # Anonymous update attempt is rejected.
        response = self.client.post('/auth/update/')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['status'], 'failed')
        self.assertEqual(data['error'], 'Unauthorized access')
    def test_regenerate_auth(self):
        # The 'test' user (presumably created by CommonTestCase -- confirm)
        # has no api_key yet, so regeneration asks for a prior login.
        self.client.login(username='test', password='test')
        response = self.client.get('/auth/user/regenerate/')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['status'], 'failed')
        self.assertEqual(data['error'], 'Need to generate an api_key, log into either OAuth2 or MyProxyClient')
    def test_regenerate(self):
        # Anonymous regenerate request is rejected.
        response = self.client.get('/auth/user/regenerate/')
        self.assertEqual(response.status_code, 200)
        data = response.json()
        self.assertEqual(data['status'], 'failed')
        self.assertEqual(data['error'], 'Unauthorized access')
| [
"boutte.jason@gmail.com"
] | boutte.jason@gmail.com |
b608ee5c5b21148d68d3abba45f8fa9430dbe861 | bbd18b9e0c68db1c83b2586b16fa5b06fa5eec03 | /.history/login_20201029004936.py | 14f19ea54c8eff8f4341d81cbcd8b02829c4d39b | [] | no_license | tasnimsamin/Game-Develepment-Project | f6fa0057e0e1a48d7cb86e1b57b3f5c4cd4346f5 | b55b2c4599f94c3241e67d45f860eb044a086afc | refs/heads/main | 2023-09-01T15:25:32.383163 | 2021-10-23T07:32:12 | 2021-10-23T07:32:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py |
import mysql.connector
import cgi
# Read the registration form fields posted by the browser.
form = cgi.FieldStorage()
# Fixed: the original bound misleading names (the variable ``email`` held the
# *password*), left a stray bare ``new_pass`` expression (NameError at
# runtime) and inserted undefined names (n, e, p).
name = form.getvalue("name")
email = form.getvalue("emailid")
password = form.getvalue("password")
# NOTE(review): the password is stored in plain text -- hash it (e.g. via
# hashlib) before inserting.
mydb = mysql.connector.connect(
  host="localhost",
  user="root",
  password="",
  database="login"
)
mycursor = mydb.cursor()
# Parameterized INSERT: the driver escapes the values (no SQL injection).
sql = "INSERT INTO reg_tb (name, email, pass) VALUES (%s, %s, %s)"
val = (name, email, password)
mycursor.execute(sql, val)
mydb.commit()
mydb.close()
print(mycursor.rowcount, "record inserted.") | [
"somiyatasnim1086@gmail.com"
] | somiyatasnim1086@gmail.com |
752bd647c99ad94b28ec4f50fb8526f82cda9f88 | a6cba5b8b36f3f4ef80d7351725da0bc8ddbfad4 | /DA/lab6/bench.py | 17f8bcd3fb64be2c7be556c76a309d35a5911e73 | [] | no_license | tutkarma/mai_study | 7de61a406c7c5701ea9bbea7da687cc147653e53 | 39359eb8b5701c752d1f4e8e0b26911e50df12ab | refs/heads/master | 2023-03-15T18:28:05.814809 | 2022-01-18T08:40:39 | 2022-01-18T08:40:39 | 103,191,526 | 38 | 99 | null | 2023-03-04T02:20:21 | 2017-09-11T21:46:56 | Jupyter Notebook | UTF-8 | Python | false | false | 966 | py | import time
class Profiler(object):
    """Context manager that reports the wall-clock time spent in its block."""
    def __enter__(self):
        # Remember when the timed block started.
        self._startTime = time.time()
        # Fixed: return the instance so ``with Profiler() as p:`` binds the
        # profiler object (the original implicitly returned None).
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Print elapsed wall-clock time; returning None (falsy) lets any
        # exception raised inside the block propagate.
        print("Python time: {:.6f} sec.".format(time.time() - self._startTime))
# Read (x, y, operator) triples from stdin until EOF, printing each result;
# the whole run is timed by the Profiler context manager (Python 2 script).
with Profiler() as p:
    while True:
        try:
            x = int(raw_input())
        except EOFError:
            # no more input: stop the benchmark loop
            break
        y = int(raw_input())
        s = raw_input()
        # dispatch table: operator symbol -> binary function
        ops = { '+': lambda a, b: a + b,
            '-': lambda a, b: a - b,
            '*': lambda a, b: a * b,
            '^': lambda a, b: a ** b,
            '/': lambda a, b: a / b,
            '<': lambda a, b: a < b,
            '>': lambda a, b: a > b,
            '=': lambda a, b: a == b }
        res = ops[s](x,y)
        if s in '<>=':
            # comparison results are printed lowercase: 'true' / 'false'
            res = str(res).lower()
        if s in '-' and x < y:
            # task rule: a negative difference is reported as an error
            res = "Error"
        if s in '/' and y == 0:
            # NOTE(review): ops[s](x, y) above already divided, so y == 0
            # raises ZeroDivisionError before this guard can fire.
            res = "Error"
        print(res)
| [
"tutkarma@gmail.com"
] | tutkarma@gmail.com |
059f3dd54481fe72ad1bbce8a7d5f44dd19cae0f | cc3f0afc1e2be41a98bddf97dae155e41c705ed8 | /src/model.py | 1cbe554215a689e5c5924a562d49521ea9622488 | [] | no_license | afcarl/LabelPropagation | 5983b0315a15f6187b63dc3002788355051a9c27 | e40b92793d6366f6d491dae2916da5d1d10a6ef1 | refs/heads/master | 2020-03-22T11:59:22.757170 | 2018-07-02T21:23:20 | 2018-07-02T21:23:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py | import networkx as nx
import random
from tqdm import tqdm
from community import modularity
import pandas as pd
from calculation_helper import overlap, unit, min_norm, normalized_overlap, overlap_generator
from print_and_read import json_dumper
class LabelPropagator:
    """Community detection by weighted label propagation on a NetworkX graph.
    Every node starts in its own community; repeated propagation rounds let
    each node adopt the best-scoring label among its neighbours."""
    def __init__(self, graph, args):
        """
        Store the graph and CLI arguments, initialise per-node labels and
        precompute the edge weights selected by ``args.weighting``.
        """
        self.args = args
        self.seeding = args.seed
        self.graph = graph
        self.nodes = graph.nodes()
        self.rounds = args.rounds
        # initially every node carries its own id as label (singletons)
        self.labels = {node: node for node in self.nodes}
        self.label_count = len(set(self.labels.values()))
        # stays True while the number of distinct labels keeps shrinking
        self.flag = True
        self.weight_setup(args.weighting)
    def weight_setup(self, weighting):
        """
        Choose the neighbourhood-overlap weighting scheme; anything other
        than the three named options falls back to normalized overlap.
        """
        if weighting == "overlap":
            self.weights = overlap_generator(overlap, self.graph)
        elif weighting == "unit":
            self.weights = overlap_generator(unit, self.graph)
        elif weighting == "min_norm":
            self.weights = overlap_generator(min_norm, self.graph)
        else:
            self.weights = overlap_generator(normalized_overlap, self.graph)
    def make_a_pick(self, source, neighbors):
        """
        Return the label with the highest weighted score among the
        neighbours of ``source``; ties are broken uniformly at random.
        """
        scores = {}
        for neighbor in neighbors:
            neighbor_label = self.labels[neighbor]
            if neighbor_label in scores.keys():
                scores[neighbor_label] = scores[neighbor_label] + self.weights[(neighbor,source)]
            else:
                scores[neighbor_label] = self.weights[(neighbor,source)]
        top = [key for key,val in scores.iteritems() if val == max(scores.values())]
        return random.sample(top,1)[0]
    def do_a_propagation(self):
        """
        One full pass: visit every node in shuffled order and update its
        label; clears ``self.flag`` once the label count stops decreasing.
        """
        random.seed(self.seeding)
        random.shuffle(self.nodes)
        for node in tqdm(self.nodes):
            neighbors = nx.neighbors(self.graph, node)
            pick = self.make_a_pick(node, neighbors)
            self.labels[node] = pick
        current_label_count = len(set(self.labels.values()))
        if self.label_count == current_label_count:
            self.flag = False
        else:
            self.label_count = current_label_count
    def do_a_series_of_propagations(self):
        # Iterate until convergence (flag cleared) or the round budget is
        # spent, then report modularity and dump node->label assignments.
        index = 0
        while index < self.rounds and self.flag:
            index = index + 1
            print("Label propagation round: " + str(index))
            self.do_a_propagation()
        print("")
        print("Modularity is: "+ str(round(modularity(self.labels,self.graph),3)) + ".")
        json_dumper(self.labels, self.args.assignment_output)
| [
"noreply@github.com"
] | afcarl.noreply@github.com |
3fb4a9c6b3d63579316a3dc2ff6249b694e620aa | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_360/ch14_2020_03_09_19_11_33_027171.py | a3c691ebb90846fb15c2fdedd68439cc5160589a | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | import math
def calcula_distancia_do_projetil(v, teta, y0):
    """Return the horizontal range of a projectile launched with speed ``v``
    at angle ``teta`` (radians) from initial height ``y0`` (g = 9.8 m/s^2).

    NOTE(review): the term under the square root uses sin(2*teta)**2, while
    the textbook range formula uses sin(teta)**2 -- kept as-is on purpose.
    """
    sin_2teta = math.sin(2 * teta)
    base = (v ** 2) / (2 * 9.8)
    raiz = math.sqrt(1 + (2 * 9.8 * y0) / (v ** 2 * sin_2teta ** 2))
    return base * (1 + raiz) * sin_2teta
| [
"you@example.com"
] | you@example.com |
315870e1acad9706b43b5c1d2b964e5822245fc0 | 99c7690c57a1f7f2da620f1be0efef3e63d27ebd | /collective/bibliotheek/utils/views.py | e8962f8574bca0f7d2f36d9e2aeac6f5afd9ee37 | [] | no_license | andreesg/collective.bibliotheek | 1e21e1601a709b26055fe45a99548462d12390a5 | edef14463cc6330a457e957aab4b4daeb1081894 | refs/heads/master | 2021-01-19T07:54:22.696171 | 2015-11-17T11:03:32 | 2015-11-17T11:03:32 | 37,079,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,009 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from Products.Five import BrowserView
from plone.dexterity.browser.view import DefaultView
from zope.component import getMultiAdapter
from Products.CMFCore.utils import getToolByName
from collective.bibliotheek import MessageFactory as _
from AccessControl import getSecurityManager
from Products.CMFCore.permissions import ModifyPortalContent
from zope.interface import alsoProvides
from .interfaces import IFormWidget
from plone.dexterity.browser.edit import DefaultEditForm
from collective.z3cform.datagridfield.interfaces import IDataGridField
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.component import getUtility
from zope.intid.interfaces import IIntIds
from Acquisition import aq_inner
from zc.relation.interfaces import ICatalog
# # # # # # # # # # # # #
# View specific methods #
# # # # # # # # # # # # #
class LibraryView(DefaultEditForm):
    """Library view: renders the edit-form widgets through the
    ``library_view.pt`` template and exposes helpers used by it."""
    template = ViewPageTemplateFile('../bibliotheek_templates/library_view.pt')
    def update(self):
        """Mark every datagrid widget non-appendable/reorderable and tag it
        with IFormWidget so the template can address it."""
        super(LibraryView, self).update()
        for group in self.groups:
            for widget in group.widgets.values():
                if IDataGridField.providedBy(widget):
                    widget.auto_append = False
                    widget.allow_reorder = True
                    alsoProvides(widget, IFormWidget)
    def checkUserPermission(self):
        """Return True when the current user may modify the context."""
        sm = getSecurityManager()
        # checkPermission returns a truthy/falsy value; normalize to bool.
        return bool(sm.checkPermission(ModifyPortalContent, self.context))
    def getRelatedObjects(self):
        """Return an HTML snippet linking every object whose
        'documentation_documentation' relation points at this context."""
        catalog = getUtility(ICatalog)
        intids = getUtility(IIntIds)
        relations = catalog.findRelations(
            dict(to_id=intids.getId(aq_inner(self.context)),
                 from_attribute="documentation_documentation")
        )
        # Build with list+join instead of quadratic string concatenation;
        # iterating the result set directly avoids materialising a list.
        parts = []
        for rel in relations:
            from_object = rel.from_object
            title = getattr(from_object, 'title', '')
            obj_number = getattr(from_object, 'identification_identification_objectNumber', '')
            url = from_object.absolute_url()
            # NOTE(review): title/obj_number are interpolated unescaped --
            # potential XSS if those fields can contain markup.
            parts.append("<p><a href='%s'><span>%s</span> - <span>%s</span></a></p>" %(url, obj_number, title))
        return "".join(parts)
class BookView(DefaultEditForm):
    """Book view: edit-form based rendering via the ``view.pt`` template."""
    template = ViewPageTemplateFile('../bibliotheek_templates/view.pt')
    def update(self):
        """Configure every datagrid widget for display and tag it with
        IFormWidget so the template can find it."""
        super(BookView, self).update()
        for group in self.groups:
            for widget in group.widgets.values():
                if not IDataGridField.providedBy(widget):
                    continue
                widget.auto_append = False
                widget.allow_reorder = True
                alsoProvides(widget, IFormWidget)
    def checkUserPermission(self):
        """Return True when the current user may modify the context."""
        if getSecurityManager().checkPermission(ModifyPortalContent, self.context):
            return True
        return False
| [
"andreslb1@gmail.com"
] | andreslb1@gmail.com |
1d0cb9faeda11d5e86f331fc416452076f45ff65 | 4b1939ca7ceef7d227b90cd41af709552f3ad913 | /apps/costreport/migrations/0006_auto_20180509_1104.py | 5fbdea3c3a660a19ded5e5a52d3655513ffe599b | [] | no_license | zhuoxiaojian/xadminTest | 45cfa2d4fd5c658b29fc598f342a199695a1adc5 | 76abc02224bcd8526f4f6a41d569a315681e6e6e | refs/heads/master | 2020-03-18T06:24:47.459085 | 2018-05-22T09:40:32 | 2018-05-22T09:40:43 | 134,393,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # Generated by Django 2.0.4 on 2018-05-09 11:04
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: removes the 'exchangeRate'
    # field from both cost report models.
    # Must be applied after 0005_auto_20180509_0945 in the 'costreport' app.
    dependencies = [
        ('costreport', '0005_auto_20180509_0945'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='costdailyreport',
            name='exchangeRate',
        ),
        migrations.RemoveField(
            model_name='costhourreport',
            name='exchangeRate',
        ),
    ]
| [
"1933860854@qq.com"
] | 1933860854@qq.com |
598d348e8a67d2740cd995ebc384ec7e16e08d62 | 3f46af2da32d9f02d1ebbdef6784ece1d64aace3 | /Production/python/PrivateSamples/EMJ_2016_mMed-1500_mDark-20_ctau-500_unflavored-down_cff.py | 6543d851a9367c4a8547408af96fcfa7b6caa887 | [] | no_license | cms-svj/TreeMaker | 53bf4b1e35d2e2a4fa99c13c2c8b60a207676b6d | 0ded877bcac801a2a394ad90ed987a20caa72a4c | refs/heads/Run2_2017 | 2023-07-19T07:14:39.175712 | 2020-10-06T21:10:26 | 2020-10-06T21:10:26 | 305,753,513 | 0 | 0 | null | 2021-01-26T18:58:54 | 2020-10-20T15:32:19 | null | UTF-8 | Python | false | false | 1,892 | py | import FWCore.ParameterSet.Config as cms
# Auto-generated CMSSW source config for the private 2016 EMJ signal sample
# mMed-1500 / mDark-20 / ctau-500 / unflavored-down (10 MINIAOD parts).
# NOTE(review): input = -1 presumably means 'no event limit' -- confirm
# against the CMSSW PoolSource conventions.
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Primary files go in readFiles, secondary (parent) files in secFiles;
# both vstrings are handed to the PoolSource below.
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
# GridFTP URLs of the sample parts on the UMD hadoop store.
readFiles.extend( [
       'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-20_ctau-500_unflavored-down_n-500_part-1.root',
       'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-20_ctau-500_unflavored-down_n-500_part-2.root',
       'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-20_ctau-500_unflavored-down_n-500_part-3.root',
       'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-20_ctau-500_unflavored-down_n-500_part-4.root',
       'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-20_ctau-500_unflavored-down_n-500_part-5.root',
       'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-20_ctau-500_unflavored-down_n-500_part-6.root',
       'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-20_ctau-500_unflavored-down_n-500_part-7.root',
       'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-20_ctau-500_unflavored-down_n-500_part-8.root',
       'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-20_ctau-500_unflavored-down_n-500_part-9.root',
       'gsiftp://hepcms-gridftp.umd.edu//mnt/hadoop/cms/store/group/EMJRunII/2016/step4_MINIAOD_mMed-1500_mDark-20_ctau-500_unflavored-down_n-500_part-10.root',
] )
| [
"enochnotsocool@gmail.com"
] | enochnotsocool@gmail.com |
e3e71fda673200e5b97537a293cf75b13f19ecc9 | e322d01555aebbcf9f23a68fa9160e75d4397969 | /YouCompleteMe/python/ycm/base.py | 1fb73dba383222def2a0e5022f086041dd323b95 | [
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | liqiang0330/i3ForDebian9 | 3b2bb5ce104f25cadab7a57cdc7096fadeb4a9ef | 37a63bdaf18dab847e57d328cdcb678668ab6207 | refs/heads/master | 2022-10-25T13:30:26.723690 | 2018-03-17T05:22:55 | 2018-03-17T05:22:55 | 162,018,419 | 1 | 1 | Apache-2.0 | 2022-10-08T20:30:23 | 2018-12-16T16:11:57 | Python | UTF-8 | Python | false | false | 5,591 | py | # Copyright (C) 2011, 2012 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from future.utils import iteritems
from ycm import vimsupport
from ycmd import user_options_store
from ycmd import identifier_utils
YCM_VAR_PREFIX = 'ycm_'
def BuildServerConf():
  """Collect every ``g:ycm_*`` Vim global into a dict keyed by the option
  name without the ``ycm_`` prefix."""
  server_conf = {}
  # Evaluate each key individually rather than the whole g: dictionary to
  # sidestep unicode conversion issues (see YCM PR #2151).
  for key in vimsupport.GetVimGlobalsKeys():
    if key.startswith( YCM_VAR_PREFIX ):
      option_name = key[ len( YCM_VAR_PREFIX ) : ]
      server_conf[ option_name ] = vimsupport.VimExpressionToPythonType(
          'g:' + key )
  return server_conf
def LoadJsonDefaultsIntoVim():
  """Seed the ``g:ycm_*`` Vim globals from ycmd's default options, without
  overriding anything the user has already set."""
  for option, value in iteritems( user_options_store.DefaultOptions() ):
    vim_name = 'g:ycm_' + option
    if not vimsupport.VariableExists( vim_name ):
      vimsupport.SetVariableValue( vim_name, value )
def CurrentIdentifierFinished():
  """Return True when the character just before the cursor terminates an
  identifier (or the line up to the cursor is pure whitespace)."""
  line, column = vimsupport.CurrentLineContentsAndCodepointColumn()
  if column < 1:
    return True
  regex = identifier_utils.IdentifierRegexForFiletype(
      vimsupport.CurrentFiletypes()[ 0 ] )
  if any( match.end() == column - 1 for match in regex.finditer( line ) ):
    return True
  # An all-whitespace prefix means the identifier was probably finished on
  # the previous line.
  return line[ : column ].isspace()
def LastEnteredCharIsIdentifierChar():
  """Return True when the character just before the cursor can be part of
  an identifier for the current filetype."""
  line, column = vimsupport.CurrentLineContentsAndCodepointColumn()
  if column < 1:
    return False
  filetype = vimsupport.CurrentFiletypes()[ 0 ]
  start = identifier_utils.StartOfLongestIdentifierEndingAtIndex(
      line, column, filetype )
  return start != column
def AdjustCandidateInsertionText( candidates ):
  """Trim completion candidates so text already sitting after the cursor is
  not duplicated on insertion.

  With the buffer 'foo.|bar' (| = cursor), accepting the candidate 'zoobar'
  would naively produce 'foo.zoobar|bar'; trimming the overlapping suffix
  yields 'foo.zoo|bar' instead.  (Jumping the cursor to the end would
  arguably be nicer, but is much harder to implement.)
  """
  text_after_cursor = vimsupport.TextAfterCursor()
  if not text_after_cursor:
    return candidates

  def _Trimmed( word ):
    overlap = OverlapLength( word, text_after_cursor )
    return word[ : -overlap ] if overlap else word

  adjusted = []
  for candidate in candidates:
    if isinstance( candidate, dict ):
      entry = candidate.copy()
      if 'abbr' not in entry:
        entry[ 'abbr' ] = entry[ 'word' ]
      entry[ 'word' ] = _Trimmed( entry[ 'word' ] )
      adjusted.append( entry )
    elif isinstance( candidate, str ) or isinstance( candidate, bytes ):
      adjusted.append( { 'abbr': candidate,
                         'word': _Trimmed( candidate ) } )
  return adjusted
def OverlapLength( left_string, right_string ):
  """Return the length of the longest suffix of ``left_string`` that is
  also a prefix of ``right_string``.

  Example: "foo baro" and "baro zoo" -> 4
  """
  longest_possible = min( len( left_string ), len( right_string ) )
  # Scan candidate overlap sizes from largest to smallest; the first hit is
  # by construction the maximum overlap.
  for size in range( longest_possible, 0, -1 ):
    if left_string[ -size : ] == right_string[ : size ]:
      return size
  return 0
"yuan705791627@gmail.com"
] | yuan705791627@gmail.com |
71f55533202ebe5e7c0a835950498b96cae0052f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/11/usersdata/82/5672/submittedfiles/jogo.py | 49f1ee265f4324d776bb35c4ca3c9c71e3a2c437 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
#ENTRADA
Cv = input ('Digite Cv:')
Ce = input ('Digite Ce:')
Cs = input ('Digite Cs:')
Fv = input ('Digite Fv:')
Fe = input ('Digite Fe:')
Fs = input ('Digite Fs:')
#PROCESSAMENTO E SAIDA | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
877ea68bb77143b94b967841ee6f41341972cde4 | 855501a4cb8a54e0c977d53e6f5d76d8938f99cb | /Constructing a Number.py | b26ef9d2c3ab95b24b1ca05134ede9f326fbeb80 | [] | no_license | Beowulfdgo/HackerRank | 3d7713f68a595af76d857ac9955ae55565b8391f | e4384253f27eee296e0cad39a402cadf47c90164 | refs/heads/master | 2023-05-31T05:30:21.425792 | 2021-06-29T08:47:11 | 2021-06-29T08:47:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | import math
import os
import random
import re
import sys
def canConstruct(a):
    """Return "Yes" when the digits in ``a`` sum to a multiple of 3 (so a
    number divisible by 3 can be constructed from them), else "No"."""
    return "Yes" if sum(a) % 3 == 0 else "No"
if __name__ == '__main__':
    # HackerRank driver: results are written to the file named OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    t = int(input())
    for t_itr in range(t):
        # n (the declared element count) is read but not used by canConstruct
        n = int(input())
        a = list(map(int, input().rstrip().split()))
        result = canConstruct(a)
        fptr.write(result + '\n')
    fptr.close()
| [
"54479676+CormacKrum@users.noreply.github.com"
] | 54479676+CormacKrum@users.noreply.github.com |
aa03d7ec1a53a7b3d047d38856a9896dafd2b159 | 3a9924e0e2f8e68cfed2e70bac79ccb904eac245 | /libertybank/pipelines.py | fdd0d80ea4c95e1d8e9854a06699ebdab809f369 | [] | no_license | daniel-kanchev/libertybank | d3b6909f81187c881f044e009ed9e5baacc4f158 | 446987e29cb4cf3c26e192b6a5bd395d4b965dab | refs/heads/main | 2023-03-27T06:55:06.704287 | 2021-03-12T13:05:48 | 2021-03-12T13:05:48 | 347,069,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | from itemadapter import ItemAdapter
import sqlite3
class DatabasePipeline:
    """Scrapy item pipeline that stores scraped articles in a local SQLite
    database, skipping any item whose title was already saved."""
    # Connection/cursor are class attributes, shared by every instance and
    # opened eagerly when the module is imported.
    conn = sqlite3.connect('libertybank.db')
    c = conn.cursor()
    def open_spider(self, spider):
        """Create the articles table on first use."""
        self.c.execute(""" CREATE TABLE IF NOT EXISTS articles (
            title text,
            date text,
            link text,
            content text
            ) """)
    def process_item(self, item, spider):
        """Insert *item* unless an article with the same title exists."""
        self.c.execute("""SELECT * FROM articles WHERE title = ?""",
                       (item.get('title'), ))
        if self.c.fetchall():
            # Duplicate title: keep the stored row, pass the item through.
            return item
        label = item['link'] if 'link' in item.keys() else item['title']
        print(f"New Article: {label}")
        self.c.execute(
            "INSERT INTO articles (title, date, link, content)"
            " VALUES (?,?,?,?)",
            (item.get('title'), item.get('date'),
             item.get('link'), item.get('content')))
        self.conn.commit()  # commit after every entry
        return item
    def close_spider(self, spider):
        """Flush any pending transaction and release the shared connection."""
        self.conn.commit()
        self.conn.close()
| [
"daniel.kanchev@adata.pro"
] | daniel.kanchev@adata.pro |
0411315a72c4aa485a3042e68566ba52f4cf54f7 | ef0ce2f2dd9ddafa45ec583d23e6a099976e633d | /DataManage.py | 48277ed0b481adb2a4fcf8f27dc264fc54b702b1 | [] | no_license | wystephen/satellite_data_process | 86b718d735f23f9092e6e75b126810531e7b853b | 1ad511be37fe26963904d448ac4035b245dbb22f | refs/heads/master | 2020-05-22T01:43:23.746795 | 2016-10-01T12:56:21 | 2016-10-01T12:56:21 | 65,731,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,696 | py | # -*- coding:utf-8 -*-
# created by steve at 2016 / 08 / 15 20:10
import scipy as sp
import numpy as np
#import theano
#import theano.tensor as T
#import theano.configdefaults
# __float__ = 'float32'
class DataManage:
    """Loads the exported training matrices from the current directory."""

    def __init__(self):
        # Feature matrix and target vector written by the preprocessing step.
        self.x = np.loadtxt("outX.txt")
        self.y = np.loadtxt("outY.txt")

    def get_train_set(self):
        """Return the (features, targets) pair."""
        return self.x, self.y

    def pca(self):
        """Placeholder for a future PCA step; currently only logs."""
        # Fix: `print 'pca'` was Python-2-only syntax; the call form below
        # behaves identically on Python 2 and runs on Python 3 as well.
        print('pca')
#
# def shared_data(X, Y, self):
#
# shared_x = theano.shared(np.asarray(X,dtype=theano.config.floatX), borrow=True)
# shared_y = theano.shared(np.asarray(Y,dtype=theano.config.floatX), borrow=True)
#
# return shared_x, T.cast(shared_y, 'int32')
# def theano_type_data(self, train_precent=0.5, valid_precent=0.3):
# if train_precent + valid_precent > 1.0:
# print (u"train dataset 和 valid dataset 占比必须小于一")
# train_index = int(self.x.shape[0]* train_precent)
# valid_index = int(self.x.shape[0] * (train_precent + valid_precent))
#
# train_x, train_y = self.shared_data(self.x[1:train_index, :], self.y[1:train_index])
# test_x, test_y = self.shared_data(self.x[train_index:valid_index, :], self.y[train_index:valid_index])
# valid_x, valid_y = self.shared_data(self.x[(train_index + valid_index)::],
# self.y[(train_index + valid_index)::])
# return train_x, train_y, test_x, test_y, valid_x, valid_y
if __name__ == '__main__':
    # Smoke test: load the exported matrices and report the feature shape.
    dm = DataManage()
    x, y = dm.get_train_set()
    # Fix: `print x.shape` was Python-2-only; the call form works on 2 and 3.
    print(x.shape)
    #
    #
    # train_x,train_y,test_x,test_y,valid_x,valid_y = dm.theano_type_data()
| [
"551619855@qq.com"
] | 551619855@qq.com |
bb23fb974538fb9d267c05480d0a82d809f3929a | 314245750f897949bc7867883d22b8ff1465fbe1 | /union_find/topolgy_sort.py | dbbd21482663a383caf2f2d6d85eeab285c67a3b | [] | no_license | dongho108/CodingTestByPython | e608d70235cc6c6a27c71eea86ee28d1271d4d1d | 475b3665377a8f74944d7698e894ad3eafc49ad4 | refs/heads/master | 2023-05-24T15:01:56.563359 | 2021-07-01T14:23:20 | 2021-07-01T14:23:20 | 330,833,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | from collections import deque
def topolgy_sort():
    """Kahn's topological sort over the module-level graph; prints the order.

    Relies on the globals ``v`` (vertex count), ``graph`` (adjacency lists)
    and ``indegree`` (1-indexed in-degree table), which it mutates.
    """
    order = []
    # Seed the queue with every vertex that has no incoming edge.
    ready = deque(node for node in range(1, v + 1) if indegree[node] == 0)
    while ready:
        node = ready.popleft()
        order.append(node)
        for nxt in graph[node]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                ready.append(nxt)
    for node in order:
        print(node, end=' ')
# Read the vertex and edge counts, then e directed edges, from stdin.
v, e = map(int, input().split())
indegree = [0] * (v+1)
graph = [[] for i in range(v+1)]
for _ in range(e):
    a, b = map(int, input().split())
    graph[a].append(b)
    indegree[b] += 1
topolgy_sort()
'''
7 8
1 2
1 5
2 3
2 6
3 4
4 7
5 6
6 4
''' | [
"dongho108@naver.com"
] | dongho108@naver.com |
ce014188d3bcd0609073e20ec8af2a585bd49521 | b121b4135f0edf0e39c1ae7343c7df19f56a077f | /prototypes/ModelsAsScriptsWithSpecialVarNames/Deprecated/MvarsAndComputer.py | c95027ea729445dd88271eef8c82d8fc20f12ed0 | [] | no_license | MPIBGC-TEE/bgc-md | 25379c03d2333481bd385211f49aff6351e5dd05 | 8912a26d1b7e404ed3ebee4d4799a3518f507756 | refs/heads/master | 2021-05-08T19:07:46.930394 | 2020-10-21T12:08:53 | 2020-10-21T12:08:53 | 119,548,100 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,112 | py | from sympy import Symbol,Number
from typing import List
from CompartmentalSystems.smooth_reservoir_model import SmoothReservoirModel
from CompartmentalSystems.smooth_model_run import SmoothModelRun
#from CompartmentalSystems import smooth_reservoir_model
from testinfrastructure.helpers import pe
from .Classes import MVar,Computer
from .functions import srm_from_B_u_tens
########## README ###################
# This module defines MVar and Computer instances simultaniously since they
# refer to each other recursively.
# In this way it (implicitly) defines a kind of convex hull or a closure
# which has the consequence that
# the smallest possible consistent change might NOT be a single 'Computer' or a
# single 'MVar'.
# In particular you can NOT add a new Computer to a Mvar's computers set
# before defining the Computer instance.
# This in turn might require you to define new
# Mvars (since they appear as the computers arguments).
# For these you can first rely on the empty Computers list so that there is no
# infinite regress.
# fixme?:
# Since this module is regular python code even the order in which variables
# are defined is not arbitrary. This might become troublesome. Maybe we need
# a more 'lazy' approach than a module containing variables that have to be
# defined in order.
# a) One possibility is to define the Mvars first and then
# 'register' the computers later. One consequence is that the Mvars
# can not be immutable in this approach
# which does not allow caching by functools
#
# b) Another possibility is to define both the 'args; of the
# Computer instance and the 'computers' in a MVar instance
# not as objects but as strings interpreted with respect to a
# dictionary of Mvars or Computers respectively and resolve
# the relationship at runtime. This allows any kind of cross
# referencing even if the variables or computers do not
# exist yet or not at all. The latter possibility must be excluded
# by a consistence check
# fixme?:
# possible convention:
# for Mvars that have a very specific class (like SmoothModelRun ) we could
# call the MVar like the class? The computers act then like constructors of
# this class.
# This raises the question if we make subclasses for all
# MVars (and find the appropriate Computers by their signature)
coord_sys = MVar(name='coord_sys')
state_vector = MVar(name='state_vector')
time_symbol = MVar(name='time_symbol')
compartmental_dyad = MVar(name='compartmental_dyad')
input_vector = MVar(name='input_vector')
parameter_dictionary= MVar(name='parameter_dictionary')
start_vector = MVar(name='start_vector')
time_vector = MVar(name='time_vector')
function_dictionary = MVar(name='function_dictionary')
srm_bu_tens=Computer(
func=srm_from_B_u_tens
,args=[
coord_sys
,state_vector
,time_symbol
,compartmental_dyad
,input_vector
]
,description="""Produces a smoth reservoir model"""
)
smooth_reservoir_model=MVar(
name='smooth_reservoir_model'
,computers=[srm_bu_tens]
,description='A smooth reservroir Model'
)
smr=Computer(
func=SmoothModelRun
,args=[
smooth_reservoir_model
,parameter_dictionary
,start_vector
,time_vector
,function_dictionary
]
,description="""Creates a single instance of a SmoothModelRun"""
)
smooth_model_run_dictionary=MVar(
'smooth_model_run_dictionary'
,computers=[] # at the moment empty list, consequently
# only available when explicitly defined.
# Although automatic computation would be simple
# the keys make most sense if defined by the user
,description= """
The dictionary values are SmoothModelRun objects.
The keys can be used in user code to refer to special
simulations. """
)
smooth_model_run=MVar(
'smooth_model_run'
,computers=[smr]
,description= """A single simulation"""
)
| [
"markus.mueller.1.g@googlemail.com"
] | markus.mueller.1.g@googlemail.com |
4ffd358e3f78a61fd87db9fcb576cc6e32cd8315 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Quantization/trend_MovingMedian/cycle_5/ar_/test_artificial_128_Quantization_MovingMedian_5__0.py | 184a722c83d9481caea039d4a9560b8a736445d0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 269 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generated pyaf benchmark case: 128-point daily series with a MovingMedian
# trend, a 5-step cycle, Quantization transform, no noise/exogenous/AR terms.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 5, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 0);
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
de6c0324f453aab5c78a021a5f58767b145614da | dffb316df646b2d1c4739e30f9547a6860867ccf | /leetcode/_853_CarFleet.py | 672b2e2fbdba11caaeb07d5856fa42d7bd4ca8c1 | [] | no_license | scolphew/leetcode_python | 50051f78085e3dd13a39e51bf50c40db9abb8295 | 86352d3f51ab030afdb7b472a80bc8cab7260c08 | refs/heads/master | 2022-09-13T02:47:05.900375 | 2022-09-07T06:17:14 | 2022-09-07T06:17:14 | 68,989,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | from typing import List
class Solution:
    """LeetCode 853: count the fleets of cars arriving at the target."""

    def carFleet(self, target: int, position: List[int], speed: List[int]) -> int:
        """Return how many distinct fleets reach ``target``.

        Walking from the car nearest the target backwards, a car joins the
        fleet ahead when its arrival time does not exceed that fleet's.
        """
        fleets = 0
        lead_time = 0
        for pos, spd in reversed(sorted(zip(position, speed))):
            arrival = (target - pos) / spd
            if arrival > lead_time:  # cannot catch the fleet ahead
                lead_time = arrival
                fleets += 1
        return fleets
if __name__ == "__main__":
    # Manual check against the LeetCode example (expected answer: 3).
    s = Solution()
    y = s.carFleet(12, [10, 8, 0, 5, 3], [2, 4, 1, 1, 3])
    print(y)
| [
"scolphew@pku.edu.cn"
] | scolphew@pku.edu.cn |
fb0d492e806bef8d22a974b02db14162177a5ec3 | 512a1d6495869dcec5cb0f92483e4740c349b9ef | /revscoring/languages/space_delimited/__init__.py | ff2ae17ae49e1e9c19d82c4a9f86ec11d904a9fe | [
"MIT"
] | permissive | ewulczyn/revscoring | d294b0c86b4e85f210a1f1a00e107893adcb3d4e | 3a7e3cc5ba27fe0cec62c061c08489550ba8096e | refs/heads/master | 2020-12-30T21:42:08.197156 | 2015-12-23T04:18:58 | 2015-12-23T04:18:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | """
.. autoclass:: revscoring.languages.space_delimited.SpaceDelimited
"""
from .space_delimited import SpaceDelimited
# Fix: __all__ must list *names* (strings); listing the class object makes
# `from ... import *` fail and confuses API tooling.
__all__ = ["SpaceDelimited"]
| [
"aaron.halfaker@gmail.com"
] | aaron.halfaker@gmail.com |
2ad5a64103371327b62d06ade1b00e360ec34b63 | 4f510470b3093ab2c60f929221af82c79b121ca7 | /ML/SCIENCE/day07/filter.py | ad62eb2b1306ef010b3846a1ef4d4f35f2804748 | [] | no_license | q737645224/python3 | ce98926c701214f0fc7da964af45ba0baf8edacf | 4bfabe3f4bf5ba4133a16102c51bf079d500e4eb | refs/heads/master | 2020-03-30T07:11:17.202996 | 2018-10-30T06:14:51 | 2018-10-30T06:14:51 | 150,921,088 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,544 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import numpy.fft as nf
import scipy.io.wavfile as wf  # used to read/write .wav files
import matplotlib.pyplot as mp
sample_rate, noised_sigs = wf.read(
    '../data/noised.wav')
print(sample_rate)
print(noised_sigs.shape)
print(noised_sigs.dtype)
# The sound-field amplitude is really a float in (0, 1); it was scaled up by
# 2**15 so the integer file format keeps more significant digits.
noised_sigs = noised_sigs / 2**15  # restore the true amplitude
times = np.arange(len(noised_sigs)) / sample_rate
# The reciprocal of the sample rate is the spacing between sample points.
# Convert from the time domain to the frequency domain:
freqs = nf.fftfreq(times.size, d=1 / sample_rate)
# Turn the time-domain samples into a complex frequency-domain array:
noised_ffts = nf.fft(noised_sigs)
# Magnitude of the complex spectrum:
noised_pows = np.abs(noised_ffts)
# The index of the highest-energy bin gives the fundamental frequency:
fund_freq = freqs[noised_pows.argmax()]
print(fund_freq)
# Zero out every frequency component except the fundamental (the noise):
noised_indices = np.where(
    np.abs(freqs) != fund_freq)
filter_ffts = noised_ffts.copy()
filter_ffts[noised_indices] = 0
# Power spectrum with the noise removed
filter_pows = np.abs(filter_ffts)
# Inverse FFT maps the cleaned spectrum back to the time domain:
filter_sigs = nf.ifft(filter_ffts).real
wf.write('../data/filter.wav', sample_rate,
         (filter_sigs * 2**15).astype(np.int16))
# Four-panel comparison: noised vs. filtered, in time and frequency domain.
mp.figure('Filter', facecolor='lightgray')
mp.subplot(221)
mp.title('Time Domain', fontsize=16)
mp.ylabel('Signal', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(times[:178], noised_sigs[:178],
        c='orangered', label='Noised')
mp.legend()
mp.subplot(222)
mp.title('Frequency Domain', fontsize=16)
mp.ylabel('Power', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.semilogy(freqs[freqs >= 0],
            noised_pows[freqs >= 0],
            c='limegreen', label='Noised')
mp.legend()
mp.subplot(223)
mp.title('Time', fontsize=16)
mp.ylabel('Signal', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(times[:178], filter_sigs[:178],
        c='hotpink', label='Filter')
mp.legend()
mp.subplot(224)
mp.title('Frequency', fontsize=16)
mp.ylabel('Power', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(freqs[freqs >= 0],
        filter_pows[freqs >= 0],
        c='dodgerblue', label='Filter')
mp.legend()
mp.tight_layout()
mp.show()
| [
"764375224@qq.com"
] | 764375224@qq.com |
7b7a9594bde8fd27a8f977cd70136286c78d2279 | 9f5ec0afcd24b8e22b82c912ad7e6d4d9a4afc7c | /scripts/anagram.py | bd9a3f2b88812ec8eb04c65f8e515721a6293b65 | [] | no_license | Angelica137/AnagramPython | 6a51672f71961fbdf873b861eb522627e4c514a5 | 4bafcac2f60b0652320561088d21577e58bf233d | refs/heads/main | 2023-06-11T23:59:45.430464 | 2021-07-09T21:37:50 | 2021-07-09T21:37:50 | 380,818,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | def findAnagram(str, arr):
anagrams = []
for item in range(len(arr)):
if len(arr[item]) == len(str):
sorted_item = sorted(arr[item])
sorted_str = sorted(str)
if sorted_item == sorted_str:
anagrams.append(arr[item])
return anagrams
| [
"Angelica137@users.noreply.github.com"
] | Angelica137@users.noreply.github.com |
4bccc69ad21ca874c6040662089e9b4790183327 | d9955a186b627105e4f606350a942c4fc7ae1fc4 | /inference/InferenceEngine.py | f0ed785c167f9c90dc619f6ac7f86be20a3a3360 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Statici/quantum-fog | ddbf5ea0d982866693f29afd2986ad989be052a5 | 92e1d2a38de92422642545623341ecd9cfb7a39f | refs/heads/master | 2021-01-20T00:46:49.969745 | 2016-08-22T23:50:27 | 2016-08-22T23:50:27 | 64,971,623 | 2 | 0 | null | 2016-08-22T20:36:16 | 2016-08-04T22:35:52 | C++ | UTF-8 | Python | false | false | 1,264 | py | # Most of the code in this file comes from PBNT by Elliot Cohen. See
# separate file in this project with PBNT license.
# from BayesNet import *
class InferenceEngine:
"""
This is the parent class of all inference engines.
Attributes
----------
bnet : BayesNet
do_print : bool
is_quantum : bool
"""
def __init__(self, bnet, do_print=False, is_quantum=False):
"""
Constructor
Parameters
----------
bnet : BayesNet
do_print : bool
is_quantum : bool
Returns
-------
"""
self.bnet = bnet
self.do_print = do_print
self.is_quantum = is_quantum
@staticmethod
def print_annotated_story(annotated_story):
"""
Prints in a pretty way an annotated story, which is a dictionary
mapping all nodes to their current state.
Parameters
----------
annotated_story : dict(BayesNode, int)
Returns
-------
"""
story_line = ""
for node in annotated_story.keys():
story_line += node.name + "="
story_line += str(annotated_story[node]) + ", "
print(story_line[:-2])
if __name__ == "__main__":
    # Trivial smoke test when the module is executed directly.
    print(5)
| [
"tucci@ar-tiste.com"
] | tucci@ar-tiste.com |
b109cbb3b59ba24f923465bbc7ca9f7804513284 | 08cfc4fb5f0d2f11e4e226f12520a17c5160f0a2 | /kubernetes/test/test_v1_self_subject_rules_review_spec.py | 19847b247308a28c7283a7ff6b81087ee9af682a | [
"Apache-2.0"
] | permissive | ex3cv/client-python | 5c6ee93dff2424828d064b5a2cdbed3f80b74868 | 2c0bed9c4f653472289324914a8f0ad4cbb3a1cb | refs/heads/master | 2021-07-12T13:37:26.049372 | 2017-10-16T20:19:01 | 2017-10-16T20:19:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_self_subject_rules_review_spec import V1SelfSubjectRulesReviewSpec
class TestV1SelfSubjectRulesReviewSpec(unittest.TestCase):
    """ V1SelfSubjectRulesReviewSpec unit test stubs """
    # NOTE: swagger-codegen generated stub; regenerate rather than hand-edit
    # so the file stays in sync with the OpenAPI spec.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testV1SelfSubjectRulesReviewSpec(self):
        """
        Test V1SelfSubjectRulesReviewSpec
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1_self_subject_rules_review_spec.V1SelfSubjectRulesReviewSpec()
        pass
if __name__ == '__main__':
unittest.main()
| [
"mehdy@google.com"
] | mehdy@google.com |
b98a1b34a7ad00f4597be6d2f690523fdcb9a5cb | ac42f1d918bdbd229968cea0954ed75250acd55c | /admin/dashboard/openstack_dashboard/enabled/_1020_overview_physical_monitor.py | 952cafdc5166fc655ad0ff6d441507a9a0639d70 | [
"Apache-2.0"
] | permissive | naanal/product | 016e18fd2f35608a0d8b8e5d2f75b653bac7111a | bbaa4cd60d4f2cdda6ce4ba3d36312c1757deac7 | refs/heads/master | 2020-04-03T22:40:48.712243 | 2016-11-15T11:22:00 | 2016-11-15T11:22:00 | 57,004,514 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'physical_monitor'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'overview'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'default'
# If set, it will update the default panel of the PANEL_DASHBOARD.
DEFAULT_PANEL = 'physical_monitor'
ADD_PANEL = ('openstack_dashboard.dashboards.overview.'
'physical_monitor.panel.Physical_Monitor')
| [
"rajagopalx@gmail.com"
] | rajagopalx@gmail.com |
cf68174ebbfb082a395c8ad1f2109ffa4b39f19c | 99aa3c8b292849013200c1af38b6b8e8accd6a27 | /backend/lib/qn.py | c0914a01fc383a051b509547d89bfe1f52392915 | [
"Zlib"
] | permissive | LiangTang1993/Icarus | 368e38bcd2cde72b0a3b546648de0a8896685d88 | c3a4af0f98693a08b850b47ff01091c4e884cc18 | refs/heads/master | 2023-02-23T07:58:44.288959 | 2021-01-23T12:01:05 | 2021-01-23T12:01:05 | 257,802,623 | 1 | 0 | Zlib | 2021-01-23T12:01:06 | 2020-04-22T05:39:00 | null | UTF-8 | Python | false | false | 2,105 | py | import json
import time
import qiniu
import config
q = None
def init():
    """Create the module-level Qiniu auth client from the app config."""
    global q
    q = qiniu.Auth(config.UPLOAD_QINIU_ACCESS_KEY, config.UPLOAD_QINIU_SECRET_KEY)
def get_token(user_id=None, type_name=None):
    """Build a signed Qiniu upload token with a JSON callback policy.

    :param user_id: optional uploader id; embedded in the callback body and
        recorded as the Qiniu ``endUser``.
    :param type_name: upload category, e.g. ``None`` or ``'avatar'``.
    :return: the token string, or ``None`` when uploads are disabled.
    """
    if not config.UPLOAD_ENABLE: return
    # The "$(...)" placeholders are expanded server-side by Qiniu when the
    # callback body is posted back to UPLOAD_QINIU_CALLBACK_URL.
    token = q.upload_token(config.UPLOAD_QINIU_BUCKET, policy={
        'scope': config.UPLOAD_QINIU_BUCKET,
        'saveKey': config.UPLOAD_QINIU_SAVEKEY,
        'deadline': int(time.time()) + config.UPLOAD_QINIU_DEADLINE_OFFSET,
        'callbackUrl': config.UPLOAD_QINIU_CALLBACK_URL,
        'callbackBody': json.dumps({"key": "$(key)", "user_id": user_id,
                                    "type_name": type_name, "size": "$(fsize)", "ext": "$(ext)",
                                    "image_info": {
                                        "format": "$(imageInfo.format)",
                                        "width": "$(imageInfo.width)",
                                        "height": "$(imageInfo.height)",
                                        "colorModel": "$(imageInfo.colorModel)",
                                        "size": "$(imageInfo.size)",
                                    }}),
        'callbackBodyType': 'application/json',
        #'callbackBody': 'key=$(key)&hash=$(etag)&w=$(imageInfo.width)&h=$(imageInfo.height)'
        # f'&user_id={user_id or empty}&type_name={type_name or empty}',
        'fsizeMin': config.UPLOAD_FILE_SIZE_MIN,
        'fsizeLimit': config.UPLOAD_FILE_SIZE_MAX,
        'mimeLimit': config.UPLOAD_QINIU_MIME_LIMIT,
        'endUser': user_id,
    })
    return token
def verify_callback(auth, url: str, body: str):
    """Check that an upload-callback request really came from Qiniu.

    Returns ``None`` when uploads are disabled, otherwise the SDK verdict.
    """
    if not config.UPLOAD_ENABLE: return
    return q.verify_callback(auth, url, body, 'application/json')
def upload_local(token, data, key=None):
    """Upload raw bytes server-side with the given token (key may be None)."""
    if not config.UPLOAD_ENABLE: return
    return qiniu.put_data(token, key, data)
if __name__ == '__main__':
    # Manual smoke test: requires real credentials and a local test.png.
    init()
    t = get_token()
    print(upload_local(t, open('test.png', 'rb').read(), None))
| [
"fy0@qq.com"
] | fy0@qq.com |
8e375c6bc87cb68b0d562ea945ee7f399c35ad21 | d224a781d02a24a2594c7bb9c1c9bb990735f8a8 | /MuMu/test/escale/scaleFitMcTrue.py | 3157b6c28a8843c3e7609acceef8c0fc3f443a96 | [] | no_license | janveverka/JPsi | 0d5cd36f9410aa651d6143cb527e4bce13c48d2b | 14efc5a6f18f36f4eb46f4f1dad748a0fb480aa9 | refs/heads/master | 2021-01-18T17:25:04.545379 | 2014-03-06T15:11:14 | 2014-03-06T15:11:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,792 | py | import copy
import os
import re
import JPsi.MuMu.common.dataset as dataset
import JPsi.MuMu.common.energyScaleChains as esChains
from JPsi.MuMu.common.basicRoot import *
from JPsi.MuMu.common.roofit import *
from JPsi.MuMu.common.plotData import PlotData
from JPsi.MuMu.common.binedges import BinEdges
from JPsi.MuMu.scaleFitter import ScaleFitter
from JPsi.MuMu.scaleFitter import PhoEtBin
from JPsi.MuMu.scaleFitter import Model
from JPsi.MuMu.scaleFitter import DimuonMassMax
from JPsi.MuMu.scaleFitter import subdet_r9_categories
from JPsi.MuMu.scaleFitModels import ws1
gROOT.LoadMacro('tools.C+');
gROOT.LoadMacro("CMSStyle.C")
ROOT.CMSstyle()
## Get the data
## 715/pb for Vg Summer conferences
# _chains = esChains.getChains('v7')
## 2/fb of LP11 dataset
_chains = esChains.getChains('v11')
## Default fit of strue = Ereco / Egen - 1
## Template fitter for s_true = E_reco/E_gen - 1 (in %); it is cloned and
## specialised per detector/R9 category and photon-Et bin further below.
struefit = ScaleFitter(
    name = 'strue_mc_NominalFitRange68',
    ## name = 'strue_mc_FitRangePositive',
    title = 'strue-Fit, Powheg S4',
    labels = ['Powheg S4 Summer11 MC'],
    source = _chains['z'],
    xName = 's',
    xTitle = 's_{true} = E^{#gamma}_{reco}/E^{#gamma}_{gen} - 1',
    xExpression = '100 * (phoE/phoGenE - 1)',
    cuts = ['isFSR', 'phoGenE > 0'],
    xRange = (-50, 100),
    xUnit = '%',
    nBins = 150,
    pdf = 'gauss',
    graphicsExtensions = ['png'],
    massWindowScale = 1.5,
    massWindow = (87.2, 95.2),
    fitScale = 1.2,
    fitRange = (0,50),
    doAutoBinning = True,
    binContentMax = 200,
    binContentMin = 35,
    canvasStyle = 'landscape',
    doAutoXRange = True,
    doAutoXRangeZoom = True,
    doAutoFitRange = True,
    xRangeSigmaLevel = 5,
    xRangeSigmaLevelZoom = 2,
    fitRangeMode = 'Fraction',
    fitRangeSigmaLevel = 2.0,
    fitRangeNumberOfEntries = 3000,
    fitRangeFraction = 0.68,
    paramLayout = (0.57, 0.92, 0.92),
    useCustomChi2Calculator = True,
    )
## Default fit of sgen = Egen / Ekingen - 1
## String building blocks for TTree::Draw expressions:
## "pt,eta,phi,mass" tuples for reco muons, gen muons and the gen photon.
mu1 = 'mu1Pt,mu1Eta,mu1Phi,0.1056'
mu2 = 'mu2Pt,mu2Eta,mu2Phi,0.1056'
mu1gen = 'mu1GenPt,mu1GenEta,mu1GenPhi,0.1056'
mu2gen = 'mu2GenPt,mu2GenEta,mu2GenPhi,0.1056'
phogen = 'phoGenEt,phoGenEta,phoGenPhi,0'
mmMassGen = 'twoBodyMass({mu1}, {mu2})'.format(mu1=mu1gen, mu2=mu2gen)
mmgMassGen = 'threeBodyMass({mu1}, {mu2}, {pho})'.format(mu1=mu1gen,
                                                         mu2=mu2gen,
                                                         pho=phogen)
kRatioGen = 'kRatio({mmgMass}, {mmMass})'.format(mmgMass=mmgMassGen,
                                                 mmMass=mmMassGen)
## ----------------------------------------------------------------------------
## Customize below
struefit.applyDefinitions([DimuonMassMax(80)])
struefits =[]
# models = ('gauss lognormal bifurGauss cbShape gamma'.split() +
#           'cruijff gsh bifurGsh bw sumGaussGauss'.split())
models = 'bifurGauss'.split()
# models += 'sumGaussGauss sumGauss3 sumCruijffGauss sumBwGauss'.split()
## Build one fitter clone per (subdetector, R9) category x Et bin x model.
for subdet_r9_cat in subdet_r9_categories:
    for lo, hi in BinEdges([10, 12, 15, 20, 25, 30, 100]):
    ## for lo, hi in BinEdges([10, 12, 15]):
        for model in models:
            fit = struefit.clone().applyDefinitions([subdet_r9_cat,
                                                     PhoEtBin(lo, hi),
                                                     Model(model)])
            ## Hand-tuned fit-range-coverage tweaks for problematic bins.
            if ('EB_lowR9_PhoEt10-12' in fit.name or
                'EB_lowR9_PhoEt12-15' in fit.name):
                fit.fitRangeFraction -= 0.2
            if 'EE' in fit.name:
                fit.fitRangeFraction += 0.1
                fit.binContentMax = 100
            if fit.fitRangeFraction > 1.:
                fit.fitRangeFraction = 1.
            fit.labels.append('Fit Range Coverage: %.0f%%' %
                              (100 * fit.fitRangeFraction))
            # fit.labels.append('Fit Range: (%.0f, %.0f)%%' % fit.fitRange)
            struefits.append(fit)
_fits = struefits
## Loop over plots
## NOTE(review): Python 2 print statements throughout this script; it is
## meant for a Python-2 PyROOT environment.
for fitter in _fits[:1]:
    ## Log the current fit configuration
    print "++ Processing", fitter.title
    print "++ Configuration:"
    print fitter.pydump()
    ## Get the data
    fitter.getData(ws1)
    ## Load the initial paramter values
    ws1.loadSnapshot(fitter.pdf + '_init')
    ## Make the fit
    fitter.fitToData(ws1)
    fitter.makePlot(ws1)
    ## Save the fit result in the workspace
    ws1.saveSnapshot('sFit_' + fitter.name, fitter.parameters, True)
    ## Make graphics
    if hasattr(fitter, 'graphicsExtensions'):
        for ext in fitter.graphicsExtensions:
            fitter.canvas.Print(fitter.name + '.' + ext)
## <-- loop over fitters
## Print an ASCII report
print '\nASCII report'
is_first_srecofit = True
is_first_struefit = True
is_first_sgenfit = True
is_first_shybfit = True
for plot in _fits:
    if not hasattr(plot, 'niter'):
        continue
    ## Extract the bare name w/o the appended iteration index
    m = re.search('(.*_iter)\d+', plot.name)
    if m:
        bareName = 'sFit_' + m.groups()[0]
    else:
        raise RuntimeError, "Failed to parse fit name `%s'" % plot.name
    ## Only the last iteration of each fit is reported.
    for i in range (plot.niter-1, plot.niter):
        ws1.loadSnapshot( bareName + '%d' % i )
        ## Print the section header once per fit flavour.
        if 'srecofit' in vars() and srecofit.title in plot.title:
            if is_first_srecofit:
                is_first_srecofit = False
                print srecofit.title
        elif 'struefit' in vars() and struefit.title in plot.title:
            if is_first_struefit:
                is_first_struefit = False
                print struefit.title
        elif 'sgenfit' in vars() and sgenfit.title in plot.title:
            if is_first_sgenfit:
                is_first_sgenfit = False
                print sgenfit.title
        elif 'shybfit' in vars() and shybfit.title in plot.title:
            if is_first_shybfit:
                is_first_shybfit = False
                print shybfit.title
        print '%6.2f +/- %4.2f' % ( ws1.var('#Deltas').getVal(),
                                    ws1.var('#Deltas').getError() ),
        if 'srecofit' in vars() and srecofit.title in plot.title:
            print plot.title[len(srecofit.title)+2:],
        elif 'struefit' in vars() and struefit.title in plot.title:
            print plot.title[len(struefit.title)+2:],
        elif 'sgenfit' in vars() and sgenfit.title in plot.title:
            print plot.title[len(sgenfit.title)+2:],
        elif 'shybfit' in vars() and shybfit.title in plot.title:
            print plot.title[len(shybfit.title)+2:],
        else:
            print plot.title,
        print i, "%.3g" % plot.chi2s[i]
## <-- loop over plots
#ws1.writeToFile('test.root')
#wn1.writeToFile('strue_FitRange71.root')
# ws1.writeToFile('strue_FitRangePositive.root')
if __name__ == "__main__":
    ## Pull in the user's PyROOT startup customisations when run directly.
    import user
| [
"jan.veverka@gmail.com"
] | jan.veverka@gmail.com |
384fa5a003ab2be5f14e03715733dc363b158e38 | 5963c12367490ffc01c9905c028d1d5480078dec | /homeassistant/components/atag/sensor.py | 88ccbdc899ff29505cb1a5b11adef88814af021f | [
"Apache-2.0"
] | permissive | BenWoodford/home-assistant | eb03f73165d11935e8d6a9756272014267d7d66a | 2fee32fce03bc49e86cf2e7b741a15621a97cce5 | refs/heads/dev | 2023-03-05T06:13:30.354545 | 2021-07-18T09:51:53 | 2021-07-18T09:51:53 | 117,122,037 | 11 | 6 | Apache-2.0 | 2023-02-22T06:16:51 | 2018-01-11T16:10:19 | Python | UTF-8 | Python | false | false | 2,145 | py | """Initialization of ATAG One sensor platform."""
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import (
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
PRESSURE_BAR,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TIME_HOURS,
)
from . import DOMAIN, AtagEntity
# Friendly entity name -> key of the corresponding field in the ATAG report.
SENSORS = {
    "Outside Temperature": "outside_temp",
    "Average Outside Temperature": "tout_avg",
    "Weather Status": "weather_status",
    "CH Water Pressure": "ch_water_pres",
    "CH Water Temperature": "ch_water_temp",
    "CH Return Temperature": "ch_return_temp",
    "Burning Hours": "burning_hours",
    "Flame": "rel_mod_level",
}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Initialize sensor platform from config entry."""
    coordinator = hass.data[DOMAIN][config_entry.entry_id]
    # One entity per friendly name defined in SENSORS.
    async_add_entities([AtagSensor(coordinator, sensor) for sensor in SENSORS])
class AtagSensor(AtagEntity, SensorEntity):
    """Representation of an ATAG One sensor."""
    def __init__(self, coordinator, sensor):
        """Initialize Atag sensor.

        ``sensor`` is a friendly name from SENSORS; the mapped report key is
        passed to AtagEntity (which presumably derives ``self._id`` from it
        -- confirm against AtagEntity).
        """
        super().__init__(coordinator, SENSORS[sensor])
        self._name = sensor
    @property
    def state(self):
        """Return the state of the sensor."""
        return self.coordinator.data.report[self._id].state
    @property
    def icon(self):
        """Return the icon reported by the ATAG device."""
        return self.coordinator.data.report[self._id].icon
    @property
    def device_class(self):
        """Return the device class, restricted to classes handled here."""
        # Whitelist: anything but pressure/temperature stays unclassified.
        if self.coordinator.data.report[self._id].sensorclass in [
            DEVICE_CLASS_PRESSURE,
            DEVICE_CLASS_TEMPERATURE,
        ]:
            return self.coordinator.data.report[self._id].sensorclass
        return None
    @property
    def unit_of_measurement(self):
        """Return the unit, restricted to units Home Assistant knows."""
        if self.coordinator.data.report[self._id].measure in [
            PRESSURE_BAR,
            TEMP_CELSIUS,
            TEMP_FAHRENHEIT,
            PERCENTAGE,
            TIME_HOURS,
        ]:
            return self.coordinator.data.report[self._id].measure
        return None
| [
"noreply@github.com"
] | BenWoodford.noreply@github.com |
26a7005a6374506fcbe9be9f99e524001f7e608b | 5ada3fcf4dea8f38765529f2c8e73afcdf912a0e | /realestate/realestate/doctype/realestate_partner/realestate_partner.py | df469c2d0b32f28e927b257210bbc0dcb3fc80bc | [
"MIT"
] | permissive | Sysanaung/realestate | 7177ce8d58c3840030f791638e890624d92bac79 | 1533bade2e9ba66925da0688c8e6a511b80b4806 | refs/heads/master | 2022-11-07T04:29:28.999987 | 2019-07-17T11:39:45 | 2019-07-17T11:39:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Jigar Tarpara and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class RealEstatePartner(Document):
	# Frappe document controller: guarantees every partner is backed by a
	# Shareholder record before the partner document is saved.
	def validate(self):
		# Runs on every save; lazily create the linked Shareholder once.
		if not self.shareholder:
			self.create_shareholder()
	def create_shareholder(self):
		# Reuse an existing Shareholder with the same name, otherwise
		# insert a new one titled after the partner.
		if frappe.db.get_value("Shareholder", self.partner_name, "name"):
			shareholder = frappe.get_doc("Shareholder",self.partner_name)
		else:
			shareholder = frappe.get_doc({
				"doctype": "Shareholder",
				"title": self.partner_name
			})
			shareholder.save()
		self.shareholder = shareholder.name
| [
"jigartarpara68@gmail.com"
] | jigartarpara68@gmail.com |
def pageCount(n, p):
    """HackerRank "Drawing Book": fewest page turns to reach page p.

    Pages are bound in pairs (0,1), (2,3), ...; the pair holding page p has
    index p // 2 and the last pair has index n // 2.  Turning from the front
    costs the pair index, turning from the back costs the distance from the
    last pair.  O(1) replacement for the original O(n) pair-list scan
    (which also printed the whole pair list as debug output).
    """
    from_front = p // 2
    from_back = n // 2 - p // 2
    return min(from_front, from_back)
# Manual check: for a 6-page book, reaching page 2 takes one turn.
n = 6
p = 2
print(pageCount(n,p))
"noreply@github.com"
] | jefinagilbert.noreply@github.com |
51b7ac64c13348c0d1e6d7792b7a763459c523dd | 8fd28b248511f42ad8732ca1e574aada33908376 | /tools/data/activitynet/activitynet_feature_extraction.py | ca49030b1f8d61e83d791f720f00c7f9bf64018e | [
"Apache-2.0"
] | permissive | vt-vl-lab/video-data-aug | 28bd175535cab1444055502389c8f5d7d75e4bd2 | 01667cdbd1b952f2510af3422beeeb76e0d9e15a | refs/heads/main | 2023-09-01T02:36:40.034893 | 2021-07-21T01:31:42 | 2021-07-21T01:31:42 | 352,920,339 | 29 | 6 | Apache-2.0 | 2021-07-21T01:29:36 | 2021-03-30T08:06:54 | Jupyter Notebook | UTF-8 | Python | false | false | 3,172 | py | import argparse
import multiprocessing
import os
import os.path as osp
import numpy as np
import scipy.interpolate
from mmcv import dump, load
args = None
def parse_args():
    """Parse the command-line options for the ANet feature-preparation tool.

    Returns:
        argparse.Namespace: ``rgb``/``flow``/``dest`` roots and
        ``output_format`` ('pkl' or 'csv').
    """
    parser = argparse.ArgumentParser(description='ANet Feature Prepare')
    parser.add_argument('--rgb', default='', help='rgb feature root')
    parser.add_argument('--flow', default='', help='flow feature root')
    parser.add_argument('--dest', default='', help='dest root')
    # Bug fix: the help text previously said 'clip length' (copy-paste slip).
    parser.add_argument('--output-format', default='pkl', help='output format')
    args = parser.parse_args()
    return args
def pool_feature(data, num_proposals=100, num_sample_bins=3, pool_type='mean'):
"""Pool features with arbitrary temporal length.
Args:
data (list[np.ndarray] | np.ndarray): Features of an untrimmed video,
with arbitrary temporal length.
num_proposals (int): The temporal dim of pooled feature. Default: 100.
num_sample_bins (int): How many points to sample to get the feature
vector at one timestamp. Default: 3.
pool_type (str): Type of pooling to pool features. Choices are
['mean', 'max']. Default: 'mean'.
Returns:
np.ndarray: The pooled feature with shape num_proposals x feature_dim.
"""
if len(data) == 1:
return np.concatenate([data] * num_proposals)
x_range = list(range(len(data)))
f = scipy.interpolate.interp1d(x_range, data, axis=0)
eps = 1e-4
start, end = eps, len(data) - 1 - eps
anchor_size = (end - start) / num_proposals
ptr = start
feature = []
for i in range(num_proposals):
x_new = [
ptr + i / num_sample_bins * anchor_size
for i in range(num_sample_bins)
]
y_new = f(x_new)
if pool_type == 'mean':
y_new = np.mean(y_new, axis=0)
elif pool_type == 'max':
y_new = np.max(y_new, axis=0)
else:
raise NotImplementedError('Unsupported pool type')
feature.append(y_new)
ptr += anchor_size
feature = np.stack(feature)
return feature
def merge_feat(name):
# concatenate rgb feat and flow feat for a single sample
global args
rgb_feat = load(osp.join(args.rgb, name))
flow_feat = load(osp.join(args.flow, name))
rgb_feat = pool_feature(rgb_feat)
flow_feat = pool_feature(flow_feat)
feat = np.concatenate([rgb_feat, flow_feat], axis=-1)
if args.output_format == 'pkl':
dump(feat, osp.join(args.dest, name))
elif args.output_format == 'csv':
feat = feat.tolist()
lines = []
line0 = ','.join([f'f{i}' for i in range(400)])
lines.append(line0)
for line in feat:
lines.append(','.join([f'{x:.4f}' for x in line]))
with open(osp.join(args.dest, name.replace('.pkl', '.csv')), 'w') as f:
f.write('\n'.join(lines))
def main():
global args
args = parse_args()
rgb_feat = os.listdir(args.rgb)
flow_feat = os.listdir(args.flow)
assert set(rgb_feat) == set(flow_feat)
pool = multiprocessing.Pool(32)
pool.map(merge_feat, rgb_feat)
if __name__ == '__main__':
main()
| [
"zouyuliang123@gmail.com"
] | zouyuliang123@gmail.com |
6308edbe3499a2f7e3f32b7d47e39e54a060541c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_354/ch121_2020_09_30_20_23_29_250621.py | 8e65d0644cab5e9c5511ffdf2df2daca2d613729 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | def subtracao_de_listas(lista,lista_2):
x=[]
i=0
while i<len(lista):
if lista[i] not in lista_2:
x.append(lista[i])
i+=1 | [
"you@example.com"
] | you@example.com |
94993e2f199013b9d5171162aca032c3c92318b2 | 2c32cf726e111b8625265c458feeaea436652e83 | /Dequeue SlidingWindow 2Pointers/longest-substring-ditct.py | 188b786a14f2aa4233bebf7de7800d27e83f7a6e | [] | no_license | minhthe/practice-algorithms-and-data-structures | 6fa3bf98e8e2fe98f4e32419fb797b1df4400364 | 488a82dd3a0c797859a6c9e1195d6d579d676073 | refs/heads/master | 2021-05-16T23:01:20.026475 | 2020-09-23T04:17:13 | 2020-09-23T04:17:13 | 250,505,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | '''
https://leetcode.com/problems/longest-substring-without-repeating-characters/
'''
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
n= len(s)
i, j = 0, 0
mp= {}
rst = 0
while j < n :
if s[j] in mp :
if mp[s[j]] >= i:
rst = max(rst, j-i)
i = mp[s[j]] +1
mp[s[j]] = j
j +=1
return max(rst, j - i) | [
"minhthe.007@gmail.com"
] | minhthe.007@gmail.com |
4d4138236173a28fb7ee5d33f4886b2c6e65e352 | 8f61d6ae3a80eb6c6d45aab55d9e73df402446fe | /kate3/tick/views/pages.py | e21cbce516d4c67768652b2a9fae0717fcc254a7 | [
"MIT"
] | permissive | katemsu/kate_website | 2047314598e215b0e8b3d3d71b21b4c70df36213 | 9e6912156fe7ce07a13f54009ff1823b3558784d | refs/heads/master | 2021-01-16T20:25:16.264407 | 2013-11-02T20:14:40 | 2013-11-02T20:14:40 | 14,073,589 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | import locale
from django.shortcuts import render_to_response
from django.template import RequestContext
from tick.forms.search import FullSearchForm
from tick.models import Announcement, Notice, Resource
def index(request):
if request.GET.has_key('keyword'):
form = FullSearchForm(request.GET)
else:
form = FullSearchForm()
context = {
'announcement': Announcement.objects.latest('created_at'),
'notice': Notice.objects.latest('created_at'),
'form': form,
}
return render_to_response('tick/pages/index.haml',
context,
context_instance=RequestContext(request))
def about(request):
locale.setlocale(locale.LC_ALL, "")
count = Resource.public_objects.count()
# If the count is greater than 100, we round down to the nearest 100.
# That way we can say, "we have over 3100 resources" when we have 3102.
if count > 100:
count = int(count/100) * 100
count = locale.format('%d', count, True)
return render_to_response('tick/pages/about.haml',
{'count': count},
context_instance=RequestContext(request))
def news(request):
announcement = Announcement.objects.latest('created_at')
return render_to_response('tick/pages/news.haml',
{'announcement': announcement},
context_instance=RequestContext(request))
def prizes(request):
notice = Notice.objects.latest('created_at')
return render_to_response('tick/pages/prizes.haml',
{'notice': notice},
context_instance=RequestContext(request)) | [
"smizell@gmail.com"
] | smizell@gmail.com |
181f0fbc190ed9f3ca1a01fc34fb4a24065c5c74 | 9dfb3372a1e4516d970a6e9d0a9fd8360580eae7 | /python pySerial/recod_data.py | 8115b2f04ac3935e59eb5c92a1782fec05d6cf85 | [] | no_license | clambering-goat/cameron_pyton | d1cd0e7b04da14e7ba4f89dcb4d973f297a4626c | df0b0365b86e75cfcfc2c1fc21608f1536a3b79f | refs/heads/master | 2021-07-14T20:37:37.021401 | 2019-02-28T07:52:11 | 2019-02-28T07:52:11 | 137,251,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | import serial
fire=open("data_logg pot","w")
with serial.Serial('COM5', 115200, timeout=2) as ser:
while 1 :
line =ser.readline()
try:
print(line)
x=line.decode("utf-8")
print(x)
#number= int(x[0])
#print(number+5)
fire.write(x)
except:
print("error")
fire.close()
| [
"camerondrain@gmail.com"
] | camerondrain@gmail.com |
753834b7599898d89d18a2e02d6f483fe8286888 | db891dc74972e7ccf2ad1a8dabe8de5cbb93c024 | /honduras_invoices/models/financial_res_percent.py | da2a07095fdf493a5335862d707b2a9b50ff3eed | [] | no_license | LuisMalave2001/GarryTesting | 2a439b1ad6d1216f64e3e766028750a0bc9e4db0 | f83efeb54e22313e8b533036ff4a5befa5d3a59b | refs/heads/main | 2023-05-04T06:54:17.069804 | 2021-05-25T13:41:22 | 2021-05-25T13:41:22 | 336,295,640 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class FinacialResponsabilityPercent(models.Model):
_name = "honduras_invoices.financial.res.percent"
_description = "Realted model to finance responsabilty"
partner_id = fields.Many2one("res.partner", string="Customer", domain=[("is_family", "=", False)])
partner_family_ids = fields.Many2many(related="partner_id.family_ids")
family_id = fields.Many2one("res.partner", required=True, string="Family", domain=[("is_family", "=", True), ('is_company', '=', True)])
category_id = fields.Many2one("product.category", required=True, string="Category", domain=[("parent_id", "=", False)])
percent = fields.Integer("Percent")
@api.onchange('family_id')
def _get_family_domain(self):
self.ensure_one()
family_ids = self.partner_id.family_ids.ids
return {'domain':{'family_id':[('id', 'in', family_ids)]}}
| [
"LuisAngelMalaveMora@gmail.com"
] | LuisAngelMalaveMora@gmail.com |
70910c24b69d0d292914de5bafbc50249e53430a | 6268a19db5d7806b3a91d6350ec2777b3e13cee6 | /old_stuff/code/mlcv_exp_04/setup/list_eyth_bbox.py | d1e638d1397bd41847958a02070ca19a967950e5 | [] | no_license | aaronlws95/phd_2019 | 3ae48b4936f039f369be3a40404292182768cf3f | 22ab0f5029b7d67d32421d06caaf3e8097a57772 | refs/heads/master | 2023-03-22T14:38:18.275184 | 2021-03-21T11:39:29 | 2021-03-21T11:39:29 | 186,387,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,477 | py | import sys
import numpy as np
from pathlib import Path
from tqdm import tqdm
sys.path.append(str(Path(Path(__file__).resolve()).parents[1]))
from src import ROOT
from src.utils import *
def list_eyth_bbox():
img_root = Path(ROOT)/'datasets'/'eyth_dataset'
train_bbox_list = []
val_bbox_list = []
test_bbox_list = []
train_img_list = []
val_img_list = []
test_img_list = []
pad = 10
img_path = img_root/'images'
mask_path = img_root/'masks'
split = ['test', 'train', 'val']
for spl in split:
with open(img_root/'train-val-test-split'/'{}.txt'.format(spl)) as f:
lines = f.readlines()
for l in tqdm(lines):
mask_file = str(mask_path/l).strip()
if l.split('/')[0] == 'vid6':
mask_file = mask_file.replace('.jpg', '.png')
else:
mask_file = mask_file.replace('.jpg', '')
mask = cv2.imread(mask_file, 0)
large_comp, small_comp = seperate_2_blobs_in_mask(mask)
bbox = get_bbox_from_mask(large_comp, pad=pad)
bbox_large = bbox.copy()
if np.sum(bbox) != 0:
if spl == 'test':
test_bbox_list.append(bbox)
test_img_list.append(l.strip())
elif spl == 'val':
val_bbox_list.append(bbox)
val_img_list.append(l.strip())
elif spl == 'train':
train_bbox_list.append(bbox)
train_img_list.append(l.strip())
bbox = get_bbox_from_mask(small_comp, pad=pad)
if np.sum(bbox) != 0:
if spl == 'test':
test_bbox_list.append(bbox)
test_img_list.append(l.strip())
elif spl == 'val':
val_bbox_list.append(bbox)
val_img_list.append(l.strip())
elif spl == 'train':
train_bbox_list.append(bbox)
train_img_list.append(l.strip())
print(str(img_path/l).strip())
fig, ax = plt.subplots(2, 2)
img = cv2.imread(str(img_path/l).strip())[:, :, ::-1]
ax[0, 0].imshow(large_comp)
ax[0, 1].imshow(small_comp)
ax[1, 0].imshow(mask)
ax[1, 1].imshow(img)
draw_bbox(ax[1, 1], bbox_large)
draw_bbox(ax[1, 1], bbox)
plt.show()
# parent_dir = Path(__file__).absolute().parents[1]
# np.savetxt(parent_dir/'data'/'labels'/'eyth_bbox_pad{}_train.txt'.format(pad), train_bbox_list)
# np.savetxt(parent_dir/'data'/'labels'/'eyth_bbox_pad{}_val.txt'.format(pad), val_bbox_list)
# np.savetxt(parent_dir/'data'/'labels'/'eyth_bbox_pad{}_test.txt'.format(pad), test_bbox_list)
# with open(parent_dir/'data'/'labels'/'eyth_img_train.txt', 'w') as f:
# for i in train_img_list:
# f.write("%s\n" %i)
# with open(parent_dir/'data'/'labels'/'eyth_img_val.txt', 'w') as f:
# for i in val_img_list:
# f.write("%s\n" %i)
# with open(parent_dir/'data'/'labels'/'eyth_img_test.txt', 'w') as f:
# for i in test_img_list:
# f.write("%s\n" %i)
if __name__ == '__main__':
list_eyth_bbox() | [
"aaronlws95@gmail.com"
] | aaronlws95@gmail.com |
2b76ff04dbf7f5cecbc35d380272ebd9d4a8c375 | 99dcb18a9e3ea367272f740b8cbf3c34285a0c08 | /samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_metadata_service_query_artifact_lineage_subgraph_sync.py | e703c3aca28bc9594604c0ddde2ea92e49a58f6a | [
"Apache-2.0"
] | permissive | googleapis/python-aiplatform | 926a4873f35dbea15b2fd86c0e16b5e6556d803e | 76b95b92c1d3b87c72d754d8c02b1bca652b9a27 | refs/heads/main | 2023-08-19T23:49:02.180075 | 2023-08-19T13:25:59 | 2023-08-19T13:27:27 | 298,017,988 | 418 | 240 | Apache-2.0 | 2023-09-14T21:08:33 | 2020-09-23T15:43:39 | Python | UTF-8 | Python | false | false | 1,613 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for QueryArtifactLineageSubgraph
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLineageSubgraph_sync]
from google.cloud import aiplatform_v1beta1
def sample_query_artifact_lineage_subgraph():
# Create a client
client = aiplatform_v1beta1.MetadataServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.QueryArtifactLineageSubgraphRequest(
artifact="artifact_value",
)
# Make the request
response = client.query_artifact_lineage_subgraph(request=request)
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_MetadataService_QueryArtifactLineageSubgraph_sync]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
182f3d3394c13b1ca271c3d46b89c5c1a8924f4e | 22bcb68759d516eea70d18116cd434fcd0a9d842 | /scrap/indiaplaza_mobiles_scrap.py | aae94f98723536b182ea318ce9c69b0a404817ab | [] | no_license | lovesh/abhiabhi-web-scrapper | 1f5da38c873fea74870d59f61c3c4f52b50f1886 | b66fcadc56377276f625530bdf8e739a01cbe16b | refs/heads/master | 2021-01-01T17:16:51.577914 | 2014-10-18T15:56:42 | 2014-10-18T15:56:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,880 | py | import downloader
import dom
import re
import urllib2
import math
import datetime
import pymongo
import requests
import sys
siteurl='http://www.indiaplaza.com'
dl=downloader.Downloader()
dl.addHeaders({'Origin':siteurl,'Referer':siteurl})
if len(sys.argv)>1:
proxy={'http':sys.argv[1]}
else:
proxy={'http':'92.62.161.205:8080'}
brand_urls={}
brands=('apple','blackberry','dell','htc','karbonn','micromax','nokia','samsung','sony-ericsson','videocon','sansui','lg','spice','motorola','iball','alcatel','rage','wynncom','gfive','sony','salora')
for brand in brands:
brand_urls[brand]='%s/%s-mobiles-4.htm'%(siteurl,brand)
def getPhonesFromBrandPage(url=None,string=None):
mobiles=[]
mobile_block_path='//div[@class="skuRow"]'
if string:
page=dom.DOM(string=string)
else:
page=dom.DOM(url=url)
mobile_blocks=page.getNodesWithXpath(mobile_block_path)
img_path='.//div[@class="skuImg"]/a/img'
name_path='.//div[@class="skuName"]/a'
price_path='.//div[@class="ourPrice"]/span'
shipping_path='.//span[@class="delDateQuest"]'
name_junk_pattern=re.compile('.+(\(.+\)).*')
features_path='.//div[@class="col2"]/ul/li'
for mobile_block in mobile_blocks:
mobile={}
mobile['img_url']={'0':mobile_block.xpath(img_path)[0].get('src')}
name=mobile_block.xpath(name_path)[0].text.encode('ascii','ignore').strip()
junk=name_junk_pattern.search(name)
if junk:
junk=junk.group(1)
name=name.replace(junk,'').strip()
mobile['name']=name
mobile['url']=siteurl+mobile_block.xpath(name_path)[0].get('href')
price_string=mobile_block.xpath(price_path)[0].text
mobile['price']=int(re.search('(\D)+(\d+)',price_string).group(2))
shipping=re.search('Ships In (\d+)',mobile_block.xpath(shipping_path)[0].text)
if shipping:
mobile['shipping']=(shipping.group(1),)
mobile['availability']=1
feature_nodes=mobile_block.xpath(features_path)
features=[]
if feature_nodes:
for node in feature_nodes:
features.append(node.text.strip())
mobile['features']=features
if junk:
mobile['features'].append(junk.strip(')('))
mobile['last_modified_datetime']=datetime.datetime.now()
product_history={}
if 'price' in mobile:
product_history['price']=mobile['price']
if 'shipping' in mobile:
product_history['shipping']=mobile['shipping']
product_history['availability']=mobile['availability']
product_history['datetime']=mobile['last_modified_datetime']
mobile['product_history']=[product_history,]
mobile['site']='indiaplaza'
mobiles.append(mobile)
return mobiles
def getPhonesOfBrand(brand_url,get_details=False):
html=requests.get(url=brand_url,proxies=proxy).content
first_page=dom.DOM(string=html)
brand_validity_path='//div[@id="ContentPlaceHolder1_SpecificValuesHolder_ctl00_ErrorDiv"]'
brand_validity=first_page.getNodesWithXpath(brand_validity_path)
if len(brand_validity)==0:
return {}
if len(brand_validity)>0:
if brand_validity[0].text.strip()=='':
return {}
brand=re.search('.com/(.+)-mobiles-.',brand_url).group(1)
mobiles=[]
mobiles.extend(getPhonesFromBrandPage(string=first_page.html))
count_path='//div[@class="prodNoArea"]'
count_string=first_page.getNodesWithXpath(count_path)[0].text
count=int(re.search('Showing.+of (\d+)',count_string).group(1))
if count>20:
num_pages=int(math.ceil(count/20.0))
page_urls=[brand_url+'?PageNo='+str(n) for n in xrange(2,num_pages+1)]
dl.putUrls(page_urls)
result=dl.download(proxy=proxy)
for r in result:
status=result[r][0]
html=result[r][1]
if status > 199 and status < 400:
mobiles.extend(getPhonesFromBrandPage(string=html))
for mobile in mobiles:
mobile['brand']=brand
return mobiles
def scrapAllPhones():
f=open('indiaplaza_mobiles_log.txt','w')
mobiles=[]
for brand in brand_urls:
mobiles.extend(getPhonesOfBrand(brand_urls[brand]))
f.write("Got mobiles of brand %s\n"%brand)
f.flush()
return mobiles
def insertIntoDB(log=True):
con=pymongo.Connection('localhost',27017)
db=con['abhiabhi']
mobile_coll=db['scraped_mobiles']
inserted_count=0
updated_count=0
inserted_urls=[]
updated_urls=[]
mobiles=scrapAllPhones()
for mobile in mobiles:
try:
mobile_coll.insert(mobile,safe=True)
inserted_count+=1
inserted_urls.append(mobile['url'])
except pymongo.errors.DuplicateKeyError:
upd={'last_modified_datetime':datetime.datetime.now()}
if 'availability' in mobile:
upd['availability']=mobile['availability']
if 'price' in mobile:
upd['price']=mobile['price']
if 'shipping' in mobile:
upd['shipping']=mobile['shipping']
if 'offer' in mobile:
upd['offer']=mobile['offer']
else:
upd['offer']=''
mobile_coll.update({'url':mobile['url']},{'$push':{'product_history':mobile['product_history'][0]},'$set':upd})
updated_count+=1
updated_urls.append(mobile['url'])
if log:
scrap_log=db['scrap_log']
log={'siteurl':siteurl,'datetime':datetime.datetime.now(),'product':'mobile','products_updated_count':updated_count,'products_inserted_count':inserted_count,'products_updated_urls':updated_urls,'products_inserted_urls':inserted_urls}
scrap_log.insert(log)
print "%d inserted and %d updated"%(inserted_count,updated_count)
if __name__=='__main__':
insertIntoDB()
| [
"lovesh.bond@gmail.com"
] | lovesh.bond@gmail.com |
e141c74f463c7c6d63d3321d328ca9745c3e4756 | aabd50c4ee53c97d33daaf6240f3a26a5e4f61c5 | /src/274.h-index.py | 47ae94d8cfdb198f71a236073076867e3e088435 | [
"MIT"
] | permissive | wisesky/LeetCode-Practice | 6d955086803c9dffa536c867f9e889af12bbcca5 | 65549f72c565d9f11641c86d6cef9c7988805817 | refs/heads/master | 2021-12-15T22:33:14.140804 | 2021-12-06T09:26:42 | 2021-12-06T09:26:42 | 188,990,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,216 | py | #
# @lc app=leetcode id=274 lang=python3
#
# [274] H-Index
#
# https://leetcode.com/problems/h-index/description/
#
# algorithms
# Medium (36.63%)
# Likes: 978
# Dislikes: 1578
# Total Accepted: 209.1K
# Total Submissions: 569.5K
# Testcase Example: '[3,0,6,1,5]'
#
# Given an array of integers citations where citations[i] is the number of
# citations a researcher received for their i^th paper, return compute the
# researcher's h-index.
#
# According to the definition of h-index on Wikipedia: A scientist has an index
# h if h of their n papers have at least h citations each, and the other n − h
# papers have no more than h citations each.
#
# If there are several possible values for h, the maximum one is taken as the
# h-index.
#
#
# Example 1:
#
#
# Input: citations = [3,0,6,1,5]
# Output: 3
# Explanation: [3,0,6,1,5] means the researcher has 5 papers in total and each
# of them had received 3, 0, 6, 1, 5 citations respectively.
# Since the researcher has 3 papers with at least 3 citations each and the
# remaining two with no more than 3 citations each, their h-index is 3.
#
#
# Example 2:
#
#
# Input: citations = [1,3,1]
# Output: 1
#
#
#
# Constraints:
#
#
# n == citations.length
# 1 <= n <= 5000
# 0 <= citations[i] <= 1000
#
#
#
from typing import List
# @lc code=start
class Solution:
def hIndex(self, citations: List[int]) -> int:
"""
逆排序 数组, 分别有 引用次数递减序列 和 大于等于当前引用次数递增序列
more: 1,2,3,4,5 递增
citation: 6,5,3,1,0 递减
第一次出现 more > citaiton 的指针的前一个 more 即所求
最后,遍历所有 citaitons 仍然未出现more>citaiton的终止条件,
那么当前的more即所求,相当于citations的末尾就是 终止条件,
"""
citations.sort(reverse=True)
for i in range(len(citations)):
more = i+1
citation = citations[i]
if more > citation:
return i # more-1
return more
# @lc code=end
so = Solution()
nums = [1,2,3,4,5,6,7,8]
# nums = [100]
nums = [0,0,4,4]
# nums = [0,1,3,5,6]
nums = [1,7,9 ,4]
print(so.hIndex(nums)) | [
"wisesky1988@gmail.com"
] | wisesky1988@gmail.com |
2eb91786a782bff5522dde86e862095d33a68bcf | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1719708/homework01/program01.py | 0da1cb303e2e24f07abf1255c1a5385a203cdcf3 | [] | no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,113 | py | from math import sqrt
def modi(ls, k):
numeri_primi = []
for i in range(len(ls)-1, -1, -1):
if ls[i]%2:
divisori = dividi_dispari(ls, ls[i], k)
else:
divisori = dividi(ls, ls[i], k)
numeri_primi = primi(divisori, numeri_primi, ls[i])
delete(divisori, k, ls, i)
return numeri_primi
def dividi(ls, i, k):
divisori = []
j = 1
limit = round(sqrt(i))
return calcolo(i, j, limit, divisori, 1)
def dividi_dispari(ls, i, k):
divisori = []
j = 1
limit = round(sqrt(i))
return calcolo(i, j, limit, divisori, 2)
def primi(div, lista, num):
if not div:
lista = [num] + lista
else:
lista = lista
return lista
def delete(div, cond, lista, index):
temp = 1
while div:
temp *= div.count(div[0]) + 1
div = [x for x in div if x != div[0]]
if temp - 2 != cond:
del lista[index]
def calcolo(i, j, limit, divisori, aumento):
temp = j
while i > 1 and temp <= limit + 1:
temp += aumento
if not i%temp:
i = int(i/temp)
divisori += [temp]
temp = j
limit = round(sqrt(i))
elif limit -1 <= temp <= limit + 1 and divisori:
divisori += [i]
i = 1
return divisori | [
"a.sterbini@gmail.com"
] | a.sterbini@gmail.com |
c3a739e44490170fa9fa12ee27486ee8cd5250c9 | e6c0683afc2a3d48ada10ffa9f7d257e7c64589e | /purity_fb/purity_fb_1dot6/models/alert_watcher_response.py | aed12f53706b8c1cc4220336ba9c68a9a3ac03c6 | [
"Apache-2.0"
] | permissive | unixtreme/purity_fb_python_client | 9a5a0375f4505421974aadc674ed04982c2bf84f | e836afe9804ffa99f74bf4b5202f181c3c04d9df | refs/heads/master | 2020-04-24T14:53:56.977344 | 2019-02-22T12:37:45 | 2019-02-22T12:37:45 | 172,042,713 | 0 | 0 | NOASSERTION | 2019-02-22T10:05:44 | 2019-02-22T10:05:44 | null | UTF-8 | Python | false | false | 4,206 | py | # coding: utf-8
"""
Purity//FB REST Client
Client for Purity//FB REST API (1.0 - 1.6), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.6
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class AlertWatcherResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[AlertWatcher]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None):
"""
AlertWatcherResponse - a model defined in Swagger
"""
self._pagination_info = None
self._items = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""
Gets the pagination_info of this AlertWatcherResponse.
pagination information, only available in GET requests
:return: The pagination_info of this AlertWatcherResponse.
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""
Sets the pagination_info of this AlertWatcherResponse.
pagination information, only available in GET requests
:param pagination_info: The pagination_info of this AlertWatcherResponse.
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""
Gets the items of this AlertWatcherResponse.
a list of alert watcher objects
:return: The items of this AlertWatcherResponse.
:rtype: list[AlertWatcher]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this AlertWatcherResponse.
a list of alert watcher objects
:param items: The items of this AlertWatcherResponse.
:type: list[AlertWatcher]
"""
self._items = items
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, AlertWatcherResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"azaria.zornberg@purestorage.com"
] | azaria.zornberg@purestorage.com |
7a437e033fa374701948c46eed2ab67e87f8d419 | bc0c3598eb9d78962a67b3c15f930c24919f4277 | /pykalman/tests/test_unscented.py | 2dae7841b2046c0b4e0795198f8fbafc3b447c06 | [
"BSD-3-Clause"
] | permissive | gtfierro/pykalman | 3515ce78e1e4a753442225923c8c614ff4647c43 | 6208109585eb3d7a68ef0cf4d524c9c60c8a0c9b | refs/heads/master | 2020-12-25T08:38:26.821391 | 2012-10-28T23:55:26 | 2012-10-29T00:46:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,563 | py | import inspect
import numpy as np
from numpy import ma
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_true
from pykalman import AdditiveUnscentedKalmanFilter, UnscentedKalmanFilter
from pykalman.datasets import load_robot
data = load_robot()
def build_unscented_filter(cls):
'''Instantiate the Unscented Kalman Filter'''
# build transition functions
A = np.array([[1, 1], [0, 1]])
C = np.array([[0.5, -0.3]])
if cls == UnscentedKalmanFilter:
f = lambda x, y: A.dot(x) + y
g = lambda x, y: C.dot(x) + y
elif cls == AdditiveUnscentedKalmanFilter:
f = lambda x: A.dot(x)
g = lambda x: C.dot(x)
else:
raise ValueError("How do I make transition functions for %s?" % (cls,))
x = np.array([1, 1])
P = np.array([[1, 0.1], [0.1, 1]])
Q = np.eye(2) * 2
R = 0.5
# build filter
kf = cls(f, g, Q, R, x, P, random_state=0)
return kf
def check_unscented_prediction(method, mu_true, sigma_true):
'''Check output of a method against true mean and covariances'''
Z = ma.array([0, 1, 2, 3], mask=[True, False, False, False])
(mu_est, sigma_est) = method(Z)
mu_est, sigma_est = mu_est[1:], sigma_est[1:]
assert_array_almost_equal(mu_true, mu_est, decimal=8)
assert_array_almost_equal(sigma_true, sigma_est, decimal=8)
def check_dims(n_dim_state, n_dim_obs, n_func_args, kf_cls, kwargs):
kf = kf_cls(**kwargs)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
kf._initialize_parameters()
)
assert_true(
transition_functions.shape == (1,)
if not 'transition_functions' in kwargs
else (len(kwargs['transition_functions']),)
)
assert_true(
all([len(inspect.getargspec(f).args) == n_func_args
for f in transition_functions])
)
assert_true(transition_covariance.shape == (n_dim_state, n_dim_state))
assert_true(
observation_functions.shape == (1,)
if not 'observation_functions' in kwargs
else (len(kwargs['observation_functions']),)
)
assert_true(
all([len(inspect.getargspec(f).args) == n_func_args
for f in observation_functions])
)
assert_true(observation_covariance.shape == (n_dim_obs, n_dim_obs))
assert_true(initial_state_mean.shape == (n_dim_state,))
assert_true(
initial_state_covariance.shape == (n_dim_state, n_dim_state)
)
def test_unscented_sample():
kf = build_unscented_filter(UnscentedKalmanFilter)
(x, z) = kf.sample(100)
assert_true(x.shape == (100, 2))
assert_true(z.shape == (100, 1))
def test_unscented_filter():
# true unscented mean, covariance, as calculated by a MATLAB ukf_predict3
# and ukf_update3 available from
# http://becs.aalto.fi/en/research/bayes/ekfukf/
mu_true = np.zeros((3, 2), dtype=float)
mu_true[0] = [2.35637583900053, 0.92953020131845]
mu_true[1] = [4.39153258583784, 1.15148930114305]
mu_true[2] = [6.71906243764755, 1.52810614201467]
sigma_true = np.zeros((3, 2, 2), dtype=float)
sigma_true[0] = [[2.09738255033564, 1.51577181208054],
[1.51577181208054, 2.91778523489934]]
sigma_true[1] = [[3.62532578216913, 3.14443733560803],
[3.14443733560803, 4.65898912348045]]
sigma_true[2] = [[4.3902465859811, 3.90194406652627],
[3.90194406652627, 5.40957304471697]]
check_unscented_prediction(
build_unscented_filter(UnscentedKalmanFilter).filter,
mu_true, sigma_true
)
def test_unscented_smoother():
# true unscented mean, covariance, as calculated by a MATLAB urts_smooth2
# available in http://becs.aalto.fi/en/research/bayes/ekfukf/
mu_true = np.zeros((3, 2), dtype=float)
mu_true[0] = [2.92725011530645, 1.63582509442842]
mu_true[1] = [4.87447429684622, 1.6467868915685]
mu_true[2] = [6.71906243764755, 1.52810614201467]
sigma_true = np.zeros((3, 2, 2), dtype=float)
sigma_true[0] = [[0.993799756492982, 0.216014513083516],
[0.216014513083516, 1.25274857496387]]
sigma_true[1] = [[1.57086880378025, 1.03741785934464],
[1.03741785934464, 2.49806235789068]]
sigma_true[2] = [[4.3902465859811, 3.90194406652627],
[3.90194406652627, 5.40957304471697]]
check_unscented_prediction(
build_unscented_filter(UnscentedKalmanFilter).smooth,
mu_true, sigma_true
)
def test_additive_sample():
kf = build_unscented_filter(AdditiveUnscentedKalmanFilter)
(x, z) = kf.sample(100)
assert_true(x.shape == (100, 2))
assert_true(z.shape == (100, 1))
def test_additive_filter():
# true unscented mean, covariance, as calculated by a MATLAB ukf_predict1
# and ukf_update1 available from
# http://becs.aalto.fi/en/research/bayes/ekfukf/
mu_true = np.zeros((3, 2), dtype=float)
mu_true[0] = [2.3563758389014, 0.929530201358681]
mu_true[1] = [4.39153258609087, 1.15148930112108]
mu_true[2] = [6.71906243585852, 1.52810614139809]
sigma_true = np.zeros((3, 2, 2), dtype=float)
sigma_true[0] = [[2.09738255033572, 1.51577181208044],
[1.51577181208044, 2.91778523489926]]
sigma_true[1] = [[3.62532578216869, 3.14443733560774],
[3.14443733560774, 4.65898912348032]]
sigma_true[2] = [[4.39024658597909, 3.90194406652556],
[3.90194406652556, 5.40957304471631]]
check_unscented_prediction(
build_unscented_filter(AdditiveUnscentedKalmanFilter).filter,
mu_true, sigma_true
)
def test_additive_smoother():
    # true unscented mean, covariance, as calculated by a MATLAB urts_smooth1
    # available in http://becs.aalto.fi/en/research/bayes/ekfukf/
    mu_true = np.array(
        [[2.92725011499923, 1.63582509399207],
         [4.87447429622188, 1.64678689063005],
         [6.71906243585852, 1.52810614139809]], dtype=float)
    sigma_true = np.array(
        [[[0.99379975649288, 0.21601451308325],
          [0.21601451308325, 1.25274857496361]],
         [[1.570868803779, 1.03741785934372],
          [1.03741785934372, 2.49806235789009]],
         [[4.39024658597909, 3.90194406652556],
          [3.90194406652556, 5.40957304471631]]], dtype=float)
    check_unscented_prediction(
        build_unscented_filter(AdditiveUnscentedKalmanFilter).smooth,
        mu_true, sigma_true
    )
def test_unscented_initialize_parameters():
    # Each case: expected (n_dim_state, n_dim_obs) inferred from the kwargs.
    cases = [
        (1, 1, {'transition_functions': [lambda x, y: x, lambda x, y: x]}),
        (3, 5, {'n_dim_state': 3, 'n_dim_obs': 5}),
        (1, 3, {'observation_covariance': np.eye(3)}),
        (2, 1, {'initial_state_mean': np.zeros(2)}),
    ]
    for n_dim_state, n_dim_obs, kwargs in cases:
        check_dims(n_dim_state, n_dim_obs, 2, UnscentedKalmanFilter, kwargs)
def test_additive_initialize_parameters():
    # Same dimension-inference cases as the non-additive variant, but the
    # additive filter's transition functions take a single argument.
    cases = [
        (1, 1, {'transition_functions': [lambda x: x, lambda x: x]}),
        (3, 5, {'n_dim_state': 3, 'n_dim_obs': 5}),
        (1, 3, {'observation_covariance': np.eye(3)}),
        (2, 1, {'initial_state_mean': np.zeros(2)}),
    ]
    for n_dim_state, n_dim_obs, kwargs in cases:
        check_dims(n_dim_state, n_dim_obs, 1, AdditiveUnscentedKalmanFilter, kwargs)
| [
"duckworthd@gmail.com"
] | duckworthd@gmail.com |
e61fdb0f57c7b67676b9c5468a45e6b8c5f7dea7 | fa76868608739eb514c7bf9cb3be6ca1a0283409 | /l6-threading+mulitprocessing/lesson/examples/03_queue_proc.py | 2c7d0c4e5bd56bcdef0e6333fc0767299c0fa076 | [] | no_license | k1r91/course2 | efa4b200f19798275251d1b737613cf4560e3f47 | a4b0413030e17d37406feb8f58314356e3ab15e3 | refs/heads/master | 2021-08-16T04:04:26.796036 | 2018-10-23T17:04:54 | 2018-10-23T17:04:54 | 135,111,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,885 | py |
# ----------------- Пример работы с очередью в процессах -----------------------
# Модуль multiprocessing поддерживает два основных способа взаимодействия процессов:
# каналы и очереди. Оба способа реализованы на основе механизма передачи сообщений.
# Интерфейс очередей очень близко имитирует интерфейс очередей в многопоточных программах.
import multiprocessing as mp
import time
from random import choice, randint
from lesson.app_log import get_logger
# Producer: a gnome that digs up treasures and puts them on the queue.
def gnome(out_q, treasures):
    """Endlessly dig up 1-10 random treasures per round, then rest briefly."""
    logger = get_logger('queue_proc', 'queue_proc.log')
    while True:
        # Dig up a random number of random treasures this round.
        for _ in range(randint(1, 10)):
            out_q.put(choice(treasures))
        logger.info('Гном. Немного поработал, немного отдохну...')
        time.sleep(0.1)
# Consumer of the gnomes' labour.
def gnome_king(in_q):
    """Collect treasures from the queue until the None sentinel arrives."""
    logger = get_logger('queue_proc', 'queue_proc.log')
    hoard = []
    while True:
        # Fetch the next item from the queue (blocks until one is available).
        item = in_q.get()
        if item is None:
            logger.info('Король. Упс, меня попросили выйти'.format(item))
            break
        logger.info('Король. Гном принёс мне {}'.format(item))
        # Stash the treasure away.
        hoard.append(item)
    return hoard
if __name__ == '__main__':
    treasure = ('золото', 'серебро', 'алмазы', 'рубины')
    # Create the queue and start both processes.
    # Queue([maxsize]) creates a queue for message passing between processes.
    # When called without an argument, the queue size is unbounded.
    # Internally, queues are built on top of pipes and locks (Lock).
    q = mp.Queue()
    # Both workers are daemons, so they cannot outlive the main process.
    p1 = mp.Process(target=gnome_king, args=(q,), daemon=True)
    p2 = mp.Process(target=gnome, args=(q, treasure), daemon=True)
    print('Запускаем гномью работу')
    p1.start()
    p2.start()
    time.sleep(5)
    print('Проверим, сколько гномы накопили богатств...')
    # Kill the producer, then push the None sentinel so the consumer exits.
    p2.terminate()
    q.put(None)
    print(p1)
| [
"cherkasov.kirill@gmail.com"
] | cherkasov.kirill@gmail.com |
dfe036a346baca08e61fdadb0ecfc8241c735f87 | 71946cf7e285a74b087f316170fe1f3dbb8d0247 | /nscms/contrib/block/migrations/0001_initial.py | c357dda7b5a0328f0079af222cced4826e2ccd87 | [] | no_license | mauler/nscms | 8964e8113351676dc5ebf1d7ecb0622c5c434640 | 5795f568880c7b11f12291d7d9e2581f0d1cd46d | refs/heads/master | 2020-06-06T09:19:16.946139 | 2015-02-27T01:21:12 | 2015-02-27T01:21:12 | 2,143,897 | 7 | 1 | null | 2018-03-05T17:25:23 | 2011-08-02T17:20:10 | Python | UTF-8 | Python | false | false | 2,538 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration 0001: create (forwards) / drop (backwards) the
    ``block_block`` table backing the Block model."""

    def forwards(self, orm):
        # Adding model 'Block'
        db.create_table(u'block_block', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django_extensions.db.fields.AutoSlugField')(allow_duplicates=False, max_length=255, separator=u'-', blank=True, populate_from='title', overwrite=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
            ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
            ('published', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('publish_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('expire_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('content', self.gf('ckeditor.fields.RichTextField')()),
        ))
        db.send_create_signal(u'block', ['Block'])

    def backwards(self, orm):
        # Deleting model 'Block'
        db.delete_table(u'block_block')

    # Frozen ORM state that South replays when running this migration.
    models = {
        u'block.block': {
            'Meta': {'object_name': 'Block'},
            'content': ('ckeditor.fields.RichTextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'expire_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '255', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'title'", 'overwrite': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    complete_apps = ['block']
"proberto.macedo@gmail.com"
] | proberto.macedo@gmail.com |
class Solution:
    def maximumEvenSplit(self, finalSum: int) -> List[int]:
        """Split finalSum into the longest list of distinct positive even ints.

        Works on half the sum: greedily take 1, 2, 3, ... while the remainder
        can still support a strictly larger next term; otherwise absorb the
        whole remainder into the final (largest) term.  Doubling each part
        yields distinct even numbers summing to finalSum.

        Time and space: O(Q) where Q = len(answer) = O(sqrt(finalSum)),
        since 1 + 2 + ... + Q <= finalSum / 2.
        Greedy proof: exchange argument.
        """
        if finalSum % 2 == 1:  # an odd total can never be a sum of evens
            return []
        remaining = finalSum // 2
        parts = []
        next_part = 1
        while remaining > 0:
            if remaining < 2 * next_part + 1:
                # Too little left for this part plus a larger one:
                # fold everything into one final term.
                take = remaining
            else:
                take = next_part
                next_part += 1
            remaining -= take
            parts.append(take * 2)
        return parts
| [
"nphamcs@gmail.com"
] | nphamcs@gmail.com |
class UnionFind():
    """Disjoint-set forest with path compression and union by size.

    ``parents[r]`` holds -(component size) when r is a root, otherwise the
    index of r's parent.
    """

    def __init__(self, n):
        self.n = n
        self.parents = [-1] * n

    def find(self, x):
        # Recursively locate the root, compressing the path on the way back.
        if self.parents[x] < 0:
            return x
        root = self.find(self.parents[x])
        self.parents[x] = root
        return root

    def union(self, x, y):
        root_x = self.find(x)
        root_y = self.find(y)
        if root_x == root_y:
            return
        # Sizes are stored negated, so the "greater" parent is the smaller tree;
        # always hang the smaller tree under the larger one.
        if self.parents[root_x] > self.parents[root_y]:
            root_x, root_y = root_y, root_x
        self.parents[root_x] += self.parents[root_y]
        self.parents[root_y] = root_x

    def same(self, x, y):
        return self.find(x) == self.find(y)
# Read vertex count N and edge count M.
N, M = map(int, input().split())
# Index 0 is left unused so vertices can be addressed 1..N directly.
uf = UnionFind(N+1)
for i in range(M):
    # Each line gives an edge x-y; the third value z is ignored here
    # (presumably an edge weight irrelevant to connectivity).
    x, y, z = map(int, input().split())
    uf.union(x, y)
# Count connected components among vertices 1..N: exactly one root
# (negative parents entry) exists per component.
ans = 0
for i in range(1, N+1):
    if uf.parents[i] < 0:
        ans += 1
print(ans)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
487e3f8a3d1c94d9bd44e46c17e6fb0a73e57dc9 | 4684afb5a2fc22e9a08681c11618aaadc1332ac7 | /scrape.py | f1ffe4f2f73519e2a952506a4a6ff7697c39b782 | [] | no_license | cinger007/scraperwikiscraper | 602df42fbe56937adbba94723ea4eb3e6eef6983 | 167de4ba8c2d966540f5ab3c02a92b9b744802df | refs/heads/master | 2020-06-05T17:31:39.553411 | 2012-03-13T03:33:25 | 2012-03-13T03:33:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | import scraperwiki
# Bootstrap the gasp_helper module: presumably only the ScraperWiki sandbox
# exposes scraperwiki.utils, so fall back to a plain import elsewhere.
try:
    swutils = scraperwiki.utils
except AttributeError:
    # Running outside the sandbox: import the helper module directly.
    import gasp_helper
else:
    # Inside the sandbox: pull the helper in through swimport.
    # NOTE(review): swimport's return value is discarded here, so the
    # `gasp_helper` name used below is only bound if swimport injects it
    # into the module namespace — confirm against scraperwiki docs.
    swutils.swimport("gasp_helper")

API_KEY = '' # sunlight api key here
BIOGUIDE_ID = '' # bioguide id here
gasp = gasp_helper.GaspHelper(API_KEY, BIOGUIDE_ID)
| [
"twneale@gmail.com"
] | twneale@gmail.com |
106014fbf30bb841dfd2f73b0e88623015aa5a57 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /czLhTsGjScMTDtZxJ_17.py | f64a6b297b19b1ee24ac164ac7d57b03bf080afa | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py |
def primorial(n):
L = []
i =2;
while len(L) != n:
if isPrime(i):
L.append(i);
i+=1;
else:
i+=1;
res = 1;
for i in L:
res *= i;
return res;
def isPrime(X):
if X<=1:
return False
C=0;
n = 1;
while n<X+1:
if X % n ==0:
C+=1;
n+=1;
else:
n+=1;
if C>2:
return False;
return True;
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
c706787b3533642049b19e4f9b489e4ce9a45563 | 401f8b9b137f5f99cfeff3d63eb5a1fb619df2b4 | /tests.py | c8dddb4a3c57716dc71024eba93f25b3e8c771dd | [] | no_license | goodtune/pypin | bacfe14033bc16bf9aa69a0861330ed14e960b78 | 93cc447f929c5bc4e5fa375022426b8c4bbaa2b8 | refs/heads/master | 2023-04-10T17:50:13.640735 | 2013-06-29T01:02:02 | 2013-06-29T01:02:02 | 11,039,863 | 0 | 0 | null | 2023-03-16T12:48:18 | 2013-06-28T22:32:51 | Python | UTF-8 | Python | false | false | 478 | py | import os
import pypin
import unittest2 as unittest
class SimpleTest(unittest.TestCase):
    """Basic smoke tests for the pypin API wrapper."""

    def setUp(self):
        # The API key comes from the environment; an empty key still lets
        # the client object be constructed.
        api_key = os.environ.get('API_KEY', '')
        self.api = pypin.API(api_key=api_key, debug=True)

    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        self.assertEqual(2, 1 + 1)

    def test_list_charges(self):
        response = self.api.list_charges()
        self.assertIsInstance(response, dict)
| [
"gary@touch.asn.au"
] | gary@touch.asn.au |
28d133235f6c9ecf29f44cb0390a5b2b9cf713a8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02813/s470897253.py | 09dcaa047890a2daf5941bee75dd3d46acc8c6e7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | import itertools # accumulate, compress, permutations(nPr), combinations(nCr)
# import bisect # bisect_left(insert位置), bisect_right(slice用)
import math # factorical(階乗) # hypot(距離)
# import heapq
# from fractions import gcd # Python3.5以前 # lcm(最小公倍数) = (a*b)//gcd(a,b)
# from fractions import Fraction
# from math import gcd # Python3.6以降
# --------------------------------------------------------------
# Read the permutation length and the two permutations P and Q (as tokens).
n = int(input())
p = list(input().split())
q = list(input().split())
# Sorting gives the lexicographically smallest arrangement to enumerate from.
nlis = sorted(p)
cntp = 0
cntq = 0
# Enumerate every permutation of the sorted tokens in lexicographic order,
# recording the 0-based rank at which P and Q each appear.
# NOTE(review): this is O(n!) work — acceptable only for small n.
for idx, i in enumerate(itertools.permutations(nlis,n)):
    if list(i)==p:
        cntp = idx
    if list(i)==q:
        cntq = idx
# Answer: distance between the two lexicographic ranks.
print(abs(cntp-cntq))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c01ef0275c9f1c8ff27ea3988bf8ef973424588e | 0afd765c0a3c06e6c893782fc8bd9d5bd4eac20d | /synchronized_ppo_cnn_ubuntu/action_group.py | 0c15e88118153b1db11c6d6573da07659fbff266 | [] | no_license | chagmgang/synch_pysc2 | fdcb2bbb36c81af6ac2c31183b02f26aee33d739 | 57ca1e533446b1ed61c4d3d432d47d293148b6be | refs/heads/master | 2020-03-19T15:40:24.573995 | 2018-07-02T05:36:35 | 2018-07-02T05:36:35 | 136,680,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py | import sys
from absl import flags
from pysc2.env import sc2_env
from pysc2.lib import actions, features
import matplotlib.pyplot as plt
import numpy as np
# Define the constant
# Screen feature-layer indices used to slice observation tensors.
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index
# Values matched against observation channel 0 to locate units.
# NOTE(review): assumes that channel carries player_relative codes where
# 1 = own units; the meaning of 16 for `neutral` is not evident from this
# file — confirm against the pysc2 feature docs.
friendly = 1
neutral = 16
_SELECTED_UNIT = features.SCREEN_FEATURES.selected.index
# Function ids used when issuing actions.
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_NO_OP = actions.FUNCTIONS.no_op.id
_RALLY_UNITS_SCREEN = actions.FUNCTIONS.Rally_Units_screen.id
# Common argument payloads for FunctionCall.
_SELECT_ALL = [0]
_NOT_QUEUED = [0]
def no_operation(obs):
    """Return a pysc2 no-op FunctionCall; `obs` is unused but kept so every
    action helper shares the same signature."""
    return actions.FunctionCall(actions.FUNCTIONS.no_op.id, [])
def move_unit(obs, mode): # mode= 1,2,3,4 & up,down,left,right
    """Issue a Move_screen action shifting the friendly units' centroid 10 px.

    The 32x32x2 observation's channel 0 is matched against `friendly` to find
    own units; their mean position is moved by the mode's offset and clipped
    to the 0..31 screen bounds.
    """
    screen = obs.reshape(32, 32, 2)[:, :, 0]
    unit_ys, unit_xs = (screen == friendly).nonzero()
    center_x, center_y = np.mean(unit_xs), np.mean(unit_ys)
    if mode == 1:    # up
        shift_x, shift_y = 0, -10
    elif mode == 2:  # down
        shift_x, shift_y = 0, 10
    elif mode == 3:  # left
        shift_x, shift_y = -10, 0
    elif mode == 4:  # right
        shift_x, shift_y = 10, 0
    dest_x = np.clip(center_x + shift_x, 0, 31)
    dest_y = np.clip(center_y + shift_y, 0, 31)
    return actions.FunctionCall(_MOVE_SCREEN, [_NOT_QUEUED, [dest_x, dest_y]])
def actAgent2Pysc2(i, obs):
    """Translate an agent action index into a pysc2 FunctionCall.

    Indices 0-3 map to directional moves (up/down/left/right via move_unit
    modes 1-4); index 100 selects the entire army.
    """
    direction_by_index = {0: 1, 1: 2, 2: 3, 3: 4}
    if i in direction_by_index:
        action = move_unit(obs, direction_by_index[i])
    elif i == 100:
        action = actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])
    return action
"chagmgang@gmail.com"
] | chagmgang@gmail.com |
46f2f310fbc0c07fadacd6eba2c75cad0fb40d84 | 8e69eee9b474587925e22413717eb82e4b024360 | /v1.0.0.test/toontown/classicchars/DistributedWitchMinnieAI.py | 4c22d30c5313eefaf317368a95e2b72b7fc9b58e | [
"MIT"
] | permissive | TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | from direct.directnotify import DirectNotifyGlobal
from toontown.classicchars.DistributedMickeyAI import DistributedMickeyAI
class DistributedWitchMinnieAI(DistributedMickeyAI):
    """AI-side distributed object for the Witch Minnie classic character.

    Inherits all behavior from DistributedMickeyAI; only the notify logging
    category is specific to this class.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedWitchMinnieAI')
"s0mberdemise@protonmail.com"
] | s0mberdemise@protonmail.com |
0a210420d6cd53cbe71c9fa1b58c31264153cd56 | f3b233e5053e28fa95c549017bd75a30456eb50c | /jnk1_input/U24/24-34_wat_20Abox/set_3.py | 48123a4bd5e0dbd1a508f8cf4cec9332df7af81c | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | import os
# Root of this TI run; the templates with an XXX lambda placeholder live
# under files/.  NOTE(review): `dir` shadows the builtin dir().
dir = '/mnt/scratch/songlin3/run/jnkl/L124/wat_20Abox/ti_one-step/24_34/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_3.in'
temp_pbs = filesdir + 'temp_3.pbs'
# Lambda windows; each has a working directory named "%6.5f" of its value.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    #prodin: copy the production-input template, substitute lambda for XXX
    prodin = workdir + "%6.5f_prod_3.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    #PBS: same treatment for the batch-submission script
    pbs = workdir + "%6.5f_3.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #submit pbs (submission left disabled on purpose)
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
353d08b11d2294b2637451f68d6092ee7a1dfa06 | 40e7f2734676250b985cd18b344329cea8a7c42b | /smoke-tests/images/servlet/src/main/docker/websphere/changePort.py | 4f71d65c355072fc4a1cc8f3ffebdb1ad070ecde | [
"Apache-2.0"
] | permissive | open-telemetry/opentelemetry-java-instrumentation | d6f375148715f8ddb2d4a987c0b042cb6badbfa2 | b0a8bd4f47e55624fb3942e34b7a7051d075f2a5 | refs/heads/main | 2023-09-01T15:10:10.287883 | 2023-09-01T08:07:11 | 2023-09-01T08:07:11 | 210,933,087 | 1,349 | 689 | Apache-2.0 | 2023-09-14T18:42:00 | 2019-09-25T20:19:14 | Java | UTF-8 | Python | false | false | 391 | py | # change port from 9080 to 8080
# wsadmin (Jython) script: AdminTask / AdminConfig / AdminControl are injected
# by the WebSphere scripting environment, not imported here.
# Change the WC_defaulthost endpoint of server1 from port 9080 to 8080.
AdminTask.modifyServerPort('server1', '[-nodeName ' + AdminControl.getNode() + ' -endPointName WC_defaulthost -host * -port 8080 -modifyShared true]')
# add new port to default virtual host
AdminConfig.create('HostAlias', AdminConfig.getid('/Cell:' + AdminControl.getCell() + '/VirtualHost:default_host/'), '[[port "8080"] [hostname "*"]]')
# Persist the configuration changes.
AdminConfig.save()
"noreply@github.com"
] | open-telemetry.noreply@github.com |
class Poly:
    """A polynomial in one variable.

    Terms are stored in ``self.terms`` as a dict mapping power -> coefficient;
    zero-coefficient terms are dropped, so the zero polynomial has an empty
    dict.  The arithmetic/iteration protocol methods at the bottom are still
    unimplemented stubs.
    """

    def __init__(self, *terms):
        # __str__ uses the name self.terms for the dictionary of terms
        # So __init__ should build this dictionary from terms
        self.terms = {}
        for v, k in terms:
            assert type(v) in [int, float], "Poly.__init__: Coefficient must be an int or float" + str(v)
            assert type(k) is int and k >= 0, "Poly.__init__: Power must be int whose value is >= 0" + str(k)
            assert k not in self.terms.keys(), "Poly.__init__: Power cannot be used more than once."
            self.coefficient = v
            self.power = k
            self.terms.update({self.power: self.coefficient})
        # Drop zero-coefficient terms.  (The previous version called
        # dict.popitem() while iterating the values, which removed an
        # *arbitrary* entry — not the zero one — and mutated the dict
        # during iteration.)
        self.terms = {p: c for p, c in self.terms.items() if c != 0}

    # I have written str(...) because it is used in the bsc.txt file and
    # it is a bit subtle to get correct. Notice that it assumes that
    # every Poly object stores a dict whose keys are powers and whose
    # associated values are coefficients. This function does not depend
    # on any other method in this class being written correctly.
    def __str__(self):
        def term(c,p,var):
            return (str(c) if p == 0 or c != 1 else '') +\
                   ('' if p == 0 else var+('^'+str(p) if p != 1 else ''))
        if len(self.terms) == 0:
            return '0'
        else:
            return ' + '.join([term(c,p,'x') for p,c in sorted(self.terms.items(),reverse=True)]).replace('+ -','- ')

    def __repr__(self):
        # Render each term as a (coefficient, power) tuple argument so that
        # eval(repr(p)) reconstructs an equal Poly.  (The previous version
        # emitted (power, coefficient) pairs with no separators, which was
        # not evaluable.)
        return 'Poly(' + ','.join(str((c, p)) for p, c in self.terms.items()) + ')'

    def __len__(self):
        """Return the degree: the highest power present, 0 for the zero Poly."""
        # (The previous version indexed a sorted power list and raised
        # IndexError for the zero polynomial.)
        if not self.terms:
            return 0
        return max(self.terms.keys())

    def __call__(self, arg):
        """Evaluate the polynomial at arg and return the numeric result."""
        if type(arg) not in [int, float]:
            raise TypeError("Poly.__call__: arg must be int or float. arg = ", type(arg))
        # Sum c * arg**p over all stored terms; an empty dict yields 0.
        return sum(c * arg ** p for p, c in self.terms.items())

    # The remaining protocol methods are intentionally unimplemented stubs.
    def __iter__(self):
        pass

    def __getitem__(self, index):
        pass

    def __setitem__(self, index, value):
        pass

    def __delitem__(self, index):
        pass

    def _add_term(self, c, p):
        pass

    def __add__(self, right):
        pass

    def __radd__(self, left):
        pass

    def __mul__(self, right):
        pass

    def __rmul__(self, left):
        pass

    def __eq__(self, right):
        pass
if __name__ == '__main__':
    # Some simple tests; you can comment them out and/or add your own before
    # the driver is called.
    print('Start simple tests')
    p = Poly((3,2),(-2,1), (4,0))
    print('  For Polynomial: 3x^2 - 2x + 4')
    print('  str(p):',p)
    print('  repr(p):',repr(p))
    print('  len(p):',len(p))
    print('  p(2):',p(2))
    # The calls below exercise protocol methods that are still stubs.
    # print('  list collecting iterator results:',[t for t in p])
    # print('  p+p:',p+p)
    # print('  p+2:',p+2)
    # print('  p*p:',p*p)
    # print('  p*2:',p*2)
    print('End simple tests\n')

    # Course-provided interactive test driver (project-local module).
    import driver
    #driver.default_show_exception=True
    #driver.default_show_exception_message=True
    #driver.default_show_traceback=True
    driver.driver()
"solomc1@uci.edu"
] | solomc1@uci.edu |
a6cabcb9ea1e73f3bab049fdcd298ff5eb732bd9 | ef2cb8b5296f560e0fa23250235513c7c4880f20 | /generate/oogenerateDetectors.py | 84028662fc1ec0b13d49725f54dfcc35c0c6a7e6 | [] | no_license | yetkinyilmaz/TrackML2D | d34a22d63c8ba9af13a12074163155458db84913 | 9ae81faa25a5f3d8f1413cb01611674cbb421f49 | refs/heads/master | 2021-06-10T07:49:43.309911 | 2017-01-20T15:52:51 | 2017-01-20T15:52:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | #!/usr/bin/env python2.7
# oogenerateDetectors.py
# Thomas Boser
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from sympy.geometry import Circle, Point
class Detector:
""" detector constructor """
def __init__(self, radius):
self.center = np.array([0, 0]) #2d vector, origin center assumption
self.radius = radius
self.symcenter = Point(self.center[0], self.center[1])
self.circle = Circle(self.symcenter, self.radius)
#override equals and not equals operators to prevent duplicate detectors
def __eq__(self, other):
return self.radius == other.radius
def __ne__(self, other):
return not self.__eq__(other)
def plotDetector(self):
""" plot a detector (circle) to plt figure """
ax = plt.gca()
ax.add_patch(patches.Circle((self.center[0],
self.center[1]),
self.radius,
fill = False))
def printDetector(self):
""" print detector information to stdout """
print("Center = ", self.center,", radius = ", self.radius, sep='') | [
"yetkin.yilmaz@cern.ch"
] | yetkin.yilmaz@cern.ch |
82edb1c36c41bd2877809c3495a9a0871547f5e0 | 56af421807d628ef95736cf2fff28157d5e380fe | /backend/urls.py | 1fb986d395fb35a29f0048245be54337cd7ae42f | [] | no_license | ramesharun/docker-django-react-postgres | 9d80a06ad47b9d095717bab65c880cab04f775a1 | ae363e6e0bd769bd9ea5b4e5842497fef6c913cb | refs/heads/master | 2022-12-25T23:12:28.025637 | 2020-10-15T11:57:43 | 2020-10-15T11:57:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | from django.urls import path, include
from rest_framework import routers
from . import views
# create router for backend api
apiRouter = routers.DefaultRouter()
apiRouter.register(r'users', views.UserViewSet)
apiRouter.register(r'groups', views.GroupViewSet)
# urls provided by the backend
urlpatterns = [
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('api/', include(apiRouter.urls)),
path('', views.index, name='index'),
] | [
"bharathjinka09@gmail.com"
] | bharathjinka09@gmail.com |
e87cab3914d1e206d7e9ea8d3424146277a31569 | ea727658bb22df6dd0a4d8aaff5d13beec8ec8b5 | /examples/大數據資料分析/範例程式/第06章/program6-1.py | adae6ac9820afd93a006db32a3581de23611daca | [] | no_license | kelvinchoiwc/DataScience_1082 | f8e31c230776f45d70f6b96ef81d16881118e453 | 199f915540afe6c9a9ec7055aac5911420d783be | refs/heads/master | 2023-07-24T04:29:01.763893 | 2021-09-01T12:33:21 | 2021-09-01T12:33:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | import random
def rand(n):
for i in range(1, n+1):
rn = random.randint(1, 100)
if i % 10 == 0:
print('%4d'%(rn))
else:
print('%4d'%(rn), end = ' ')
def main():
num = eval(input('Enter num: '))
rand(num)
main() | [
"jumbokh@gmail.com"
] | jumbokh@gmail.com |
929e3e0b42ff91c77e3e0b8035ac87c503ce9a0f | 2a1059be149fb2eb01e0d12a98752ecccada20a6 | /A_closure_decorator/05_装饰器引入.py | fbd786b2c8d868b33d2b37b74a604ccf2a664ab0 | [] | no_license | Jhon-Chen/Code_Practice | 0796c0eb4f3e4ba82e6da9cd5dd19afb5ccec51d | e4bfa900b95764880e2ca2fcfb0389113f570147 | refs/heads/master | 2022-12-11T10:02:14.895722 | 2019-09-08T01:46:32 | 2019-09-08T01:46:32 | 186,768,893 | 0 | 1 | null | 2022-12-08T02:34:33 | 2019-05-15T07:06:10 | Python | UTF-8 | Python | false | false | 264 | py | # 第一波
def foo():
print("foo")
foo()
# 第二波
def fool():
print("fool")
fool = lambda x: x + 1
# 这一句只会执行lambda表达式,而不再是原来的fool函数了,因为fool已经被重新指向了一个匿名的函数
print(fool(5))
| [
"17368089403@163.com"
] | 17368089403@163.com |
a71c8b2923712f909b66fffb0f36142d25d52f6d | 4cdde9df324e1e528d4440a34f5e6bbcdf4b8ed2 | /PythonAssignments/p14/p14p3.py | 282c15b07c0e795a4e726a953f66a7af955181f4 | [] | no_license | enxicui/Python | e2254c2f941bb51b402df852668777ad174878e2 | 3ea8251cd61b995e6f08ff5a906181ca7dc4f1ce | refs/heads/master | 2020-12-09T09:29:59.369488 | 2020-01-11T16:48:47 | 2020-01-11T16:48:47 | 233,263,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | '''
pseudocode
for number between 2 to 20:
for i in between 2 to number::
if number % i = 0:
output number, 'equals', i, '*', number//i
break
endif
else:
print(number, 'is a prime number')
end for
endfor
output 'Finished!'
'''
#
# Program to illustrate the use of the else statement on a for loop
# Search for prime numbers in a range of integers
# Look for prime numbers in a range of integers
for number in range(2, 20):
for i in range(2, number):
if number % i == 0:
print(number, 'equals', i, '*', number//i)
break
else:
# loop fell through without finding a factor
print(number, 'is a prime number')
print('Finished!')
| [
"noreply@github.com"
] | enxicui.noreply@github.com |
1abb33a3fd4ed487a225db01117f7757070b52dc | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /zoJeCZCgmA8pS2iAi_14.py | 64eb36321ba9c4dd18e151696f676657959d34f4 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | """
Create a function that takes a list of functions and sorts them in ascending
order based on how many calls are needed for them to return a non-function.
### Examples
f1 = lambda: "hello"
# f1() ➞ "hello"
f2 = lambda: lambda: "edabit"
# f2()() ➞ "edabit"
f3 = lambda: lambda: lambda: "user"
# f3()()() ➞ "user"
func_sort([f2, f3, f1]) ➞ [f1, f2, f3]
# [f2, f3, f1] ➞ [2, 3, 1] ➞ [1, 2, 3] ➞ [f1, f2, f3]
func_sort([f1, f2, f3]) ➞ [f1, f2, f3]
# [f1, f2, f3] ➞ [1, 2, 3] ➞ [1, 2, 3] ➞ [f1, f2, f3]
func_sort([f2, "func"]) ➞ ["func", f2]
# [f2, "func"] ➞ [2, 0] ➞ [0, 2] ➞ ["func", f2]
### Notes
* Treat non-functions as needing zero calls.
* Every function will be called with empty parameters.
* Every function will need to be called at least once.
* The potentially returned values include `int`s, `float`s, and `list`s, among others.
"""
def func_sort(lst):
func_type = type(func_sort)
def call_count(f):
if type(f) != func_type:
return 0
return call_count(f()) + 1
return sorted(lst, key = lambda f: call_count(f))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
1e25413c147f801ab769e704fd331e6a52fd67bc | 03e3138f99f275d15d41a5c5bfb212f85d64d02e | /source/res/scripts/client/gui/shared/tooltips/marathon.py | 55d7280f7102b6a13164c7e410b77940e427563f | [] | no_license | TrenSeP/WorldOfTanks-Decompiled | e428728e7901146d0b599d02c930d70532232a97 | 1faa748acec1b7e435b657fd054ecba23dd72778 | refs/heads/1.4.1 | 2020-04-27T08:07:49.813023 | 2019-03-05T17:37:06 | 2019-03-05T17:37:06 | 174,159,837 | 1 | 0 | null | 2019-03-06T14:33:33 | 2019-03-06T14:24:36 | Python | UTF-8 | Python | false | false | 4,655 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/shared/tooltips/marathon.py
from CurrentVehicle import g_currentVehicle
from gui import makeHtmlString
from gui.marathon.marathon_constants import MARATHON_STATE, MARATHON_WARNING
from gui.shared.formatters import text_styles, icons
from gui.shared.tooltips import TOOLTIP_TYPE, formatters
from gui.shared.tooltips.common import BlocksTooltipData
from helpers import dependency
from helpers.i18n import makeString as _ms
from skeletons.gui.game_control import IMarathonEventsController
class MarathonEventTooltipData(BlocksTooltipData):
    """Block-based tooltip for a marathon event flag in the lobby.

    Renders a header (icon + title), a body text and -- once the marathon
    has started -- an optional bottom status block (completion label,
    vehicle/battle-type warning, step progress or discount).
    """
    # Injected controller used to resolve the marathon event by its prefix.
    _marathonsCtrl = dependency.descriptor(IMarathonEventsController)
    def __init__(self, context):
        super(MarathonEventTooltipData, self).__init__(context, TOOLTIP_TYPE.QUESTS)
        self._setContentMargin(top=2, bottom=3, left=1, right=1)
        self._setMargins(afterBlock=0)
        self._setWidth(303)
    def _packBlocks(self, questType, prefix, *args, **kwargs):
        """Build the tooltip block list for the marathon named *prefix*.

        ``questType`` is accepted for interface compatibility but unused here.
        """
        self._marathonEvent = self._marathonsCtrl.getMarathon(prefix)
        self.__tooltipData = self._marathonEvent.getTooltipData()
        self.__iconsData = self._marathonEvent.getIconsData()
        items = super(MarathonEventTooltipData, self)._packBlocks()
        state = self._marathonEvent.getState()
        items.append(self._getHeader(state))
        items.append(self._getBody(state))
        # Bottom status only makes sense once the marathon has started, and
        # only if the event's data asks for it.
        if state != MARATHON_STATE.NOT_STARTED and self._marathonEvent.data.showFlagTooltipBottom:
            items.append(self._getBottom(state))
        return items
    def _getHeader(self, _):
        """Return the header block: big title plus an optional inline icon."""
        icon, text = self._marathonEvent.getTooltipHeader()
        if icon:
            formattedText = '{} {}'.format(icons.makeImageTag(icon, width=16, height=16), text_styles.main(text))
        else:
            formattedText = '{}'.format(text_styles.main(text))
        return formatters.packImageTextBlockData(title=text_styles.highTitle(_ms(self.__tooltipData.header)), img=self.__iconsData.tooltipHeader, txtPadding=formatters.packPadding(top=25), txtOffset=20, txtGap=-8, desc=formattedText)
    def _getBody(self, state):
        """Return the body text block; a finished marathon shows the extra-days variant."""
        if state == MARATHON_STATE.FINISHED:
            text = text_styles.main(_ms(self.__tooltipData.bodyExtra, day=self._marathonEvent.getExtraDaysToBuy()))
        else:
            text = text_styles.main(self.__tooltipData.body)
        return formatters.packTextBlockData(text=text, padding=formatters.packPadding(left=20, top=10, bottom=20, right=10))
    def _getBottom(self, state):
        """Return the bottom status block for a started marathon.

        NOTE(review): relies on ``g_currentVehicle`` and ``makeHtmlString``,
        which are not imported in this chunk -- presumably imported elsewhere
        in the file; confirm before moving this class.
        """
        vehicle = g_currentVehicle.item
        isObtained = self._marathonEvent.isVehicleObtained()
        if isObtained:
            # Reward vehicle already obtained: show the "completed" label.
            statusLabel = text_styles.bonusAppliedText(icons.makeImageTag(self.__iconsData.libraryOkIcon, vSpace=-2) + ' ' + _ms(self.__tooltipData.extraStateCompleted))
            return formatters.packTextBlockData(text=makeHtmlString('html_templates:lobby/textStyle', 'alignText', {'align': 'center',
             'message': statusLabel}), padding=formatters.packPadding(bottom=20))
        if state == MARATHON_STATE.IN_PROGRESS:
            # Warn when the currently selected vehicle / battle type cannot
            # progress the marathon.
            warning = self._marathonEvent.checkForWarnings(vehicle)
            if warning == MARATHON_WARNING.WRONG_BATTLE_TYPE:
                return formatters.packTextBlockData(text=makeHtmlString('html_templates:lobby/textStyle', 'alignText', {'align': 'center',
                 'message': text_styles.critical(_ms(self.__tooltipData.errorBattleType))}), padding=formatters.packPadding(bottom=20))
            if warning == MARATHON_WARNING.WRONG_VEH_TYPE:
                return formatters.packTextBlockData(text=makeHtmlString('html_templates:lobby/textStyle', 'alignText', {'align': 'center',
                 'message': text_styles.critical(_ms(self.__tooltipData.errorVehType))}), padding=formatters.packPadding(bottom=20))
        currentStep, allStep = self._marathonEvent.getMarathonProgress()
        if allStep:
            # Step-based progress (e.g. "step X of Y").
            return formatters.packTextBlockData(text=makeHtmlString('html_templates:lobby/textStyle', 'alignText', {'align': 'center',
             'message': text_styles.middleTitle(_ms(self.__tooltipData.extraStateSteps, currentStep=currentStep, allStep=text_styles.main(allStep)))}), padding=formatters.packPadding(bottom=20))
        else:
            # No step counter: fall back to the earned discount.
            discount = self._marathonEvent.getMarathonDiscount()
            return formatters.packTextBlockData(text=makeHtmlString('html_templates:lobby/textStyle', 'alignText', {'align': 'center',
             'message': text_styles.bonusPreviewText(_ms(self.__tooltipData.extraStateDiscount, discount=discount))}), padding=formatters.packPadding(bottom=20))
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
078a35e55329cd7252f81fd90c8499201bd61398 | 7f6348437a68aef1035824e3c2ac7476e81cf5f0 | /Linked List/remove-nth-node-from-list-end.py | 3c285594d35466a935fd2c69b950ea9f1e8fae53 | [] | no_license | wilfredarin/Interviewbit | eff18ce504a2178dbed0441f9b43df51f0a34dd1 | a13de501db6e0af9d58936ffa9abdd9eebc7578d | refs/heads/master | 2023-01-27T15:36:11.726563 | 2020-12-10T15:09:14 | 2020-12-10T15:09:14 | 233,567,842 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | def removeNthFromEnd(self, A, B):
temp = A
c = 0
prev = None
Next = None
while temp:
temp = temp.next
c+=1
if c==1:
return None
if c<=B :
temp = A
Next = temp.next
temp = None
return Next
n = c-B -1
c = 0
temp = A
while c!=n:
temp = temp.next
c+=1
a = temp.next
temp.next = temp.next.next
a = None
return A
| [
"noreply@github.com"
] | wilfredarin.noreply@github.com |
d6a9ebc28418cacc04b94912ffdc61dcd9a95643 | 2b776f6420af6400120bf08c801159870dbd1ec6 | /astra4.py | fb7b08866d8ca0b3264a36bd9810f6182b4a5462 | [] | no_license | ManojDjs/wescraping | b7dcd0423fa5ba44f38b332afc4540e49d309b75 | fa3ae90f35e07b0ff06037673c50e47e42633900 | refs/heads/master | 2022-01-31T23:19:53.311009 | 2019-07-22T14:51:13 | 2019-07-22T14:51:13 | 191,214,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,339 | py | import time
from selenium import webdriver
import pymongo
from selenium.webdriver.common.keys import Keys
from pymongo import MongoClient, DESCENDING
from flask import Flask, jsonify
import requests
from pymongo import MongoClient
app = Flask(__name__)
@app.route('/')
def success():
client = MongoClient("mongodb://localhost:27017/basics")
if(client):
print("connected")
db = client.JOBS
collections = db.astrazenca
options = webdriver.ChromeOptions()
preferences = {'profile.default_content_setting_values': {'images': 2}}
options.add_experimental_option('prefs', preferences)
options.add_argument("start-maximized")
options.add_argument('headless')
#options.add_argument("--disable-extensions")
driver = webdriver.Chrome(options=options, executable_path=r'D:\Office_Files\chromedriver.exe')
doc=collections.find({'Recheck':0}).skip(150).limit(50)
for i in doc:
try:
url=i['url']
print(url)
print(i['location'])
driver.get(url)
time.sleep(5)
location=driver.find_element_by_xpath('//div[@class="position-location"]').text
jobIdk=driver.find_element_by_xpath('//span[@class="job-id job-info"]').text
jobId=jobIdk.split(":")
jobId=jobId[1]
jobdescription=driver.find_element_by_xpath('//div[@class="ats-description"]').text
postedDatek=driver.find_element_by_xpath('//span[@class="job-date job-info"]').text
postedDate=postedDatek.split(":")
postedDate=postedDate[1]
print(jobId)
print(location)
print(jobdescription)
print(postedDate)
collections.update_one({'_id':i['_id']},{'$set':{
'location':location,
'jobId':jobId,
'Recheck':1,
'jobDescription':jobdescription,
'salary':'',
'validThrough':'',
'postedDate':postedDate,
'jobType':''
}})
print('updated')
except:
collections.update_one({'_id':i['_id']},{'$set':{'Recheck':404, }})
driver.quit()
return "DOne"
if __name__ == '__main__':
app.run(port=3808)
| [
"noreply@github.com"
] | ManojDjs.noreply@github.com |
8bf2e084ba8374c5757eab9c5e6fb265259933f2 | f9e72443eb81e33b47afc32b025ac5fbec914aa0 | /Code/Visualization/TheNatureOfCode/noc_1_3_vectormath.py | bdb39ffbced9e6b2352d5e9e8499674e06c430be | [
"MIT"
] | permissive | samsanket/TeachingDataScience | 568c2a468dff575eed71a80d62156c092cc2aa48 | 2ddf18ff7d0657e0571cab8f43175b23c5e6fd87 | refs/heads/master | 2023-06-04T18:58:19.762306 | 2021-06-20T02:04:47 | 2021-06-20T02:04:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | # The Nature of Code - Daniel Shiffman http://natureofcode.com
# Example 1-3: Vector Math
# PyP5 port by: Yogesh Kulkarni
# Adpoted from processing.py based implementation at:
# https://github.com/nature-of-code/noc-examples-python/blob/master/chp01_vectors/NOC_1_3_vector_subtraction
# https://github.com/nature-of-code/noc-examples-python/blob/master/chp01_vectors/NOC_1_4_vector_multiplication/
# But followed on screen example
# Reference Youtube Video: https://www.youtube.com/watch?v=rqecAdEGW6I&list=PLRqwX-V7Uu6aFlwukCmDf0-1-uSR7mklK&index=9
from p5 import *
def setup():
    """p5 setup hook: runs once to create the 500x300 canvas."""
    size(500, 300)
def draw():
    """Per-frame p5 hook: dot at the canvas center, line toward the mouse.

    The vector from the center to the mouse position is computed with
    vector subtraction and shrunk to a tenth of its length before drawing.
    """
    background(255)
    strokeWeight(2)
    stroke(0)
    noFill()
    translate(width / 2, height / 2)
    ellipse(0, 0, 4, 4)
    center = Vector(width / 2, height / 2)
    offset = (Vector(mouse_x, mouse_y) - center) * 0.1
    line(0, 0, offset.x, offset.y)
if __name__ == "__main__":
    run()  # start the p5 event loop: setup() once, then draw() every frame
"yogeshkulkarni@yahoo.com"
] | yogeshkulkarni@yahoo.com |
863d79f9526beea090b0d748cb2dc8f654157221 | a88664c77a606a6d2fcf47999805c226c514d111 | /test/integration/elastic_db_repo/disk_usage.py | 6ce82a8ea832165fe9868f80aa3c46057b44a91d | [
"MIT"
] | permissive | deepcoder42/elastic-repo | f10316aa41b927085935ee9d07ff491286d04f20 | 052a7041e45aec2fd2b6ff31a597c47edf2b3fec | refs/heads/master | 2023-03-21T03:03:24.916324 | 2021-03-14T21:19:29 | 2021-03-14T21:19:29 | 347,756,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | #!/usr/bin/python
# Classification (U)
"""Program: disk_usage.py
Description: Integration testing of disk_usage in elastic_db_repo.py.
Usage:
test/integration/elastic_db_repo/disk_usage.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
import shutil
import time
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import elastic_db_repo
import lib.gen_libs as gen_libs
import elastic_lib.elastic_class as elastic_class
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
    """Class: UnitTest
    Description: Class which is a representation of a unit testing.
    Methods:
        setUp -> Integration testing initialization.
        test_disk_usage -> Test displaying disk usage.
        tearDown -> Clean up of integration testing.
    """
    def setUp(self):
        """Function: setUp
        Description: Initialization for unit testing.  Loads the test config,
            connects to Elasticsearch and creates a scratch repository.
        Arguments:
        """
        self.base_dir = "test/integration/elastic_db_repo"
        self.test_path = os.path.join(os.getcwd(), self.base_dir)
        self.config_path = os.path.join(self.test_path, "config")
        self.cfg = gen_libs.load_module("elastic", self.config_path)
        self.repo_name = "TEST_INTR_REPO"
        self.repo_dir = os.path.join(self.cfg.log_repo_dir, self.repo_name)
        self.phy_repo_dir = os.path.join(self.cfg.phy_repo_dir, self.repo_name)
        self.els = elastic_class.ElasticSearchRepo(self.cfg.host,
                                                   self.cfg.port)
        # Require a clean Elasticsearch instance: skip the test if any
        # repository already exists, otherwise create the scratch repo.
        if self.els.repo_dict:
            print("ERROR: Test environment not clean - repositories exist.")
            self.skipTest("Pre-conditions not met.")
        else:
            _, _ = self.els.create_repo(repo_name=self.repo_name,
                                        repo_dir=self.repo_dir)
    @unittest.skip("Error: Fails in a docker setup environment.")
    def test_disk_usage(self):
        """Function: test_disk_usage
        Description: Test displaying disk usage.
        Arguments:
        """
        # Wait until the repo dir has been created.
        # NOTE(review): this poll has no timeout -- it spins forever if the
        # repository directory never appears.
        while True:
            if not os.path.isdir(self.phy_repo_dir):
                time.sleep(1)
            else:
                break
        with gen_libs.no_std_out():
            self.assertFalse(elastic_db_repo.disk_usage(self.els))
    def tearDown(self):
        """Function: tearDown
        Description: Clean up of integration testing.  Deletes the scratch
            repository and removes its directory from disk.
        Arguments:
        """
        err_flag, msg = self.els.delete_repo(self.repo_name)
        if err_flag:
            print("Error: Failed to remove repository '%s'"
                  % self.repo_name)
            print("Reason: '%s'" % (msg))
        if os.path.isdir(self.phy_repo_dir):
            shutil.rmtree(self.phy_repo_dir)
if __name__ == "__main__":
    unittest.main()
| [
"deepcoder42@gmail.com"
] | deepcoder42@gmail.com |
f4ba95db8bc42bc20b617620aa2acc8788c55a23 | d61183674ed7de0de626490cfba77d67c298d1be | /py_scripts/plot_nca_illustration.py | 121c1976856946c3c69e9d52d1aac36e257859f1 | [] | no_license | Giannos-G/python_dataset | bc670a53143d92cf781e88dee608da38b0e63886 | 18e24cbef16ada1003a3e15a2ed2a3f995f25e46 | refs/heads/main | 2023-07-25T20:24:31.988271 | 2021-09-09T10:31:41 | 2021-09-09T10:31:41 | 363,489,911 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,144 | py | """
=============================================
Neighborhood Components Analysis Illustration
=============================================
This example illustrates a learned distance metric that maximizes
the nearest neighbors classification accuracy. It provides a visual
representation of this metric compared to the original point
space. Please refer to the :ref:`User Guide <nca>` for more information.
"""
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.neighbors import NeighborhoodComponentsAnalysis
from matplotlib import cm
from scipy.special import logsumexp
print(__doc__)
# %%
# Original points
# ---------------
# First we create a data set of 9 samples from 3 classes, and plot the points
# in the original space. For this example, we focus on the classification of
# point no. 3. The thickness of a link between point no. 3 and another point
# is proportional to their distance.
# 9 two-dimensional samples in 3 well-separated classes (fixed seed).
X, y = make_classification(n_samples=9, n_features=2, n_informative=2,
                           n_redundant=0, n_classes=3, n_clusters_per_class=1,
                           class_sep=1.0, random_state=0)
plt.figure(1)
ax = plt.gca()
# Label each sample with its index and color it by class.
for i in range(X.shape[0]):
    ax.text(X[i, 0], X[i, 1], str(i), va='center', ha='center')
    ax.scatter(X[i, 0], X[i, 1], s=300, c=cm.Set1(y[[i]]), alpha=0.4)
ax.set_title("Original points")
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.axis('equal')  # so that boundaries are displayed correctly as circles
def link_thickness_i(X, i):
    """Return softmax weights over squared distances from point ``i``.

    The self-weight is forced to zero by setting point ``i``'s own distance
    to infinity before the softmax, and the log-sum-exp trick keeps the
    exponentiation numerically stable.
    """
    deltas = X[i] - X
    sq_dists = (deltas * deltas).sum(axis=1)
    sq_dists[i] = np.inf
    # softmax(-d) computed in log space to avoid overflow/underflow
    return np.exp(-sq_dists - logsumexp(-sq_dists))
def relate_point(X, i, ax):
    """Draw a line from point ``i`` to every other point of ``X`` on ``ax``.

    Line width is proportional to each neighbour's softmax weight (see
    ``link_thickness_i``); colors come from the module-level label array
    ``y`` via the ``cm.Set1`` colormap.
    """
    # Hoisted out of the loop: the weights depend only on (X, i) and were
    # previously recomputed once per neighbour.
    thickness = link_thickness_i(X, i)
    pt_i = X[i]
    for j, pt_j in enumerate(X):
        if i != j:
            line = ([pt_i[0], pt_j[0]], [pt_i[1], pt_j[1]])
            ax.plot(*line, c=cm.Set1(y[j]),
                    linewidth=5 * thickness[j])
# Focus on sample no. 3 for the link-thickness illustration.
i = 3
relate_point(X, i, ax)
#plt.show()
# %%
# Learning an embedding
# ---------------------
# We use :class:`~sklearn.neighbors.NeighborhoodComponentsAnalysis` to learn an
# embedding and plot the points after the transformation. We then take the
# embedding and find the nearest neighbors.
nca = NeighborhoodComponentsAnalysis(max_iter=30, random_state=0)
nca = nca.fit(X, y)
plt.figure(2)
ax2 = plt.gca()
X_embedded = nca.transform(X)
relate_point(X_embedded, i, ax2)
# Re-plot every sample (index label + class color) in the learned space.
for i in range(len(X)):
    ax2.text(X_embedded[i, 0], X_embedded[i, 1], str(i),
             va='center', ha='center')
    ax2.scatter(X_embedded[i, 0], X_embedded[i, 1], s=300, c=cm.Set1(y[[i]]),
                alpha=0.4)
ax2.set_title("NCA embedding")
ax2.axes.get_xaxis().set_visible(False)
ax2.axes.get_yaxis().set_visible(False)
ax2.axis('equal')
#plt.show()
| [
"giannos.gavrielides@gmail.com"
] | giannos.gavrielides@gmail.com |
4d116e9f48c01c916108f6b25030e48995c015ed | 51605d344bdf22070ab546144660caba3ecb1186 | /124alexus.py | f80c19cd59a73d41b1b19e1589aa139b703abf88 | [] | no_license | riley-csp-2019-20/1-2-4-turtle-escape-alexus83142 | 5f962f0761ec7b6c6dd57f1ff09b812bd0a35bd0 | 37cb6af6691b2c61cca520b17e52cc66f2d94ae1 | refs/heads/master | 2020-10-01T23:47:53.203098 | 2019-12-13T15:57:27 | 2019-12-13T15:57:27 | 227,650,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | import turtle as trtl
drawBot = trtl.Turtle()
drawBot.ht()  # hide the turtle cursor while drawing
# Spiral maze: each wall segment grows by wall_width per turn, and a pen-up
# gap in every wall leaves a "door" for the escape path.
amount = 15
wall_width = 10
for i in range(25):
    # First part of the wall, then lift the pen to leave the gap.
    drawBot.forward(amount/3)
    drawBot.penup()
    drawBot.forward(wall_width+5)
    drawBot.pendown()
    drawBot.forward(amount/5)
    if i > 4:
        # After the first few walls, add a short perpendicular stub
        # (drawn out and retraced back).
        drawBot.left(90)
        drawBot.forward(wall_width+10)
        drawBot.backward(wall_width+10)
        drawBot.right(90)
    drawBot.forward(2*amount/3)
    drawBot.left(90)  # turn to start the next, longer wall
    amount += wall_width
wn = trtl.Screen()
wn.mainloop()  # keep the window open until the user closes it
"noreply@github.com"
] | riley-csp-2019-20.noreply@github.com |
9885b1468a9955b2abebf766246b43363943cfde | 6e507e231d37d0b61d70d6694ffc928c1c638973 | /pie.py | bbe0882646f06c73f4ab0246d7779c76f3280bce | [] | no_license | pjz987/pdx_code_intro_class | 7a4998be23b90883aad55664ceb97baffe3fcf92 | e85c2e01718e75124b956b924af01e87cdd95ee1 | refs/heads/master | 2020-09-12T21:25:50.152682 | 2019-11-18T23:10:38 | 2019-11-18T23:10:38 | 222,561,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | '''
filename : pie.py
#if #
'''
import random
#through import in at the top if you need it
# NOTE(review): pie_list is defined but never used -- the valid choices are
# repeated inline in the loop below.
pie_list = ['pumpkin', 'keylime']
ingredient_list = ['pumpkins', 'keylimes']
# Prompt until the user names a supported pie.
while True:
    user_pie = input("What pie would you like to make? ").lower()
    if user_pie in ['pumpkin', 'keylime']:
        break
#.lower() makes the user input lowercase
# Randomly decide which ingredient is in season this run.
in_season = random.choice(ingredient_list)
print(f"{in_season} are in season")
if user_pie == 'pumpkin':
    if in_season == 'pumpkins':
        print("Yum! Thanksgiving pie!")
    if in_season == 'keylimes':
        print("Bad time for pumpkin pie.")
if user_pie == 'keylime':
    if in_season == 'keylimes':
        print("Great time for lime!")
    if in_season == 'pumpkins':
        print("Wouldn't you prefer pumpkin?")
| [
"pwj2012@gmail.com"
] | pwj2012@gmail.com |
c998f6bbb7480d8a244e757af0f08e32e02a7dc4 | fbf84df5f400af88e010bba7978eeba7953bd9eb | /test.py | 830709b6be683d9dede2eef4d77e2e9aa0885a17 | [] | no_license | dheeraj7596/covid-analysis | f9e695908b6a867528aa610de71abab7b31a8375 | b7e3cae177797bdf5da552fc2f93f750ac888f7c | refs/heads/master | 2022-04-10T01:20:32.016964 | 2020-03-27T14:52:14 | 2020-03-27T14:52:14 | 250,349,378 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | import pickle
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
if __name__ == "__main__":
    data_path = "./data/"
    # NOTE(review): the pickle file handle is never closed; a
    # `with open(...)` block would be safer.
    phrase_freq_map = pickle.load(open(data_path + "phrase_freq_map.pkl", "rb"))
    # Keep only multi-word phrases; single tokens are dropped.
    multi_word_phrase_freq_map = {}
    for p in phrase_freq_map:
        if len(p.strip().split()) > 1:
            multi_word_phrase_freq_map[p] = phrase_freq_map[p]
    stopwords = set(STOPWORDS)
    wordcloud = WordCloud(stopwords=stopwords, background_color="white").generate_from_frequencies(
        multi_word_phrase_freq_map)
    # Display the generated image:
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    plt.show()
    pass
| [
"dheeraj7596.dm@gmail.com"
] | dheeraj7596.dm@gmail.com |
6d853b88dbe6c22ce940287ce2034466c474b9ac | 19d03d646fcee318cca8078af27636732290d77b | /parlai/agents/image_seq2seq/image_seq2seq.py | 19692a9676a2d4e6d2f7e5b09b24973abbe39bd4 | [
"MIT"
] | permissive | yongkyung-oh/CMU-Studio-Project | 2d6fe6ef6fa30fda1a4f2d1fc45c5b85d6143775 | 448492f342e8157df2e736aa52825b66b1d66fd7 | refs/heads/master | 2022-10-24T16:56:46.763865 | 2020-07-01T10:03:00 | 2020-07-01T10:03:00 | 252,878,283 | 2 | 5 | MIT | 2021-03-25T23:50:27 | 2020-04-04T01:02:44 | Python | UTF-8 | Python | false | false | 7,246 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Image+Seq2Seq Agent.
"""
from typing import Dict, List, Tuple
import torch
from .modules import ImageSeq2seqModel
from parlai.agents.transformer.transformer import TransformerGeneratorAgent
from parlai.core.dict import DictionaryAgent
from parlai.core.torch_agent import Batch
from parlai.core.torch_image_agent import TorchImageAgent
TOKEN_IMAGE = '__image__'
TOKEN_NO_IMAGE = '__no_image__'
class ImageSeq2seqAgent(TransformerGeneratorAgent, TorchImageAgent):
    """
    ImageSeq2seqAgent Agent.
    Combines a transformer generator with images: image features are fed to
    the model alongside the text, and a special __image__ / __no_image__
    token can be appended to each example's text vector.
    """
    def build_model(self) -> ImageSeq2seqModel:
        """
        Override to build appropriate model.
        Also copies pretrained embeddings into the model when
        --embedding-type is not 'random'.
        """
        self.model = ImageSeq2seqModel(self.opt, self.dict)
        if self.opt['embedding_type'] != 'random':
            self._copy_embeddings(
                self.model.embeddings.weight, self.opt['embedding_type']
            )
        return self.model
    @classmethod
    def add_cmdline_args(cls, argparser):
        """
        Override to add one arg (--include-image-token) on top of both
        parents' command-line arguments.
        """
        TransformerGeneratorAgent.add_cmdline_args(argparser)
        TorchImageAgent.add_cmdline_args(argparser)
        group = argparser.add_argument_group('Image Encoder Args')
        group.add_argument(
            '--include-image-token',
            type='bool',
            default=True,
            recommended=True,
            help='if true, include image token (or no image token) for each example',
        )
    def build_dictionary(self) -> DictionaryAgent:
        """
        Override to include image tokens.
        Registers __image__ / __no_image__ with a frequency of 1 when
        --include-image-token is set and the tokens are not present yet.
        """
        self.dict = super().build_dictionary()
        if self.opt.get('include_image_token') and TOKEN_IMAGE not in self.dict:
            self.dict[TOKEN_IMAGE] = 1
            self.dict[TOKEN_NO_IMAGE] = 1
        return self.dict
    def _set_text_vec(self, *args, **kwargs) -> dict:
        """
        Override to include image token.
        Appends the id of __image__ (or __no_image__ when the observation
        carries no image) to the end of the text vector, first truncating
        the vector by one so the combined length still fits.
        """
        obs = super()._set_text_vec(*args, **kwargs)
        if 'text' not in obs or 'text_vec' not in obs:
            return obs
        if self.opt.get('include_image_token', False):
            # `truncate` is the third arg to this function
            # (reduced by one to reserve a slot for the appended token)
            truncate = args[2] - 1 if args[2] is not None else None
            vec = torch.LongTensor(
                self._check_truncate(obs['text_vec'], truncate, True)
            )
            token = TOKEN_NO_IMAGE
            if obs.get('image', None) is not None:
                token = TOKEN_IMAGE
            obs.force_set(
                'text_vec',
                torch.cat([vec, vec.new_tensor(self.dict[token]).unsqueeze(0)], 0),
            )
        return obs
    def _dummy_batch(self, batchsize: int, maxlen: int) -> Batch:
        """
        Override to include image feats.
        NOTE(review): tensors are placed on GPU unconditionally (.cuda()),
        so this assumes a CUDA device is available.
        """
        return Batch(
            text_vec=torch.ones(batchsize, maxlen).long().cuda(),
            label_vec=torch.ones(batchsize, 2).long().cuda(),
            image=torch.ones(batchsize, self.image_features_dim).cuda(),
            personalities=torch.ones(batchsize, self.opt.get('embedding_size')).cuda(),
        )
    def batchify_image_features(self, batch: Batch) -> Batch:
        """
        Format and return the batched image features.
        Image features represented by tensors will set to the right type.
        """
        # NOTE(review): the guard iterates `batch`, not `batch.image`;
        # `any(b is not None for b in batch.image)` looks intended --
        # confirm against the Batch container's iteration semantics.
        if type(batch.image) == list and any(b is not None for b in batch):
            images = []
            for img in batch.image:
                if isinstance(img, torch.Tensor):
                    img = self._process_image_features(img)
                images.append(img)
            batch.image = images
        return batch
    def _model_input(self, batch: Batch) -> Tuple[torch.Tensor, List[object]]:
        # The model consumes the token ids plus the per-example image features.
        return (batch.text_vec, batch.image)
    def load_state_dict(self, state_dict: Dict[str, torch.Tensor]):
        """
        Override for custom loading.
        Three reasons:
        1. When using an init model without an image encoder
        2. When using an init model with only an encoder provided
            In this case, we may need to add the START token to the state_dict
        3. When using an init model without image tokens in the embeddings.
            This is only the case if the embs differ by 2 in dimension 0
        """
        # Always take the dummy image encoding / mask buffers from the
        # freshly built model; they are not learned weights.
        state_dict['encoder.dummy_image_enc'] = self.model.encoder.dummy_image_enc
        state_dict['encoder.ones_mask'] = self.model.encoder.ones_mask
        # Case 1 -> No Image Encoder
        if 'encoder.image_encoder.0.weight' not in state_dict:
            for k, v in self.model.encoder.image_encoder.state_dict().items():
                state_dict[f'encoder.image_encoder.{k}'] = v
        # Case 2 -> Only an Encoder provided
        if not (any('decoder' in state_key for state_key in state_dict)):
            for k, v in self.model.decoder.state_dict().items():
                state_dict[f'decoder.{k}'] = v
            state_dict['decoder.embeddings.weight'] = state_dict['embeddings.weight']
        if 'START' not in state_dict:
            state_dict['START'] = self.model.START
        if self.opt['init_model'] is not None:
            try:
                self.model.load_state_dict(state_dict)
                return
            except RuntimeError as e:
                # Case 3 --> Check for Embedding Diffs. Make sure dims match up
                embs = state_dict['embeddings.weight']
                enc_embs = state_dict['encoder.embeddings.weight']
                dec_embs = state_dict['decoder.embeddings.weight']
                init_embs = self.model.embeddings.weight
                # Only recoverable when the new vocab is exactly the old one
                # plus the two image tokens; otherwise re-raise.
                if (
                    embs.shape[0] + 2 != init_embs.shape[0]
                    or embs.shape[1] != init_embs.shape[1]
                ):
                    raise e
                # Extend each embedding matrix with the two freshly
                # initialized rows (the image tokens) from the new model.
                state_dict.update(
                    {
                        'embeddings.weight': torch.cat(
                            (
                                embs.to(init_embs.device, dtype=init_embs.dtype),
                                init_embs[-2:, :],
                            )
                        ),
                        'encoder.embeddings.weight': torch.cat(
                            (
                                enc_embs.to(init_embs.device, dtype=init_embs.dtype),
                                init_embs[-2:, :],
                            )
                        ),
                        'decoder.embeddings.weight': torch.cat(
                            (
                                dec_embs.to(init_embs.device, dtype=init_embs.dtype),
                                init_embs[-2:, :],
                            )
                        ),
                    }
                )
                pct_init = round(embs.shape[0] / len(self.dict) * 100, 1)
                print(
                    f'Initialized embeddings for {embs.shape[0]} tokens ({pct_init}%)'
                )
        self.model.load_state_dict(state_dict)
| [
"yongkyung-oh@outlook.com"
] | yongkyung-oh@outlook.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.